// col2im_kernels.cu

#include <cuda_runtime.h>
#include <curand.h>
#include <cublas_v2.h>

#include "col2im.h"
#include "dark_cuda.h"

// src: https://github.com/BVLC/caffe/blob/master/src/caffe/util/im2col.cu
// You may also want to read: https://github.com/BVLC/caffe/blob/master/LICENSE
__global__ void col2im_gpu_kernel(const int n, const float* data_col,
        const int height, const int width, const int ksize,
        const int pad,
        const int stride,
        const int height_col, const int width_col,
        float *data_im)
{
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    for (; index < n; index += blockDim.x*gridDim.x) {
        float val = 0;
        int w = index % width + pad;
        int h = (index / width) % height + pad;
        int c = index / (width * height);
        // compute the start and end of the output
        int w_col_start = (w < ksize) ? 0 : (w - ksize) / stride + 1;
        int w_col_end = min(w / stride + 1, width_col);
        int h_col_start = (h < ksize) ? 0 : (h - ksize) / stride + 1;
        int h_col_end = min(h / stride + 1, height_col);
        // equivalent closed-form indexing (derived in the comment after this kernel)
        int offset =
            (c * ksize * ksize + h * ksize + w) * height_col * width_col;
        int coeff_h_col = (1 - stride * ksize * height_col) * width_col;
        int coeff_w_col = (1 - stride * height_col * width_col);
        for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
            for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
                val += data_col[offset + h_col * coeff_h_col + w_col * coeff_w_col];
            }
        }
        data_im[index] += val;
    }
}
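
// Derivation of the closed-form index above (not in the original source;
// added for clarity). A column entry for image pixel (c, h, w) lives at
//   ((c*ksize*ksize + h_k*ksize + w_k) * height_col + h_col) * width_col + w_col
// with kernel offsets h_k = h - h_col*stride and w_k = w - w_col*stride
// (h and w already include the padding). Substituting h_k and w_k and
// collecting the h_col and w_col terms gives
//   offset + h_col * (1 - stride*ksize*height_col) * width_col
//          + w_col * (1 - stride*height_col*width_col),
// which is exactly offset + h_col*coeff_h_col + w_col*coeff_w_col.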
void col2im_ongpu(float *data_col,
        int channels, int height, int width,
        int ksize, int stride, int pad, float *data_im)
{
    // We launch channels * height * width threads, each thread responsible
    // for accumulating one element of the input image.
    int height_col = (height + 2 * pad - ksize) / stride + 1;
    int width_col = (width + 2 * pad - ksize) / stride + 1;
    int num_kernels = channels * height * width;
    col2im_gpu_kernel<<<(num_kernels + BLOCK - 1) / BLOCK,
        BLOCK, 0, get_cuda_stream()>>>(
            num_kernels, data_col, height, width, ksize, pad,
            stride, height_col,
            width_col, data_im);
    CHECK_CUDA(cudaPeekAtLastError());
}
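
// Minimal usage sketch (illustrative only; col2im_example is not part of
// this file's API). It relies on CHECK_CUDA and BLOCK from dark_cuda.h, as
// used above, plus the plain CUDA runtime allocation calls. Note that
// col2im_gpu_kernel accumulates with "+=", so data_im must be zeroed first.
static void col2im_example(void)
{
    const int channels = 3, height = 8, width = 8;
    const int ksize = 3, stride = 1, pad = 1;
    const int height_col = (height + 2 * pad - ksize) / stride + 1;  // 8
    const int width_col = (width + 2 * pad - ksize) / stride + 1;    // 8

    float *data_col, *data_im;
    CHECK_CUDA(cudaMalloc(&data_col,
        sizeof(float) * channels * ksize * ksize * height_col * width_col));
    CHECK_CUDA(cudaMalloc(&data_im, sizeof(float) * channels * height * width));
    CHECK_CUDA(cudaMemset(data_im, 0, sizeof(float) * channels * height * width));

    // ... fill data_col, e.g. from a convolution backward pass ...
    col2im_ongpu(data_col, channels, height, width, ksize, stride, pad, data_im);

    cudaFree(data_col);
    cudaFree(data_im);
}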
// -----------------------------------------

// CUDA: use 512 threads per block
const int CAFFE_CUDA_NUM_THREADS = 512;

// CUDA: number of blocks needed to cover N threads
inline int CAFFE_GET_BLOCKS(const int N) {
    return (N + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS;
}

// CUDA: grid-stride looping
#define CUDA_KERNEL_LOOP(i, n) \
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
         i < (n); \
         i += blockDim.x * gridDim.x)
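
// For illustration (hypothetical kernel, not used elsewhere in this file):
// CUDA_KERNEL_LOOP expands to the same grid-stride pattern written out by
// hand in col2im_gpu_kernel above, so a fixed-size grid can cover any n.
__global__ void scale_kernel_example(const int n, float *x, const float alpha)
{
    CUDA_KERNEL_LOOP(i, n) {
        // each thread handles i, i + blockDim.x*gridDim.x, i + 2*that, ...
        x[i] *= alpha;
    }
}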
// https://github.com/BVLC/caffe/blob/master/src/caffe/util/im2col.cu
__global__ void col2im_gpu_kernel_ext(const int n, const float* data_col,
        const int height, const int width, const int channels,
        const int kernel_h, const int kernel_w,
        const int pad_h, const int pad_w,
        const int stride_h, const int stride_w,
        const int dilation_h, const int dilation_w,
        const int height_col, const int width_col,
        float* data_im)
{
    CUDA_KERNEL_LOOP(index, n) {
        float val = 0;
        const int w_im = index % width + pad_w;
        const int h_im = (index / width) % height + pad_h;
        const int c_im = index / (width * height);
        int kernel_extent_w = (kernel_w - 1) * dilation_w + 1;
        int kernel_extent_h = (kernel_h - 1) * dilation_h + 1;
        // compute the start and end of the output
        const int w_col_start =
            (w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1;
        const int w_col_end = min(w_im / stride_w + 1, width_col);
        const int h_col_start =
            (h_im < kernel_extent_h) ? 0 : (h_im - kernel_extent_h) / stride_h + 1;
        const int h_col_end = min(h_im / stride_h + 1, height_col);
        // TODO: use LCM of stride and dilation to avoid unnecessary loops
        for (int h_col = h_col_start; h_col < h_col_end; h_col += 1) {
            for (int w_col = w_col_start; w_col < w_col_end; w_col += 1) {
                int h_k = (h_im - h_col * stride_h);
                int w_k = (w_im - w_col * stride_w);
                if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
                    h_k /= dilation_h;
                    w_k /= dilation_w;
                    int data_col_index = (((c_im * kernel_h + h_k) * kernel_w + w_k) *
                        height_col + h_col) * width_col + w_col;
                    val += data_col[data_col_index];
                }
            }
        }
        data_im[index] = val;
    }
}
void col2im_gpu_ext(const float* data_col, const int channels,
        const int height, const int width, const int kernel_h, const int kernel_w,
        const int pad_h, const int pad_w, const int stride_h,
        const int stride_w, const int dilation_h, const int dilation_w,
        float* data_im)
{
    int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) /
        stride_h + 1;
    int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) /
        stride_w + 1;
    int num_kernels = channels * height * width;
    // To avoid atomic operations, we launch one thread per bottom (image)
    // element and let each thread sum over the top (column) entries that
    // touch it.
    // NOLINT_NEXT_LINE(whitespace/operators)
    col2im_gpu_kernel_ext<<<CAFFE_GET_BLOCKS(num_kernels),
        CAFFE_CUDA_NUM_THREADS>>>(
            num_kernels, data_col, height, width, channels, kernel_h, kernel_w,
            pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w,
            height_col, width_col, data_im);
    CHECK_CUDA(cudaPeekAtLastError());
}
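
// Worked example (hypothetical values): with dilation the effective kernel
// extent is dilation*(kernel-1)+1, so a 3x3 kernel with dilation_h =
// dilation_w = 2 covers a 5x5 window. With height = width = 8, pad = 2 and
// stride = 1, the formulas above give
//   height_col = (8 + 2*2 - (2*(3-1)+1)) / 1 + 1 = 8,
// i.e. a "same"-sized column grid. A call would then look like:
//   col2im_gpu_ext(data_col, 3, 8, 8, 3, 3, 2, 2, 1, 1, 2, 2, data_im);
// Unlike col2im_ongpu, this variant overwrites data_im ("=" rather than
// "+="), so no prior memset of data_im is required.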