#include <algorithm>

#include "conv2d-transpose.cuh"
#include "ggml.h"

__global__ void conv2d_transpose_kernel(const float * __restrict__ input, const half * __restrict__ kernel,
                                        float * __restrict__ output, const int in_w, const int in_h, const int out_w,
                                        const int out_h, const int kernel_w, const int kernel_h, const int stride,
                                        const int c_in, const int c_out, const int batches) {
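    // One thread per output element; global_idx linearizes (x, y, channel, batch).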
    const int global_idx = blockIdx.x * blockDim.x + threadIdx.x;

    const int total_elements = out_w * out_h * c_out * batches;

    if (global_idx >= total_elements) {
        return;
    }

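    // Decompose the flat index into output coordinates for a contiguous (W, H, C, N) layout.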
    const int out_x_idx = global_idx % out_w;
    const int out_y_idx = (global_idx / out_w) % out_h;
    const int c_idx = (global_idx / (out_w * out_h)) % c_out;
    const int n_idx = global_idx / (out_w * out_h * c_out);

    float accumulator = 0.0f;
    // For each output element, accumulate every input element that contributes to it,
    // checking stride alignment and bounds along the way.

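    // The forward conv-transpose scatters input (in_x, in_y) to output (in_x * stride + kw, in_y * stride + kh);
    // inverting gives in = (out - k) / stride, valid only when the division is exact and the result is in range.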
    for (int c_in_idx = 0; c_in_idx < c_in; ++c_in_idx) {
        for (int kh = 0; kh < kernel_h; ++kh) {
            int in_y = out_y_idx - kh;
            if (in_y < 0 || in_y % stride) continue;
            in_y /= stride;
            if (in_y >= in_h) continue;

            for (int kw = 0; kw < kernel_w; ++kw) {
                int in_x = out_x_idx - kw;
                if (in_x < 0 || in_x % stride) continue;
                in_x /= stride;
                if (in_x >= in_w) continue;

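                // Contiguous layouts: input is (W, H, C_in, N), kernel is (KW, KH, C_out, C_in).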
                const int input_idx = (in_w * in_h * c_in) * n_idx + (in_w * in_h) * c_in_idx + in_w * in_y + in_x;
                const int kernel_idx =
                    (kernel_h * kernel_w * c_out) * c_in_idx + (kernel_h * kernel_w) * c_idx + kernel_w * kh + kw;

                float input_val = input[input_idx];
                half kern_val = kernel[kernel_idx];

                accumulator += input_val * (float) kern_val;
            }
        }
    }

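    // Write the result back using the contiguous (W, H, C_out, N) output layout.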
    output[(out_w * out_h * c_out) * n_idx + (out_w * out_h) * c_idx + out_w * out_y_idx + out_x_idx] = accumulator;
}

// Input is (W, H, C_in, N); kernel is (W, H, C_out, C_in).
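// With stride s and no padding ("p0"), the output is sized out_w = (in_w - 1) * s + kernel_w and
// out_h = (in_h - 1) * s + kernel_h; ggml computes these when the op is built, so dst->ne already reflects them.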
void ggml_cuda_conv_2d_transpose_p0(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
    const ggml_tensor * kernel = dst->src[0];
    const ggml_tensor * input  = dst->src[1];

    GGML_ASSERT(kernel->type == GGML_TYPE_F16 && input->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32);

    const float * input_data  = (const float *) input->data;
    float *       output_data = (float *) dst->data;
    const half *  kernel_data = (const half *) kernel->data;

    const int input_w      = input->ne[0];
    const int input_h      = input->ne[1];
    const int output_w     = dst->ne[0];
    const int output_h     = dst->ne[1];
    const int channels_in  = input->ne[2];
    const int channels_out = kernel->ne[2];
    const int kernel_w     = kernel->ne[0];
    const int kernel_h     = kernel->ne[1];
    const int stride       = dst->op_params[0];
    const int batches      = input->ne[3];

    GGML_ASSERT(channels_in == kernel->ne[3]);
    GGML_ASSERT(stride > 0);

    cudaStream_t st = ctx.stream();

    GGML_ASSERT(ggml_is_contiguous(input));
    GGML_ASSERT(ggml_is_contiguous(kernel));
    GGML_ASSERT(ggml_is_contiguous(dst));

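    // Launch one thread per output element, rounding up to a whole number of blocks.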
    const int total  = output_w * output_h * channels_out * batches;
    const int blocks = (total + CUDA_CONV2D_TRANSPOSE_BLOCK_SIZE - 1) / CUDA_CONV2D_TRANSPOSE_BLOCK_SIZE;

    conv2d_transpose_kernel<<<blocks, CUDA_CONV2D_TRANSPOSE_BLOCK_SIZE, 0, st>>>(
        input_data, kernel_data, output_data, input_w, input_h, output_w, output_h, kernel_w, kernel_h, stride,
        channels_in, channels_out, batches);
}
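
// A minimal usage sketch (hypothetical, host-side): constructing the op through the
// public ggml API so that this kernel is dispatched on the CUDA backend. The tensor
// shapes (KW, KH, C_OUT, C_IN, W, H, N) and the stride value are illustrative only:
//
//     ggml_tensor * kernel = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, KW, KH, C_OUT, C_IN);
//     ggml_tensor * input  = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, W,  H,  C_IN,  N);
//     ggml_tensor * out    = ggml_conv_transpose_2d_p0(ctx, kernel, input, stride);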