#include "getrows.cuh"
#include "dequantize.cuh"
#include "convert.cuh"

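// Gather rows of a quantized src0 into dst, dequantizing on the fly.
// Each thread produces one pair of dequantized values per loop iteration.
// Grid layout: x = row index (i10), y = position within a row (i00), z = batch (i11, i12).
// y and z use grid-stride loops because the launcher clamps them to the CUDA limit of 65535.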
template<int qk, int qr, dequantize_kernel_t dequantize_kernel, typename dst_t>
static __global__ void k_get_rows(
        const void * __restrict__ src0, const int32_t * __restrict__ src1, dst_t * __restrict__ dst,
        const int64_t ne00, /*const int64_t ne01, const int64_t ne02, const int64_t ne03,*/
        /*const int64_t ne10,*/ const int64_t ne11, const int64_t ne12, /*const int64_t ne13,*/
        /*const size_t s0,*/ const size_t s1, const size_t s2, const size_t s3,
        /*const size_t nb00,*/ const size_t nb01, const size_t nb02, const size_t nb03,
        const size_t s10, const size_t s11, const size_t s12/*, const size_t s13*/) {

    for (int64_t z = blockIdx.z; z < ne11*ne12; z += gridDim.z) {
        // each thread handles one pair of values, so the grid-stride is 2 elements per thread
        for (int64_t i00 = 2*(blockIdx.y*blockDim.x + threadIdx.x); i00 < ne00; i00 += 2*gridDim.y*blockDim.x) {
            // The x and y dimensions of the grid are swapped because the maximum allowed grid size for x is higher.
            const int i10 = blockIdx.x;
            const int i11 = z / ne12; // TODO fastdiv
            const int i12 = z % ne12;

            const int i01 = src1[i10*s10 + i11*s11 + i12*s12]; // row index into src0

            dst_t * dst_row = dst + i10*s1 + i11*s2 + i12*s3;
            const void * src0_row = (const char *) src0 + i01*nb01 + i11*nb02 + i12*nb03;

            const int ib = i00/qk;            // block index
            const int iqs = (i00%qk)/qr;      // quant index
            const int iybs = i00 - i00%qk;    // dst block start index
            const int y_offset = qr == 1 ? 1 : qk/2;

            // dequantize
            float2 v;
            dequantize_kernel(src0_row, ib, iqs, v);

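            // within a block the two dequantized values are stored 1 element apart
            // for qr == 1 and qk/2 elements apart otherwise (see y_offset above)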
            dst_row[iybs + iqs + 0]        = ggml_cuda_cast<dst_t>(v.x);
            dst_row[iybs + iqs + y_offset] = ggml_cuda_cast<dst_t>(v.y);
        }
    }
}

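// Same as k_get_rows but for plain (non-quantized) src0 types:
// each thread copies/converts a single element per loop iteration.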
template<typename src0_t, typename dst_t>
static __global__ void k_get_rows_float(
        const src0_t * __restrict__ src0, const int32_t * __restrict__ src1, dst_t * __restrict__ dst,
        const int64_t ne00, /*const int64_t ne01, const int64_t ne02, const int64_t ne03,*/
        /*const int64_t ne10,*/ const int64_t ne11, const int64_t ne12, /*const int64_t ne13,*/
        /*const size_t s0,*/ const size_t s1, const size_t s2, const size_t s3,
        /*const size_t nb00,*/ const size_t nb01, const size_t nb02, const size_t nb03,
        const size_t s10, const size_t s11, const size_t s12/*, const size_t s13*/) {

    for (int64_t z = blockIdx.z; z < ne11*ne12; z += gridDim.z) {
        for (int64_t i00 = blockIdx.y*blockDim.x + threadIdx.x; i00 < ne00; i00 += gridDim.y*blockDim.x) {
            // The x and y dimensions of the grid are swapped because the maximum allowed grid size for x is higher.
            const int i10 = blockIdx.x;
            const int i11 = z / ne12; // TODO fastdiv
            const int i12 = z % ne12;

            const int i01 = src1[i10*s10 + i11*s11 + i12*s12]; // row index into src0

            dst_t * dst_row = dst + i10*s1 + i11*s2 + i12*s3;
            const src0_t * src0_row = (const src0_t *)((const char *) src0 + i01*nb01 + i11*nb02 + i12*nb03);

            dst_row[i00] = ggml_cuda_cast<dst_t>(src0_row[i00]);
        }
    }
}

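// Backward pass of GGML_OP_GET_ROWS: accumulate the gradients of all forward-pass
// rows that were gathered from a given dst row (a scatter-add expressed as a gather
// so that no atomics are needed).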
template<typename grad_t, typename dst_t>
static __global__ void k_get_rows_back_float(
        const grad_t * __restrict__ grad, const int32_t * __restrict__ rows, dst_t * __restrict__ dst, const int64_t ncols, const int64_t nrows_grad) {
    const int col = blockIdx.x*blockDim.x + threadIdx.x;

    if (col >= ncols) {
        return;
    }

    const int dst_row = blockIdx.y*blockDim.y + threadIdx.y;

    float sum = 0.0f;

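    // brute-force reduction: every thread scans all forward-pass row indices and
    // sums the gradient entries whose index matches its dst_row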
    for (int64_t i = 0; i < nrows_grad; ++i) {
        if (rows[i] != dst_row) {
            continue;
        }
        sum += grad[i*ncols + col];
    }

    dst[dst_row*ncols + col] = sum;
}

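// Host-side launcher for quantized src0 types. The y and z grid dimensions are
// clamped to UINT16_MAX (65535, the CUDA limit for those dimensions); the kernel's
// grid-stride loops cover whatever does not fit in a single pass.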
template<int qk, int qr, dequantize_kernel_t dq, typename dst_t>
static void get_rows_cuda_q(
        const void * src0_d, const int32_t * src1_d, dst_t * dst_d,
        const int64_t ne00, const size_t nb01, const size_t nb02, const size_t nb03,
        const int64_t ne10, const int64_t ne11, const int64_t ne12, const size_t nb10, const size_t nb11, const size_t nb12,
        const size_t nb1, const size_t nb2, const size_t nb3,
        cudaStream_t stream) {
    const dim3 block_dims(CUDA_GET_ROWS_BLOCK_SIZE, 1, 1);
    const int block_num_y = (ne00 + 2*CUDA_GET_ROWS_BLOCK_SIZE - 1) / (2*CUDA_GET_ROWS_BLOCK_SIZE);
    const dim3 block_nums(ne10, MIN(block_num_y, UINT16_MAX), MIN(ne11*ne12, UINT16_MAX));

    // strides in elements
    // const size_t s0 = nb0 / sizeof(dst_t);
    const size_t s1 = nb1 / sizeof(dst_t);
    const size_t s2 = nb2 / sizeof(dst_t);
    const size_t s3 = nb3 / sizeof(dst_t);

    const size_t s10 = nb10 / sizeof(int32_t);
    const size_t s11 = nb11 / sizeof(int32_t);
    const size_t s12 = nb12 / sizeof(int32_t);
    // const size_t s13 = nb13 / sizeof(int32_t);

    GGML_ASSERT(ne00 % 2 == 0); // the kernel dequantizes values in pairs

    k_get_rows<qk, qr, dq><<<block_nums, block_dims, 0, stream>>>(
        src0_d, src1_d, dst_d,
        ne00, /*ne01, ne02, ne03,*/
        /*ne10,*/ ne11, ne12, /*ne13,*/
        /* s0,*/ s1, s2, s3,
        /* nb00,*/ nb01, nb02, nb03,
        s10, s11, s12/*, s13*/);
}

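// Host-side launcher for F32/F16/BF16/I32 src0 types; same grid layout as
// get_rows_cuda_q but with one element per thread instead of a pair.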
template<typename src0_t, typename dst_t>
static void get_rows_cuda_float(
        const src0_t * src0_d, const int32_t * src1_d, dst_t * dst_d,
        const int64_t ne00, const size_t nb01, const size_t nb02, const size_t nb03,
        const int64_t ne10, const int64_t ne11, const int64_t ne12, const size_t nb10, const size_t nb11, const size_t nb12,
        const size_t nb1, const size_t nb2, const size_t nb3,
        cudaStream_t stream) {
    const dim3 block_dims(CUDA_GET_ROWS_BLOCK_SIZE, 1, 1);
    const int block_num_y = (ne00 + CUDA_GET_ROWS_BLOCK_SIZE - 1) / CUDA_GET_ROWS_BLOCK_SIZE;
    const dim3 block_nums(ne10, MIN(block_num_y, UINT16_MAX), MIN(ne11*ne12, UINT16_MAX));

    // strides in elements
    // const size_t s0 = nb0 / sizeof(dst_t);
    const size_t s1 = nb1 / sizeof(dst_t);
    const size_t s2 = nb2 / sizeof(dst_t);
    const size_t s3 = nb3 / sizeof(dst_t);

    const size_t s10 = nb10 / sizeof(int32_t);
    const size_t s11 = nb11 / sizeof(int32_t);
    const size_t s12 = nb12 / sizeof(int32_t);
    // const size_t s13 = nb13 / sizeof(int32_t);

    k_get_rows_float<<<block_nums, block_dims, 0, stream>>>(
        src0_d, src1_d, dst_d,
        ne00, /*ne01, ne02, ne03,*/
        /*ne10,*/ ne11, ne12, /*ne13,*/
        /* s0,*/ s1, s2, s3,
        /* nb00,*/ nb01, nb02, nb03,
        s10, s11, s12/*, s13*/);
}

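// Instantiates the launcher matching src0->type for a fixed dst type.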
template <typename dst_t>
static void ggml_cuda_get_rows_switch_src0_type(
        const void * src0_d, const ggml_type src0_type, const int32_t * src1_d, dst_t * dst_d,
        const int64_t ne00, const size_t nb01, const size_t nb02, const size_t nb03,
        const int64_t ne10, const int64_t ne11, const int64_t ne12, const size_t nb10, const size_t nb11, const size_t nb12,
        const size_t nb1, const size_t nb2, const size_t nb3,
        cudaStream_t stream) {
    switch (src0_type) {
        case GGML_TYPE_F16:
            get_rows_cuda_float((const half *) src0_d, src1_d, dst_d,
                ne00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb1, nb2, nb3, stream);
            break;
        case GGML_TYPE_F32:
            get_rows_cuda_float((const float *) src0_d, src1_d, dst_d,
                ne00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb1, nb2, nb3, stream);
            break;
        case GGML_TYPE_I32:
            get_rows_cuda_float((const int32_t *) src0_d, src1_d, dst_d,
                ne00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb1, nb2, nb3, stream);
            break;
        case GGML_TYPE_BF16:
            get_rows_cuda_float((const nv_bfloat16 *) src0_d, src1_d, dst_d,
                ne00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb1, nb2, nb3, stream);
            break;
        case GGML_TYPE_Q4_0:
            get_rows_cuda_q<QK4_0, QR4_0, dequantize_q4_0>(src0_d, src1_d, dst_d,
                ne00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb1, nb2, nb3, stream);
            break;
        case GGML_TYPE_Q4_1:
            get_rows_cuda_q<QK4_1, QR4_1, dequantize_q4_1>(src0_d, src1_d, dst_d,
                ne00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb1, nb2, nb3, stream);
            break;
        case GGML_TYPE_Q5_0:
            get_rows_cuda_q<QK5_0, QR5_0, dequantize_q5_0>(src0_d, src1_d, dst_d,
                ne00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb1, nb2, nb3, stream);
            break;
        case GGML_TYPE_Q5_1:
            get_rows_cuda_q<QK5_1, QR5_1, dequantize_q5_1>(src0_d, src1_d, dst_d,
                ne00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb1, nb2, nb3, stream);
            break;
        case GGML_TYPE_Q8_0:
            get_rows_cuda_q<QK8_0, QR8_0, dequantize_q8_0>(src0_d, src1_d, dst_d,
                ne00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb1, nb2, nb3, stream);
            break;
        default:
            // TODO: k-quants
            GGML_ABORT("%s: unsupported src0 type: %s\n", __func__, ggml_type_name(src0_type));
            break;
    }
}

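// Public entry point: for each index src1[i10, i11, i12], row (src1[i10, i11, i12], i11, i12)
// of src0 is copied (and converted if necessary) to row (i10, i11, i12) of dst.
// All nb* strides are in bytes; the launchers convert the dst and src1 strides to
// element counts, while the src0 strides stay in bytes because src0 may be quantized.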
void get_rows_cuda(
        const void * src0_d, ggml_type src0_type, const int32_t * src1_d, void * dst_d, ggml_type dst_type,
        int64_t ne00, size_t nb01, size_t nb02, size_t nb03,
        int64_t ne10, int64_t ne11, int64_t ne12, size_t nb10, size_t nb11, size_t nb12,
        size_t nb1, size_t nb2, size_t nb3,
        cudaStream_t stream) {
    switch (dst_type) {
        case GGML_TYPE_F32:
            ggml_cuda_get_rows_switch_src0_type(src0_d, src0_type, src1_d, (float *) dst_d,
                ne00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb1, nb2, nb3, stream);
            break;
        case GGML_TYPE_I32:
            ggml_cuda_get_rows_switch_src0_type(src0_d, src0_type, src1_d, (int32_t *) dst_d,
                ne00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb1, nb2, nb3, stream);
            break;
        case GGML_TYPE_F16:
            ggml_cuda_get_rows_switch_src0_type(src0_d, src0_type, src1_d, (half *) dst_d,
                ne00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb1, nb2, nb3, stream);
            break;
        case GGML_TYPE_BF16:
            ggml_cuda_get_rows_switch_src0_type(src0_d, src0_type, src1_d, (nv_bfloat16 *) dst_d,
                ne00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb1, nb2, nb3, stream);
            break;
        default:
            GGML_ABORT("%s: unsupported dst type: %s\n", __func__, ggml_type_name(dst_type));
            break;
    }
}

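// GGML_OP_GET_ROWS: gather the rows of src0 indexed by the int32 tensor src1 into dst.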
void ggml_cuda_op_get_rows(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
    const ggml_tensor * src0 = dst->src[0];
    const ggml_tensor * src1 = dst->src[1];

    cudaStream_t stream = ctx.stream();

    GGML_TENSOR_BINARY_OP_LOCALS

    GGML_ASSERT(src1->type == GGML_TYPE_I32);
    GGML_ASSERT(ne13 == 1);

    GGML_ASSERT(src0->nb[0] == ggml_type_size(src0->type));
    GGML_ASSERT(src1->nb[0] == ggml_type_size(src1->type));
    GGML_ASSERT(dst->nb[0]  == ggml_type_size(dst->type));

    get_rows_cuda(src0->data, src0->type, (const int32_t *) src1->data, dst->data, dst->type,
        ne00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb1, nb2, nb3, stream);
}

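// GGML_OP_GET_ROWS_BACK: gradient of get_rows w.r.t. src0, i.e. a scatter-add of the
// output gradients back onto the rows they were gathered from. Only the contiguous,
// single-batch F32 case is supported (see the asserts below).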
void ggml_cuda_op_get_rows_back(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
    const ggml_tensor * src0 = dst->src[0]; // gradients of forward pass output
    const ggml_tensor * src1 = dst->src[1]; // src1 in forward pass

    GGML_TENSOR_BINARY_OP_LOCALS

    const float   * src0_d = (const float   *) src0->data;
    const int32_t * src1_d = (const int32_t *) src1->data;
    float         * dst_d  = (float         *) dst->data;

    cudaStream_t stream = ctx.stream();

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT(src1->type == GGML_TYPE_I32);
    GGML_ASSERT(dst->type  == GGML_TYPE_F32);

    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(src1));
    GGML_ASSERT(ggml_is_contiguous(dst));

    GGML_ASSERT(ne02*ne03 == 1);
    GGML_ASSERT(ne12*ne13 == 1);
    GGML_ASSERT(ne2*ne3   == 1);

    const dim3 block_dims(CUDA_GET_ROWS_BACK_BLOCK_SIZE, 1, 1);
    const int block_num_x = (ne00 + CUDA_GET_ROWS_BACK_BLOCK_SIZE - 1) / CUDA_GET_ROWS_BACK_BLOCK_SIZE;
    const dim3 block_nums(block_num_x, ne1, 1);

    k_get_rows_back_float<<<block_nums, block_dims, 0, stream>>>(src0_d, src1_d, dst_d, ne00, ne10);
}