#include "ggml-impl.h"
#include "opt-step-sgd.cuh"

#include <cstdint>

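// Element-wise SGD update with weight decay, as implemented by the kernel below:
//   x[i] = x[i] * (1 - pars[0]*pars[1]) - pars[0]*g[i]
// i.e. pars[0] acts as the learning rate and pars[1] as the weight decay factor.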
static __global__ void opt_step_sgd_f32(
        float * __restrict__ x, const float * __restrict__ g,
        const float * __restrict__ pars, const int64_t k) {

    const int64_t i = (int64_t) blockIdx.x*blockDim.x + threadIdx.x;

    if (i >= k) {
        return;
    }
    x[i] = x[i] * (1.0f - pars[0] * pars[1]) - pars[0] * g[i];
}

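// Host-side launcher: one thread per element; the grid size is rounded up so all k elements are covered.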
static void opt_step_sgd_f32_cuda(
        float * x, const float * g, const float * __restrict__ pars, const int64_t k, cudaStream_t stream) {

    const dim3 block_dims(CUDA_OPT_STEP_SGD_BLOCK_SIZE, 1, 1);
    const dim3 block_nums((k + CUDA_OPT_STEP_SGD_BLOCK_SIZE - 1) / CUDA_OPT_STEP_SGD_BLOCK_SIZE, 1, 1);
    opt_step_sgd_f32<<<block_nums, block_dims, 0, stream>>>(x, g, pars, k);
}

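// Backend entry point for the SGD optimizer step: dst->src[0] holds the weights (updated in place),
// dst->src[1] the gradients and dst->src[2] the two hyper-parameters consumed by the kernel above.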
void ggml_cuda_opt_step_sgd(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
    const ggml_tensor * src0 = dst->src[0];
    const ggml_tensor * src0_grad = dst->src[1];
    const ggml_tensor * params = dst->src[2];

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT(src0_grad->type == GGML_TYPE_F32);
    GGML_ASSERT(params->type == GGML_TYPE_F32);
    GGML_ASSERT(ggml_is_contiguous(src0));
    GGML_ASSERT(ggml_is_contiguous(src0_grad));
    GGML_ASSERT(ggml_is_contiguous(params));
    GGML_ASSERT(ggml_are_same_shape(src0, src0_grad));
    GGML_ASSERT(ggml_nelements(params) == 2);

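    // Raw device pointers; only src0 (the weights) is written, gradients and hyper-parameters are read-only.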
    float * src0_d = (float *) src0->data;
    const float * src0_grad_d = (const float *) src0_grad->data;
    const float * params_d = (const float *) params->data;

    cudaStream_t stream = ctx.stream();

    const int64_t ne = ggml_nelements(src0);

    opt_step_sgd_f32_cuda(src0_d, src0_grad_d, params_d, ne, stream);
}