#pragma once

#include "ggml.h"

//
// cache line
//

#if defined(__cpp_lib_hardware_interference_size)
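// note: __cpp_lib_hardware_interference_size is defined by <new>/<version>, so this
// branch only takes effect in C++ translation units that include one of those headers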
#define CACHE_LINE_SIZE std::hardware_destructive_interference_size
#else
#if defined(__POWER9_VECTOR__)
#define CACHE_LINE_SIZE 128
#elif defined(__VXE__) || defined(__VXE2__)
#define CACHE_LINE_SIZE 256
#else
#define CACHE_LINE_SIZE 64
#endif
#endif

static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float);

// Work buffer size for im2col operations in CONV2D
#define GGML_IM2COL_WORK_SIZE (16 * 1024 * 1024)

#ifdef __cplusplus
extern "C" {
#endif

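// forward compute kernels for the CPU backend, one per GGML op:
// - the op's inputs are read from dst->src[] and the result is written into dst
// - params carries the per-thread state (thread index and count) and the shared work buffer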
void ggml_compute_forward_dup(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_add(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_add_id(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_add1(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_acc(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_sum(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_sum_rows(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_mean(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_argmax(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_count_equal(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_repeat(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_repeat_back(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_concat(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_silu_back(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_norm(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_rms_norm(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_rms_norm_back(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_group_norm(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_l2_norm(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_out_prod(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_scale(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_set(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_cpy(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_cont(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_reshape(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_view(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_permute(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_transpose(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_get_rows(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_get_rows_back(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_set_rows(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_diag(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_diag_mask_inf(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_diag_mask_zero(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_soft_max(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_soft_max_ext_back(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_rope(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_rope_back(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_clamp(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_conv_transpose_1d(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_im2col(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_im2col_back_f32(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_im2col_3d(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_conv_2d(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_conv_3d(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_conv_transpose_2d(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_conv_2d_dw(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_pool_1d(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_pool_2d(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_pool_2d_back(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_upscale(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_pad(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_pad_reflect_1d(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_roll(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_arange(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_timestep_embedding(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_argsort(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_leaky_relu(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_flash_attn_ext(const struct ggml_compute_params * params, struct ggml_tensor * dst);
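// flash_attn_back additionally takes an explicit `masked` flag alongside the usual parameters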
void ggml_compute_forward_flash_attn_back(
        const struct ggml_compute_params * params,
        const bool masked,
        struct ggml_tensor * dst);
void ggml_compute_forward_ssm_conv(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_ssm_scan(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_win_part(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_win_unpart(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_unary(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_glu(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_get_rel_pos(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_add_rel_pos(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_rwkv_wkv6(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_rwkv_wkv7(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_gla(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_map_custom1(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_map_custom2(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_map_custom3(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_custom(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_cross_entropy_loss(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_cross_entropy_loss_back(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_opt_step_adamw(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_mul_mat(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_opt_step_sgd(const struct ggml_compute_params * params, struct ggml_tensor * dst);
#ifdef __cplusplus
}
#endif