| 1 | #pragma once |
| 2 | |
| 3 | #include "ggml.h" |
| 4 | |
| 5 | // |
| 6 | // cache line |
| 7 | // |
| 8 | |
#if defined(__cpp_lib_hardware_interference_size)
// C++17: provided by <new>; the feature-test macro is only visible once <new> or <version> has been included
#define CACHE_LINE_SIZE std::hardware_destructive_interference_size
#else
#if defined(__POWER9_VECTOR__)
#define CACHE_LINE_SIZE 128 // POWER9 uses 128-byte cache lines
#elif defined(__VXE__) || defined(__VXE2__)
#define CACHE_LINE_SIZE 256 // IBM z (s390x vector extensions) uses 256-byte cache lines
#else
#define CACHE_LINE_SIZE 64  // common default for x86-64 and most AArch64 cores
#endif
#endif
| 20 | |
static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float); // cache line size measured in f32 elements
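
// Illustrative sketch (hypothetical helper, not part of the ggml API): a typical use of
// CACHE_LINE_SIZE_F32 is to pad per-thread f32 scratch rows up to a whole number of cache
// lines, so two threads never write into the same line (false sharing).
static inline size_t ggml_example_pad_f32(size_t ne) {
    // round the element count up to a multiple of CACHE_LINE_SIZE_F32
    return (ne + CACHE_LINE_SIZE_F32 - 1)/CACHE_LINE_SIZE_F32 * CACHE_LINE_SIZE_F32;
}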
| 22 | |
// Work buffer size (16 MiB) for im2col intermediates in the CONV2D operation
| 24 | #define GGML_IM2COL_WORK_SIZE (16 * 1024 * 1024) |
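
// Illustrative sketch (hypothetical helper, not part of the ggml API): a chunked im2col-based
// CONV2D can bound its scratch use by only materializing as many im2col output rows per pass
// as fit in the fixed work buffer above.
static inline size_t ggml_example_im2col_rows_per_chunk(size_t row_size) {
    // row_size is the size in bytes of one im2col output row
    return row_size > 0 ? (size_t) GGML_IM2COL_WORK_SIZE / row_size : 0;
}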
| 25 | |
| 26 | #ifdef __cplusplus |
| 27 | extern "C" { |
| 28 | #endif |
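
// All ops below share one calling convention: the destination tensor carries the operation
// (dst->op), its parameters (dst->op_params) and its source tensors (dst->src[0..]), while
// ggml_compute_params supplies the thread index/count (ith/nth) and the shared work buffer
// (wdata/wsize) for the current graph computation.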
| 29 | |
| 30 | void ggml_compute_forward_dup(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 31 | void ggml_compute_forward_add(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 32 | void ggml_compute_forward_add_id(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 33 | void ggml_compute_forward_add1(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 34 | void ggml_compute_forward_acc(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 35 | void ggml_compute_forward_sum(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 36 | void ggml_compute_forward_sum_rows(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 37 | void ggml_compute_forward_mean(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 38 | void ggml_compute_forward_argmax(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 39 | void ggml_compute_forward_count_equal(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 40 | void ggml_compute_forward_repeat(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 41 | void ggml_compute_forward_repeat_back(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 42 | void ggml_compute_forward_concat(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 43 | void ggml_compute_forward_silu_back(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 44 | void ggml_compute_forward_norm(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 45 | void ggml_compute_forward_rms_norm(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 46 | void ggml_compute_forward_rms_norm_back(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 47 | void ggml_compute_forward_group_norm(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 48 | void ggml_compute_forward_l2_norm(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 49 | void ggml_compute_forward_out_prod(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 50 | void ggml_compute_forward_scale(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 51 | void ggml_compute_forward_set(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 52 | void ggml_compute_forward_cpy(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 53 | void ggml_compute_forward_cont(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 54 | void ggml_compute_forward_reshape(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 55 | void ggml_compute_forward_view(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 56 | void ggml_compute_forward_permute(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 57 | void ggml_compute_forward_transpose(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 58 | void ggml_compute_forward_get_rows(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 59 | void ggml_compute_forward_get_rows_back(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 60 | void ggml_compute_forward_set_rows(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 61 | void ggml_compute_forward_diag(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 62 | void ggml_compute_forward_diag_mask_inf(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 63 | void ggml_compute_forward_diag_mask_zero(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 64 | void ggml_compute_forward_soft_max(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 65 | void ggml_compute_forward_soft_max_ext_back(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 66 | void ggml_compute_forward_rope(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 67 | void ggml_compute_forward_rope_back(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 68 | void ggml_compute_forward_clamp(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 69 | void ggml_compute_forward_conv_transpose_1d(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 70 | void ggml_compute_forward_im2col(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 71 | void ggml_compute_forward_im2col_back_f32(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 72 | void ggml_compute_forward_im2col_3d(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 73 | void ggml_compute_forward_conv_2d(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 74 | void ggml_compute_forward_conv_3d(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 75 | void ggml_compute_forward_conv_transpose_2d(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 76 | void ggml_compute_forward_conv_2d_dw(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 77 | void ggml_compute_forward_pool_1d(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 78 | void ggml_compute_forward_pool_2d(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 79 | void ggml_compute_forward_pool_2d_back(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 80 | void ggml_compute_forward_upscale(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 81 | void ggml_compute_forward_pad(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 82 | void ggml_compute_forward_pad_reflect_1d(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 83 | void ggml_compute_forward_roll(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 84 | void ggml_compute_forward_arange(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 85 | void ggml_compute_forward_timestep_embedding(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 86 | void ggml_compute_forward_argsort(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 87 | void ggml_compute_forward_leaky_relu(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 88 | void ggml_compute_forward_flash_attn_ext(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
// note: unlike the other ops here, takes an explicit `masked` flag instead of reading it from dst
void ggml_compute_forward_flash_attn_back(
        const struct ggml_compute_params * params,
        const bool masked,
        struct ggml_tensor * dst);
| 93 | void ggml_compute_forward_ssm_conv(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 94 | void ggml_compute_forward_ssm_scan(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 95 | void ggml_compute_forward_win_part(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 96 | void ggml_compute_forward_win_unpart(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 97 | void ggml_compute_forward_unary(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 98 | void ggml_compute_forward_glu(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 99 | void ggml_compute_forward_get_rel_pos(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 100 | void ggml_compute_forward_add_rel_pos(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 101 | void ggml_compute_forward_rwkv_wkv6(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 102 | void ggml_compute_forward_rwkv_wkv7(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 103 | void ggml_compute_forward_gla(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 104 | void ggml_compute_forward_map_custom1(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 105 | void ggml_compute_forward_map_custom2(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 106 | void ggml_compute_forward_map_custom3(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 107 | void ggml_compute_forward_custom(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 108 | void ggml_compute_forward_cross_entropy_loss(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
| 109 | void ggml_compute_forward_cross_entropy_loss_back(const struct ggml_compute_params * params, struct ggml_tensor * dst); |
void ggml_compute_forward_opt_step_adamw(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_opt_step_sgd(const struct ggml_compute_params * params, struct ggml_tensor * dst);
void ggml_compute_forward_mul_mat(const struct ggml_compute_params * params, struct ggml_tensor * dst);
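
// Illustrative dispatch sketch (hypothetical; the CPU backend's real dispatcher covers every
// GGML_OP_* value): each handler above is selected by switching on the destination tensor's op.
static inline void ggml_example_dispatch(const struct ggml_compute_params * params, struct ggml_tensor * tensor) {
    switch (tensor->op) {
        case GGML_OP_DUP:     ggml_compute_forward_dup    (params, tensor); break;
        case GGML_OP_ADD:     ggml_compute_forward_add    (params, tensor); break;
        case GGML_OP_MUL_MAT: ggml_compute_forward_mul_mat(params, tensor); break;
        // ... remaining ops elided in this sketch ...
        default: GGML_ABORT("op %s not handled in this sketch", ggml_op_name(tensor->op));
    }
}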
| 113 | #ifdef __cplusplus |
| 114 | } |
| 115 | #endif |
| 116 | |