#include "models.h"

#include "../llama-memory-hybrid.h"

llm_build_lfm2::llm_build_lfm2(const llama_model & model, const llm_graph_params & params) :
    llm_graph_context(params),
    model(model) {
    ggml_tensor * cur = build_inp_embd(model.tok_embd);
    cb(cur, "model.embed_tokens", -1);

    ggml_tensor * inp_pos = build_inp_pos();
    auto * inp_hybrid = build_inp_mem_hybrid();
    ggml_tensor * inp_out_ids = build_inp_out_ids();

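    // hybrid layer stack: each layer is either a recurrent short-convolution block or a
    // self-attention block (selected by hparams.is_recurrent(il)), with pre-norm residual
    // connections; layers at index >= n_layer_dense_lead use a MoE feed-forward, the rest a dense one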
    for (int il = 0; il < n_layer; ++il) {
        const bool is_moe_layer = il >= static_cast<int>(hparams.n_layer_dense_lead);

        auto * prev_cur = cur;
        cur = build_norm(cur, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, il);
        cb(cur, "model.layers.{}.operator_norm", il);

        cur = hparams.is_recurrent(il) ? build_shortconv_block(cur, inp_hybrid->get_recr(), il) :
                                         build_attn_block(cur, inp_pos, inp_hybrid->get_attn(), il);

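        // on the last layer, keep only the rows for which outputs were requested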
        if (il == n_layer - 1 && inp_out_ids) {
            cur      = ggml_get_rows(ctx0, cur, inp_out_ids);
            prev_cur = ggml_get_rows(ctx0, prev_cur, inp_out_ids);
        }

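        // residual connection around the attention / short-conv operator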
        cur = ggml_add(ctx0, prev_cur, cur);

        auto * ffn_norm_out = build_norm(cur, model.layers[il].ffn_norm, NULL, LLM_NORM_RMS, il);
        cb(ffn_norm_out, "model.layers.{}.ffn_norm", il);

        ggml_tensor * ffn_out =
            is_moe_layer ? build_moe_feed_forward(ffn_norm_out, il) : build_dense_feed_forward(ffn_norm_out, il);
        cb(ffn_out, "model.layers.{}.ffn_out", il);

        // residual connection around the feed-forward block
        cur = ggml_add(ctx0, cur, ffn_out);
    }

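    // final norm over the residual stream, then the LM head projection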
    cur = build_norm(cur, model.tok_norm, NULL, LLM_NORM_RMS, -1);
    cb(cur, "model.embedding_norm", -1);
    res->t_embd = cur;

    cur = build_lora_mm(model.output, cur);
    cb(cur, "lm_head", -1);

    res->t_logits = cur;

    ggml_build_forward_expand(gf, cur);
}

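// MoE feed-forward: each token is routed to n_expert_used of n_expert SiLU experts; the
// gating function is selected by hparams.expert_gating_func and the routing weights are
// normalized (norm_w = true) without extra scaling (scale_w = false)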
ggml_tensor * llm_build_lfm2::build_moe_feed_forward(ggml_tensor * cur, int il) const {
    return build_moe_ffn(cur,
            model.layers[il].ffn_gate_inp,  model.layers[il].ffn_up_exps,
            model.layers[il].ffn_gate_exps, model.layers[il].ffn_down_exps,
            model.layers[il].ffn_exp_probs_b, n_expert, n_expert_used, LLM_FFN_SILU, true, false, 0.0,
            static_cast<llama_expert_gating_func_type>(hparams.expert_gating_func), il);
}

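// dense feed-forward: SwiGLU-style gated MLP (SiLU activation, parallel gate), no biases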
ggml_tensor * llm_build_lfm2::build_dense_feed_forward(ggml_tensor * cur, int il) const {
    GGML_ASSERT(!model.layers[il].ffn_up_b);
    GGML_ASSERT(!model.layers[il].ffn_gate_b);
    GGML_ASSERT(!model.layers[il].ffn_down_b);
    return build_ffn(cur,
            model.layers[il].ffn_up,   NULL, NULL,
            model.layers[il].ffn_gate, NULL, NULL,
            model.layers[il].ffn_down, NULL, NULL,
            NULL, LLM_FFN_SILU, LLM_FFN_PAR, il);
}

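// self-attention block: bias-free Q/K/V projections, RMS norm on the Q and K heads,
// RoPE, then attention through the KV cache of the hybrid memory context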
ggml_tensor * llm_build_lfm2::build_attn_block(ggml_tensor * cur,
                                               ggml_tensor * inp_pos,
                                               llm_graph_input_attn_kv * inp_attn,
                                               int il) const {
    GGML_ASSERT(hparams.n_embd_v_gqa(il) == hparams.n_embd_k_gqa(il));
    const auto n_embd_head = hparams.n_embd_head_v;
    const auto n_head_kv   = hparams.n_head_kv(il);

    auto * q = build_lora_mm(model.layers[il].wq, cur);
    cb(q, "model.layers.{}.self_attn.q_proj", il);
    auto * k = build_lora_mm(model.layers[il].wk, cur);
    cb(k, "model.layers.{}.self_attn.k_proj", il);
    auto * v = build_lora_mm(model.layers[il].wv, cur);
    cb(v, "model.layers.{}.self_attn.v_proj", il);

    q = ggml_reshape_3d(ctx0, q, n_embd_head, n_head,    n_tokens);
    k = ggml_reshape_3d(ctx0, k, n_embd_head, n_head_kv, n_tokens);
    v = ggml_reshape_3d(ctx0, v, n_embd_head, n_head_kv, n_tokens);

    // qk norm
    q = build_norm(q, model.layers[il].attn_q_norm, NULL, LLM_NORM_RMS, il);
    cb(q, "model.layers.{}.self_attn.q_layernorm", il);
    k = build_norm(k, model.layers[il].attn_k_norm, NULL, LLM_NORM_RMS, il);
    cb(k, "model.layers.{}.self_attn.k_layernorm", il);

    // RoPE
    q = ggml_rope_ext(ctx0, q, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor,
                      attn_factor, beta_fast, beta_slow);
    k = ggml_rope_ext(ctx0, k, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale, ext_factor,
                      attn_factor, beta_fast, beta_slow);

    cur = build_attn(inp_attn,
            model.layers[il].wo, NULL,
            q, k, v, nullptr, nullptr, nullptr, 1.0f / sqrtf(float(n_embd_head)), il);

    cb(cur, "model.layers.{}.self_attn.out_proj", il);

    return cur;
}

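// LFM2 short-convolution operator: project the hidden state to three chunks {b, c, x},
// gate x by b, run a short causal convolution primed with the per-sequence conv state
// from the recurrent memory, gate the result by c and project back out; the trailing
// d_conv columns are written back as the new conv state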
ggml_tensor * llm_build_lfm2::build_shortconv_block(ggml_tensor * cur, llm_graph_input_rs * inp_recr, int il) {
    const auto * mctx_cur = static_cast<const llama_memory_hybrid_context *>(mctx)->get_recr();
    const uint32_t kv_head      = mctx_cur->get_head();
    const int64_t  n_seq_tokens = ubatch.n_seq_tokens;
    const int64_t  n_seqs       = ubatch.n_seqs;
    GGML_ASSERT(n_seqs != 0);
    GGML_ASSERT(ubatch.equal_seqs());
    GGML_ASSERT(ubatch.n_tokens == n_seq_tokens * n_seqs);

    GGML_ASSERT(hparams.n_shortconv_l_cache > 1);
    const uint32_t d_conv = hparams.n_shortconv_l_cache - 1;

    // {n_embd, n_tokens} => {n_embd, n_seq_tokens, n_seqs}
    cur = ggml_reshape_3d(ctx0, cur, cur->ne[0], n_seq_tokens, n_seqs);

    auto * bcx = build_lora_mm(model.layers[il].shortconv.in_proj, cur);
    cb(bcx, "model.layers.{}.conv.in_proj", il);

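    // in_proj packs three equal chunks along dim 0: b (gates the conv input),
    // c (gates the conv output) and x (the conv input itself)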
    constexpr auto n_chunks = 3;
    GGML_ASSERT(bcx->ne[0] % n_chunks == 0);
    const auto chunk_size = bcx->ne[0] / n_chunks;
    auto * b = ggml_view_3d(ctx0, bcx, chunk_size, bcx->ne[1], bcx->ne[2], bcx->nb[1], bcx->nb[2],
                            0 * chunk_size * ggml_element_size(bcx));
    auto * c = ggml_view_3d(ctx0, bcx, chunk_size, bcx->ne[1], bcx->ne[2], bcx->nb[1], bcx->nb[2],
                            1 * chunk_size * ggml_element_size(bcx));
    auto * x = ggml_view_3d(ctx0, bcx, chunk_size, bcx->ne[1], bcx->ne[2], bcx->nb[1], bcx->nb[2],
                            2 * chunk_size * ggml_element_size(bcx));

    auto * bx = ggml_transpose(ctx0, ggml_mul(ctx0, b, x));

    // read conv state
    auto * conv_state = mctx_cur->get_r_l(il);
    auto * conv_rs    = build_rs(inp_recr, conv_state, hparams.n_embd_r(), n_seqs);
    auto * conv       = ggml_reshape_3d(ctx0, conv_rs, d_conv, hparams.n_embd, n_seqs);

    bx = ggml_concat(ctx0, conv, bx, 0);
    GGML_ASSERT(bx->ne[0] > conv->ne[0]);

    // the last d_conv columns are the new conv state
    auto * new_conv = ggml_view_3d(ctx0, bx, conv->ne[0], bx->ne[1], bx->ne[2], bx->nb[1], bx->nb[2],
                                   (bx->ne[0] - conv->ne[0]) * ggml_element_size(bx));
    GGML_ASSERT(ggml_are_same_shape(conv, new_conv));

    // write the new conv state back to the recurrent memory
    ggml_build_forward_expand(gf, ggml_cpy(ctx0, new_conv,
                                           ggml_view_1d(ctx0, conv_state, ggml_nelements(new_conv),
                                                        kv_head * d_conv * n_embd * ggml_element_size(new_conv))));

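    // apply the short per-channel convolution over the state-primed sequence (via ggml_ssm_conv)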
    auto * conv_kernel = model.layers[il].shortconv.conv;
    auto * conv_out    = ggml_ssm_conv(ctx0, bx, conv_kernel);
    cb(conv_out, "model.layers.{}.conv.conv", il);

    auto * y = ggml_mul(ctx0, c, conv_out);
    y = build_lora_mm(model.layers[il].shortconv.out_proj, y);
    cb(y, "model.layers.{}.conv.out_proj", il);
    // {n_embd, n_seq_tokens, n_seqs} => {n_embd, n_tokens}
    y = ggml_reshape_2d(ctx0, y, y->ne[0], n_seq_tokens * n_seqs);

    return y;
}