#include "models.h"

llm_build_granite_hybrid::llm_build_granite_hybrid(const llama_model & model, const llm_graph_params & params) :
    llm_graph_context_mamba(params) {
    const int64_t n_embd_head = hparams.n_embd_head_v;
    GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);

    ggml_tensor * cur;
    ggml_tensor * inpL;

    inpL = build_inp_embd(model.tok_embd);

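    // hybrid memory input: a recurrent state for the mamba2 layers plus a KV cache for the attention layers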
    auto * inp = build_inp_mem_hybrid();

    ggml_tensor * inp_out_ids = build_inp_out_ids();

    // Positional embeddings populated if rope enabled
    ggml_tensor * inp_pos = nullptr;
    if (hparams.rope_finetuned) {
        inp_pos = build_inp_pos();
    }

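    // each layer is either a recurrent (mamba2) block or a self-attention block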
    for (int il = 0; il < n_layer; ++il) {
        struct ggml_tensor * inpSA = inpL;

        // norm
        cur = build_norm(inpL, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, il);
        cb(cur, "attn_norm", il);

        if (hparams.is_recurrent(il)) {
            // ssm layer //
            cur = build_mamba2_layer(inp->get_recr(), cur, model, ubatch, il);
        } else {
            // attention layer //
            cur = build_attention_layer(cur, inp_pos, inp->get_attn(), model, n_embd_head, il);
        }

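        // on the last layer, keep only the rows for which outputs were requested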
        if (il == n_layer - 1 && inp_out_ids) {
            cur   = ggml_get_rows(ctx0, cur, inp_out_ids);
            inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
        }

        // ffn
        cur = build_layer_ffn(cur, inpSA, model, il);

        // input for next layer
        inpL = cur;
    }

    cur = inpL;

    cur = build_norm(cur, model.output_norm, NULL, LLM_NORM_RMS, -1);

    cb(cur, "result_norm", -1);
    res->t_embd = cur;

    // lm_head
    cur = build_lora_mm(model.output, cur);

    // For Granite architectures - scale logits
    if (hparams.f_logit_scale) {
        cur = ggml_scale(ctx0, cur, 1.0f / hparams.f_logit_scale);
    }
    cb(cur, "result_output", -1);
    res->t_logits = cur;

    ggml_build_forward_expand(gf, cur);
}

ggml_tensor * llm_build_granite_hybrid::build_attention_layer(ggml_tensor *             cur,
                                                              ggml_tensor *             inp_pos,
                                                              llm_graph_input_attn_kv * inp_attn,
                                                              const llama_model &       model,
                                                              const int64_t             n_embd_head,
                                                              const int                 il) {
    // compute Q and K and (optionally) RoPE them
    ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
    cb(Qcur, "Qcur", il);
    if (model.layers[il].bq) {
        Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
        cb(Qcur, "Qcur", il);
    }

    ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
    cb(Kcur, "Kcur", il);
    if (model.layers[il].bk) {
        Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
        cb(Kcur, "Kcur", il);
    }

    ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
    cb(Vcur, "Vcur", il);
    if (model.layers[il].bv) {
        Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
        cb(Vcur, "Vcur", il);
    }

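    // reshape from (n_embd, n_tokens) to (n_embd_head, n_head, n_tokens) for multi-head attention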
    Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, hparams.n_head(il), n_tokens);
    Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, hparams.n_head_kv(il), n_tokens);
    Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, hparams.n_head_kv(il), n_tokens);

    const bool use_rope = hparams.rope_finetuned;
    if (use_rope) {
        ggml_tensor * rope_factors = model.get_rope_factors(cparams, il);
        Qcur = ggml_rope_ext(ctx0, Qcur, inp_pos, rope_factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                             ext_factor, attn_factor, beta_fast, beta_slow);

        Kcur = ggml_rope_ext(ctx0, Kcur, inp_pos, rope_factors, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                             ext_factor, attn_factor, beta_fast, beta_slow);
    }

    cb(Qcur, "Qcur", il);
    cb(Kcur, "Kcur", il);
    cb(Vcur, "Vcur", il);

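    // f_attention_scale == 0.0f selects the default 1/sqrt(n_embd_head) scaling; Granite checkpoints can override it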
    const float kq_scale =
        hparams.f_attention_scale == 0.0f ? 1.0f / sqrtf(float(n_embd_head)) : hparams.f_attention_scale;
    cur = build_attn(inp_attn,
            model.layers[il].wo, model.layers[il].bo,
            Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, kq_scale, il);
    cb(cur, "attn_out", il);
    return cur;
}

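// second half of each layer: residual scaling, dense or MoE feed-forward, another residual, then the per-layer control vector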
ggml_tensor * llm_build_granite_hybrid::build_layer_ffn(ggml_tensor *       cur,
                                                        ggml_tensor *       inpSA,
                                                        const llama_model & model,
                                                        const int           il) {
    // For Granite architectures - scale residual
    if (hparams.f_residual_scale) {
        cur = ggml_scale(ctx0, cur, hparams.f_residual_scale);
    }
    ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
    cb(ffn_inp, "ffn_inp", il);

    // feed-forward network (non-MoE)
    if (model.layers[il].ffn_gate_inp == nullptr) {
        cur = build_norm(ffn_inp, model.layers[il].ffn_norm, NULL, LLM_NORM_RMS, il);
        cb(cur, "ffn_norm", il);

        cur = build_ffn(cur,
                model.layers[il].ffn_up, model.layers[il].ffn_up_b, NULL,
                model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, NULL,
                model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
                NULL, LLM_FFN_SILU, LLM_FFN_PAR, il);
        cb(cur, "ffn_out", il);

    } else {
        // MoE branch
        cur = build_norm(ffn_inp, model.layers[il].ffn_norm, NULL, LLM_NORM_RMS, il);
        cb(cur, "ffn_norm", il);

        ggml_tensor * moe_out =
            build_moe_ffn(cur,
                    model.layers[il].ffn_gate_inp,
                    model.layers[il].ffn_up_exps,
                    model.layers[il].ffn_gate_exps,
                    model.layers[il].ffn_down_exps,
                    nullptr,
                    n_expert, n_expert_used,
                    LLM_FFN_SILU, true,
                    false, 0.0,
                    LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
                    il);
        cb(moe_out, "ffn_moe_out", il);

        // For Granite MoE Shared
        if (hparams.n_ff_shexp > 0) {
            ggml_tensor * ffn_shexp =
                build_ffn(cur,
                        model.layers[il].ffn_up_shexp, NULL, NULL,
                        model.layers[il].ffn_gate_shexp, NULL, NULL,
                        model.layers[il].ffn_down_shexp, NULL, NULL,
                        NULL, LLM_FFN_SILU, LLM_FFN_PAR, il);
            cb(ffn_shexp, "ffn_shexp", il);

            cur = ggml_add(ctx0, moe_out, ffn_shexp);
            cb(cur, "ffn_out", il);
        } else {
            cur = moe_out;
        }
    }

    // For Granite architectures - scale residual
    if (hparams.f_residual_scale) {
        cur = ggml_scale(ctx0, cur, hparams.f_residual_scale);
    }
    cur = ggml_add(ctx0, cur, ffn_inp);
    cb(cur, "ffn_out", il);

    cur = build_cvec(cur, il);
    cb(cur, "l_out", il);

    return cur;
}