#include "models.h"


llm_build_mamba::llm_build_mamba(const llama_model & model, const llm_graph_params & params) : llm_graph_context_mamba(params) {
    ggml_tensor * cur;
    ggml_tensor * inpL;

    // {n_embd, n_tokens}
    inpL = build_inp_embd(model.tok_embd);
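
    // graph input for the recurrent state (Mamba's conv and SSM states
    // held in the memory module); shared by all layers below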
    auto * rs_inp = build_rs_inp();
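
    // indices of the tokens for which outputs are needed; used below to
    // avoid computing logits for the rest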
    ggml_tensor * inp_out_ids = build_inp_out_ids();

    for (int il = 0; il < n_layer; ++il) {
        // norm
        cur = build_norm(inpL, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, il);
        cb(cur, "attn_norm", il);

        if (model.arch == LLM_ARCH_MAMBA2) {
            cur = build_mamba2_layer(rs_inp, cur, model, ubatch, il);
        } else {
            cur = build_mamba_layer(rs_inp, cur, model, ubatch, il);
        }
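
        // on the last layer, skip computing outputs for unused tokens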
        if (il == n_layer - 1 && inp_out_ids) {
            cur  = ggml_get_rows(ctx0, cur,  inp_out_ids);
            inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
        }

        // residual
        cur = ggml_add(ctx0, cur, inpL);
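
        // apply the control vector for this layer, if one is loaded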
        cur = build_cvec(cur, il);
        cb(cur, "l_out", il);

        // input for next layer
        inpL = cur;
    }

    // final rmsnorm
    cur = build_norm(inpL, model.output_norm, NULL, LLM_NORM_RMS, -1);

    cb(cur, "result_norm", -1);
    res->t_embd = cur;

    // lm_head
    cur = build_lora_mm(model.output, cur);

    cb(cur, "result_output", -1);
    res->t_logits = cur;
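
    // add the logits tensor and all of its dependencies to the compute graph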
    ggml_build_forward_expand(gf, cur);
}