#include "models.h"

llm_build_codeshell::llm_build_codeshell(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) {
    const int64_t n_embd_head = hparams.n_embd_head_v;
    const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();

    GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
    GGML_ASSERT(n_embd_head == hparams.n_rot);

    ggml_tensor * cur;
    ggml_tensor * inpL;

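    // token embeddings become the initial residual stream (inpL)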
    inpL = build_inp_embd(model.tok_embd);

    // inp_pos - contains the positions
    ggml_tensor * inp_pos = build_inp_pos();

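    // attention input that reads from and writes to the KV cache (carries the KQ mask)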
    auto * inp_attn = build_attn_inp_kv();

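    // indices of the tokens for which outputs (logits) are needed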
    ggml_tensor * inp_out_ids = build_inp_out_ids();

    for (int il = 0; il < n_layer; ++il) {
        cur = build_norm(inpL,
                model.layers[il].attn_norm,
                model.layers[il].attn_norm_b,
                LLM_NORM, il);
        cb(cur, "attn_norm", il);

        // self-attention
        {
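            // fused QKV projection: a single matmul yields Q, K and V
            // concatenated along dim 0 as [n_embd + 2*n_embd_gqa, n_tokens]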
            cur = build_lora_mm(model.layers[il].wqkv, cur);
            cb(cur, "wqkv", il);

            cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
            cb(cur, "bqkv", il);

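            // split the packed tensor into 3D views:
            // Q at offset 0, K at n_embd, V at n_embd + n_embd_gqa (in floats)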
            ggml_tensor * Qcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head,    n_tokens, n_embd_head*sizeof(float), cur->nb[1], 0*sizeof(float)*(n_embd));
            ggml_tensor * Kcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, n_embd_head*sizeof(float), cur->nb[1], 1*sizeof(float)*(n_embd));
            ggml_tensor * Vcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, n_embd_head*sizeof(float), cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa));

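            // apply rotary position embeddings (RoPE) to Q and K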
            Qcur = ggml_rope_ext(
                    ctx0, Qcur, inp_pos, nullptr,
                    n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                    );

            Kcur = ggml_rope_ext(
                    ctx0, Kcur, inp_pos, nullptr,
                    n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                    );

            cb(Qcur, "Qcur", il);
            cb(Kcur, "Kcur", il);
            cb(Vcur, "Vcur", il);

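            // attention over the cached keys/values, followed by the output
            // projection (wo, bo); scores are scaled by 1/sqrt(n_embd_head)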
            cur = build_attn(inp_attn,
                    model.layers[il].wo, model.layers[il].bo,
                    Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
        }

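        // on the last layer, keep only the rows selected for output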
        if (il == n_layer - 1 && inp_out_ids) {
            cur  = ggml_get_rows(ctx0,  cur, inp_out_ids);
            inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
        }

        // add the input (residual connection around attention)
        ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
        cb(ffn_inp, "ffn_inp", il);

        // FF
        {
            cur = build_norm(ffn_inp,
                    model.layers[il].ffn_norm,
                    model.layers[il].ffn_norm_b,
                    LLM_NORM, il);
            cb(cur, "ffn_norm", il);

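            // feed-forward without a gate: up projection, GELU, then down projection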
            cur = build_ffn(cur,
                    model.layers[il].ffn_up,   model.layers[il].ffn_up_b,   NULL,
                    NULL,                      NULL,                        NULL,
                    model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
                    NULL,
                    LLM_FFN_GELU, LLM_FFN_SEQ, il);
            cb(cur, "ffn_out", il);
        }

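        // residual connection around the FFN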
        cur = ggml_add(ctx0, cur, ffn_inp);

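        // apply the per-layer control vector, if one is loaded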
        cur = build_cvec(cur, il);
        cb(cur, "l_out", il);

        // input for next layer
        inpL = cur;
    }

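    // final norm over the output of the last layer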
    cur = build_norm(inpL,
            model.output_norm,
            model.output_norm_b,
            LLM_NORM, -1);

    cb(cur, "result_norm", -1);
    res->t_embd = cur;

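    // project the final hidden states to vocabulary logits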
    cur = build_lora_mm(model.output, cur);

    cb(cur, "result_output", -1);
    res->t_logits = cur;

    ggml_build_forward_expand(gf, cur);
}