#include "models.h"

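// Graph builder for the Phi-3 family: a pre-norm transformer with an optionally
// fused QKV projection, RoPE with long-context frequency factors, and either a
// dense SwiGLU FFN or a MoE FFN per layer.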
template<bool iswa>
llm_build_phi3<iswa>::llm_build_phi3(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) {
    const int64_t n_embd_head = hparams.n_embd_head_v;
    const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();

    GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);

    ggml_tensor * cur;
    ggml_tensor * inpL;

    inpL = build_inp_embd(model.tok_embd);

    // inp_pos - contains the positions
    ggml_tensor * inp_pos = build_inp_pos();

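    // iswa selects the KV-cache attention input at compile time: the
    // sliding-window-attention (SWA) variant when true, the regular one when false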
    using inp_attn_type = std::conditional_t<iswa, llm_graph_input_attn_kv_iswa, llm_graph_input_attn_kv>;
    inp_attn_type * inp_attn = nullptr;

    if constexpr (iswa) {
        inp_attn = build_attn_inp_kv_iswa();
    } else {
        inp_attn = build_attn_inp_kv();
    }

    ggml_tensor * inp_out_ids = build_inp_out_ids();

    for (int il = 0; il < n_layer; ++il) {
        auto * residual = inpL;

        // self-attention
        {
            // rope freq factors for 128k context
            ggml_tensor * rope_factors = model.get_rope_factors(cparams, il);

            ggml_tensor * attn_norm_output = build_norm(inpL,
                    model.layers[il].attn_norm,
                    model.layers[il].attn_norm_b,
                    LLM_NORM_RMS, il);
            cb(attn_norm_output, "attn_norm", il);

            ggml_tensor * Qcur = nullptr;
            ggml_tensor * Kcur = nullptr;
            ggml_tensor * Vcur = nullptr;

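            // two weight layouts are supported: a single fused QKV projection
            // (wqkv), or separate Q/K/V projections with biases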
            if (model.layers[il].wqkv) {
                cur = build_lora_mm(model.layers[il].wqkv, attn_norm_output);
                cb(cur, "wqkv", il);

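                // slice Q, K and V out of the fused QKV result as 3D views (no copy);
                // the byte offsets (0, n_embd, n_embd + n_embd_gqa floats) assume F32 activations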
                Qcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head,    n_tokens, n_embd_head * sizeof(float), cur->nb[1], 0 * sizeof(float) * (n_embd));
                Kcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, n_embd_head * sizeof(float), cur->nb[1], 1 * sizeof(float) * (n_embd));
                Vcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, n_embd_head * sizeof(float), cur->nb[1], 1 * sizeof(float) * (n_embd + n_embd_gqa));
            } else {
                Qcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wq, attn_norm_output), model.layers[il].bq);
                Kcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wk, attn_norm_output), model.layers[il].bk);
                Vcur = ggml_add(ctx0, build_lora_mm(model.layers[il].wv, attn_norm_output), model.layers[il].bv);

                Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head,    n_tokens);
                Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
                Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
            }

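            // apply rotary position embeddings to Q and K, passing the per-layer
            // frequency factors used for long-context scaling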
            Qcur = ggml_rope_ext(
                    ctx0, Qcur, inp_pos, rope_factors,
                    n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                    );

            Kcur = ggml_rope_ext(
                    ctx0, Kcur, inp_pos, rope_factors,
                    n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                    );

            cb(Qcur, "Qcur", il);
            cb(Kcur, "Kcur", il);
            cb(Vcur, "Vcur", il);

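            // pre-scale Q by 1/sqrt(head_dim), so build_attn is called with a
            // kq scale of 1.0f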
            Qcur = ggml_scale(ctx0, Qcur, 1.0f / sqrtf(float(n_embd_head)));
            cb(Qcur, "Qcur", il);

            cur = build_attn(inp_attn,
                    model.layers[il].wo, model.layers[il].bo,
                    Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, 1.0f, il);
        }

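        // on the last layer, keep only the rows for which outputs were requested,
        // so the remaining ops run on just those tokens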
        if (il == n_layer - 1 && inp_out_ids) {
            cur      = ggml_get_rows(ctx0,      cur, inp_out_ids);
            residual = ggml_get_rows(ctx0, residual, inp_out_ids);
        }

        cur = ggml_add(ctx0, cur, residual);
        residual = cur;

        cur = build_norm(cur,
                model.layers[il].ffn_norm, model.layers[il].ffn_norm_b,
                LLM_NORM_RMS, il);
        cb(cur, "ffn_norm", il);

        // feed-forward network
        if (model.layers[il].ffn_gate_inp == nullptr) {
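            // dense path: ffn_up packs the gate and up projections into one
            // tensor; LLM_FFN_SWIGLU splits it and computes silu(gate) * up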
            cur = build_ffn(cur,
                    model.layers[il].ffn_up,   NULL, NULL,
                    NULL,                      NULL, NULL,
                    model.layers[il].ffn_down, NULL, NULL,
                    NULL,
                    LLM_FFN_SWIGLU, LLM_FFN_SEQ, il);
            cb(cur, "ffn_out", il);
        } else {
            // MoE branch
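            // softmax gating over n_expert experts, keeping the top n_expert_used;
            // the first bool enables renormalization of the selected expert weights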
            cur = build_moe_ffn(cur,
                    model.layers[il].ffn_gate_inp,
                    model.layers[il].ffn_up_exps,
                    model.layers[il].ffn_gate_exps,
                    model.layers[il].ffn_down_exps,
                    nullptr,
                    n_expert, n_expert_used,
                    LLM_FFN_SILU, true,
                    false, 0.0,
                    LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
                    il);
            cb(cur, "ffn_moe_out", il);
        }

        cur = ggml_add(ctx0, residual, cur);

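        // apply the per-layer control vector, if one is loaded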
        cur = build_cvec(cur, il);
        cb(cur, "l_out", il);

        // input for next layer
        inpL = cur;
    }

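    // final norm; the result is exposed as the embeddings tensor (res->t_embd)
    // before the output head projects it to logits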
    cur = build_norm(inpL,
            model.output_norm,
            model.output_norm_b,
            LLM_NORM_RMS, -1);

    cb(cur, "result_norm", -1);
    res->t_embd = cur;

    cur = build_lora_mm(model.output, cur);

    if (model.output_b != nullptr) {
        cb(cur, "result_output_no_bias", -1);
        cur = ggml_add(ctx0, cur, model.output_b);
    }

    cb(cur, "result_output", -1);
    res->t_logits = cur;

    ggml_build_forward_expand(gf, cur);
}

// Explicit template instantiations
template struct llm_build_phi3<false>;
template struct llm_build_phi3<true>;