#include "models.h"

llm_build_rwkv7::llm_build_rwkv7(const llama_model & model, const llm_graph_params & params) :
    llm_build_rwkv7_base(model, params) {
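    // RWKV7 keeps two token-shift states per layer: one for the time-mix and one for the channel-mix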
    GGML_ASSERT(hparams.token_shift_count == 2);

    ggml_tensor * cur;
    ggml_tensor * inpL;
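    // the value activations of the first layer, reused as a value residual by the later layers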
    ggml_tensor * v_first = nullptr;

    inpL = build_inp_embd(model.tok_embd);
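    // RWKV normalizes the raw token embeddings with an extra LayerNorm (ln0 in the reference implementation)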
    inpL = build_norm(inpL, model.tok_norm, model.tok_norm_b, LLM_NORM, -1);

    auto * rs_inp = build_rs_inp();

    const auto n_embd = hparams.n_embd;
    const auto n_seq_tokens = ubatch.n_seq_tokens;
    const auto n_seqs = ubatch.n_seqs;

    ggml_tensor * inp_out_ids = build_inp_out_ids();

    for (int il = 0; il < n_layer; ++il) {
        const llama_layer * layer = &model.layers[il];
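        // view the batch as [n_embd, n_seq_tokens, n_seqs] so that per-sequence token shifts can be built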
        inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs);

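        // previous-token activations for this layer, restored from the recurrent state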
        ggml_tensor * token_shift = build_rwkv_token_shift_load(rs_inp, ubatch, il);

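        // the first half of the shift state feeds the time-mix, the second half the channel-mix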
        ggml_tensor * att_shift =
            ggml_view_3d(ctx0, token_shift, n_embd, 1, n_seqs, token_shift->nb[1], token_shift->nb[2], 0);
        ggml_tensor * ffn_shift = ggml_view_3d(ctx0, token_shift, n_embd, 1, n_seqs, token_shift->nb[1],
                                               token_shift->nb[2], n_embd * ggml_element_size(token_shift));

        ggml_tensor * att_norm = build_norm(inpL, layer->attn_norm, layer->attn_norm_b, LLM_NORM, il);
        cb(att_norm, "attn_norm", il);

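        // x_prev[t] = att_norm[t-1], with the stored shift standing in for the token before this ubatch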
        ggml_tensor * x_prev = ggml_concat(
            ctx0, att_shift,
            ggml_view_3d(ctx0, att_norm, n_embd, n_seq_tokens - 1, n_seqs, att_norm->nb[1], att_norm->nb[2], 0), 1);

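        // RWKV7 time-mix (the attention analogue); v_first is set by the first layer and consumed by the rest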
        cur = build_rwkv7_time_mix(rs_inp, att_norm, x_prev, v_first, ubatch, il);

        ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
        cb(ffn_inp, "ffn_inp", il);

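        // the channel-mix norm is stored in attn_norm_2 (ln2 in the reference implementation)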
        ggml_tensor * ffn_norm = build_norm(ffn_inp, layer->attn_norm_2, layer->attn_norm_2_b, LLM_NORM, il);
        cb(ffn_norm, "ffn_norm", il);

        x_prev = ggml_concat(
            ctx0, ffn_shift,
            ggml_view_3d(ctx0, ffn_norm, n_embd, n_seq_tokens - 1, n_seqs, ffn_norm->nb[1], ffn_norm->nb[2], 0), 1);

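        // stash the last token of att_norm and ffn_norm as the shift state for the next ubatch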
        token_shift = ggml_concat(ctx0,
                                  ggml_view_3d(ctx0, att_norm, n_embd, 1, n_seqs, att_norm->nb[1], att_norm->nb[2],
                                               (n_seq_tokens - 1) * n_embd * ggml_element_size(att_norm)),
                                  ggml_view_3d(ctx0, ffn_norm, n_embd, 1, n_seqs, ffn_norm->nb[1], ffn_norm->nb[2],
                                               (n_seq_tokens - 1) * n_embd * ggml_element_size(ffn_norm)),
                                  1);
        ggml_build_forward_expand(gf, build_rwkv_token_shift_store(token_shift, ubatch, il));

        ffn_inp  = ggml_reshape_2d(ctx0, ffn_inp, n_embd, n_tokens);
        ffn_norm = ggml_reshape_2d(ctx0, ffn_norm, n_embd, n_tokens);
        x_prev   = ggml_reshape_2d(ctx0, x_prev, n_embd, n_tokens);

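        // on the final layer, keep only the rows that actually produce outputs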
        if (il == n_layer - 1 && inp_out_ids) {
            ffn_inp  = ggml_get_rows(ctx0, ffn_inp, inp_out_ids);
            ffn_norm = ggml_get_rows(ctx0, ffn_norm, inp_out_ids);
            x_prev   = ggml_get_rows(ctx0, x_prev, inp_out_ids);
        }
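
        // RWKV7 channel-mix (the feed-forward analogue) plus the residual connection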
        cur = build_rwkv7_channel_mix(layer, ffn_norm, x_prev, LLM_ARCH_RWKV7);
        cur = ggml_add(ctx0, cur, ffn_inp);

        cur = build_cvec(cur, il);
        cb(cur, "l_out", il);

        // input for next layer
        inpL = cur;
    }

    cur = inpL;
    cur = build_norm(cur, model.output_norm, model.output_norm_b, LLM_NORM, -1);

    cb(cur, "result_norm", -1);
    res->t_embd = cur;

    cur = build_lora_mm(model.output, cur);

    cb(cur, "result_output", -1);
    res->t_logits = cur;

    ggml_build_forward_expand(gf, cur);
}