#include "llama-model-saver.h"

#include "gguf.h"

#include "llama.h"
#include "llama-hparams.h"
#include "llama-model.h"
#include "llama-vocab.h"

#include <string>
#include <type_traits>
#include <vector>

llama_model_saver::llama_model_saver(const struct llama_model & model) : model(model), llm_kv(model.arch) {
    gguf_ctx = gguf_init_empty();
}

llama_model_saver::~llama_model_saver() {
    gguf_free(gguf_ctx);
}

void llama_model_saver::add_kv(const enum llm_kv key, const uint32_t value) {
    gguf_set_val_u32(gguf_ctx, llm_kv(key).c_str(), value);
}

void llama_model_saver::add_kv(const enum llm_kv key, const int32_t value) {
    gguf_set_val_i32(gguf_ctx, llm_kv(key).c_str(), value);
}

void llama_model_saver::add_kv(const enum llm_kv key, const float value) {
    gguf_set_val_f32(gguf_ctx, llm_kv(key).c_str(), value);
}

void llama_model_saver::add_kv(const enum llm_kv key, const bool value) {
    gguf_set_val_bool(gguf_ctx, llm_kv(key).c_str(), value);
}

void llama_model_saver::add_kv(const enum llm_kv key, const char * value) {
    gguf_set_val_str(gguf_ctx, llm_kv(key).c_str(), value);
}

[[noreturn]]
void llama_model_saver::add_kv(const enum llm_kv key, const char value) {
    GGML_UNUSED(key);
    GGML_UNUSED(value);
    GGML_ABORT("fatal error"); // this should never be called, only needed to make the template below compile
}

template <typename Container>
void llama_model_saver::add_kv(const enum llm_kv key, const Container & value, const bool per_layer) {
    const size_t n_values = per_layer ? size_t(model.hparams.n_layer) : value.size();
    GGML_ASSERT(n_values <= value.size());

    if (n_values == 0) {
        return;
    }

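    // if every layer shares the same value, collapse the per-layer array into a single scalar KV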
    if (per_layer) {
        bool all_values_the_same = true;
        for (size_t i = 1; i < n_values; ++i) {
            if (value[i] != value[0]) {
                all_values_the_same = false;
                break;
            }
        }
        if (all_values_the_same) {
            add_kv(key, value[0]);
            return;
        }
    }

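    // dispatch on the element type; note that these are plain runtime ifs rather than `if constexpr`,
    // so every branch must compile for each Container this is instantiated with - the char overload
    // above exists only so that the scalar-collapse path compiles for Container == std::string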
    if (std::is_same<typename Container::value_type, uint8_t>::value) {
        gguf_set_arr_data(gguf_ctx, llm_kv(key).c_str(), GGUF_TYPE_UINT8, value.data(), n_values);
    } else if (std::is_same<typename Container::value_type, int8_t>::value) {
        gguf_set_arr_data(gguf_ctx, llm_kv(key).c_str(), GGUF_TYPE_INT8, value.data(), n_values);
    } else if (std::is_same<typename Container::value_type, uint32_t>::value) {
        gguf_set_arr_data(gguf_ctx, llm_kv(key).c_str(), GGUF_TYPE_UINT32, value.data(), n_values);
    } else if (std::is_same<typename Container::value_type, int32_t>::value) {
        gguf_set_arr_data(gguf_ctx, llm_kv(key).c_str(), GGUF_TYPE_INT32, value.data(), n_values);
    } else if (std::is_same<typename Container::value_type, float>::value) {
        gguf_set_arr_data(gguf_ctx, llm_kv(key).c_str(), GGUF_TYPE_FLOAT32, value.data(), n_values);
    } else if (std::is_same<Container, std::string>::value) {
        gguf_set_val_str(gguf_ctx, llm_kv(key).c_str(), reinterpret_cast<const char *>(value.data()));
    } else {
        GGML_ABORT("fatal error");
    }
}

void llama_model_saver::add_kv(const enum llm_kv key, const std::vector<std::string> & value) {
    std::vector<const char *> tmp(value.size());
    for (size_t i = 0; i < value.size(); ++i) {
        tmp[i] = value[i].c_str();
    }
    gguf_set_arr_str(gguf_ctx, llm_kv(key).c_str(), tmp.data(), tmp.size());
}

void llama_model_saver::add_tensor(const struct ggml_tensor * tensor) {
    if (!tensor) {
        return;
    }
    if (gguf_find_tensor(gguf_ctx, tensor->name) >= 0) {
        GGML_ASSERT(std::string(tensor->name) == "rope_freqs.weight"); // FIXME
        return;
    }
    gguf_add_tensor(gguf_ctx, tensor);
}

void llama_model_saver::add_kv_from_model() {
    const llama_hparams & hparams = model.hparams;
    const llama_vocab   & vocab   = model.vocab;

    const int32_t n_vocab = vocab.n_tokens();
    std::vector<std::string> tokens(n_vocab);
    std::vector<float>       scores(n_vocab);
    std::vector<int32_t>     token_types(n_vocab);

    for (int32_t id = 0; id < n_vocab; ++id) {
        const llama_vocab::token_data & token_data = vocab.get_token_data(id);

        tokens[id] = token_data.text;
        scores[id] = token_data.score;

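        // NOTE: llama_token_attr is a bit field; tokens whose attributes combine multiple
        // flags match none of the single-flag cases below and fall through to UNDEFINED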
        switch (token_data.attr) {
            case LLAMA_TOKEN_ATTR_UNKNOWN:      token_types[id] = LLAMA_TOKEN_TYPE_UNKNOWN;      break;
            case LLAMA_TOKEN_ATTR_UNUSED:       token_types[id] = LLAMA_TOKEN_TYPE_UNUSED;       break;
            case LLAMA_TOKEN_ATTR_NORMAL:       token_types[id] = LLAMA_TOKEN_TYPE_NORMAL;       break;
            case LLAMA_TOKEN_ATTR_CONTROL:      token_types[id] = LLAMA_TOKEN_TYPE_CONTROL;      break;
            case LLAMA_TOKEN_ATTR_USER_DEFINED: token_types[id] = LLAMA_TOKEN_TYPE_USER_DEFINED; break;
            case LLAMA_TOKEN_ATTR_BYTE:         token_types[id] = LLAMA_TOKEN_TYPE_BYTE;         break;
            case LLAMA_TOKEN_ATTR_UNDEFINED:
            default:                            token_types[id] = LLAMA_TOKEN_TYPE_UNDEFINED;    break;
        }
    }

    // add_kv(LLM_KV_GENERAL_TYPE, ???);
    add_kv(LLM_KV_GENERAL_ARCHITECTURE, model.arch_name());
    // add_kv(LLM_KV_GENERAL_QUANTIZATION_VERSION, ???);
    // add_kv(LLM_KV_GENERAL_ALIGNMENT, ???);
    add_kv(LLM_KV_GENERAL_NAME, model.name);
    // add_kv(LLM_KV_GENERAL_AUTHOR, ???);
    // add_kv(LLM_KV_GENERAL_VERSION, ???);
    // add_kv(LLM_KV_GENERAL_URL, ???);
    // add_kv(LLM_KV_GENERAL_DESCRIPTION, ???);
    // add_kv(LLM_KV_GENERAL_LICENSE, ???);
    // add_kv(LLM_KV_GENERAL_SOURCE_URL, ???);
    // add_kv(LLM_KV_GENERAL_SOURCE_HF_REPO, ???);

    add_kv(LLM_KV_VOCAB_SIZE, vocab.n_tokens());
    add_kv(LLM_KV_CONTEXT_LENGTH, hparams.n_ctx_train);
    add_kv(LLM_KV_EMBEDDING_LENGTH, hparams.n_embd);
    add_kv(LLM_KV_BLOCK_COUNT, hparams.n_layer);
    add_kv(LLM_KV_LEADING_DENSE_BLOCK_COUNT, hparams.n_layer_dense_lead);
    add_kv(LLM_KV_FEED_FORWARD_LENGTH, hparams.n_ff_arr, true);
    add_kv(LLM_KV_EXPERT_FEED_FORWARD_LENGTH, hparams.n_ff_exp);
    add_kv(LLM_KV_EXPERT_SHARED_FEED_FORWARD_LENGTH, hparams.n_ff_exp);
    add_kv(LLM_KV_USE_PARALLEL_RESIDUAL, hparams.use_par_res);
    // add_kv(LLM_KV_TENSOR_DATA_LAYOUT, ???);
    add_kv(LLM_KV_EXPERT_COUNT, hparams.n_expert);
    add_kv(LLM_KV_EXPERT_USED_COUNT, hparams.n_expert_used);
    add_kv(LLM_KV_EXPERT_SHARED_COUNT, hparams.n_expert_shared);
    add_kv(LLM_KV_EXPERT_WEIGHTS_SCALE, hparams.expert_weights_scale);
    add_kv(LLM_KV_POOLING_TYPE, uint32_t(hparams.pooling_type));
    add_kv(LLM_KV_LOGIT_SCALE, hparams.f_logit_scale);
    add_kv(LLM_KV_DECODER_START_TOKEN_ID, hparams.dec_start_token_id);
    add_kv(LLM_KV_ATTN_LOGIT_SOFTCAPPING, hparams.f_attn_logit_softcapping);
    add_kv(LLM_KV_FINAL_LOGIT_SOFTCAPPING, hparams.f_final_logit_softcapping);
    add_kv(LLM_KV_SWIN_NORM, hparams.swin_norm);
    add_kv(LLM_KV_RESCALE_EVERY_N_LAYERS, hparams.rescale_every_n_layers);
    add_kv(LLM_KV_TIME_MIX_EXTRA_DIM, hparams.time_mix_extra_dim);
    add_kv(LLM_KV_TIME_DECAY_EXTRA_DIM, hparams.time_decay_extra_dim);
    add_kv(LLM_KV_RESIDUAL_SCALE, hparams.f_residual_scale);
    add_kv(LLM_KV_EMBEDDING_SCALE, hparams.f_embedding_scale);

    add_kv(LLM_KV_ATTENTION_HEAD_COUNT, hparams.n_head_arr, true);
    add_kv(LLM_KV_ATTENTION_HEAD_COUNT_KV, hparams.n_head_kv_arr, true);
    add_kv(LLM_KV_ATTENTION_MAX_ALIBI_BIAS, hparams.f_max_alibi_bias);
    add_kv(LLM_KV_ATTENTION_CLAMP_KQV, hparams.f_clamp_kqv);
    add_kv(LLM_KV_ATTENTION_KEY_LENGTH, hparams.n_embd_head_k);
    add_kv(LLM_KV_ATTENTION_VALUE_LENGTH, hparams.n_embd_head_v);
    add_kv(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
    add_kv(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS, hparams.f_norm_rms_eps);
    add_kv(LLM_KV_ATTENTION_CAUSAL, hparams.causal_attn);
    add_kv(LLM_KV_ATTENTION_Q_LORA_RANK, hparams.n_lora_q);
    add_kv(LLM_KV_ATTENTION_KV_LORA_RANK, hparams.n_lora_kv);
    add_kv(LLM_KV_ATTENTION_RELATIVE_BUCKETS_COUNT, hparams.n_rel_attn_bkts);
    add_kv(LLM_KV_ATTENTION_SLIDING_WINDOW, hparams.n_swa);
    add_kv(LLM_KV_ATTENTION_SCALE, hparams.f_attention_scale);

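    // the GGUF KV stores the scaling factor as the inverse of the training frequency scale;
    // 0.0f is the conventional "no scaling" sentinel, mapped back to a freq scale of 1.0f on load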
    const float rope_scaling_factor = hparams.rope_freq_scale_train == 1.0f ? 0.0f : 1.0f/hparams.rope_freq_scale_train;

    add_kv(LLM_KV_ROPE_DIMENSION_COUNT, hparams.n_rot);
    add_kv(LLM_KV_ROPE_FREQ_BASE, hparams.rope_freq_base_train);
    // add_kv(LLM_KV_ROPE_SCALE_LINEAR, rope_scaling_factor); // old name
    add_kv(LLM_KV_ROPE_SCALING_TYPE, llama_rope_scaling_type_name(hparams.rope_scaling_type_train));
    add_kv(LLM_KV_ROPE_SCALING_FACTOR, rope_scaling_factor);
    add_kv(LLM_KV_ROPE_SCALING_ATTN_FACTOR, hparams.rope_attn_factor);
    add_kv(LLM_KV_ROPE_SCALING_ORIG_CTX_LEN, hparams.n_ctx_orig_yarn);
    add_kv(LLM_KV_ROPE_SCALING_FINETUNED, hparams.rope_finetuned);
    add_kv(LLM_KV_ROPE_SCALING_YARN_LOG_MUL, hparams.rope_yarn_log_mul);

    // TODO: implement split file support
    // add_kv(LLM_KV_SPLIT_NO, ???);
    // add_kv(LLM_KV_SPLIT_COUNT, ???);
    // add_kv(LLM_KV_SPLIT_TENSORS_COUNT, ???);

    add_kv(LLM_KV_SSM_INNER_SIZE, hparams.ssm_d_inner);
    add_kv(LLM_KV_SSM_CONV_KERNEL, hparams.ssm_d_conv);
    add_kv(LLM_KV_SSM_STATE_SIZE, hparams.ssm_d_state);
    add_kv(LLM_KV_SSM_TIME_STEP_RANK, hparams.ssm_dt_rank);
    add_kv(LLM_KV_SSM_DT_B_C_RMS, hparams.ssm_dt_b_c_rms);

    add_kv(LLM_KV_WKV_HEAD_SIZE, hparams.wkv_head_size);

    add_kv(LLM_KV_TOKENIZER_MODEL, vocab.get_tokenizer_model());
    add_kv(LLM_KV_TOKENIZER_PRE, vocab.get_tokenizer_pre());
    add_kv(LLM_KV_TOKENIZER_LIST, tokens);
    add_kv(LLM_KV_TOKENIZER_TOKEN_TYPE, token_types);
    add_kv(LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, vocab.n_token_types());
    add_kv(LLM_KV_TOKENIZER_SCORES, scores);
    add_kv(LLM_KV_TOKENIZER_MERGES, vocab.get_bpe_merges());
    // FIXME: llama_token is i32, but u32 is expected when reading a GGUF file; not an issue when writing, though
    add_kv(LLM_KV_TOKENIZER_BOS_ID, uint32_t(vocab.token_bos()));
    add_kv(LLM_KV_TOKENIZER_EOS_ID, uint32_t(vocab.token_eos()));
    add_kv(LLM_KV_TOKENIZER_EOT_ID, uint32_t(vocab.token_eot()));
    add_kv(LLM_KV_TOKENIZER_EOM_ID, uint32_t(vocab.token_eom()));
    add_kv(LLM_KV_TOKENIZER_UNK_ID, uint32_t(vocab.token_unk()));
    add_kv(LLM_KV_TOKENIZER_SEP_ID, uint32_t(vocab.token_sep()));
    add_kv(LLM_KV_TOKENIZER_PAD_ID, uint32_t(vocab.token_pad()));
    // add_kv(LLM_KV_TOKENIZER_CLS_ID, uint32_t(vocab.token_bos())); // deprecated
    // add_kv(LLM_KV_TOKENIZER_MASK_ID, ???);
    add_kv(LLM_KV_TOKENIZER_ADD_BOS, vocab.get_add_bos());
    add_kv(LLM_KV_TOKENIZER_ADD_EOS, vocab.get_add_eos());
    add_kv(LLM_KV_TOKENIZER_ADD_SEP, vocab.get_add_sep());
    add_kv(LLM_KV_TOKENIZER_ADD_PREFIX, vocab.get_add_space_prefix());
    add_kv(LLM_KV_TOKENIZER_REMOVE_EXTRA_WS, vocab.get_remove_extra_whitespaces());
    add_kv(LLM_KV_TOKENIZER_PRECOMPILED_CHARSMAP, vocab.get_precompiled_charsmap());
    // add_kv(LLM_KV_TOKENIZER_HF_JSON, ???);
    // add_kv(LLM_KV_TOKENIZER_RWKV, ???);
    add_kv(LLM_KV_TOKENIZER_FIM_PRE_ID, uint32_t(vocab.token_fim_pre()));
    add_kv(LLM_KV_TOKENIZER_FIM_SUF_ID, uint32_t(vocab.token_fim_suf()));
    add_kv(LLM_KV_TOKENIZER_FIM_MID_ID, uint32_t(vocab.token_fim_mid()));
    add_kv(LLM_KV_TOKENIZER_FIM_PAD_ID, uint32_t(vocab.token_fim_pad()));
    add_kv(LLM_KV_TOKENIZER_FIM_REP_ID, uint32_t(vocab.token_fim_rep()));
    add_kv(LLM_KV_TOKENIZER_FIM_SEP_ID, uint32_t(vocab.token_fim_sep()));

    // TODO: implement LoRA support
    // add_kv(LLM_KV_ADAPTER_TYPE, ???);
    // add_kv(LLM_KV_ADAPTER_LORA_ALPHA, ???);

    // deprecated
    // add_kv(LLM_KV_TOKENIZER_PREFIX_ID, ???);
    // add_kv(LLM_KV_TOKENIZER_SUFFIX_ID, ???);
    // add_kv(LLM_KV_TOKENIZER_MIDDLE_ID, ???);
}

void llama_model_saver::add_tensors_from_model() {
    if (std::string(model.output->name) != std::string(model.tok_embd->name)) {
        add_tensor(model.tok_embd); // some models use the same tensor for tok_embd and output
    }
    add_tensor(model.type_embd);
    add_tensor(model.pos_embd);
    add_tensor(model.tok_norm);
    add_tensor(model.tok_norm_b);
    add_tensor(model.output_norm);
    add_tensor(model.output_norm_b);
    add_tensor(model.output);
    add_tensor(model.output_b);
    add_tensor(model.output_norm_enc);
    add_tensor(model.cls);
    add_tensor(model.cls_b);
    add_tensor(model.cls_out);
    add_tensor(model.cls_out_b);

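    // NOTE: this relies on llama_layer consisting entirely of ggml_tensor pointers
    // (including any nested sub-structs), so the struct can be walked as a flat pointer array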
    for (const struct llama_layer & layer : model.layers) {
        for (size_t i = 0; i < sizeof(layer)/sizeof(struct ggml_tensor *); ++i) {
            add_tensor(reinterpret_cast<const struct ggml_tensor * const *>(&layer)[i]);
        }
    }
}

void llama_model_saver::save(const std::string & path_model) {
    gguf_write_to_file(gguf_ctx, path_model.c_str(), false);
}
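
// Example usage (a minimal sketch; assumes `model` is a fully loaded llama_model,
// and "model-copy.gguf" is a hypothetical output path):
//
//   llama_model_saver saver(model);
//   saver.add_kv_from_model();
//   saver.add_tensors_from_model();
//   saver.save("model-copy.gguf");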