// fix problem with std::min and std::max
#if defined(_WIN32)
#define WIN32_LEAN_AND_MEAN
#ifndef NOMINMAX
# define NOMINMAX
#endif
#include <windows.h>
#endif

#include "mtmd.h"
#include "mtmd-helper.h"
#include "llama.h"

#include <algorithm>
#include <cerrno>
#include <cinttypes>
#include <cstdio>
#include <cstring>
#include <vector>

//#define MTMD_AUDIO_DEBUG

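// miniaudio is vendored below in a decode-only configuration: device I/O, the
// resource manager, node graph, engine and generation APIs are compiled out,
// and encoding support is only kept when MTMD_AUDIO_DEBUG is defined (it is
// used to dump the decoded PCM to a WAV file for inspection).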
#define MINIAUDIO_IMPLEMENTATION
#ifndef MTMD_AUDIO_DEBUG
# define MA_NO_ENCODING
#endif
#define MA_NO_DEVICE_IO
#define MA_NO_RESOURCE_MANAGER
#define MA_NO_NODE_GRAPH
#define MA_NO_ENGINE
#define MA_NO_GENERATION
#define MA_API static
#include "miniaudio/miniaudio.h"

#define STB_IMAGE_IMPLEMENTATION
#include "stb/stb_image.h"

#define LOG_INF(...) fprintf(stdout, __VA_ARGS__)
#define LOG_ERR(...) fprintf(stderr, __VA_ARGS__)

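// Sum the token counts of all chunks (text, image and audio) in the list.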
size_t mtmd_helper_get_n_tokens(const mtmd_input_chunks * chunks) {
    size_t n_tokens = 0;
    for (size_t i = 0; i < mtmd_input_chunks_size(chunks); i++) {
        auto chunk = mtmd_input_chunks_get(chunks, i);
        n_tokens += mtmd_input_chunk_get_n_tokens(chunk);
    }
    return n_tokens;
}

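// Sum the number of positions occupied by all chunks in the KV cache. For
// models using M-RoPE this can differ from the total token count.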
llama_pos mtmd_helper_get_n_pos(const mtmd_input_chunks * chunks) {
    llama_pos n_pos = 0;
    for (size_t i = 0; i < mtmd_input_chunks_size(chunks); i++) {
        auto chunk = mtmd_input_chunks_get(chunks, i);
        n_pos += mtmd_input_chunk_get_n_pos(chunk);
    }
    return n_pos;
}

// helper struct to make working with embd batch easier
// note: this will be removed after llama_batch_ext refactoring
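//
// The `pos` vector is laid out in n_pos_per_embd planes of n_tokens entries
// each (plane-major), i.e. pos[d * n_tokens + i] holds position dimension d of
// token i. With normal RoPE there is a single plane; with M-RoPE there are
// four (time, height, width, plus one unused dimension).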
struct decode_embd_batch {
    int n_pos_per_embd;
    int n_mmproj_embd;
    std::vector<llama_pos>      pos;
    std::vector<llama_pos>      pos_view; // used by mrope
    std::vector<int32_t>        n_seq_id;
    std::vector<llama_seq_id>   seq_id_0;
    std::vector<llama_seq_id *> seq_ids;
    std::vector<int8_t>         logits;
    llama_batch batch;
    decode_embd_batch(float * embd, int32_t n_tokens, int n_pos_per_embd, int n_mmproj_embd) : n_pos_per_embd(n_pos_per_embd), n_mmproj_embd(n_mmproj_embd) {
        pos     .resize(n_tokens * n_pos_per_embd);
        n_seq_id.resize(n_tokens);
        seq_ids .resize(n_tokens + 1);
        logits  .resize(n_tokens);
        seq_id_0.resize(1);
        seq_ids[n_tokens] = nullptr;
        batch = {
            /*n_tokens =*/ n_tokens,
            /*tokens   =*/ nullptr,
            /*embd     =*/ embd,
            /*pos      =*/ pos.data(),
            /*n_seq_id =*/ n_seq_id.data(),
            /*seq_id   =*/ seq_ids.data(),
            /*logits   =*/ logits.data(),
        };
    }

    void set_position_normal(llama_pos pos_0, llama_seq_id seq_id) {
        seq_id_0[0] = seq_id;
        for (int i = 0; i < batch.n_tokens; i++) {
            batch.pos     [i] = pos_0 + i;
            batch.n_seq_id[i] = 1;
            batch.seq_id  [i] = seq_id_0.data();
            batch.logits  [i] = false;
        }
    }

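    // Fill the four position planes for a 2D image grid: the temporal
    // component stays at pos_0 for every token, while the height and width
    // components encode the token's (y, x) coordinate within the grid.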
    // M-RoPE for image
    void set_position_mrope_2d(llama_pos pos_0, int nx, int ny, llama_seq_id seq_id) {
        GGML_ASSERT(n_pos_per_embd == 4);
        seq_id_0[0] = seq_id;
        for (int y = 0; y < ny; y++) {
            for (int x = 0; x < nx; x++) {
                int i = y * nx + x;
                pos[i                     ] = pos_0;
                pos[i + batch.n_tokens    ] = pos_0 + y;
                pos[i + batch.n_tokens * 2] = pos_0 + x;
                pos[i + batch.n_tokens * 3] = 0; // last pos dim is unused
            }
        }
        for (int i = 0; i < batch.n_tokens; i++) {
            batch.n_seq_id[i] = 1;
            batch.seq_id  [i] = seq_id_0.data();
            batch.logits  [i] = false;
        }
    }

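    // Fill the position planes for a 1D stream (audio): all three used
    // components advance linearly with the token index.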
    // M-RoPE for audio
    void set_position_mrope_1d(llama_pos pos_0, llama_seq_id seq_id) {
        GGML_ASSERT(n_pos_per_embd == 4);
        seq_id_0[0] = seq_id;
        for (int i = 0; i < batch.n_tokens; i++) {
            pos[i                     ] = pos_0 + i;
            pos[i + batch.n_tokens    ] = pos_0 + i;
            pos[i + batch.n_tokens * 2] = pos_0 + i;
            pos[i + batch.n_tokens * 3] = 0; // last pos dim is unused
        }
        for (int i = 0; i < batch.n_tokens; i++) {
            batch.n_seq_id[i] = 1;
            batch.seq_id  [i] = seq_id_0.data();
            batch.logits  [i] = false;
        }
    }

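    // Return a llama_batch that views n_tokens tokens starting at offset.
    // For M-RoPE the per-plane positions are re-packed into pos_view so that
    // the view keeps the same plane-major layout as the full batch.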
    llama_batch get_view(int offset, int n_tokens) {
        llama_pos * pos_ptr;
        pos_view.clear();
        pos_view.reserve(n_tokens * n_pos_per_embd);
        if (n_pos_per_embd > 1) {
            // mrope
            // for example, with layout of src: 1234...1234...1234...1234...
            // offset 2 will give us dst: 34...34...34...34...
            for (int i = 0; i < n_pos_per_embd; i++) {
                // assume n_tokens is less than or equal to batch.n_tokens
                // batch.n_tokens is the number of **total** tokens
                // n_tokens is the number of viewed tokens
                size_t src_idx = i * batch.n_tokens + offset;
                pos_view.insert(pos_view.end(),
                    pos.data() + src_idx,
                    pos.data() + src_idx + n_tokens);
            }
            pos_ptr = pos_view.data();
        } else {
            // normal
            pos_ptr = pos.data() + offset;
        }
        return {
            /*n_tokens =*/ n_tokens,
            /*tokens   =*/ nullptr,
            /*embd     =*/ batch.embd + offset * n_mmproj_embd,
            /*pos      =*/ pos_ptr,
            /*n_seq_id =*/ batch.n_seq_id + offset,
            /*seq_id   =*/ batch.seq_id + offset,
            /*logits   =*/ batch.logits + offset,
        };
    }
};

// Helper function for decoding an image whose embeddings have already been calculated
int32_t mtmd_helper_decode_image_chunk(
        mtmd_context * ctx,
        struct llama_context * lctx,
        const mtmd_input_chunk * chunk,
        float * encoded_embd,
        llama_pos n_past,
        llama_seq_id seq_id,
        int32_t n_batch,
        llama_pos * new_n_past) {
    auto chunk_type = mtmd_input_chunk_get_type(chunk);
    const char * name = chunk_type == MTMD_INPUT_CHUNK_TYPE_IMAGE ? "image" : "audio";
    if (chunk_type == MTMD_INPUT_CHUNK_TYPE_TEXT) {
        LOG_ERR("failed to decode chunk: input chunk not of image/audio type\n");
        return -1;
    }

    const llama_model * model = llama_get_model(lctx);
    int n_mmproj_embd  = llama_model_n_embd(model);
    int n_pos_per_embd = mtmd_decode_use_mrope(ctx) ? 4 : 1;

    int32_t n_tokens = mtmd_input_chunk_get_n_tokens(chunk);
    int32_t i_batch = 0;
    int32_t n_img_batches = GGML_PAD(n_tokens, n_batch) / n_batch;
    decode_embd_batch batch_embd(encoded_embd, n_tokens, n_pos_per_embd, n_mmproj_embd);

    if (mtmd_decode_use_mrope(ctx)) {
        if (chunk_type == MTMD_INPUT_CHUNK_TYPE_IMAGE) {
            const auto image_tokens = mtmd_input_chunk_get_tokens_image(chunk);
            if (!image_tokens) {
                LOG_ERR("failed to decode chunk: image tokens are null\n");
                return -1;
            }
            const int nx = mtmd_image_tokens_get_nx(image_tokens);
            const int ny = mtmd_image_tokens_get_ny(image_tokens);
            batch_embd.set_position_mrope_2d(n_past, nx, ny, seq_id);
        } else if (chunk_type == MTMD_INPUT_CHUNK_TYPE_AUDIO) {
            batch_embd.set_position_mrope_1d(n_past, seq_id);
        } else {
            GGML_ABORT("invalid chunk type for M-RoPE");
        }
    } else {
        batch_embd.set_position_normal(n_past, seq_id);
    }

    if (mtmd_decode_use_non_causal(ctx)) {
        llama_set_causal_attn(lctx, false);
        // TODO @ngxson : need to make sure only one image is processed at a time, and n_ubatch must be enough to hold the image
    }

    while (i_batch < n_img_batches) { // split into batches
        int pos_offset = i_batch*n_batch;
        int n_tokens_batch = std::min(n_batch, n_tokens - pos_offset);
        llama_batch batch_embd_view = batch_embd.get_view(pos_offset, n_tokens_batch);

        LOG_INF("decoding %s batch %d/%d, n_tokens_batch = %d\n", name, i_batch+1, n_img_batches, n_tokens_batch);

        int64_t t1 = ggml_time_ms();
        int32_t ret = llama_decode(lctx, batch_embd_view);
        if (ret != 0) {
            LOG_ERR("failed to decode %s\n", name);
            llama_set_causal_attn(lctx, true); // restore causal attn
            return ret;
        }

        LOG_INF("%s decoded (batch %d/%d) in %" PRId64 " ms\n", name, i_batch+1, n_img_batches, ggml_time_ms() - t1);

        i_batch++;
    }

    n_past += mtmd_input_chunk_get_n_pos(chunk);
    *new_n_past = n_past;

    if (mtmd_decode_use_non_causal(ctx)) {
        llama_set_causal_attn(lctx, true);
    }
    return 0;
}

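// Evaluate a single chunk: text chunks are decoded directly in sub-batches of
// at most n_batch tokens, while image/audio chunks are encoded with
// mtmd_encode_chunk() and then decoded via mtmd_helper_decode_image_chunk().
// On success, *new_n_past is advanced past the chunk.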
int32_t mtmd_helper_eval_chunk_single(mtmd_context * ctx,
                                      struct llama_context * lctx,
                                      const mtmd_input_chunk * chunk,
                                      llama_pos n_past,
                                      llama_seq_id seq_id,
                                      int32_t n_batch,
                                      bool logits_last,
                                      llama_pos * new_n_past) {
    int32_t ret;
    llama_batch text_batch = llama_batch_init(n_batch, 0, 1);
    auto chunk_type = mtmd_input_chunk_get_type(chunk);

    if (chunk_type == MTMD_INPUT_CHUNK_TYPE_TEXT) {
        size_t n_tokens;
        const auto tokens = mtmd_input_chunk_get_tokens_text(chunk, &n_tokens);
        // LOG_INF("decoding text chunk, n_tokens = %zu\n", n_tokens);
        size_t i = 0;
        while (i < n_tokens) { // split into batches
            text_batch.n_tokens = 0; // clear the batch
            for (; i < n_tokens && text_batch.n_tokens < n_batch; i++) {
                int32_t j = text_batch.n_tokens;
                text_batch.token   [j]    = tokens[i];
                text_batch.pos     [j]    = n_past++;
                text_batch.n_seq_id[j]    = 1;
                text_batch.seq_id  [j][0] = seq_id;
                text_batch.logits  [j]    = false;

                text_batch.n_tokens++;
            }
            bool is_last_token = (i == n_tokens);
            if (logits_last && is_last_token) {
                text_batch.logits[text_batch.n_tokens - 1] = true;
            }
            ret = llama_decode(lctx, text_batch);
            if (ret != 0) {
                LOG_ERR("failed to decode text\n");
                llama_batch_free(text_batch);
                return ret;
            }
            *new_n_past += text_batch.n_tokens;
        }

    } else if (chunk_type == MTMD_INPUT_CHUNK_TYPE_IMAGE || chunk_type == MTMD_INPUT_CHUNK_TYPE_AUDIO) {
        const char * name = chunk_type == MTMD_INPUT_CHUNK_TYPE_IMAGE ? "image" : "audio";
        int64_t t0 = ggml_time_ms();

        LOG_INF("encoding %s slice...\n", name);

        ret = mtmd_encode_chunk(ctx, chunk);
        if (ret != 0) {
            LOG_ERR("failed to encode %s slice\n", name);
            llama_batch_free(text_batch);
            return ret;
        }

        LOG_INF("%s slice encoded in %" PRId64 " ms\n", name, ggml_time_ms() - t0);

        float * embd = mtmd_get_output_embd(ctx);
        ret = mtmd_helper_decode_image_chunk(ctx, lctx, chunk, embd, n_past, seq_id, n_batch, new_n_past);
        if (ret != 0) {
            LOG_ERR("failed to decode %s\n", name);
            llama_batch_free(text_batch);
            return ret;
        }
    } else {
        GGML_ABORT("chunk type not supported");
    }

    llama_batch_free(text_batch);
    return 0;
}

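// Evaluate a list of chunks in order, threading n_past through each chunk.
// Typical usage (illustrative sketch; the tokenization step and parameter
// values are the caller's responsibility):
//
//   llama_pos new_n_past = 0;
//   if (mtmd_helper_eval_chunks(ctx, lctx, chunks,
//                               /*n_past     =*/ 0,
//                               /*seq_id     =*/ 0,
//                               /*n_batch    =*/ 512,
//                               /*logits_last=*/ true,
//                               &new_n_past) != 0) {
//       // handle error
//   }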
int32_t mtmd_helper_eval_chunks(mtmd_context * ctx,
                                struct llama_context * lctx,
                                const mtmd_input_chunks * chunks,
                                llama_pos n_past,
                                llama_seq_id seq_id,
                                int32_t n_batch,
                                bool logits_last,
                                llama_pos * new_n_past) {
    size_t n_chunks = mtmd_input_chunks_size(chunks);
    if (n_chunks == 0) {
        LOG_ERR("no chunks to eval\n");
        return 0;
    }

    for (size_t i = 0; i < n_chunks; i++) {
        bool chunk_logits_last = (i == n_chunks - 1) && logits_last;
        auto chunk = mtmd_input_chunks_get(chunks, i);

        int32_t res = mtmd_helper_eval_chunk_single(ctx, lctx, chunk, n_past, seq_id, n_batch, chunk_logits_last, &n_past);
        if (res != 0) {
            LOG_ERR("failed to eval chunk %zu\n", i);
            return res;
        }
        *new_n_past = n_past;
    }

    return 0;
}

namespace audio_helpers {

static bool is_audio_file(const char * buf, size_t len) {
    if (len < 12) {
        return false;
    }

    // RIFF ref: https://en.wikipedia.org/wiki/Resource_Interchange_File_Format
    // WAV ref: https://www.mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/WAVE.html
    bool is_wav = memcmp(buf, "RIFF", 4) == 0 && memcmp(buf + 8, "WAVE", 4) == 0;
    bool is_mp3 = len >= 3 && (
        memcmp(buf, "ID3", 3) == 0 ||
        // Check for MPEG sync word (simplified check)
        ((unsigned char)buf[0] == 0xFF && ((unsigned char)buf[1] & 0xE0) == 0xE0)
    );
    bool is_flac = memcmp(buf, "fLaC", 4) == 0;

    return is_wav || is_mp3 || is_flac;
}

// decode an in-memory audio file (WAV/MP3/FLAC) into mono f32 PCM at the target sample rate
// returns true on success
static bool decode_audio_from_buf(const unsigned char * buf_in, size_t len, int target_sampler_rate, std::vector<float> & pcmf32_mono) {
    ma_result result;
    const int channels = 1;
    ma_decoder_config decoder_config = ma_decoder_config_init(ma_format_f32, channels, target_sampler_rate);
    ma_decoder decoder;

    result = ma_decoder_init_memory(buf_in, len, &decoder_config, &decoder);
    if (result != MA_SUCCESS) {
        return false;
    }

    ma_uint64 frame_count;
    ma_uint64 frames_read;
    result = ma_decoder_get_length_in_pcm_frames(&decoder, &frame_count);
    if (result != MA_SUCCESS) {
        ma_decoder_uninit(&decoder);
        return false;
    }

    pcmf32_mono.resize(frame_count);
    result = ma_decoder_read_pcm_frames(&decoder, pcmf32_mono.data(), frame_count, &frames_read);
    if (result != MA_SUCCESS) {
        ma_decoder_uninit(&decoder);
        return false;
    }

#ifdef MTMD_AUDIO_DEBUG
    // save the decoded audio to a wav file for inspection
    ma_encoder_config config = ma_encoder_config_init(ma_encoding_format_wav, ma_format_f32, 1, target_sampler_rate);
    ma_encoder encoder;
    ma_encoder_init_file("output.wav", &config, &encoder);
    ma_encoder_write_pcm_frames(&encoder, pcmf32_mono.data(), pcmf32_mono.size(), &frames_read);
    ma_encoder_uninit(&encoder);
#endif

    ma_decoder_uninit(&decoder);
    return true;
}

} // namespace audio_helpers

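// Build an mtmd_bitmap from an in-memory buffer. If the buffer looks like an
// audio file (WAV/MP3/FLAC), it is decoded to mono f32 PCM at the rate
// reported by mtmd_get_audio_bitrate(); otherwise it is treated as an image
// and decoded with stb_image (forced to 3 channels, RGB). Returns nullptr on
// failure.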
mtmd_bitmap * mtmd_helper_bitmap_init_from_buf(mtmd_context * ctx, const unsigned char * buf, size_t len) {
    if (audio_helpers::is_audio_file((const char *)buf, len)) {
        std::vector<float> pcmf32;
        int bitrate = mtmd_get_audio_bitrate(ctx);
        if (bitrate < 0) {
            LOG_ERR("This model does not support audio input\n");
            return nullptr;
        }
        if (!audio_helpers::decode_audio_from_buf(buf, len, bitrate, pcmf32)) {
            LOG_ERR("Unable to decode audio from buffer\n");
            return nullptr;
        }
        return mtmd_bitmap_init_from_audio(pcmf32.size(), pcmf32.data());
    }

    // otherwise, we assume it's an image
    mtmd_bitmap * result = nullptr;
    {
        int nx, ny, nc;
        auto * data = stbi_load_from_memory(buf, len, &nx, &ny, &nc, 3);
        if (!data) {
            LOG_ERR("%s: failed to decode image bytes\n", __func__);
            return nullptr;
        }
        result = mtmd_bitmap_init(nx, ny, data);
        stbi_image_free(data);
    }
    return result;
}

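// Same as mtmd_helper_bitmap_init_from_buf(), but reads the whole file from
// disk first. Returns nullptr if the file cannot be opened or fully read.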
mtmd_bitmap * mtmd_helper_bitmap_init_from_file(mtmd_context * ctx, const char * fname) {
    std::vector<unsigned char> buf;
    FILE * f = fopen(fname, "rb");
    if (!f) {
        LOG_ERR("Unable to open file %s: %s\n", fname, strerror(errno));
        return nullptr;
    }

    fseek(f, 0, SEEK_END);
    long file_size = ftell(f);
    fseek(f, 0, SEEK_SET);
    buf.resize(file_size);

    size_t n_read = fread(buf.data(), 1, file_size, f);
    fclose(f);
    if (n_read != (size_t)file_size) {
        LOG_ERR("Failed to read entire file %s\n", fname);
        return nullptr;
    }

    return mtmd_helper_bitmap_init_from_buf(ctx, buf.data(), buf.size());
}