| 1 | // Copyright 2014 Google Inc. All Rights Reserved. |
| 2 | // |
| 3 | // Use of this source code is governed by a BSD-style license |
| 4 | // that can be found in the COPYING file in the root of the source |
| 5 | // tree. An additional intellectual property rights grant can be found |
| 6 | // in the file PATENTS. All contributing project authors may |
| 7 | // be found in the AUTHORS file in the root of the source tree. |
| 8 | // ----------------------------------------------------------------------------- |
| 9 | // |
| 10 | // AnimEncoder implementation. |
| 11 | // |
| 12 | |
| 13 | #include <assert.h> |
| 14 | #include <limits.h> |
| 15 | #include <math.h> // for pow() |
| 16 | #include <stdio.h> |
| 17 | #include <stdlib.h> // for abs() |
| 18 | |
| 19 | #include "src/mux/animi.h" |
| 20 | #include "src/utils/utils.h" |
| 21 | #include "src/webp/decode.h" |
| 22 | #include "src/webp/encode.h" |
| 23 | #include "src/webp/format_constants.h" |
| 24 | #include "src/webp/mux.h" |
| 25 | |
| 26 | #if defined(_MSC_VER) && _MSC_VER < 1900 |
| 27 | #define snprintf _snprintf |
| 28 | #endif |
| 29 | |
| 30 | #define ERROR_STR_MAX_LENGTH 100 |
| 31 | |
| 32 | //------------------------------------------------------------------------------ |
| 33 | // Internal structs. |
| 34 | |
// Stores frame rectangle dimensions: top-left corner offset within the canvas
// plus width/height, all in pixels.
typedef struct {
  int x_offset_, y_offset_, width_, height_;
} FrameRectangle;
| 39 | |
// Used to store two candidates of encoded data for an animation frame. One of
// the two will be chosen later.
typedef struct {
  WebPMuxFrameInfo sub_frame_;  // Encoded frame rectangle (delta vs canvas).
  WebPMuxFrameInfo key_frame_;  // Encoded frame if it is a key-frame.
  int is_key_frame_;            // True if 'key_frame' has been chosen.
} EncodedFrame;
| 47 | |
// Full encoder state: canvas geometry and options, scratch canvases used for
// frame differencing, the window of cached candidate frames awaiting the
// key-frame decision, and the muxer that assembles the output bitstream.
struct WebPAnimEncoder {
  const int canvas_width_;                 // Canvas width.
  const int canvas_height_;                // Canvas height.
  const WebPAnimEncoderOptions options_;   // Global encoding options.

  FrameRectangle prev_rect_;               // Previous WebP frame rectangle.
  WebPConfig last_config_;                 // Cached in case a re-encode is needed.
  WebPConfig last_config_reversed_;        // If 'last_config_' uses lossless, then
                                           // this config uses lossy and vice versa;
                                           // only valid if 'options_.allow_mixed'
                                           // is true.

  WebPPicture* curr_canvas_;               // Only pointer; we don't own memory.

  // Canvas buffers.
  WebPPicture curr_canvas_copy_;           // Possibly modified current canvas.
  int curr_canvas_copy_modified_;          // True if pixels in 'curr_canvas_copy_'
                                           // differ from those in 'curr_canvas_'.

  WebPPicture prev_canvas_;                // Previous canvas.
  WebPPicture prev_canvas_disposed_;       // Previous canvas disposed to background.

  // Encoded data.
  EncodedFrame* encoded_frames_;           // Array of encoded frames.
  size_t size_;                            // Number of allocated frames.
  size_t start_;                           // Frame start index.
  size_t count_;                           // Number of valid frames.
  size_t flush_count_;                     // If >0, 'flush_count' frames starting from
                                           // 'start' are ready to be added to mux.

  // key-frame related.
  int64_t best_delta_;                     // min(canvas size - frame size) over the frames.
                                           // Can be negative in certain cases due to
                                           // transparent pixels in a frame.
  int keyframe_;                           // Index of selected key-frame relative to 'start_'.
  int count_since_key_frame_;              // Frames seen since the last key-frame.

  int first_timestamp_;                    // Timestamp of the first frame.
  int prev_timestamp_;                     // Timestamp of the last added frame.
  int prev_candidate_undecided_;           // True if it's not yet decided if previous
                                           // frame would be a sub-frame or a key-frame.

  // Misc.
  int is_first_frame_;   // True if first frame is yet to be added/being added.
  int got_null_frame_;   // True if WebPAnimEncoderAdd() has already been called
                         // with a NULL frame.

  size_t in_frame_count_;   // Number of input frames processed so far.
  size_t out_frame_count_;  // Number of frames added to mux so far. This may be
                            // different from 'in_frame_count_' due to merging.

  WebPMux* mux_;            // Muxer to assemble the WebP bitstream.
  char error_str_[ERROR_STR_MAX_LENGTH];  // Error string. Empty if no error.
};
| 102 | |
| 103 | // ----------------------------------------------------------------------------- |
| 104 | // Life of WebPAnimEncoder object. |
| 105 | |
| 106 | #define DELTA_INFINITY (1ULL << 32) |
| 107 | #define KEYFRAME_NONE (-1) |
| 108 | |
| 109 | // Reset the counters in the WebPAnimEncoder. |
| 110 | static void ResetCounters(WebPAnimEncoder* const enc) { |
| 111 | enc->start_ = 0; |
| 112 | enc->count_ = 0; |
| 113 | enc->flush_count_ = 0; |
| 114 | enc->best_delta_ = DELTA_INFINITY; |
| 115 | enc->keyframe_ = KEYFRAME_NONE; |
| 116 | } |
| 117 | |
| 118 | static void DisableKeyframes(WebPAnimEncoderOptions* const enc_options) { |
| 119 | enc_options->kmax = INT_MAX; |
| 120 | enc_options->kmin = enc_options->kmax - 1; |
| 121 | } |
| 122 | |
| 123 | #define MAX_CACHED_FRAMES 30 |
| 124 | |
| 125 | static void SanitizeEncoderOptions(WebPAnimEncoderOptions* const enc_options) { |
| 126 | int print_warning = enc_options->verbose; |
| 127 | |
| 128 | if (enc_options->minimize_size) { |
| 129 | DisableKeyframes(enc_options); |
| 130 | } |
| 131 | |
| 132 | if (enc_options->kmax == 1) { // All frames will be key-frames. |
| 133 | enc_options->kmin = 0; |
| 134 | enc_options->kmax = 0; |
| 135 | return; |
| 136 | } else if (enc_options->kmax <= 0) { |
| 137 | DisableKeyframes(enc_options); |
| 138 | print_warning = 0; |
| 139 | } |
| 140 | |
| 141 | if (enc_options->kmin >= enc_options->kmax) { |
| 142 | enc_options->kmin = enc_options->kmax - 1; |
| 143 | if (print_warning) { |
| 144 | fprintf(stderr, "WARNING: Setting kmin = %d, so that kmin < kmax.\n" , |
| 145 | enc_options->kmin); |
| 146 | } |
| 147 | } else { |
| 148 | const int kmin_limit = enc_options->kmax / 2 + 1; |
| 149 | if (enc_options->kmin < kmin_limit && kmin_limit < enc_options->kmax) { |
| 150 | // This ensures that enc.keyframe + kmin >= kmax is always true. So, we |
| 151 | // can flush all the frames in the 'count_since_key_frame == kmax' case. |
| 152 | enc_options->kmin = kmin_limit; |
| 153 | if (print_warning) { |
| 154 | fprintf(stderr, |
| 155 | "WARNING: Setting kmin = %d, so that kmin >= kmax / 2 + 1.\n" , |
| 156 | enc_options->kmin); |
| 157 | } |
| 158 | } |
| 159 | } |
| 160 | // Limit the max number of frames that are allocated. |
| 161 | if (enc_options->kmax - enc_options->kmin > MAX_CACHED_FRAMES) { |
| 162 | enc_options->kmin = enc_options->kmax - MAX_CACHED_FRAMES; |
| 163 | if (print_warning) { |
| 164 | fprintf(stderr, |
| 165 | "WARNING: Setting kmin = %d, so that kmax - kmin <= %d.\n" , |
| 166 | enc_options->kmin, MAX_CACHED_FRAMES); |
| 167 | } |
| 168 | } |
| 169 | assert(enc_options->kmin < enc_options->kmax); |
| 170 | } |
| 171 | |
| 172 | #undef MAX_CACHED_FRAMES |
| 173 | |
| 174 | static void DefaultEncoderOptions(WebPAnimEncoderOptions* const enc_options) { |
| 175 | enc_options->anim_params.loop_count = 0; |
| 176 | enc_options->anim_params.bgcolor = 0xffffffff; // White. |
| 177 | enc_options->minimize_size = 0; |
| 178 | DisableKeyframes(enc_options); |
| 179 | enc_options->allow_mixed = 0; |
| 180 | enc_options->verbose = 0; |
| 181 | } |
| 182 | |
| 183 | int WebPAnimEncoderOptionsInitInternal(WebPAnimEncoderOptions* enc_options, |
| 184 | int abi_version) { |
| 185 | if (enc_options == NULL || |
| 186 | WEBP_ABI_IS_INCOMPATIBLE(abi_version, WEBP_MUX_ABI_VERSION)) { |
| 187 | return 0; |
| 188 | } |
| 189 | DefaultEncoderOptions(enc_options); |
| 190 | return 1; |
| 191 | } |
| 192 | |
| 193 | // This starting value is more fit to WebPCleanupTransparentAreaLossless(). |
| 194 | #define TRANSPARENT_COLOR 0x00000000 |
| 195 | |
| 196 | static void ClearRectangle(WebPPicture* const picture, |
| 197 | int left, int top, int width, int height) { |
| 198 | int j; |
| 199 | for (j = top; j < top + height; ++j) { |
| 200 | uint32_t* const dst = picture->argb + j * picture->argb_stride; |
| 201 | int i; |
| 202 | for (i = left; i < left + width; ++i) { |
| 203 | dst[i] = TRANSPARENT_COLOR; |
| 204 | } |
| 205 | } |
| 206 | } |
| 207 | |
| 208 | static void WebPUtilClearPic(WebPPicture* const picture, |
| 209 | const FrameRectangle* const rect) { |
| 210 | if (rect != NULL) { |
| 211 | ClearRectangle(picture, rect->x_offset_, rect->y_offset_, |
| 212 | rect->width_, rect->height_); |
| 213 | } else { |
| 214 | ClearRectangle(picture, 0, 0, picture->width, picture->height); |
| 215 | } |
| 216 | } |
| 217 | |
// Clear any pending error message on 'enc' (empty string means "no error").
static void MarkNoError(WebPAnimEncoder* const enc) {
  enc->error_str_[0] = '\0';  // Empty string.
}
| 221 | |
| 222 | static void MarkError(WebPAnimEncoder* const enc, const char* str) { |
| 223 | if (snprintf(enc->error_str_, ERROR_STR_MAX_LENGTH, "%s." , str) < 0) { |
| 224 | assert(0); // FIX ME! |
| 225 | } |
| 226 | } |
| 227 | |
| 228 | static void MarkError2(WebPAnimEncoder* const enc, |
| 229 | const char* str, int error_code) { |
| 230 | if (snprintf(enc->error_str_, ERROR_STR_MAX_LENGTH, "%s: %d." , str, |
| 231 | error_code) < 0) { |
| 232 | assert(0); // FIX ME! |
| 233 | } |
| 234 | } |
| 235 | |
// Allocate and fully initialize a WebPAnimEncoder for a 'width' x 'height'
// canvas. Returns NULL on ABI mismatch, invalid/too-large dimensions, or any
// allocation failure (all partially-built state is released via
// WebPAnimEncoderDelete() on the error path).
WebPAnimEncoder* WebPAnimEncoderNewInternal(
    int width, int height, const WebPAnimEncoderOptions* enc_options,
    int abi_version) {
  WebPAnimEncoder* enc;

  if (WEBP_ABI_IS_INCOMPATIBLE(abi_version, WEBP_MUX_ABI_VERSION)) {
    return NULL;
  }
  // Reject non-positive dimensions and canvases at or above MAX_IMAGE_AREA
  // (computed in 64-bit to avoid overflow).
  if (width <= 0 || height <= 0 ||
      (width * (uint64_t)height) >= MAX_IMAGE_AREA) {
    return NULL;
  }

  enc = (WebPAnimEncoder*)WebPSafeCalloc(1, sizeof(*enc));
  if (enc == NULL) return NULL;
  // sanity inits, so we can call WebPAnimEncoderDelete():
  enc->encoded_frames_ = NULL;
  enc->mux_ = NULL;
  MarkNoError(enc);

  // Dimensions and options.
  // Note: the casts drop the 'const' qualifier to perform the one-time
  // initialization of fields that are immutable afterwards.
  *(int*)&enc->canvas_width_ = width;
  *(int*)&enc->canvas_height_ = height;
  if (enc_options != NULL) {
    *(WebPAnimEncoderOptions*)&enc->options_ = *enc_options;
    SanitizeEncoderOptions((WebPAnimEncoderOptions*)&enc->options_);
  } else {
    DefaultEncoderOptions((WebPAnimEncoderOptions*)&enc->options_);
  }

  // Canvas buffers.
  if (!WebPPictureInit(&enc->curr_canvas_copy_) ||
      !WebPPictureInit(&enc->prev_canvas_) ||
      !WebPPictureInit(&enc->prev_canvas_disposed_)) {
    goto Err;
  }
  enc->curr_canvas_copy_.width = width;
  enc->curr_canvas_copy_.height = height;
  enc->curr_canvas_copy_.use_argb = 1;
  if (!WebPPictureAlloc(&enc->curr_canvas_copy_) ||
      !WebPPictureCopy(&enc->curr_canvas_copy_, &enc->prev_canvas_) ||
      !WebPPictureCopy(&enc->curr_canvas_copy_, &enc->prev_canvas_disposed_)) {
    goto Err;
  }
  // Previous canvas starts out fully transparent.
  WebPUtilClearPic(&enc->prev_canvas_, NULL);
  enc->curr_canvas_copy_modified_ = 1;

  // Encoded frames.
  ResetCounters(enc);
  // Note: one extra storage is for the previous frame.
  enc->size_ = enc->options_.kmax - enc->options_.kmin + 1;
  // We need space for at least 2 frames. But when kmin, kmax are both zero,
  // enc->size_ will be 1. So we handle that special case below.
  if (enc->size_ < 2) enc->size_ = 2;
  enc->encoded_frames_ =
      (EncodedFrame*)WebPSafeCalloc(enc->size_, sizeof(*enc->encoded_frames_));
  if (enc->encoded_frames_ == NULL) goto Err;

  enc->mux_ = WebPMuxNew();
  if (enc->mux_ == NULL) goto Err;

  enc->count_since_key_frame_ = 0;
  enc->first_timestamp_ = 0;
  enc->prev_timestamp_ = 0;
  enc->prev_candidate_undecided_ = 0;
  enc->is_first_frame_ = 1;
  enc->got_null_frame_ = 0;

  return enc;  // All OK.

 Err:
  WebPAnimEncoderDelete(enc);
  return NULL;
}
| 310 | |
| 311 | // Release the data contained by 'encoded_frame'. |
| 312 | static void FrameRelease(EncodedFrame* const encoded_frame) { |
| 313 | if (encoded_frame != NULL) { |
| 314 | WebPDataClear(&encoded_frame->sub_frame_.bitstream); |
| 315 | WebPDataClear(&encoded_frame->key_frame_.bitstream); |
| 316 | memset(encoded_frame, 0, sizeof(*encoded_frame)); |
| 317 | } |
| 318 | } |
| 319 | |
| 320 | void WebPAnimEncoderDelete(WebPAnimEncoder* enc) { |
| 321 | if (enc != NULL) { |
| 322 | WebPPictureFree(&enc->curr_canvas_copy_); |
| 323 | WebPPictureFree(&enc->prev_canvas_); |
| 324 | WebPPictureFree(&enc->prev_canvas_disposed_); |
| 325 | if (enc->encoded_frames_ != NULL) { |
| 326 | size_t i; |
| 327 | for (i = 0; i < enc->size_; ++i) { |
| 328 | FrameRelease(&enc->encoded_frames_[i]); |
| 329 | } |
| 330 | WebPSafeFree(enc->encoded_frames_); |
| 331 | } |
| 332 | WebPMuxDelete(enc->mux_); |
| 333 | WebPSafeFree(enc); |
| 334 | } |
| 335 | } |
| 336 | |
| 337 | // ----------------------------------------------------------------------------- |
| 338 | // Frame addition. |
| 339 | |
| 340 | // Returns cached frame at the given 'position'. |
| 341 | static EncodedFrame* GetFrame(const WebPAnimEncoder* const enc, |
| 342 | size_t position) { |
| 343 | assert(enc->start_ + position < enc->size_); |
| 344 | return &enc->encoded_frames_[enc->start_ + position]; |
| 345 | } |
| 346 | |
| 347 | typedef int (*ComparePixelsFunc)(const uint32_t*, int, const uint32_t*, int, |
| 348 | int, int); |
| 349 | |
| 350 | // Returns true if 'length' number of pixels in 'src' and 'dst' are equal, |
| 351 | // assuming the given step sizes between pixels. |
| 352 | // 'max_allowed_diff' is unused and only there to allow function pointer use. |
| 353 | static WEBP_INLINE int ComparePixelsLossless(const uint32_t* src, int src_step, |
| 354 | const uint32_t* dst, int dst_step, |
| 355 | int length, int max_allowed_diff) { |
| 356 | (void)max_allowed_diff; |
| 357 | assert(length > 0); |
| 358 | while (length-- > 0) { |
| 359 | if (*src != *dst) { |
| 360 | return 0; |
| 361 | } |
| 362 | src += src_step; |
| 363 | dst += dst_step; |
| 364 | } |
| 365 | return 1; |
| 366 | } |
| 367 | |
| 368 | // Helper to check if each channel in 'src' and 'dst' is at most off by |
| 369 | // 'max_allowed_diff'. |
| 370 | static WEBP_INLINE int PixelsAreSimilar(uint32_t src, uint32_t dst, |
| 371 | int max_allowed_diff) { |
| 372 | const int src_a = (src >> 24) & 0xff; |
| 373 | const int src_r = (src >> 16) & 0xff; |
| 374 | const int src_g = (src >> 8) & 0xff; |
| 375 | const int src_b = (src >> 0) & 0xff; |
| 376 | const int dst_a = (dst >> 24) & 0xff; |
| 377 | const int dst_r = (dst >> 16) & 0xff; |
| 378 | const int dst_g = (dst >> 8) & 0xff; |
| 379 | const int dst_b = (dst >> 0) & 0xff; |
| 380 | |
| 381 | return (src_a == dst_a) && |
| 382 | (abs(src_r - dst_r) * dst_a <= (max_allowed_diff * 255)) && |
| 383 | (abs(src_g - dst_g) * dst_a <= (max_allowed_diff * 255)) && |
| 384 | (abs(src_b - dst_b) * dst_a <= (max_allowed_diff * 255)); |
| 385 | } |
| 386 | |
| 387 | // Returns true if 'length' number of pixels in 'src' and 'dst' are within an |
| 388 | // error bound, assuming the given step sizes between pixels. |
| 389 | static WEBP_INLINE int ComparePixelsLossy(const uint32_t* src, int src_step, |
| 390 | const uint32_t* dst, int dst_step, |
| 391 | int length, int max_allowed_diff) { |
| 392 | assert(length > 0); |
| 393 | while (length-- > 0) { |
| 394 | if (!PixelsAreSimilar(*src, *dst, max_allowed_diff)) { |
| 395 | return 0; |
| 396 | } |
| 397 | src += src_step; |
| 398 | dst += dst_step; |
| 399 | } |
| 400 | return 1; |
| 401 | } |
| 402 | |
// True if 'rect' has zero width or zero height (i.e. covers no pixels).
static int IsEmptyRect(const FrameRectangle* const rect) {
  return (rect->width_ == 0) || (rect->height_ == 0);
}
| 406 | |
| 407 | static int QualityToMaxDiff(float quality) { |
| 408 | const double val = pow(quality / 100., 0.5); |
| 409 | const double max_diff = 31 * (1 - val) + 1 * val; |
| 410 | return (int)(max_diff + 0.5); |
| 411 | } |
| 412 | |
// Assumes that an initial valid guess of change rectangle 'rect' is passed.
// Shrinks 'rect' in place by trimming, from each of its four sides in turn,
// the rows/columns where 'src' and 'dst' already match (exactly for lossless,
// within a quality-derived tolerance for lossy). If the rectangle collapses
// entirely, it is reset to an empty rectangle at the origin.
static void MinimizeChangeRectangle(const WebPPicture* const src,
                                    const WebPPicture* const dst,
                                    FrameRectangle* const rect,
                                    int is_lossless, float quality) {
  int i, j;
  // Pick the comparison function and the matching tolerance.
  const ComparePixelsFunc compare_pixels =
      is_lossless ? ComparePixelsLossless : ComparePixelsLossy;
  const int max_allowed_diff_lossy = QualityToMaxDiff(quality);
  const int max_allowed_diff = is_lossless ? 0 : max_allowed_diff_lossy;

  // Sanity checks.
  assert(src->width == dst->width && src->height == dst->height);
  assert(rect->x_offset_ + rect->width_ <= dst->width);
  assert(rect->y_offset_ + rect->height_ <= dst->height);

  // Left boundary: compare one column at a time (stride = argb_stride).
  for (i = rect->x_offset_; i < rect->x_offset_ + rect->width_; ++i) {
    const uint32_t* const src_argb =
        &src->argb[rect->y_offset_ * src->argb_stride + i];
    const uint32_t* const dst_argb =
        &dst->argb[rect->y_offset_ * dst->argb_stride + i];
    if (compare_pixels(src_argb, src->argb_stride, dst_argb, dst->argb_stride,
                       rect->height_, max_allowed_diff)) {
      --rect->width_;  // Redundant column.
      ++rect->x_offset_;
    } else {
      break;
    }
  }
  if (rect->width_ == 0) goto NoChange;

  // Right boundary.
  for (i = rect->x_offset_ + rect->width_ - 1; i >= rect->x_offset_; --i) {
    const uint32_t* const src_argb =
        &src->argb[rect->y_offset_ * src->argb_stride + i];
    const uint32_t* const dst_argb =
        &dst->argb[rect->y_offset_ * dst->argb_stride + i];
    if (compare_pixels(src_argb, src->argb_stride, dst_argb, dst->argb_stride,
                       rect->height_, max_allowed_diff)) {
      --rect->width_;  // Redundant column.
    } else {
      break;
    }
  }
  if (rect->width_ == 0) goto NoChange;

  // Top boundary: compare one row at a time (stride = 1).
  for (j = rect->y_offset_; j < rect->y_offset_ + rect->height_; ++j) {
    const uint32_t* const src_argb =
        &src->argb[j * src->argb_stride + rect->x_offset_];
    const uint32_t* const dst_argb =
        &dst->argb[j * dst->argb_stride + rect->x_offset_];
    if (compare_pixels(src_argb, 1, dst_argb, 1, rect->width_,
                       max_allowed_diff)) {
      --rect->height_;  // Redundant row.
      ++rect->y_offset_;
    } else {
      break;
    }
  }
  if (rect->height_ == 0) goto NoChange;

  // Bottom boundary.
  for (j = rect->y_offset_ + rect->height_ - 1; j >= rect->y_offset_; --j) {
    const uint32_t* const src_argb =
        &src->argb[j * src->argb_stride + rect->x_offset_];
    const uint32_t* const dst_argb =
        &dst->argb[j * dst->argb_stride + rect->x_offset_];
    if (compare_pixels(src_argb, 1, dst_argb, 1, rect->width_,
                       max_allowed_diff)) {
      --rect->height_;  // Redundant row.
    } else {
      break;
    }
  }
  if (rect->height_ == 0) goto NoChange;

  // Note: the 'goto's above jump straight to the label inside this 'if' body;
  // the condition only catches the (already-empty) fall-through case.
  if (IsEmptyRect(rect)) {
 NoChange:
    rect->x_offset_ = 0;
    rect->y_offset_ = 0;
    rect->width_ = 0;
    rect->height_ = 0;
  }
}
| 499 | |
| 500 | // Snap rectangle to even offsets (and adjust dimensions if needed). |
| 501 | static WEBP_INLINE void SnapToEvenOffsets(FrameRectangle* const rect) { |
| 502 | rect->width_ += (rect->x_offset_ & 1); |
| 503 | rect->height_ += (rect->y_offset_ & 1); |
| 504 | rect->x_offset_ &= ~1; |
| 505 | rect->y_offset_ &= ~1; |
| 506 | } |
| 507 | |
// Candidate sub-frame state for one animation frame: the optimized change
// rectangles and the corresponding picture views, one pair each for lossless
// and lossy encoding.
typedef struct {
  int should_try_;               // Should try this set of parameters.
  int empty_rect_allowed_;       // Frame with empty rectangle can be skipped.
  FrameRectangle rect_ll_;       // Frame rectangle for lossless compression.
  WebPPicture sub_frame_ll_;     // Sub-frame pic for lossless compression.
  FrameRectangle rect_lossy_;    // Frame rectangle for lossy compression.
                                 // Could be smaller than rect_ll_ as pixels
                                 // with small diffs can be ignored.
  WebPPicture sub_frame_lossy_;  // Sub-frame pic for lossy compression.
} SubFrameParams;
| 518 | |
| 519 | static int SubFrameParamsInit(SubFrameParams* const params, |
| 520 | int should_try, int empty_rect_allowed) { |
| 521 | params->should_try_ = should_try; |
| 522 | params->empty_rect_allowed_ = empty_rect_allowed; |
| 523 | if (!WebPPictureInit(¶ms->sub_frame_ll_) || |
| 524 | !WebPPictureInit(¶ms->sub_frame_lossy_)) { |
| 525 | return 0; |
| 526 | } |
| 527 | return 1; |
| 528 | } |
| 529 | |
| 530 | static void SubFrameParamsFree(SubFrameParams* const params) { |
| 531 | WebPPictureFree(¶ms->sub_frame_ll_); |
| 532 | WebPPictureFree(¶ms->sub_frame_lossy_); |
| 533 | } |
| 534 | |
| 535 | // Given previous and current canvas, picks the optimal rectangle for the |
| 536 | // current frame based on 'is_lossless' and other parameters. Assumes that the |
| 537 | // initial guess 'rect' is valid. |
| 538 | static int GetSubRect(const WebPPicture* const prev_canvas, |
| 539 | const WebPPicture* const curr_canvas, int is_key_frame, |
| 540 | int is_first_frame, int empty_rect_allowed, |
| 541 | int is_lossless, float quality, |
| 542 | FrameRectangle* const rect, |
| 543 | WebPPicture* const sub_frame) { |
| 544 | if (!is_key_frame || is_first_frame) { // Optimize frame rectangle. |
| 545 | // Note: This behaves as expected for first frame, as 'prev_canvas' is |
| 546 | // initialized to a fully transparent canvas in the beginning. |
| 547 | MinimizeChangeRectangle(prev_canvas, curr_canvas, rect, |
| 548 | is_lossless, quality); |
| 549 | } |
| 550 | |
| 551 | if (IsEmptyRect(rect)) { |
| 552 | if (empty_rect_allowed) { // No need to get 'sub_frame'. |
| 553 | return 1; |
| 554 | } else { // Force a 1x1 rectangle. |
| 555 | rect->width_ = 1; |
| 556 | rect->height_ = 1; |
| 557 | assert(rect->x_offset_ == 0); |
| 558 | assert(rect->y_offset_ == 0); |
| 559 | } |
| 560 | } |
| 561 | |
| 562 | SnapToEvenOffsets(rect); |
| 563 | return WebPPictureView(curr_canvas, rect->x_offset_, rect->y_offset_, |
| 564 | rect->width_, rect->height_, sub_frame); |
| 565 | } |
| 566 | |
| 567 | // Picks optimal frame rectangle for both lossless and lossy compression. The |
| 568 | // initial guess for frame rectangles will be the full canvas. |
| 569 | static int GetSubRects(const WebPPicture* const prev_canvas, |
| 570 | const WebPPicture* const curr_canvas, int is_key_frame, |
| 571 | int is_first_frame, float quality, |
| 572 | SubFrameParams* const params) { |
| 573 | // Lossless frame rectangle. |
| 574 | params->rect_ll_.x_offset_ = 0; |
| 575 | params->rect_ll_.y_offset_ = 0; |
| 576 | params->rect_ll_.width_ = curr_canvas->width; |
| 577 | params->rect_ll_.height_ = curr_canvas->height; |
| 578 | if (!GetSubRect(prev_canvas, curr_canvas, is_key_frame, is_first_frame, |
| 579 | params->empty_rect_allowed_, 1, quality, |
| 580 | ¶ms->rect_ll_, ¶ms->sub_frame_ll_)) { |
| 581 | return 0; |
| 582 | } |
| 583 | // Lossy frame rectangle. |
| 584 | params->rect_lossy_ = params->rect_ll_; // seed with lossless rect. |
| 585 | return GetSubRect(prev_canvas, curr_canvas, is_key_frame, is_first_frame, |
| 586 | params->empty_rect_allowed_, 0, quality, |
| 587 | ¶ms->rect_lossy_, ¶ms->sub_frame_lossy_); |
| 588 | } |
| 589 | |
| 590 | static WEBP_INLINE int clip(int v, int min_v, int max_v) { |
| 591 | return (v < min_v) ? min_v : (v > max_v) ? max_v : v; |
| 592 | } |
| 593 | |
| 594 | int WebPAnimEncoderRefineRect( |
| 595 | const WebPPicture* const prev_canvas, const WebPPicture* const curr_canvas, |
| 596 | int is_lossless, float quality, int* const x_offset, int* const y_offset, |
| 597 | int* const width, int* const height) { |
| 598 | FrameRectangle rect; |
| 599 | const int right = clip(*x_offset + *width, 0, curr_canvas->width); |
| 600 | const int left = clip(*x_offset, 0, curr_canvas->width - 1); |
| 601 | const int bottom = clip(*y_offset + *height, 0, curr_canvas->height); |
| 602 | const int top = clip(*y_offset, 0, curr_canvas->height - 1); |
| 603 | if (prev_canvas == NULL || curr_canvas == NULL || |
| 604 | prev_canvas->width != curr_canvas->width || |
| 605 | prev_canvas->height != curr_canvas->height || |
| 606 | !prev_canvas->use_argb || !curr_canvas->use_argb) { |
| 607 | return 0; |
| 608 | } |
| 609 | rect.x_offset_ = left; |
| 610 | rect.y_offset_ = top; |
| 611 | rect.width_ = clip(right - left, 0, curr_canvas->width - rect.x_offset_); |
| 612 | rect.height_ = clip(bottom - top, 0, curr_canvas->height - rect.y_offset_); |
| 613 | MinimizeChangeRectangle(prev_canvas, curr_canvas, &rect, is_lossless, |
| 614 | quality); |
| 615 | SnapToEvenOffsets(&rect); |
| 616 | *x_offset = rect.x_offset_; |
| 617 | *y_offset = rect.y_offset_; |
| 618 | *width = rect.width_; |
| 619 | *height = rect.height_; |
| 620 | return 1; |
| 621 | } |
| 622 | |
| 623 | static void DisposeFrameRectangle(int dispose_method, |
| 624 | const FrameRectangle* const rect, |
| 625 | WebPPicture* const curr_canvas) { |
| 626 | assert(rect != NULL); |
| 627 | if (dispose_method == WEBP_MUX_DISPOSE_BACKGROUND) { |
| 628 | WebPUtilClearPic(curr_canvas, rect); |
| 629 | } |
| 630 | } |
| 631 | |
| 632 | static uint32_t RectArea(const FrameRectangle* const rect) { |
| 633 | return (uint32_t)rect->width_ * rect->height_; |
| 634 | } |
| 635 | |
| 636 | static int IsLosslessBlendingPossible(const WebPPicture* const src, |
| 637 | const WebPPicture* const dst, |
| 638 | const FrameRectangle* const rect) { |
| 639 | int i, j; |
| 640 | assert(src->width == dst->width && src->height == dst->height); |
| 641 | assert(rect->x_offset_ + rect->width_ <= dst->width); |
| 642 | assert(rect->y_offset_ + rect->height_ <= dst->height); |
| 643 | for (j = rect->y_offset_; j < rect->y_offset_ + rect->height_; ++j) { |
| 644 | for (i = rect->x_offset_; i < rect->x_offset_ + rect->width_; ++i) { |
| 645 | const uint32_t src_pixel = src->argb[j * src->argb_stride + i]; |
| 646 | const uint32_t dst_pixel = dst->argb[j * dst->argb_stride + i]; |
| 647 | const uint32_t dst_alpha = dst_pixel >> 24; |
| 648 | if (dst_alpha != 0xff && src_pixel != dst_pixel) { |
| 649 | // In this case, if we use blending, we can't attain the desired |
| 650 | // 'dst_pixel' value for this pixel. So, blending is not possible. |
| 651 | return 0; |
| 652 | } |
| 653 | } |
| 654 | } |
| 655 | return 1; |
| 656 | } |
| 657 | |
| 658 | static int IsLossyBlendingPossible(const WebPPicture* const src, |
| 659 | const WebPPicture* const dst, |
| 660 | const FrameRectangle* const rect, |
| 661 | float quality) { |
| 662 | const int max_allowed_diff_lossy = QualityToMaxDiff(quality); |
| 663 | int i, j; |
| 664 | assert(src->width == dst->width && src->height == dst->height); |
| 665 | assert(rect->x_offset_ + rect->width_ <= dst->width); |
| 666 | assert(rect->y_offset_ + rect->height_ <= dst->height); |
| 667 | for (j = rect->y_offset_; j < rect->y_offset_ + rect->height_; ++j) { |
| 668 | for (i = rect->x_offset_; i < rect->x_offset_ + rect->width_; ++i) { |
| 669 | const uint32_t src_pixel = src->argb[j * src->argb_stride + i]; |
| 670 | const uint32_t dst_pixel = dst->argb[j * dst->argb_stride + i]; |
| 671 | const uint32_t dst_alpha = dst_pixel >> 24; |
| 672 | if (dst_alpha != 0xff && |
| 673 | !PixelsAreSimilar(src_pixel, dst_pixel, max_allowed_diff_lossy)) { |
| 674 | // In this case, if we use blending, we can't attain the desired |
| 675 | // 'dst_pixel' value for this pixel. So, blending is not possible. |
| 676 | return 0; |
| 677 | } |
| 678 | } |
| 679 | } |
| 680 | return 1; |
| 681 | } |
| 682 | |
| 683 | // For pixels in 'rect', replace those pixels in 'dst' that are same as 'src' by |
| 684 | // transparent pixels. |
| 685 | // Returns true if at least one pixel gets modified. |
| 686 | static int IncreaseTransparency(const WebPPicture* const src, |
| 687 | const FrameRectangle* const rect, |
| 688 | WebPPicture* const dst) { |
| 689 | int i, j; |
| 690 | int modified = 0; |
| 691 | assert(src != NULL && dst != NULL && rect != NULL); |
| 692 | assert(src->width == dst->width && src->height == dst->height); |
| 693 | for (j = rect->y_offset_; j < rect->y_offset_ + rect->height_; ++j) { |
| 694 | const uint32_t* const psrc = src->argb + j * src->argb_stride; |
| 695 | uint32_t* const pdst = dst->argb + j * dst->argb_stride; |
| 696 | for (i = rect->x_offset_; i < rect->x_offset_ + rect->width_; ++i) { |
| 697 | if (psrc[i] == pdst[i] && pdst[i] != TRANSPARENT_COLOR) { |
| 698 | pdst[i] = TRANSPARENT_COLOR; |
| 699 | modified = 1; |
| 700 | } |
| 701 | } |
| 702 | } |
| 703 | return modified; |
| 704 | } |
| 705 | |
| 706 | #undef TRANSPARENT_COLOR |
| 707 | |
// Replace similar blocks of pixels by a 'see-through' transparent block
// with uniform average color.
// Assumes lossy compression is being used.
// Returns true if at least one pixel gets modified.
static int FlattenSimilarBlocks(const WebPPicture* const src,
                                const FrameRectangle* const rect,
                                WebPPicture* const dst, float quality) {
  const int max_allowed_diff_lossy = QualityToMaxDiff(quality);
  int i, j;
  int modified = 0;
  const int block_size = 8;
  // Round the scan window inward to block-aligned boundaries so only whole
  // 8x8 blocks fully inside 'rect' are considered.
  const int y_start = (rect->y_offset_ + block_size) & ~(block_size - 1);
  const int y_end = (rect->y_offset_ + rect->height_) & ~(block_size - 1);
  const int x_start = (rect->x_offset_ + block_size) & ~(block_size - 1);
  const int x_end = (rect->x_offset_ + rect->width_) & ~(block_size - 1);
  assert(src != NULL && dst != NULL && rect != NULL);
  assert(src->width == dst->width && src->height == dst->height);
  assert((block_size & (block_size - 1)) == 0);  // must be a power of 2
  // Iterate over each block and count similar pixels.
  for (j = y_start; j < y_end; j += block_size) {
    for (i = x_start; i < x_end; i += block_size) {
      int cnt = 0;
      int avg_r = 0, avg_g = 0, avg_b = 0;
      int x, y;
      const uint32_t* const psrc = src->argb + j * src->argb_stride + i;
      uint32_t* const pdst = dst->argb + j * dst->argb_stride + i;
      // Count the opaque source pixels that are similar to the destination,
      // accumulating their color for the average.
      for (y = 0; y < block_size; ++y) {
        for (x = 0; x < block_size; ++x) {
          const uint32_t src_pixel = psrc[x + y * src->argb_stride];
          const int alpha = src_pixel >> 24;
          if (alpha == 0xff &&
              PixelsAreSimilar(src_pixel, pdst[x + y * dst->argb_stride],
                               max_allowed_diff_lossy)) {
            ++cnt;
            avg_r += (src_pixel >> 16) & 0xff;
            avg_g += (src_pixel >> 8) & 0xff;
            avg_b += (src_pixel >> 0) & 0xff;
          }
        }
      }
      // If we have a fully similar block, we replace it with an
      // average transparent block. This compresses better in lossy mode.
      if (cnt == block_size * block_size) {
        const uint32_t color = (0x00 << 24) |
                               ((avg_r / cnt) << 16) |
                               ((avg_g / cnt) << 8) |
                               ((avg_b / cnt) << 0);
        for (y = 0; y < block_size; ++y) {
          for (x = 0; x < block_size; ++x) {
            pdst[x + y * dst->argb_stride] = color;
          }
        }
        modified = 1;
      }
    }
  }
  return modified;
}
| 766 | |
| 767 | static int EncodeFrame(const WebPConfig* const config, WebPPicture* const pic, |
| 768 | WebPMemoryWriter* const memory) { |
| 769 | pic->use_argb = 1; |
| 770 | pic->writer = WebPMemoryWrite; |
| 771 | pic->custom_ptr = memory; |
| 772 | if (!WebPEncode(config, pic)) { |
| 773 | return 0; |
| 774 | } |
| 775 | return 1; |
| 776 | } |
| 777 | |
// Struct representing a candidate encoded frame including its metadata.
typedef struct {
  WebPMemoryWriter mem_;   // Owns the encoded bitstream for this candidate.
  WebPMuxFrameInfo info_;  // Frame metadata (offsets, blend/dispose methods).
  FrameRectangle rect_;    // Change rectangle covered by this candidate.
  int evaluate_;           // True if this candidate should be evaluated.
} Candidate;
| 785 | |
| 786 | // Generates a candidate encoded frame given a picture and metadata. |
| 787 | static WebPEncodingError EncodeCandidate(WebPPicture* const sub_frame, |
| 788 | const FrameRectangle* const rect, |
| 789 | const WebPConfig* const encoder_config, |
| 790 | int use_blending, |
| 791 | Candidate* const candidate) { |
| 792 | WebPConfig config = *encoder_config; |
| 793 | WebPEncodingError error_code = VP8_ENC_OK; |
| 794 | assert(candidate != NULL); |
| 795 | memset(candidate, 0, sizeof(*candidate)); |
| 796 | |
| 797 | // Set frame rect and info. |
| 798 | candidate->rect_ = *rect; |
| 799 | candidate->info_.id = WEBP_CHUNK_ANMF; |
| 800 | candidate->info_.x_offset = rect->x_offset_; |
| 801 | candidate->info_.y_offset = rect->y_offset_; |
| 802 | candidate->info_.dispose_method = WEBP_MUX_DISPOSE_NONE; // Set later. |
| 803 | candidate->info_.blend_method = |
| 804 | use_blending ? WEBP_MUX_BLEND : WEBP_MUX_NO_BLEND; |
| 805 | candidate->info_.duration = 0; // Set in next call to WebPAnimEncoderAdd(). |
| 806 | |
| 807 | // Encode picture. |
| 808 | WebPMemoryWriterInit(&candidate->mem_); |
| 809 | |
| 810 | if (!config.lossless && use_blending) { |
| 811 | // Disable filtering to avoid blockiness in reconstructed frames at the |
| 812 | // time of decoding. |
| 813 | config.autofilter = 0; |
| 814 | config.filter_strength = 0; |
| 815 | } |
| 816 | if (!EncodeFrame(&config, sub_frame, &candidate->mem_)) { |
| 817 | error_code = sub_frame->error_code; |
| 818 | goto Err; |
| 819 | } |
| 820 | |
| 821 | candidate->evaluate_ = 1; |
| 822 | return error_code; |
| 823 | |
| 824 | Err: |
| 825 | WebPMemoryWriterClear(&candidate->mem_); |
| 826 | return error_code; |
| 827 | } |
| 828 | |
| 829 | static void CopyCurrentCanvas(WebPAnimEncoder* const enc) { |
| 830 | if (enc->curr_canvas_copy_modified_) { |
| 831 | WebPCopyPixels(enc->curr_canvas_, &enc->curr_canvas_copy_); |
| 832 | enc->curr_canvas_copy_.progress_hook = enc->curr_canvas_->progress_hook; |
| 833 | enc->curr_canvas_copy_.user_data = enc->curr_canvas_->user_data; |
| 834 | enc->curr_canvas_copy_modified_ = 0; |
| 835 | } |
| 836 | } |
| 837 | |
// Indices into the candidate array: lossless/lossy crossed with the dispose
// method assumed for the previous frame (NONE vs. BACKGROUND).
enum {
  LL_DISP_NONE = 0,  // Lossless, previous frame disposed to none.
  LL_DISP_BG,        // Lossless, previous frame disposed to background.
  LOSSY_DISP_NONE,   // Lossy, previous frame disposed to none.
  LOSSY_DISP_BG,     // Lossy, previous frame disposed to background.
  CANDIDATE_COUNT
};
| 845 | |
| 846 | #define MIN_COLORS_LOSSY 31 // Don't try lossy below this threshold. |
| 847 | #define MAX_COLORS_LOSSLESS 194 // Don't try lossless above this threshold. |
| 848 | |
// Generates candidates for a given dispose method given pre-filled sub-frame
// 'params'. Depending on encoder options and a color-count heuristic, encodes
// a lossless candidate, a lossy candidate, or both, into the appropriate
// slots of 'candidates'.
static WebPEncodingError GenerateCandidates(
    WebPAnimEncoder* const enc, Candidate candidates[CANDIDATE_COUNT],
    WebPMuxAnimDispose dispose_method, int is_lossless, int is_key_frame,
    SubFrameParams* const params,
    const WebPConfig* const config_ll, const WebPConfig* const config_lossy) {
  WebPEncodingError error_code = VP8_ENC_OK;
  const int is_dispose_none = (dispose_method == WEBP_MUX_DISPOSE_NONE);
  // Pick the candidate slots matching the assumed dispose method.
  Candidate* const candidate_ll =
      is_dispose_none ? &candidates[LL_DISP_NONE] : &candidates[LL_DISP_BG];
  Candidate* const candidate_lossy = is_dispose_none
                                         ? &candidates[LOSSY_DISP_NONE]
                                         : &candidates[LOSSY_DISP_BG];
  WebPPicture* const curr_canvas = &enc->curr_canvas_copy_;
  // Compare against the previous canvas as it would look after the assumed
  // dispose method has been applied.
  const WebPPicture* const prev_canvas =
      is_dispose_none ? &enc->prev_canvas_ : &enc->prev_canvas_disposed_;
  int use_blending_ll, use_blending_lossy;
  int evaluate_ll, evaluate_lossy;

  CopyCurrentCanvas(enc);
  // Blending is only considered for non key-frames.
  use_blending_ll =
      !is_key_frame &&
      IsLosslessBlendingPossible(prev_canvas, curr_canvas, &params->rect_ll_);
  use_blending_lossy =
      !is_key_frame &&
      IsLossyBlendingPossible(prev_canvas, curr_canvas, &params->rect_lossy_,
                              config_lossy->quality);

  // Pick candidates to be tried.
  if (!enc->options_.allow_mixed) {
    evaluate_ll = is_lossless;
    evaluate_lossy = !is_lossless;
  } else if (enc->options_.minimize_size) {
    evaluate_ll = 1;
    evaluate_lossy = 1;
  } else {  // Use a heuristic for trying lossless and/or lossy compression.
    const int num_colors = WebPGetColorPalette(&params->sub_frame_ll_, NULL);
    evaluate_ll = (num_colors < MAX_COLORS_LOSSLESS);
    evaluate_lossy = (num_colors >= MIN_COLORS_LOSSY);
  }

  // Generate candidates.
  if (evaluate_ll) {
    CopyCurrentCanvas(enc);
    if (use_blending_ll) {
      // May modify the canvas copy to help compression; record whether it did
      // so the copy gets refreshed before reuse.
      enc->curr_canvas_copy_modified_ =
          IncreaseTransparency(prev_canvas, &params->rect_ll_, curr_canvas);
    }
    error_code = EncodeCandidate(&params->sub_frame_ll_, &params->rect_ll_,
                                 config_ll, use_blending_ll, candidate_ll);
    if (error_code != VP8_ENC_OK) return error_code;
  }
  if (evaluate_lossy) {
    CopyCurrentCanvas(enc);
    if (use_blending_lossy) {
      enc->curr_canvas_copy_modified_ =
          FlattenSimilarBlocks(prev_canvas, &params->rect_lossy_, curr_canvas,
                               config_lossy->quality);
    }
    error_code =
        EncodeCandidate(&params->sub_frame_lossy_, &params->rect_lossy_,
                        config_lossy, use_blending_lossy, candidate_lossy);
    if (error_code != VP8_ENC_OK) return error_code;
    // The lossy path may have altered the canvas copy; force a refresh before
    // it is used again.
    enc->curr_canvas_copy_modified_ = 1;
  }
  return error_code;
}
| 917 | |
| 918 | #undef MIN_COLORS_LOSSY |
| 919 | #undef MAX_COLORS_LOSSLESS |
| 920 | |
| 921 | static void GetEncodedData(const WebPMemoryWriter* const memory, |
| 922 | WebPData* const encoded_data) { |
| 923 | encoded_data->bytes = memory->mem; |
| 924 | encoded_data->size = memory->size; |
| 925 | } |
| 926 | |
| 927 | // Sets dispose method of the previous frame to be 'dispose_method'. |
| 928 | static void SetPreviousDisposeMethod(WebPAnimEncoder* const enc, |
| 929 | WebPMuxAnimDispose dispose_method) { |
| 930 | const size_t position = enc->count_ - 2; |
| 931 | EncodedFrame* const prev_enc_frame = GetFrame(enc, position); |
| 932 | assert(enc->count_ >= 2); // As current and previous frames are in enc. |
| 933 | |
| 934 | if (enc->prev_candidate_undecided_) { |
| 935 | assert(dispose_method == WEBP_MUX_DISPOSE_NONE); |
| 936 | prev_enc_frame->sub_frame_.dispose_method = dispose_method; |
| 937 | prev_enc_frame->key_frame_.dispose_method = dispose_method; |
| 938 | } else { |
| 939 | WebPMuxFrameInfo* const prev_info = prev_enc_frame->is_key_frame_ |
| 940 | ? &prev_enc_frame->key_frame_ |
| 941 | : &prev_enc_frame->sub_frame_; |
| 942 | prev_info->dispose_method = dispose_method; |
| 943 | } |
| 944 | } |
| 945 | |
// Increases the duration of the most recently cached frame by 'duration'.
// If the summed duration would reach MAX_DURATION (and thus not fit in the
// ANMF duration field), a 1x1 transparent blended frame carrying the extra
// duration is appended instead. Returns true on success.
static int IncreasePreviousDuration(WebPAnimEncoder* const enc, int duration) {
  const size_t position = enc->count_ - 1;
  EncodedFrame* const prev_enc_frame = GetFrame(enc, position);
  int new_duration;

  assert(enc->count_ >= 1);
  assert(prev_enc_frame->sub_frame_.duration ==
         prev_enc_frame->key_frame_.duration);
  // Both the stored duration and the increment must already fit in the field.
  assert(prev_enc_frame->sub_frame_.duration ==
         (prev_enc_frame->sub_frame_.duration & (MAX_DURATION - 1)));
  assert(duration == (duration & (MAX_DURATION - 1)));

  new_duration = prev_enc_frame->sub_frame_.duration + duration;
  if (new_duration >= MAX_DURATION) {  // Special case.
    // Separate out previous frame from earlier merged frames to avoid overflow.
    // We add a 1x1 transparent frame for the previous frame, with blending on.
    const FrameRectangle rect = { 0, 0, 1, 1 };
    // Pre-built minimal WebP bitstreams for a 1x1 transparent image
    // (lossless and lossy variants).
    const uint8_t lossless_1x1_bytes[] = {
      0x52, 0x49, 0x46, 0x46, 0x14, 0x00, 0x00, 0x00, 0x57, 0x45, 0x42, 0x50,
      0x56, 0x50, 0x38, 0x4c, 0x08, 0x00, 0x00, 0x00, 0x2f, 0x00, 0x00, 0x00,
      0x10, 0x88, 0x88, 0x08
    };
    const WebPData lossless_1x1 = {
      lossless_1x1_bytes, sizeof(lossless_1x1_bytes)
    };
    const uint8_t lossy_1x1_bytes[] = {
      0x52, 0x49, 0x46, 0x46, 0x40, 0x00, 0x00, 0x00, 0x57, 0x45, 0x42, 0x50,
      0x56, 0x50, 0x38, 0x58, 0x0a, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x41, 0x4c, 0x50, 0x48, 0x02, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x56, 0x50, 0x38, 0x20, 0x18, 0x00, 0x00, 0x00,
      0x30, 0x01, 0x00, 0x9d, 0x01, 0x2a, 0x01, 0x00, 0x01, 0x00, 0x02, 0x00,
      0x34, 0x25, 0xa4, 0x00, 0x03, 0x70, 0x00, 0xfe, 0xfb, 0xfd, 0x50, 0x00
    };
    const WebPData lossy_1x1 = { lossy_1x1_bytes, sizeof(lossy_1x1_bytes) };
    const int can_use_lossless =
        (enc->last_config_.lossless || enc->options_.allow_mixed);
    // Append the 1x1 filler frame carrying the extra 'duration'.
    EncodedFrame* const curr_enc_frame = GetFrame(enc, enc->count_);
    curr_enc_frame->is_key_frame_ = 0;
    curr_enc_frame->sub_frame_.id = WEBP_CHUNK_ANMF;
    curr_enc_frame->sub_frame_.x_offset = 0;
    curr_enc_frame->sub_frame_.y_offset = 0;
    curr_enc_frame->sub_frame_.dispose_method = WEBP_MUX_DISPOSE_NONE;
    curr_enc_frame->sub_frame_.blend_method = WEBP_MUX_BLEND;
    curr_enc_frame->sub_frame_.duration = duration;
    if (!WebPDataCopy(can_use_lossless ? &lossless_1x1 : &lossy_1x1,
                      &curr_enc_frame->sub_frame_.bitstream)) {
      return 0;
    }
    ++enc->count_;
    ++enc->count_since_key_frame_;
    enc->flush_count_ = enc->count_ - 1;
    enc->prev_candidate_undecided_ = 0;
    enc->prev_rect_ = rect;
  } else {  // Regular case.
    // Increase duration of the previous frame by 'duration'.
    prev_enc_frame->sub_frame_.duration = new_duration;
    prev_enc_frame->key_frame_.duration = new_duration;
  }
  return 1;
}
| 1006 | |
// Pick the candidate encoded frame with smallest size and release other
// candidates.
// TODO(later): Perhaps a rough SSIM/PSNR produced by the encoder should
// also be a criteria, in addition to sizes.
static void PickBestCandidate(WebPAnimEncoder* const enc,
                              Candidate* const candidates, int is_key_frame,
                              EncodedFrame* const encoded_frame) {
  int i;
  int best_idx = -1;
  size_t best_size = ~0;  // Start at the maximum representable size.
  // First pass: find the smallest evaluated candidate.
  for (i = 0; i < CANDIDATE_COUNT; ++i) {
    if (candidates[i].evaluate_) {
      const size_t candidate_size = candidates[i].mem_.size;
      if (candidate_size < best_size) {
        best_idx = i;
        best_size = candidate_size;
      }
    }
  }
  assert(best_idx != -1);
  // Second pass: commit the winner into 'encoded_frame' and free the rest.
  for (i = 0; i < CANDIDATE_COUNT; ++i) {
    if (candidates[i].evaluate_) {
      if (i == best_idx) {
        WebPMuxFrameInfo* const dst = is_key_frame
                                          ? &encoded_frame->key_frame_
                                          : &encoded_frame->sub_frame_;
        *dst = candidates[i].info_;
        // Transfers ownership of the candidate's bitstream to 'dst'.
        GetEncodedData(&candidates[i].mem_, &dst->bitstream);
        if (!is_key_frame) {
          // Note: Previous dispose method only matters for non-keyframes.
          // Also, we don't want to modify previous dispose method that was
          // selected when a non key-frame was assumed.
          const WebPMuxAnimDispose prev_dispose_method =
              (best_idx == LL_DISP_NONE || best_idx == LOSSY_DISP_NONE)
                  ? WEBP_MUX_DISPOSE_NONE
                  : WEBP_MUX_DISPOSE_BACKGROUND;
          SetPreviousDisposeMethod(enc, prev_dispose_method);
        }
        enc->prev_rect_ = candidates[i].rect_;  // save for next frame.
      } else {
        WebPMemoryWriterClear(&candidates[i].mem_);
        candidates[i].evaluate_ = 0;
      }
    }
  }
}
| 1053 | |
// Depending on the configuration, tries different compressions
// (lossy/lossless), dispose methods, blending methods etc to encode the current
// frame and outputs the best one in 'encoded_frame'.
// 'frame_skipped' will be set to true if this frame should actually be skipped.
static WebPEncodingError SetFrame(WebPAnimEncoder* const enc,
                                  const WebPConfig* const config,
                                  int is_key_frame,
                                  EncodedFrame* const encoded_frame,
                                  int* const frame_skipped) {
  int i;
  WebPEncodingError error_code = VP8_ENC_OK;
  const WebPPicture* const curr_canvas = &enc->curr_canvas_copy_;
  const WebPPicture* const prev_canvas = &enc->prev_canvas_;
  Candidate candidates[CANDIDATE_COUNT];
  const int is_lossless = config->lossless;
  const int consider_lossless = is_lossless || enc->options_.allow_mixed;
  const int consider_lossy = !is_lossless || enc->options_.allow_mixed;
  const int is_first_frame = enc->is_first_frame_;

  // First frame cannot be skipped as there is no 'previous frame' to merge it
  // to. So, empty rectangle is not allowed for the first frame.
  const int empty_rect_allowed_none = !is_first_frame;

  // Even if there is exact pixel match between 'disposed previous canvas' and
  // 'current canvas', we can't skip current frame, as there may not be exact
  // pixel match between 'previous canvas' and 'current canvas'. So, we don't
  // allow empty rectangle in this case.
  const int empty_rect_allowed_bg = 0;

  // If current frame is a key-frame, dispose method of previous frame doesn't
  // matter, so we don't try dispose to background.
  // Also, if key-frame insertion is on, and previous frame could be picked as
  // either a sub-frame or a key-frame, then we can't be sure about what frame
  // rectangle would be disposed. In that case too, we don't try dispose to
  // background.
  const int dispose_bg_possible =
      !is_key_frame && !enc->prev_candidate_undecided_;

  SubFrameParams dispose_none_params;
  SubFrameParams dispose_bg_params;

  // Derive a lossless and a lossy config from 'config'; remember both for a
  // possible single-frame re-encode at assembly time.
  WebPConfig config_ll = *config;
  WebPConfig config_lossy = *config;
  config_ll.lossless = 1;
  config_lossy.lossless = 0;
  enc->last_config_ = *config;
  enc->last_config_reversed_ = config->lossless ? config_lossy : config_ll;
  *frame_skipped = 0;

  if (!SubFrameParamsInit(&dispose_none_params, 1, empty_rect_allowed_none) ||
      !SubFrameParamsInit(&dispose_bg_params, 0, empty_rect_allowed_bg)) {
    return VP8_ENC_ERROR_INVALID_CONFIGURATION;
  }

  memset(candidates, 0, sizeof(candidates));

  // Change-rectangle assuming previous frame was DISPOSE_NONE.
  if (!GetSubRects(prev_canvas, curr_canvas, is_key_frame, is_first_frame,
                   config_lossy.quality, &dispose_none_params)) {
    error_code = VP8_ENC_ERROR_INVALID_CONFIGURATION;
    goto Err;
  }

  if ((consider_lossless && IsEmptyRect(&dispose_none_params.rect_ll_)) ||
      (consider_lossy && IsEmptyRect(&dispose_none_params.rect_lossy_))) {
    // Don't encode the frame at all. Instead, the duration of the previous
    // frame will be increased later.
    assert(empty_rect_allowed_none);
    *frame_skipped = 1;
    goto End;
  }

  if (dispose_bg_possible) {
    // Change-rectangle assuming previous frame was DISPOSE_BACKGROUND.
    WebPPicture* const prev_canvas_disposed = &enc->prev_canvas_disposed_;
    WebPCopyPixels(prev_canvas, prev_canvas_disposed);
    DisposeFrameRectangle(WEBP_MUX_DISPOSE_BACKGROUND, &enc->prev_rect_,
                          prev_canvas_disposed);

    if (!GetSubRects(prev_canvas_disposed, curr_canvas, is_key_frame,
                     is_first_frame, config_lossy.quality,
                     &dispose_bg_params)) {
      error_code = VP8_ENC_ERROR_INVALID_CONFIGURATION;
      goto Err;
    }
    assert(!IsEmptyRect(&dispose_bg_params.rect_ll_));
    assert(!IsEmptyRect(&dispose_bg_params.rect_lossy_));

    if (enc->options_.minimize_size) {  // Try both dispose methods.
      dispose_bg_params.should_try_ = 1;
      dispose_none_params.should_try_ = 1;
    } else if ((is_lossless &&
                RectArea(&dispose_bg_params.rect_ll_) <
                    RectArea(&dispose_none_params.rect_ll_)) ||
               (!is_lossless &&
                RectArea(&dispose_bg_params.rect_lossy_) <
                    RectArea(&dispose_none_params.rect_lossy_))) {
      dispose_bg_params.should_try_ = 1;  // Pick DISPOSE_BACKGROUND.
      dispose_none_params.should_try_ = 0;
    }
  }

  if (dispose_none_params.should_try_) {
    error_code = GenerateCandidates(
        enc, candidates, WEBP_MUX_DISPOSE_NONE, is_lossless, is_key_frame,
        &dispose_none_params, &config_ll, &config_lossy);
    if (error_code != VP8_ENC_OK) goto Err;
  }

  if (dispose_bg_params.should_try_) {
    assert(!enc->is_first_frame_);
    assert(dispose_bg_possible);
    error_code = GenerateCandidates(
        enc, candidates, WEBP_MUX_DISPOSE_BACKGROUND, is_lossless, is_key_frame,
        &dispose_bg_params, &config_ll, &config_lossy);
    if (error_code != VP8_ENC_OK) goto Err;
  }

  PickBestCandidate(enc, candidates, is_key_frame, encoded_frame);

  goto End;

 Err:
  // Release any candidates that were generated before the failure.
  for (i = 0; i < CANDIDATE_COUNT; ++i) {
    if (candidates[i].evaluate_) {
      WebPMemoryWriterClear(&candidates[i].mem_);
    }
  }

 End:
  SubFrameParamsFree(&dispose_none_params);
  SubFrameParamsFree(&dispose_bg_params);
  return error_code;
}
| 1188 | |
| 1189 | // Calculate the penalty incurred if we encode given frame as a key frame |
| 1190 | // instead of a sub-frame. |
| 1191 | static int64_t KeyFramePenalty(const EncodedFrame* const encoded_frame) { |
| 1192 | return ((int64_t)encoded_frame->key_frame_.bitstream.size - |
| 1193 | encoded_frame->sub_frame_.bitstream.size); |
| 1194 | } |
| 1195 | |
// Encodes the current canvas as the next animation frame and appends it to
// the in-memory frame cache, deciding between sub-frame and key-frame
// representations (possibly keeping both until a later frame settles the
// choice). Returns true on success; on failure or skip, the cached slot and
// counters are rolled back.
static int CacheFrame(WebPAnimEncoder* const enc,
                      const WebPConfig* const config) {
  int ok = 0;
  int frame_skipped = 0;
  WebPEncodingError error_code = VP8_ENC_OK;
  const size_t position = enc->count_;
  EncodedFrame* const encoded_frame = GetFrame(enc, position);

  ++enc->count_;

  if (enc->is_first_frame_) {  // Add this as a key-frame.
    error_code = SetFrame(enc, config, 1, encoded_frame, &frame_skipped);
    if (error_code != VP8_ENC_OK) goto End;
    assert(frame_skipped == 0);  // First frame can't be skipped, even if empty.
    assert(position == 0 && enc->count_ == 1);
    encoded_frame->is_key_frame_ = 1;
    enc->flush_count_ = 0;
    enc->count_since_key_frame_ = 0;
    enc->prev_candidate_undecided_ = 0;
  } else {
    ++enc->count_since_key_frame_;
    if (enc->count_since_key_frame_ <= enc->options_.kmin) {
      // Too soon for a key-frame: add this as a frame rectangle.
      error_code = SetFrame(enc, config, 0, encoded_frame, &frame_skipped);
      if (error_code != VP8_ENC_OK) goto End;
      if (frame_skipped) goto Skip;
      encoded_frame->is_key_frame_ = 0;
      enc->flush_count_ = enc->count_ - 1;
      enc->prev_candidate_undecided_ = 0;
    } else {
      // Key-frame insertion window: encode both variants and compare.
      int64_t curr_delta;
      FrameRectangle prev_rect_key, prev_rect_sub;

      // Add this as a frame rectangle to enc.
      error_code = SetFrame(enc, config, 0, encoded_frame, &frame_skipped);
      if (error_code != VP8_ENC_OK) goto End;
      if (frame_skipped) goto Skip;
      prev_rect_sub = enc->prev_rect_;


      // Add this as a key-frame to enc, too.
      error_code = SetFrame(enc, config, 1, encoded_frame, &frame_skipped);
      if (error_code != VP8_ENC_OK) goto End;
      assert(frame_skipped == 0);  // Key-frame cannot be an empty rectangle.
      prev_rect_key = enc->prev_rect_;

      // Analyze size difference of the two variants.
      curr_delta = KeyFramePenalty(encoded_frame);
      if (curr_delta <= enc->best_delta_) {  // Pick this as the key-frame.
        if (enc->keyframe_ != KEYFRAME_NONE) {
          // Demote the previously chosen key-frame candidate.
          EncodedFrame* const old_keyframe = GetFrame(enc, enc->keyframe_);
          assert(old_keyframe->is_key_frame_);
          old_keyframe->is_key_frame_ = 0;
        }
        encoded_frame->is_key_frame_ = 1;
        enc->prev_candidate_undecided_ = 1;
        enc->keyframe_ = (int)position;
        enc->best_delta_ = curr_delta;
        enc->flush_count_ = enc->count_ - 1;  // We can flush previous frames.
      } else {
        encoded_frame->is_key_frame_ = 0;
        enc->prev_candidate_undecided_ = 0;
      }
      // Note: We need '>=' below because when kmin and kmax are both zero,
      // count_since_key_frame will always be > kmax.
      if (enc->count_since_key_frame_ >= enc->options_.kmax) {
        enc->flush_count_ = enc->count_ - 1;
        enc->count_since_key_frame_ = 0;
        enc->keyframe_ = KEYFRAME_NONE;
        enc->best_delta_ = DELTA_INFINITY;
      }
      if (!enc->prev_candidate_undecided_) {
        // The role of this frame is settled; record the matching rectangle.
        enc->prev_rect_ =
            encoded_frame->is_key_frame_ ? prev_rect_key : prev_rect_sub;
      }
    }
  }

  // Update previous to previous and previous canvases for next call.
  WebPCopyPixels(enc->curr_canvas_, &enc->prev_canvas_);
  enc->is_first_frame_ = 0;

 Skip:
  ok = 1;
  ++enc->in_frame_count_;

 End:
  if (!ok || frame_skipped) {
    FrameRelease(encoded_frame);
    // We reset some counters, as the frame addition failed/was skipped.
    --enc->count_;
    if (!enc->is_first_frame_) --enc->count_since_key_frame_;
    if (!ok) {
      MarkError2(enc, "ERROR adding frame. WebPEncodingError" , error_code);
    }
  }
  enc->curr_canvas_->error_code = error_code;   // report error_code
  assert(ok || error_code != VP8_ENC_OK);
  return ok;
}
| 1296 | |
// Pushes 'flush_count_' frames from the head of the cache into the mux and
// releases them. Returns true on success.
static int FlushFrames(WebPAnimEncoder* const enc) {
  while (enc->flush_count_ > 0) {
    WebPMuxError err;
    EncodedFrame* const curr = GetFrame(enc, 0);
    // Emit whichever representation was finally chosen for this frame.
    const WebPMuxFrameInfo* const info =
        curr->is_key_frame_ ? &curr->key_frame_ : &curr->sub_frame_;
    assert(enc->mux_ != NULL);
    err = WebPMuxPushFrame(enc->mux_, info, 1);
    if (err != WEBP_MUX_OK) {
      MarkError2(enc, "ERROR adding frame. WebPMuxError" , err);
      return 0;
    }
    if (enc->options_.verbose) {
      fprintf(stderr, "INFO: Added frame. offset:%d,%d dispose:%d blend:%d\n" ,
              info->x_offset, info->y_offset, info->dispose_method,
              info->blend_method);
    }
    ++enc->out_frame_count_;
    FrameRelease(curr);
    // Advance the cache head and adjust counters accordingly.
    ++enc->start_;
    --enc->flush_count_;
    --enc->count_;
    if (enc->keyframe_ != KEYFRAME_NONE) --enc->keyframe_;
  }

  if (enc->count_ == 1 && enc->start_ != 0) {
    // Move enc->start to index 0.
    const int enc_start_tmp = (int)enc->start_;
    EncodedFrame temp = enc->encoded_frames_[0];
    enc->encoded_frames_[0] = enc->encoded_frames_[enc_start_tmp];
    enc->encoded_frames_[enc_start_tmp] = temp;
    FrameRelease(&enc->encoded_frames_[enc_start_tmp]);
    enc->start_ = 0;
  }
  return 1;
}
| 1333 | |
| 1334 | #undef DELTA_INFINITY |
| 1335 | #undef KEYFRAME_NONE |
| 1336 | |
// Public API: adds 'frame' with the given 'timestamp' (in milliseconds) to
// the animation. A NULL 'frame' marks the last call and fixes the duration of
// the final frame. Returns true on success; on failure, error details are
// recorded in the encoder (and in 'frame->error_code' where applicable).
int WebPAnimEncoderAdd(WebPAnimEncoder* enc, WebPPicture* frame, int timestamp,
                       const WebPConfig* encoder_config) {
  WebPConfig config;
  int ok;

  if (enc == NULL) {
    return 0;
  }
  MarkNoError(enc);

  if (!enc->is_first_frame_) {
    // Make sure timestamps are non-decreasing (integer wrap-around is OK).
    const uint32_t prev_frame_duration =
        (uint32_t)timestamp - enc->prev_timestamp_;
    if (prev_frame_duration >= MAX_DURATION) {
      if (frame != NULL) {
        frame->error_code = VP8_ENC_ERROR_INVALID_CONFIGURATION;
      }
      MarkError(enc, "ERROR adding frame: timestamps must be non-decreasing" );
      return 0;
    }
    // The previous frame's duration is only known now; apply it.
    if (!IncreasePreviousDuration(enc, (int)prev_frame_duration)) {
      return 0;
    }
  } else {
    enc->first_timestamp_ = timestamp;
  }

  if (frame == NULL) {  // Special: last call.
    enc->got_null_frame_ = 1;
    enc->prev_timestamp_ = timestamp;
    return 1;
  }

  if (frame->width != enc->canvas_width_ ||
      frame->height != enc->canvas_height_) {
    frame->error_code = VP8_ENC_ERROR_INVALID_CONFIGURATION;
    MarkError(enc, "ERROR adding frame: Invalid frame dimensions" );
    return 0;
  }

  if (!frame->use_argb) {  // Convert frame from YUV(A) to ARGB.
    if (enc->options_.verbose) {
      fprintf(stderr, "WARNING: Converting frame from YUV(A) to ARGB format; "
                      "this incurs a small loss.\n" );
    }
    if (!WebPPictureYUVAToARGB(frame)) {
      MarkError(enc, "ERROR converting frame from YUV(A) to ARGB" );
      return 0;
    }
  }

  if (encoder_config != NULL) {
    if (!WebPValidateConfig(encoder_config)) {
      MarkError(enc, "ERROR adding frame: Invalid WebPConfig" );
      return 0;
    }
    config = *encoder_config;
  } else {
    // Default to lossless when no config is supplied.
    WebPConfigInit(&config);
    config.lossless = 1;
  }
  assert(enc->curr_canvas_ == NULL);
  enc->curr_canvas_ = frame;  // Store reference.
  assert(enc->curr_canvas_copy_modified_ == 1);
  CopyCurrentCanvas(enc);

  ok = CacheFrame(enc, &config) && FlushFrames(enc);

  // Drop the reference; the caller keeps ownership of 'frame'.
  enc->curr_canvas_ = NULL;
  enc->curr_canvas_copy_modified_ = 1;
  if (ok) {
    enc->prev_timestamp_ = timestamp;
  }
  return ok;
}
| 1413 | |
| 1414 | // ----------------------------------------------------------------------------- |
| 1415 | // Bitstream assembly. |
| 1416 | |
// Clears 'canvas' and decodes the bitstream of 'frame' onto it at the frame's
// offsets, writing directly into the canvas ARGB buffer via a picture view.
// Returns true on success.
static int DecodeFrameOntoCanvas(const WebPMuxFrameInfo* const frame,
                                 WebPPicture* const canvas) {
  const WebPData* const image = &frame->bitstream;
  WebPPicture sub_image;
  WebPDecoderConfig config;
  WebPInitDecoderConfig(&config);
  WebPUtilClearPic(canvas, NULL);
  if (WebPGetFeatures(image->bytes, image->size, &config.input) !=
      VP8_STATUS_OK) {
    return 0;
  }
  // View into the canvas region covered by the frame; decoding writes there.
  if (!WebPPictureView(canvas, frame->x_offset, frame->y_offset,
                       config.input.width, config.input.height, &sub_image)) {
    return 0;
  }
  config.output.is_external_memory = 1;
  config.output.colorspace = MODE_BGRA;
  config.output.u.RGBA.rgba = (uint8_t*)sub_image.argb;
  config.output.u.RGBA.stride = sub_image.argb_stride * 4;
  config.output.u.RGBA.size = config.output.u.RGBA.stride * sub_image.height;

  if (WebPDecode(image->bytes, image->size, &config) != VP8_STATUS_OK) {
    return 0;
  }
  return 1;
}
| 1443 | |
// Decodes 'frame' onto a full canvas and re-encodes it as a standalone image
// in 'full_image'. When mixed mode is allowed, tries both the last-used config
// and its lossless/lossy counterpart and keeps the smaller result. Returns
// true on success; 'full_image' then owns the encoded bytes.
static int FrameToFullCanvas(WebPAnimEncoder* const enc,
                             const WebPMuxFrameInfo* const frame,
                             WebPData* const full_image) {
  WebPPicture* const canvas_buf = &enc->curr_canvas_copy_;
  WebPMemoryWriter mem1, mem2;
  WebPMemoryWriterInit(&mem1);
  WebPMemoryWriterInit(&mem2);

  if (!DecodeFrameOntoCanvas(frame, canvas_buf)) goto Err;
  if (!EncodeFrame(&enc->last_config_, canvas_buf, &mem1)) goto Err;
  GetEncodedData(&mem1, full_image);

  if (enc->options_.allow_mixed) {
    // Also try the opposite (lossless vs lossy) config; keep the smaller one.
    if (!EncodeFrame(&enc->last_config_reversed_, canvas_buf, &mem2)) goto Err;
    if (mem2.size < mem1.size) {
      GetEncodedData(&mem2, full_image);
      WebPMemoryWriterClear(&mem1);
    } else {
      WebPMemoryWriterClear(&mem2);
    }
  }
  return 1;

 Err:
  WebPMemoryWriterClear(&mem1);
  WebPMemoryWriterClear(&mem2);
  return 0;
}
| 1472 | |
// Convert a single-frame animation to a non-animated image if appropriate.
// Both variants are assembled and the smaller bitstream wins.
// TODO(urvang): Can we pick one of the two heuristically (based on frame
// rectangle and/or presence of alpha)?
static WebPMuxError OptimizeSingleFrame(WebPAnimEncoder* const enc,
                                        WebPData* const webp_data) {
  WebPMuxError err = WEBP_MUX_OK;
  int canvas_width, canvas_height;
  WebPMuxFrameInfo frame;
  WebPData full_image;
  WebPData webp_data2;
  WebPMux* const mux = WebPMuxCreate(webp_data, 0);
  if (mux == NULL) return WEBP_MUX_BAD_DATA;
  assert(enc->out_frame_count_ == 1);
  WebPDataInit(&frame.bitstream);
  WebPDataInit(&full_image);
  WebPDataInit(&webp_data2);

  err = WebPMuxGetFrame(mux, 1, &frame);
  if (err != WEBP_MUX_OK) goto End;
  if (frame.id != WEBP_CHUNK_ANMF) goto End;  // Non-animation: nothing to do.
  err = WebPMuxGetCanvasSize(mux, &canvas_width, &canvas_height);
  if (err != WEBP_MUX_OK) goto End;
  // Re-encode the lone frame as a full-canvas still image.
  if (!FrameToFullCanvas(enc, &frame, &full_image)) {
    err = WEBP_MUX_BAD_DATA;
    goto End;
  }
  err = WebPMuxSetImage(mux, &full_image, 1);
  if (err != WEBP_MUX_OK) goto End;
  err = WebPMuxAssemble(mux, &webp_data2);
  if (err != WEBP_MUX_OK) goto End;

  if (webp_data2.size < webp_data->size) {  // Pick 'webp_data2' if smaller.
    WebPDataClear(webp_data);
    *webp_data = webp_data2;
    WebPDataInit(&webp_data2);
  }

 End:
  WebPDataClear(&frame.bitstream);
  WebPDataClear(&full_image);
  WebPMuxDelete(mux);
  WebPDataClear(&webp_data2);
  return err;
}
| 1517 | |
| 1518 | int WebPAnimEncoderAssemble(WebPAnimEncoder* enc, WebPData* webp_data) { |
| 1519 | WebPMux* mux; |
| 1520 | WebPMuxError err; |
| 1521 | |
| 1522 | if (enc == NULL) { |
| 1523 | return 0; |
| 1524 | } |
| 1525 | MarkNoError(enc); |
| 1526 | |
| 1527 | if (webp_data == NULL) { |
| 1528 | MarkError(enc, "ERROR assembling: NULL input" ); |
| 1529 | return 0; |
| 1530 | } |
| 1531 | |
| 1532 | if (enc->in_frame_count_ == 0) { |
| 1533 | MarkError(enc, "ERROR: No frames to assemble" ); |
| 1534 | return 0; |
| 1535 | } |
| 1536 | |
| 1537 | if (!enc->got_null_frame_ && enc->in_frame_count_ > 1 && enc->count_ > 0) { |
| 1538 | // set duration of the last frame to be avg of durations of previous frames. |
| 1539 | const double delta_time = |
| 1540 | (uint32_t)enc->prev_timestamp_ - enc->first_timestamp_; |
| 1541 | const int average_duration = (int)(delta_time / (enc->in_frame_count_ - 1)); |
| 1542 | if (!IncreasePreviousDuration(enc, average_duration)) { |
| 1543 | return 0; |
| 1544 | } |
| 1545 | } |
| 1546 | |
| 1547 | // Flush any remaining frames. |
| 1548 | enc->flush_count_ = enc->count_; |
| 1549 | if (!FlushFrames(enc)) { |
| 1550 | return 0; |
| 1551 | } |
| 1552 | |
| 1553 | // Set definitive canvas size. |
| 1554 | mux = enc->mux_; |
| 1555 | err = WebPMuxSetCanvasSize(mux, enc->canvas_width_, enc->canvas_height_); |
| 1556 | if (err != WEBP_MUX_OK) goto Err; |
| 1557 | |
| 1558 | err = WebPMuxSetAnimationParams(mux, &enc->options_.anim_params); |
| 1559 | if (err != WEBP_MUX_OK) goto Err; |
| 1560 | |
| 1561 | // Assemble into a WebP bitstream. |
| 1562 | err = WebPMuxAssemble(mux, webp_data); |
| 1563 | if (err != WEBP_MUX_OK) goto Err; |
| 1564 | |
| 1565 | if (enc->out_frame_count_ == 1) { |
| 1566 | err = OptimizeSingleFrame(enc, webp_data); |
| 1567 | if (err != WEBP_MUX_OK) goto Err; |
| 1568 | } |
| 1569 | return 1; |
| 1570 | |
| 1571 | Err: |
| 1572 | MarkError2(enc, "ERROR assembling WebP" , err); |
| 1573 | return 0; |
| 1574 | } |
| 1575 | |
| 1576 | const char* WebPAnimEncoderGetError(WebPAnimEncoder* enc) { |
| 1577 | if (enc == NULL) return NULL; |
| 1578 | return enc->error_str_; |
| 1579 | } |
| 1580 | |
| 1581 | // ----------------------------------------------------------------------------- |
| 1582 | |