// Copyright 2012 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Author: Jyrki Alakuijala (jyrki@google.com)
//
#ifdef HAVE_CONFIG_H
#include "src/webp/config.h"
#endif

#include <assert.h>
#include <float.h>
#include <math.h>
#include <stdlib.h>  // bsearch()
#include <string.h>  // memset(), memcpy(), memmove()

#include "src/dsp/lossless.h"
#include "src/dsp/lossless_common.h"
#include "src/enc/backward_references_enc.h"
#include "src/enc/histogram_enc.h"
#include "src/enc/vp8i_enc.h"
#include "src/utils/utils.h"

#define MAX_BIT_COST FLT_MAX

// Number of partitions for the three dominant (literal, red and blue) symbol
// costs.
#define NUM_PARTITIONS 4
// The size of the bin-hash corresponding to the three dominant costs.
#define BIN_SIZE (NUM_PARTITIONS * NUM_PARTITIONS * NUM_PARTITIONS)
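// With NUM_PARTITIONS == 4 this gives 4 * 4 * 4 == 64 bins.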
// Maximum number of histograms allowed in the greedy combining algorithm.
#define MAX_HISTO_GREEDY 100

static void HistogramClear(VP8LHistogram* const p) {
  uint32_t* const literal = p->literal_;
  const int cache_bits = p->palette_code_bits_;
  const int histo_size = VP8LGetHistogramSize(cache_bits);
  memset(p, 0, histo_size);
  p->palette_code_bits_ = cache_bits;
  p->literal_ = literal;
}

// Swap two histogram pointers.
static void HistogramSwap(VP8LHistogram** const A, VP8LHistogram** const B) {
  VP8LHistogram* const tmp = *A;
  *A = *B;
  *B = tmp;
}

static void HistogramCopy(const VP8LHistogram* const src,
                          VP8LHistogram* const dst) {
  uint32_t* const dst_literal = dst->literal_;
  const int dst_cache_bits = dst->palette_code_bits_;
  const int literal_size = VP8LHistogramNumCodes(dst_cache_bits);
  const int histo_size = VP8LGetHistogramSize(dst_cache_bits);
  assert(src->palette_code_bits_ == dst_cache_bits);
  memcpy(dst, src, histo_size);
  dst->literal_ = dst_literal;
  memcpy(dst->literal_, src->literal_, literal_size * sizeof(*dst->literal_));
}

int VP8LGetHistogramSize(int cache_bits) {
  const int literal_size = VP8LHistogramNumCodes(cache_bits);
  const size_t total_size = sizeof(VP8LHistogram) + sizeof(int) * literal_size;
  assert(total_size <= (size_t)0x7fffffff);
  return (int)total_size;
}

void VP8LFreeHistogram(VP8LHistogram* const histo) {
  WebPSafeFree(histo);
}

void VP8LFreeHistogramSet(VP8LHistogramSet* const histo) {
  WebPSafeFree(histo);
}

void VP8LHistogramStoreRefs(const VP8LBackwardRefs* const refs,
                            VP8LHistogram* const histo) {
  VP8LRefsCursor c = VP8LRefsCursorInit(refs);
  while (VP8LRefsCursorOk(&c)) {
    VP8LHistogramAddSinglePixOrCopy(histo, c.cur_pos, NULL, 0);
    VP8LRefsCursorNext(&c);
  }
}

void VP8LHistogramCreate(VP8LHistogram* const p,
                         const VP8LBackwardRefs* const refs,
                         int palette_code_bits) {
  if (palette_code_bits >= 0) {
    p->palette_code_bits_ = palette_code_bits;
  }
  HistogramClear(p);
  VP8LHistogramStoreRefs(refs, p);
}

void VP8LHistogramInit(VP8LHistogram* const p, int palette_code_bits,
                       int init_arrays) {
  p->palette_code_bits_ = palette_code_bits;
  if (init_arrays) {
    HistogramClear(p);
  } else {
    p->trivial_symbol_ = 0;
    p->bit_cost_ = 0.;
    p->literal_cost_ = 0.;
    p->red_cost_ = 0.;
    p->blue_cost_ = 0.;
    memset(p->is_used_, 0, sizeof(p->is_used_));
  }
}

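// The histogram and its variable-sized literal_ array are carved out of a
// single allocation; see VP8LGetHistogramSize() for the total size.
// A minimal usage sketch (not the encoder's actual flow), assuming 'refs'
// holds valid backward references and 'cache_bits' matches the encoding:
//   VP8LHistogram* const histo = VP8LAllocateHistogram(cache_bits);
//   if (histo != NULL) {
//     VP8LHistogramCreate(histo, refs, cache_bits);  // clear + accumulate
//     const float cost = VP8LHistogramEstimateBits(histo);
//     VP8LFreeHistogram(histo);
//   }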
VP8LHistogram* VP8LAllocateHistogram(int cache_bits) {
  VP8LHistogram* histo = NULL;
  const int total_size = VP8LGetHistogramSize(cache_bits);
  uint8_t* const memory = (uint8_t*)WebPSafeMalloc(total_size, sizeof(*memory));
  if (memory == NULL) return NULL;
  histo = (VP8LHistogram*)memory;
  // literal_ won't necessarily be aligned.
  histo->literal_ = (uint32_t*)(memory + sizeof(VP8LHistogram));
  VP8LHistogramInit(histo, cache_bits, /*init_arrays=*/ 0);
  return histo;
}

// Resets the pointers of the histograms to point to the bit buffer in the set.
static void HistogramSetResetPointers(VP8LHistogramSet* const set,
                                      int cache_bits) {
  int i;
  const int histo_size = VP8LGetHistogramSize(cache_bits);
  uint8_t* memory = (uint8_t*) (set->histograms);
  memory += set->max_size * sizeof(*set->histograms);
  for (i = 0; i < set->max_size; ++i) {
    memory = (uint8_t*) WEBP_ALIGN(memory);
    set->histograms[i] = (VP8LHistogram*) memory;
    // literal_ won't necessarily be aligned.
    set->histograms[i]->literal_ = (uint32_t*)(memory + sizeof(VP8LHistogram));
    memory += histo_size;
  }
}

// Returns the total size of the VP8LHistogramSet.
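// Layout of that single allocation:
//   |VP8LHistogramSet|histogram pointer array|(aligned) histogram data|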
static size_t HistogramSetTotalSize(int size, int cache_bits) {
  const int histo_size = VP8LGetHistogramSize(cache_bits);
  return (sizeof(VP8LHistogramSet) + size * (sizeof(VP8LHistogram*) +
          histo_size + WEBP_ALIGN_CST));
}

VP8LHistogramSet* VP8LAllocateHistogramSet(int size, int cache_bits) {
  int i;
  VP8LHistogramSet* set;
  const size_t total_size = HistogramSetTotalSize(size, cache_bits);
  uint8_t* memory = (uint8_t*)WebPSafeMalloc(total_size, sizeof(*memory));
  if (memory == NULL) return NULL;

  set = (VP8LHistogramSet*)memory;
  memory += sizeof(*set);
  set->histograms = (VP8LHistogram**)memory;
  set->max_size = size;
  set->size = size;
  HistogramSetResetPointers(set, cache_bits);
  for (i = 0; i < size; ++i) {
    VP8LHistogramInit(set->histograms[i], cache_bits, /*init_arrays=*/ 0);
  }
  return set;
}

void VP8LHistogramSetClear(VP8LHistogramSet* const set) {
  int i;
  const int cache_bits = set->histograms[0]->palette_code_bits_;
  const int size = set->max_size;
  const size_t total_size = HistogramSetTotalSize(size, cache_bits);
  uint8_t* memory = (uint8_t*)set;

  memset(memory, 0, total_size);
  memory += sizeof(*set);
  set->histograms = (VP8LHistogram**)memory;
  set->max_size = size;
  set->size = size;
  HistogramSetResetPointers(set, cache_bits);
  for (i = 0; i < size; ++i) {
    set->histograms[i]->palette_code_bits_ = cache_bits;
  }
}

// Removes the histogram 'i' from 'set' by setting it to NULL.
static void HistogramSetRemoveHistogram(VP8LHistogramSet* const set, int i,
                                        int* const num_used) {
  assert(set->histograms[i] != NULL);
  set->histograms[i] = NULL;
  --*num_used;
  // If we remove the last valid one, shrink until the next valid one.
  if (i == set->size - 1) {
    while (set->size >= 1 && set->histograms[set->size - 1] == NULL) {
      --set->size;
    }
  }
}

// -----------------------------------------------------------------------------

void VP8LHistogramAddSinglePixOrCopy(VP8LHistogram* const histo,
                                     const PixOrCopy* const v,
                                     int (*const distance_modifier)(int, int),
                                     int distance_modifier_arg0) {
  if (PixOrCopyIsLiteral(v)) {
    ++histo->alpha_[PixOrCopyLiteral(v, 3)];
    ++histo->red_[PixOrCopyLiteral(v, 2)];
    ++histo->literal_[PixOrCopyLiteral(v, 1)];
    ++histo->blue_[PixOrCopyLiteral(v, 0)];
  } else if (PixOrCopyIsCacheIdx(v)) {
    const int literal_ix =
        NUM_LITERAL_CODES + NUM_LENGTH_CODES + PixOrCopyCacheIdx(v);
    assert(histo->palette_code_bits_ != 0);
    ++histo->literal_[literal_ix];
  } else {
    int code, extra_bits;
    VP8LPrefixEncodeBits(PixOrCopyLength(v), &code, &extra_bits);
    ++histo->literal_[NUM_LITERAL_CODES + code];
    if (distance_modifier == NULL) {
      VP8LPrefixEncodeBits(PixOrCopyDistance(v), &code, &extra_bits);
    } else {
      VP8LPrefixEncodeBits(
          distance_modifier(distance_modifier_arg0, PixOrCopyDistance(v)),
          &code, &extra_bits);
    }
    ++histo->distance_[code];
  }
}

// -----------------------------------------------------------------------------
// Entropy-related functions.

static WEBP_INLINE float BitsEntropyRefine(const VP8LBitEntropy* entropy) {
  float mix;
  if (entropy->nonzeros < 5) {
    if (entropy->nonzeros <= 1) {
      return 0;
    }
    // Two symbols, they will be 0 and 1 in a Huffman code.
    // Let's mix in a bit of entropy to favor good clustering when
    // distributions of these are combined.
    if (entropy->nonzeros == 2) {
      return 0.99f * entropy->sum + 0.01f * entropy->entropy;
    }
    // No matter what the entropy says, we cannot be better than min_limit
    // with Huffman coding. I am mixing a bit of entropy into the
    // min_limit since it produces much better (~0.5 %) compression results
    // perhaps because of better entropy clustering.
    if (entropy->nonzeros == 3) {
      mix = 0.95f;
    } else {
      mix = 0.7f;  // nonzeros == 4.
    }
  } else {
    mix = 0.627f;
  }

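  // With at least three used symbols (the two-symbol case returned above), a
  // Huffman code can give a 1-bit code to at most one symbol and needs >= 2
  // bits for every other one, hence the 2 * sum - max_val lower bound below.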
  {
    float min_limit = 2.f * entropy->sum - entropy->max_val;
    min_limit = mix * min_limit + (1.f - mix) * entropy->entropy;
    return (entropy->entropy < min_limit) ? min_limit : entropy->entropy;
  }
}

float VP8LBitsEntropy(const uint32_t* const array, int n) {
  VP8LBitEntropy entropy;
  VP8LBitsEntropyUnrefined(array, n, &entropy);

  return BitsEntropyRefine(&entropy);
}

static float InitialHuffmanCost(void) {
  // Small bias because Huffman code length is typically not stored in
  // full length.
  static const int kHuffmanCodeOfHuffmanCodeSize = CODE_LENGTH_CODES * 3;
  static const float kSmallBias = 9.1f;
  return kHuffmanCodeOfHuffmanCodeSize - kSmallBias;
}

// Finalize the Huffman cost based on streak numbers and length type (<3 or >=3)
static float FinalHuffmanCost(const VP8LStreaks* const stats) {
  // The constants in this function are experimental and got rounded from
  // their original values in 1/8 when switched to 1/1024.
  float retval = InitialHuffmanCost();
  // Second coefficient: Many zeros in the histogram are covered efficiently
  // by a run-length encode. Originally 2/8.
  retval += stats->counts[0] * 1.5625f + 0.234375f * stats->streaks[0][1];
  // Second coefficient: Constant values are encoded less efficiently, but still
  // RLE'ed. Originally 6/8.
  retval += stats->counts[1] * 2.578125f + 0.703125f * stats->streaks[1][1];
  // 0s are usually encoded more efficiently than non-0s.
  // Originally 15/8.
  retval += 1.796875f * stats->streaks[0][0];
  // Originally 26/8.
  retval += 3.28125f * stats->streaks[1][0];
  return retval;
}

// Get the symbol entropy for the distribution 'population'.
// If 'trivial_sym' is non-NULL, set it to the only symbol present in the
// distribution, or to VP8L_NON_TRIVIAL_SYM if more than one symbol is present.
static float PopulationCost(const uint32_t* const population, int length,
                            uint32_t* const trivial_sym,
                            uint8_t* const is_used) {
  VP8LBitEntropy bit_entropy;
  VP8LStreaks stats;
  VP8LGetEntropyUnrefined(population, length, &bit_entropy, &stats);
  if (trivial_sym != NULL) {
    *trivial_sym = (bit_entropy.nonzeros == 1) ? bit_entropy.nonzero_code
                                               : VP8L_NON_TRIVIAL_SYM;
  }
  // The histogram is used if there is at least one non-zero streak.
  *is_used = (stats.streaks[1][0] != 0 || stats.streaks[1][1] != 0);

  return BitsEntropyRefine(&bit_entropy) + FinalHuffmanCost(&stats);
}

// 'trivial_at_end' is 1 if the two histograms each have a single non-zero
// value, located at the same index in both: either the first or the last bin.
static WEBP_INLINE float GetCombinedEntropy(const uint32_t* const X,
                                            const uint32_t* const Y, int length,
                                            int is_X_used, int is_Y_used,
                                            int trivial_at_end) {
  VP8LStreaks stats;
  if (trivial_at_end) {
    // This configuration is due to palettization that transforms an indexed
    // pixel into 0xff000000 | (pixel << 8) in VP8LBundleColorMap.
    // BitsEntropyRefine is 0 for histograms with only one non-zero value.
    // Only FinalHuffmanCost needs to be evaluated.
    memset(&stats, 0, sizeof(stats));
    // Deal with the non-zero value at index 0 or length-1.
    stats.streaks[1][0] = 1;
    // Deal with the following/previous zero streak.
    stats.counts[0] = 1;
    stats.streaks[0][1] = length - 1;
    return FinalHuffmanCost(&stats);
  } else {
    VP8LBitEntropy bit_entropy;
    if (is_X_used) {
      if (is_Y_used) {
        VP8LGetCombinedEntropyUnrefined(X, Y, length, &bit_entropy, &stats);
      } else {
        VP8LGetEntropyUnrefined(X, length, &bit_entropy, &stats);
      }
    } else {
      if (is_Y_used) {
        VP8LGetEntropyUnrefined(Y, length, &bit_entropy, &stats);
      } else {
        memset(&stats, 0, sizeof(stats));
        stats.counts[0] = 1;
        stats.streaks[0][length > 3] = length;
        VP8LBitEntropyInit(&bit_entropy);
      }
    }

    return BitsEntropyRefine(&bit_entropy) + FinalHuffmanCost(&stats);
  }
}

// Estimates the Entropy + Huffman + other block overhead size cost.
float VP8LHistogramEstimateBits(VP8LHistogram* const p) {
  return
      PopulationCost(p->literal_, VP8LHistogramNumCodes(p->palette_code_bits_),
                     NULL, &p->is_used_[0])
      + PopulationCost(p->red_, NUM_LITERAL_CODES, NULL, &p->is_used_[1])
      + PopulationCost(p->blue_, NUM_LITERAL_CODES, NULL, &p->is_used_[2])
      + PopulationCost(p->alpha_, NUM_LITERAL_CODES, NULL, &p->is_used_[3])
      + PopulationCost(p->distance_, NUM_DISTANCE_CODES, NULL, &p->is_used_[4])
      + VP8LExtraCost(p->literal_ + NUM_LITERAL_CODES, NUM_LENGTH_CODES)
      + VP8LExtraCost(p->distance_, NUM_DISTANCE_CODES);
}

// -----------------------------------------------------------------------------
// Various histogram combine/cost-eval functions

static int GetCombinedHistogramEntropy(const VP8LHistogram* const a,
                                       const VP8LHistogram* const b,
                                       float cost_threshold, float* cost) {
  const int palette_code_bits = a->palette_code_bits_;
  int trivial_at_end = 0;
  assert(a->palette_code_bits_ == b->palette_code_bits_);
  *cost += GetCombinedEntropy(a->literal_, b->literal_,
                              VP8LHistogramNumCodes(palette_code_bits),
                              a->is_used_[0], b->is_used_[0], 0);
  *cost += VP8LExtraCostCombined(a->literal_ + NUM_LITERAL_CODES,
                                 b->literal_ + NUM_LITERAL_CODES,
                                 NUM_LENGTH_CODES);
  if (*cost > cost_threshold) return 0;

  if (a->trivial_symbol_ != VP8L_NON_TRIVIAL_SYM &&
      a->trivial_symbol_ == b->trivial_symbol_) {
    // A, R and B are all 0 or 0xff.
    const uint32_t color_a = (a->trivial_symbol_ >> 24) & 0xff;
    const uint32_t color_r = (a->trivial_symbol_ >> 16) & 0xff;
    const uint32_t color_b = (a->trivial_symbol_ >> 0) & 0xff;
    if ((color_a == 0 || color_a == 0xff) &&
        (color_r == 0 || color_r == 0xff) &&
        (color_b == 0 || color_b == 0xff)) {
      trivial_at_end = 1;
    }
  }

  *cost +=
      GetCombinedEntropy(a->red_, b->red_, NUM_LITERAL_CODES, a->is_used_[1],
                         b->is_used_[1], trivial_at_end);
  if (*cost > cost_threshold) return 0;

  *cost +=
      GetCombinedEntropy(a->blue_, b->blue_, NUM_LITERAL_CODES, a->is_used_[2],
                         b->is_used_[2], trivial_at_end);
  if (*cost > cost_threshold) return 0;

  *cost +=
      GetCombinedEntropy(a->alpha_, b->alpha_, NUM_LITERAL_CODES,
                         a->is_used_[3], b->is_used_[3], trivial_at_end);
  if (*cost > cost_threshold) return 0;

  *cost +=
      GetCombinedEntropy(a->distance_, b->distance_, NUM_DISTANCE_CODES,
                         a->is_used_[4], b->is_used_[4], 0);
  *cost +=
      VP8LExtraCostCombined(a->distance_, b->distance_, NUM_DISTANCE_CODES);
  if (*cost > cost_threshold) return 0;

  return 1;
}

static WEBP_INLINE void HistogramAdd(const VP8LHistogram* const a,
                                     const VP8LHistogram* const b,
                                     VP8LHistogram* const out) {
  VP8LHistogramAdd(a, b, out);
  out->trivial_symbol_ = (a->trivial_symbol_ == b->trivial_symbol_)
                       ? a->trivial_symbol_
                       : VP8L_NON_TRIVIAL_SYM;
}

// Performs out = a + b, computing the cost C(a+b) - C(a) - C(b) while comparing
// to the threshold value 'cost_threshold'. The score returned is
// Score = C(a+b) - C(a) - C(b), where C(a) + C(b) is known and fixed.
// Since the previous score passed is 'cost_threshold', we only need to compare
// the partial cost against 'cost_threshold + C(a) + C(b)' to possibly bail-out
// early.
static float HistogramAddEval(const VP8LHistogram* const a,
                              const VP8LHistogram* const b,
                              VP8LHistogram* const out, float cost_threshold) {
  float cost = 0;
  const float sum_cost = a->bit_cost_ + b->bit_cost_;
  cost_threshold += sum_cost;

  if (GetCombinedHistogramEntropy(a, b, cost_threshold, &cost)) {
    HistogramAdd(a, b, out);
    out->bit_cost_ = cost;
    out->palette_code_bits_ = a->palette_code_bits_;
  }

  return cost - sum_cost;
}

// Same as HistogramAddEval(), except that the resulting histogram
// is not stored. Only the cost C(a+b) - C(a) is evaluated. We omit
// the term C(b) which is constant over all the evaluations.
static float HistogramAddThresh(const VP8LHistogram* const a,
                                const VP8LHistogram* const b,
                                float cost_threshold) {
  float cost;
  assert(a != NULL && b != NULL);
  cost = -a->bit_cost_;
  GetCombinedHistogramEntropy(a, b, cost_threshold, &cost);
  return cost;
}

// -----------------------------------------------------------------------------

// The structure to keep track of cost range for the three dominant entropy
// symbols.
typedef struct {
  float literal_max_;
  float literal_min_;
  float red_max_;
  float red_min_;
  float blue_max_;
  float blue_min_;
} DominantCostRange;

static void DominantCostRangeInit(DominantCostRange* const c) {
  c->literal_max_ = 0.;
  c->literal_min_ = MAX_BIT_COST;
  c->red_max_ = 0.;
  c->red_min_ = MAX_BIT_COST;
  c->blue_max_ = 0.;
  c->blue_min_ = MAX_BIT_COST;
}

static void UpdateDominantCostRange(
    const VP8LHistogram* const h, DominantCostRange* const c) {
  if (c->literal_max_ < h->literal_cost_) c->literal_max_ = h->literal_cost_;
  if (c->literal_min_ > h->literal_cost_) c->literal_min_ = h->literal_cost_;
  if (c->red_max_ < h->red_cost_) c->red_max_ = h->red_cost_;
  if (c->red_min_ > h->red_cost_) c->red_min_ = h->red_cost_;
  if (c->blue_max_ < h->blue_cost_) c->blue_max_ = h->blue_cost_;
  if (c->blue_min_ > h->blue_cost_) c->blue_min_ = h->blue_cost_;
}

static void UpdateHistogramCost(VP8LHistogram* const h) {
  uint32_t alpha_sym, red_sym, blue_sym;
  const float alpha_cost =
      PopulationCost(h->alpha_, NUM_LITERAL_CODES, &alpha_sym, &h->is_used_[3]);
  const float distance_cost =
      PopulationCost(h->distance_, NUM_DISTANCE_CODES, NULL, &h->is_used_[4]) +
      VP8LExtraCost(h->distance_, NUM_DISTANCE_CODES);
  const int num_codes = VP8LHistogramNumCodes(h->palette_code_bits_);
  h->literal_cost_ =
      PopulationCost(h->literal_, num_codes, NULL, &h->is_used_[0]) +
      VP8LExtraCost(h->literal_ + NUM_LITERAL_CODES, NUM_LENGTH_CODES);
  h->red_cost_ =
      PopulationCost(h->red_, NUM_LITERAL_CODES, &red_sym, &h->is_used_[1]);
  h->blue_cost_ =
      PopulationCost(h->blue_, NUM_LITERAL_CODES, &blue_sym, &h->is_used_[2]);
  h->bit_cost_ = h->literal_cost_ + h->red_cost_ + h->blue_cost_ +
                 alpha_cost + distance_cost;
  if ((alpha_sym | red_sym | blue_sym) == VP8L_NON_TRIVIAL_SYM) {
    h->trivial_symbol_ = VP8L_NON_TRIVIAL_SYM;
  } else {
    h->trivial_symbol_ =
        ((uint32_t)alpha_sym << 24) | (red_sym << 16) | (blue_sym << 0);
  }
}

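// Maps 'val' within [min, max] onto a partition index in
// [0, NUM_PARTITIONS - 1]; the 1e-6 epsilon keeps val == max from producing
// NUM_PARTITIONS itself.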
static int GetBinIdForEntropy(float min, float max, float val) {
  const float range = max - min;
  if (range > 0.) {
    const float delta = val - min;
    return (int)((NUM_PARTITIONS - 1e-6) * delta / range);
  } else {
    return 0;
  }
}

static int GetHistoBinIndex(const VP8LHistogram* const h,
                            const DominantCostRange* const c, int low_effort) {
  int bin_id = GetBinIdForEntropy(c->literal_min_, c->literal_max_,
                                  h->literal_cost_);
  assert(bin_id < NUM_PARTITIONS);
  if (!low_effort) {
    bin_id = bin_id * NUM_PARTITIONS
           + GetBinIdForEntropy(c->red_min_, c->red_max_, h->red_cost_);
    bin_id = bin_id * NUM_PARTITIONS
           + GetBinIdForEntropy(c->blue_min_, c->blue_max_, h->blue_cost_);
    assert(bin_id < BIN_SIZE);
  }
  return bin_id;
}

// Construct the histograms from backward references.
static void HistogramBuild(
    int xsize, int histo_bits, const VP8LBackwardRefs* const backward_refs,
    VP8LHistogramSet* const image_histo) {
  int x = 0, y = 0;
  const int histo_xsize = VP8LSubSampleSize(xsize, histo_bits);
  VP8LHistogram** const histograms = image_histo->histograms;
  VP8LRefsCursor c = VP8LRefsCursorInit(backward_refs);
  assert(histo_bits > 0);
  VP8LHistogramSetClear(image_histo);
  while (VP8LRefsCursorOk(&c)) {
    const PixOrCopy* const v = c.cur_pos;
    const int ix = (y >> histo_bits) * histo_xsize + (x >> histo_bits);
    VP8LHistogramAddSinglePixOrCopy(histograms[ix], v, NULL, 0);
    x += PixOrCopyLength(v);
    while (x >= xsize) {
      x -= xsize;
      ++y;
    }
    VP8LRefsCursorNext(&c);
  }
}

// Copies the histograms and computes their bit_cost.
static const uint16_t kInvalidHistogramSymbol = (uint16_t)(-1);
static void HistogramCopyAndAnalyze(VP8LHistogramSet* const orig_histo,
                                    VP8LHistogramSet* const image_histo,
                                    int* const num_used,
                                    uint16_t* const histogram_symbols) {
  int i, cluster_id;
  int num_used_orig = *num_used;
  VP8LHistogram** const orig_histograms = orig_histo->histograms;
  VP8LHistogram** const histograms = image_histo->histograms;
  assert(image_histo->max_size == orig_histo->max_size);
  for (cluster_id = 0, i = 0; i < orig_histo->max_size; ++i) {
    VP8LHistogram* const histo = orig_histograms[i];
    UpdateHistogramCost(histo);

    // Skip the histogram if it is completely empty, which can happen for tiles
    // with no information (when they are skipped because of LZ77).
    if (!histo->is_used_[0] && !histo->is_used_[1] && !histo->is_used_[2]
        && !histo->is_used_[3] && !histo->is_used_[4]) {
      // The first histogram is always used. If a histogram is empty, we set
      // its id to be the same as the previous one: this will improve
      // compressibility for later LZ77.
      assert(i > 0);
      HistogramSetRemoveHistogram(image_histo, i, num_used);
      HistogramSetRemoveHistogram(orig_histo, i, &num_used_orig);
      histogram_symbols[i] = kInvalidHistogramSymbol;
    } else {
      // Copy histograms from orig_histo[] to image_histo[].
      HistogramCopy(histo, histograms[i]);
      histogram_symbols[i] = cluster_id++;
      assert(cluster_id <= image_histo->max_size);
    }
  }
}

// Partition histograms to different entropy bins for three dominant (literal,
// red and blue) symbol costs and compute the histogram aggregate bit_cost.
static void HistogramAnalyzeEntropyBin(VP8LHistogramSet* const image_histo,
                                       uint16_t* const bin_map,
                                       int low_effort) {
  int i;
  VP8LHistogram** const histograms = image_histo->histograms;
  const int histo_size = image_histo->size;
  DominantCostRange cost_range;
  DominantCostRangeInit(&cost_range);

  // Analyze the dominant (literal, red and blue) entropy costs.
  for (i = 0; i < histo_size; ++i) {
    if (histograms[i] == NULL) continue;
    UpdateDominantCostRange(histograms[i], &cost_range);
  }

  // bin-hash histograms on three of the dominant (literal, red and blue)
  // symbol costs and store the resulting bin_id for each histogram.
  for (i = 0; i < histo_size; ++i) {
    // bin_map[i] is not set to a special value as its use will later be
    // guarded by another (histograms[i] == NULL) check.
    if (histograms[i] == NULL) continue;
    bin_map[i] = GetHistoBinIndex(histograms[i], &cost_range, low_effort);
  }
}

// Merges some histograms with the same bin_id together if it's advantageous.
// Sets the remaining histograms to NULL.
static void HistogramCombineEntropyBin(
    VP8LHistogramSet* const image_histo, int* num_used,
    const uint16_t* const clusters, uint16_t* const cluster_mappings,
    VP8LHistogram* cur_combo, const uint16_t* const bin_map, int num_bins,
    float combine_cost_factor, int low_effort) {
  VP8LHistogram** const histograms = image_histo->histograms;
  int idx;
  struct {
    int16_t first;    // position of the histogram that accumulates all
                      // histograms with the same bin_id
    uint16_t num_combine_failures;   // number of combine failures per bin_id
  } bin_info[BIN_SIZE];

  assert(num_bins <= BIN_SIZE);
  for (idx = 0; idx < num_bins; ++idx) {
    bin_info[idx].first = -1;
    bin_info[idx].num_combine_failures = 0;
  }

  // By default, a cluster matches itself.
  for (idx = 0; idx < *num_used; ++idx) cluster_mappings[idx] = idx;
  for (idx = 0; idx < image_histo->size; ++idx) {
    int bin_id, first;
    if (histograms[idx] == NULL) continue;
    bin_id = bin_map[idx];
    first = bin_info[bin_id].first;
    if (first == -1) {
      bin_info[bin_id].first = idx;
    } else if (low_effort) {
      HistogramAdd(histograms[idx], histograms[first], histograms[first]);
      HistogramSetRemoveHistogram(image_histo, idx, num_used);
      cluster_mappings[clusters[idx]] = clusters[first];
    } else {
      // try to merge #idx into #first (both share the same bin_id)
      const float bit_cost = histograms[idx]->bit_cost_;
      const float bit_cost_thresh = -bit_cost * combine_cost_factor;
      const float curr_cost_diff = HistogramAddEval(
          histograms[first], histograms[idx], cur_combo, bit_cost_thresh);
      if (curr_cost_diff < bit_cost_thresh) {
        // Try to merge two histograms only if the combo is a trivial one or
        // the two candidate histograms are already non-trivial.
        // For some images, 'try_combine' turns out to be false for a lot of
        // histogram pairs. In that case, we fall back to combining
        // histograms as usual to avoid increasing the header size.
        const int try_combine =
            (cur_combo->trivial_symbol_ != VP8L_NON_TRIVIAL_SYM) ||
            ((histograms[idx]->trivial_symbol_ == VP8L_NON_TRIVIAL_SYM) &&
             (histograms[first]->trivial_symbol_ == VP8L_NON_TRIVIAL_SYM));
        const int max_combine_failures = 32;
        if (try_combine ||
            bin_info[bin_id].num_combine_failures >= max_combine_failures) {
          // move the (better) merged histogram to its final slot
          HistogramSwap(&cur_combo, &histograms[first]);
          HistogramSetRemoveHistogram(image_histo, idx, num_used);
          cluster_mappings[clusters[idx]] = clusters[first];
        } else {
          ++bin_info[bin_id].num_combine_failures;
        }
      }
    }
  }
  if (low_effort) {
    // for the low_effort case, update the final cost when everything is merged
    for (idx = 0; idx < image_histo->size; ++idx) {
      if (histograms[idx] == NULL) continue;
      UpdateHistogramCost(histograms[idx]);
    }
  }
}

// Implement a Lehmer random number generator with a multiplicative constant of
// 48271 and a modulo constant of 2^31 - 1.
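// Note: 2^31 - 1 is prime, so starting from a non-zero seed the state never
// becomes 0, which is what the assert below checks.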
static uint32_t MyRand(uint32_t* const seed) {
  *seed = (uint32_t)(((uint64_t)(*seed) * 48271u) % 2147483647u);
  assert(*seed > 0);
  return *seed;
}

// -----------------------------------------------------------------------------
// Histogram pairs priority queue

// Pair of histograms. Negative idx1 value means that pair is out-of-date.
typedef struct {
  int idx1;
  int idx2;
  float cost_diff;
  float cost_combo;
} HistogramPair;

typedef struct {
  HistogramPair* queue;
  int size;
  int max_size;
} HistoQueue;

static int HistoQueueInit(HistoQueue* const histo_queue, const int max_size) {
  histo_queue->size = 0;
  histo_queue->max_size = max_size;
  // We allocate max_size + 1 because the last element at index "size" is
  // used as temporary data (and it could be up to max_size).
  histo_queue->queue = (HistogramPair*)WebPSafeMalloc(
      histo_queue->max_size + 1, sizeof(*histo_queue->queue));
  return histo_queue->queue != NULL;
}

static void HistoQueueClear(HistoQueue* const histo_queue) {
  assert(histo_queue != NULL);
  WebPSafeFree(histo_queue->queue);
  histo_queue->size = 0;
  histo_queue->max_size = 0;
}

// Pop a specific pair in the queue by replacing it with the last one
// and shrinking the queue.
static void HistoQueuePopPair(HistoQueue* const histo_queue,
                              HistogramPair* const pair) {
  assert(pair >= histo_queue->queue &&
         pair < (histo_queue->queue + histo_queue->size));
  assert(histo_queue->size > 0);
  *pair = histo_queue->queue[histo_queue->size - 1];
  --histo_queue->size;
}

// Check whether a pair in the queue should be updated as head or not.
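// The queue is kept unsorted, except that the pair with the smallest (most
// negative) cost_diff is always maintained at index 0.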
static void HistoQueueUpdateHead(HistoQueue* const histo_queue,
                                 HistogramPair* const pair) {
  assert(pair->cost_diff < 0.);
  assert(pair >= histo_queue->queue &&
         pair < (histo_queue->queue + histo_queue->size));
  assert(histo_queue->size > 0);
  if (pair->cost_diff < histo_queue->queue[0].cost_diff) {
    // Replace the best pair.
    const HistogramPair tmp = histo_queue->queue[0];
    histo_queue->queue[0] = *pair;
    *pair = tmp;
  }
}

// Update the cost diff and combo of a pair of histograms. This needs to be
// called when the histograms have been merged with a third one.
static void HistoQueueUpdatePair(const VP8LHistogram* const h1,
                                 const VP8LHistogram* const h2, float threshold,
                                 HistogramPair* const pair) {
  const float sum_cost = h1->bit_cost_ + h2->bit_cost_;
  pair->cost_combo = 0.;
  GetCombinedHistogramEntropy(h1, h2, sum_cost + threshold, &pair->cost_combo);
  pair->cost_diff = pair->cost_combo - sum_cost;
}

// Create a pair from indices "idx1" and "idx2" provided its cost
// is inferior to "threshold", a negative entropy.
// It returns the cost of the pair, or 0. if it is superior to threshold.
static float HistoQueuePush(HistoQueue* const histo_queue,
                            VP8LHistogram** const histograms, int idx1,
                            int idx2, float threshold) {
  const VP8LHistogram* h1;
  const VP8LHistogram* h2;
  HistogramPair pair;

  // Stop here if the queue is full.
  if (histo_queue->size == histo_queue->max_size) return 0.;
  assert(threshold <= 0.);
  if (idx1 > idx2) {
    const int tmp = idx2;
    idx2 = idx1;
    idx1 = tmp;
  }
  pair.idx1 = idx1;
  pair.idx2 = idx2;
  h1 = histograms[idx1];
  h2 = histograms[idx2];

  HistoQueueUpdatePair(h1, h2, threshold, &pair);

  // Do not even consider the pair if it does not improve the entropy.
  if (pair.cost_diff >= threshold) return 0.;

  histo_queue->queue[histo_queue->size++] = pair;
  HistoQueueUpdateHead(histo_queue, &histo_queue->queue[histo_queue->size - 1]);

  return pair.cost_diff;
}

// -----------------------------------------------------------------------------

// Combines histograms by continuously choosing the one with the highest cost
// reduction.
static int HistogramCombineGreedy(VP8LHistogramSet* const image_histo,
                                  int* const num_used) {
  int ok = 0;
  const int image_histo_size = image_histo->size;
  int i, j;
  VP8LHistogram** const histograms = image_histo->histograms;
  // Priority queue of histogram pairs.
  HistoQueue histo_queue;

  // image_histo_size^2 for the queue size is safe. If you look at
  // HistogramCombineGreedy, and imagine that HistoQueuePush always pushes
  // data to the queue, you insert at most:
  // - image_histo_size*(image_histo_size-1)/2 (the first two for loops)
  // - image_histo_size - 1 in the last for loop at the first iteration of
  //   the while loop, image_histo_size - 2 at the second iteration ...
  //   therefore image_histo_size*(image_histo_size-1)/2 overall too
  if (!HistoQueueInit(&histo_queue, image_histo_size * image_histo_size)) {
    goto End;
  }

  for (i = 0; i < image_histo_size; ++i) {
    if (image_histo->histograms[i] == NULL) continue;
    for (j = i + 1; j < image_histo_size; ++j) {
      // Initialize queue.
      if (image_histo->histograms[j] == NULL) continue;
      HistoQueuePush(&histo_queue, histograms, i, j, 0.);
    }
  }

  while (histo_queue.size > 0) {
    const int idx1 = histo_queue.queue[0].idx1;
    const int idx2 = histo_queue.queue[0].idx2;
    HistogramAdd(histograms[idx2], histograms[idx1], histograms[idx1]);
    histograms[idx1]->bit_cost_ = histo_queue.queue[0].cost_combo;

    // Remove merged histogram.
    HistogramSetRemoveHistogram(image_histo, idx2, num_used);

    // Remove pairs intersecting the just combined best pair.
    for (i = 0; i < histo_queue.size;) {
      HistogramPair* const p = histo_queue.queue + i;
      if (p->idx1 == idx1 || p->idx2 == idx1 ||
          p->idx1 == idx2 || p->idx2 == idx2) {
        HistoQueuePopPair(&histo_queue, p);
      } else {
        HistoQueueUpdateHead(&histo_queue, p);
        ++i;
      }
    }

    // Push new pairs formed with combined histogram to the queue.
    for (i = 0; i < image_histo->size; ++i) {
      if (i == idx1 || image_histo->histograms[i] == NULL) continue;
      HistoQueuePush(&histo_queue, image_histo->histograms, idx1, i, 0.);
    }
  }

  ok = 1;

 End:
  HistoQueueClear(&histo_queue);
  return ok;
}

// Perform histogram aggregation using a stochastic approach.
// 'do_greedy' is set to 1 if a greedy approach needs to be performed
// afterwards, 0 otherwise.
static int PairComparison(const void* idx1, const void* idx2) {
  // To be used with bsearch: <0 when *idx1<*idx2, >0 if >, 0 when ==.
  return (*(int*) idx1 - *(int*) idx2);
}
static int HistogramCombineStochastic(VP8LHistogramSet* const image_histo,
                                      int* const num_used, int min_cluster_size,
                                      int* const do_greedy) {
  int j, iter;
  uint32_t seed = 1;
  int tries_with_no_success = 0;
  const int outer_iters = *num_used;
  const int num_tries_no_success = outer_iters / 2;
  VP8LHistogram** const histograms = image_histo->histograms;
  // Priority queue of histogram pairs. Its size, 'kHistoQueueSize', impacts
  // the quality of the compression and the speed: the smaller it is, the
  // faster but the worse the compression.
  HistoQueue histo_queue;
  const int kHistoQueueSize = 9;
  int ok = 0;
  // mapping from an index in image_histo with no NULL histogram to the full
  // blown image_histo.
  int* mappings;

  if (*num_used < min_cluster_size) {
    *do_greedy = 1;
    return 1;
  }

  mappings = (int*) WebPSafeMalloc(*num_used, sizeof(*mappings));
  if (mappings == NULL) return 0;
  if (!HistoQueueInit(&histo_queue, kHistoQueueSize)) goto End;
  // Fill the initial mapping.
  for (j = 0, iter = 0; iter < image_histo->size; ++iter) {
    if (histograms[iter] == NULL) continue;
    mappings[j++] = iter;
  }
  assert(j == *num_used);

  // Collapse similar histograms in 'image_histo'.
  for (iter = 0;
       iter < outer_iters && *num_used >= min_cluster_size &&
           ++tries_with_no_success < num_tries_no_success;
       ++iter) {
    int* mapping_index;
    float best_cost =
        (histo_queue.size == 0) ? 0.f : histo_queue.queue[0].cost_diff;
    int best_idx1 = -1, best_idx2 = 1;
    const uint32_t rand_range = (*num_used - 1) * (*num_used);
    // (*num_used) / 2 was chosen empirically. Less means faster but worse
    // compression.
    const int num_tries = (*num_used) / 2;

    // Pick random samples.
    for (j = 0; *num_used >= 2 && j < num_tries; ++j) {
      float curr_cost;
      // Choose two different histograms at random and try to combine them.
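      // 'tmp' below uniformly encodes an ordered pair of distinct indices in
      // [0, *num_used): idx1 = tmp / (*num_used - 1), and the second index
      // is shifted past idx1 so the two never coincide.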
      const uint32_t tmp = MyRand(&seed) % rand_range;
      uint32_t idx1 = tmp / (*num_used - 1);
      uint32_t idx2 = tmp % (*num_used - 1);
      if (idx2 >= idx1) ++idx2;
      idx1 = mappings[idx1];
      idx2 = mappings[idx2];

      // Calculate cost reduction on combination.
      curr_cost =
          HistoQueuePush(&histo_queue, histograms, idx1, idx2, best_cost);
      if (curr_cost < 0) {  // found a better pair?
        best_cost = curr_cost;
        // Stop the sampling once the queue has reached full capacity.
        if (histo_queue.size == histo_queue.max_size) break;
      }
    }
    if (histo_queue.size == 0) continue;

    // Get the best histograms.
    best_idx1 = histo_queue.queue[0].idx1;
    best_idx2 = histo_queue.queue[0].idx2;
    assert(best_idx1 < best_idx2);
    // Pop best_idx2 from mappings.
    mapping_index = (int*) bsearch(&best_idx2, mappings, *num_used,
                                   sizeof(best_idx2), &PairComparison);
    assert(mapping_index != NULL);
    memmove(mapping_index, mapping_index + 1, sizeof(*mapping_index) *
        ((*num_used) - (mapping_index - mappings) - 1));
    // Merge the histograms and remove best_idx2 from the queue.
    HistogramAdd(histograms[best_idx2], histograms[best_idx1],
                 histograms[best_idx1]);
    histograms[best_idx1]->bit_cost_ = histo_queue.queue[0].cost_combo;
    HistogramSetRemoveHistogram(image_histo, best_idx2, num_used);
    // Parse the queue and update each pair that deals with best_idx1 or
    // best_idx2.
    for (j = 0; j < histo_queue.size;) {
      HistogramPair* const p = histo_queue.queue + j;
      const int is_idx1_best = p->idx1 == best_idx1 || p->idx1 == best_idx2;
      const int is_idx2_best = p->idx2 == best_idx1 || p->idx2 == best_idx2;
      int do_eval = 0;
      // The front pair could have been duplicated by a random pick, so
      // always check for it.
      if (is_idx1_best && is_idx2_best) {
        HistoQueuePopPair(&histo_queue, p);
        continue;
      }
      // Any pair containing one of the two best indices should only refer to
      // best_idx1. Its cost should also be updated.
      if (is_idx1_best) {
        p->idx1 = best_idx1;
        do_eval = 1;
      } else if (is_idx2_best) {
        p->idx2 = best_idx1;
        do_eval = 1;
      }
      // Make sure the index order is respected.
      if (p->idx1 > p->idx2) {
        const int tmp = p->idx2;
        p->idx2 = p->idx1;
        p->idx1 = tmp;
      }
      if (do_eval) {
        // Re-evaluate the cost of an updated pair.
        HistoQueueUpdatePair(histograms[p->idx1], histograms[p->idx2], 0., p);
        if (p->cost_diff >= 0.) {
          HistoQueuePopPair(&histo_queue, p);
          continue;
        }
      }
      HistoQueueUpdateHead(&histo_queue, p);
      ++j;
    }
    tries_with_no_success = 0;
  }
  *do_greedy = (*num_used <= min_cluster_size);
  ok = 1;

 End:
  HistoQueueClear(&histo_queue);
  WebPSafeFree(mappings);
  return ok;
}

// -----------------------------------------------------------------------------
// Histogram refinement

// Find the best 'out' histogram for each of the 'in' histograms.
// At call-time, 'out' contains the histograms of the clusters.
// Note: we assume that out[]->bit_cost_ is already up-to-date.
static void HistogramRemap(const VP8LHistogramSet* const in,
                           VP8LHistogramSet* const out,
                           uint16_t* const symbols) {
  int i;
  VP8LHistogram** const in_histo = in->histograms;
  VP8LHistogram** const out_histo = out->histograms;
  const int in_size = out->max_size;
  const int out_size = out->size;
  if (out_size > 1) {
    for (i = 0; i < in_size; ++i) {
      int best_out = 0;
      float best_bits = MAX_BIT_COST;
      int k;
      if (in_histo[i] == NULL) {
        // Arbitrarily set to the previous value if unused to help future LZ77.
        symbols[i] = symbols[i - 1];
        continue;
      }
      for (k = 0; k < out_size; ++k) {
        float cur_bits;
        cur_bits = HistogramAddThresh(out_histo[k], in_histo[i], best_bits);
        if (k == 0 || cur_bits < best_bits) {
          best_bits = cur_bits;
          best_out = k;
        }
      }
      symbols[i] = best_out;
    }
  } else {
    assert(out_size == 1);
    for (i = 0; i < in_size; ++i) {
      symbols[i] = 0;
    }
  }

  // Recompute each out based on raw and symbols.
  VP8LHistogramSetClear(out);
  out->size = out_size;

  for (i = 0; i < in_size; ++i) {
    int idx;
    if (in_histo[i] == NULL) continue;
    idx = symbols[i];
    HistogramAdd(in_histo[i], out_histo[idx], out_histo[idx]);
  }
}

static float GetCombineCostFactor(int histo_size, int quality) {
  float combine_cost_factor = 0.16f;
  if (quality < 90) {
    if (histo_size > 256) combine_cost_factor /= 2.f;
    if (histo_size > 512) combine_cost_factor /= 2.f;
    if (histo_size > 1024) combine_cost_factor /= 2.f;
    if (quality <= 50) combine_cost_factor /= 2.f;
  }
  return combine_cost_factor;
}

// Given a HistogramSet 'set', the mapping of clusters 'cluster_mapping' and the
// current assignment of the cells in 'symbols', merge the clusters and
// assign the smallest possible cluster values.
static void OptimizeHistogramSymbols(const VP8LHistogramSet* const set,
                                     uint16_t* const cluster_mappings,
                                     int num_clusters,
                                     uint16_t* const cluster_mappings_tmp,
                                     uint16_t* const symbols) {
  int i, cluster_max;
  int do_continue = 1;
  // First, assign the lowest cluster to each pixel.
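  // (This loop is a union-find style path compression over 'cluster_mappings':
  // each entry is followed to, and replaced by, its final representative.)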
  while (do_continue) {
    do_continue = 0;
    for (i = 0; i < num_clusters; ++i) {
      int k;
      k = cluster_mappings[i];
      while (k != cluster_mappings[k]) {
        cluster_mappings[k] = cluster_mappings[cluster_mappings[k]];
        k = cluster_mappings[k];
      }
      if (k != cluster_mappings[i]) {
        do_continue = 1;
        cluster_mappings[i] = k;
      }
    }
  }
  // Create a mapping from a cluster id to its minimal version.
  cluster_max = 0;
  memset(cluster_mappings_tmp, 0,
         set->max_size * sizeof(*cluster_mappings_tmp));
  assert(cluster_mappings[0] == 0);
  // Re-map the ids.
  for (i = 0; i < set->max_size; ++i) {
    int cluster;
    if (symbols[i] == kInvalidHistogramSymbol) continue;
    cluster = cluster_mappings[symbols[i]];
    assert(symbols[i] < num_clusters);
    if (cluster > 0 && cluster_mappings_tmp[cluster] == 0) {
      ++cluster_max;
      cluster_mappings_tmp[cluster] = cluster_max;
    }
    symbols[i] = cluster_mappings_tmp[cluster];
  }

  // Make sure all cluster values are used.
  cluster_max = 0;
  for (i = 0; i < set->max_size; ++i) {
    if (symbols[i] == kInvalidHistogramSymbol) continue;
    if (symbols[i] <= cluster_max) continue;
    ++cluster_max;
    assert(symbols[i] == cluster_max);
  }
}

static void RemoveEmptyHistograms(VP8LHistogramSet* const image_histo) {
  uint32_t size;
  int i;
  for (i = 0, size = 0; i < image_histo->size; ++i) {
    if (image_histo->histograms[i] == NULL) continue;
    image_histo->histograms[size++] = image_histo->histograms[i];
  }
  image_histo->size = size;
}

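// Overall flow: build one histogram per tile from the backward references,
// merge similar histograms (entropy-bin pass, then stochastic and/or greedy
// combining), and finally remap every tile to its closest surviving histogram.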
int VP8LGetHistoImageSymbols(int xsize, int ysize,
                             const VP8LBackwardRefs* const refs, int quality,
                             int low_effort, int histogram_bits, int cache_bits,
                             VP8LHistogramSet* const image_histo,
                             VP8LHistogram* const tmp_histo,
                             uint16_t* const histogram_symbols,
                             const WebPPicture* const pic, int percent_range,
                             int* const percent) {
  const int histo_xsize =
      histogram_bits ? VP8LSubSampleSize(xsize, histogram_bits) : 1;
  const int histo_ysize =
      histogram_bits ? VP8LSubSampleSize(ysize, histogram_bits) : 1;
  const int image_histo_raw_size = histo_xsize * histo_ysize;
  VP8LHistogramSet* const orig_histo =
      VP8LAllocateHistogramSet(image_histo_raw_size, cache_bits);
  // Don't attempt the linear bin-partition heuristic for histograms of small
  // sizes (as bin_map will be very sparse) or for maximum quality q==100
  // (to preserve the compression gains at that level).
  const int entropy_combine_num_bins = low_effort ? NUM_PARTITIONS : BIN_SIZE;
  int entropy_combine;
  uint16_t* const map_tmp =
      WebPSafeMalloc(2 * image_histo_raw_size, sizeof(*map_tmp));
  uint16_t* const cluster_mappings = map_tmp + image_histo_raw_size;
  int num_used = image_histo_raw_size;
  if (orig_histo == NULL || map_tmp == NULL) {
    WebPEncodingSetError(pic, VP8_ENC_ERROR_OUT_OF_MEMORY);
    goto Error;
  }

  // Construct the histograms from backward references.
  HistogramBuild(xsize, histogram_bits, refs, orig_histo);
  // Copies the histograms and computes their bit_cost.
  // histogram_symbols is optimized.
  HistogramCopyAndAnalyze(orig_histo, image_histo, &num_used,
                          histogram_symbols);

  entropy_combine =
      (num_used > entropy_combine_num_bins * 2) && (quality < 100);

  if (entropy_combine) {
    uint16_t* const bin_map = map_tmp;
    const float combine_cost_factor =
        GetCombineCostFactor(image_histo_raw_size, quality);
    const uint32_t num_clusters = num_used;

    HistogramAnalyzeEntropyBin(image_histo, bin_map, low_effort);
    // Collapse histograms with similar entropy.
    HistogramCombineEntropyBin(
        image_histo, &num_used, histogram_symbols, cluster_mappings, tmp_histo,
        bin_map, entropy_combine_num_bins, combine_cost_factor, low_effort);
    OptimizeHistogramSymbols(image_histo, cluster_mappings, num_clusters,
                             map_tmp, histogram_symbols);
  }

  // Don't combine the histograms using stochastic and greedy heuristics for
  // low-effort compression mode.
  if (!low_effort || !entropy_combine) {
    const float x = quality / 100.f;
    // cubic ramp between 1 and MAX_HISTO_GREEDY:
    const int threshold_size = (int)(1 + (x * x * x) * (MAX_HISTO_GREEDY - 1));
    int do_greedy;
    if (!HistogramCombineStochastic(image_histo, &num_used, threshold_size,
                                    &do_greedy)) {
      WebPEncodingSetError(pic, VP8_ENC_ERROR_OUT_OF_MEMORY);
      goto Error;
    }
    if (do_greedy) {
      RemoveEmptyHistograms(image_histo);
      if (!HistogramCombineGreedy(image_histo, &num_used)) {
        WebPEncodingSetError(pic, VP8_ENC_ERROR_OUT_OF_MEMORY);
        goto Error;
      }
    }
  }

  // Find the optimal map from original histograms to the final ones.
  RemoveEmptyHistograms(image_histo);
  HistogramRemap(orig_histo, image_histo, histogram_symbols);

  if (!WebPReportProgress(pic, *percent + percent_range, percent)) {
    goto Error;
  }

 Error:
  VP8LFreeHistogramSet(orig_histo);
  WebPSafeFree(map_tmp);
  return (pic->error_code == VP8_ENC_OK);
}