// Copyright 2012 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Utilities for building and looking up Huffman trees.
//
// Author: Urvang Joshi (urvang@google.com)

#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include "src/utils/huffman_utils.h"
#include "src/utils/utils.h"
#include "src/webp/format_constants.h"

// The Huffman tree group index read via DecodeImageStream is represented in
// two (red and green) bytes, i.e. 16 bits, hence at most 0x10000 groups.
#define MAX_HTREE_GROUPS 0x10000

HTreeGroup* VP8LHtreeGroupsNew(int num_htree_groups) {
  HTreeGroup* const htree_groups =
      (HTreeGroup*)WebPSafeMalloc(num_htree_groups, sizeof(*htree_groups));
  if (htree_groups == NULL) {
    return NULL;
  }
  assert(num_htree_groups <= MAX_HTREE_GROUPS);
  return htree_groups;
}

void VP8LHtreeGroupsFree(HTreeGroup* const htree_groups) {
  if (htree_groups != NULL) {
    WebPSafeFree(htree_groups);
  }
}

// Returns reverse(reverse(key, len) + 1, len), where reverse(key, len) is the
// bit-wise reversal of the len least significant bits of key.
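// For example, with len == 3 and key == 4 (0b100, the reversal of code 0b001),
// the next code 0b010 is returned in reversed form as 2 (0b010). If all 'len'
// low bits of key are already set, 'step' reaches 0 and key is returned
// unchanged.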
static WEBP_INLINE uint32_t GetNextKey(uint32_t key, int len) {
  uint32_t step = 1 << (len - 1);
  while (key & step) {
    step >>= 1;
  }
  return step ? (key & (step - 1)) + step : key;
}

// Stores code in table[0], table[step], table[2*step], ..., table[end - step].
// Assumes that end is an integer multiple of step.
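// For example, ReplicateValue(table, 4, 16, code) writes 'code' into
// table[12], table[8], table[4] and table[0].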
static WEBP_INLINE void ReplicateValue(HuffmanCode* table,
                                       int step, int end,
                                       HuffmanCode code) {
  assert(end % step == 0);
  do {
    end -= step;
    table[end] = code;
  } while (end > 0);
}

// Returns the table width of the next 2nd level table. 'count' is the
// histogram of bit lengths for the remaining symbols, 'len' is the code
// length of the next symbol to be processed.
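// For example, with root_bits == 8, len == 9, count[9] == 1, count[10] == 2
// and no remaining longer codes, the loop computes left = 2 - 1 = 1, then
// left = 2 - 2 = 0 at len == 10, so a table width of 2 (== 10 - 8) bits is
// returned.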
static WEBP_INLINE int NextTableBitSize(const int* const count,
                                        int len, int root_bits) {
  int left = 1 << (len - root_bits);
  while (len < MAX_ALLOWED_CODE_LENGTH) {
    left -= count[len];
    if (left <= 0) break;
    ++len;
    left <<= 1;
  }
  return len - root_bits;
}

// Builds the Huffman lookup table at 'root_table', or only computes its size
// when 'root_table' is NULL. Returns the total number of table entries (root
// table plus all 2nd level tables), or 0 if the code lengths do not describe
// a valid Huffman code. sorted[code_lengths_size] is a pre-allocated array
// for sorting symbols by code length; it must be NULL if and only if
// 'root_table' is NULL.
static int BuildHuffmanTable(HuffmanCode* const root_table, int root_bits,
                             const int code_lengths[], int code_lengths_size,
                             uint16_t sorted[]) {
  HuffmanCode* table = root_table;  // next available space in table
  int total_size = 1 << root_bits;  // total size of root + 2nd level tables
  int len;                          // current code length
  int symbol;                       // symbol index in original or sorted table
  // number of codes of each length:
  int count[MAX_ALLOWED_CODE_LENGTH + 1] = { 0 };
  // offsets in sorted table for each length:
  int offset[MAX_ALLOWED_CODE_LENGTH + 1];

  assert(code_lengths_size != 0);
  assert(code_lengths != NULL);
  assert((root_table != NULL && sorted != NULL) ||
         (root_table == NULL && sorted == NULL));
  assert(root_bits > 0);

  // Build histogram of code lengths.
  for (symbol = 0; symbol < code_lengths_size; ++symbol) {
    if (code_lengths[symbol] > MAX_ALLOWED_CODE_LENGTH) {
      return 0;
    }
    ++count[code_lengths[symbol]];
  }

  // Error: all code lengths are zero.
  if (count[0] == code_lengths_size) {
    return 0;
  }

  // Generate offsets into sorted symbol table by code length.
  offset[1] = 0;
  for (len = 1; len < MAX_ALLOWED_CODE_LENGTH; ++len) {
    if (count[len] > (1 << len)) {
      return 0;
    }
    offset[len + 1] = offset[len] + count[len];
  }

  // Sort symbols by length, by symbol order within each length.
  for (symbol = 0; symbol < code_lengths_size; ++symbol) {
    const int symbol_code_length = code_lengths[symbol];
    if (code_lengths[symbol] > 0) {
      if (sorted != NULL) {
        sorted[offset[symbol_code_length]++] = symbol;
      } else {
        offset[symbol_code_length]++;
      }
    }
  }

  // Special case: only one symbol has a non-zero code length; it is given a
  // zero-bit code and fills the whole root table.
  if (offset[MAX_ALLOWED_CODE_LENGTH] == 1) {
    if (sorted != NULL) {
      HuffmanCode code;
      code.bits = 0;
      code.value = (uint16_t)sorted[0];
      ReplicateValue(table, 1, total_size, code);
    }
    return total_size;
  }

  {
    int step;              // step size to replicate values in current table
    uint32_t low = 0xffffffffu;        // low bits for current root entry
    uint32_t mask = total_size - 1;    // mask for low bits
    uint32_t key = 0;      // reversed prefix code
    int num_nodes = 1;     // number of Huffman tree nodes
    int num_open = 1;      // number of open branches in current tree level
    int table_bits = root_bits;        // key length of current table
    int table_size = 1 << table_bits;  // size of current table
    symbol = 0;
    // Fill in root table.
    for (len = 1, step = 2; len <= root_bits; ++len, step <<= 1) {
      num_open <<= 1;
      num_nodes += num_open;
      num_open -= count[len];
      if (num_open < 0) {
        return 0;
      }
      if (root_table == NULL) continue;
      for (; count[len] > 0; --count[len]) {
        HuffmanCode code;
        code.bits = (uint8_t)len;
        code.value = (uint16_t)sorted[symbol++];
        ReplicateValue(&table[key], step, table_size, code);
        key = GetNextKey(key, len);
      }
    }

    // Fill in 2nd level tables and add pointers to root table.
    for (len = root_bits + 1, step = 2; len <= MAX_ALLOWED_CODE_LENGTH;
         ++len, step <<= 1) {
      num_open <<= 1;
      num_nodes += num_open;
      num_open -= count[len];
      if (num_open < 0) {
        return 0;
      }
      for (; count[len] > 0; --count[len]) {
        HuffmanCode code;
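        // When the low 'root_bits' of the reversed code change, the code falls
        // under a new root prefix: a new 2nd level table is started and the
        // root entry at 'low' is pointed to it ('bits' stores the total depth
        // root_bits + table_bits, 'value' the entry offset from that root
        // entry to the sub-table).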
182 | if ((key & mask) != low) { |
183 | if (root_table != NULL) table += table_size; |
184 | table_bits = NextTableBitSize(count, len, root_bits); |
185 | table_size = 1 << table_bits; |
186 | total_size += table_size; |
187 | low = key & mask; |
188 | if (root_table != NULL) { |
189 | root_table[low].bits = (uint8_t)(table_bits + root_bits); |
190 | root_table[low].value = (uint16_t)((table - root_table) - low); |
191 | } |
192 | } |
193 | if (root_table != NULL) { |
194 | code.bits = (uint8_t)(len - root_bits); |
195 | code.value = (uint16_t)sorted[symbol++]; |
196 | ReplicateValue(&table[key >> root_bits], step, table_size, code); |
197 | } |
198 | key = GetNextKey(key, len); |
199 | } |
200 | } |
201 | |
202 | // Check if tree is full. |
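    // A complete code forms a full binary tree: with L leaf symbols it has
    // 2 * L - 1 nodes, and offset[MAX_ALLOWED_CODE_LENGTH] now equals the
    // number of symbols with a non-zero code length, so an incomplete code is
    // rejected here.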
    if (num_nodes != 2 * offset[MAX_ALLOWED_CODE_LENGTH] - 1) {
      return 0;
    }
  }

  return total_size;
}

// Maximum code_lengths_size is 2328 (reached for 11-bit color_cache_bits).
// More commonly, the value is around 280.
#define MAX_CODE_LENGTHS_SIZE \
  ((1 << MAX_CACHE_BITS) + NUM_LITERAL_CODES + NUM_LENGTH_CODES)
// Cut-off value for switching between heap and stack allocation.
#define SORTED_SIZE_CUTOFF 512
int VP8LBuildHuffmanTable(HuffmanTables* const root_table, int root_bits,
                          const int code_lengths[], int code_lengths_size) {
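  // First pass with a NULL table only computes the total number of entries
  // required.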
  const int total_size =
      BuildHuffmanTable(NULL, root_bits, code_lengths, code_lengths_size, NULL);
  assert(code_lengths_size <= MAX_CODE_LENGTHS_SIZE);
  if (total_size == 0 || root_table == NULL) return total_size;

  if (root_table->curr_segment->curr_table + total_size >=
      root_table->curr_segment->start + root_table->curr_segment->size) {
    // If 'root_table' does not have enough memory, allocate a new segment.
    // The available part of root_table->curr_segment is left unused because we
    // need a contiguous buffer.
    const int segment_size = root_table->curr_segment->size;
    struct HuffmanTablesSegment* next =
        (HuffmanTablesSegment*)WebPSafeMalloc(1, sizeof(*next));
    if (next == NULL) return 0;
    // Fill the new segment.
    // We need at least 'total_size' entries, but if that value is small it is
    // better to allocate a bigger chunk to avoid further allocations later;
    // 'segment_size' is used as that lower bound (the exact value is
    // arbitrary).
    next->size = total_size > segment_size ? total_size : segment_size;
    next->start =
        (HuffmanCode*)WebPSafeMalloc(next->size, sizeof(*next->start));
    if (next->start == NULL) {
      WebPSafeFree(next);
      return 0;
    }
    next->curr_table = next->start;
    next->next = NULL;
    // Point to the new segment.
    root_table->curr_segment->next = next;
    root_table->curr_segment = next;
  }
  if (code_lengths_size <= SORTED_SIZE_CUTOFF) {
    // use local stack-allocated array.
    uint16_t sorted[SORTED_SIZE_CUTOFF];
    BuildHuffmanTable(root_table->curr_segment->curr_table, root_bits,
                      code_lengths, code_lengths_size, sorted);
  } else {  // rare case. Use heap allocation.
    uint16_t* const sorted =
        (uint16_t*)WebPSafeMalloc(code_lengths_size, sizeof(*sorted));
    if (sorted == NULL) return 0;
    BuildHuffmanTable(root_table->curr_segment->curr_table, root_bits,
                      code_lengths, code_lengths_size, sorted);
    WebPSafeFree(sorted);
  }
  return total_size;
}

int VP8LHuffmanTablesAllocate(int size, HuffmanTables* huffman_tables) {
  // Have 'segment' point to the first segment for now, 'root'.
  HuffmanTablesSegment* const root = &huffman_tables->root;
  huffman_tables->curr_segment = root;
  // Allocate root.
  root->start = (HuffmanCode*)WebPSafeMalloc(size, sizeof(*root->start));
  if (root->start == NULL) return 0;
  root->curr_table = root->start;
  root->next = NULL;
  root->size = size;
  return 1;
}

void VP8LHuffmanTablesDeallocate(HuffmanTables* const huffman_tables) {
  HuffmanTablesSegment *current, *next;
  if (huffman_tables == NULL) return;
  // Free the root node.
  current = &huffman_tables->root;
  next = current->next;
  WebPSafeFree(current->start);
  current->start = NULL;
  current->next = NULL;
  current = next;
  // Free the following nodes.
  while (current != NULL) {
    next = current->next;
    WebPSafeFree(current->start);
    WebPSafeFree(current);
    current = next;
  }
}