// This file is part of meshoptimizer library; see meshoptimizer.h for version/license details
#include "meshoptimizer.h"

#include <assert.h>
#include <string.h>

// This work is based on:
// John McDonald, Mark Kilgard. Crack-Free Point-Normal Triangles using Adjacent Edge Normals. 2010
namespace meshopt
{

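// MurmurHash2-based incremental hash over 4-byte blocks; trailing bytes past the last full block
// are not mixed in, which only affects hash quality since callers verify equality with memcmp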
static unsigned int hashUpdate4(unsigned int h, const unsigned char* key, size_t len)
{
	// MurmurHash2
	const unsigned int m = 0x5bd1e995;
	const int r = 24;

	while (len >= 4)
	{
		unsigned int k = *reinterpret_cast<const unsigned int*>(key);

		k *= m;
		k ^= k >> r;
		k *= m;

		h *= m;
		h ^= k;

		key += 4;
		len -= 4;
	}

	return h;
}

struct VertexHasher
{
	const unsigned char* vertices;
	size_t vertex_size;
	size_t vertex_stride;

	size_t hash(unsigned int index) const
	{
		return hashUpdate4(0, vertices + index * vertex_stride, vertex_size);
	}

	bool equal(unsigned int lhs, unsigned int rhs) const
	{
		return memcmp(vertices + lhs * vertex_stride, vertices + rhs * vertex_stride, vertex_size) == 0;
	}
};

struct VertexStreamHasher
{
	const meshopt_Stream* streams;
	size_t stream_count;

	size_t hash(unsigned int index) const
	{
		unsigned int h = 0;

		for (size_t i = 0; i < stream_count; ++i)
		{
			const meshopt_Stream& s = streams[i];
			const unsigned char* data = static_cast<const unsigned char*>(s.data);

			h = hashUpdate4(h, data + index * s.stride, s.size);
		}

		return h;
	}

	bool equal(unsigned int lhs, unsigned int rhs) const
	{
		for (size_t i = 0; i < stream_count; ++i)
		{
			const meshopt_Stream& s = streams[i];
			const unsigned char* data = static_cast<const unsigned char*>(s.data);

			if (memcmp(data + lhs * s.stride, data + rhs * s.stride, s.size) != 0)
				return false;
		}

		return true;
	}
};

struct EdgeHasher
{
	const unsigned int* remap;

	size_t hash(unsigned long long edge) const
	{
		unsigned int e0 = unsigned(edge >> 32);
		unsigned int e1 = unsigned(edge);

		unsigned int h1 = remap[e0];
		unsigned int h2 = remap[e1];

		const unsigned int m = 0x5bd1e995;

		// MurmurHash64B finalizer
		h1 ^= h2 >> 18;
		h1 *= m;
		h2 ^= h1 >> 22;
		h2 *= m;
		h1 ^= h2 >> 17;
		h1 *= m;
		h2 ^= h1 >> 19;
		h2 *= m;

		return h2;
	}

	bool equal(unsigned long long lhs, unsigned long long rhs) const
	{
		unsigned int l0 = unsigned(lhs >> 32);
		unsigned int l1 = unsigned(lhs);

		unsigned int r0 = unsigned(rhs >> 32);
		unsigned int r1 = unsigned(rhs);

		return remap[l0] == remap[r0] && remap[l1] == remap[r1];
	}
};

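// returns the smallest power-of-two table size that keeps the load factor at or below ~80%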
static size_t hashBuckets(size_t count)
{
	size_t buckets = 1;
	while (buckets < count + count / 4)
		buckets *= 2;

	return buckets;
}

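// returns a pointer to the slot holding key, or to the first free slot along its probe chain;
// the caller is expected to size the table via hashBuckets so that a free slot always exists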
template <typename T, typename Hash>
static T* hashLookup(T* table, size_t buckets, const Hash& hash, const T& key, const T& empty)
{
	assert(buckets > 0);
	assert((buckets & (buckets - 1)) == 0);

	size_t hashmod = buckets - 1;
	size_t bucket = hash.hash(key) & hashmod;

	for (size_t probe = 0; probe <= hashmod; ++probe)
	{
		T& item = table[bucket];

		if (item == empty)
			return &item;

		if (hash.equal(item, key))
			return &item;

		// hash collision, quadratic probing
		bucket = (bucket + probe + 1) & hashmod;
	}

	assert(false && "Hash table is full"); // unreachable
	return 0;
}

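// for each vertex, remap stores the index of the first vertex with a bit-identical position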
static void buildPositionRemap(unsigned int* remap, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride, meshopt_Allocator& allocator)
{
	VertexHasher vertex_hasher = {reinterpret_cast<const unsigned char*>(vertex_positions), 3 * sizeof(float), vertex_positions_stride};

	size_t vertex_table_size = hashBuckets(vertex_count);
	unsigned int* vertex_table = allocator.allocate<unsigned int>(vertex_table_size);
	memset(vertex_table, -1, vertex_table_size * sizeof(unsigned int));

	for (size_t i = 0; i < vertex_count; ++i)
	{
		unsigned int index = unsigned(i);
		unsigned int* entry = hashLookup(vertex_table, vertex_table_size, vertex_hasher, index, ~0u);

		if (*entry == ~0u)
			*entry = index;

		remap[index] = *entry;
	}
}

} // namespace meshopt

size_t meshopt_generateVertexRemap(unsigned int* destination, const unsigned int* indices, size_t index_count, const void* vertices, size_t vertex_count, size_t vertex_size)
{
	using namespace meshopt;

	assert(indices || index_count == vertex_count);
	assert(!indices || index_count % 3 == 0);
	assert(vertex_size > 0 && vertex_size <= 256);

	meshopt_Allocator allocator;

	memset(destination, -1, vertex_count * sizeof(unsigned int));

	VertexHasher hasher = {static_cast<const unsigned char*>(vertices), vertex_size, vertex_size};

	size_t table_size = hashBuckets(vertex_count);
	unsigned int* table = allocator.allocate<unsigned int>(table_size);
	memset(table, -1, table_size * sizeof(unsigned int));

	unsigned int next_vertex = 0;

	for (size_t i = 0; i < index_count; ++i)
	{
		unsigned int index = indices ? indices[i] : unsigned(i);
		assert(index < vertex_count);

		if (destination[index] == ~0u)
		{
			unsigned int* entry = hashLookup(table, table_size, hasher, index, ~0u);

			if (*entry == ~0u)
			{
				*entry = index;

				destination[index] = next_vertex++;
			}
			else
			{
				assert(destination[*entry] != ~0u);

				destination[index] = destination[*entry];
			}
		}
	}

	assert(next_vertex <= vertex_count);

	return next_vertex;
}
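
// Typical usage sketch (illustrative, not part of the library; `unindexed_vertices`,
// `unindexed_count` and `Vertex` are placeholder names for caller-side data):
//
//   std::vector<unsigned int> remap(unindexed_count);
//   size_t unique = meshopt_generateVertexRemap(remap.data(), NULL, unindexed_count, unindexed_vertices, unindexed_count, sizeof(Vertex));
//
//   std::vector<Vertex> vertices(unique);
//   std::vector<unsigned int> indices(unindexed_count);
//   meshopt_remapVertexBuffer(vertices.data(), unindexed_vertices, unindexed_count, sizeof(Vertex), remap.data());
//   meshopt_remapIndexBuffer(indices.data(), NULL, unindexed_count, remap.data());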

size_t meshopt_generateVertexRemapMulti(unsigned int* destination, const unsigned int* indices, size_t index_count, size_t vertex_count, const struct meshopt_Stream* streams, size_t stream_count)
{
	using namespace meshopt;

	assert(indices || index_count == vertex_count);
	assert(index_count % 3 == 0);
	assert(stream_count > 0 && stream_count <= 16);

	for (size_t i = 0; i < stream_count; ++i)
	{
		assert(streams[i].size > 0 && streams[i].size <= 256);
		assert(streams[i].size <= streams[i].stride);
	}

	meshopt_Allocator allocator;

	memset(destination, -1, vertex_count * sizeof(unsigned int));

	VertexStreamHasher hasher = {streams, stream_count};

	size_t table_size = hashBuckets(vertex_count);
	unsigned int* table = allocator.allocate<unsigned int>(table_size);
	memset(table, -1, table_size * sizeof(unsigned int));

	unsigned int next_vertex = 0;

	for (size_t i = 0; i < index_count; ++i)
	{
		unsigned int index = indices ? indices[i] : unsigned(i);
		assert(index < vertex_count);

		if (destination[index] == ~0u)
		{
			unsigned int* entry = hashLookup(table, table_size, hasher, index, ~0u);

			if (*entry == ~0u)
			{
				*entry = index;

				destination[index] = next_vertex++;
			}
			else
			{
				assert(destination[*entry] != ~0u);

				destination[index] = destination[*entry];
			}
		}
	}

	assert(next_vertex <= vertex_count);

	return next_vertex;
}
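
// Typical usage sketch for multi-stream input (illustrative; the `positions` and `normals`
// arrays and their tightly packed float3 layout are placeholder assumptions):
//
//   meshopt_Stream streams[] = {
//       {positions, sizeof(float) * 3, sizeof(float) * 3},
//       {normals, sizeof(float) * 3, sizeof(float) * 3},
//   };
//
//   std::vector<unsigned int> remap(vertex_count);
//   size_t unique = meshopt_generateVertexRemapMulti(remap.data(), indices, index_count, vertex_count, streams, 2);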

void meshopt_remapVertexBuffer(void* destination, const void* vertices, size_t vertex_count, size_t vertex_size, const unsigned int* remap)
{
	assert(vertex_size > 0 && vertex_size <= 256);

	meshopt_Allocator allocator;

	// support in-place remap
	if (destination == vertices)
	{
		unsigned char* vertices_copy = allocator.allocate<unsigned char>(vertex_count * vertex_size);
		memcpy(vertices_copy, vertices, vertex_count * vertex_size);
		vertices = vertices_copy;
	}

	for (size_t i = 0; i < vertex_count; ++i)
	{
		if (remap[i] != ~0u)
		{
			assert(remap[i] < vertex_count);

			memcpy(static_cast<unsigned char*>(destination) + remap[i] * vertex_size, static_cast<const unsigned char*>(vertices) + i * vertex_size, vertex_size);
		}
	}
}

void meshopt_remapIndexBuffer(unsigned int* destination, const unsigned int* indices, size_t index_count, const unsigned int* remap)
{
	assert(index_count % 3 == 0);

	for (size_t i = 0; i < index_count; ++i)
	{
		unsigned int index = indices ? indices[i] : unsigned(i);
		assert(remap[index] != ~0u);

		destination[i] = remap[index];
	}
}

void meshopt_generateShadowIndexBuffer(unsigned int* destination, const unsigned int* indices, size_t index_count, const void* vertices, size_t vertex_count, size_t vertex_size, size_t vertex_stride)
{
	using namespace meshopt;

	assert(indices);
	assert(index_count % 3 == 0);
	assert(vertex_size > 0 && vertex_size <= 256);
	assert(vertex_size <= vertex_stride);

	meshopt_Allocator allocator;

	unsigned int* remap = allocator.allocate<unsigned int>(vertex_count);
	memset(remap, -1, vertex_count * sizeof(unsigned int));

	VertexHasher hasher = {static_cast<const unsigned char*>(vertices), vertex_size, vertex_stride};

	size_t table_size = hashBuckets(vertex_count);
	unsigned int* table = allocator.allocate<unsigned int>(table_size);
	memset(table, -1, table_size * sizeof(unsigned int));

	for (size_t i = 0; i < index_count; ++i)
	{
		unsigned int index = indices[i];
		assert(index < vertex_count);

		if (remap[index] == ~0u)
		{
			unsigned int* entry = hashLookup(table, table_size, hasher, index, ~0u);

			if (*entry == ~0u)
				*entry = index;

			remap[index] = *entry;
		}

		destination[i] = remap[index];
	}
}
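
// Typical usage sketch (illustrative; `Vertex` is a placeholder type whose first member is the
// float3 position): builds a shadow index buffer that treats vertices with identical positions
// as interchangeable, which is useful for depth-only/shadow passes.
//
//   std::vector<unsigned int> shadow_indices(index_count);
//   meshopt_generateShadowIndexBuffer(shadow_indices.data(), indices, index_count, vertices.data(), vertices.size(), 3 * sizeof(float), sizeof(Vertex));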

void meshopt_generateShadowIndexBufferMulti(unsigned int* destination, const unsigned int* indices, size_t index_count, size_t vertex_count, const struct meshopt_Stream* streams, size_t stream_count)
{
	using namespace meshopt;

	assert(indices);
	assert(index_count % 3 == 0);
	assert(stream_count > 0 && stream_count <= 16);

	for (size_t i = 0; i < stream_count; ++i)
	{
		assert(streams[i].size > 0 && streams[i].size <= 256);
		assert(streams[i].size <= streams[i].stride);
	}

	meshopt_Allocator allocator;

	unsigned int* remap = allocator.allocate<unsigned int>(vertex_count);
	memset(remap, -1, vertex_count * sizeof(unsigned int));

	VertexStreamHasher hasher = {streams, stream_count};

	size_t table_size = hashBuckets(vertex_count);
	unsigned int* table = allocator.allocate<unsigned int>(table_size);
	memset(table, -1, table_size * sizeof(unsigned int));

	for (size_t i = 0; i < index_count; ++i)
	{
		unsigned int index = indices[i];
		assert(index < vertex_count);

		if (remap[index] == ~0u)
		{
			unsigned int* entry = hashLookup(table, table_size, hasher, index, ~0u);

			if (*entry == ~0u)
				*entry = index;

			remap[index] = *entry;
		}

		destination[i] = remap[index];
	}
}

void meshopt_generateAdjacencyIndexBuffer(unsigned int* destination, const unsigned int* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride)
{
	using namespace meshopt;

	assert(index_count % 3 == 0);
	assert(vertex_positions_stride >= 12 && vertex_positions_stride <= 256);
	assert(vertex_positions_stride % sizeof(float) == 0);

	meshopt_Allocator allocator;

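	// next[e] is the triangle corner after e; the extra entry lets next[e + 1] wrap around without a modulo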
	static const int next[4] = {1, 2, 0, 1};

	// build position remap: for each vertex, which other (canonical) vertex does it map to?
	unsigned int* remap = allocator.allocate<unsigned int>(vertex_count);
	buildPositionRemap(remap, vertex_positions, vertex_count, vertex_positions_stride, allocator);

	// build edge set; this stores all triangle edges but we can look these up by any other wedge
	EdgeHasher edge_hasher = {remap};

	size_t edge_table_size = hashBuckets(index_count);
	unsigned long long* edge_table = allocator.allocate<unsigned long long>(edge_table_size);
	unsigned int* edge_vertex_table = allocator.allocate<unsigned int>(edge_table_size);

	memset(edge_table, -1, edge_table_size * sizeof(unsigned long long));
	memset(edge_vertex_table, -1, edge_table_size * sizeof(unsigned int));

	for (size_t i = 0; i < index_count; i += 3)
	{
		for (int e = 0; e < 3; ++e)
		{
			unsigned int i0 = indices[i + e];
			unsigned int i1 = indices[i + next[e]];
			unsigned int i2 = indices[i + next[e + 1]];
			assert(i0 < vertex_count && i1 < vertex_count && i2 < vertex_count);

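			// pack the directed edge i0->i1 into a 64-bit key; EdgeHasher hashes and compares it via remapped positions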
			unsigned long long edge = ((unsigned long long)i0 << 32) | i1;
			unsigned long long* entry = hashLookup(edge_table, edge_table_size, edge_hasher, edge, ~0ull);

			if (*entry == ~0ull)
			{
				*entry = edge;

				// store vertex opposite to the edge
				edge_vertex_table[entry - edge_table] = i2;
			}
		}
	}

	// build resulting index buffer: 6 indices for each input triangle
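	// layout matches triangle-with-adjacency primitives (e.g. GL_TRIANGLES_ADJACENCY): v0, adj01, v1, adj12, v2, adj20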
	for (size_t i = 0; i < index_count; i += 3)
	{
		unsigned int patch[6];

		for (int e = 0; e < 3; ++e)
		{
			unsigned int i0 = indices[i + e];
			unsigned int i1 = indices[i + next[e]];
			assert(i0 < vertex_count && i1 < vertex_count);

			// note: this refers to the opposite edge!
			unsigned long long edge = ((unsigned long long)i1 << 32) | i0;
			unsigned long long* oppe = hashLookup(edge_table, edge_table_size, edge_hasher, edge, ~0ull);

			patch[e * 2 + 0] = i0;
			patch[e * 2 + 1] = (*oppe == ~0ull) ? i0 : edge_vertex_table[oppe - edge_table];
		}

		memcpy(destination + i * 2, patch, sizeof(patch));
	}
}

void meshopt_generateTessellationIndexBuffer(unsigned int* destination, const unsigned int* indices, size_t index_count, const float* vertex_positions, size_t vertex_count, size_t vertex_positions_stride)
{
	using namespace meshopt;

	assert(index_count % 3 == 0);
	assert(vertex_positions_stride >= 12 && vertex_positions_stride <= 256);
	assert(vertex_positions_stride % sizeof(float) == 0);

	meshopt_Allocator allocator;

	static const int next[3] = {1, 2, 0};

	// build position remap: for each vertex, which other (canonical) vertex does it map to?
	unsigned int* remap = allocator.allocate<unsigned int>(vertex_count);
	buildPositionRemap(remap, vertex_positions, vertex_count, vertex_positions_stride, allocator);

	// build edge set; this stores all triangle edges but we can look these up by any other wedge
	EdgeHasher edge_hasher = {remap};

	size_t edge_table_size = hashBuckets(index_count);
	unsigned long long* edge_table = allocator.allocate<unsigned long long>(edge_table_size);
	memset(edge_table, -1, edge_table_size * sizeof(unsigned long long));

	for (size_t i = 0; i < index_count; i += 3)
	{
		for (int e = 0; e < 3; ++e)
		{
			unsigned int i0 = indices[i + e];
			unsigned int i1 = indices[i + next[e]];
			assert(i0 < vertex_count && i1 < vertex_count);

			unsigned long long edge = ((unsigned long long)i0 << 32) | i1;
			unsigned long long* entry = hashLookup(edge_table, edge_table_size, edge_hasher, edge, ~0ull);

			if (*entry == ~0ull)
				*entry = edge;
		}
	}

	// build resulting index buffer: 12 indices for each input triangle
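	// PN-AEN patch layout: corners in 0-2, edge-adjacent vertex pairs in 3-8, dominant (position-unique) vertices in 9-11;
	// intended to be rendered as 12-vertex patches with hardware tessellation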
	for (size_t i = 0; i < index_count; i += 3)
	{
		unsigned int patch[12];

		for (int e = 0; e < 3; ++e)
		{
			unsigned int i0 = indices[i + e];
			unsigned int i1 = indices[i + next[e]];
			assert(i0 < vertex_count && i1 < vertex_count);

			// note: this refers to the opposite edge!
			unsigned long long edge = ((unsigned long long)i1 << 32) | i0;
			unsigned long long oppe = *hashLookup(edge_table, edge_table_size, edge_hasher, edge, ~0ull);

			// use the same edge if opposite edge doesn't exist (border)
			oppe = (oppe == ~0ull) ? edge : oppe;

			// triangle index (0, 1, 2)
			patch[e] = i0;

			// opposite edge (3, 4; 5, 6; 7, 8)
			patch[3 + e * 2 + 0] = unsigned(oppe);
			patch[3 + e * 2 + 1] = unsigned(oppe >> 32);

			// dominant vertex (9, 10, 11)
			patch[9 + e] = remap[i0];
		}

		memcpy(destination + i * 4, patch, sizeof(patch));
	}
}