// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

// ---------------------------------------------------------------------------
// NativeFormatWriter
//
// Utilities to write native data to images that can be read by the NativeFormat.Reader class
// ---------------------------------------------------------------------------

#include "common.h"

#include "nativeformatwriter.h"

#include <clr_std/algorithm>

namespace NativeFormat
{
    //
    // Same encoding as what's used by CTL
    //
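    // The encoding is variable length: the low bits of the first byte say how many bytes the
    // value occupies (0 -> 1 byte, 01 -> 2 bytes, 011 -> 3 bytes, 0111 -> 4 bytes, 1111 -> a
    // full 4-byte value follows), and the remaining bits of the first byte together with the
    // following bytes hold the value itself, least significant bits first.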
    void NativeWriter::WriteUnsigned(unsigned d)
    {
        if (d < 128)
        {
            WriteByte((byte)(d*2 + 0));
        }
        else if (d < 128*128)
        {
            WriteByte((byte)(d*4 + 1));
            WriteByte((byte)(d >> 6));
        }
        else if (d < 128*128*128)
        {
            WriteByte((byte)(d*8 + 3));
            WriteByte((byte)(d >> 5));
            WriteByte((byte)(d >> 13));
        }
        else if (d < 128*128*128*128)
        {
            WriteByte((byte)(d*16 + 7));
            WriteByte((byte)(d >> 4));
            WriteByte((byte)(d >> 12));
            WriteByte((byte)(d >> 20));
        }
        else
        {
            WriteByte((byte)15);
            WriteUInt32(d);
        }
    }
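    // Example of the encoding above: 300 does not fit in 7 bits, so it takes the two-byte form:
    // first byte (byte)(300*4 + 1) = 0xB1 (low bits 01), second byte (byte)(300 >> 6) = 0x04;
    // a matching reader recovers (0xB1 >> 2) | (0x04 << 6) = 44 + 256 = 300.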

    unsigned NativeWriter::GetUnsignedEncodingSize(unsigned d)
    {
        if (d < 128) return 1;
        if (d < 128*128) return 2;
        if (d < 128*128*128) return 3;
        if (d < 128*128*128*128) return 4;
        return 5;
    }

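    // Signed values use the same variable-length layout; the length test is done on the biased
    // value (d + 64, d + 64*128, ...) so that each size covers the symmetric signed range, and
    // the matching reader is expected to sign-extend the recovered bits.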
    void NativeWriter::WriteSigned(int i)
    {
        unsigned d = (unsigned)i;
        if (d + 64 < 128)
        {
            WriteByte((byte)(d*2 + 0));
        }
        else if (d + 64*128 < 128*128)
        {
            WriteByte((byte)(d*4 + 1));
            WriteByte((byte)(d >> 6));
        }
        else if (d + 64*128*128 < 128*128*128)
        {
            WriteByte((byte)(d*8 + 3));
            WriteByte((byte)(d >> 5));
            WriteByte((byte)(d >> 13));
        }
        else if (d + 64*128*128*128 < 128*128*128*128)
        {
            WriteByte((byte)(d*16 + 7));
            WriteByte((byte)(d >> 4));
            WriteByte((byte)(d >> 12));
            WriteByte((byte)(d >> 20));
        }
        else
        {
            WriteByte((byte)15);
            WriteUInt32(d);
        }
    }

    void NativeWriter::WriteRelativeOffset(Vertex * pVal)
    {
        WriteSigned(GetExpectedOffset(pVal) - GetCurrentOffset());
    }

    int NativeWriter::GetExpectedOffset(Vertex * pVal)
    {
        assert(pVal->m_offset != Vertex::NotPlaced);

        if (pVal->m_iteration == -1)
        {
            // If the offsets are not determined yet, use the maximum possible encoding
            return 0x7FFFFFFF;
        }

        int offset = pVal->m_offset;

        // If the offset was not updated in this iteration yet, adjust it by the delta accumulated so far
        // in this iteration. This adjustment allows the offsets to converge faster.
        if (pVal->m_iteration < m_iteration)
            offset += m_offsetAdjustment;

        return offset;
    }

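    // Save() emits every vertex and iterates until the relative-offset encodings stabilize: an
    // initial pass using worst-case offsets, then a "shrinking" phase that only lets offsets move
    // down (capped at a fixed number of iterations), and finally a "growing" phase that only lets
    // offsets move up, inserting padding where needed, which is guaranteed to converge.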
    vector<byte>& NativeWriter::Save()
    {
        assert(m_phase == Initial);

        for (auto pSection : m_Sections) for (auto pVertex : *pSection)
        {
            pVertex->m_offset = GetCurrentOffset();
            pVertex->m_iteration = m_iteration;
            pVertex->Save(this);
        }

        // Aggressive phase that only allows offsets to shrink.
        m_phase = Shrinking;
        for (;;)
        {
            m_iteration++;
            m_Buffer.clear();

            m_offsetAdjustment = 0;

            for (auto pSection : m_Sections) for (auto pVertex : *pSection)
            {
                int currentOffset = GetCurrentOffset();

                // Only allow the offsets to shrink.
                m_offsetAdjustment = min(m_offsetAdjustment, currentOffset - pVertex->m_offset);

                pVertex->m_offset += m_offsetAdjustment;

                if (pVertex->m_offset < currentOffset)
                {
                    // It is possible for the encoding of relative offsets to grow during some iterations.
                    // Ignore this growth because it should disappear during the next iteration.
                    RollbackTo(pVertex->m_offset);
                }
                assert(pVertex->m_offset == GetCurrentOffset());

                pVertex->m_iteration = m_iteration;

                pVertex->Save(this);
            }

            // We are not able to shrink anymore, but we cannot just return here: it is possible that we
            // rolled back above because we shrank too much.
            if (m_offsetAdjustment == 0)
                break;

            // Limit the number of shrinking iterations. This limit is meant to be hit in corner cases only.
            if (m_iteration > 10)
                break;
        }

        // Conservative phase that only allows the offsets to grow. It is guaranteed to converge.
        m_phase = Growing;
        for (;;)
        {
            m_iteration++;
            m_Buffer.clear();

            m_offsetAdjustment = 0;
            m_paddingSize = 0;

            for (auto pSection : m_Sections) for (auto pVertex : *pSection)
            {
                int currentOffset = GetCurrentOffset();

                // Only allow the offsets to grow.
                m_offsetAdjustment = max(m_offsetAdjustment, currentOffset - pVertex->m_offset);

                pVertex->m_offset += m_offsetAdjustment;

                if (pVertex->m_offset > currentOffset)
                {
                    // Padding
                    int padding = pVertex->m_offset - currentOffset;
                    m_paddingSize += padding;
                    WritePad(padding);
                }
                assert(pVertex->m_offset == GetCurrentOffset());

                pVertex->m_iteration = m_iteration;

                pVertex->Save(this);
            }

            if (m_offsetAdjustment == 0)
                return m_Buffer;
        }

        m_phase = Done;
    }

    Vertex * NativeSection::Place(Vertex * pVertex)
    {
        assert(pVertex->m_offset == Vertex::NotPlaced);
        pVertex->m_offset = Vertex::Placed;
        push_back(pVertex);

        return pVertex;
    }

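    // ExpandBlock builds the tree covering entries [index, index + 2^depth): at depth 1 a pair of
    // adjacent entries becomes a VertexTree when both are present, or a VertexLeaf holding the
    // single present entry and its position within the block; at higher depths the two half-blocks
    // are expanded recursively, and a subtree whose only surviving child is a leaf collapses into
    // that leaf so that empty regions take no space.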
    Vertex * VertexArray::ExpandBlock(size_t index, int depth, bool place, bool * pIsLeaf)
    {
        if (depth == 1)
        {
            Vertex * pFirst = (index < m_Entries.size()) ? m_Entries[index] : nullptr;
            Vertex * pSecond = ((index + 1) < m_Entries.size()) ? m_Entries[index + 1] : nullptr;

            if (pFirst == nullptr && pSecond == nullptr)
                return nullptr;

            if (pFirst == nullptr || pSecond == nullptr)
            {
                VertexLeaf * pLeaf = new VertexLeaf();
                if (place)
                    m_pSection->Place(pLeaf);

                pLeaf->m_pVertex = (pFirst == nullptr) ? pSecond : pFirst;
                pLeaf->m_leafIndex = ((pFirst == nullptr) ? (index + 1) : index) & (_blockSize - 1);

                *pIsLeaf = true;
                return pLeaf;
            }

            VertexTree * pTree = new VertexTree();
            if (place)
                m_pSection->Place(pTree);

            pTree->m_pFirst = pFirst;
            pTree->m_pSecond = pSecond;

            m_pSection->Place(pSecond);

            return pTree;
        }
        else
        {
            VertexTree * pTree = new VertexTree();
            if (place)
                m_pSection->Place(pTree);

            bool fFirstIsLeaf = false, fSecondIsLeaf = false;
            Vertex * pFirst = ExpandBlock(index, depth - 1, false, &fFirstIsLeaf);
            Vertex * pSecond = ExpandBlock(index + (size_t{ 1 } << (depth - 1)), depth - 1, true, &fSecondIsLeaf);

            Vertex * pPop;

            if ((pFirst == nullptr) && (pSecond == nullptr))
            {
                if (place)
                {
                    pPop = m_pSection->Pop();
                    assert(pPop == pTree);
                }

                delete pTree;
                return nullptr;
            }

            if ((pFirst == nullptr) && fSecondIsLeaf)
            {
                pPop = m_pSection->Pop();
                assert(pPop == pSecond);

                if (place)
                {
                    pPop = m_pSection->Pop();
                    assert(pPop == pTree);
                }

                delete pTree;

                if (place)
                    m_pSection->Place(pSecond);

                *pIsLeaf = true;
                return pSecond;
            }

            if ((pSecond == nullptr) && fFirstIsLeaf)
            {
                if (place)
                {
                    pPop = m_pSection->Pop();
                    assert(pPop == pTree);
                }

                delete pTree;

                if (place)
                    m_pSection->Place(pFirst);

                *pIsLeaf = true;
                return pFirst;
            }

            pTree->m_pFirst = pFirst;
            pTree->m_pSecond = pSecond;

            return pTree;
        }
    }

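    // ExpandLayout walks the entries in blocks of _blockSize and builds a depth-4 tree for each
    // block (which presumes _blockSize is 16, i.e. 2^4, as defined in the header); blocks that
    // turn out to be completely empty all share a single sentinel VertexLeaf whose m_leafIndex
    // is _blockSize.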
    void VertexArray::ExpandLayout()
    {
        VertexLeaf * pNullBlock = nullptr;
        for (size_t i = 0; i < m_Entries.size(); i += _blockSize)
        {
            bool fIsLeaf;
            Vertex * pBlock = ExpandBlock(i, 4, true, &fIsLeaf);

            if (pBlock == nullptr)
            {
                if (pNullBlock == nullptr)
                {
                    pNullBlock = new VertexLeaf();
                    pNullBlock->m_leafIndex = _blockSize;
                    pNullBlock->m_pVertex = nullptr;
                    m_pSection->Place(pNullBlock);
                }
                pBlock = pNullBlock;
            }

            m_Blocks.push_back(pBlock);
        }

        // Start with maximum size entries
        m_entryIndexSize = 2;
    }

    void VertexArray::VertexLeaf::Save(NativeWriter * pWriter)
    {
        pWriter->WriteUnsigned(m_leafIndex << 2);

        if (m_pVertex != nullptr)
            m_pVertex->Save(pWriter);
    }

    void VertexArray::VertexTree::Save(NativeWriter * pWriter)
    {
        unsigned value = (m_pFirst != nullptr) ? 1 : 0;

        if (m_pSecond != nullptr)
        {
            value |= 2;

            int delta = pWriter->GetExpectedOffset(m_pSecond) - pWriter->GetCurrentOffset();
            assert(delta >= 0);
            value |= (delta << 2);
        }

        pWriter->WriteUnsigned(value);

        if (m_pFirst != nullptr)
            m_pFirst->Save(pWriter);
    }

    void VertexArray::Save(NativeWriter * pWriter)
    {
        // Lowest two bits are entry index size, the rest is number of elements
        pWriter->WriteUnsigned((m_Entries.size() << 2) | m_entryIndexSize);

        int blocksOffset = pWriter->GetCurrentOffset();
        int maxOffset = 0;

        for (auto pBlock : m_Blocks)
        {
            int offset = pWriter->GetExpectedOffset(pBlock) - blocksOffset;
            assert(offset >= 0);

            maxOffset = max(offset, maxOffset);

            if (m_entryIndexSize == 0)
            {
                pWriter->WriteByte((byte)offset);
            }
            else if (m_entryIndexSize == 1)
            {
                pWriter->WriteUInt16((UInt16)offset);
            }
            else
            {
                pWriter->WriteUInt32((UInt32)offset);
            }
        }

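        // Pick the smallest index size (0 = byte, 1 = UInt16, 2 = UInt32) that can still hold the
        // largest block offset seen above; the size is only changed in the direction the current
        // phase allows, so the fixed-point iteration in NativeWriter::Save keeps converging.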
        int newEntryIndexSize = 0;
        if (maxOffset > 0xFF)
        {
            newEntryIndexSize++;
            if (maxOffset > 0xFFFF)
                newEntryIndexSize++;
        }

        if (pWriter->IsGrowing())
        {
            if (newEntryIndexSize > m_entryIndexSize)
            {
                // Ensure that the table will be redone with new entry index size
                pWriter->UpdateOffsetAdjustment(1);

                m_entryIndexSize = newEntryIndexSize;
            }
        }
        else
        {
            if (newEntryIndexSize < m_entryIndexSize)
            {
                // Ensure that the table will be redone with new entry index size
                pWriter->UpdateOffsetAdjustment(-1);

                m_entryIndexSize = newEntryIndexSize;
            }
        }
    }

    //
    // VertexHashtable
    //

    // Returns the number of bits needed to represent x (i.e. floor(log2(x)) + 1); returns 0 iff x == 0
    static unsigned HighestBit(unsigned x)
    {
        unsigned ret = 0;
        while (x != 0)
        {
            x >>= 1;
            ret++;
        }
        return ret;
    }

    // Helper method to back-patch an entry index in the bucket table
    static void PatchEntryIndex(NativeWriter * pWriter, int patchOffset, int entryIndexSize, int entryIndex)
    {
        if (entryIndexSize == 0)
        {
            pWriter->PatchByteAt(patchOffset, (byte)entryIndex);
        }
        else if (entryIndexSize == 1)
        {
            pWriter->PatchByteAt(patchOffset, (byte)entryIndex);
            pWriter->PatchByteAt(patchOffset + 1, (byte)(entryIndex >> 8));
        }
        else
        {
            pWriter->PatchByteAt(patchOffset, (byte)entryIndex);
            pWriter->PatchByteAt(patchOffset + 1, (byte)(entryIndex >> 8));
            pWriter->PatchByteAt(patchOffset + 2, (byte)(entryIndex >> 16));
            pWriter->PatchByteAt(patchOffset + 3, (byte)(entryIndex >> 24));
        }
    }

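    // The serialized hashtable layout is: one header byte (log2 of the bucket count in the upper
    // bits, entry index size in the low two bits), a table of nBuckets + 1 bucket end offsets
    // relative to the start of that table, and then the entries themselves, each written as the
    // low byte of its hashcode followed by a relative offset to the target vertex.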
    void VertexHashtable::Save(NativeWriter * pWriter)
    {
        // Compute the layout of the table if we have not done it yet
        if (m_nBuckets == 0)
            ComputeLayout();

        int nEntries = (int)m_Entries.size();
        int startOffset = pWriter->GetCurrentOffset();
        int bucketMask = (m_nBuckets - 1);

        // Lowest two bits are entry index size, the rest is log2 number of buckets
        int numberOfBucketsShift = HighestBit(m_nBuckets) - 1;
        pWriter->WriteByte(static_cast<uint8_t>((numberOfBucketsShift << 2) | m_entryIndexSize));

        int bucketsOffset = pWriter->GetCurrentOffset();

        pWriter->WritePad((m_nBuckets + 1) << m_entryIndexSize);

        // For faster lookup at runtime, we store the first entry index even though it is redundant (the value can be
        // inferred from number of buckets)
        PatchEntryIndex(pWriter, bucketsOffset, m_entryIndexSize, pWriter->GetCurrentOffset() - bucketsOffset);

        int iEntry = 0;

        for (int iBucket = 0; iBucket < m_nBuckets; iBucket++)
        {
            while (iEntry < nEntries)
            {
                Entry &e = m_Entries[iEntry];

                if (((e.hashcode >> 8) & bucketMask) != (unsigned)iBucket)
                    break;

                int currentOffset = pWriter->GetCurrentOffset();
                pWriter->UpdateOffsetAdjustment(currentOffset - e.offset);
                e.offset = currentOffset;

                pWriter->WriteByte((byte)e.hashcode);
                pWriter->WriteRelativeOffset(e.pVertex);

                iEntry++;
            }

            int patchOffset = bucketsOffset + ((iBucket + 1) << m_entryIndexSize);

            PatchEntryIndex(pWriter, patchOffset, m_entryIndexSize, pWriter->GetCurrentOffset() - bucketsOffset);
        }
        assert(iEntry == nEntries);

        int maxIndexEntry = (pWriter->GetCurrentOffset() - bucketsOffset);
        int newEntryIndexSize = 0;
        if (maxIndexEntry > 0xFF)
        {
            newEntryIndexSize++;
            if (maxIndexEntry > 0xFFFF)
                newEntryIndexSize++;
        }

        if (pWriter->IsGrowing())
        {
            if (newEntryIndexSize > m_entryIndexSize)
            {
                // Ensure that the table will be redone with new entry index size
                pWriter->UpdateOffsetAdjustment(1);

                m_entryIndexSize = newEntryIndexSize;
            }
        }
        else
        {
            if (newEntryIndexSize < m_entryIndexSize)
            {
                // Ensure that the table will be redone with new entry index size
                pWriter->UpdateOffsetAdjustment(-1);

                m_entryIndexSize = newEntryIndexSize;
            }
        }
    }

    void VertexHashtable::ComputeLayout()
    {
        unsigned bucketsEstimate = (unsigned)(m_Entries.size() / m_nFillFactor);

        // Round the number of buckets up to a power of two
        m_nBuckets = 1 << HighestBit(bucketsEstimate);

        // The lowest byte of the hashcode is used for lookup within the bucket. Keep it sorted too so
        // that we can use the ordering to terminate the lookup early.
        unsigned mask = ((m_nBuckets - 1) << 8) | 0xFF;

        // Sort the entries by hashcode (bucket first, then the low byte used within the bucket)
        std::sort(m_Entries.begin(), m_Entries.end(),
            [=](Entry const& a, Entry const& b)
            {
                return (a.hashcode & mask) < (b.hashcode & mask);
            }
        );

        // Start with maximum size entries
        m_entryIndexSize = 2;
    }
}