// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

// ---------------------------------------------------------------------------
// NativeFormatWriter
//
// Utilities to write native data to images that can be read by the NativeFormat.Reader class
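//
// Illustrative usage (a minimal sketch; the actual call sites live in the
// image writers that include this header):
//
//     NativeWriter writer;
//     NativeSection * pSection = writer.NewSection();
//
//     UnsignedConstant value(42);
//     pSection->Place(&value);
//
//     vector<byte>& blob = writer.Save();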
// ---------------------------------------------------------------------------

#pragma once

#include <assert.h>
#include <stdint.h>

// To reduce differences between C# and C++ versions
#define byte uint8_t

#define UInt16 uint16_t
#define UInt32 uint32_t
#define UInt64 uint64_t

#include <clr_std/vector>

namespace NativeFormat
{
    using namespace std;

    class NativeSection;
    class NativeWriter;

    class Vertex
    {
        friend class NativeWriter;
        friend class NativeSection;

        int m_offset;
        int m_iteration; // Iteration that the offset is valid for

        static const int NotPlaced = -1;
        static const int Placed = -2;

    public:
        Vertex()
            : m_offset(Vertex::NotPlaced), m_iteration(-1)
        {
        }

        virtual ~Vertex() {}

        virtual void Save(NativeWriter * pWriter) = 0;

        int GetOffset()
        {
            assert(m_offset >= 0);
            return m_offset;
        }
    };

    class NativeSection : vector<Vertex *>
    {
        friend class NativeWriter;

    public:
        Vertex * Place(Vertex * pVertex);

        Vertex * Pop()
        {
            Vertex * pVertex = *(end() - 1);
            erase(end() - 1);

            assert(pVertex->m_offset == Vertex::Placed);
            pVertex->m_offset = Vertex::NotPlaced;

            return pVertex;
        }
    };

    class NativeWriter
    {
        vector<NativeSection *> m_Sections;

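        // Save() lays the buffer out over multiple iterations: relative offsets are
        // re-encoded while the layout shrinks, and then while it grows, until offsets
        // stop moving. The phases below track which direction the current iteration
        // is allowed to adjust offsets in. (Summary only; the authoritative logic is
        // in the out-of-line Save() implementation.)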
        enum SavePhase
        {
            Initial,
            Shrinking,
            Growing,
            Done
        };

        vector<byte> m_Buffer;
        int m_iteration;
        SavePhase m_phase; // Current save phase
        int m_offsetAdjustment; // Cumulative offset adjustment compared to previous iteration
        int m_paddingSize; // How much padding was used

    public:
        NativeWriter()
        {
            m_iteration = 0;
            m_phase = Initial;
        }

        NativeSection * NewSection()
        {
            NativeSection * pSection = new NativeSection();
            m_Sections.push_back(pSection);
            return pSection;
        }

        void WriteByte(byte b)
        {
            m_Buffer.push_back(b);
        }

        void WriteUInt16(UInt16 value)
        {
            WriteByte((byte)value);
            WriteByte((byte)(value>>8));
        }

        void WriteUInt32(UInt32 value)
        {
            WriteByte((byte)value);
            WriteByte((byte)(value>>8));
            WriteByte((byte)(value>>16));
            WriteByte((byte)(value>>24));
        }

        void WritePad(unsigned size)
        {
            while (size > 0)
            {
                WriteByte(0);
                size--;
            }
        }

        bool IsGrowing()
        {
            return m_phase == Growing;
        }

        void UpdateOffsetAdjustment(int offsetDelta)
        {
            switch (m_phase)
            {
            case Shrinking:
                m_offsetAdjustment = min(m_offsetAdjustment, offsetDelta);
                break;
            case Growing:
                m_offsetAdjustment = max(m_offsetAdjustment, offsetDelta);
                break;
            default:
                break;
            }
        }

        void RollbackTo(int offset)
        {
            m_Buffer.erase(m_Buffer.begin() + offset, m_Buffer.end());
        }

        void RollbackTo(int offset, int offsetAdjustment)
        {
            m_offsetAdjustment = offsetAdjustment;
            RollbackTo(offset);
        }

        void PatchByteAt(int offset, byte value)
        {
            m_Buffer[offset] = value;
        }

        //
        // Same encoding as what's used by CTL
        //
        void WriteUnsigned(unsigned d);
        static unsigned GetUnsignedEncodingSize(unsigned d);
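
        // Illustrative use during layout (a sketch only; both functions are
        // implemented out of line, where the exact byte encoding is defined):
        //
        //     unsigned cbValue = NativeWriter::GetUnsignedEncodingSize(value);
        //     // ...account for cbValue bytes of space, then later...
        //     pWriter->WriteUnsigned(value);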

        template <typename T>
        void WriteUnsigned(T d)
        {
            WriteUnsigned((unsigned)d);
        }

        void WriteSigned(int i);

        void WriteRelativeOffset(Vertex * pVal);

        int GetExpectedOffset(Vertex * pVal);

        int GetCurrentOffset(Vertex * pVal)
        {
            if (pVal->m_iteration != m_iteration)
                return -1;
            return pVal->m_offset;
        }

        void SetCurrentOffset(Vertex * pVal)
        {
            pVal->m_iteration = m_iteration;
            pVal->m_offset = GetCurrentOffset();
        }

        int GetCurrentOffset()
        {
            return (int)m_Buffer.size();
        }

        int GetNumberOfIterations()
        {
            return m_iteration;
        }

        int GetPaddingSize()
        {
            return m_paddingSize;
        }

        vector<byte>& Save();
    };


    //
    // Data structure building blocks
    //

    class UnsignedConstant : public Vertex
    {
        unsigned m_value;

    public:
        UnsignedConstant(unsigned value)
            : m_value(value)
        {
        }

        virtual void Save(NativeWriter * pWriter)
        {
            pWriter->WriteUnsigned(m_value);
        }
    };

    //
    // Sparse array. Good for random access based on index
    //
    class VertexArray : public Vertex
    {
        vector<Vertex *> m_Entries;

        NativeSection * m_pSection;
        vector<Vertex *> m_Blocks;

        static const int _blockSize = 16;

        // Current size of index entry
        int m_entryIndexSize; // 0 - uint8, 1 - uint16, 2 - uint32

        class VertexLeaf : public Vertex
        {
        public:
            Vertex * m_pVertex;
            size_t m_leafIndex;

            virtual void Save(NativeWriter * pWriter);
        };

        class VertexTree : public Vertex
        {
        public:
            Vertex * m_pFirst;
            Vertex * m_pSecond;

            virtual void Save(NativeWriter * pWriter);
        };

        Vertex * ExpandBlock(size_t index, int depth, bool place, bool * pLeaf);

    public:
        VertexArray(NativeSection * pSection)
            : m_pSection(pSection)
        {
        }

        void Set(int index, Vertex * pElement)
        {
            while ((size_t)index >= m_Entries.size())
                m_Entries.push_back(nullptr);

            m_Entries[index] = pElement;
        }

        void ExpandLayout();

        virtual void Save(NativeWriter * pWriter);
    };
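
    // Illustrative usage (a sketch; the surrounding writer decides when
    // ExpandLayout runs relative to placing the array and its blocks):
    //
    //     VertexArray array(pSection);
    //     array.Set(0, pFirstElement);
    //     array.Set(7, pOtherElement);  // unset slots remain null (sparse)
    //     array.ExpandLayout();
    //     pSection->Place(&array);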

    //
    // Hashtable. Good for random access based on hashcode + key
    //
    class VertexHashtable : public Vertex
    {
        struct Entry
        {
            Entry()
                : offset(-1), hashcode(0), pVertex(NULL)
            {
            }

            Entry(unsigned hashcode, Vertex * pVertex)
                : offset(0), hashcode(hashcode), pVertex(pVertex)
            {
            }

            int offset;

            unsigned hashcode;
            Vertex * pVertex;
        };

        vector<Entry> m_Entries;

        // How many entries to target per bucket. Higher fill factor means smaller size, but worse runtime perf.
        int m_nFillFactor;

        // Number of buckets chosen for the table. Must be a power of two. 0 means that the table is still open for mutation.
        int m_nBuckets;

        // Current size of index entry
        int m_entryIndexSize; // 0 - uint8, 1 - uint16, 2 - uint32

        void ComputeLayout();

    public:
        static const int DefaultFillFactor = 13;

        VertexHashtable(int fillFactor = DefaultFillFactor)
        {
            m_nBuckets = 0;

            m_nFillFactor = fillFactor;
        }

        void Append(unsigned hashcode, Vertex * pElement)
        {
            // The table needs to be open for mutation
            assert(m_nBuckets == 0);

            m_Entries.push_back(Entry(hashcode, pElement));
        }

        virtual void Save(NativeWriter * pWriter);
    };
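
    // Illustrative usage (a sketch; hashcodes are computed by the caller, and
    // ComputeHashCode below is a hypothetical stand-in for that hash function;
    // Append is only valid while the table is still open for mutation):
    //
    //     VertexHashtable hashtable;
    //     hashtable.Append(ComputeHashCode(name1), pEntry1);
    //     hashtable.Append(ComputeHashCode(name2), pEntry2);
    //     pSection->Place(&hashtable);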
};