// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
//*****************************************************************************
// Implementation for CBlobFetcher
//*****************************************************************************
#include "stdafx.h"                     // for ASSERTE and friends
#include "blobfetcher.h"
#include "log.h"

//-----------------------------------------------------------------------------
// round up to a certain alignment
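// e.g. roundUp(10, 8) == 16 and roundUp(16, 8) == 16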
static inline unsigned roundUp(unsigned val, unsigned align) {
    _ASSERTE((align & (align - 1)) == 0);       // align must be a power of 2

    return((val + (align-1)) & ~(align-1));
}

//-----------------------------------------------------------------------------
// compute the number of pad bytes needed to bring 'val' up to a certain alignment
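// e.g. padForAlign(10, 8) == 6 and padForAlign(16, 8) == 0, so
// val + padForAlign(val, align) == roundUp(val, align)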
static inline unsigned padForAlign(unsigned val, unsigned align) {
    _ASSERTE((align & (align - 1)) == 0);       // align must be a power of 2
    return ((-int(val)) & (align-1));
}

//*****************************************************************************
// Pillar implementation
//*****************************************************************************
//-----------------------------------------------------------------------------
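// Each pillar owns one contiguous allocation: m_dataAlloc is the raw pointer
// returned by new[], m_dataStart is the alignment-adjusted usable start,
// m_dataCur is the bump pointer for the next block, and m_dataEnd is one past
// the last usable byte. All four stay NULL until the first MakeNewBlock call.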
CBlobFetcher::CPillar::CPillar()
{
    m_dataAlloc = NULL;
    m_dataStart = NULL;
    m_dataCur = NULL;
    m_dataEnd = NULL;

    // Default initial size is 4K bytes.
    m_nTargetSize = 0x1000;
}

//-----------------------------------------------------------------------------
CBlobFetcher::CPillar::~CPillar() {
    // Sanity check to make sure nobody messed up the pointers
    _ASSERTE((m_dataCur >= m_dataStart) && (m_dataCur <= m_dataEnd));

    delete [] m_dataAlloc;
}


//-----------------------------------------------------------------------------
// Transfer ownership of data, so src will lose data and this will get it.
// Data itself will remain untouched, just ptrs & ownership change
//-----------------------------------------------------------------------------
void CBlobFetcher::CPillar::StealDataFrom(CBlobFetcher::CPillar & src)
{
    // We should only be moving into an empty Pillar
    _ASSERTE(m_dataStart == NULL);


    m_dataAlloc = src.m_dataAlloc;
    m_dataStart = src.m_dataStart;
    m_dataCur   = src.m_dataCur;
    m_dataEnd   = src.m_dataEnd;

    m_nTargetSize = src.m_nTargetSize;

    // Take away src's claim to data. This prevents multiple ownership and double deleting
    src.m_dataAlloc = src.m_dataStart = src.m_dataCur = src.m_dataEnd = NULL;

}

//-----------------------------------------------------------------------------
// Allocate a block in this particular pillar
//-----------------------------------------------------------------------------
/* Make a new block 'len' bytes long. However, move the pointer 'pad' bytes
   over so that the memory has the correct alignment characteristics.

   If the return value is NULL, there are two possibilities:
   - This CPillar reserved less memory than needed for the current allocation.
   - We are out of memory. In this case, CPillar::GetDataLen() will be 0.
*/

char * CBlobFetcher::CPillar::MakeNewBlock(unsigned len, unsigned pad) {

    _ASSERTE(pad < maxAlign);

    // Make sure we have memory in this block to allocate
    if (m_dataStart == NULL) {

        // make sure we allocate at least 'len' bytes
        unsigned nNewTargetSize = max(m_nTargetSize, len);

        //
        // We need to allocate memory with an offset of "pad" from
        // being "maxAlign" aligned. (data % maxAlign == pad).
        // Since "new" doesn't do this, allocate some extra
        // to handle the worst possible alignment case.
        //
        unsigned allocationSize = nNewTargetSize + (maxAlign-1);
        // Check for integer overflow
        if (allocationSize < nNewTargetSize)
        {   // Integer overflow happened, fail the allocation
            return NULL;
        }

        m_dataAlloc = new (nothrow) char[allocationSize];

        if (m_dataAlloc == NULL)
            return NULL;

        // Ensure that no uninitialized values are placed into the pe file.
        // While most of the logic carefully memset's appropriate pad bytes to 0, at least
        // one place has been found where that wasn't true.
        memset(m_dataAlloc, 0, allocationSize);

        m_nTargetSize = nNewTargetSize;

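        // Advance the start pointer by the smallest amount that makes it
        // congruent to 'pad' modulo maxAlign (see the assert below).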
        m_dataStart = m_dataAlloc +
            ((pad - (UINT_PTR)(m_dataAlloc)) & (((UINT_PTR)maxAlign)-1));

        _ASSERTE((UINT_PTR)(m_dataStart) % maxAlign == pad);

        m_dataCur = m_dataStart;

        m_dataEnd = &m_dataStart[m_nTargetSize];
    }

    _ASSERTE(m_dataCur >= m_dataStart);
    _ASSERTE((int) len > 0);

    // If this block is full, then get out, we'll have to try another block
    if (m_dataCur + len > m_dataEnd) {
        return NULL;
    }

    char* ret = m_dataCur;
    m_dataCur += len;
    _ASSERTE(m_dataCur <= m_dataEnd);
    return(ret);
}


//*****************************************************************************
// Blob Fetcher Implementation
//*****************************************************************************

//-----------------------------------------------------------------------------
CBlobFetcher::CBlobFetcher()
{
    // Setup storage
    m_pIndex = NULL;
    m_nIndexMax = 1;        // start off with an arbitrary small size (minimum is 1)
    m_nIndexUsed = 0;
    _ASSERTE(m_nIndexUsed < m_nIndexMax);       // use <, not <=

    m_nDataLen = 0;

    m_pIndex = new CPillar[m_nIndexMax];
    _ASSERTE(m_pIndex);
    //<TODO>@FUTURE: what do we do here if we run out of memory??!!</TODO>
}

//-----------------------------------------------------------------------------
CBlobFetcher::~CBlobFetcher()
{
    delete [] m_pIndex;
}


//-----------------------------------------------------------------------------
// Dynamic memory allocation, but we can't move old blocks (since others
// have pointers to them), so instead of reallocating we grow by appending
// new pillars.
// Returns NULL if the memory could not be allocated.
//-----------------------------------------------------------------------------
char* CBlobFetcher::MakeNewBlock(unsigned len, unsigned align) {

    _ASSERTE(m_pIndex);
    _ASSERTE(0 < align && align <= maxAlign);

    // Deal with alignment: if the running data length isn't already aligned,
    // try to emit the required pad bytes into the current pillar first.
    unsigned pad = padForAlign(m_nDataLen, align);
    char* pChRet = NULL;
    if (pad != 0) {
        pChRet = m_pIndex[m_nIndexUsed].MakeNewBlock(pad, 0);

        // Did we run out of memory?
        if (pChRet == NULL && m_pIndex[m_nIndexUsed].GetDataLen() == 0)
            return NULL;

        // If the current pillar doesn't have space for the pad, we'll allocate
        // a new pillar below; that allocation handles the padding needed to
        // align m_nDataLen.
        if (pChRet) {
            memset(pChRet, 0, pad);
            m_nDataLen += pad;
            pad = 0;
        }
    }
#ifdef _DEBUG
    if (pChRet)
        _ASSERTE((m_nDataLen % align) == 0);
#endif

    // Quickly computing total data length is tough since we have alignment problems
    // We'll do it by getting the length of all the completely full pillars so far
    // and then adding on the size of the current pillar
    unsigned nPreDataLen = m_nDataLen - m_pIndex[m_nIndexUsed].GetDataLen();

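    // Try to carve the block (plus any pad bytes we could not emit above) out
    // of the current pillar.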
    pChRet = m_pIndex[m_nIndexUsed].MakeNewBlock(len + pad, 0);

    // Did we run out of memory?
    if (pChRet == NULL && m_pIndex[m_nIndexUsed].GetDataLen() == 0)
        return NULL;

    if (pChRet == NULL) {

        nPreDataLen = m_nDataLen;

        if (m_nIndexUsed + 1 == m_nIndexMax) {
            // the entire array of pillars is full, re-org

            const unsigned nNewMax = m_nIndexMax * 2;   // arbitrary new size

            CPillar* pNewIndex = new (nothrow) CPillar[nNewMax];
            if (pNewIndex == NULL)
                return NULL;

            // Copy old stuff
            for(unsigned i = 0; i < m_nIndexMax; i++)
                pNewIndex[i].StealDataFrom(m_pIndex[i]);

            delete [] m_pIndex;

            m_nIndexMax = nNewMax;
            m_pIndex = pNewIndex;

            STRESS_LOG2(LF_LOADER, LL_INFO10, "CBlobFetcher %08X reallocates m_pIndex %08X\n", this, m_pIndex);
        }

        m_nIndexUsed ++; // current pillar is full, move to next

        // Make sure the new pillar is large enough to hold the data
        // How we do this is *totally arbitrary* and has been optimized for how
        // we intend to use this.

        unsigned minSizeOfNewPillar = (3 * m_nDataLen) / 2;
        if (minSizeOfNewPillar < len)
            minSizeOfNewPillar = len;

        if (m_pIndex[m_nIndexUsed].GetAllocateSize() < minSizeOfNewPillar) {
            m_pIndex[m_nIndexUsed].SetAllocateSize(roundUp(minSizeOfNewPillar, maxAlign));
        }

        // Under stress, we have seen that m_pIndex[0] is empty, but
        // m_pIndex[1] is not. This assert tries to catch that scenario.
        _ASSERTE(m_pIndex[0].GetDataLen() != 0);

        // Now that we're on new pillar, try again
        pChRet = m_pIndex[m_nIndexUsed].MakeNewBlock(len + pad, m_nDataLen % maxAlign);
        if (pChRet == NULL)
            return NULL;
        _ASSERTE(pChRet);

        // The current pointer picks up at the same alignment that the last block left off
        _ASSERTE(nPreDataLen % maxAlign == ((UINT_PTR) pChRet) % maxAlign);
    }

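    // If the pad bytes could not be emitted into the previous pillar (it was
    // already full), zero-fill them at the front of the new block instead and
    // return the pointer just past them.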
    if (pad != 0) {
        memset(pChRet, 0, pad);
        pChRet += pad;
    }

    m_nDataLen = nPreDataLen + m_pIndex[m_nIndexUsed].GetDataLen();

    _ASSERTE(((unsigned) m_nDataLen - len) % align == 0);
    _ASSERTE((UINT_PTR(pChRet) % align) == 0);
    return pChRet;
}
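
// Illustrative usage sketch (not part of the implementation; the local names
// below are made up). With the default pillar size above, the first request
// lands at logical offset 0, and the second is preceded by two zeroed pad
// bytes so that it lands at the 4-aligned logical offset 12:
//
//     CBlobFetcher fetcher;
//     char *p1 = fetcher.MakeNewBlock(10, 1);  // 10 bytes, no alignment constraint
//     char *p2 = fetcher.MakeNewBlock(4, 4);   // 4 bytes at a 4-byte-aligned offset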

//-----------------------------------------------------------------------------
// Map a linear offset into the blob to a pointer (middle weight function)
//-----------------------------------------------------------------------------
char * CBlobFetcher::ComputePointer(unsigned offset) const
{
    _ASSERTE(m_pIndex);
    unsigned idx = 0;

    if (offset == 0) {
        // if asked for offset 0 and there is no data, return NULL
        if (m_pIndex[0].GetDataLen() == 0)
        {
            return NULL;
        }
    }
    else
    {
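        // Walk the pillars, peeling off each full pillar's data length until
        // the remaining offset falls inside pillar 'idx'.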
        while (offset >= m_pIndex[idx].GetDataLen()) {
            offset -= m_pIndex[idx].GetDataLen();
            idx ++;
            // Overflow - the caller asked for an offset greater than what exists
            if (idx > m_nIndexUsed) {
                _ASSERTE(!"CBlobFetcher::ComputePointer() Overflow");
                return NULL;
            }
        }
    }

    char * ptr = (char*) (m_pIndex[idx].GetRawDataStart() + offset);
    return ptr;
}

//-----------------------------------------------------------------------------
// See if a pointer came from this blob fetcher
//-----------------------------------------------------------------------------
BOOL CBlobFetcher::ContainsPointer( __in char *ptr) const
{
    _ASSERTE(m_pIndex);

    CPillar *p = m_pIndex;
    CPillar *pEnd = p + m_nIndexUsed;

    while (p <= pEnd) {
        if (p->Contains(ptr))
            return TRUE;

        p++;
    }

    return FALSE;
}

//-----------------------------------------------------------------------------
// Find the linear offset of a pointer (middle weight function)
//-----------------------------------------------------------------------------
unsigned CBlobFetcher::ComputeOffset(__in char *ptr) const
{
    _ASSERTE(m_pIndex);

    CPillar *p = m_pIndex;
    CPillar *pEnd = p + m_nIndexUsed;

    unsigned offset = 0;

    while (p <= pEnd) {
        if (p->Contains(ptr))
            return offset + p->GetOffset(ptr);

        offset += p->GetDataLen();
        p++;
    }

    _ASSERTE(!"Pointer not found");
    return 0;
}


//Take the data from this blob fetcher and append it to the destination blob
//fetcher, after whatever data was already in that blob.
HRESULT CBlobFetcher::Merge(CBlobFetcher *destination) {
    unsigned dataLen;
    char *dataBlock;
    char *dataCurr;
    unsigned idx;
    _ASSERTE(destination);

    dataLen = GetDataLen();
    _ASSERTE( dataLen >= 0 );

    // Make sure there actually is data in the previous blob before trying to append it.
    if ( 0 == dataLen )
    {
        return S_OK;
    }

    //Get the length of our data and get a new block large enough to hold all of it.
    dataBlock = destination->MakeNewBlock(dataLen, 1);
    if (dataBlock == NULL) {
        return E_OUTOFMEMORY;
    }

    //Copy all of the bytes using the write algorithm from PEWriter.cpp
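    // The pillars' data regions are logically contiguous, so copying them
    // back-to-back reproduces this fetcher's linear byte stream in the
    // destination block.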
    dataCurr = dataBlock;
    for (idx = 0; idx <= m_nIndexUsed; idx++) {
        if (m_pIndex[idx].GetDataLen() > 0) {
            _ASSERTE(dataCurr < dataBlock + dataLen);
            memcpy(dataCurr, m_pIndex[idx].GetRawDataStart(), m_pIndex[idx].GetDataLen());
            dataCurr += m_pIndex[idx].GetDataLen();
        }
    }

    return S_OK;

}
