1// Licensed to the .NET Foundation under one or more agreements.
2// The .NET Foundation licenses this file to you under the MIT license.
3// See the LICENSE file in the project root for more information.
4//*****************************************************************************
5// MetaModelRW.cpp
6//
7
8//
9// Implementation for the Read/Write MiniMD code.
10//
11//*****************************************************************************
12#include "stdafx.h"
13#include <limits.h>
14#include <posterror.h>
15#include <metamodelrw.h>
16#include <stgio.h>
17#include <stgtiggerstorage.h>
18#include "mdlog.h"
19#include "rwutil.h"
20#include "../compiler/importhelper.h"
21#include "metadata.h"
22#include "streamutil.h"
23
24#include "../hotdata/hotdataformat.h"
25
26#ifdef FEATURE_PREJIT
27#include "corcompile.h"
28#endif
29
30#ifdef _MSC_VER
31#pragma intrinsic(memcpy)
32#endif
33
//********** RidMap ***********************************************************
// Growable RID-to-RID map; used by the sort helpers below to record where each
// row moved so token-remap notifications can be sent afterwards.
typedef CDynArray<RID> RIDMAP;


//********** Types. ***********************************************************
// Row-count threshold related to building lookup indices.
// NOTE(review): not referenced in this chunk — confirm usage elsewhere in the file.
#define INDEX_ROW_COUNT_THRESHOLD 25
40
41
//********** Locals. **********************************************************
// Selects which pre-allocation profile (pool sizes, hash sizes, table sizes)
// to use when creating a new writable MetaData scope.
enum MetaDataSizeIndex
{
    // Standard MetaData sizes (from VBA library).
    MDSizeIndex_Standard = 0,
    // Minimal MetaData sizes used mainly by Reflection.Emit for small assemblies (emitting 1 type per
    // assembly).
    // Motivated by the performance requirement in collectible types.
    MDSizeIndex_Minimal = 1,

    MDSizeIndex_Count
}; // enum MetaDataSizeIndex
54
// Gets index of MetaData sizes used to access code:g_PoolSizeInfo, code:g_HashSize and code:g_TblSizeInfo.
static
enum MetaDataSizeIndex
GetMetaDataSizeIndex(const OptionValue *pOptionValue)
{
    // Callers requesting a minimal scope (e.g. Reflection.Emit for small,
    // collectible assemblies) get the small pre-allocation profile.
    if (pOptionValue->m_InitialSize == MDInitialSizeMinimal)
    {
        return MDSizeIndex_Minimal;
    }
    // Only two initial-size profiles exist; anything else is a caller bug.
    _ASSERTE(pOptionValue->m_InitialSize == MDInitialSizeDefault);
    return MDSizeIndex_Standard;
} // GetMetaDataSizeIndex
67
// Indices into the second dimension of code:g_PoolSizeInfo — one slot per heap kind.
#define IX_STRING_POOL 0
#define IX_US_BLOB_POOL 1
#define IX_GUID_POOL 2
#define IX_BLOB_POOL 3

// Initial heap allocation sizes, indexed by code:GetMetaDataSizeIndex and the
// IX_*_POOL constants above. Each entry is { size in bytes, hash bucket count }.
static
const ULONG
g_PoolSizeInfo[MDSizeIndex_Count][4][2] =
{
    { // Standard pool sizes { Size in bytes, Number of buckets in hash } (code:MDSizeIndex_Standard).
        {20000, 449}, // Strings
        {5000, 150}, // User literal string blobs
        {256, 16}, // Guids
        {20000, 449} // Blobs
    },
    { // Minimal pool sizes { Size in bytes, Number of buckets in hash } (code:MDSizeIndex_Minimal).
        {300, 10}, // Strings
        {50, 5}, // User literal string blobs
        {16, 3}, // Guids
        {200, 10} // Blobs
    }
};

// Hash bucket counts for the scope's lookup hashes, per size profile.
static
const ULONG
g_HashSize[MDSizeIndex_Count] =
{
    257, // Standard MetaData size (code:MDSizeIndex_Standard).
    50 // Minimal MetaData size (code:MDSizeIndex_Minimal).
};
98
// Initial row-count allocations for every MetaData table, indexed by
// code:GetMetaDataSizeIndex. A zero means no rows are pre-allocated.
static
const ULONG
g_TblSizeInfo[MDSizeIndex_Count][TBL_COUNT] =
{
    // Standard table sizes (code:MDSizeIndex_Standard).
    {
        1, // Module
        90, // TypeRef
        65, // TypeDef
        0, // FieldPtr
        400, // Field
        0, // MethodPtr
        625, // Method
        0, // ParamPtr
        1200, // Param
        6, // InterfaceImpl
        500, // MemberRef
        400, // Constant
        650, // CustomAttribute
        0, // FieldMarshal
        0, // DeclSecurity
        0, // ClassLayout
        0, // FieldLayout
        175, // StandAloneSig
        0, // EventMap
        0, // EventPtr
        0, // Event
        5, // PropertyMap
        0, // PropertyPtr
        25, // Property
        45, // MethodSemantics
        20, // MethodImpl
        0, // ModuleRef
        0, // TypeSpec
        0, // ImplMap
        0, // FieldRVA
        0, // ENCLog
        0, // ENCMap
        0, // Assembly
        0, // AssemblyProcessor
        0, // AssemblyOS
        0, // AssemblyRef
        0, // AssemblyRefProcessor
        0, // AssemblyRefOS
        0, // File
        0, // ExportedType
        0, // ManifestResource
        0, // NestedClass
        0, // GenericParam
        0, // MethodSpec
        0, // GenericParamConstraint
    },
    // Minimal table sizes (code:MDSizeIndex_Minimal).
    {
        1, // Module
        2, // TypeRef
        2, // TypeDef
        0, // FieldPtr
        2, // Field
        0, // MethodPtr
        2, // Method
        0, // ParamPtr
        0, // Param
        0, // InterfaceImpl
        1, // MemberRef
        0, // Constant
        0, // CustomAttribute
        0, // FieldMarshal
        0, // DeclSecurity
        0, // ClassLayout
        0, // FieldLayout
        0, // StandAloneSig
        0, // EventMap
        0, // EventPtr
        0, // Event
        0, // PropertyMap
        0, // PropertyPtr
        0, // Property
        0, // MethodSemantics
        0, // MethodImpl
        0, // ModuleRef
        0, // TypeSpec
        0, // ImplMap
        0, // FieldRVA
        0, // ENCLog
        0, // ENCMap
        1, // Assembly
        0, // AssemblyProcessor
        0, // AssemblyOS
        1, // AssemblyRef
        0, // AssemblyRefProcessor
        0, // AssemblyRefOS
        0, // File
        0, // ExportedType
        0, // ManifestResource
        0, // NestedClass
        0, // GenericParam
        0, // MethodSpec
        0, // GenericParamConstraint
    }
}; // g_TblSizeInfo
200
// Per-table description used for generic named-item indexing: which columns
// hold the name and parent, and which token type the table's rows carry.
// (ULONG)-1 in any field means "not applicable for this table".
struct TblIndex
{
    ULONG m_iName; // Name column.
    ULONG m_iParent; // Parent column, if any.
    ULONG m_Token; // Token of the table.
};
207
// Table to drive generic named-item indexing.
// One entry per MetaData table, in table order; m_Token is relied upon by
// code:CMiniMdRW::GetTableForToken and code:CMiniMdRW::GetTokenForTable.
const TblIndex g_TblIndex[TBL_COUNT] =
{
    {(ULONG) -1, (ULONG) -1, mdtModule}, // Module
    {TypeRefRec::COL_Name, (ULONG) -1, mdtTypeRef}, // TypeRef
    {TypeDefRec::COL_Name, (ULONG) -1, mdtTypeDef}, // TypeDef
    {(ULONG) -1, (ULONG) -1, (ULONG) -1}, // FieldPtr
    {(ULONG) -1, (ULONG) -1, mdtFieldDef}, // Field
    {(ULONG) -1, (ULONG) -1, (ULONG) -1}, // MethodPtr
    {(ULONG) -1, (ULONG) -1, mdtMethodDef}, // Method
    {(ULONG) -1, (ULONG) -1, (ULONG) -1}, // ParamPtr
    {(ULONG) -1, (ULONG) -1, mdtParamDef}, // Param
    {(ULONG) -1, (ULONG) -1, mdtInterfaceImpl}, // InterfaceImpl
    {MemberRefRec::COL_Name, MemberRefRec::COL_Class, mdtMemberRef}, // MemberRef
    {(ULONG) -1, (ULONG) -1, (ULONG) -1}, // Constant
    {(ULONG) -1, (ULONG) -1, mdtCustomAttribute},// CustomAttribute
    {(ULONG) -1, (ULONG) -1, (ULONG) -1}, // FieldMarshal
    {(ULONG) -1, (ULONG) -1, mdtPermission}, // DeclSecurity
    {(ULONG) -1, (ULONG) -1, (ULONG) -1}, // ClassLayout
    {(ULONG) -1, (ULONG) -1, (ULONG) -1}, // FieldLayout
    {(ULONG) -1, (ULONG) -1, mdtSignature}, // StandAloneSig
    {(ULONG) -1, (ULONG) -1, (ULONG) -1}, // EventMap
    {(ULONG) -1, (ULONG) -1, (ULONG) -1}, // EventPtr
    {(ULONG) -1, (ULONG) -1, mdtEvent}, // Event
    {(ULONG) -1, (ULONG) -1, (ULONG) -1}, // PropertyMap
    {(ULONG) -1, (ULONG) -1, (ULONG) -1}, // PropertyPtr
    {(ULONG) -1, (ULONG) -1, mdtProperty}, // Property
    {(ULONG) -1, (ULONG) -1, (ULONG) -1}, // MethodSemantics
    {(ULONG) -1, (ULONG) -1, (ULONG) -1}, // MethodImpl
    {(ULONG) -1, (ULONG) -1, mdtModuleRef}, // ModuleRef
    {(ULONG) -1, (ULONG) -1, mdtTypeSpec}, // TypeSpec
    {(ULONG) -1, (ULONG) -1, (ULONG) -1}, // ImplMap <TODO>@FUTURE: Check that these are the right entries here.</TODO>
    {(ULONG) -1, (ULONG) -1, (ULONG) -1}, // FieldRVA <TODO>@FUTURE: Check that these are the right entries here.</TODO>
    {(ULONG) -1, (ULONG) -1, (ULONG) -1}, // ENCLog
    {(ULONG) -1, (ULONG) -1, (ULONG) -1}, // ENCMap
    {(ULONG) -1, (ULONG) -1, mdtAssembly}, // Assembly <TODO>@FUTURE: Update with the right number.</TODO>
    {(ULONG) -1, (ULONG) -1, (ULONG) -1}, // AssemblyProcessor <TODO>@FUTURE: Update with the right number.</TODO>
    {(ULONG) -1, (ULONG) -1, (ULONG) -1}, // AssemblyOS <TODO>@FUTURE: Update with the right number.</TODO>
    {(ULONG) -1, (ULONG) -1, mdtAssemblyRef}, // AssemblyRef <TODO>@FUTURE: Update with the right number.</TODO>
    {(ULONG) -1, (ULONG) -1, (ULONG) -1}, // AssemblyRefProcessor <TODO>@FUTURE: Update with the right number.</TODO>
    {(ULONG) -1, (ULONG) -1, (ULONG) -1}, // AssemblyRefOS <TODO>@FUTURE: Update with the right number.</TODO>
    {(ULONG) -1, (ULONG) -1, mdtFile}, // File <TODO>@FUTURE: Update with the right number.</TODO>
    {(ULONG) -1, (ULONG) -1, mdtExportedType}, // ExportedType <TODO>@FUTURE: Update with the right number.</TODO>
    {(ULONG) -1, (ULONG) -1, mdtManifestResource},// ManifestResource <TODO>@FUTURE: Update with the right number.</TODO>
    {(ULONG) -1, (ULONG) -1, (ULONG) -1}, // NestedClass
    {(ULONG) -1, (ULONG) -1, mdtGenericParam}, // GenericParam
    {(ULONG) -1, (ULONG) -1, mdtMethodSpec}, // MethodSpec
    {(ULONG) -1, (ULONG) -1, mdtGenericParamConstraint},// GenericParamConstraint
};

// Tables that are emptied when the scope is switched to ENC update mode
// (see the MDUpdateENC case in code:CMiniMdRW::SetOption). (ULONG)-1 terminates.
ULONG CMiniMdRW::m_TruncatedEncTables[] =
{
    TBL_ENCLog,
    TBL_ENCMap,
    (ULONG) -1
};
264
265//*****************************************************************************
266// Given a token type, return the table index.
267//*****************************************************************************
268ULONG CMiniMdRW::GetTableForToken( // Table index, or -1.
269 mdToken tkn) // Token to find.
270{
271 ULONG ixTbl; // Loop control.
272 ULONG type = TypeFromToken(tkn);
273
274 // Get the type -- if a string, no associated table.
275 if (type >= mdtString)
276 return (ULONG) -1;
277 // Table number is same as high-byte of token.
278 ixTbl = type >> 24;
279 // Make sure.
280 _ASSERTE(g_TblIndex[ixTbl].m_Token == type);
281
282 return ixTbl;
283} // CMiniMdRW::GetTableForToken
284
285//*****************************************************************************
286// Given a Table index, return the Token type.
287//*****************************************************************************
// Returns the token type whose RIDs live in table ixTbl, or (ULONG)-1 when the
// table's rows are not addressed by tokens (e.g. the *Ptr tables).
mdToken CMiniMdRW::GetTokenForTable( // Token type, or -1.
    ULONG ixTbl) // Table index.
{
    // A table's token type, when it has one, is the table index in the high byte.
    _ASSERTE(g_TblIndex[ixTbl].m_Token == (ixTbl<<24) || g_TblIndex[ixTbl].m_Token == (ULONG) -1);
    return g_TblIndex[ixTbl].m_Token;
} // CMiniMdRW::GetTokenForTable
294
295//*****************************************************************************
296// Helper classes for sorting MiniMdRW tables.
297//*****************************************************************************
// Quicksort over one MiniMd table, ordered by the raw (unsigned) value of a
// single column. Optionally tracks row movement in a RIDMAP and sends
// IMapToken notifications once sorting completes. Rows are swapped in place
// (byte copies through m_buf), so any cached pointers into the table are
// invalidated by a sort.
class CQuickSortMiniMdRW
{
protected:
    CMiniMdRW &m_MiniMd; // The MiniMd with the data.
    ULONG m_ixTbl; // The table.
    ULONG m_ixCol; // The column.
    int m_iCount; // How many items in array.
    int m_iElemSize; // Size of one element.
    RIDMAP *m_pRidMap; // Rid map that need to be swapped as we swap data
    bool m_bMapToken; // MapToken handling desired.

    BYTE m_buf[128]; // For swapping. Row size is asserted to fit in the ctor.

    // Gets a pointer to row nIndex of the table being sorted.
    HRESULT getRow(UINT32 nIndex, void **ppRecord)
    {
        return m_MiniMd.m_Tables[m_ixTbl].GetRecord(nIndex, reinterpret_cast<BYTE **>(ppRecord));
    }
    void SetSorted() { m_MiniMd.SetSorted(m_ixTbl, true); }

    // If remap notifications were requested, allocates the RID map and fills it
    // with the identity mapping. Entries 0..m_iCount are initialized; RIDs are
    // 1-based so slot 0 is unused.
    HRESULT PrepMapTokens()
    {
        HRESULT hr = S_OK;

        // If remap notifications are desired, prepare to collect the info in a RIDMAP.
        if (m_bMapToken)
        {
            _ASSERTE(m_pRidMap == NULL); // Don't call twice.
            IfNullGo(m_pRidMap = new (nothrow) RIDMAP);
            if (!m_pRidMap->AllocateBlock(m_iCount + 1))
            {
                delete m_pRidMap;
                m_pRidMap = NULL;
                IfFailGo(E_OUTOFMEMORY);
            }
            for (int i=0; i<= m_iCount; ++i)
                *(m_pRidMap->Get(i)) = i;
        }

    ErrExit:
        return hr;
    } // CQuickSortMiniMdRW::PrepMapTokens

    // Sends one MapToken(oldRid, newRid) notification per row, using the RID
    // map that Swap permuted alongside the data.
    __checkReturn
    HRESULT DoMapTokens()
    {
        HRESULT hr;
        if (m_bMapToken)
        {
            mdToken typ = m_MiniMd.GetTokenForTable(m_ixTbl);
            for (int i=1; i<=m_iCount; ++i)
            {
                IfFailRet(m_MiniMd.MapToken(*(m_pRidMap->Get(i)), i, typ));
            }
        }
        return S_OK;
    } // CQuickSortMiniMdRW::DoMapTokens

public:
    CQuickSortMiniMdRW(
        CMiniMdRW &MiniMd, // MiniMd with the data.
        ULONG ixTbl, // The table.
        ULONG ixCol, // The column.
        bool bMapToken) // If true, MapToken handling desired.
     : m_MiniMd(MiniMd),
        m_ixTbl(ixTbl),
        m_ixCol(ixCol),
        m_pRidMap(NULL),
        m_bMapToken(bMapToken)
    {
        m_iElemSize = m_MiniMd.m_TableDefs[m_ixTbl].m_cbRec;
        // Rows are swapped through m_buf; a bigger row would corrupt the stack.
        _ASSERTE(m_iElemSize <= (int) sizeof(m_buf));
    }

    ~CQuickSortMiniMdRW()
    {
        if (m_bMapToken)
        {
            if (m_pRidMap)
            {
                m_pRidMap->Clear();
                delete m_pRidMap;
                m_pRidMap = NULL;
            }
            m_bMapToken = false;
        }
    } // CQuickSortMiniMdRW::~CQuickSortMiniMdRW

    // set the RidMap
    void SetRidMap(RIDMAP *pRidMap) { m_pRidMap = pRidMap; }

    //*****************************************************************************
    // Call to sort the array.
    //*****************************************************************************
    HRESULT Sort()
    {
        HRESULT hr = S_OK;

        INDEBUG(m_MiniMd.Debug_CheckIsLockedForWrite();)

        _ASSERTE(m_MiniMd.IsSortable(m_ixTbl));
        m_iCount = m_MiniMd.GetCountRecs(m_ixTbl);

        // If remap notifications are desired, prepare to collect the info in a RIDMAP.
        IfFailGo(PrepMapTokens());

        // We are going to sort tables. Invalidate the hash tables
        // (they key off row positions, which are about to change).
        if ( m_MiniMd.m_pLookUpHashs[m_ixTbl] != NULL )
        {
            delete m_MiniMd.m_pLookUpHashs[m_ixTbl];
            m_MiniMd.m_pLookUpHashs[m_ixTbl] = NULL;
        }

        IfFailGo(SortRange(1, m_iCount));

        // The table is sorted until its next change.
        SetSorted();

        // If remap notifications were desired, send them.
        IfFailGo(DoMapTokens());

    ErrExit:
        return hr;
    } // CQuickSortMiniMdRW::Sort

    //*****************************************************************************
    // Call to check whether the array is sorted without altering it.
    // Clears the table's sorted flag up front; the flag is re-set only when
    // every adjacent pair is strictly ascending (no duplicates). Returns S_OK
    // either way (early return on the first non-ascending pair leaves the flag
    // cleared); an error HRESULT only if a row fetch fails.
    //*****************************************************************************
    HRESULT CheckSortedWithNoDuplicates()
    {
        HRESULT hr = S_OK;
        int iCount = m_MiniMd.GetCountRecs(m_ixTbl);
        int nResult;

        m_MiniMd.SetSorted(m_ixTbl, false);

        for (int i = 1; i < iCount; i++)
        {
            IfFailGo(Compare(i, i+1, &nResult));

            if (nResult >= 0)
            {
                return S_OK;
            }
        }

        // The table is sorted until its next change.
        SetSorted();

    ErrExit:
        return hr;
    } // CQuickSortMiniMdRW::CheckSortedWithNoDuplicates

    //*****************************************************************************
    // Override this function to do the comparison.
    // Default: unsigned comparison of the raw values of the sort column.
    //*****************************************************************************
    __checkReturn
    HRESULT Compare(
        int iLeft, // First item to compare.
        int iRight, // Second item to compare.
        int *pnResult) // -1, 0, or 1
    {
        HRESULT hr;
        void *pLeft;
        void *pRight;
        IfFailRet(getRow(iLeft, &pLeft));
        IfFailRet(getRow(iRight, &pRight));
        ULONG ulLeft = m_MiniMd.GetCol(m_ixTbl, m_ixCol, pLeft);
        ULONG ulRight = m_MiniMd.GetCol(m_ixTbl, m_ixCol, pRight);

        if (ulLeft < ulRight)
        {
            *pnResult = -1;
            return S_OK;
        }
        if (ulLeft == ulRight)
        {
            *pnResult = 0;
            return S_OK;
        }
        *pnResult = 1;
        return S_OK;
    } // CQuickSortMiniMdRW::Compare

private:
    // Quicksort of rows [iLeft, iRight] (inclusive, 1-based). Recurses only
    // into the smaller partition and loops on the larger one, bounding the
    // recursion depth to O(log n).
    __checkReturn
    HRESULT SortRange(
        int iLeft,
        int iRight)
    {
        HRESULT hr;
        int iLast;
        int nResult;

        for (;;)
        {
            // if less than two elements you're done.
            if (iLeft >= iRight)
            {
                return S_OK;
            }

            // The mid-element is the pivot, move it to the left.
            IfFailRet(Compare(iLeft, (iLeft+iRight)/2, &nResult));
            if (nResult != 0)
            {
                IfFailRet(Swap(iLeft, (iLeft+iRight)/2));
            }
            iLast = iLeft;

            // move everything that is smaller than the pivot to the left.
            for (int i = iLeft+1; i <= iRight; i++)
            {
                IfFailRet(Compare(i, iLeft, &nResult));
                if (nResult < 0)
                {
                    IfFailRet(Swap(i, ++iLast));
                }
            }

            // Put the pivot to the point where it is in between smaller and larger elements.
            IfFailRet(Compare(iLeft, iLast, &nResult));
            if (nResult != 0)
            {
                IfFailRet(Swap(iLeft, iLast));
            }

            // Sort each partition.
            int iLeftLast = iLast - 1;
            int iRightFirst = iLast + 1;
            if (iLeftLast - iLeft < iRight - iRightFirst)
            { // Left partition is smaller, sort it recursively
                IfFailRet(SortRange(iLeft, iLeftLast));
                // Tail call to sort the right (bigger) partition
                iLeft = iRightFirst;
                //iRight = iRight;
                continue;
            }
            else
            { // Right partition is smaller, sort it recursively
                IfFailRet(SortRange(iRightFirst, iRight));
                // Tail call to sort the left (bigger) partition
                //iLeft = iLeft;
                iRight = iLeftLast;
                continue;
            }
        }
    } // CQuickSortMiniMdRW::SortRange

protected:
    // Swaps two rows byte-for-byte (via m_buf) and, when a RID map is active,
    // swaps the corresponding map entries so remap notifications stay correct.
    __checkReturn
    inline HRESULT Swap(
        int iFirst,
        int iSecond)
    {
        HRESULT hr;
        void *pFirst;
        void *pSecond;
        if (iFirst == iSecond)
        {
            return S_OK;
        }

        PREFAST_ASSUME_MSG(m_iElemSize <= (int) sizeof(m_buf), "The MetaData table row has to fit into buffer for swapping.");

        IfFailRet(getRow(iFirst, &pFirst));
        IfFailRet(getRow(iSecond, &pSecond));
        memcpy(m_buf, pFirst, m_iElemSize);
        memcpy(pFirst, pSecond, m_iElemSize);
        memcpy(pSecond, m_buf, m_iElemSize);
        if (m_pRidMap != NULL)
        {
            RID ridTemp;
            ridTemp = *(m_pRidMap->Get(iFirst));
            *(m_pRidMap->Get(iFirst)) = *(m_pRidMap->Get(iSecond));
            *(m_pRidMap->Get(iSecond)) = ridTemp;
        }
        return S_OK;
    } // CQuickSortMiniMdRW::Swap

}; // class CQuickSortMiniMdRW
578
579class CStableSortMiniMdRW : public CQuickSortMiniMdRW
580{
581public:
582 CStableSortMiniMdRW(
583 CMiniMdRW &MiniMd, // MiniMd with the data.
584 ULONG ixTbl, // The table.
585 ULONG ixCol, // The column.
586 bool bMapToken) // Is MapToken handling desired.
587 : CQuickSortMiniMdRW(MiniMd, ixTbl, ixCol, bMapToken)
588 {}
589
590 //*****************************************************************************
591 // Call to sort the array.
592 //*****************************************************************************
593 __checkReturn
594 HRESULT Sort()
595 {
596 int i; // Outer loop counter.
597 int j; // Inner loop counter.
598 int bSwap; // Early out.
599 HRESULT hr = S_OK;
600 int nResult;
601
602 _ASSERTE(m_MiniMd.IsSortable(m_ixTbl));
603 m_iCount = m_MiniMd.GetCountRecs(m_ixTbl);
604
605 // If remap notifications are desired, prepare to collect the info in a RIDMAP.
606 IfFailGo(PrepMapTokens());
607
608 for (i=m_iCount; i>1; --i)
609 {
610 bSwap = 0;
611 for (j=1; j<i; ++j)
612 {
613 IfFailGo(Compare(j, j+1, &nResult));
614 if (nResult > 0)
615 {
616 IfFailGo(Swap(j, j+1));
617 bSwap = 1;
618 }
619 }
620 // If made a full pass w/o swaps, done.
621 if (!bSwap)
622 break;
623 }
624
625 // The table is sorted until its next change.
626 SetSorted();
627
628 // If remap notifications were desired, send them.
629 IfFailGo(DoMapTokens());
630
631 ErrExit:
632 return hr;
633 } // CStableSortMiniMdRW::Sort
634
635}; // class CStableSortMiniMdRW
636
//-------------------------------------------------------------------------
// Convenience macros declaring a sorter named sort<tbl> over table TBL_<tbl>,
// keyed on column <tbl>Rec::COL_<key>. The *_WITHREMAP forms also send
// MapToken notifications after sorting.
#define SORTER(tbl,key) CQuickSortMiniMdRW sort##tbl(*this, TBL_##tbl, tbl##Rec::COL_##key, false);
#define SORTER_WITHREMAP(tbl,key) CQuickSortMiniMdRW sort##tbl(*this, TBL_##tbl, tbl##Rec::COL_##key, true);
#define STABLESORTER(tbl,key) CStableSortMiniMdRW sort##tbl(*this, TBL_##tbl, tbl##Rec::COL_##key, false);
#define STABLESORTER_WITHREMAP(tbl,key) CStableSortMiniMdRW sort##tbl(*this, TBL_##tbl, tbl##Rec::COL_##key, true);
//-------------------------------------------------------------------------
643
644
645
646//********** Code. ************************************************************
647
648
649//*****************************************************************************
650// Ctor / dtor.
651//*****************************************************************************
#ifdef _DEBUG
// Debug-only switch (set from the MD_EncDelta config value) forcing ENC scopes
// to emit delta metadata; read in code:CMiniMdRW::SetOption.
static bool bENCDeltaOnly = false;
#endif

// Constructor: zero-initializes all lazily-created helpers (hashes, maps,
// filter table), sets up the Ptr-table indirection descriptors, and seeds the
// AUTO_GROW limits used to decide when the scope must grow to 4-byte indices.
CMiniMdRW::CMiniMdRW()
 : m_pMemberRefHash(0),
    m_pMemberDefHash(0),
    m_pNamedItemHash(0),
    m_pHandler(0),
    m_cbSaveSize(0),
    m_fIsReadOnly(false),
    m_bPreSaveDone(false),
    m_bPostGSSMod(false),
    m_pMethodMap(0),
    m_pFieldMap(0),
    m_pPropertyMap(0),
    m_pEventMap(0),
    m_pParamMap(0),
    m_pFilterTable(0),
    m_pHostFilter(0),
    m_pTokenRemapManager(0),
    m_fMinimalDelta(FALSE),
    m_rENCRecs(0)
{
#ifdef _DEBUG
    if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_MD_EncDelta))
    {
        bENCDeltaOnly = true;
    }
    if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_MD_MiniMDBreak))
    {
        _ASSERTE(!"CMiniMdRW::CMiniMdRW()");
    }
#endif // _DEBUG

    ZeroMemory(&m_OptionValue, sizeof(OptionValue));

    // initialize the embedded lookuptable struct. Further initialization, after constructor.
    for (ULONG ixTbl=0; ixTbl<TBL_COUNT; ++ixTbl)
    {
        m_pVS[ixTbl] = 0;
        m_pLookUpHashs[ixTbl] = 0;
    }

    // Assume that we can sort tables as needed.
    memset(m_bSortable, 1, sizeof(m_bSortable));

    // Initialize the global array of Ptr table indices.
    g_PtrTableIxs[TBL_Field].m_ixtbl = TBL_FieldPtr;
    g_PtrTableIxs[TBL_Field].m_ixcol = FieldPtrRec::COL_Field;
    g_PtrTableIxs[TBL_Method].m_ixtbl = TBL_MethodPtr;
    g_PtrTableIxs[TBL_Method].m_ixcol = MethodPtrRec::COL_Method;
    g_PtrTableIxs[TBL_Param].m_ixtbl = TBL_ParamPtr;
    g_PtrTableIxs[TBL_Param].m_ixcol = ParamPtrRec::COL_Param;
    g_PtrTableIxs[TBL_Property].m_ixtbl = TBL_PropertyPtr;
    g_PtrTableIxs[TBL_Property].m_ixcol = PropertyPtrRec::COL_Property;
    g_PtrTableIxs[TBL_Event].m_ixtbl = TBL_EventPtr;
    g_PtrTableIxs[TBL_Event].m_ixcol = EventPtrRec::COL_Event;

    // AUTO_GROW initialization
    m_maxRid = m_maxIx = 0;
    m_limIx = USHRT_MAX >> 1;
    m_limRid = USHRT_MAX >> AUTO_GROW_CODED_TOKEN_PADDING;
    m_eGrow = eg_ok;
#ifdef _DEBUG
    {
        // Verify that AUTO_GROW_CODED_TOKEN_PADDING covers the widest coded-token
        // encoding actually in use.
        ULONG iMax, iCdTkn;
        for (iMax=0, iCdTkn=0; iCdTkn<CDTKN_COUNT; ++iCdTkn)
        {
            CCodedTokenDef const *pCTD = &g_CodedTokens[iCdTkn];
            if (pCTD->m_cTokens > iMax)
                iMax = pCTD->m_cTokens;
        }
        // If assert fires, change define for AUTO_GROW_CODED_TOKEN_PADDING.
        _ASSERTE(CMiniMdRW::m_cb[iMax] == AUTO_GROW_CODED_TOKEN_PADDING);
    }
    dbg_m_pLock = NULL;
#endif //_DEBUG

} // CMiniMdRW::CMiniMdRW
731
732CMiniMdRW::~CMiniMdRW()
733{
734 // Un-initialize the embeded lookuptable struct
735 for (ULONG ixTbl=0; ixTbl<TBL_COUNT; ++ixTbl)
736 {
737 if (m_pVS[ixTbl])
738 {
739 m_pVS[ixTbl]->Uninit();
740 delete m_pVS[ixTbl];
741 }
742 if ( m_pLookUpHashs[ixTbl] != NULL )
743 delete m_pLookUpHashs[ixTbl];
744
745 }
746 if (m_pFilterTable)
747 delete m_pFilterTable;
748
749 if (m_rENCRecs)
750 delete [] m_rENCRecs;
751
752 if (m_pHandler)
753 m_pHandler->Release(), m_pHandler = 0;
754 if (m_pHostFilter)
755 m_pHostFilter->Release();
756 if (m_pMemberRefHash)
757 delete m_pMemberRefHash;
758 if (m_pMemberDefHash)
759 delete m_pMemberDefHash;
760 if (m_pNamedItemHash)
761 delete m_pNamedItemHash;
762 if (m_pMethodMap)
763 delete m_pMethodMap;
764 if (m_pFieldMap)
765 delete m_pFieldMap;
766 if (m_pPropertyMap)
767 delete m_pPropertyMap;
768 if (m_pEventMap)
769 delete m_pEventMap;
770 if (m_pParamMap)
771 delete m_pParamMap;
772 if (m_pTokenRemapManager)
773 delete m_pTokenRemapManager;
774} // CMiniMdRW::~CMiniMdRW
775
776
777//*****************************************************************************
778// return all found CAs in an enumerator
779//*****************************************************************************
__checkReturn
HRESULT
CMiniMdRW::CommonEnumCustomAttributeByName(
    mdToken tkObj, // [IN] Object with Custom Attribute.
    LPCUTF8 szName, // [IN] Name of desired Custom Attribute.
    bool fStopAtFirstFind, // [IN] just find the first one
    HENUMInternal *phEnum) // enumerator to fill up
{
    // Fills phEnum with the CustomAttribute tokens on tkObj whose type matches
    // szName. Returns S_OK if at least one was found, S_FALSE if none, or an
    // error HRESULT. Three lookup strategies: the CA lookup hash if one exists,
    // a sorted-table range scan, or a full linear scan.
    HRESULT hr = S_OK;
    HRESULT hrRet = S_FALSE; // Assume that we won't find any
    ULONG ridStart, ridEnd; // Loop start and endpoints.
    CLookUpHash *pHashTable = m_pLookUpHashs[TBL_CustomAttribute];

    _ASSERTE(phEnum != NULL);

    memset(phEnum, 0, sizeof(HENUMInternal));

    HENUMInternal::InitDynamicArrayEnum(phEnum);

    phEnum->m_tkKind = mdtCustomAttribute;

    if (pHashTable)
    {
        // table is not sorted and hash is not built so we have to create a dynamic array
        // create the dynamic enumerator.
        TOKENHASHENTRY *p;
        ULONG iHash;
        int pos;

        // Hash the data.
        iHash = HashCustomAttribute(tkObj);

        // Go through every entry in the hash chain looking for ours.
        for (p = pHashTable->FindFirst(iHash, pos);
            p;
            p = pHashTable->FindNext(pos))
        {
            // CompareCustomAttribute returns S_OK on a match (name and parent).
            IfFailGo(CompareCustomAttribute( tkObj, szName, RidFromToken(p->tok)));
            if (hr == S_OK)
            {
                hrRet = S_OK;

                // If here, found a match.
                IfFailGo( HENUMInternal::AddElementToEnum(
                    phEnum,
                    TokenFromRid(p->tok, mdtCustomAttribute)));
                if (fStopAtFirstFind)
                    goto ErrExit;
            }
        }
    }
    else
    {
        // Get the list of custom values for the parent object.
        if ( IsSorted(TBL_CustomAttribute) )
        {
            IfFailGo(getCustomAttributeForToken(tkObj, &ridEnd, &ridStart));
            // If found none, done.
            if (ridStart == 0)
                goto ErrExit;
        }
        else
        {
            // linear scan of entire table.
            ridStart = 1;
            ridEnd = getCountCustomAttributes() + 1;
        }

        // Look for one with the given name.
        for (; ridStart < ridEnd; ++ridStart)
        {
            IfFailGo(CompareCustomAttribute( tkObj, szName, ridStart));
            if (hr == S_OK)
            {
                // If here, found a match.
                hrRet = S_OK;
                IfFailGo( HENUMInternal::AddElementToEnum(
                    phEnum,
                    TokenFromRid(ridStart, mdtCustomAttribute)));
                if (fStopAtFirstFind)
                    goto ErrExit;
            }
        }
    }

ErrExit:
    // Report a real failure if one occurred; otherwise S_OK/S_FALSE for found/not-found.
    if (FAILED(hr))
        return hr;
    return hrRet;
} // CMiniMdRW::CommonEnumCustomAttributeByName
870
871
872
873//*****************************************************************************
874// return just the blob value of the first found CA matching the query.
875//*****************************************************************************
__checkReturn
HRESULT
CMiniMdRW::CommonGetCustomAttributeByNameEx( // S_OK or error.
    mdToken tkObj, // [IN] Object with Custom Attribute.
    LPCUTF8 szName, // [IN] Name of desired Custom Attribute.
    mdCustomAttribute *ptkCA, // [OUT] put custom attribute token here
    const void **ppData, // [OUT] Put pointer to data here.
    ULONG *pcbData) // [OUT] Put size of data here.
{
    // Returns the token and value blob of the first CA on tkObj matching
    // szName. S_FALSE (with no outputs written) when no match exists.
    HRESULT hr;
    const void *pData;
    ULONG cbData;
    HENUMInternal hEnum;
    mdCustomAttribute ca;
    CustomAttributeRec *pRec;

    // Find at most one matching CA (fStopAtFirstFind == true).
    hr = CommonEnumCustomAttributeByName(tkObj, szName, true, &hEnum);
    if (hr != S_OK)
        goto ErrExit;

    if (ppData != NULL || ptkCA != NULL)
    {
        // now get the record out.
        // Redirect unwanted out-params to locals so the getters can be called
        // unconditionally.
        if (ppData == 0)
            ppData = &pData;
        if (pcbData == 0)
            pcbData = &cbData;


        if (HENUMInternal::EnumNext(&hEnum, &ca))
        {
            IfFailGo(GetCustomAttributeRecord(RidFromToken(ca), &pRec));
            IfFailGo(getValueOfCustomAttribute(pRec, reinterpret_cast<const BYTE **>(ppData), pcbData));
            if (ptkCA)
                *ptkCA = ca;
        }
        else
        {
            _ASSERTE(!"Enum returned no items after EnumInit returned S_OK");
            hr = S_FALSE;
        }
    }
ErrExit:
    HENUMInternal::ClearEnum(&hEnum);
    return hr;
} // CMiniMdRW::CommonGetCustomAttributeByNameEx
922
923//*****************************************************************************
924// unmark everything in this module
925//*****************************************************************************
926__checkReturn
927HRESULT
928CMiniMdRW::UnmarkAll()
929{
930 HRESULT hr = NOERROR;
931 ULONG ulSize = 0;
932 ULONG ixTbl;
933 FilterTable *pFilter;
934
935 // find the max rec count with all tables
936 for (ixTbl = 0; ixTbl < TBL_COUNT; ++ixTbl)
937 {
938 if (GetCountRecs(ixTbl) > ulSize)
939 ulSize = GetCountRecs(ixTbl);
940 }
941 IfNullGo(pFilter = GetFilterTable());
942 IfFailGo(pFilter->UnmarkAll(this, ulSize));
943
944ErrExit:
945 return hr;
946} // CMiniMdRW::UnmarkAll
947
948
949//*****************************************************************************
950// mark everything in this module
951//*****************************************************************************
952__checkReturn
953HRESULT
954CMiniMdRW::MarkAll()
955{
956 HRESULT hr = NOERROR;
957 ULONG ulSize = 0;
958 ULONG ixTbl;
959 FilterTable *pFilter;
960
961 // find the max rec count with all tables
962 for (ixTbl = 0; ixTbl < TBL_COUNT; ++ixTbl)
963 {
964 if (GetCountRecs(ixTbl) > ulSize)
965 ulSize = GetCountRecs(ixTbl);
966 }
967 IfNullGo(pFilter = GetFilterTable());
968 IfFailGo(pFilter->MarkAll(this, ulSize));
969
970ErrExit:
971 return hr;
972} // CMiniMdRW::MarkAll
973
974//*****************************************************************************
975// This will trigger FilterTable to be created
976//*****************************************************************************
977FilterTable *CMiniMdRW::GetFilterTable()
978{
979 if (m_pFilterTable == NULL)
980 {
981 m_pFilterTable = new (nothrow) FilterTable;
982 }
983 return m_pFilterTable;
984}
985
986
987//*****************************************************************************
988// Calculate the map between TypeRef and TypeDef
989//*****************************************************************************
__checkReturn
HRESULT
CMiniMdRW::CalculateTypeRefToTypeDefMap()
{
    // Walks every TypeRef and, when a TypeDef with the same namespace/name
    // exists in this scope (and the ref is not to another assembly), records
    // the TypeDef token in the TypeRef->TypeDef map at the TypeRef's RID.
    HRESULT hr = NOERROR;
    ULONG index;
    TypeRefRec *pTypeRefRec;
    LPCSTR szName;
    LPCSTR szNamespace;
    mdToken td;
    mdToken tkResScope;

    PREFIX_ASSUME(GetTypeRefToTypeDefMap() != NULL);

    for (index = 1; index <= m_Schema.m_cRecs[TBL_TypeRef]; index++)
    {
        IfFailRet(GetTypeRefRecord(index, &pTypeRefRec));

        // Get the name and namespace of the TypeRef.
        IfFailRet(getNameOfTypeRef(pTypeRefRec, &szName));
        IfFailRet(getNamespaceOfTypeRef(pTypeRefRec, &szNamespace));
        tkResScope = getResolutionScopeOfTypeRef(pTypeRefRec);

        // If the resolutionScope is an AssemblyRef, then the type is
        // external, even if it has the same name as a type in this scope.
        if (TypeFromToken(tkResScope) == mdtAssemblyRef)
            continue;

        // Iff the name is found in the typedef table, then use
        // that value instead. Won't be found if typeref is truly external.
        // For nested types (resolution scope is itself a TypeRef), match on the
        // enclosing TypeRef as well.
        hr = ImportHelper::FindTypeDefByName(this, szNamespace, szName,
            (TypeFromToken(tkResScope) == mdtTypeRef) ? tkResScope : mdTokenNil,
            &td);
        if (hr != S_OK)
        {
            // don't propagate the error in the Find
            hr = NOERROR;
            continue;
        }
        *(GetTypeRefToTypeDefMap()->Get(index)) = td;
    }

    return hr;
} // CMiniMdRW::CalculateTypeRefToTypeDefMap
1034
1035
1036//*****************************************************************************
1037// Set a remap handler.
1038//*****************************************************************************
1039__checkReturn
1040HRESULT
1041CMiniMdRW::SetHandler(
1042 IUnknown *pIUnk)
1043{
1044 if (m_pHandler != NULL)
1045 {
1046 m_pHandler->Release();
1047 m_pHandler = NULL;
1048 }
1049
1050 if (pIUnk != NULL)
1051 {
1052 // ignore the error for QI the IHostFilter
1053 pIUnk->QueryInterface(IID_IHostFilter, reinterpret_cast<void**>(&m_pHostFilter));
1054
1055 return pIUnk->QueryInterface(IID_IMapToken, reinterpret_cast<void**>(&m_pHandler));
1056 }
1057
1058 return S_OK;
1059} // CMiniMdRW::SetHandler
1060
1061//*****************************************************************************
1062// Set a Options
1063//*****************************************************************************
__checkReturn
HRESULT
CMiniMdRW::SetOption(
    OptionValue *pOptionValue)  // [IN] Options to apply to this scope.
{
    HRESULT hr = NOERROR;
    ULONG ixTbl = 0;
    int i;

    // Cache the caller's options; everything below derives state from the copy.
    m_OptionValue = *pOptionValue;

    // Turn off delta metadata bit -- can't be used due to EE assumptions about delta PEs.
    // Inspect ApplyEditAndContinue for details.
    // To enable this, use the EnableDeltaMetadataGeneration/DisableDeltaMetadataGeneration accessors.
    _ASSERTE((m_OptionValue.m_UpdateMode & MDUpdateDelta) != MDUpdateDelta);

#ifdef _DEBUG
    // Debug-only override: force delta generation for ENC scopes when
    // bENCDeltaOnly is set (flag defined outside this block -- presumably a
    // debug switch; confirm against the rest of the file).
    if ((m_OptionValue.m_UpdateMode & MDUpdateMask) == MDUpdateENC &&
        bENCDeltaOnly)
        m_OptionValue.m_UpdateMode |= MDUpdateDelta;
#endif

    // if a scope is previously updated as incremental, then it should not be open again
    // with full update for read/write.
    //
    if ((m_Schema.m_heaps & CMiniMdSchema::HAS_DELETE) &&
        (m_OptionValue.m_UpdateMode & MDUpdateMask) == MDUpdateFull &&
        !m_fIsReadOnly)
    {
        IfFailGo( CLDB_E_BADUPDATEMODE );
    }

    // Incremental update implies logical deletes may occur; record that in the schema.
    if ((m_OptionValue.m_UpdateMode & MDUpdateMask) == MDUpdateIncremental)
        m_Schema.m_heaps |= CMiniMdSchema::HAS_DELETE;

    // Set the value of sortable based on the options.
    switch (m_OptionValue.m_UpdateMode & MDUpdateMask)
    {
    case MDUpdateFull:
        // Always sortable.
        for (ixTbl=0; ixTbl<TBL_COUNT; ++ixTbl)
            m_bSortable[ixTbl] = 1;
        break;
    case MDUpdateENC:
        // Never sortable.
        for (ixTbl=0; ixTbl<TBL_COUNT; ++ixTbl)
            m_bSortable[ixTbl] = 0;

        // Truncate some tables: re-create each listed table as empty/read-write.
        // m_TruncatedEncTables is terminated by (ULONG)-1.
        for (i=0; (ixTbl = m_TruncatedEncTables[i]) != (ULONG) -1; ++i)
        {
            m_Tables[ixTbl].Delete();
            IfFailGo(m_Tables[ixTbl].InitializeEmpty_WithRecordCount(
                m_TableDefs[ixTbl].m_cbRec,
                0
                COMMA_INDEBUG_MD(TRUE)));       // fIsReadWrite
            INDEBUG_MD(m_Tables[ixTbl].Debug_SetTableInfo(NULL, ixTbl));
            m_Schema.m_cRecs[ixTbl] = 0;
        }

        // Out-of-order is expected in an ENC scenario, never an error.
        m_OptionValue.m_ErrorIfEmitOutOfOrder = MDErrorOutOfOrderNone;

        break;
    case MDUpdateIncremental:
        // Sortable if no external token.
        for (ixTbl=0; ixTbl<TBL_COUNT; ++ixTbl)
            m_bSortable[ixTbl] = (GetTokenForTable(ixTbl) == (ULONG) -1);
        break;
    case MDUpdateExtension:
        // Never sortable.
        for (ixTbl=0; ixTbl<TBL_COUNT; ++ixTbl)
            m_bSortable[ixTbl] = 0;
        break;
    default:
        _ASSERTE(!"Internal error -- unknown save mode");
        return E_INVALIDARG;
    }

    // If this is an ENC session, track the generations.
    if (!m_fIsReadOnly && (m_OptionValue.m_UpdateMode & MDUpdateMask) == MDUpdateENC)
    {
#ifdef FEATURE_METADATA_EMIT
        ModuleRec *pMod;
        GUID encid;

        // Get the module record.
        IfFailGo(GetModuleRecord(1, &pMod));

/* Do we really want to do this? This would reset the metadata each time we changed an option
        // Copy EncId as BaseId.
        uVal = GetCol(TBL_Module, ModuleRec::COL_EncId, pMod);
        PutCol(TBL_Module, ModuleRec::COL_EncBaseId, pMod, uVal);
*/
        // Allocate a new GUID for EncId so this generation is uniquely identified.
        IfFailGo(CoCreateGuid(&encid));
        IfFailGo(PutGuid(TBL_Module, ModuleRec::COL_EncId, pMod, encid));
#else //!FEATURE_METADATA_EMIT
        IfFailGo(E_INVALIDARG);
#endif //!FEATURE_METADATA_EMIT
    }

ErrExit:
    return hr;
} // CMiniMdRW::SetOption
1169
1170//*****************************************************************************
1171// Get Options
1172//*****************************************************************************
__checkReturn
HRESULT
CMiniMdRW::GetOption(
    OptionValue *pOptionValue)  // [OUT] Receives a copy of the current option settings.
{
    // Hand back a copy of the cached options (as set by SetOption).
    *pOptionValue = m_OptionValue;
    return S_OK;
} // CMiniMdRW::GetOption
1181
1182//*****************************************************************************
1183// Smart MapToken. Only calls client if token really changed.
1184//*****************************************************************************
__checkReturn
HRESULT
CMiniMdRW::MapToken( // Return value from user callback.
    RID from, // Old rid.
    RID to, // New rid.
    mdToken tkn) // Token type.
{
    HRESULT hr = S_OK;
    TOKENREC *pTokenRec;
    MDTOKENMAP *pMovementMap;
    // If not change, done. (Only notify the client for tokens that actually moved.)
    if (from == to)
        return S_OK;

    pMovementMap = GetTokenMovementMap();
    _ASSERTE(GetTokenMovementMap() != NULL);
    // Record the movement in the map (pTokenRec is an out-param of AppendRecord;
    // the record itself is not used further here).
    if (pMovementMap != NULL)
        IfFailRet(pMovementMap->AppendRecord( TokenFromRid(from, tkn), false, TokenFromRid(to, tkn), &pTokenRec ));

    // Notify client.
    if (m_pHandler != NULL)
    {
        LOG((LOGMD, "CMiniMdRW::MapToken (remap): from 0x%08x to 0x%08x\n", TokenFromRid(from,tkn), TokenFromRid(to,tkn)));
        return m_pHandler->Map(TokenFromRid(from,tkn), TokenFromRid(to,tkn));
    }
    else
    {
        return hr;
    }
} // CMiniMdRW::MapToken
1215
1216//*****************************************************************************
1217// Set max, lim, based on data.
1218//*****************************************************************************
1219void
1220CMiniMdRW::ComputeGrowLimits(
1221 int bSmall) // large or small tables?
1222{
1223 if (bSmall)
1224 {
1225 // Tables will need to grow if any value exceeds what a two-byte column can hold.
1226 m_maxRid = m_maxIx = 0;
1227 m_limIx = USHRT_MAX >> 1;
1228 m_limRid = USHRT_MAX >> AUTO_GROW_CODED_TOKEN_PADDING;
1229 m_eGrow = eg_ok;
1230 }
1231 else
1232 {
1233 // Tables are already large
1234 m_maxRid = m_maxIx = ULONG_MAX;
1235 m_limIx = USHRT_MAX << 1;
1236 m_limRid = USHRT_MAX << 1;
1237 m_eGrow = eg_grown;
1238 }
1239} // CMiniMdRW::ComputeGrowLimits
1240
1241//*****************************************************************************
1242// Initialization of a new writable MiniMd's pools.
1243//*****************************************************************************
__checkReturn
HRESULT
CMiniMdRW::InitPoolOnMem(
    int iPool, // The pool to initialize (MDPoolStrings/Guids/Blobs/USBlobs).
    void *pbData, // The data from which to init; NULL creates a new empty pool.
    ULONG cbData, // Size of data.
    int fIsReadOnly) // Is the memory read-only?
{
    HRESULT hr;

    // Note: IfFailRet assigns to hr, so every successful switch arm leaves
    // hr set by the last initialization call.
    switch (iPool)
    {
    case MDPoolStrings:
        if (pbData == NULL)
        { // Creates new empty string heap with default empty string entry
            IfFailRet(m_StringHeap.InitializeEmpty(
                0
                COMMA_INDEBUG_MD(!fIsReadOnly)));
        }
        else
        {
            IfFailRet(m_StringHeap.Initialize(
                MetaData::DataBlob((BYTE *)pbData, cbData),
                !fIsReadOnly));
        }
        break;
    case MDPoolGuids:
        if (pbData == NULL)
        { // Creates new empty guid heap
            IfFailRet(m_GuidHeap.InitializeEmpty(
                0
                COMMA_INDEBUG_MD(!fIsReadOnly)));
        }
        else
        {
            IfFailRet(m_GuidHeap.Initialize(
                MetaData::DataBlob((BYTE *)pbData, cbData),
                !fIsReadOnly));
        }
        break;
    case MDPoolBlobs:
        if (pbData == NULL)
        {
            if (IsMinimalDelta())
            { // It's EnC minimal delta, don't include default empty blob
                IfFailRet(m_BlobHeap.InitializeEmpty_WithoutDefaultEmptyBlob(
                    0
                    COMMA_INDEBUG_MD(!fIsReadOnly)));
            }
            else
            { // Creates new empty blob heap with default empty blob entry
                IfFailRet(m_BlobHeap.InitializeEmpty(
                    0
                    COMMA_INDEBUG_MD(!fIsReadOnly)));
            }
        }
        else
        {
            IfFailRet(m_BlobHeap.Initialize(
                MetaData::DataBlob((BYTE *)pbData, cbData),
                !fIsReadOnly));
        }
        break;
    case MDPoolUSBlobs:
        if (pbData == NULL)
        {
            if (IsMinimalDelta())
            { // It's EnC minimal delta, don't include default empty user string
                IfFailRet(m_UserStringHeap.InitializeEmpty_WithoutDefaultEmptyBlob(
                    0
                    COMMA_INDEBUG_MD(!fIsReadOnly)));
            }
            else
            { // Creates new empty user string heap (with default empty !!!blob!!! entry)
                // Note: backward compatibility: doesn't add a default empty user string,
                // but a default empty blob entry
                IfFailRet(m_UserStringHeap.InitializeEmpty(
                    0
                    COMMA_INDEBUG_MD(!fIsReadOnly)));
            }
        }
        else
        {
            IfFailRet(m_UserStringHeap.Initialize(
                MetaData::DataBlob((BYTE *)pbData, cbData),
                !fIsReadOnly));
        }
        break;
    default:
        // Unknown pool id; fall out of the switch with the error in hr.
        hr = E_INVALIDARG;
    }
    return hr;
} // CMiniMdRW::InitPoolOnMem
1337
1338//*****************************************************************************
1339// Initialization of a new writable MiniMd
1340//*****************************************************************************
__checkReturn
HRESULT
CMiniMdRW::InitOnMem(
    const void *pvBuf, // The data from which to init.
    ULONG ulBufLen, // The data size
    int fIsReadOnly) // Is the memory read-only?
{
    HRESULT hr = S_OK;
    UINT32 cbSchemaSize; // Size of the schema structure.
    S_UINT32 cbTotalSize; // Size of all data used (overflow-checked arithmetic).
    BYTE *pBuf = const_cast<BYTE*>(reinterpret_cast<const BYTE*>(pvBuf));
    int i;

    // Post-construction: initialize the embedded VirtualSort structs for every
    // table that has a key column (m_iKey < m_cCols means a key exists).
    for (ULONG ixTbl = 0; ixTbl < m_TblCount; ++ixTbl)
    {
        if (m_TableDefs[ixTbl].m_iKey < m_TableDefs[ixTbl].m_cCols)
        {
            if (m_pVS[ixTbl] == NULL)
            {
                m_pVS[ixTbl] = new (nothrow) VirtualSort;
                IfNullGo(m_pVS[ixTbl]);

                m_pVS[ixTbl]->Init(ixTbl, m_TableDefs[ixTbl].m_iKey, this);
            }
        }
    }

    // Uncompress the schema from the buffer into our structures.
    IfFailGo(SchemaPopulate(pvBuf, ulBufLen, (ULONG *)&cbSchemaSize));

    // Minimal EnC deltas always use 4-byte (large) columns.
    if (m_fMinimalDelta)
        IfFailGo(InitWithLargeTables());

    // Initialize the pointers to the rest of the data: tables are laid out
    // back-to-back immediately after the schema header.
    pBuf += cbSchemaSize;
    cbTotalSize = S_UINT32(cbSchemaSize);
    for (i=0; i<(int)m_TblCount; ++i)
    {
        if (m_Schema.m_cRecs[i] > 0)
        {
            // Size of one table is rowsize * rowcount.
            S_UINT32 cbTableSize =
                S_UINT32(m_TableDefs[i].m_cbRec) *
                S_UINT32(m_Schema.m_cRecs[i]);
            if (cbTableSize.IsOverflow())
            {
                Debug_ReportError("Table is too big, its size overflows.");
                IfFailGo(METADATA_E_INVALID_FORMAT);
            }
            cbTotalSize += cbTableSize;
            if (cbTotalSize.IsOverflow())
            {
                Debug_ReportError("Total tables size is too big, their total size overflows.");
                IfFailGo(METADATA_E_INVALID_FORMAT);
            }
            IfFailGo(m_Tables[i].Initialize(
                m_TableDefs[i].m_cbRec,
                MetaData::DataBlob(pBuf, cbTableSize.Value()),
                !fIsReadOnly)); // fCopyData
            INDEBUG_MD(m_Tables[i].Debug_SetTableInfo(NULL, i));
            pBuf += cbTableSize.Value();
        }
        else
        {
            // No records: create an empty (but typed) table.
            IfFailGo(m_Tables[i].InitializeEmpty_WithRecordCount(
                m_TableDefs[i].m_cbRec,
                0
                COMMA_INDEBUG_MD(!fIsReadOnly)));
            INDEBUG_MD(m_Tables[i].Debug_SetTableInfo(NULL, i));
        }
    }

    // If the metadata is being opened for read/write, all the updateable columns
    // need to be the same width.
    if (!fIsReadOnly)
    {
        // variable to indicate if tables are large, small or mixed.
        int fMixed = false;
        int iSize = 0;
        CMiniColDef *pCols; // The col defs to init.
        int iCol;

        // Look at all the tables, or until mixed sizes are discovered.
        for (i=0; i<(int)m_TblCount && fMixed == false; i++)
        { // Look at all the columns of the table.
            pCols = m_TableDefs[i].m_pColDefs;
            for (iCol = 0; iCol < m_TableDefs[i].m_cCols && !fMixed; iCol++)
            { // If not a fixed size column...
                if (!IsFixedType(m_TableDefs[i].m_pColDefs[iCol].m_Type))
                { // If this is the first non-fixed size column...
                    if (iSize == 0)
                    { // remember it's size.
                        iSize = m_TableDefs[i].m_pColDefs[iCol].m_cbColumn;
                    }
                    else
                    { // Not first non-fixed size, so if a different size...
                        if (iSize != m_TableDefs[i].m_pColDefs[iCol].m_cbColumn)
                        { // ...the table has mixed column sizes.
                            fMixed = true;
                        }
                    }
                }
            }
        }
        if (fMixed)
        {
            // grow everything to large
            IfFailGo(ExpandTables());
            ComputeGrowLimits(FALSE /* ! small*/);
        }
        else
        {
            if (iSize == 2)
            {
                // small schema
                ComputeGrowLimits(TRUE /* small */);
            }
            else
            {
                // large schema
                ComputeGrowLimits(FALSE /* ! small */);
            }
        }
    }
    else
    {
        // Set the limits so we will know when to grow the database.
        ComputeGrowLimits(TRUE /* small */);
    }

    // Track records that this MD started with.
    m_StartupSchema = m_Schema;

    m_fIsReadOnly = fIsReadOnly ? 1 : 0;

ErrExit:
    return hr;
} // CMiniMdRW::InitOnMem
1480
1481//*****************************************************************************
1482// Validate cross-stream consistency.
1483//*****************************************************************************
__checkReturn
HRESULT
CMiniMdRW::PostInit(
    int iLevel)   // Verification level (unused here -- presumably depth of checking; confirm against callers).
{
    // No cross-stream consistency validation is performed for the RW MiniMd.
    return S_OK;
} // CMiniMdRW::PostInit
1491
1492//*****************************************************************************
1493// Init a CMiniMdRW from the data in a CMiniMd [RO].
1494//*****************************************************************************
__checkReturn
HRESULT
CMiniMdRW::InitOnRO(
    CMiniMd *pMd, // The MiniMd to update from.
    int fIsReadOnly) // Will updates be allowed?
{
    HRESULT hr = S_OK;
    ULONG i; // Loop control.

    // Init the schema.
    IfFailGo(SchemaPopulate(*pMd));

    // Allocate VS structs for tables with key columns.
    for (ULONG ixTbl = 0; ixTbl < m_TblCount; ++ixTbl)
    {
        if (m_TableDefs[ixTbl].m_iKey < m_TableDefs[ixTbl].m_cCols)
        {
            m_pVS[ixTbl] = new (nothrow) VirtualSort;
            IfNullGo(m_pVS[ixTbl]);

            m_pVS[ixTbl]->Init(ixTbl, m_TableDefs[ixTbl].m_iKey, this);
        }
    }

    // Copy over the column definitions (record layout comes from the source MD).
    for (i = 0; i < m_TblCount; ++i)
    {
        _ASSERTE(m_TableDefs[i].m_cCols == pMd->m_TableDefs[i].m_cCols);
        m_TableDefs[i].m_cbRec = pMd->m_TableDefs[i].m_cbRec;
        IfFailGo(SetNewColumnDefinition(&(m_TableDefs[i]), pMd->m_TableDefs[i].m_pColDefs, i));
    }

    // Initialize string heap (copy from source when non-empty, else create fresh).
    if (pMd->m_StringHeap.GetUnalignedSize() > 0)
    {
        IfFailGo(m_StringHeap.InitializeFromStringHeap(
            &(pMd->m_StringHeap),
            !fIsReadOnly));
    }
    else
    {
        IfFailGo(m_StringHeap.InitializeEmpty(
            0
            COMMA_INDEBUG_MD(!fIsReadOnly)));
    }

    // Initialize user string heap
    if (pMd->m_UserStringHeap.GetUnalignedSize() > 0)
    {
        IfFailGo(m_UserStringHeap.InitializeFromBlobHeap(
            &(pMd->m_UserStringHeap),
            !fIsReadOnly));
    }
    else
    {
        IfFailGo(m_UserStringHeap.InitializeEmpty(
            0
            COMMA_INDEBUG_MD(!fIsReadOnly)));
    }

    // Initialize guid heap
    if (pMd->m_GuidHeap.GetSize() > 0)
    {
        IfFailGo(m_GuidHeap.InitializeFromGuidHeap(
            &(pMd->m_GuidHeap),
            !fIsReadOnly));
    }
    else
    {
        IfFailGo(m_GuidHeap.InitializeEmpty(
            0
            COMMA_INDEBUG_MD(!fIsReadOnly)));
    }

    // Initialize blob heap
    if (pMd->m_BlobHeap.GetUnalignedSize() > 0)
    {
        IfFailGo(m_BlobHeap.InitializeFromBlobHeap(
            &(pMd->m_BlobHeap),
            !fIsReadOnly));
    }
    else
    {
        IfFailGo(m_BlobHeap.InitializeEmpty(
            0
            COMMA_INDEBUG_MD(!fIsReadOnly)));
    }

    // Init the record pools
    for (i = 0; i < m_TblCount; ++i)
    {
        if (m_Schema.m_cRecs[i] > 0)
        {
            IfFailGo(m_Tables[i].InitializeFromTable(
                &(pMd->m_Tables[i]),
                m_TableDefs[i].m_cbRec,
                m_Schema.m_cRecs[i],
                !fIsReadOnly)); // fCopyData
            INDEBUG_MD(m_Tables[i].Debug_SetTableInfo(NULL, i));

            // We set this bit to indicate the compressed, read-only tables are always sorted
            // <TODO>This is not true for all tables, so we should set it correctly and flush out resulting bugs</TODO>
            SetSorted(i, true);
        }
        else
        {
            // NOTE(review): empty tables here pre-allocate capacity for 2 records
            // (InitOnMem uses 0) -- presumably to make the first additions cheap;
            // confirm the intent before changing.
            IfFailGo(m_Tables[i].InitializeEmpty_WithRecordCount(
                m_TableDefs[i].m_cbRec,
                2
                COMMA_INDEBUG_MD(!fIsReadOnly)));
            INDEBUG_MD(m_Tables[i].Debug_SetTableInfo(NULL, i));
            // An empty table can be considered unsorted.
            SetSorted(i, false);
        }
    }

    // Set the limits so we will know when to grow the database.
    ComputeGrowLimits(TRUE /* small */);

    // Track records that this MD started with.
    m_StartupSchema = m_Schema;

    m_fIsReadOnly = fIsReadOnly ? 1 : 0;

ErrExit:
    return hr;
} // CMiniMdRW::InitOnRO
1622
1623#ifdef FEATURE_METADATA_CUSTOM_DATA_SOURCE
1624
// This checks that column sizes are reasonable for their types.
// The sizes could still be too small to hold all values in the range, or larger
// than needed, but there must exist some scenario where this size is the
// one we would use.
// As long as this validation passes + we verify that the records actually
// have space for columns of this size, then the worst thing that malicious
// data could do is be slightly inefficient, or be unable to address all their data.
1632HRESULT _ValidateColumnSize(BYTE trustedColumnType, BYTE untrustedColumnSize)
1633{
1634 // Is the field a RID into a table?
1635 if (trustedColumnType <= iCodedTokenMax)
1636 {
1637 if (untrustedColumnSize != sizeof(USHORT) && untrustedColumnSize != sizeof(ULONG))
1638 return CLDB_E_FILE_CORRUPT;
1639 }
1640 else
1641 { // Fixed type.
1642 switch (trustedColumnType)
1643 {
1644 case iBYTE:
1645 if (untrustedColumnSize != 1)
1646 return CLDB_E_FILE_CORRUPT;
1647 break;
1648 case iSHORT:
1649 case iUSHORT:
1650 if (untrustedColumnSize != 2)
1651 return CLDB_E_FILE_CORRUPT;
1652 break;
1653 case iLONG:
1654 case iULONG:
1655 if (untrustedColumnSize != 4)
1656 return CLDB_E_FILE_CORRUPT;
1657 break;
1658 case iSTRING:
1659 case iGUID:
1660 case iBLOB:
1661 if (untrustedColumnSize != 2 && untrustedColumnSize != 4)
1662 return CLDB_E_FILE_CORRUPT;
1663 break;
1664 default:
1665 _ASSERTE(!"Unexpected schema type");
1666 return CLDB_E_FILE_CORRUPT;
1667 }
1668 }
1669 return S_OK;
1670}
1671
1672__checkReturn
1673HRESULT CMiniMdRW::InitOnCustomDataSource(IMDCustomDataSource* pDataSource)
1674{
1675 HRESULT hr = S_OK;
1676 ULONG i; // Loop control.
1677 ULONG key;
1678 BOOL fIsReadOnly = TRUE;
1679 MetaData::DataBlob stringPoolData;
1680 MetaData::DataBlob userStringPoolData;
1681 MetaData::DataBlob guidHeapData;
1682 MetaData::DataBlob blobHeapData;
1683 MetaData::DataBlob tableRecordData;
1684 CMiniTableDef tableDef;
1685 BOOL sortable = FALSE;
1686
1687
1688 // the data source owns all the memory backing the storage pools, so we need to ensure it stays alive
1689 // after this method returns. When the CMiniMdRW is destroyed the reference will be released.
1690 pDataSource->AddRef();
1691 m_pCustomDataSource = pDataSource;
1692
1693 // Copy over the schema.
1694 IfFailGo(pDataSource->GetSchema(&m_Schema));
1695
1696 // Is this the "native" version of the metadata for this runtime?
1697 if ((m_Schema.m_major != METAMODEL_MAJOR_VER) || (m_Schema.m_minor != METAMODEL_MINOR_VER))
1698 {
1699 // We don't support this version of the metadata
1700 Debug_ReportError("Unsupported version of MetaData.");
1701 return PostError(CLDB_E_FILE_OLDVER, m_Schema.m_major, m_Schema.m_minor);
1702 }
1703
1704 // How big are the various pool inidices?
1705 m_iStringsMask = (m_Schema.m_heaps & CMiniMdSchema::HEAP_STRING_4) ? 0xffffffff : 0xffff;
1706 m_iGuidsMask = (m_Schema.m_heaps & CMiniMdSchema::HEAP_GUID_4) ? 0xffffffff : 0xffff;
1707 m_iBlobsMask = (m_Schema.m_heaps & CMiniMdSchema::HEAP_BLOB_4) ? 0xffffffff : 0xffff;
1708
1709 // Copy over TableDefs, column definitions and allocate VS structs for tables with key columns.
1710 for (ULONG ixTbl = 0; ixTbl < m_TblCount; ++ixTbl)
1711 {
1712 IfFailGo(pDataSource->GetTableDef(ixTbl, &tableDef));
1713 const CMiniTableDef* pTemplate = GetTableDefTemplate(ixTbl);
1714
1715 // validate that the table def looks safe
1716 // we only allow some very limited differences between the standard template and the data source
1717 key = (pTemplate->m_iKey < pTemplate->m_cCols) ? pTemplate->m_iKey : 0xFF;
1718 if (key != tableDef.m_iKey) { IfFailGo(CLDB_E_FILE_CORRUPT); }
1719 if (pTemplate->m_cCols != tableDef.m_cCols) { IfFailGo(CLDB_E_FILE_CORRUPT); }
1720 ULONG cbRec = 0;
1721 for (ULONG i = 0; i < pTemplate->m_cCols; i++)
1722 {
1723 if (tableDef.m_pColDefs == NULL) { IfFailGo(CLDB_E_FILE_CORRUPT); }
1724 if (pTemplate->m_pColDefs[i].m_Type != tableDef.m_pColDefs[i].m_Type) { IfFailGo(CLDB_E_FILE_CORRUPT); }
1725 IfFailGo(_ValidateColumnSize(pTemplate->m_pColDefs[i].m_Type, tableDef.m_pColDefs[i].m_cbColumn));
1726 // sometimes, but not always, it seems like columns get alignment padding
1727 // we'll allow it if we see it
1728 if (cbRec > tableDef.m_pColDefs[i].m_oColumn) { IfFailGo(CLDB_E_FILE_CORRUPT); }
1729 if (tableDef.m_pColDefs[i].m_oColumn > AlignUp(cbRec, tableDef.m_pColDefs[i].m_cbColumn)) { IfFailGo(CLDB_E_FILE_CORRUPT); }
1730 cbRec = tableDef.m_pColDefs[i].m_oColumn + tableDef.m_pColDefs[i].m_cbColumn;
1731 }
1732 if (tableDef.m_cbRec != cbRec) { IfFailGo(CLDB_E_FILE_CORRUPT); }
1733
1734 // tabledef passed validation, copy it in
1735 m_TableDefs[ixTbl].m_iKey = tableDef.m_iKey;
1736 m_TableDefs[ixTbl].m_cCols = tableDef.m_cCols;
1737 m_TableDefs[ixTbl].m_cbRec = tableDef.m_cbRec;
1738 IfFailGo(SetNewColumnDefinition(&(m_TableDefs[ixTbl]), tableDef.m_pColDefs, ixTbl));
1739 if (m_TableDefs[ixTbl].m_iKey < m_TableDefs[ixTbl].m_cCols)
1740 {
1741 m_pVS[ixTbl] = new (nothrow)VirtualSort;
1742 IfNullGo(m_pVS[ixTbl]);
1743
1744 m_pVS[ixTbl]->Init(ixTbl, m_TableDefs[ixTbl].m_iKey, this);
1745 }
1746 }
1747
1748 // Initialize string heap
1749 IfFailGo(pDataSource->GetStringHeap(&stringPoolData));
1750 m_StringHeap.Initialize(stringPoolData, !fIsReadOnly);
1751
1752 // Initialize user string heap
1753 IfFailGo(pDataSource->GetUserStringHeap(&userStringPoolData));
1754 m_UserStringHeap.Initialize(userStringPoolData, !fIsReadOnly);
1755
1756 // Initialize guid heap
1757 IfFailGo(pDataSource->GetGuidHeap(&guidHeapData));
1758 m_GuidHeap.Initialize(guidHeapData, !fIsReadOnly);
1759
1760 // Initialize blob heap
1761 IfFailGo(pDataSource->GetBlobHeap(&blobHeapData));
1762 m_BlobHeap.Initialize(blobHeapData, !fIsReadOnly);
1763
1764 // Init the record pools
1765 for (i = 0; i < m_TblCount; ++i)
1766 {
1767 IfFailGo(pDataSource->GetTableRecords(i, &tableRecordData));
1768 // sanity check record counts and table sizes, this also ensures that cbRec*m_cRecs[x] doesn't overflow
1769 if (m_Schema.m_cRecs[i] > 1000000) { IfFailGo(CLDB_E_FILE_CORRUPT); }
1770 if (tableRecordData.GetSize() < m_TableDefs[i].m_cbRec * m_Schema.m_cRecs[i]) { IfFailGo(CLDB_E_FILE_CORRUPT); }
1771 m_Tables[i].Initialize(m_TableDefs[i].m_cbRec, tableRecordData, !fIsReadOnly);
1772
1773 IfFailGo(pDataSource->GetTableSortable(i, &sortable));
1774 m_bSortable[i] = sortable;
1775 }
1776
1777 // Set the limits so we will know when to grow the database.
1778 ComputeGrowLimits(TRUE /* small */);
1779
1780 // Track records that this MD started with.
1781 m_StartupSchema = m_Schema;
1782
1783 m_fIsReadOnly = fIsReadOnly;
1784
1785ErrExit:
1786 return hr;
1787}
1788#endif
1789
1790//*****************************************************************************
1791// Convert a read-only to read-write. Copies data.
1792//*****************************************************************************
__checkReturn
HRESULT
CMiniMdRW::ConvertToRW()
{
    HRESULT hr = S_OK;
    int i; // Loop control.

    // Check for already done.
    if (!m_fIsReadOnly)
        return hr;

    // If this is a minimal delta, then we won't allow it to be RW
    if (IsMinimalDelta())
        return CLDB_E_INCOMPATIBLE;

    BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(return COR_E_STACKOVERFLOW);

    // Make each heap writable (this is where the copy of read-only data happens).
    IfFailGo(m_StringHeap.MakeWritable());
    IfFailGo(m_GuidHeap.MakeWritable());
    IfFailGo(m_UserStringHeap.MakeWritable());
    IfFailGo(m_BlobHeap.MakeWritable());

    // Init the record pools
    for (i = 0; i < (int)m_TblCount; ++i)
    {
        IfFailGo(m_Tables[i].MakeWritable());
    }

    // Grow the tables.
    IfFailGo(ExpandTables());

    // Track records that this MD started with.
    m_StartupSchema = m_Schema;

    // No longer read-only.
    m_fIsReadOnly = false;

ErrExit:
    ;
    END_SO_INTOLERANT_CODE;
    return hr;
} // CMiniMdRW::ConvertToRW
1835
1836//*****************************************************************************
1837// Initialization of a new writable MiniMd
1838//*****************************************************************************
__checkReturn
HRESULT
CMiniMdRW::InitNew()
{
    HRESULT hr = S_OK;
    int i; // Loop control.

    // Initialize the Schema.
    IfFailGo(m_Schema.InitNew(m_OptionValue.m_MetadataVersion));

    // Allocate VS structs for tables with key columns.
    for (ULONG ixTbl=0; ixTbl<m_TblCount; ++ixTbl)
    {
        if (m_TableDefs[ixTbl].m_iKey < m_TableDefs[ixTbl].m_cCols)
        {
            m_pVS[ixTbl] = new (nothrow) VirtualSort;
            IfNullGo(m_pVS[ixTbl]);

            m_pVS[ixTbl]->Init(ixTbl, m_TableDefs[ixTbl].m_iKey, this);
        }
    }

    // Pick standard vs. minimal preallocation sizes based on the options.
    enum MetaDataSizeIndex sizeIndex;
    sizeIndex = GetMetaDataSizeIndex(&m_OptionValue);
    if ((sizeIndex == MDSizeIndex_Standard) || (sizeIndex == MDSizeIndex_Minimal))
    {
        // OutputDebugStringA("Default small tables enabled\n");
        // How big are the various pool indices?
        m_Schema.m_heaps = 0;
        // How many rows in various tables?
        for (i = 0; i < (int)m_TblCount; ++i)
        {
            m_Schema.m_cRecs[i] = 0;
        }

        // Compute how many bits required to hold. Start small and let the
        // auto-grow machinery expand columns when limits are reached.
        m_Schema.m_rid = 1;
        m_maxRid = m_maxIx = 0;
        m_limIx = USHRT_MAX >> 1;
        m_limRid = USHRT_MAX >> AUTO_GROW_CODED_TOKEN_PADDING;
        m_eGrow = eg_ok;
    }

    // Now call base class function to calculate the offsets, sizes.
    IfFailGo(SchemaPopulate2(NULL));

    // Initialize the record heaps.
    for (i = 0; i < (int)m_TblCount; ++i)
    { // Don't really have any records yet; g_TblSizeInfo supplies preallocation counts.
        m_Schema.m_cRecs[i] = 0;
        IfFailGo(m_Tables[i].InitializeEmpty_WithRecordCount(
            m_TableDefs[i].m_cbRec,
            g_TblSizeInfo[sizeIndex][i]
            COMMA_INDEBUG_MD(TRUE)));       // fIsReadWrite
        INDEBUG_MD(m_Tables[i].Debug_SetTableInfo(NULL, i));

        // Create tables as un-sorted. We hope to add all records, then sort just once.
        SetSorted(i, false);
    }

    // Initialize heaps (g_PoolSizeInfo holds [size-in-bytes, item-count] pairs).
    IfFailGo(m_StringHeap.InitializeEmpty_WithItemsCount(
        g_PoolSizeInfo[sizeIndex][IX_STRING_POOL][0],
        g_PoolSizeInfo[sizeIndex][IX_STRING_POOL][1]
        COMMA_INDEBUG_MD(TRUE)));       // fIsReadWrite
    IfFailGo(m_BlobHeap.InitializeEmpty_WithItemsCount(
        g_PoolSizeInfo[sizeIndex][IX_BLOB_POOL][0],
        g_PoolSizeInfo[sizeIndex][IX_BLOB_POOL][1]
        COMMA_INDEBUG_MD(TRUE)));       // fIsReadWrite
    IfFailGo(m_UserStringHeap.InitializeEmpty_WithItemsCount(
        g_PoolSizeInfo[sizeIndex][IX_US_BLOB_POOL][0],
        g_PoolSizeInfo[sizeIndex][IX_US_BLOB_POOL][1]
        COMMA_INDEBUG_MD(TRUE)));       // fIsReadWrite
    IfFailGo(m_GuidHeap.InitializeEmpty_WithItemsCount(
        g_PoolSizeInfo[sizeIndex][IX_GUID_POOL][0],
        g_PoolSizeInfo[sizeIndex][IX_GUID_POOL][1]
        COMMA_INDEBUG_MD(TRUE)));       // fIsReadWrite

    // Track records that this MD started with.
    m_StartupSchema = m_Schema;

    // New db is never read-only.
    m_fIsReadOnly = false;

ErrExit:
    return hr;
} // CMiniMdRW::InitNew
1926
1927#ifdef FEATURE_PREJIT
1928//*****************************************************************************
1929// Helper function to determine the size of hot tables
1930//*****************************************************************************
// Computes how many low-order bits of a token to use as the first-level index
// for the hot-token lookup table, given the table's total item count and the
// number of hot items.
static int ShiftCount(ULONG itemCount, ULONG hotItemCount)
{
    // Width in bits of the highest rid (== itemCount).
    int ridBits = 0;
    while ((1UL << ridBits) <= itemCount)
        ++ridBits;

    // First guess: keep the top 8 bits, shifting away the rest.
    int shift = (ridBits > 8) ? (ridBits - 8) : 0;

    // Tune the shift count so that we don't need to search more than 4 hot
    // entries on average.
    while ((hotItemCount >> shift) > 4)
        ++shift;

    // Never use more than 16 bits of shift.
    return (shift > 16) ? 16 : shift;
} // ShiftCount
1947
1948//*****************************************************************************
1949// Helper function to qsort hot tokens
1950//*****************************************************************************
1951
// Pairs a hot token with a 16-bit index so that after qsort'ing by token the
// entry can still be related to its pre-sort position (presumably its slot in
// the unsorted hot-token array -- confirm against the sorting code).
typedef struct _TokenIndexPair
{
    mdToken token;  // The hot token value (sort key).
    WORD index;     // Associated index carried along through the sort.
} TokenIndexPair;
1957
// Number of low-order token bits used as the first-level table index. File-scope
// because qsort comparators take no context argument; must be set before sorting
// with TokenCmp.
static WORD shiftCount;
// qsort comparator: orders tokens so that values sharing the same low
// `shiftCount` bits (i.e. the same first-level bucket) are adjacent.
static int __cdecl TokenCmp(const void *a, const void *b)
{
    mdToken ta = ((const TokenIndexPair *)a)->token;
    mdToken tb = ((const TokenIndexPair *)b)->token;
    if (shiftCount > 0)
    {
        // shiftCount is the number of low order bits that are used to index
        // into the first level table. The below swaps high and low order bits so
        // the values with common low order bits end up together after the sort.
        ta = (ta >> shiftCount) | ((ta & ((1<<shiftCount)-1)) << (32-shiftCount));
        tb = (tb >> shiftCount) | ((tb & ((1<<shiftCount)-1)) << (32-shiftCount));
    }
    // Standard three-way comparison on the (rotated) values.
    if (ta < tb)
        return -1;
    else if (ta > tb)
        return 1;
    else
        return 0;
}
1978
1979//*****************************************************************************
1980// A wrapper for metadata's use of CorProfileData::GetHotTokens that recognizes tokens
1981// flagged with ProfilingFlags_MetaDataSearch and reinterprets them into a corresponding
1982// set of ProfilingFlags_MetaData tokens.
1983//
1984// If you are reading this because you are changing the implementation of one of the searches
1985// in CMiniMdBase, it should be a mechanical process to copy the new search code below and
1986// change the row accesses into setting values in the rowFlags array.
1987//
1988// Doing so allows us to fix the problem that incremental IBC is fundamentally not suited to
1989// metadata searches.
1990//
1991// For instance, consider the following scenario:
1992// - You gather IBC data on a scenario that does a metadata binary search
1993// - The data comes from build X where the table is of size 100 and the target is in row 18
1994// - This means the intermediate values touched are rows 50, 25, and 12
1995// - You then apply this IBC data to build Y which has changed to include 20 new entries to start the table
1996// - Naively, incremental IBC would have remapped these tokens and predicted accesses at rows 70, 35, 32, and 38
1997// - But this is wrong! And very bad for working set. The search will actually touch 60, 30, and 45 on the way to 38
1998//
1999// The solution is to only store rows in IBC data that were touched intentionally, either as direct
2000// accesses (with ProfilingFlags_MetaData) or as the result of searches (ProfilingFlags_MetaDataSearch).
2001// We then expand these "search tokens" here into the set of accesses that would occur on the current
2002// table as we do our various types of metadata search for them.
2003//
2004// Currently, we infer touches for the following types of access:
2005// - Direct access (getRow)
2006// - Binary search (CMiniMdBase::vSearchTable or CMiniMdBase::vSearchTableNotGreater)
2007// - Bounds of a multi-element span (CMiniMdBase::SearchTableForMultipleRows)
2008//
2009// In theory, we could have different flags for each type of search (e.g. binary, multiple-row, etc) and
2010// avoid any over-reporting of intermediate tokens, but in practice the IBC flag bits are scarce and
2011// measurements show a minimal (<1%) amount of over-reporting.
2012//
2013//*****************************************************************************
2014
// Per-row classification used when expanding IBC "metadata search" tokens into
// the concrete set of rows a search would touch (see the large comment above).
enum HotTokenFlags
{
    HotTokenFlags_Cold = 0x0,                         // Row not touched.
    HotTokenFlags_ProfiledAccess = 0x1,               // Row accessed directly (ProfilingFlags_MetaData).
    HotTokenFlags_IntermediateInBinarySearch = 0x2,   // Row probed during a binary search.
    HotTokenFlags_BoundingMultipleRowSearch = 0x4     // Row touched while bounding a multi-row span.
};
2022
// Computes the set of hot (IBC-profiled) tokens for table ixTbl, expanding
// each "search" token into the additional rows this build's metadata searches
// would touch (see the explanation above).
//
//    pProfileData - [IN] IBC profile data to query for hot tokens.
//    ixTbl        - [IN] Index of the table of interest.
//    pResultCount - [OUT, optional] Receives the total number of hot tokens.
//    tokenBuffer  - [OUT, optional] Receives up to maxCount hot tokens.
//    maxCount     - [IN] Capacity (in tokens) of tokenBuffer.
//
// Returns S_OK on success, or a failure HRESULT (out of memory) if the
// temporary storage cannot be allocated.  If more than maxCount tokens are
// hot, *pResultCount still receives the full count, but only the first
// maxCount tokens are written to tokenBuffer.
__checkReturn
HRESULT
CMiniMdRW::GetHotMetadataTokensSearchAware(
    CorProfileData *pProfileData,
    ULONG ixTbl,
    ULONG *pResultCount,
    mdToken *tokenBuffer,
    ULONG maxCount)
{
    HRESULT hr = S_OK;
    ULONG resultCount = 0;

    // Bit masks selecting the two kinds of IBC profiling data of interest.
    ULONG metadataAccessFlag = 1<<ProfilingFlags_MetaData;
    ULONG metadataSearchFlag = 1<<ProfilingFlags_MetaDataSearch;

    // Query the profile data to determine the number of hot search tokens
    ULONG numSearchTokens = pProfileData->GetHotTokens(ixTbl, metadataSearchFlag, metadataSearchFlag, NULL, 0);
    ULONG cRecs = GetCountRecs(ixTbl);

    if (numSearchTokens == 0 || cRecs == 0)
    {
        // If there are none, we can simply return the hot access tokens without doing any interesting work
        resultCount = pProfileData->GetHotTokens(ixTbl, metadataAccessFlag, metadataAccessFlag, tokenBuffer, maxCount);
    }
    else
    {
        // But if there are hot search tokens, we need to infer what intermediate rows will be touched by our various types of metadata searching.
        // To do so, retrieve all hot tokens and allocate temporary storage to use to mark which rows should be considered hot and for what reason
        // (i.e. an array of HotTokenFlags, one per entry in the table, indexed by RID).
        ULONG numAccessTokens = pProfileData->GetHotTokens(ixTbl, metadataAccessFlag, metadataAccessFlag, NULL, 0);

        NewArrayHolder<mdToken> searchTokens = new (nothrow) mdToken[numSearchTokens];
        IfNullGo(searchTokens);
        NewArrayHolder<mdToken> accessTokens = new (nothrow) mdToken[numAccessTokens];
        IfNullGo(accessTokens);
        // cRecs + 1 entries because the array is indexed by RID, which is 1-based.
        NewArrayHolder<BYTE> rowFlags = new (nothrow) BYTE[cRecs + 1];
        IfNullGo(rowFlags);

        pProfileData->GetHotTokens(ixTbl, metadataSearchFlag, metadataSearchFlag, searchTokens, numSearchTokens);
        pProfileData->GetHotTokens(ixTbl, metadataAccessFlag, metadataAccessFlag, accessTokens, numAccessTokens);

        // Initially, consider all rows cold
        memset(rowFlags, HotTokenFlags_Cold, cRecs + 1);

        // Category 1: Rows may have been touched directly (getRow)
        // Simply mark the corresponding entry to each access token
        for (ULONG i = 0; i < numAccessTokens; ++i)
        {
            RID rid = RidFromToken(accessTokens[i]);

            // Guard against profile data referencing rows beyond the current table size.
            if (rid <= cRecs)
            {
                rowFlags[rid] |= HotTokenFlags_ProfiledAccess;
            }
        }

        // Category 2: Rows may have been intermediate touches in a binary search (CMiniMdBase::vSearchTable or CMiniMdBase::vSearchTableNotGreater)
        // A search token may indicate where a binary search stopped, so for each of them compute and mark the intermediate set of rows that would have been touched
        for (ULONG i = 0; i < numSearchTokens; ++i)
        {
            RID rid = RidFromToken(searchTokens[i]);

            // Replay a binary search over [1, cRecs] for this RID, marking every
            // midpoint probed along the way.
            ULONG lo = 1;
            ULONG hi = cRecs;

            while (lo <= hi)
            {
                ULONG mid = (lo + hi) / 2;

                if (mid <= cRecs)
                {
                    rowFlags[mid] |= HotTokenFlags_IntermediateInBinarySearch;
                }

                // Stop once the replayed search lands on the profiled row.
                if (mid == rid)
                {
                    break;
                }

                if (mid < rid)
                    lo = mid + 1;
                else
                    hi = mid - 1;
            }
        }

        // Category 3: Rows may have been touched to find the bounds of a multiple element span (CMiniMdBase::SearchTableForMultipleRows)
        // A search token will indicate where the search stopped, so mark the first row before and after each that was not itself touched
        for (ULONG i = 0; i < numSearchTokens; ++i)
        {
            RID rid = RidFromToken(searchTokens[i]);

            // Walk down from rid - 1 to the nearest preceding row that was not
            // directly accessed.  (RIDs are unsigned, so the r <= cRecs test
            // also terminates the loop if rid - 1 underflows past zero.)
            for (RID r = rid - 1; r >= 1 && r <= cRecs; --r)
            {
                if ((rowFlags[r] & HotTokenFlags_ProfiledAccess) == 0)
                {
                    rowFlags[r] |= HotTokenFlags_BoundingMultipleRowSearch;
                    break;
                }
            }

            // Walk up from rid + 1 to the nearest following row that was not
            // directly accessed.
            for (RID r = rid + 1; r <= cRecs; ++r)
            {
                if ((rowFlags[r] & HotTokenFlags_ProfiledAccess) == 0)
                {
                    rowFlags[r] |= HotTokenFlags_BoundingMultipleRowSearch;
                    break;
                }
            }
        }

        // Now walk back over our temporary storage, counting and possibly returning the computed hot tokens
        resultCount = 0;
        for (ULONG i = 1; i <= cRecs; ++i)
        {
            if (rowFlags[i] != HotTokenFlags_Cold)
            {
                // Tokens beyond maxCount are counted but not stored.
                if (tokenBuffer != NULL && resultCount < maxCount)
                    tokenBuffer[resultCount] = TokenFromRid(i, ixTbl << 24);  // table index in the high byte forms the token type
                resultCount++;
            }
        }
    }

    if (pResultCount)
        *pResultCount = resultCount;

    ErrExit:
    return hr;
} // CMiniMdRW::GetHotMetadataTokensSearchAware
2153
2154
2155#endif //FEATURE_PREJIT
2156
2157//*****************************************************************************
2158// Determine how big the tables would be when saved.
2159//*****************************************************************************
2160__checkReturn
2161HRESULT
2162CMiniMdRW::GetFullSaveSize(
2163 CorSaveSize fSave, // [IN] cssAccurate or cssQuick.
2164 UINT32 *pcbSaveSize, // [OUT] Put the size here.
2165 DWORD *pbSaveCompressed, // [OUT] Will the saved data be fully compressed?
2166 MetaDataReorderingOptions reorderingOptions, // [IN] Metadata reordering options
2167 CorProfileData *pProfileData) // [IN] Optional IBC profile data for working set optimization
2168{
2169 HRESULT hr = S_OK;
2170 CMiniTableDef sTempTable; // Definition for a temporary table.
2171 CQuickArray<CMiniColDef> rTempCols; // Definition for a temp table's columns.
2172 BYTE SchemaBuf[sizeof(CMiniMdSchema)]; //Buffer for compressed schema.
2173 ULONG cbAlign; // Bytes needed for alignment.
2174 UINT32 cbTable; // Bytes in a table.
2175 UINT32 cbTotal; // Bytes written.
2176 int i; // Loop control.
2177
2178 _ASSERTE(m_bPreSaveDone);
2179#ifndef FEATURE_PREJIT
2180 _ASSERTE(pProfileData == NULL);
2181#endif //!FEATURE_PREJIT
2182
2183 // Determine if the stream is "fully compressed", ie no pointer tables.
2184 *pbSaveCompressed = true;
2185 for (i=0; i<(int)m_TblCount; ++i)
2186 {
2187 if (HasIndirectTable(i))
2188 {
2189 *pbSaveCompressed = false;
2190 break;
2191 }
2192 }
2193
2194 // Build the header.
2195 CMiniMdSchema Schema = m_Schema;
2196 IfFailGo(m_StringHeap.GetAlignedSize(&cbTable));
2197 if (cbTable > USHRT_MAX)
2198 {
2199 Schema.m_heaps |= CMiniMdSchema::HEAP_STRING_4;
2200 }
2201 else
2202 {
2203 Schema.m_heaps &= ~CMiniMdSchema::HEAP_STRING_4;
2204 }
2205
2206 IfFailGo(m_BlobHeap.GetAlignedSize(&cbTable));
2207 if (cbTable > USHRT_MAX)
2208 {
2209 Schema.m_heaps |= CMiniMdSchema::HEAP_BLOB_4;
2210 }
2211 else
2212 {
2213 Schema.m_heaps &= ~CMiniMdSchema::HEAP_BLOB_4;
2214 }
2215
2216 if (m_GuidHeap.GetSize() > USHRT_MAX)
2217 {
2218 Schema.m_heaps |= CMiniMdSchema::HEAP_GUID_4;
2219 }
2220 else
2221 {
2222 Schema.m_heaps &= ~CMiniMdSchema::HEAP_GUID_4;
2223 }
2224
2225 cbTotal = 0;
2226 // schema isn't saved for the hot metadata
2227 if (pProfileData == NULL)
2228 {
2229 cbTotal = Schema.SaveTo(SchemaBuf);
2230 if ( (cbAlign = Align4(cbTotal) - cbTotal) != 0)
2231 cbTotal += cbAlign;
2232 }
2233
2234 // For each table...
2235 ULONG ixTbl;
2236 for (ixTbl=0; ixTbl<m_TblCount; ++ixTbl)
2237 {
2238 if (GetCountRecs(ixTbl))
2239 {
2240 // Determine how big the compressed table will be.
2241
2242 // Allocate a def for the temporary table.
2243 sTempTable = m_TableDefs[ixTbl];
2244 if (m_eGrow == eg_grown)
2245 {
2246 IfFailGo(rTempCols.ReSizeNoThrow(sTempTable.m_cCols));
2247 sTempTable.m_pColDefs = rTempCols.Ptr();
2248
2249 // Initialize temp table col defs based on actual counts of data in the
2250 // real tables.
2251 IfFailGo(InitColsForTable(Schema, ixTbl, &sTempTable, 1, FALSE));
2252 }
2253
2254 cbTable = sTempTable.m_cbRec * GetCountRecs(ixTbl);
2255
2256#ifdef FEATURE_PREJIT
2257 if (pProfileData != NULL)
2258 {
2259 ULONG itemCount = GetCountRecs(ixTbl);
2260
2261 // determine number of rows touched in this table as indicated by IBC profile data
2262 ULONG hotItemCount = 0;
2263 IfFailGo(GetHotMetadataTokensSearchAware(pProfileData, ixTbl, &hotItemCount, NULL, 0));
2264
2265 // assume ManifestResource table is touched completely if touched at all or any hot metadata at all so far
2266 // this is because it's searched linearly, and IBC data misses an unsuccessful search
2267 // after module load
2268 if (ixTbl == TBL_ManifestResource && (hotItemCount > 0 || cbTotal != 0))
2269 hotItemCount = itemCount;
2270
2271 // if the hot subset of the rows along with their side lookup tables will occupy more space
2272 // than the full table, keep the full table to both save space and access time.
2273 if (hotItemCount <= USHRT_MAX && itemCount <= USHRT_MAX && m_TableDefs[ixTbl].m_cbRec <= SHRT_MAX)
2274 {
2275 ULONG estimatedSizeUsingSubsetCopy = hotItemCount * (sizeof(WORD) + sizeof(BYTE) + m_TableDefs[ixTbl].m_cbRec);
2276 ULONG estimatedSizeUsingFullCopy = itemCount * m_TableDefs[ixTbl].m_cbRec;
2277
2278 if (estimatedSizeUsingSubsetCopy > estimatedSizeUsingFullCopy)
2279 hotItemCount = itemCount;
2280 }
2281
2282 // first level table is array of WORD, so we can't handle more than 2**16 hot items
2283 if (hotItemCount > USHRT_MAX)
2284 hotItemCount = 0;
2285
2286 cbTable = 0;
2287 if (hotItemCount > 0)
2288 {
2289 cbTotal = Align4(cbTotal);
2290 cbTable = 5*sizeof(DWORD) + sizeof(WORD); // header: count, 4 offsets, shift count
2291 shiftCount = ShiftCount(itemCount, hotItemCount);
2292 if (hotItemCount < itemCount)
2293 {
2294 cbTable += ((1<<shiftCount) + 1) * sizeof(WORD); // 1st level table
2295 cbTable += hotItemCount*sizeof(BYTE); // 2nd level table
2296 cbTable += hotItemCount*sizeof(WORD); // Index mapping table
2297 }
2298 cbTable = Align4(cbTable); // align hot metadata on 4-byte boundary
2299 cbTable += sTempTable.m_cbRec * hotItemCount; // size of hot metadata
2300
2301 LOG((LOGMD, "CMiniMdRW::GetFullSaveSize: table %2d %5d items %3d hot items %2d shift count %4d total size\n", ixTbl, itemCount, hotItemCount, shiftCount, cbTable));
2302 }
2303 else
2304 LOG((LOGMD, "CMiniMdRW::GetFullSaveSize: table %2d %5d items\n", ixTbl, itemCount));
2305 }
2306#endif //FEATURE_PREJIT
2307 cbTotal += cbTable;
2308 }
2309 }
2310
2311 // Pad with at least 2 bytes and align on 4 bytes.
2312 cbAlign = Align4(cbTotal) - cbTotal;
2313 if (cbAlign < 2)
2314 cbAlign += 4;
2315 cbTotal += cbAlign;
2316
2317 if (pProfileData != NULL)
2318 {
2319#ifdef FEATURE_PREJIT
2320 UINT32 cbHotHeapsSize = 0;
2321
2322 IfFailGo(GetHotPoolsSaveSize(&cbHotHeapsSize, reorderingOptions, pProfileData));
2323 cbTotal += cbHotHeapsSize;
2324
2325 if (cbTotal <= 4)
2326 cbTotal = 0;
2327 else
2328 cbTotal += sizeof(UINT32) + m_TblCount*sizeof(UINT32)
2329 + 2 * sizeof(UINT32); // plus the size of hot metadata header
2330#endif //FEATURE_PREJIT
2331 }
2332 else
2333 {
2334 m_cbSaveSize = cbTotal;
2335 }
2336
2337 LOG((LOGMD, "CMiniMdRW::GetFullSaveSize: Total %ssize = %d\n", pProfileData ? "hot " : "", cbTotal));
2338
2339 *pcbSaveSize = cbTotal;
2340
2341ErrExit:
2342 return hr;
2343} // CMiniMdRW::GetFullSaveSize
2344
2345//*****************************************************************************
2346// GetSaveSize for saving just the delta (ENC) data.
2347//*****************************************************************************
2348__checkReturn
2349HRESULT
2350CMiniMdRW::GetENCSaveSize( // S_OK or error.
2351 UINT32 *pcbSaveSize) // [OUT] Put the size here.
2352{
2353 HRESULT hr = S_OK;
2354 BYTE SchemaBuf[sizeof(CMiniMdSchema)]; //Buffer for compressed schema.
2355 ULONG cbAlign; // Bytes needed for alignment.
2356 UINT32 cbTable; // Bytes in a table.
2357 UINT32 cbTotal; // Bytes written.
2358 ULONG ixTbl; // Loop control.
2359
2360 // If not saving deltas, defer to full GetSaveSize.
2361 if ((m_OptionValue.m_UpdateMode & MDUpdateDelta) != MDUpdateDelta)
2362 {
2363 DWORD bCompressed;
2364 return GetFullSaveSize(cssAccurate, pcbSaveSize, &bCompressed);
2365 }
2366
2367 // Make sure the minimal deltas have expanded tables
2368 IfFailRet(ExpandTables());
2369
2370 // Build the header.
2371 CMiniMdSchema Schema = m_Schema;
2372
2373 if (m_rENCRecs != NULL)
2374 {
2375 for (ixTbl=0; ixTbl<m_TblCount; ++ixTbl)
2376 Schema.m_cRecs[ixTbl] = m_rENCRecs[ixTbl].Count();
2377 }
2378 else
2379 {
2380 for (ixTbl=0; ixTbl<m_TblCount; ++ixTbl)
2381 Schema.m_cRecs[ixTbl] = 0;
2382 }
2383
2384 Schema.m_cRecs[TBL_Module] = m_Schema.m_cRecs[TBL_Module];
2385 Schema.m_cRecs[TBL_ENCLog] = m_Schema.m_cRecs[TBL_ENCLog];
2386 Schema.m_cRecs[TBL_ENCMap] = m_Schema.m_cRecs[TBL_ENCMap];
2387
2388 cbTotal = Schema.SaveTo(SchemaBuf);
2389 if ( (cbAlign = Align4(cbTotal) - cbTotal) != 0)
2390 cbTotal += cbAlign;
2391
2392 // Accumulate size of each table...
2393 for (ixTbl=0; ixTbl<m_TblCount; ++ixTbl)
2394 { // ENC tables are special.
2395 if (ixTbl == TBL_ENCLog || ixTbl == TBL_ENCMap || ixTbl == TBL_Module)
2396 cbTable = m_Schema.m_cRecs[ixTbl] * m_TableDefs[ixTbl].m_cbRec;
2397 else
2398 cbTable = Schema.m_cRecs[ixTbl] * m_TableDefs[ixTbl].m_cbRec;
2399 cbTotal += cbTable;
2400 }
2401
2402 // Pad with at least 2 bytes and align on 4 bytes.
2403 cbAlign = Align4(cbTotal) - cbTotal;
2404 if (cbAlign < 2)
2405 cbAlign += 4;
2406 cbTotal += cbAlign;
2407
2408 *pcbSaveSize = cbTotal;
2409 m_cbSaveSize = cbTotal;
2410
2411//ErrExit:
2412 return hr;
2413} // CMiniMdRW::GetENCSaveSize
2414
2415
2416#ifdef FEATURE_PREJIT
2417
2418// Determine the size of the hot blob data
2419//
2420__checkReturn
2421HRESULT
2422CMiniMdRW::GetHotPoolsSaveSize(
2423 UINT32 *pcbSize,
2424 MetaDataReorderingOptions reorderingOptions,
2425 CorProfileData *pProfileData)
2426{
2427 HRESULT hr = S_OK;
2428 UINT32 cbSavedDirSize = 0;
2429 UINT32 cbSavedHeapsSize = 0;
2430
2431 StreamUtil::NullStream stream;
2432 IfFailGo(SaveHotPoolsToStream(
2433 &stream,
2434 reorderingOptions,
2435 pProfileData,
2436 &cbSavedDirSize,
2437 &cbSavedHeapsSize));
2438 *pcbSize = cbSavedDirSize + cbSavedHeapsSize;
2439
2440ErrExit:
2441 return hr;
2442} // CMiniMdRW::GetHotPoolsSaveSize
2443
2444#endif //FEATURE_PREJIT
2445
2446
2447//*****************************************************************************
2448// Determine how big the tables would be when saved.
2449//*****************************************************************************
2450__checkReturn
2451HRESULT
2452CMiniMdRW::GetSaveSize(
2453 CorSaveSize fSave, // [IN] cssAccurate or cssQuick.
2454 UINT32 *pcbSaveSize, // [OUT] Put the size here.
2455 DWORD *pbSaveCompressed, // [OUT] Will the saved data be fully compressed?
2456 MetaDataReorderingOptions reorderingOptions, // [IN] Optional metadata reordering options
2457 CorProfileData *pProfileData) // [IN] Optional IBC profile data for working set optimization
2458{
2459 HRESULT hr;
2460
2461 // Prepare the data for save.
2462 IfFailGo(PreSave());
2463
2464 switch (m_OptionValue.m_UpdateMode & MDUpdateMask)
2465 {
2466 case MDUpdateFull:
2467 hr = GetFullSaveSize(fSave, pcbSaveSize, pbSaveCompressed, reorderingOptions, pProfileData);
2468 break;
2469 case MDUpdateIncremental:
2470 case MDUpdateExtension:
2471 case MDUpdateENC:
2472 hr = GetFullSaveSize(fSave, pcbSaveSize, pbSaveCompressed, NoReordering, pProfileData);
2473 // never save compressed if it is incremental compilation.
2474 *pbSaveCompressed = false;
2475 break;
2476 case MDUpdateDelta:
2477 *pbSaveCompressed = false;
2478 hr = GetENCSaveSize(pcbSaveSize);
2479 break;
2480 default:
2481 _ASSERTE(!"Internal error -- unknown save mode");
2482 return E_INVALIDARG;
2483 }
2484
2485ErrExit:
2486 return hr;
2487} // CMiniMdRW::GetSaveSize
2488
2489//*****************************************************************************
2490// Determine how big a pool would be when saved full size.
2491//*****************************************************************************
2492__checkReturn
2493HRESULT
2494CMiniMdRW::GetFullPoolSaveSize( // S_OK or error.
2495 int iPool, // The pool of interest.
2496 UINT32 *pcbSaveSize) // [OUT] Put the size here.
2497{
2498 HRESULT hr;
2499
2500 switch (iPool)
2501 {
2502 case MDPoolStrings:
2503 hr = m_StringHeap.GetAlignedSize(pcbSaveSize);
2504 break;
2505 case MDPoolGuids:
2506 *pcbSaveSize = m_GuidHeap.GetSize();
2507 hr = S_OK;
2508 break;
2509 case MDPoolBlobs:
2510 hr = m_BlobHeap.GetAlignedSize(pcbSaveSize);
2511 break;
2512 case MDPoolUSBlobs:
2513 hr = m_UserStringHeap.GetAlignedSize(pcbSaveSize);
2514 break;
2515 default:
2516 hr = E_INVALIDARG;
2517 }
2518
2519 return hr;
2520} // CMiniMdRW::GetFullPoolSaveSize
2521
2522//*****************************************************************************
2523// Determine how big a pool would be when saved ENC size.
2524//*****************************************************************************
2525__checkReturn
2526HRESULT
2527CMiniMdRW::GetENCPoolSaveSize(
2528 int iPool, // The pool of interest.
2529 UINT32 *pcbSaveSize) // [OUT] Put the size here.
2530{
2531 HRESULT hr;
2532
2533 switch (iPool)
2534 {
2535 case MDPoolStrings:
2536 IfFailRet(m_StringHeap.GetEnCSessionAddedHeapSize_Aligned(pcbSaveSize));
2537 hr = S_OK;
2538 break;
2539 case MDPoolGuids:
2540 // We never save delta guid heap, we save full guid heap everytime
2541 *pcbSaveSize = m_GuidHeap.GetSize();
2542 hr = S_OK;
2543 break;
2544 case MDPoolBlobs:
2545 IfFailRet(m_BlobHeap.GetEnCSessionAddedHeapSize_Aligned(pcbSaveSize));
2546 hr = S_OK;
2547 break;
2548 case MDPoolUSBlobs:
2549 IfFailRet(m_UserStringHeap.GetEnCSessionAddedHeapSize_Aligned(pcbSaveSize));
2550 hr = S_OK;
2551 break;
2552 default:
2553 hr = E_INVALIDARG;
2554 }
2555
2556 return hr;
2557} // CMiniMdRW::GetENCPoolSaveSize
2558
2559//*****************************************************************************
2560// Determine how big a pool would be when saved.
2561//*****************************************************************************
2562__checkReturn
2563HRESULT
2564CMiniMdRW::GetPoolSaveSize(
2565 int iPool, // The pool of interest.
2566 UINT32 *pcbSaveSize) // [OUT] Put the size here.
2567{
2568 HRESULT hr;
2569
2570 switch (m_OptionValue.m_UpdateMode & MDUpdateMask)
2571 {
2572 case MDUpdateFull:
2573 case MDUpdateIncremental:
2574 case MDUpdateExtension:
2575 case MDUpdateENC:
2576 hr = GetFullPoolSaveSize(iPool, pcbSaveSize);
2577 break;
2578 case MDUpdateDelta:
2579 hr = GetENCPoolSaveSize(iPool, pcbSaveSize);
2580 break;
2581 default:
2582 _ASSERTE(!"Internal error -- unknown save mode");
2583 return E_INVALIDARG;
2584 }
2585
2586 return hr;
2587} // CMiniMdRW::GetPoolSaveSize
2588
2589//*****************************************************************************
2590// Is the given pool empty?
2591//*****************************************************************************
2592int CMiniMdRW::IsPoolEmpty( // True or false.
2593 int iPool) // The pool of interest.
2594{
2595 switch (iPool)
2596 {
2597 case MDPoolStrings:
2598 return m_StringHeap.IsEmpty();
2599 case MDPoolGuids:
2600 return m_GuidHeap.IsEmpty();
2601 case MDPoolBlobs:
2602 return m_BlobHeap.IsEmpty();
2603 case MDPoolUSBlobs:
2604 return m_UserStringHeap.IsEmpty();
2605 }
2606 return true;
2607} // CMiniMdRW::IsPoolEmpty
2608
2609// --------------------------------------------------------------------------------------
2610//
2611// Gets user string (*Data) at index (nIndex) and fills the index (*pnNextIndex) of the next user string
2612// in the heap.
2613// Returns S_OK and fills the string (*pData) and the next index (*pnNextIndex).
2614// Returns S_FALSE if the index (nIndex) is not valid user string index.
2615// Returns error code otherwise.
2616// Clears *pData and sets *pnNextIndex to 0 on error or S_FALSE.
2617//
2618__checkReturn
2619HRESULT
2620CMiniMdRW::GetUserStringAndNextIndex(
2621 UINT32 nIndex,
2622 MetaData::DataBlob *pData,
2623 UINT32 *pnNextIndex)
2624{
2625 HRESULT hr = S_OK;
2626 MINIMD_POSSIBLE_INTERNAL_POINTER_EXPOSED();
2627
2628 // First check that the index is valid to avoid debug error reporting
2629 // If this turns out to be slow, then we can add a new API to BlobHeap "GetBlobWithSizePrefix_DontFail"
2630 // to merge this check with following GetBlobWithSizePrefix call
2631 if (!m_UserStringHeap.IsValidIndex(nIndex))
2632 {
2633 return S_FALSE;
2634 }
2635
2636 // Get user string at index nIndex (verifies that the user string is in the heap)
2637 IfFailGo(m_UserStringHeap.GetBlobWithSizePrefix(
2638 nIndex,
2639 pData));
2640 _ASSERTE(hr == S_OK);
2641
2642 // Get index behind the user string - doesn't overflow, because the user string is in the heap
2643 *pnNextIndex = nIndex + pData->GetSize();
2644
2645 UINT32 cbUserStringSize_Ignore;
2646 if (!pData->GetCompressedU(&cbUserStringSize_Ignore))
2647 {
2648 Debug_ReportInternalError("There's a bug, because previous call to GetBlobWithSizePrefix succeeded.");
2649 IfFailGo(METADATA_E_INTERNAL_ERROR);
2650 }
2651 return S_OK;
2652
2653ErrExit:
2654 // Fill output parameters on error
2655 *pnNextIndex = 0;
2656 pData->Clear();
2657
2658 return hr;
2659} // CMiniMdRW::GetUserStringAndNextIndex
2660
2661//*****************************************************************************
2662// Initialized TokenRemapManager
2663//*****************************************************************************
2664__checkReturn
2665HRESULT
2666CMiniMdRW::InitTokenRemapManager()
2667{
2668 HRESULT hr = NOERROR;
2669
2670 if (m_pTokenRemapManager == NULL)
2671 {
2672 // allocate TokenRemapManager
2673 m_pTokenRemapManager = new (nothrow) TokenRemapManager;
2674 IfNullGo(m_pTokenRemapManager);
2675 }
2676
2677 // initialize the ref to def optimization map
2678 IfFailGo( m_pTokenRemapManager->ClearAndEnsureCapacity(m_Schema.m_cRecs[TBL_TypeRef], m_Schema.m_cRecs[TBL_MemberRef]));
2679
2680ErrExit:
2681 return hr;
2682} // CMiniMdRW::InitTokenRemapManager
2683
2684//*****************************************************************************
2685// Debug code to check whether a table's objects can have custom attributes
2686// attached.
2687//*****************************************************************************
2688#ifdef _DEBUG
2689bool CMiniMdRW::CanHaveCustomAttribute( // Can a given table have a custom attribute token?
2690 ULONG ixTbl) // Table in question.
2691{
2692 mdToken tk = GetTokenForTable(ixTbl);
2693 size_t ix;
2694 for (ix=0; ix<g_CodedTokens[CDTKN_HasCustomAttribute].m_cTokens; ++ix)
2695 if (g_CodedTokens[CDTKN_HasCustomAttribute].m_pTokens[ix] == tk)
2696 return true;
2697 return false;
2698} // CMiniMdRW::CanHaveCustomAttribute
2699#endif //_DEBUG
2700
2701#ifdef _PREFAST_
2702#pragma warning(push)
2703#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
2704#endif
2705//---------------------------------------------------------------------------------------
2706//
2707// Perform any available pre-save optimizations.
2708//
2709__checkReturn
2710HRESULT
2711CMiniMdRW::PreSaveFull()
2712{
2713 HRESULT hr = S_OK;
2714 RID ridPtr; // A RID from a pointer table.
2715
2716 if (m_bPreSaveDone)
2717 return hr;
2718
2719 // Don't yet know what the save size will be.
2720 m_cbSaveSize = 0;
2721 m_bSaveCompressed = false;
2722
2723 // Convert any END_OF_TABLE values for tables with child pointer tables.
2724 IfFailGo(ConvertMarkerToEndOfTable(
2725 TBL_TypeDef,
2726 TypeDefRec::COL_MethodList,
2727 m_Schema.m_cRecs[TBL_Method] + 1,
2728 m_Schema.m_cRecs[TBL_TypeDef]));
2729 IfFailGo(ConvertMarkerToEndOfTable(
2730 TBL_TypeDef,
2731 TypeDefRec::COL_FieldList,
2732 m_Schema.m_cRecs[TBL_Field] + 1,
2733 m_Schema.m_cRecs[TBL_TypeDef]));
2734 IfFailGo(ConvertMarkerToEndOfTable(
2735 TBL_Method,
2736 MethodRec::COL_ParamList,
2737 m_Schema.m_cRecs[TBL_Param]+1,
2738 m_Schema.m_cRecs[TBL_Method]));
2739 IfFailGo(ConvertMarkerToEndOfTable(
2740 TBL_PropertyMap,
2741 PropertyMapRec::COL_PropertyList,
2742 m_Schema.m_cRecs[TBL_Property] + 1,
2743 m_Schema.m_cRecs[TBL_PropertyMap]));
2744 IfFailGo(ConvertMarkerToEndOfTable(
2745 TBL_EventMap,
2746 EventMapRec::COL_EventList,
2747 m_Schema.m_cRecs[TBL_Event] + 1,
2748 m_Schema.m_cRecs[TBL_EventMap]));
2749
2750 // If there is a handler and in "Full" mode, eliminate the intermediate tables.
2751 if ((m_pHandler != NULL) && ((m_OptionValue.m_UpdateMode &MDUpdateMask) == MDUpdateFull))
2752 {
2753 // If there is a handler, and not in E&C, save as fully compressed.
2754 m_bSaveCompressed = true;
2755
2756 // Temporary tables for new Fields, Methods, Params and FieldLayouts.
2757 MetaData::TableRW newFields;
2758 IfFailGo(newFields.InitializeEmpty_WithRecordCount(
2759 m_TableDefs[TBL_Field].m_cbRec,
2760 m_Schema.m_cRecs[TBL_Field]
2761 COMMA_INDEBUG_MD(TRUE)));
2762 INDEBUG_MD(newFields.Debug_SetTableInfo("TBL_Field", TBL_Field));
2763
2764 MetaData::TableRW newMethods;
2765 IfFailGo(newMethods.InitializeEmpty_WithRecordCount(
2766 m_TableDefs[TBL_Method].m_cbRec,
2767 m_Schema.m_cRecs[TBL_Method]
2768 COMMA_INDEBUG_MD(TRUE)));
2769 INDEBUG_MD(newMethods.Debug_SetTableInfo("TBL_Method", TBL_Method));
2770
2771 MetaData::TableRW newParams;
2772 IfFailGo(newParams.InitializeEmpty_WithRecordCount(
2773 m_TableDefs[TBL_Param].m_cbRec,
2774 m_Schema.m_cRecs[TBL_Param]
2775 COMMA_INDEBUG_MD(TRUE)));
2776 INDEBUG_MD(newParams.Debug_SetTableInfo("TBL_Param", TBL_Param));
2777
2778 MetaData::TableRW newEvents;
2779 IfFailGo(newEvents.InitializeEmpty_WithRecordCount(
2780 m_TableDefs[TBL_Event].m_cbRec,
2781 m_Schema.m_cRecs[TBL_Event]
2782 COMMA_INDEBUG_MD(TRUE)));
2783 INDEBUG_MD(newEvents.Debug_SetTableInfo("TBL_Event", TBL_Event));
2784
2785 MetaData::TableRW newPropertys;
2786 IfFailGo(newPropertys.InitializeEmpty_WithRecordCount(
2787 m_TableDefs[TBL_Property].m_cbRec,
2788 m_Schema.m_cRecs[TBL_Property]
2789 COMMA_INDEBUG_MD(TRUE)));
2790 INDEBUG_MD(newPropertys.Debug_SetTableInfo("TBL_Property", TBL_Property));
2791
2792 // If we have any indirect table for Field or Method and we are about to reorder these
2793 // tables, the MemberDef hash table will be invalid after the token movement. So invalidate
2794 // the hash.
2795 if ((HasIndirectTable(TBL_Field) || HasIndirectTable(TBL_Method)) && (m_pMemberDefHash != NULL))
2796 {
2797 delete m_pMemberDefHash;
2798 m_pMemberDefHash = NULL;
2799 }
2800
2801 // Enumerate fields and copy.
2802 if (HasIndirectTable(TBL_Field))
2803 {
2804 for (ridPtr = 1; ridPtr <= m_Schema.m_cRecs[TBL_Field]; ++ridPtr)
2805 {
2806 BYTE * pOldPtr;
2807 IfFailGo(m_Tables[TBL_FieldPtr].GetRecord(ridPtr, &pOldPtr));
2808 RID ridOld;
2809 ridOld = GetCol(TBL_FieldPtr, FieldPtrRec::COL_Field, pOldPtr);
2810 BYTE * pOld;
2811 IfFailGo(m_Tables[TBL_Field].GetRecord(ridOld, &pOld));
2812 RID ridNew;
2813 BYTE * pNew;
2814 IfFailGo(newFields.AddRecord(&pNew, (UINT32 *)&ridNew));
2815 _ASSERTE(ridNew == ridPtr);
2816 memcpy(pNew, pOld, m_TableDefs[TBL_Field].m_cbRec);
2817
2818 // Let the caller know of the token change.
2819 IfFailGo(MapToken(ridOld, ridNew, mdtFieldDef));
2820 }
2821 }
2822
2823 // Enumerate methods and copy.
2824 if (HasIndirectTable(TBL_Method) || HasIndirectTable(TBL_Param))
2825 {
2826 for (ridPtr = 1; ridPtr <= m_Schema.m_cRecs[TBL_Method]; ++ridPtr)
2827 {
2828 MethodRec * pOld;
2829 RID ridOld;
2830 BYTE * pNew = NULL;
2831 if (HasIndirectTable(TBL_Method))
2832 {
2833 BYTE * pOldPtr;
2834 IfFailGo(m_Tables[TBL_MethodPtr].GetRecord(ridPtr, &pOldPtr));
2835 ridOld = GetCol(TBL_MethodPtr, MethodPtrRec::COL_Method, pOldPtr);
2836 IfFailGo(GetMethodRecord(ridOld, &pOld));
2837 RID ridNew;
2838 IfFailGo(newMethods.AddRecord(&pNew, (UINT32 *)&ridNew));
2839 _ASSERTE(ridNew == ridPtr);
2840 memcpy(pNew, pOld, m_TableDefs[TBL_Method].m_cbRec);
2841
2842 // Let the caller know of the token change.
2843 IfFailGo(MapToken(ridOld, ridNew, mdtMethodDef));
2844 }
2845 else
2846 {
2847 ridOld = ridPtr;
2848 IfFailGo(GetMethodRecord(ridPtr, &pOld));
2849 }
2850
2851 // Handle the params of the method.
2852 if (HasIndirectTable(TBL_Method))
2853 {
2854 IfFailGo(PutCol(TBL_Method, MethodRec::COL_ParamList, pNew, newParams.GetRecordCount() + 1));
2855 }
2856 RID ixStart = getParamListOfMethod(pOld);
2857 RID ixEnd;
2858 IfFailGo(getEndParamListOfMethod(ridOld, &ixEnd));
2859 for (; ixStart<ixEnd; ++ixStart)
2860 {
2861 RID ridParam;
2862 if (HasIndirectTable(TBL_Param))
2863 {
2864 BYTE * pOldPtr;
2865 IfFailGo(m_Tables[TBL_ParamPtr].GetRecord(ixStart, &pOldPtr));
2866 ridParam = GetCol(TBL_ParamPtr, ParamPtrRec::COL_Param, pOldPtr);
2867 }
2868 else
2869 {
2870 ridParam = ixStart;
2871 }
2872 BYTE * pOldRecord;
2873 IfFailGo(m_Tables[TBL_Param].GetRecord(ridParam, &pOldRecord));
2874 RID ridNew;
2875 BYTE * pNewRecord;
2876 IfFailGo(newParams.AddRecord(&pNewRecord, (UINT32 *)&ridNew));
2877 memcpy(pNewRecord, pOldRecord, m_TableDefs[TBL_Param].m_cbRec);
2878
2879 // Let the caller know of the token change.
2880 IfFailGo(MapToken(ridParam, ridNew, mdtParamDef));
2881 }
2882 }
2883 }
2884
2885 // Get rid of EventPtr and PropertyPtr table as well
2886 // Enumerate fields and copy.
2887 if (HasIndirectTable(TBL_Event))
2888 {
2889 for (ridPtr = 1; ridPtr <= m_Schema.m_cRecs[TBL_Event]; ++ridPtr)
2890 {
2891 BYTE * pOldPtr;
2892 IfFailGo(m_Tables[TBL_EventPtr].GetRecord(ridPtr, &pOldPtr));
2893 RID ridOld;
2894 ridOld = GetCol(TBL_EventPtr, EventPtrRec::COL_Event, pOldPtr);
2895 BYTE * pOld;
2896 IfFailGo(m_Tables[TBL_Event].GetRecord(ridOld, &pOld));
2897 RID ridNew;
2898 BYTE * pNew;
2899 IfFailGo(newEvents.AddRecord(&pNew, (UINT32 *)&ridNew));
2900 _ASSERTE(ridNew == ridPtr);
2901 memcpy(pNew, pOld, m_TableDefs[TBL_Event].m_cbRec);
2902
2903 // Let the caller know of the token change.
2904 IfFailGo(MapToken(ridOld, ridNew, mdtEvent));
2905 }
2906 }
2907
2908 if (HasIndirectTable(TBL_Property))
2909 {
2910 for (ridPtr = 1; ridPtr <= m_Schema.m_cRecs[TBL_Property]; ++ridPtr)
2911 {
2912 BYTE * pOldPtr;
2913 IfFailGo(m_Tables[TBL_PropertyPtr].GetRecord(ridPtr, &pOldPtr));
2914 RID ridOld;
2915 ridOld = GetCol(TBL_PropertyPtr, PropertyPtrRec::COL_Property, pOldPtr);
2916 BYTE * pOld;
2917 IfFailGo(m_Tables[TBL_Property].GetRecord(ridOld, &pOld));
2918 RID ridNew;
2919 BYTE * pNew;
2920 IfFailGo(newPropertys.AddRecord(&pNew, (UINT32 *)&ridNew));
2921 _ASSERTE(ridNew == ridPtr);
2922 memcpy(pNew, pOld, m_TableDefs[TBL_Property].m_cbRec);
2923
2924 // Let the caller know of the token change.
2925 IfFailGo(MapToken(ridOld, ridNew, mdtProperty));
2926 }
2927 }
2928
2929
2930 // Replace the old tables with the new, sorted ones.
2931 if (HasIndirectTable(TBL_Field))
2932 {
2933 m_Tables[TBL_Field].Delete();
2934 IfFailGo(m_Tables[TBL_Field].InitializeFromTable(
2935 &newFields,
2936 TRUE)); // fCopyData
2937 }
2938 if (HasIndirectTable(TBL_Method))
2939 {
2940 m_Tables[TBL_Method].Delete();
2941 IfFailGo(m_Tables[TBL_Method].InitializeFromTable(
2942 &newMethods,
2943 TRUE)); // fCopyData
2944 }
2945 if (HasIndirectTable(TBL_Method) || HasIndirectTable(TBL_Param))
2946 {
2947 m_Tables[TBL_Param].Delete();
2948 IfFailGo(m_Tables[TBL_Param].InitializeFromTable(
2949 &newParams,
2950 TRUE)); // fCopyData
2951 }
2952 if (HasIndirectTable(TBL_Property))
2953 {
2954 m_Tables[TBL_Property].Delete();
2955 IfFailGo(m_Tables[TBL_Property].InitializeFromTable(
2956 &newPropertys,
2957 TRUE)); // fCopyData
2958 }
2959 if (HasIndirectTable(TBL_Event))
2960 {
2961 m_Tables[TBL_Event].Delete();
2962 IfFailGo(m_Tables[TBL_Event].InitializeFromTable(
2963 &newEvents,
2964 TRUE)); // fCopyData
2965 }
2966
2967 // Empty the pointer tables table.
2968 m_Schema.m_cRecs[TBL_FieldPtr] = 0;
2969 m_Schema.m_cRecs[TBL_MethodPtr] = 0;
2970 m_Schema.m_cRecs[TBL_ParamPtr] = 0;
2971 m_Schema.m_cRecs[TBL_PropertyPtr] = 0;
2972 m_Schema.m_cRecs[TBL_EventPtr] = 0;
2973
2974 // invalidated the parent look up tables
2975 if (m_pMethodMap)
2976 {
2977 delete m_pMethodMap;
2978 m_pMethodMap = NULL;
2979 }
2980 if (m_pFieldMap)
2981 {
2982 delete m_pFieldMap;
2983 m_pFieldMap = NULL;
2984 }
2985 if (m_pPropertyMap)
2986 {
2987 delete m_pPropertyMap;
2988 m_pPropertyMap = NULL;
2989 }
2990 if (m_pEventMap)
2991 {
2992 delete m_pEventMap;
2993 m_pEventMap = NULL;
2994 }
2995 if (m_pParamMap)
2996 {
2997 delete m_pParamMap;
2998 m_pParamMap = NULL;
2999 }
3000 }
3001
3002 // Do the ref to def fixup before fix up with token movement
3003 IfFailGo(FixUpRefToDef());
3004
3005 ////////////////////////////////////////////////////////////////////////////
3006 //
3007 // We now need to do two kinds of fixups, and the two fixups interact with
3008 // each other.
3009 // 1) We need to sort several tables for binary searching.
3010 // 2) We need to fixup any references to other tables, which may have
3011 // changed due to ref-to-def, ptr-table elimination, or sorting.
3012 //
3013
3014
3015 // First do fixups. Some of these are then sorted based on fixed-up columns.
3016
3017 IfFailGo(FixUpTable(TBL_MemberRef));
3018 IfFailGo(FixUpTable(TBL_MethodSemantics));
3019 IfFailGo(FixUpTable(TBL_Constant));
3020 IfFailGo(FixUpTable(TBL_FieldMarshal));
3021 IfFailGo(FixUpTable(TBL_MethodImpl));
3022 IfFailGo(FixUpTable(TBL_DeclSecurity));
3023 IfFailGo(FixUpTable(TBL_ImplMap));
3024 IfFailGo(FixUpTable(TBL_FieldRVA));
3025 IfFailGo(FixUpTable(TBL_FieldLayout));
3026
3027 if (SupportsGenerics())
3028 {
3029 IfFailGo(FixUpTable(TBL_GenericParam));
3030 IfFailGo(FixUpTable(TBL_MethodSpec));
3031 }
3032
3033 // Now sort any tables that are allowed to have custom attributes.
3034 // This block for tables sorted in full mode only -- basically
3035 // tables for which we hand out tokens.
3036 if ((m_OptionValue.m_UpdateMode & MDUpdateMask) == MDUpdateFull)
3037 {
3038 if (SupportsGenerics())
3039 {
3040 // Sort the GenericParam table by the Owner.
3041 // Don't disturb the sequence ordering within Owner
3042 STABLESORTER_WITHREMAP(GenericParam, Owner);
3043 IfFailGo(sortGenericParam.Sort());
3044 }
3045
3046 // Sort the InterfaceImpl table by class.
3047 STABLESORTER_WITHREMAP(InterfaceImpl, Class);
3048 IfFailGo(sortInterfaceImpl.Sort());
3049
3050 // Sort the DeclSecurity table by parent.
3051 SORTER_WITHREMAP(DeclSecurity, Parent);
3052 IfFailGo(sortDeclSecurity.Sort());
3053 }
3054
3055 // The GenericParamConstraint table is parented to the GenericParam table,
3056 // so it needs fixup after sorting GenericParam table.
3057 if (SupportsGenerics())
3058 {
3059 IfFailGo(FixUpTable(TBL_GenericParamConstraint));
3060
3061 // After fixing up the GenericParamConstraint table, we can then
3062 // sort it.
3063 if ((m_OptionValue.m_UpdateMode & MDUpdateMask) == MDUpdateFull)
3064 {
3065 // Sort the GenericParamConstraint table by the Owner.
3066 // Don't disturb the sequence ordering within Owner
3067 STABLESORTER_WITHREMAP(GenericParamConstraint, Owner);
3068 IfFailGo(sortGenericParamConstraint.Sort());
3069 }
3070 }
3071 // Fixup the custom attribute table. After this, do not sort any table
3072 // that is allowed to have a custom attribute.
3073 IfFailGo(FixUpTable(TBL_CustomAttribute));
3074
3075 // Sort tables for binary searches.
3076 if (((m_OptionValue.m_UpdateMode & MDUpdateMask) == MDUpdateFull) ||
3077 ((m_OptionValue.m_UpdateMode & MDUpdateMask) == MDUpdateIncremental))
3078 {
3079 // Sort tables as required
3080 //-------------------------------------------------------------------------
3081 // Module order is preserved
3082 // TypeRef order is preserved
3083 // TypeDef order is preserved
3084 // Field grouped and pointed to by TypeDef
3085 // Method grouped and pointed to by TypeDef
3086 // Param grouped and pointed to by Method
3087 // InterfaceImpl sorted here
3088 // MemberRef order is preserved
3089 // Constant sorted here
3090 // CustomAttribute sorted INCORRECTLY!! here
3091 // FieldMarshal sorted here
3092 // DeclSecurity sorted here
3093 // ClassLayout created in order with TypeDefs
3094 // FieldLayout grouped and pointed to by ClassLayouts
3095 // StandaloneSig order is preserved
3096 // TypeSpec order is preserved
3097 // EventMap created in order at conversion (by Event Parent)
3098 // Event sorted by Parent at conversion
3099 // PropertyMap created in order at conversion (by Property Parent)
3100 // Property sorted by Parent at conversion
3101 // MethodSemantics sorted by Association at conversion.
3102 // MethodImpl sorted here.
3103 // Sort the constant table by parent.
3104 // Sort the nested class table by NestedClass.
3105 // Sort the generic par table by Owner
3106 // MethodSpec order is preserved
3107
3108 // Always sort Constant table
3109 _ASSERTE(!CanHaveCustomAttribute(TBL_Constant));
3110 SORTER(Constant, Parent);
3111 sortConstant.Sort();
3112
3113 // Always sort the FieldMarshal table by Parent.
3114 _ASSERTE(!CanHaveCustomAttribute(TBL_FieldMarshal));
3115 SORTER(FieldMarshal, Parent);
3116 sortFieldMarshal.Sort();
3117
3118 // Always sort the MethodSematics
3119 _ASSERTE(!CanHaveCustomAttribute(TBL_MethodSemantics));
3120 SORTER(MethodSemantics, Association);
3121 sortMethodSemantics.Sort();
3122
3123 // Always Sort the ClassLayoutTable by parent.
3124 _ASSERTE(!CanHaveCustomAttribute(TBL_ClassLayout));
3125 SORTER(ClassLayout, Parent);
3126 sortClassLayout.Sort();
3127
3128 // Always Sort the FieldLayoutTable by parent.
3129 _ASSERTE(!CanHaveCustomAttribute(TBL_FieldLayout));
3130 SORTER(FieldLayout, Field);
3131 sortFieldLayout.Sort();
3132
3133 // Always Sort the ImplMap table by the parent.
3134 _ASSERTE(!CanHaveCustomAttribute(TBL_ImplMap));
3135 SORTER(ImplMap, MemberForwarded);
3136 sortImplMap.Sort();
3137
3138 // Always Sort the FieldRVA table by the Field.
3139 _ASSERTE(!CanHaveCustomAttribute(TBL_FieldRVA));
3140 SORTER(FieldRVA, Field);
3141 sortFieldRVA.Sort();
3142
3143 // Always Sort the NestedClass table by the NestedClass.
3144 _ASSERTE(!CanHaveCustomAttribute(TBL_NestedClass));
3145 SORTER(NestedClass, NestedClass);
3146 sortNestedClass.Sort();
3147
3148 // Always Sort the MethodImpl table by the Class.
3149 _ASSERTE(!CanHaveCustomAttribute(TBL_MethodImpl));
3150 SORTER(MethodImpl, Class);
3151 sortMethodImpl.Sort();
3152
3153 // Some tokens are not moved in ENC mode; only "full" mode.
3154 if ((m_OptionValue.m_UpdateMode & MDUpdateMask) == MDUpdateFull)
3155 {
3156 // Sort the CustomAttribute table by parent.
3157 _ASSERTE(!CanHaveCustomAttribute(TBL_CustomAttribute));
3158 SORTER_WITHREMAP(CustomAttribute, Parent);
3159 IfFailGo(sortCustomAttribute.Sort());
3160 }
3161
3162 // Determine if the PropertyMap and EventMap are already sorted, and set the flag appropriately
3163 SORTER(PropertyMap, Parent);
3164 sortPropertyMap.CheckSortedWithNoDuplicates();
3165
3166 SORTER(EventMap, Parent);
3167 sortEventMap.CheckSortedWithNoDuplicates();
3168
3169 //-------------------------------------------------------------------------
3170 } // enclosing scope required for initialization ("goto" above skips initialization).
3171
3172 m_bPreSaveDone = true;
3173
3174 // send the Ref->Def optmization notification to host
3175 if (m_pHandler != NULL)
3176 {
3177 TOKENMAP * ptkmap = GetMemberRefToMemberDefMap();
3178 PREFIX_ASSUME(ptkmap != NULL); // RegMeta always inits this.
3179 MDTOKENMAP * ptkRemap = GetTokenMovementMap();
3180 int iCount = m_Schema.m_cRecs[TBL_MemberRef];
3181 mdToken tkTo;
3182 mdToken tkDefTo;
3183 int i;
3184 MemberRefRec * pMemberRefRec; // A MemberRefRec.
3185 const COR_SIGNATURE * pvSig; // Signature of the MemberRef.
3186 ULONG cbSig; // Size of the signature blob.
3187
3188 // loop through all LocalVar
3189 for (i = 1; i <= iCount; i++)
3190 {
3191 tkTo = *(ptkmap->Get(i));
3192 if (RidFromToken(tkTo) != mdTokenNil)
3193 {
3194 // so far, the parent of memberref can be changed to only fielddef or methoddef
3195 // or it will remain unchanged.
3196 //
3197 _ASSERTE((TypeFromToken(tkTo) == mdtFieldDef) || (TypeFromToken(tkTo) == mdtMethodDef));
3198
3199 IfFailGo(GetMemberRefRecord(i, &pMemberRefRec));
3200 IfFailGo(getSignatureOfMemberRef(pMemberRefRec, &pvSig, &cbSig));
3201
3202 // Don't turn mr's with vararg's into defs, because the variable portion
3203 // of the call is kept in the mr signature.
3204 if ((pvSig != NULL) && isCallConv(*pvSig, IMAGE_CEE_CS_CALLCONV_VARARG))
3205 continue;
3206
3207 // ref is optimized to the def
3208
3209 // now remap the def since def could be moved again.
3210 tkDefTo = ptkRemap->SafeRemap(tkTo);
3211
3212 // when Def token moves, it will not change type!!
3213 _ASSERTE(TypeFromToken(tkTo) == TypeFromToken(tkDefTo));
3214 LOG((LOGMD, "MapToken (remap): from 0x%08x to 0x%08x\n", TokenFromRid(i, mdtMemberRef), tkDefTo));
3215 m_pHandler->Map(TokenFromRid(i, mdtMemberRef), tkDefTo);
3216 }
3217 }
3218 }
3219
3220 // Ok, we've applied all of the token remaps. Make sure we don't apply them again in the future
3221 if (GetTokenMovementMap() != NULL)
3222 IfFailGo(GetTokenMovementMap()->EmptyMap());
3223
3224ErrExit:
3225
3226 return hr;
3227} // CMiniMdRW::PreSaveFull
3228
3229#ifdef _PREFAST_
3230#pragma warning(pop)
3231#endif
3232
3233//---------------------------------------------------------------------------------------
3234//
3235// ENC-specific pre-safe work.
3236//
3237__checkReturn
3238HRESULT
3239CMiniMdRW::PreSaveEnc()
3240{
3241 HRESULT hr;
3242 int iNew; // Insertion point for new tokens.
3243 ULONG *pul; // Found token.
3244 ULONG iRid; // RID from a token.
3245 ULONG ixTbl; // Table from an ENC record.
3246 ULONG cRecs; // Count of records in a table.
3247
3248 IfFailGo(PreSaveFull());
3249
3250 // Turn off pre-save bit so that we can add ENC map records.
3251 m_bPreSaveDone = false;
3252
3253 if (m_Schema.m_cRecs[TBL_ENCLog])
3254 { // Keep track of ENC recs we've seen.
3255 _ASSERTE(m_rENCRecs == 0);
3256 m_rENCRecs = new (nothrow) ULONGARRAY[m_TblCount];
3257 IfNullGo(m_rENCRecs);
3258
3259 // Create the temporary table.
3260 MetaData::TableRW tempTable;
3261 IfFailGo(tempTable.InitializeEmpty_WithRecordCount(
3262 m_TableDefs[TBL_ENCLog].m_cbRec,
3263 m_Schema.m_cRecs[TBL_ENCLog]
3264 COMMA_INDEBUG_MD(TRUE)));
3265 INDEBUG_MD(tempTable.Debug_SetTableInfo("TBL_ENCLog", TBL_ENCLog));
3266
3267 // For each row in the data.
3268 RID rid;
3269 ULONG iKept=0;
3270 for (rid=1; rid<=m_Schema.m_cRecs[TBL_ENCLog]; ++rid)
3271 {
3272 ENCLogRec *pFrom;
3273 IfFailGo(m_Tables[TBL_ENCLog].GetRecord(rid, reinterpret_cast<BYTE **>(&pFrom)));
3274
3275 // Keep this record?
3276 if (pFrom->GetFuncCode() == 0)
3277 { // No func code. Skip if we've seen this token before.
3278
3279 // What kind of record is this?
3280 if (IsRecId(pFrom->GetToken()))
3281 { // Non-token table
3282 iRid = RidFromRecId(pFrom->GetToken());
3283 ixTbl = TblFromRecId(pFrom->GetToken());
3284 }
3285 else
3286 { // Token table.
3287 iRid = RidFromToken(pFrom->GetToken());
3288 ixTbl = GetTableForToken(pFrom->GetToken());
3289
3290 }
3291
3292 RIDBinarySearch searcher((UINT32 *)m_rENCRecs[ixTbl].Ptr(), m_rENCRecs[ixTbl].Count());
3293 pul = (ULONG *)(searcher.Find((UINT32 *)&iRid, &iNew));
3294 // If we found the token, don't keep the record.
3295 if (pul != 0)
3296 {
3297 LOG((LOGMD, "PreSave ENCLog skipping duplicate token %d", pFrom->GetToken()));
3298 continue;
3299 }
3300 // First time token was seen, so keep track of it.
3301 IfNullGo(pul = m_rENCRecs[ixTbl].Insert(iNew));
3302 *pul = iRid;
3303 }
3304
3305 // Keeping the record, so allocate the new record to hold it.
3306 ++iKept;
3307 RID ridNew;
3308 ENCLogRec *pTo;
3309 IfFailGo(tempTable.AddRecord(reinterpret_cast<BYTE **>(&pTo), (UINT32 *)&ridNew));
3310 _ASSERTE(ridNew == iKept);
3311
3312 // copy the data.
3313 *pTo = *pFrom;
3314 }
3315
3316 // Keep the expanded table.
3317 m_Tables[TBL_ENCLog].Delete();
3318 IfFailGo(m_Tables[TBL_ENCLog].InitializeFromTable(
3319 &tempTable,
3320 TRUE)); // fCopyData
3321 INDEBUG_MD(m_Tables[TBL_ENCLog].Debug_SetTableInfo("TBL_ENCLog", TBL_ENCLog));
3322 m_Schema.m_cRecs[TBL_ENCLog] = iKept;
3323
3324 // If saving only deltas, build the ENC Map table.
3325 if (((m_OptionValue.m_UpdateMode & MDUpdateDelta)) == MDUpdateDelta)
3326 {
3327 cRecs = 0;
3328 for (ixTbl=0; ixTbl<m_TblCount; ++ixTbl)
3329 {
3330 cRecs += m_rENCRecs[ixTbl].Count();
3331 }
3332 m_Tables[TBL_ENCMap].Delete();
3333
3334 m_Schema.m_cRecs[TBL_ENCMap] = 0;
3335
3336 IfFailGo(m_Tables[TBL_ENCMap].InitializeEmpty_WithRecordCount(
3337 m_TableDefs[TBL_ENCMap].m_cbRec,
3338 cRecs
3339 COMMA_INDEBUG_MD(TRUE)));
3340 INDEBUG_MD(m_Tables[TBL_ENCMap].Debug_SetTableInfo("TBL_ENCMap", TBL_ENCMap));
3341 cRecs = 0;
3342 for (ixTbl=0; ixTbl<m_TblCount; ++ixTbl)
3343 {
3344 ENCMapRec *pNew;
3345 ULONG nNew;
3346 for (int i=0; i<m_rENCRecs[ixTbl].Count(); ++i)
3347 {
3348 IfFailGo(AddENCMapRecord(&pNew, &nNew)); // pre-allocated for all rows.
3349 _ASSERTE(nNew == ++cRecs);
3350 _ASSERTE(TblFromRecId(RecIdFromRid(m_rENCRecs[ixTbl][i], ixTbl)) < m_TblCount);
3351 pNew->SetToken(RecIdFromRid(m_rENCRecs[ixTbl][i], ixTbl));
3352 }
3353 }
3354 }
3355 }
3356
3357 // Turn pre-save bit back on.
3358 m_bPreSaveDone = true;
3359
3360ErrExit:
3361 return hr;
3362} // CMiniMdRW::PreSaveEnc
3363
3364//*****************************************************************************
3365// Perform any appropriate pre-save optimization or reorganization.
3366//*****************************************************************************
3367__checkReturn
3368HRESULT
3369CMiniMdRW::PreSave(
3370 MetaDataReorderingOptions reorderingOptions,
3371 CorProfileData *pProfileData)
3372{
3373 HRESULT hr = S_OK;
3374
3375#ifdef _DEBUG
3376 if (CLRConfig::GetConfigValue(CLRConfig::INTERNAL_MD_PreSaveBreak))
3377 {
3378 _ASSERTE(!"CMiniMdRW::PreSave()");
3379 }
3380#endif //_DEBUG
3381
3382 if (m_bPreSaveDone)
3383 return hr;
3384
3385#ifdef FEATURE_PREJIT
3386 // Reorganization should be done at ngen time only
3387 if( reorderingOptions & ReArrangeStringPool )
3388 {
3389 EX_TRY
3390 {
3391 OrganizeStringPool(pProfileData);
3392 }
3393 EX_CATCH
3394 {
3395 hr = GET_EXCEPTION()->GetHR();
3396 }
3397 EX_END_CATCH(SwallowAllExceptions)
3398 IfFailRet(hr);
3399 }
3400#endif // FEATURE_PREJIT
3401
3402 switch (m_OptionValue.m_UpdateMode & MDUpdateMask)
3403 {
3404 case MDUpdateFull:
3405 case MDUpdateIncremental:
3406 case MDUpdateExtension:
3407 hr = PreSaveFull();
3408 break;
3409 // PreSaveEnc removes duplicate entries in the ENCLog table,
3410 // which we need to do regardless if we're saving a full MD
3411 // or a minimal delta.
3412 case MDUpdateDelta:
3413 case MDUpdateENC:
3414 hr = PreSaveEnc();
3415 break;
3416 default:
3417 _ASSERTE(!"Internal error -- unknown save mode");
3418 return E_INVALIDARG;
3419 }
3420
3421 return hr;
3422} // CMiniMdRW::PreSave
3423
3424//*****************************************************************************
3425// Perform any necessary post-save cleanup.
3426//*****************************************************************************
3427__checkReturn
3428HRESULT
3429CMiniMdRW::PostSave()
3430{
3431 if (m_rENCRecs)
3432 {
3433 delete [] m_rENCRecs;
3434 m_rENCRecs = 0;
3435 }
3436
3437 m_bPreSaveDone = false;
3438
3439 return S_OK;
3440} // CMiniMdRW::PostSave
3441
3442//*****************************************************************************
3443// Save the tables to the stream.
3444//*****************************************************************************
3445__checkReturn
3446HRESULT
3447CMiniMdRW::SaveFullTablesToStream(
3448 IStream *pIStream,
3449 MetaDataReorderingOptions reorderingOptions,
3450 CorProfileData *pProfileData)
3451{
3452 HRESULT hr;
3453 CMiniTableDef sTempTable; // Definition for a temporary table.
3454 CQuickArray<CMiniColDef> rTempCols; // Definition for a temp table's columns.
3455 BYTE SchemaBuf[sizeof(CMiniMdSchema)]; //Buffer for compressed schema.
3456 ULONG cbAlign; // Bytes needed for alignment.
3457 UINT32 cbTable; // Bytes in a table.
3458 UINT32 cbTotal; // Bytes written.
3459 static const unsigned char zeros[8] = {0}; // For padding and alignment.
3460
3461#ifndef FEATURE_PREJIT
3462 _ASSERTE(pProfileData == NULL);
3463#endif //!FEATURE_PREJIT
3464
3465 // Write the header.
3466 CMiniMdSchema Schema = m_Schema;
3467 IfFailGo(m_StringHeap.GetAlignedSize(&cbTable));
3468 if (cbTable > USHRT_MAX)
3469 {
3470 Schema.m_heaps |= CMiniMdSchema::HEAP_STRING_4;
3471 }
3472 else
3473 {
3474 Schema.m_heaps &= ~CMiniMdSchema::HEAP_STRING_4;
3475 }
3476
3477 if (m_GuidHeap.GetSize() > USHRT_MAX)
3478 {
3479 Schema.m_heaps |= CMiniMdSchema::HEAP_GUID_4;
3480 }
3481 else
3482 {
3483 Schema.m_heaps &= ~CMiniMdSchema::HEAP_GUID_4;
3484 }
3485
3486 IfFailGo(m_BlobHeap.GetAlignedSize(&cbTable));
3487 if (cbTable > USHRT_MAX)
3488 {
3489 Schema.m_heaps |= CMiniMdSchema::HEAP_BLOB_4;
3490 }
3491 else
3492 {
3493 Schema.m_heaps &= ~CMiniMdSchema::HEAP_BLOB_4;
3494 }
3495
3496 cbTotal = 0;
3497 if (pProfileData == NULL)
3498 {
3499 cbTotal = Schema.SaveTo(SchemaBuf);
3500 IfFailGo(pIStream->Write(SchemaBuf, cbTotal, 0));
3501 if ( (cbAlign = Align4(cbTotal) - cbTotal) != 0)
3502 IfFailGo(pIStream->Write(&hr, cbAlign, 0));
3503 cbTotal += cbAlign;
3504 }
3505
3506 ULONG headerOffset[TBL_COUNT];
3507 _ASSERTE(m_TblCount <= TBL_COUNT);
3508
3509 ULONG ixTbl;
3510 // For each table...
3511 for (ixTbl=0; ixTbl<m_TblCount; ++ixTbl)
3512 {
3513 headerOffset[ixTbl] = ~0U;
3514
3515 ULONG itemCount = GetCountRecs(ixTbl);
3516 if (itemCount)
3517 {
3518#ifdef FEATURE_PREJIT
3519 ULONG hotItemCount = 0;
3520
3521 NewArrayHolder<mdToken> hotItemList = NULL;
3522 NewArrayHolder<TokenIndexPair> indexMapping = NULL;
3523
3524 // check if we were asked to generate the hot tables
3525 if (pProfileData != NULL)
3526 {
3527 // obtain the number of tokens in this table whose metadata was touched
3528 IfFailGo(GetHotMetadataTokensSearchAware(pProfileData, ixTbl, &hotItemCount, NULL, 0));
3529
3530 // assume ManifestResource table is touched completely if touched at all or any hot metadata at all so far
3531 // this is because it's searched linearly, and IBC data misses an unsuccessful search
3532 // after module load
3533 if (ixTbl == TBL_ManifestResource && (hotItemCount > 0 || cbTotal != 0))
3534 hotItemCount = itemCount;
3535
3536 // if the hot subset of the rows along with their side lookup tables will occupy more space
3537 // than the full table, keep the full table to save both space and access time.
3538 if (hotItemCount <= USHRT_MAX && itemCount <= USHRT_MAX && m_TableDefs[ixTbl].m_cbRec <= SHRT_MAX)
3539 {
3540 ULONG estimatedSizeUsingSubsetCopy = hotItemCount * (sizeof(WORD) + sizeof(BYTE) + m_TableDefs[ixTbl].m_cbRec);
3541 ULONG estimatedSizeUsingFullCopy = itemCount * m_TableDefs[ixTbl].m_cbRec;
3542
3543 if (estimatedSizeUsingSubsetCopy > estimatedSizeUsingFullCopy)
3544 hotItemCount = itemCount;
3545 }
3546
3547 // first level table is array of WORD, so we can't handle more than 2**16 hot items
3548 if (hotItemCount > USHRT_MAX)
3549 hotItemCount = 0;
3550
3551 // only generate additional table if any hot items at all
3552 if (hotItemCount > 0)
3553 {
3554 if ( (cbAlign = Align4(cbTotal) - cbTotal) != 0)
3555 IfFailGo(pIStream->Write(&hr, cbAlign, 0));
3556 cbTotal += cbAlign;
3557
3558 headerOffset[ixTbl] = cbTotal;
3559
3560 // write first part of header: hot item count
3561 IfFailGo(pIStream->Write(&hotItemCount, sizeof(hotItemCount), 0));
3562 cbTotal += sizeof(hotItemCount);
3563
3564 ULONG offset = 0;
3565 if (hotItemCount < itemCount)
3566 {
3567 // obtain the tokens whose metadata was touched
3568 hotItemList = new (nothrow) mdToken[hotItemCount];
3569 IfNullGo(hotItemList);
3570 IfFailGo(GetHotMetadataTokensSearchAware(pProfileData, ixTbl, NULL, hotItemList, hotItemCount));
3571
3572 // construct an array of token-index pairs and save the original order of the tokens in pProfileData->GetHotTokens
3573 // we want to write hot rows in this order to preserve the ordering optimizations done by IbcMerge
3574 indexMapping = new (nothrow) TokenIndexPair[hotItemCount];
3575 IfNullGo(indexMapping);
3576
3577 for (DWORD i = 0; i < hotItemCount; i++)
3578 {
3579 indexMapping[i].token = hotItemList[i];
3580 indexMapping[i].index = (WORD)i;
3581 }
3582
3583 // figure out how big the first level table should be
3584 // and sort tokens accordingly
3585 shiftCount = ShiftCount(itemCount, hotItemCount);
3586 qsort(indexMapping, hotItemCount, sizeof(indexMapping[0]), TokenCmp);
3587
3588 // each table has a header that consists of the hotItemCount, offsets to
3589 // the first and second level tables, an offset to the actual data, and the
3590 // shiftCount that determines the size of the first level table.
3591 // see class HotTableHeader in metamodelro.h
3592
3593 // we have already written the hotItemCount above.
3594
3595 // so now write the offset of the first level table (just after the header)
3596 offset = sizeof(hotItemCount) + 4*sizeof(offset) + sizeof(shiftCount);
3597 IfFailGo(pIStream->Write(&offset, sizeof(offset), 0));
3598 cbTotal += sizeof(offset);
3599
3600 // figure out first level table size (1 extra entry at the end)
3601 ULONG firstLevelCount = (1<<shiftCount)+1;
3602 offset += firstLevelCount*sizeof(WORD);
3603
3604 // write offset of second level table.
3605 IfFailGo(pIStream->Write(&offset, sizeof(offset), 0));
3606 cbTotal += sizeof(offset);
3607
3608 // second level table has a byte-sized entry for each hot item
3609 offset += hotItemCount*sizeof(BYTE);
3610
3611 // write offset of index mapping table.
3612 IfFailGo(pIStream->Write(&offset, sizeof(offset), 0));
3613 cbTotal += sizeof(offset);
3614
3615 // index mapping table has a word-sized entry for each hot item
3616 offset += hotItemCount*sizeof(WORD);
3617
3618 // actual data is just behind it, but 4-byte aligned
3619 offset = Align4(offset);
3620
3621 // write offset of actual hot metadata
3622 IfFailGo(pIStream->Write(&offset, sizeof(offset), 0));
3623 cbTotal += sizeof(offset);
3624
3625 // write shiftCount
3626 IfFailGo(pIStream->Write(&shiftCount, sizeof(shiftCount), 0));
3627 cbTotal += sizeof(shiftCount);
3628
3629 // allocate tables
3630 NewArrayHolder<WORD> firstLevelTable = new (nothrow) WORD[firstLevelCount];
3631 IfNullGo(firstLevelTable);
3632 NewArrayHolder<BYTE> secondLevelTable = new (nothrow) BYTE[hotItemCount];
3633 IfNullGo(secondLevelTable);
3634 NewArrayHolder<WORD> indexMappingTable = new (nothrow) WORD[hotItemCount];
3635 IfNullGo(indexMappingTable);
3636
3637 // fill out the tables
3638 ULONG nextFirstLevelIndex = 0;
3639 for (DWORD i = 0; i < hotItemCount; i++)
3640 {
3641 // second level table contains the high order bits for each hot rid
3642 secondLevelTable[i] = (BYTE)(RidFromToken(indexMapping[i].token) >> shiftCount);
3643
3644 // the index into the first level table is the low order bits.
3645 ULONG firstLevelIndex = indexMapping[i].token & ((1<<shiftCount)-1);
3646
3647 // first level indicates where to start searching in the second level table
3648 while (nextFirstLevelIndex <= firstLevelIndex)
3649 firstLevelTable[nextFirstLevelIndex++] = (WORD)i;
3650
3651 // index mapping table converts the index of this hot rid in the second level table
3652 // to the index of the hot data in the cached rows
3653 indexMappingTable[i] = indexMapping[i].index;
3654 }
3655 // fill remaining entries
3656 while (nextFirstLevelIndex < firstLevelCount)
3657 firstLevelTable[nextFirstLevelIndex++] = (WORD)hotItemCount;
3658
3659 // write first level table
3660 IfFailGo(pIStream->Write(firstLevelTable, sizeof(firstLevelTable[0])*firstLevelCount, 0));
3661 cbTotal += sizeof(firstLevelTable[0])*firstLevelCount;
3662
3663 // write second level table
3664 IfFailGo(pIStream->Write(secondLevelTable, sizeof(secondLevelTable[0])*hotItemCount, 0));
3665 cbTotal += sizeof(secondLevelTable[0])*hotItemCount;
3666
3667 // write index mapping table
3668 IfFailGo(pIStream->Write(indexMappingTable, sizeof(indexMappingTable[0])*hotItemCount, 0));
3669 cbTotal += sizeof(indexMappingTable[0])*hotItemCount;
3670
3671 // NewArrayHolder for firstLevelTable and secondLevelTable going out of scope - no delete[] necessary
3672 }
3673 else
3674 {
3675 // in case the whole table is touched, omit the tables
3676 // we still have a full header though with zero offsets for these tables.
3677 IfFailGo(pIStream->Write(&offset, sizeof(offset), 0));
3678 cbTotal += sizeof(offset);
3679 IfFailGo(pIStream->Write(&offset, sizeof(offset), 0));
3680 cbTotal += sizeof(offset);
3681 IfFailGo(pIStream->Write(&offset, sizeof(offset), 0));
3682 cbTotal += sizeof(offset);
3683
3684 // offset for actual data points immediately after the header
3685 offset += sizeof(hotItemCount) + 4*sizeof(offset) + sizeof(shiftCount);
3686 offset = Align4(offset);
3687 IfFailGo(pIStream->Write(&offset, sizeof(offset), 0));
3688 cbTotal += sizeof(offset);
3689 shiftCount = 0;
3690
3691 // write shift count
3692 IfFailGo(pIStream->Write(&shiftCount, sizeof(shiftCount), 0));
3693 cbTotal += sizeof(shiftCount);
3694 }
3695 if ( (cbAlign = Align4(cbTotal) - cbTotal) != 0)
3696 IfFailGo(pIStream->Write(&hr, cbAlign, 0));
3697 cbTotal += cbAlign;
3698 _ASSERTE(cbTotal == headerOffset[ixTbl] + offset);
3699 }
3700 }
3701#endif //FEATURE_PREJIT
3702
3703 // Compress the records by allocating a new, temporary, table and
3704 // copying the rows from the one to the new.
3705
3706 // If the table was grown, shrink it as much as possible.
3707 if (m_eGrow == eg_grown)
3708 {
3709
3710 // Allocate a def for the temporary table.
3711 sTempTable = m_TableDefs[ixTbl];
3712 IfFailGo(rTempCols.ReSizeNoThrow(sTempTable.m_cCols));
3713 sTempTable.m_pColDefs = rTempCols.Ptr();
3714
3715 // Initialize temp table col defs based on actual counts of data in the
3716 // real tables.
3717 IfFailGo(InitColsForTable(Schema, ixTbl, &sTempTable, 1, FALSE));
3718
3719 // Create the temporary table.
3720 MetaData::TableRW tempTable;
3721 IfFailGo(tempTable.InitializeEmpty_WithRecordCount(
3722 sTempTable.m_cbRec,
3723 m_Schema.m_cRecs[ixTbl]
3724 COMMA_INDEBUG_MD(TRUE)));
3725 INDEBUG_MD(tempTable.Debug_SetTableInfo(NULL, ixTbl));
3726
3727 // For each row in the data.
3728 RID rid;
3729 for (rid=1; rid<=m_Schema.m_cRecs[ixTbl]; ++rid)
3730 {
3731 RID ridNew;
3732 BYTE *pRow;
3733 IfFailGo(m_Tables[ixTbl].GetRecord(rid, &pRow));
3734 BYTE *pNew;
3735 IfFailGo(tempTable.AddRecord(&pNew, (UINT32 *)&ridNew));
3736 _ASSERTE(rid == ridNew);
3737
3738 // For each column.
3739 for (ULONG ixCol=0; ixCol<sTempTable.m_cCols; ++ixCol)
3740 {
3741 // Copy the data to the temp table.
3742 ULONG ulVal = GetCol(ixTbl, ixCol, pRow);
3743 IfFailGo(PutCol(rTempCols[ixCol], pNew, ulVal));
3744 }
3745 } // Persist the temp table to the stream.
3746#ifdef FEATURE_PREJIT
3747 if (pProfileData != NULL)
3748 {
3749 // only write out the hot rows as indicated by profile data
3750 for (DWORD i = 0; i < hotItemCount; i++)
3751 {
3752 BYTE *pRow;
3753 IfFailGo(tempTable.GetRecord(
3754 hotItemList != NULL ? RidFromToken(hotItemList[i]) : i + 1,
3755 &pRow));
3756 IfFailGo(pIStream->Write(pRow, sTempTable.m_cbRec, 0));
3757 }
3758 cbTable = sTempTable.m_cbRec*hotItemCount;
3759 }
3760 else
3761#endif //FEATURE_PREJIT
3762 {
3763 IfFailGo(tempTable.GetRecordsDataSize(&cbTable));
3764 _ASSERTE(cbTable == sTempTable.m_cbRec * GetCountRecs(ixTbl));
3765 IfFailGo(tempTable.SaveToStream(
3766 pIStream));
3767 }
3768 cbTotal += cbTable;
3769 }
3770 else
3771 { // Didn't grow, so just persist directly to stream.
3772#ifdef FEATURE_PREJIT
3773 if (pProfileData != NULL)
3774 {
3775 // only write out the hot rows as indicated by profile data
3776 for (DWORD i = 0; i < hotItemCount; i++)
3777 {
3778 BYTE *pRow;
3779 IfFailGo(m_Tables[ixTbl].GetRecord(
3780 hotItemList != NULL ? RidFromToken(hotItemList[i]) : i + 1,
3781 &pRow));
3782 IfFailGo(pIStream->Write(pRow, m_TableDefs[ixTbl].m_cbRec, 0));
3783 }
3784 cbTable = m_TableDefs[ixTbl].m_cbRec*hotItemCount;
3785 }
3786 else
3787#endif //FEATURE_PREJIT
3788 {
3789 IfFailGo(m_Tables[ixTbl].GetRecordsDataSize(&cbTable));
3790 _ASSERTE(cbTable == m_TableDefs[ixTbl].m_cbRec * GetCountRecs(ixTbl));
3791 IfFailGo(m_Tables[ixTbl].SaveToStream(
3792 pIStream));
3793 }
3794 cbTotal += cbTable;
3795 }
3796 // NewArrayHolder hotItemList going out of scope - no delete [] necessary
3797 }
3798 }
3799
3800 // Pad with at least 2 bytes and align on 4 bytes.
3801 cbAlign = Align4(cbTotal) - cbTotal;
3802 if (cbAlign < 2)
3803 cbAlign += 4;
3804 IfFailGo(pIStream->Write(zeros, cbAlign, 0));
3805 cbTotal += cbAlign;
3806 _ASSERTE((m_cbSaveSize == 0) || (m_cbSaveSize == cbTotal) || (pProfileData != NULL));
3807
3808#ifdef FEATURE_PREJIT
3809 if (pProfileData != NULL)
3810 {
3811 // #WritingHotMetaData write hot table directory (HotTableDirectory in MetaModelRO.h)
3812
3813 // first write magic
3814 ULONG magic = 0x484f4e44;
3815 IfFailGo(pIStream->Write(&magic, sizeof(magic), 0));
3816
3817 // compute offsets to table headers
3818 for (ixTbl=0; ixTbl<m_TblCount; ++ixTbl)
3819 if (headerOffset[ixTbl] != ~0u)
3820 {
3821 headerOffset[ixTbl] -= cbTotal;
3822 }
3823 else
3824 {
3825 headerOffset[ixTbl] = 0;
3826 }
3827
3828 // write the offsets to the table headers
3829 IfFailGo(pIStream->Write(headerOffset, sizeof(headerOffset), 0));
3830 cbTotal += sizeof(magic) + sizeof(headerOffset);
3831
3832 UINT32 cbPoolDirSize = 0;
3833 UINT32 cbSavedHeapsSize = 0;
3834
3835 IfFailGo(SaveHotPoolsToStream(
3836 pIStream,
3837 reorderingOptions,
3838 pProfileData,
3839 &cbPoolDirSize,
3840 &cbSavedHeapsSize));
3841
3842 // write hot metadata (including pools) header
3843 IfFailGo(StreamUtil::WriteToStream(pIStream, (DWORD)(cbSavedHeapsSize + cbPoolDirSize)));
3844 IfFailGo(StreamUtil::WriteToStream(pIStream, (DWORD)cbPoolDirSize));
3845 }
3846#endif //FEATURE_PREJIT
3847
3848ErrExit:
3849 return hr;
3850} // CMiniMdRW::SaveFullTablesToStream
3851
3852//*****************************************************************************
3853// Check to see if it is safe to reorder the string pool
3854// The existing implementation of metadata tables is such that string offsets in different tables
3855// may have different sizes.
3856// Since we are going to reorder the string pool, offsets of strings would change and that may
3857// cause overflows if tables have string offsets with different sizes
3858//*****************************************************************************
3859BOOL CMiniMdRW::IsSafeToReorderStringPool()
3860{
3861#ifdef FEATURE_PREJIT
3862 BYTE lastColumnSize=0;
3863 ULONG ixTbl=0, ixCol=0;
3864 for (ixTbl=0; ixTbl<m_TblCount; ixTbl++)
3865 {
3866 // for every column in this row
3867 for (ixCol=0; ixCol<m_TableDefs[ixTbl].m_cCols; ixCol++)
3868 {
3869 // proceed only when the column type is iSTRING
3870 if(m_TableDefs[ixTbl].m_pColDefs[ixCol].m_Type == iSTRING)
3871 {
3872 if(lastColumnSize == 0)
3873 {
3874 lastColumnSize = m_TableDefs[ixTbl].m_pColDefs[ixCol].m_cbColumn;
3875 }
3876 else if(lastColumnSize != m_TableDefs[ixTbl].m_pColDefs[ixCol].m_cbColumn)
3877 {
3878 return FALSE;
3879 }
3880 }
3881 }
3882 }
3883 return TRUE;
3884#else
3885 return FALSE;
3886#endif // FEATURE_PREJIT
3887} // CMiniMdRW::IsSafeToReorderStringPool
3888
3889//*****************************************************************************
3890// Function to mark hot strings in the marks array based on the token information
3891// in profile data
3892//*****************************************************************************
VOID CMiniMdRW::MarkHotStrings(CorProfileData *pProfileData, BYTE * pMarks, ULONG poolSize)
{
#ifdef FEATURE_PREJIT
    if(pProfileData != NULL)
    {
        // First call with a NULL buffer just counts the hot string-pool tokens
        // recorded in the profile data for the string pool pseudo-table.
        ULONG hotItemCount = pProfileData->GetHotTokens( TBL_COUNT + MDPoolStrings, 1 << ProfilingFlags_MetaData, 1 << ProfilingFlags_MetaData, NULL, 0 );
        if(hotItemCount > 0)
        {
            NewArrayHolder< ULONG > hotItemList = new ULONG[hotItemCount];

            // get hot tokens (second call fills the buffer)
            pProfileData->GetHotTokens( TBL_COUNT + MDPoolStrings, 1 << ProfilingFlags_MetaData, 1 << ProfilingFlags_MetaData, reinterpret_cast<mdToken *>(&hotItemList[0]), hotItemCount );

            for ( ULONG i=0; i<hotItemCount; ++i )
            {
                // convert tokens to rids; for the string pool the rid is the
                // string's byte offset within the pool
                ULONG ulStringOffset = RidFromToken(hotItemList[i]);

                // reject profile entries that point outside the current pool
                if (ulStringOffset >= poolSize)
                    ThrowHR(E_UNEXPECTED);

                // tag this offset as hot (highest-priority bucket)
                pMarks[ulStringOffset] = ReorderData::ProfileData;
            }
        }
    }
#endif // FEATURE_PREJIT
} // CMiniMdRW::MarkHotStrings
3920
3921//*******************************************************************************
3922// Function to mark hot strings referenced by hot tables based on token information in profile data
3923//*******************************************************************************
VOID CMiniMdRW::MarkStringsInHotTables(CorProfileData *pProfileData, BYTE * pMarks, ULONG poolSize)
{
#ifdef FEATURE_PREJIT
    ULONG ixTbl=0, ixCol=0;
    ULONG hotItemCount=0;
    RID hotRID=0;
    BYTE *pHotRow=NULL;

    if(pProfileData != NULL)
    {
        // For every metadata table, mark the pool offsets of strings that are
        // referenced from rows the profile data says were touched.
        for (ixTbl=0; ixTbl<m_TblCount; ++ixTbl)
        {
            NewArrayHolder<mdToken> hotItemList = NULL;
            // obtain the number of tokens in this table whose metadata was touched
            hotItemCount = pProfileData->GetHotTokens(ixTbl, 1<<ProfilingFlags_MetaData, 1<<ProfilingFlags_MetaData, NULL, 0);

            // obtain the tokens whose metadata was touched
            if(hotItemCount > 0)
            {
                hotItemList = new mdToken[hotItemCount];
                pProfileData->GetHotTokens(ixTbl, 1<<ProfilingFlags_MetaData, 1<<ProfilingFlags_MetaData, hotItemList, hotItemCount);
            }

            // for every column in this hot row
            for (ixCol=0; ixCol<m_TableDefs[ixTbl].m_cCols; ++ixCol)
            {
                // add the string to the string pool only if it hasn't been added yet
                // (only iSTRING-typed columns reference the string pool)
                if(m_TableDefs[ixTbl].m_pColDefs[ixCol].m_Type == iSTRING)
                {
                    // for every hot token in the list
                    for(ULONG item=0; item<hotItemCount; item++)
                    {
                        // get the rid from the token and fetch that row's data
                        hotRID = RidFromToken(hotItemList[item]);
                        IfFailThrow(m_Tables[ixTbl].GetRecord(hotRID, &pHotRow));
                        _ASSERTE(pHotRow != NULL);

                        // get column for string; this will get me the current string offset
                        ULONG ulStringOffset = GetCol(ixTbl, ixCol, pHotRow);

                        // the referenced offset must lie within the current pool
                        if (ulStringOffset >= poolSize)
                            ThrowHR(E_UNEXPECTED);

                        // strings referenced by hot rows share the ProfileData bucket
                        pMarks[ulStringOffset] = ReorderData::ProfileData;
                    }
                }
            }
        }
    }
#endif // FEATURE_PREJIT
} // CMiniMdRW::MarkStringsInHotTables
3975
3976//*****************************************************************************
3977// Function to mark strings referenced by the different metadata tables
3978//*****************************************************************************
VOID CMiniMdRW::MarkStringsInTables(BYTE * pMarks, ULONG poolSize)
{
#ifdef FEATURE_PREJIT
    for (ULONG ixTbl=0; ixTbl<m_TblCount; ixTbl++)
    {
        // for every row in the table
        for (RID ridOld=1; ridOld<=m_Schema.m_cRecs[ixTbl]; ridOld++)
        {
            // lets assume we do not have any references to the stringpool
            BOOL fHasStringData = FALSE;

            // for every column in this row
            for (ULONG ixCol=0; ixCol<m_TableDefs[ixTbl].m_cCols; ixCol++)
            {
                // proceed only when the column type is iSTRING
                if(m_TableDefs[ixTbl].m_pColDefs[ixCol].m_Type == iSTRING)
                {
                    fHasStringData = TRUE;
                    // get the current record
                    BYTE *pOldRow;
                    IfFailThrow(m_Tables[ixTbl].GetRecord(ridOld, &pOldRow));

                    // get column for string; this will get me the current string offset
                    ULONG ulStringOffset = GetCol(ixTbl, ixCol, pOldRow);

                    // ignore empty strings, they are not moving anywhere
                    if(ulStringOffset == 0)
                        continue;

                    if (ulStringOffset >= poolSize)
                        ThrowHR(E_UNEXPECTED);

                    BYTE ulBucketType=0;

                    // Classify the string by the visibility of the row that
                    // references it: public members/types go in the hotter
                    // PublicData bucket, non-public in NonPublicData, and any
                    // other table's strings in OtherData.
                    switch(ixTbl)
                    {
                    case TBL_Method:
                        ulBucketType = IsMdPublic(GetCol(TBL_Method, MethodRec::COL_Flags, pOldRow))
                                       ? ReorderData::PublicData
                                       : ReorderData::NonPublicData;
                        break;
                    case TBL_Field:
                        ulBucketType = IsFdPublic(GetCol(TBL_Field, FieldRec::COL_Flags, pOldRow))
                                       ? ReorderData::PublicData
                                       : ReorderData::NonPublicData;
                        break;
                    case TBL_TypeDef:
                        ulBucketType = IsTdPublic(GetCol(TBL_TypeDef, TypeDefRec::COL_Flags, pOldRow))
                                       ? ReorderData::PublicData
                                       : ReorderData::NonPublicData;
                        break;
                    case TBL_ManifestResource:
                        ulBucketType = IsMrPublic(GetCol(TBL_ManifestResource, ManifestResourceRec::COL_Flags, pOldRow))
                                       ? ReorderData::PublicData
                                       : ReorderData::NonPublicData;
                        break;
                    default:
                        ulBucketType = ReorderData::OtherData;
                        break;
                    }

                    // Only upgrade a mark: keep the numerically smallest
                    // (highest-priority) bucket already recorded for this offset.
                    if (pMarks[ulStringOffset] == ReorderData::Undefined || pMarks[ulStringOffset] > ulBucketType)
                        pMarks[ulStringOffset] = ulBucketType;
                }
            }
            // If the first row showed no iSTRING column, the table's schema has
            // none at all (the flag depends only on column defs), so skip the
            // remaining rows of this table.
            if (!fHasStringData)
                break;
        }
    }
#endif // FEATURE_PREJIT
} // CMiniMdRW::MarkStringsInTables
4050
4051// --------------------------------------------------------------------------------------
4052//
4053// Function to mark duplicate strings in the mark array. This step is basically to take care of
4054// strings that have the same tail.
4055// Throws on error.
4056//
VOID CMiniMdRW::MarkDuplicateStrings(BYTE * pMarks, ULONG poolSize)
{
#ifdef FEATURE_PREJIT
    // The string pool interns shared tails: an offset inside another string's
    // extent is a reference to that string's suffix. Walk each marked string
    // and fold any marked offsets within its extent into the head entry.
    ULONG offset=1;
    while (offset<poolSize)
    {
        // Skip offsets nothing references.
        if (pMarks[offset] == ReorderData::Undefined)
        {
            offset++;
            continue;
        }

        LPCSTR pszString;
        IfFailThrow(m_StringHeap.GetString(offset, &pszString));

        // [start, end] covers this string including its NUL terminator.
        ULONG start = offset;
        ULONG end = offset + (ULONG)strlen(pszString);

        BYTE tag = pMarks[offset];
        offset++;

        // Any marked offset inside the extent is a tail duplicate; demote it to
        // Duplicate and propagate the smallest (highest-priority) bucket value
        // to the head of the string.
        while (offset <= end)
        {
            if (pMarks[offset] != ReorderData::Undefined)
            {
                tag = min(pMarks[offset], tag);
                pMarks[offset] = ReorderData::Duplicate;
            }
            offset++;
        }
        pMarks[start] = tag;
    }
#endif // FEATURE_PREJIT
} // CMiniMdRW::MarkDuplicateStrings
4091
4092//*****************************************************************************
4093// Function to update the tables with the modified string offsets
4094//*****************************************************************************
VOID CMiniMdRW::FixStringsInTables()
{
#if defined(FEATURE_PREJIT) && !defined(DACCESS_COMPILE)
    // Rewrite every iSTRING column so it points into the reordered pool, using
    // the old-offset -> new-offset map built by CreateReorderedStringPool
    // (m_StringPoolOffsetHash).
    for (ULONG ixTbl=0; ixTbl<m_TblCount; ixTbl++)
    {
        // for every row in the table
        for (RID ridOld=1; ridOld<=m_Schema.m_cRecs[ixTbl]; ridOld++)
        {
            // lets assume we do not have any references to the stringpool
            BOOL fHasStringData = FALSE;

            // for every column in this row
            for (ULONG ixCol=0; ixCol<m_TableDefs[ixTbl].m_cCols; ixCol++)
            {
                // proceed only when the column type is iSTRING
                if(m_TableDefs[ixTbl].m_pColDefs[ixCol].m_Type == iSTRING)
                {
                    fHasStringData = TRUE;
                    // get the current record
                    BYTE *pOldRow;
                    IfFailThrow(m_Tables[ixTbl].GetRecord(ridOld, &pOldRow));
                    _ASSERTE(pOldRow != NULL);

                    // get column for string; this will get me the current string offset
                    UINT32 nOldStringOffset = GetCol(ixTbl, ixCol, pOldRow);

                    // ignore empty strings, they are not moving anywhere
                    if (nOldStringOffset == 0)
                        continue;

                    // Every referenced offset must have been mapped; a miss
                    // means the mark/reorder passes were inconsistent.
                    UINT32 nNewStringOffset;
                    if (!m_StringPoolOffsetHash.Lookup(nOldStringOffset, &nNewStringOffset))
                        ThrowHR(E_UNEXPECTED);

                    IfFailThrow(PutCol(ixTbl, ixCol, pOldRow, nNewStringOffset));
                }
            }
            // No iSTRING column in this table's schema - skip remaining rows.
            if (!fHasStringData)
                break;
        }
    }
#endif // FEATURE_PREJIT
} // CMiniMdRW::FixStringsInTables
4138
4139// --------------------------------------------------------------------------------------
4140//
4141// Function to fill the given string pool with strings from the existing string pool using the mark array.
4142// Throws on error.
4143//
VOID
CMiniMdRW::CreateReorderedStringPool(
    MetaData::StringHeapRW *pStringHeap,    // [out] empty heap to fill in new order
    BYTE                   *pMarks,         // per-offset bucket marks from the Mark*() passes
    ULONG                   cbHeapSize,     // size of the old string heap / marks array
    CorProfileData         *pProfileData)   // optional IBC data (hot-token ordering)
{
#if defined(FEATURE_PREJIT) && !defined(DACCESS_COMPILE)
    ULONG lastOldOffset = 0;
    ULONG lastNewOffset = 0;

    // special handling of profile data so as to maintain the same order
    // as the hot tokens in the CorProfileData object
    if (pProfileData != NULL)
    {
        // First call counts the hot string tokens; second call fetches them.
        ULONG hotItems = pProfileData->GetHotTokens(
            TBL_COUNT + MDPoolStrings,
            1 << ProfilingFlags_MetaData,
            1 << ProfilingFlags_MetaData,
            NULL,
            0);
        if ( hotItems )
        {
            NewArrayHolder< ULONG > hotItemArr = new ULONG[ hotItems ];
            pProfileData->GetHotTokens(
                TBL_COUNT + MDPoolStrings,
                1 << ProfilingFlags_MetaData,
                1 << ProfilingFlags_MetaData,
                reinterpret_cast<mdToken *>(&hotItemArr[0]),
                hotItems);

            // convert tokens to rids
            for ( ULONG i = 0; i < hotItems ; ++i )
            {
                UINT32 newOffset=0, start=0, end=0;
                hotItemArr[i] = RidFromToken(hotItemArr[i]);

                // The hot token may point into the middle of an interned string;
                // scan backwards to the nearest offset marked ProfileData, which
                // is the head of the string containing it, and copy that whole
                // string into the new heap.
                for (UINT32 offset = hotItemArr[i]; offset >= 1; offset--)
                {
                    if(pMarks[offset] == ReorderData::ProfileData)
                    {
                        LPCSTR szString;
                        IfFailThrow(m_StringHeap.GetString(offset, &szString));
                        IfFailThrow(pStringHeap->AddString(szString, &newOffset));
                        start = offset;
                        end = start + (UINT32)strlen(szString);
                        break;
                    }
                }

                // Map every referenced offset within the copied string's extent
                // (head and tail duplicates) to its position in the new heap.
                for (UINT32 offset = start; offset <end; offset++)
                {
                    if(pMarks[offset] == ReorderData::ProfileData || pMarks[offset] == ReorderData::Duplicate)
                    {
                        m_StringPoolOffsetHash.Add(offset, newOffset);
                    }
                    newOffset++;
                }
            }
        }
    }

    // Emit the remaining strings bucket by bucket, hottest (lowest value) first.
    // Strings already added via the profile pass are skipped by the hash lookup.
    for (BYTE priority = ReorderData::ProfileData; priority <= ReorderData::NonPublicData; priority++)
    {
        for (UINT32 offset = 1; offset < cbHeapSize; offset++)
        {
            // Since MinReorderBucketType is 0 and MaxReorderBucketType is 255, checking an unsigned BYTE against that gives a "comparison
            // is always true" warning. Logically, the assert is:
            // _ASSERTE(pMarks[offset] >= ReorderData::MinReorderBucketType && pMarks[offset] <= ReorderData::MaxReorderBucketType);
            _ASSERTE(0 == ReorderData::MinReorderBucketType);
            _ASSERTE(255 == ReorderData::MaxReorderBucketType);
            _ASSERTE(sizeof(pMarks[0]) == 1);

            if (pMarks[offset] == priority)
            {
                UINT32 newOffset;

                if(!m_StringPoolOffsetHash.Lookup(offset, &newOffset))
                {
                    // Not copied yet - append it and remember where it went so
                    // that any following Duplicate entries can be mapped
                    // relative to it.
                    LPCSTR szString;
                    IfFailThrow(m_StringHeap.GetString(offset, &szString));
                    IfFailThrow(pStringHeap->AddString(szString, &newOffset));
                    m_StringPoolOffsetHash.Add(offset, newOffset);

                    lastOldOffset = offset;
                    lastNewOffset = newOffset;
                }
            }
            else
            if (pMarks[offset] == ReorderData::Duplicate)
            {
                // Tail duplicate: same distance from the head in the new heap
                // as it had in the old one (strings are copied verbatim).
                UINT32 newOffset;
                if (lastNewOffset != 0 && !m_StringPoolOffsetHash.Lookup(offset, &newOffset))
                    m_StringPoolOffsetHash.Add(offset, lastNewOffset + (offset - lastOldOffset));
            }
            else
            if (pMarks[offset] != ReorderData::Undefined)
            {
                // A string of a different bucket breaks the run; duplicates past
                // this point cannot belong to the last string we copied.
                lastNewOffset = 0;
            }
        }
    }
#endif // FEATURE_PREJIT
} // CMiniMdRW::CreateReorderedStringPool
4248
4249// --------------------------------------------------------------------------------------
4250//
4251// Function to reorganize the string pool based on IBC profile data (if available) and static analysis
4252// Throws on error.
4253//
VOID CMiniMdRW::OrganizeStringPool(CorProfileData *pProfileData)
{
#if defined(FEATURE_PREJIT) && !defined(DACCESS_COMPILE)
    // Bail out unless all tables reference the string pool with a uniform
    // column width (otherwise the offsets cannot be rewritten safely).
    if(!IsSafeToReorderStringPool())
    {
        return;
    }

    UINT32 cbStringHeapSize = m_StringHeap.GetUnalignedSize();

    // One mark byte per pool offset; ReorderData::Undefined (0) means unreferenced.
    NewArrayHolder<BYTE> stringMarks = new BYTE[cbStringHeapSize];
    ZeroMemory(stringMarks, cbStringHeapSize);

    // Each string will be assigned a value based on its hotness in the Mark*() functions
    // This list will be later traversed to place the strings in the right order in the string pool and also
    // to update the references in the metadata tables

    // Mark all hot strings
    MarkHotStrings(pProfileData, stringMarks, cbStringHeapSize);

    // Mark all strings in hot rows
    MarkStringsInHotTables(pProfileData, stringMarks, cbStringHeapSize);

    // Mark all remaining strings
    MarkStringsInTables(stringMarks, cbStringHeapSize);

    // Mark duplicates for interned strings
    MarkDuplicateStrings(stringMarks, cbStringHeapSize);

    // Initalize the temporary string heap
    MetaData::StringHeapRW tempStringHeap;

    IfFailThrow(tempStringHeap.InitializeEmpty(
        cbStringHeapSize
        COMMA_INDEBUG_MD(TRUE)));       // fIsReadWrite

    // We will use this hash for fixing the string references in the profile data
    m_StringPoolOffsetHash.Reallocate(cbStringHeapSize);

    // Create the temporary string pool using the mark arrays
    CreateReorderedStringPool(&tempStringHeap, stringMarks, cbStringHeapSize, pProfileData);

    // Update the tables with string offsets into the temporary string pool
    FixStringsInTables();

    // Replace the existing string pool with the modified version
    m_StringHeap.Delete();
    IfFailThrow(m_StringHeap.InitializeFromStringHeap(
        &tempStringHeap,
        TRUE));         // fCopyData
#endif // FEATURE_PREJIT
} // CMiniMdRW::OrganizeStringPool
4306
4307#ifdef FEATURE_PREJIT
4308
4309// write hot data of the pools
4310//
__checkReturn
HRESULT
CMiniMdRW::SaveHotPoolsToStream(
    IStream                  *pStream,
    MetaDataReorderingOptions reorderingOptions,
    CorProfileData           *pProfileData,
    UINT32                   *pnPoolDirSize,     // [out] size of the pool directory written
    UINT32                   *pnHeapsSavedSize)  // [out] total size of all hot heap data written
{
    HRESULT hr = S_OK;
    // Per-pool saved size, indexed by the MDPools enum; 0 = pool not saved.
    UINT32 rgHeapSavedSize[MDPoolCount] = { 0, 0, 0, 0 };

    // save pools in the order they are described in MDPools enum
    //
    // we skip the hot string pool when we reorganize the string pool
    if (!(reorderingOptions & ReArrangeStringPool))
    {
        MetaData::HotHeapWriter stringHotHeapWriter(&m_StringHeap);
        IfFailRet(SaveHotPoolToStream(
            pStream,
            pProfileData,
            &stringHotHeapWriter,
            &rgHeapSavedSize[MDPoolStrings]));
    }

    // Save guid heap hot data
    MetaData::HotHeapWriter guidsHotHeapWriter(&m_GuidHeap);
    IfFailRet(SaveHotPoolToStream(
        pStream,
        pProfileData,
        &guidsHotHeapWriter,
        &rgHeapSavedSize[MDPoolGuids]));

    // Save blob heap hot data
    MetaData::HotHeapWriter blobsHotHeapWriter(
        &m_BlobHeap,
        FALSE);     // fUserStringHeap
    IfFailRet(SaveHotPoolToStream(
        pStream,
        pProfileData,
        &blobsHotHeapWriter,
        &rgHeapSavedSize[MDPoolBlobs]));

    // Save user string heap hot data
    MetaData::HotHeapWriter userStringsHotHeapWriter(
        &m_UserStringHeap,
        TRUE);      // fUserStringHeap
    IfFailRet(SaveHotPoolToStream(
        pStream,
        pProfileData,
        &userStringsHotHeapWriter,
        &rgHeapSavedSize[MDPoolUSBlobs]));

    // fix pool offsets, they need to point to the header of each saved pool
    // (walk backwards, accumulating the distance from the end of the heap data
    // to each pool's header)
    UINT32 nHeapEndOffset = 0;
    for (int i = MDPoolCount; i-- > 0; )
    {
        if (rgHeapSavedSize[i] != 0)
        {
            UINT32 nHeapSavedSize = rgHeapSavedSize[i];
            // Change size of the heap to the (negative) offset of its header
            rgHeapSavedSize[i] = sizeof(struct MetaData::HotHeapHeader) + nHeapEndOffset;
            nHeapEndOffset += nHeapSavedSize;
        }
    }
    // Store size of all heaps
    *pnHeapsSavedSize = nHeapEndOffset;

    // save hot pool dirs: a (pool index, header offset) pair per saved pool
    *pnPoolDirSize = 0;
    for (int i = 0; i < MDPoolCount; i++)
    {
        if (rgHeapSavedSize[i] != 0)
        {
            IfFailRet(StreamUtil::WriteToStream(pStream, i, pnPoolDirSize));
            IfFailRet(StreamUtil::WriteToStream(pStream, (ULONG)rgHeapSavedSize[i], pnPoolDirSize));
        }
    }

    return S_OK;
} // CMiniMdRW::SaveHotPoolsToStream
4392
4393// write hot data of specific blob
4394//
4395__checkReturn
4396HRESULT
4397CMiniMdRW::SaveHotPoolToStream(
4398 IStream *pStream,
4399 CorProfileData *pProfileData,
4400 MetaData::HotHeapWriter *pHotHeapWriter,
4401 UINT32 *pnSavedSize)
4402{
4403
4404 _ASSERTE(pProfileData != NULL);
4405
4406 HRESULT hr = S_OK;
4407 // #CallToGetHotTokens
4408 // see code:CMiniMdRW.SaveFullTablesToStream#WritingHotMetaData for the main caller of this.
4409 if (pProfileData->GetHotTokens(
4410 pHotHeapWriter->GetTableIndex(),
4411 1 << ProfilingFlags_MetaData,
4412 1 << ProfilingFlags_MetaData,
4413 NULL,
4414 0) != 0)
4415 {
4416 IfFailRet(pHotHeapWriter->SaveToStream(
4417 pStream,
4418 pProfileData,
4419 pnSavedSize));
4420 }
4421 else
4422 {
4423 *pnSavedSize = 0;
4424 }
4425
4426 return S_OK;
4427} // CMiniMdRW::SaveHotPoolToStream
4428
4429#endif //FEATURE_PREJIT
4430
4431//*****************************************************************************
4432// Save the tables to the stream.
4433//*****************************************************************************
__checkReturn
HRESULT
CMiniMdRW::SaveENCTablesToStream(
    IStream *pIStream)
{
    HRESULT hr;
    BYTE SchemaBuf[sizeof(CMiniMdSchema)];  //Buffer for compressed schema.
    ULONG cbAlign;                  // Bytes needed for alignment.
    ULONG cbTable;                  // Bytes in a table.
    ULONG cbTotal;                  // Bytes written.
    ULONG ixTbl;                    // Table counter.
    static const unsigned char zeros[8] = {0};  // For padding and alignment.

    // Make sure the minimal delta has a fully expanded table
    IfFailRet(ExpandTables());

    // Write the header.
    CMiniMdSchema Schema = m_Schema;
    Schema.m_heaps |= CMiniMdSchema::DELTA_ONLY;

    // The delta stream only carries rows recorded in the ENC-change lists
    // (m_rENCRecs), so advertise those counts in the schema we persist.
    if (m_rENCRecs != NULL)
    {
        for (ixTbl=0; ixTbl<m_TblCount; ++ixTbl)
            Schema.m_cRecs[ixTbl] = m_rENCRecs[ixTbl].Count();
    }
    else
    {
        for (ixTbl=0; ixTbl<m_TblCount; ++ixTbl)
            Schema.m_cRecs[ixTbl] = 0;
    }

    // Module, ENCLog and ENCMap tables are always persisted in full.
    Schema.m_cRecs[TBL_Module] = m_Schema.m_cRecs[TBL_Module];
    Schema.m_cRecs[TBL_ENCLog] = m_Schema.m_cRecs[TBL_ENCLog];
    Schema.m_cRecs[TBL_ENCMap] = m_Schema.m_cRecs[TBL_ENCMap];

    // Compress and write the schema, then pad to 4-byte alignment.
    // NOTE(review): the pad bytes are the raw bytes of the (still
    // uninitialized) local 'hr' - content of the pad evidently doesn't matter,
    // but consider writing 'zeros' instead.
    cbTotal = Schema.SaveTo(SchemaBuf);
    IfFailGo(pIStream->Write(SchemaBuf, cbTotal, 0));
    if ( (cbAlign = Align4(cbTotal) - cbTotal) != 0)
        IfFailGo(pIStream->Write(&hr, cbAlign, 0));
    cbTotal += cbAlign;

    // For each table...
    for (ixTbl=0; ixTbl<m_TblCount; ++ixTbl)
    {
        if (ixTbl == TBL_ENCLog || ixTbl == TBL_ENCMap || ixTbl == TBL_Module)
        {
            if (m_Schema.m_cRecs[ixTbl] == 0)
                continue;// pretty strange if ENC has no enc data.
            // Persist the ENC table.
            IfFailGo(m_Tables[ixTbl].GetRecordsDataSize((UINT32 *)&cbTable));
            _ASSERTE(cbTable == m_TableDefs[ixTbl].m_cbRec * m_Schema.m_cRecs[ixTbl]);
            cbTotal += cbTable;
            IfFailGo(m_Tables[ixTbl].SaveToStream(
                pIStream));
        }
        else
        if (Schema.m_cRecs[ixTbl])
        {
            // Copy just the delta records.

            // Create the temporary table.
            MetaData::TableRW tempTable;
            IfFailGo(tempTable.InitializeEmpty_WithRecordCount(
                m_TableDefs[ixTbl].m_cbRec,
                Schema.m_cRecs[ixTbl]
                COMMA_INDEBUG_MD(TRUE)));       // fIsReadWrite
            INDEBUG_MD(tempTable.Debug_SetTableInfo(NULL, ixTbl));

            // For each row in the data.
            RID rid;
            for (ULONG iDelta=0; iDelta<Schema.m_cRecs[ixTbl]; ++iDelta)
            {
                RID ridNew;
                // m_rENCRecs holds the rids of the rows changed this session.
                rid = m_rENCRecs[ixTbl][iDelta];
                BYTE *pRow;
                IfFailGo(m_Tables[ixTbl].GetRecord(rid, &pRow));
                BYTE *pNew;
                IfFailGo(tempTable.AddRecord(&pNew, (UINT32 *)&ridNew));
                _ASSERTE(iDelta+1 == ridNew);

                // Raw byte copy - row layout is identical in both tables.
                memcpy(pNew, pRow, m_TableDefs[ixTbl].m_cbRec);
            }
            // Persist the temp table to the stream.
            IfFailGo(tempTable.GetRecordsDataSize((UINT32 *)&cbTable));
            _ASSERTE(cbTable == m_TableDefs[ixTbl].m_cbRec * Schema.m_cRecs[ixTbl]);
            cbTotal += cbTable;
            IfFailGo(tempTable.SaveToStream(
                pIStream));
        }
    }

    // Pad with at least 2 bytes and align on 4 bytes.
    cbAlign = Align4(cbTotal) - cbTotal;
    if (cbAlign < 2)
        cbAlign += 4;
    IfFailGo(pIStream->Write(zeros, cbAlign, 0));
    cbTotal += cbAlign;
    _ASSERTE(m_cbSaveSize == 0 || m_cbSaveSize == cbTotal);

ErrExit:
    return hr;
} // CMiniMdRW::SaveENCTablesToStream
4536
4537//*****************************************************************************
4538// Save the tables to the stream.
4539//*****************************************************************************
4540__checkReturn
4541HRESULT
4542CMiniMdRW::SaveTablesToStream(
4543 IStream *pIStream, // The stream.
4544 MetaDataReorderingOptions reorderingOptions,
4545 CorProfileData *pProfileData)
4546{
4547 HRESULT hr;
4548
4549 // Prepare the data for save.
4550 IfFailGo(PreSave());
4551
4552 switch (m_OptionValue.m_UpdateMode & MDUpdateMask)
4553 {
4554 case MDUpdateFull:
4555 case MDUpdateIncremental:
4556 case MDUpdateExtension:
4557 case MDUpdateENC:
4558 hr = SaveFullTablesToStream(pIStream, reorderingOptions, pProfileData);
4559 break;
4560 case MDUpdateDelta:
4561 hr = SaveENCTablesToStream(pIStream);
4562 break;
4563 default:
4564 _ASSERTE(!"Internal error -- unknown save mode");
4565 return E_INVALIDARG;
4566 }
4567
4568ErrExit:
4569 return hr;
4570} // CMiniMdRW::SaveTablesToStream
4571
4572//*****************************************************************************
4573// Save a full pool to the stream.
4574//*****************************************************************************
4575__checkReturn
4576HRESULT
4577CMiniMdRW::SaveFullPoolToStream(
4578 int iPool, // The pool.
4579 IStream *pStream) // The stream.
4580{
4581 HRESULT hr;
4582
4583 switch (iPool)
4584 {
4585 case MDPoolStrings:
4586 hr = m_StringHeap.SaveToStream_Aligned(
4587 0, // Start offset of the data to be stored
4588 pStream);
4589 break;
4590 case MDPoolGuids:
4591 hr = m_GuidHeap.SaveToStream(
4592 pStream);
4593 break;
4594 case MDPoolBlobs:
4595 hr = m_BlobHeap.SaveToStream_Aligned(
4596 0, // Start offset of the data to be stored
4597 pStream);
4598 break;
4599 case MDPoolUSBlobs:
4600 hr = m_UserStringHeap.SaveToStream_Aligned(
4601 0, // Start offset of the data to be stored
4602 pStream);
4603 break;
4604 default:
4605 hr = E_INVALIDARG;
4606 }
4607
4608 return hr;
4609} // CMiniMdRW::SaveFullPoolToStream
4610
4611//*****************************************************************************
4612// Save a ENC pool to the stream.
4613//*****************************************************************************
4614__checkReturn
4615HRESULT
4616CMiniMdRW::SaveENCPoolToStream(
4617 int iPool, // The pool.
4618 IStream *pIStream) // The stream.
4619{
4620 HRESULT hr;
4621
4622 switch (iPool)
4623 {
4624 case MDPoolStrings:
4625 {
4626 UINT32 nEnCDeltaStartOffset = m_StringHeap.GetEnCSessionStartHeapSize();
4627 hr = m_StringHeap.SaveToStream_Aligned(
4628 nEnCDeltaStartOffset, // Start offset of the data to be stored
4629 pIStream);
4630 break;
4631 }
4632 case MDPoolGuids:
4633 {
4634 // Save full Guid heap (we never save EnC delta)
4635 hr = m_GuidHeap.SaveToStream(
4636 pIStream);
4637 break;
4638 }
4639 case MDPoolBlobs:
4640 {
4641 UINT32 nEnCDeltaStartOffset = m_BlobHeap.GetEnCSessionStartHeapSize();
4642 hr = m_BlobHeap.SaveToStream_Aligned(
4643 nEnCDeltaStartOffset, // Start offset of the data to be stored
4644 pIStream);
4645 break;
4646 }
4647 case MDPoolUSBlobs:
4648 {
4649 UINT32 nEnCDeltaStartOffset = m_UserStringHeap.GetEnCSessionStartHeapSize();
4650 hr = m_UserStringHeap.SaveToStream_Aligned(
4651 nEnCDeltaStartOffset, // Start offset of the data to be stored
4652 pIStream);
4653 break;
4654 }
4655 default:
4656 hr = E_INVALIDARG;
4657 }
4658
4659 return hr;
4660} // CMiniMdRW::SaveENCPoolToStream
4661
4662//*****************************************************************************
4663// Save a pool to the stream.
4664//*****************************************************************************
4665__checkReturn
4666HRESULT
4667CMiniMdRW::SavePoolToStream(
4668 int iPool, // The pool.
4669 IStream *pIStream) // The stream.
4670{
4671 HRESULT hr;
4672 switch (m_OptionValue.m_UpdateMode & MDUpdateMask)
4673 {
4674 case MDUpdateFull:
4675 case MDUpdateIncremental:
4676 case MDUpdateExtension:
4677 case MDUpdateENC:
4678 hr = SaveFullPoolToStream(iPool, pIStream);
4679 break;
4680 case MDUpdateDelta:
4681 hr = SaveENCPoolToStream(iPool, pIStream);
4682 break;
4683 default:
4684 _ASSERTE(!"Internal error -- unknown save mode");
4685 return E_INVALIDARG;
4686 }
4687
4688 return hr;
4689} // CMiniMdRW::SavePoolToStream
4690
4691//*****************************************************************************
4692// Expand a table from the initial (hopeful) 2-byte column sizes to the large
4693// (but always adequate) 4-byte column sizes.
4694//*****************************************************************************
__checkReturn
HRESULT
CMiniMdRW::ExpandTables()
{
    HRESULT hr = S_OK;
    CMiniMdSchema   Schema;             // Temp schema by which to build tables.
    ULONG           ixTbl;              // Table counter.

    // Allow function to be called many times.
    if (m_eGrow == eg_grown)
        return (S_OK);

    // OutputDebugStringA("Growing tables to large size.\n");

    // Make pool indices the large size.
    Schema.m_heaps = 0;
    Schema.m_heaps |= CMiniMdSchema::HEAP_STRING_4;
    Schema.m_heaps |= CMiniMdSchema::HEAP_GUID_4;
    Schema.m_heaps |= CMiniMdSchema::HEAP_BLOB_4;

    // Make Row counts the large size (> 64K rows forces 4-byte rids).
    memset(Schema.m_cRecs, 0, sizeof(Schema.m_cRecs));
    for (ixTbl=0; ixTbl<m_TblCount; ++ixTbl)
        Schema.m_cRecs[ixTbl] = USHRT_MAX+1;

    // Compute how many bits required to hold a rid.
    Schema.m_rid = 16;

    // Migrate every table's data into the wide-column layout.
    for (ixTbl=0; ixTbl<m_TblCount; ++ixTbl)
    {
        IfFailGo(ExpandTableColumns(Schema, ixTbl));
    }

    // Things are bigger now: record the wide layout in the live schema and
    // widen the heap-offset masks so no offset gets truncated.
    m_Schema.m_rid = 16;
    m_Schema.m_heaps |= CMiniMdSchema::HEAP_STRING_4;
    m_Schema.m_heaps |= CMiniMdSchema::HEAP_GUID_4;
    m_Schema.m_heaps |= CMiniMdSchema::HEAP_BLOB_4;
    m_iStringsMask = 0xffffffff;
    m_iGuidsMask = 0xffffffff;
    m_iBlobsMask = 0xffffffff;

    // Remember that we've grown.
    m_eGrow = eg_grown;
    m_maxRid = m_maxIx = ULONG_MAX;

ErrExit:
    return hr;
} // CMiniMdRW::ExpandTables
4744
4745
__checkReturn
HRESULT
CMiniMdRW::InitWithLargeTables()
{
    CMiniMdSchema   Schema;     // Temp schema by which to build tables.
    HRESULT         hr = S_OK;

    // Make pool indices the large size.
    Schema.m_heaps = 0;
    Schema.m_heaps |= CMiniMdSchema::HEAP_STRING_4;
    Schema.m_heaps |= CMiniMdSchema::HEAP_GUID_4;
    Schema.m_heaps |= CMiniMdSchema::HEAP_BLOB_4;

    // Make Row counts the large size (> 64K rows forces 4-byte rids).
    memset(Schema.m_cRecs, 0, sizeof(Schema.m_cRecs));
    for (int ixTbl=0; ixTbl<(int)m_TblCount; ++ixTbl)
        Schema.m_cRecs[ixTbl] = USHRT_MAX+1;

    // Compute how many bits required to hold a rid.
    Schema.m_rid = 16;

    // For each table... initialize the column layout directly in the wide
    // format (unlike ExpandTables, there is no existing data to migrate).
    for (int ixTbl=0; ixTbl<(int)m_TblCount; ++ixTbl)
    {
        IfFailRet(InitColsForTable(Schema, ixTbl, &m_TableDefs[ixTbl], 0, TRUE));
    }


    // Things are bigger now.
    // NOTE(review): unlike ExpandTables, m_iBlobsMask is not widened here even
    // though HEAP_BLOB_4 is set above - confirm whether that is intentional.
    m_Schema.m_rid = 16;
    m_Schema.m_heaps |= CMiniMdSchema::HEAP_STRING_4;
    m_Schema.m_heaps |= CMiniMdSchema::HEAP_GUID_4;
    m_Schema.m_heaps |= CMiniMdSchema::HEAP_BLOB_4;
    m_iStringsMask = 0xffffffff;
    m_iGuidsMask = 0xffffffff;

    return hr;
}// CMiniMdRW::InitWithLargeTables
4784
4785//*****************************************************************************
4786// Expand the sizes of a tables columns according to a new schema. When this
4787// happens, all RID and Pool index columns expand from 2 to 4 bytes.
4788//*****************************************************************************
__checkReturn
HRESULT
CMiniMdRW::ExpandTableColumns(
    CMiniMdSchema &Schema,
    ULONG          ixTbl)
{
    HRESULT hr;
    CMiniTableDef   sTempTable;         // Definition for a temporary table.
    CQuickBytes     qbTempCols;
    ULONG           ixCol;              // Column counter.
    ULONG           cbFixed;            // Count of bytes that don't move.
    CMiniColDef     *pFromCols;         // Definitions of "from" columns.
    CMiniColDef     *pToCols;           // Definitions of "To" columns.
    ULONG           cMoveCols;          // Count of columns to move.
    ULONG           cFixedCols;         // Count of columns to move.

    // Allocate a def for the temporary table.
    sTempTable = m_TableDefs[ixTbl];
    IfFailGo(qbTempCols.ReSizeNoThrow(sTempTable.m_cCols * sizeof(CMiniColDef) + 1));
    // Mark the array of columns as not allocated (not ALLOCATED_MEMORY_MARKER) for SetNewColumnDefinition
    // call bellow (code:#SetNewColumnDefinition_call)
    *(BYTE *)(qbTempCols.Ptr()) = 0;
    sTempTable.m_pColDefs = (CMiniColDef *)((BYTE *)(qbTempCols.Ptr()) + 1);

    // Initialize temp table col defs based on counts of data in the tables.
    IfFailGo(InitColsForTable(Schema, ixTbl, &sTempTable, 1, FALSE));

    if (GetCountRecs(ixTbl) > 0)
    {
        // Analyze the column definitions to determine the unchanged vs changed parts.
        // Leading columns whose offset and width are identical in both layouts
        // can be copied in one block; everything after the first difference
        // must be moved column-by-column.
        cbFixed = 0;
        for (ixCol = 0; ixCol < sTempTable.m_cCols; ++ixCol)
        {
            if (sTempTable.m_pColDefs[ixCol].m_oColumn != m_TableDefs[ixTbl].m_pColDefs[ixCol].m_oColumn ||
                sTempTable.m_pColDefs[ixCol].m_cbColumn != m_TableDefs[ixTbl].m_pColDefs[ixCol].m_cbColumn)
                break;
            cbFixed += sTempTable.m_pColDefs[ixCol].m_cbColumn;
        }
        if (ixCol == sTempTable.m_cCols)
        {
            // no column is changing.  We are done.
            goto ErrExit;
        }
        cFixedCols = ixCol;
        pFromCols = &m_TableDefs[ixTbl].m_pColDefs[ixCol];
        pToCols = &sTempTable.m_pColDefs[ixCol];
        cMoveCols = sTempTable.m_cCols - ixCol;
        // In the expanded layout every moved column must be 4 bytes wide.
        for (; ixCol < sTempTable.m_cCols; ++ixCol)
        {
            _ASSERTE(sTempTable.m_pColDefs[ixCol].m_cbColumn == 4);
        }

        // Create the temporary table.
        MetaData::TableRW tempTable;
        IfFailGo(tempTable.InitializeEmpty_WithRecordCount(
            sTempTable.m_cbRec,
            m_Schema.m_cRecs[ixTbl]
            COMMA_INDEBUG_MD(TRUE)));       // fIsReadWrite
        INDEBUG_MD(tempTable.Debug_SetTableInfo(NULL, ixTbl));

        // For each row in the data.
        RID rid;    // Row iterator.

        for (rid = 1; rid <= m_Schema.m_cRecs[ixTbl]; ++rid)
        {
            RID ridNew;
            BYTE *pFrom;
            BYTE *pTo;

            IfFailGo(m_Tables[ixTbl].GetRecord(rid, &pFrom));
            IfFailGo(tempTable.AddRecord(&pTo, (UINT32 *)&ridNew));
            _ASSERTE(rid == ridNew);

            // Move the fixed part.
            memcpy(pTo, pFrom, cbFixed);

            // Expand the expanded parts.
            for (ixCol = 0; ixCol < cMoveCols; ++ixCol)
            {
                if (m_TableDefs[ixTbl].m_pColDefs[cFixedCols + ixCol].m_cbColumn == sizeof(USHORT))
                {
                    // The places that access expect the int16 to be in the high bytes so we need to the extra swap
                    SET_UNALIGNED_VAL32((pTo + pToCols[ixCol].m_oColumn), VAL16(*(USHORT*)(pFrom + pFromCols[ixCol].m_oColumn)));
                }
                else
                {
                    // In this case we're just copying the data over (column was already 4 bytes)
                    memcpy(pTo + pToCols[ixCol].m_oColumn, pFrom + pFromCols[ixCol].m_oColumn, sizeof(ULONG));
                }
            }
        }

        // Keep the expanded table.
        m_Tables[ixTbl].Delete();
        IfFailGo(m_Tables[ixTbl].InitializeFromTable(
            &tempTable,
            TRUE));     // fCopyData
        INDEBUG_MD(m_Tables[ixTbl].Debug_SetTableInfo(NULL, ixTbl));
    }
    else
    {   // No data, so just reinitialize.
        m_Tables[ixTbl].Delete();
        IfFailGo(m_Tables[ixTbl].InitializeEmpty_WithRecordCount(
            sTempTable.m_cbRec,
            g_TblSizeInfo[0][ixTbl]
            COMMA_INDEBUG_MD(TRUE)));       // fIsReadWrite
        INDEBUG_MD(m_Tables[ixTbl].Debug_SetTableInfo(NULL, ixTbl));
    }

    //#SetNewColumnDefinition_call
    // Keep the new column defs.
    IfFailGo(SetNewColumnDefinition(&(m_TableDefs[ixTbl]), sTempTable.m_pColDefs, ixTbl));
    m_TableDefs[ixTbl].m_cbRec = sTempTable.m_cbRec;

ErrExit:
    return hr;
} // CMiniMdRW::ExpandTableColumns
4906
4907
4908//*****************************************************************************
4909// Used by caller to let us know save is completed.
4910//*****************************************************************************
__checkReturn
HRESULT
CMiniMdRW::SaveDone()
{
    // Saving is complete; delegate entirely to PostSave() for any cleanup of
    // state established during the save (see PostSave's definition).
    return PostSave();
} // CMiniMdRW::SaveDone
4917
4918//*****************************************************************************
4919// General post-token-move table fixup.
4920//*****************************************************************************
4921__checkReturn
4922HRESULT
4923CMiniMdRW::FixUpTable(
4924 ULONG ixTbl) // Index of table to fix.
4925{
4926 HRESULT hr = S_OK;
4927 ULONG i, j; // Loop control.
4928 ULONG cRows; // Count of rows in table.
4929 void *pRec; // Pointer to row data.
4930 mdToken tk; // A token.
4931 ULONG rCols[16]; // List of columns with token data.
4932 ULONG cCols; // Count of columns with token data.
4933
4934 // If no remaps, nothing to do.
4935 if (GetTokenMovementMap() == NULL)
4936 return S_OK;
4937
4938 // Find the columns with token data.
4939 cCols = 0;
4940 _ASSERTE(m_TableDefs[ixTbl].m_cCols <= 16);
4941 for (i=0; i<m_TableDefs[ixTbl].m_cCols; ++i)
4942 {
4943 if (m_TableDefs[ixTbl].m_pColDefs[i].m_Type <= iCodedTokenMax)
4944 rCols[cCols++] = i;
4945 }
4946 _ASSERTE(cCols);
4947 if (cCols == 0)
4948 return S_OK;
4949
4950 cRows = m_Schema.m_cRecs[ixTbl];
4951
4952 // loop through all Rows
4953 for (i = 1; i<=cRows; ++i)
4954 {
4955 IfFailGo(getRow(ixTbl, i, &pRec));
4956 for (j=0; j<cCols; ++j)
4957 {
4958 tk = GetToken(ixTbl, rCols[j], pRec);
4959 tk = GetTokenMovementMap()->SafeRemap(tk);
4960 IfFailGo(PutToken(ixTbl, rCols[j], pRec, tk));
4961 }
4962 }
4963
4964ErrExit:
4965 return hr;
4966} // CMiniMdRW::FixUpTable
4967
4968
4969//*****************************************************************************
4970// Fixup all the embedded ref to corresponding def before we remap tokens movement.
4971//*****************************************************************************
__checkReturn
HRESULT
CMiniMdRW::FixUpRefToDef()
{
    // Intentionally a no-op in this implementation; always reports success.
    return NOERROR;
} // CMiniMdRW::FixUpRefToDef
4978
4979//*****************************************************************************
4980// Given a table with a pointer (index) to a sequence of rows in another
4981// table, get the RID of the end row. This is the STL-ish end; the first row
4982// not in the list. Thus, for a list of 0 elements, the start and end will
4983// be the same.
4984//*****************************************************************************
__checkReturn
HRESULT
CMiniMdRW::Impl_GetEndRidForColumn(   // The End rid.
    UINT32       nTableIndex,         // Table with the parent row.
    RID          nRowIndex,           // Parent row holding the start of the child range.
    CMiniColDef &def,                 // Column containing the RID into other table.
    UINT32       nTargetTableIndex,   // The other table.
    RID         *pEndRid)             // [out] First RID *not* in the range (STL-style end).
{
    HRESULT hr;
    ULONG ixEnd;
    void *pRow;

    // Last rid in range from NEXT record, or count of table, if last record.
    _ASSERTE(nRowIndex <= m_Schema.m_cRecs[nTableIndex]);
    if (nRowIndex < m_Schema.m_cRecs[nTableIndex])
    {
        // Not the last row: the next row's start is this row's end.
        IfFailRet(getRow(nTableIndex, nRowIndex + 1, &pRow));
        ixEnd = getIX(pRow, def);
        // We use a special value, 'END_OF_TABLE' (currently 0), to indicate
        // end-of-table.  If we find the special value we'll have to compute
        // the value to return.  If we don't find the special value, then
        // the value is correct.
        if (ixEnd != END_OF_TABLE)
        {
            *pEndRid = ixEnd;
            return S_OK;
        }
    }

    // Either the child pointer value in the next row was END_OF_TABLE, or
    // the row is the last row of the table.  In either case, we must return
    // a value which will work out to the END of the child table.  That
    // value depends on the value in the row itself -- if the row contains
    // END_OF_TABLE, there are no children, and to make the subtraction
    // work out, we return END_OF_TABLE for the END value.  If the row
    // contains some value, then we return the actual END count.
    IfFailRet(getRow(nTableIndex, nRowIndex, &pRow));
    if (getIX(pRow, def) == END_OF_TABLE)
    {
        // Empty range: begin == end == END_OF_TABLE.
        ixEnd = END_OF_TABLE;
    }
    else
    {
        // Range runs to the end of the target table (one past the last RID).
        ixEnd = m_Schema.m_cRecs[nTargetTableIndex] + 1;
    }

    *pEndRid = ixEnd;
    return S_OK;
} // CMiniMd::Impl_GetEndRidForColumn
5035
5036//*****************************************************************************
5037// Add a row to any table.
5038//*****************************************************************************
5039__checkReturn
5040HRESULT
5041CMiniMdRW::AddRecord( // S_OK or error.
5042 UINT32 nTableIndex, // The table to expand.
5043 void **ppRow,
5044 RID *pRid) // Put RID here.
5045{
5046 HRESULT hr;
5047
5048 _ASSERTE(nTableIndex < m_TblCount);
5049 _ASSERTE(!m_bPreSaveDone && "Cannot add records after PreSave and before Save.");
5050 IfFailRet(m_Tables[nTableIndex].AddRecord(
5051 reinterpret_cast<BYTE **>(ppRow),
5052 reinterpret_cast<UINT32 *>(pRid)));
5053 if (*pRid > m_maxRid)
5054 {
5055 m_maxRid = *pRid;
5056 if (m_maxRid > m_limRid && m_eGrow == eg_ok)
5057 {
5058 // OutputDebugStringA("Growing tables due to Record overflow.\n");
5059 m_eGrow = eg_grow, m_maxRid = m_maxIx = ULONG_MAX;
5060 }
5061 }
5062 ++m_Schema.m_cRecs[nTableIndex];
5063 SetSorted(nTableIndex, false);
5064 if (m_pVS[nTableIndex] != NULL)
5065 {
5066 m_pVS[nTableIndex]->m_isMapValid = false;
5067 }
5068
5069 return S_OK;
5070} // CMiniMdRW::AddRecord
5071
5072//*****************************************************************************
5073// Add a row to the TypeDef table, and initialize the pointers to other tables.
5074//*****************************************************************************
5075__checkReturn
5076HRESULT
5077CMiniMdRW::AddTypeDefRecord(
5078 TypeDefRec **ppRow,
5079 RID *pnRowIndex)
5080{
5081 HRESULT hr;
5082 IfFailRet(AddRecord(TBL_TypeDef, (void **)ppRow, pnRowIndex));
5083
5084 IfFailRet(PutCol(TBL_TypeDef, TypeDefRec::COL_MethodList, *ppRow, NewRecordPointerEndValue(TBL_Method)));
5085 IfFailRet(PutCol(TBL_TypeDef, TypeDefRec::COL_FieldList, *ppRow, NewRecordPointerEndValue(TBL_Field)));
5086
5087 return S_OK;
5088} // CMiniMdRW::AddTypeDefRecord
5089
5090//*****************************************************************************
5091// Add a row to the Method table, and initialize the pointers to other tables.
5092//*****************************************************************************
5093__checkReturn
5094HRESULT
5095CMiniMdRW::AddMethodRecord(
5096 MethodRec **ppRow,
5097 RID *pnRowIndex)
5098{
5099 HRESULT hr;
5100 IfFailRet(AddRecord(TBL_Method, (void **)ppRow, pnRowIndex));
5101
5102 IfFailRet(PutCol(TBL_Method, MethodRec::COL_ParamList, *ppRow, NewRecordPointerEndValue(TBL_Param)));
5103
5104 return S_OK;
5105} // CMiniMdRW::AddMethodRecord
5106
5107//*****************************************************************************
5108// Add a row to the EventMap table, and initialize the pointers to other tables.
5109//*****************************************************************************
5110__checkReturn
5111HRESULT
5112CMiniMdRW::AddEventMapRecord(
5113 EventMapRec **ppRow,
5114 RID *pnRowIndex)
5115{
5116 HRESULT hr;
5117 IfFailRet(AddRecord(TBL_EventMap, (void **)ppRow, pnRowIndex));
5118
5119 IfFailRet(PutCol(TBL_EventMap, EventMapRec::COL_EventList, *ppRow, NewRecordPointerEndValue(TBL_Event)));
5120
5121 SetSorted(TBL_EventMap, false);
5122
5123 return S_OK;
5124} // CMiniMdRW::AddEventMapRecord
5125
5126//*********************************************************************************
5127// Add a row to the PropertyMap table, and initialize the pointers to other tables.
5128//*********************************************************************************
5129__checkReturn
5130HRESULT
5131CMiniMdRW::AddPropertyMapRecord(
5132 PropertyMapRec **ppRow,
5133 RID *pnRowIndex)
5134{
5135 HRESULT hr;
5136 IfFailRet(AddRecord(TBL_PropertyMap, (void **)ppRow, pnRowIndex));
5137
5138 IfFailRet(PutCol(TBL_PropertyMap, PropertyMapRec::COL_PropertyList, *ppRow, NewRecordPointerEndValue(TBL_Property)));
5139
5140 SetSorted(TBL_PropertyMap, false);
5141
5142 return S_OK;
5143} // CMiniMdRW::AddPropertyMapRecord
5144
5145//*****************************************************************************
5146// converting a ANSI heap string to unicode string to an output buffer
5147//*****************************************************************************
5148__checkReturn
5149HRESULT
5150CMiniMdRW::Impl_GetStringW(
5151 ULONG ix,
5152 __out_ecount (cchBuffer) LPWSTR szOut,
5153 ULONG cchBuffer,
5154 ULONG *pcchBuffer)
5155{
5156 LPCSTR szString; // Single byte version.
5157 int iSize; // Size of resulting string, in wide chars.
5158 HRESULT hr = NOERROR;
5159
5160 IfFailGo(getString(ix, &szString));
5161
5162 if (*szString == 0)
5163 {
5164 // If emtpy string "", return pccBuffer 0
5165 if ( szOut && cchBuffer )
5166 szOut[0] = W('\0');
5167 if ( pcchBuffer )
5168 *pcchBuffer = 0;
5169 goto ErrExit;
5170 }
5171 if (!(iSize=::WszMultiByteToWideChar(CP_UTF8, 0, szString, -1, szOut, cchBuffer)))
5172 {
5173 // What was the problem?
5174 DWORD dwNT = GetLastError();
5175
5176 // Not truncation?
5177 if (dwNT != ERROR_INSUFFICIENT_BUFFER)
5178 IfFailGo(HRESULT_FROM_NT(dwNT));
5179
5180 // Truncation error; get the size required.
5181 if (pcchBuffer)
5182 *pcchBuffer = ::WszMultiByteToWideChar(CP_UTF8, 0, szString, -1, NULL, 0);
5183
5184 if ((szOut != NULL) && (cchBuffer > 0))
5185 { // null-terminate the truncated output string
5186 szOut[cchBuffer - 1] = W('\0');
5187 }
5188
5189 hr = CLDB_S_TRUNCATION;
5190 goto ErrExit;
5191 }
5192 if (pcchBuffer)
5193 *pcchBuffer = iSize;
5194
5195ErrExit:
5196 return hr;
5197} // CMiniMdRW::Impl_GetStringW
5198
5199//*****************************************************************************
5200// Get a column value from a row. Signed types are sign-extended to the full
5201// ULONG; unsigned types are 0-extended.
5202//*****************************************************************************
5203ULONG CMiniMdRW::GetCol( // Column data.
5204 ULONG ixTbl, // Index of the table.
5205 ULONG ixCol, // Index of the column.
5206 void *pvRecord) // Record with the data.
5207{
5208 BYTE *pRecord; // The row.
5209 BYTE *pData; // The item in the row.
5210 ULONG val; // The return value.
5211 // Valid Table, Column, Row?
5212 _ASSERTE(ixTbl < m_TblCount);
5213 _ASSERTE(ixCol < m_TableDefs[ixTbl].m_cCols);
5214
5215 // Column size, offset
5216 CMiniColDef *pColDef = &m_TableDefs[ixTbl].m_pColDefs[ixCol];
5217
5218 pRecord = reinterpret_cast<BYTE*>(pvRecord);
5219 pData = pRecord + pColDef->m_oColumn;
5220
5221 switch (pColDef->m_cbColumn)
5222 {
5223 case 1:
5224 val = *pData;
5225 break;
5226 case 2:
5227 if (pColDef->m_Type == iSHORT)
5228 val = static_cast<LONG>((INT16)GET_UNALIGNED_VAL16(pData));
5229 else
5230 val = GET_UNALIGNED_VAL16(pData);
5231 break;
5232 case 4:
5233 val = GET_UNALIGNED_VAL32(pData);
5234 break;
5235 default:
5236 _ASSERTE(!"Unexpected column size");
5237 return 0;
5238 }
5239
5240 return val;
5241} // CMiniMdRW::GetCol
5242
5243//*****************************************************************************
5244// General token column fetcher.
5245//*****************************************************************************
5246mdToken CMiniMdRW::GetToken(
5247 ULONG ixTbl, // Index of the table.
5248 ULONG ixCol, // Index of the column.
5249 void *pvRecord) // Record with the data.
5250{
5251 ULONG tkn; // Token from the table.
5252
5253 // Valid Table, Column, Row?
5254 _ASSERTE(ixTbl < m_TblCount);
5255 _ASSERTE(ixCol < m_TableDefs[ixTbl].m_cCols);
5256
5257 // Column description.
5258 CMiniColDef *pColDef = &m_TableDefs[ixTbl].m_pColDefs[ixCol];
5259
5260 // Is the column just a RID?
5261 if (pColDef->m_Type <= iRidMax)
5262 {
5263 tkn = GetCol(ixTbl, ixCol, pvRecord); //pColDef, pvRecord, RidFromToken(tk));
5264 tkn = TokenFromRid(tkn, GetTokenForTable(pColDef->m_Type));
5265 }
5266 else // Is it a coded token?
5267 if (pColDef->m_Type <= iCodedTokenMax)
5268 {
5269 ULONG indexCodedToken = pColDef->m_Type - iCodedToken;
5270 if (indexCodedToken < COUNTOF(g_CodedTokens))
5271 {
5272 const CCodedTokenDef *pCdTkn = &g_CodedTokens[indexCodedToken];
5273 tkn = decodeToken(GetCol(ixTbl, ixCol, pvRecord), pCdTkn->m_pTokens, pCdTkn->m_cTokens);
5274 }
5275 else
5276 {
5277 _ASSERTE(!"GetToken called on unexpected coded token type");
5278 tkn = 0;
5279 }
5280 }
5281 else // It is an error.
5282 {
5283 _ASSERTE(!"GetToken called on unexpected column type");
5284 tkn = 0;
5285 }
5286
5287 return tkn;
5288} // CMiniMdRW::GetToken
5289
5290//*****************************************************************************
5291// Put a column value into a row. The value is passed as a ULONG; 1, 2, or 4
5292// bytes are stored into the column. No table is specified, and the coldef
5293// is passed directly. This allows putting data into other buffers, such as
5294// the temporary table used for saving.
5295//*****************************************************************************
5296__checkReturn
5297HRESULT
5298CMiniMdRW::PutCol( // S_OK or E_UNEXPECTED.
5299 CMiniColDef ColDef, // The col def.
5300 void *pvRecord, // The row.
5301 ULONG uVal) // Value to put.
5302{
5303 HRESULT hr = S_OK;
5304 BYTE *pRecord; // The row.
5305 BYTE *pData; // The item in the row.
5306
5307 pRecord = reinterpret_cast<BYTE*>(pvRecord);
5308 pData = pRecord + ColDef.m_oColumn;
5309
5310 switch (ColDef.m_cbColumn)
5311 {
5312 case 1:
5313 // Don't store a value that would overflow.
5314 if (uVal > UCHAR_MAX)
5315 return E_INVALIDARG;
5316 *pData = static_cast<BYTE>(uVal);
5317 break;
5318 case 2:
5319 if (uVal > USHRT_MAX)
5320 return E_INVALIDARG;
5321 SET_UNALIGNED_VAL16(pData, uVal);
5322 break;
5323 case 4:
5324 SET_UNALIGNED_VAL32(pData, uVal);
5325 break;
5326 default:
5327 _ASSERTE(!"Unexpected column size");
5328 return E_UNEXPECTED;
5329 }
5330
5331 return hr;
5332} // CMiniMdRW::PutCol
5333
5334//*****************************************************************************
5335// Put a column value into a row. The value is passed as a ULONG; 1, 2, or 4
5336// bytes are stored into the column.
5337//*****************************************************************************
5338
5339//*****************************************************************************
5340// Add a string to the string pool, and store the offset in the cell.
5341//*****************************************************************************
__checkReturn
HRESULT
CMiniMdRW::PutString(   // S_OK or E_UNEXPECTED.
    ULONG  ixTbl,       // The table.
    ULONG  ixCol,       // The column.
    void  *pvRecord,    // The row.
    LPCSTR szString)    // Value to put (NUL-terminated; empty string maps to offset 0).
{
    _ASSERTE(szString != NULL);

    HRESULT hr = S_OK;
    UINT32 nStringIndex = 0;

    // Valid Table, Column, Row?
    _ASSERTE(ixTbl < m_TblCount);
    _ASSERTE(ixCol < m_TableDefs[ixTbl].m_cCols);

    // Column description.
    _ASSERTE(m_TableDefs[ixTbl].m_pColDefs[ixCol].m_Type == iSTRING);

    // <TODO>@FUTURE: Set iOffset to 0 for empty string. Work around the bug in
    // StringPool that does not handle empty strings correctly.</TODO>
    if (szString[0] == 0)
    { // It's empty string
        nStringIndex = 0;
    }
    else
    { // It's non-empty string
        IfFailGo(m_StringHeap.AddString(
            szString,
            &nStringIndex));
    }

    // Store the heap offset into the row's column.
    hr = PutCol(m_TableDefs[ixTbl].m_pColDefs[ixCol], pvRecord, nStringIndex);

    // NOTE: from here on nStringIndex is reused to hold the heap's total
    // aligned size (not the string's offset), so the overflow check below
    // tracks overall heap growth.
    if (m_maxIx != ULONG_MAX)
    {
        IfFailGo(m_StringHeap.GetAlignedSize(&nStringIndex));
    }
    if (nStringIndex > m_maxIx)
    {
        m_maxIx = nStringIndex;
        if (m_maxIx > m_limIx && m_eGrow == eg_ok)
        {
            // Heap outgrew the small index size: schedule a table grow and
            // poison the watermarks so this triggers only once.
            // OutputDebugStringA("Growing tables due to String overflow.\n");
            m_eGrow = eg_grow, m_maxRid = m_maxIx = ULONG_MAX;
        }
    }

ErrExit:
    return hr;
} // CMiniMdRW::PutString
5394
5395//*****************************************************************************
5396// Add a string to the string pool, and store the offset in the cell.
5397// Returns: S_OK or E_UNEXPECTED.
5398//*****************************************************************************
__checkReturn
HRESULT
CMiniMdRW::PutStringW(
    ULONG   ixTbl,      // The table.
    ULONG   ixCol,      // The column.
    void   *pvRecord,   // The row.
    LPCWSTR wszString)  // Value to put (NUL-terminated; empty string maps to offset 0).
{
    _ASSERTE(wszString != NULL);

    HRESULT hr = S_OK;
    UINT32 nStringIndex = 0;    // The new string.

    // Valid Table, Column, Row?
    _ASSERTE(ixTbl < m_TblCount);
    _ASSERTE(ixCol < m_TableDefs[ixTbl].m_cCols);

    // Column description.
    _ASSERTE(m_TableDefs[ixTbl].m_pColDefs[ixCol].m_Type == iSTRING);

    // Special case for empty string for StringPool
    if (wszString[0] == 0)
    { // It's empty string
        // TODO: Is it OK that index 0 contains empty blob (00) and not empty string (00 01)?
        nStringIndex = 0;
    }
    else
    { // It's non-empty string
        IfFailGo(m_StringHeap.AddStringW(
            wszString,
            &nStringIndex));
    }

    // Store the heap offset into the row's column.
    hr = PutCol(m_TableDefs[ixTbl].m_pColDefs[ixCol], pvRecord, nStringIndex);

    // NOTE: from here on nStringIndex is reused to hold the heap's total
    // aligned size (not the string's offset), so the overflow check below
    // tracks overall heap growth.
    if (m_maxIx != ULONG_MAX)
    {
        IfFailGo(m_StringHeap.GetAlignedSize(&nStringIndex));
    }
    if (nStringIndex > m_maxIx)
    {
        m_maxIx = nStringIndex;
        if (m_maxIx > m_limIx && m_eGrow == eg_ok)
        {
            // Heap outgrew the small index size: schedule a table grow and
            // poison the watermarks so this triggers only once.
            // OutputDebugStringA("Growing tables due to String overflow.\n");
            m_eGrow = eg_grow, m_maxRid = m_maxIx = ULONG_MAX;
        }
    }

ErrExit:
    return hr;
} // CMiniMdRW::PutStringW
5451
5452//*****************************************************************************
5453// Add a guid to the guid pool, and store the index in the cell.
5454//*****************************************************************************
__checkReturn
HRESULT
CMiniMdRW::PutGuid( // S_OK or E_UNEXPECTED.
    ULONG   ixTbl,  // The table.
    ULONG   ixCol,  // The column.
    void   *pvRecord, // The row.
    REFGUID guid)   // Value to put.
{
    HRESULT hr = S_OK;
    UINT32 nIndex;
    UINT32 cbSize = 0;

    // Valid Table, Column, Row?
    _ASSERTE(ixTbl < m_TblCount);
    _ASSERTE(ixCol < m_TableDefs[ixTbl].m_cCols);

    // Column description.
    _ASSERTE(m_TableDefs[ixTbl].m_pColDefs[ixCol].m_Type == iGUID);

    // Add (or find) the GUID in the GUID heap; nIndex receives its index.
    IfFailGo(AddGuid(guid, &nIndex));

    // Store the heap index into the row's column.
    hr = PutCol(m_TableDefs[ixTbl].m_pColDefs[ixCol], pvRecord, nIndex);

    // Track total GUID-heap growth for the index-overflow check below.
    if (m_maxIx != ULONG_MAX)
    {
        cbSize = m_GuidHeap.GetSize();
    }
    if (cbSize > m_maxIx)
    {
        m_maxIx = cbSize;
        if (m_maxIx > m_limIx && m_eGrow == eg_ok)
        {
            // Heap outgrew the small index size: schedule a table grow and
            // poison the watermarks so this triggers only once.
            // OutputDebugStringA("Growing tables due to GUID overflow.\n");
            m_eGrow = eg_grow, m_maxRid = m_maxIx = ULONG_MAX;
        }
    }

ErrExit:
    return hr;
} // CMiniMdRW::PutGuid
5495
5496//*****************************************************************************
5497// Normally, an MVID is randomly generated for every metadata.
5498// ChangeMvid() can be used to explicitly set it.
5499//*****************************************************************************
__checkReturn
HRESULT
CMiniMdRW::ChangeMvid(  // S_OK or E_UNEXPECTED.
    REFGUID newMvid)    // GUID to store as the module's MVID.
{
    HRESULT hr = S_OK;

    // Row 1 of the Module table holds the MVID as an index into the GUID heap.
    ModuleRec *pModuleRec;
    IfFailRet(GetModuleRecord(1, &pModuleRec));
    UINT32 nGuidIndex = GetCol(TBL_Module, ModuleRec::COL_Mvid, pModuleRec);

    // Get a (possibly unaligned) pointer directly into the GUID heap.
    GUID UNALIGNED *pMvid;
    IfFailRet(m_GuidHeap.GetGuid(
        nGuidIndex,
        &pMvid));

    // Replace the GUID with new MVID, in place in the heap.
    *pMvid = newMvid;
    // This was missing (probably because we don't test on platform with different bitness):
    //SwapGuid(pMvid);

    return hr;
} // CMiniMdRW::ChangeMvid
5523
5524//*****************************************************************************
5525// Put a token into a cell. If the column is a coded token, perform the
5526// encoding first.
5527//*****************************************************************************
5528__checkReturn
5529HRESULT
5530CMiniMdRW::PutToken( // S_OK or E_UNEXPECTED.
5531 ULONG ixTbl, // The table.
5532 ULONG ixCol, // The column.
5533 void *pvRecord, // The row.
5534 mdToken tk) // Value to put.
5535{
5536 HRESULT hr = S_OK;
5537 ULONG cdTkn; // The new coded token.
5538
5539 // Valid Table, Column, Row?
5540 _ASSERTE(ixTbl < m_TblCount);
5541 _ASSERTE(ixCol < m_TableDefs[ixTbl].m_cCols);
5542
5543 // Column description.
5544 CMiniColDef ColDef = m_TableDefs[ixTbl].m_pColDefs[ixCol];
5545
5546 // Is the column just a RID?
5547 if (ColDef.m_Type <= iRidMax)
5548 hr = PutCol(ColDef, pvRecord, RidFromToken(tk));
5549 else // Is it a coded token?
5550 if (ColDef.m_Type <= iCodedTokenMax)
5551 {
5552 ULONG indexCodedToken = ColDef.m_Type - iCodedToken;
5553 if (indexCodedToken < COUNTOF(g_CodedTokens))
5554 {
5555 const CCodedTokenDef *pCdTkn = &g_CodedTokens[indexCodedToken];
5556 cdTkn = encodeToken(RidFromToken(tk), TypeFromToken(tk), pCdTkn->m_pTokens, pCdTkn->m_cTokens);
5557 hr = PutCol(ColDef, pvRecord, cdTkn);
5558 }
5559 else
5560 {
5561 _ASSERTE(!"PutToken called on unexpected coded token type");
5562 hr = E_FAIL;
5563 }
5564 }
5565 else // It is an error.
5566 {
5567 _ASSERTE(!"PutToken called on unexpected column type");
5568 }
5569
5570 return hr;
5571} // CMiniMdRW::PutToken
5572
5573//*****************************************************************************
5574// Add a blob to the blob pool, and store the offset in the cell.
5575//*****************************************************************************
__checkReturn
HRESULT
CMiniMdRW::PutBlob(
    ULONG ixTbl,        // Table with the row.
    ULONG ixCol,        // Column to set.
    void *pvRecord,     // The row.
    const void *pvData, // Blob data.
    ULONG cbData)       // Size of the blob data.
{
    HRESULT hr = S_OK;
    UINT32 nBlobIndex;

    // Valid Table, Column, Row?
    _ASSERTE(ixTbl < m_TblCount);
    _ASSERTE(ixCol < m_TableDefs[ixTbl].m_cCols);

    // Column description.
    _ASSERTE(m_TableDefs[ixTbl].m_pColDefs[ixCol].m_Type == iBLOB);

    // Add the blob to the blob heap; nBlobIndex receives its offset.
    IfFailGo(m_BlobHeap.AddBlob(
        MetaData::DataBlob((BYTE *)pvData, cbData),
        &nBlobIndex));

    // Store the heap offset into the row's column.
    hr = PutCol(m_TableDefs[ixTbl].m_pColDefs[ixCol], pvRecord, nBlobIndex);

    // NOTE: from here on nBlobIndex is reused to hold the heap's total
    // aligned size (not the blob's offset), so the overflow check below
    // tracks overall heap growth.
    if (m_maxIx != ULONG_MAX)
    {
        IfFailGo(m_BlobHeap.GetAlignedSize(&nBlobIndex));
    }
    if (nBlobIndex > m_maxIx)
    {
        m_maxIx = nBlobIndex;
        if (m_maxIx > m_limIx && m_eGrow == eg_ok)
        {
            // Heap outgrew the small index size: schedule a table grow and
            // poison the watermarks so this triggers only once.
            // OutputDebugStringA("Growing tables due to Blob overflow.\n");
            m_eGrow = eg_grow, m_maxRid = m_maxIx = ULONG_MAX;
        }
    }

ErrExit:
    return hr;
} // CMiniMdRW::PutBlob
5618
5619//*****************************************************************************
5620// Given a table with a pointer to another table, add a row in the second table
5621// at the end of the range of rows belonging to some parent.
5622//*****************************************************************************
__checkReturn
HRESULT
CMiniMdRW::AddChildRowIndirectForParent(
    ULONG  tblParent,   // Parent table.
    ULONG  colParent,   // Column in parent table.
    ULONG  tblChild,    // Child table, pointed to by parent cell.
    RID    ridParent,   // Rid of parent row.
    void **ppRow)       // [out] The newly created child row.
{
    HRESULT hr;
    ULONG ixInsert;     // Index of new row.
    ULONG i;            // Loop control.
    void *pRow;         // A parent row.
    ULONG ixChild;      // Some child record RID.

    // If the row in the parent table is the last row, just append.
    if (ridParent == GetCountRecs(tblParent))
    {
        RID nRowIndex_Ignore;
        return AddRecord(tblChild, ppRow, &nRowIndex_Ignore);
    }

    // Determine the index at which to insert a row: the NEXT parent's child
    // pointer marks the exclusive end of this parent's range.
    IfFailRet(getRow(tblParent, ridParent+1, &pRow));
    ixInsert = GetCol(tblParent, colParent, pRow);

    // Insert the row.
    IfFailRet(m_Tables[tblChild].InsertRecord(ixInsert, reinterpret_cast<BYTE **>(ppRow)));
    // Count the inserted record.
    ++m_Schema.m_cRecs[tblChild];

    // Track the largest RID; crossing the limit schedules a grow of the
    // tables (wider RID columns).
    if (m_Schema.m_cRecs[tblChild] > m_maxRid)
    {
        m_maxRid = m_Schema.m_cRecs[tblChild];
        if (m_maxRid > m_limRid && m_eGrow == eg_ok)
            m_eGrow = eg_grow, m_maxIx = m_maxRid = ULONG_MAX;
    }

    // Adjust the rest of the rows in the table: every parent after ridParent
    // has its child pointer shifted up by one to account for the insertion.
    for (i=GetCountRecs(tblParent); i>ridParent; --i)
    {
        IfFailRet(getRow(tblParent, i, &pRow));
        ixChild = GetCol(tblParent, colParent, pRow);
        ++ixChild;
        IfFailRet(PutCol(tblParent, colParent, pRow, ixChild));
    }

    return S_OK;
} // CMiniMdRW::AddChildRowIndirectForParent
5672
5673//*****************************************************************************
5674// Given a Parent and a Child, this routine figures if there needs to be an
5675// indirect table and creates it if needed. Else it just update the pointers
5676// in the entries contained in the parent table.
5677//*****************************************************************************
__checkReturn
HRESULT
CMiniMdRW::AddChildRowDirectForParent(
    ULONG tblParent,    // Parent table.
    ULONG colParent,    // Column in parent table.
    ULONG tblChild,     // Child table, pointed to by parent cell.
    RID   ridParent)    // Rid of parent row.
{
    HRESULT hr = S_OK;  // S_OK: direct pointers fixed; S_FALSE: indirect table in use.
    void *pRow;         // A row in the parent table.
    RID ixChild;        // Rid of a child record.

    if (m_Schema.m_cRecs[tblChild-1] != 0)
    {
        // If there already exists an indirect table, just return.
        // NOTE(review): this indexing assumes each *Ptr (indirect) table
        // immediately precedes its real table in the TBL_* numbering
        // (e.g. FieldPtr/Field) -- confirm against the table-id definitions.
        hr = S_FALSE;
        goto ErrExit;
    }

    // If the parent record has subsequent parent records with children,
    // we will now need to build a pointer table.
    //
    // The canonical form of a child pointer in a parent record is to point to
    // the start of the child list. A record with no children will point
    // to the same location as its subsequent record (that is, if A and B *could*
    // have a child record, but only B *does*, both A and B will point to the
    // same place. If the last record in the parent table has no child records,
    // it will point one past the end of the child table. This is patterned
    // after the STL's inclusive-BEGIN and exclusive-END.
    // This has the unfortunate side effect that if a child record is added to
    // a parent not at the end of its table, *all* of the subsequent parent records
    // will have to be updated to point to the new "1 past end of child table"
    // location.
    // Therefore, as an optimization, we will also recognize a special marker,
    // END_OF_TABLE (currently 0), to mean "past eot".
    //
    // If the child pointer of the record getting the new child is END_OF_TABLE,
    // then there is no subsequent child pointer. We need to fix up this parent
    // record, and any previous parent records with END_OF_TABLE to point to the
    // new child record.
    // If the child pointer of this parent record is not END_OF_TABLE, but the
    // child pointer of the next parent record is, then there is nothing at
    // all that needs to be done.
    // If the child pointer of the next parent record is not END_OF_TABLE, then
    // we will have to build a pointer table.

    // Get the parent record, and see if its child pointer is END_OF_TABLE. If so,
    // fix the parent, and all previous END_OF_TABLE valued parent records.
    IfFailGo(getRow(tblParent, ridParent, &pRow));
    ixChild = GetCol(tblParent, colParent, pRow);
    if (ixChild == END_OF_TABLE)
    {
        IfFailGo(ConvertMarkerToEndOfTable(tblParent, colParent, m_Schema.m_cRecs[tblChild], ridParent));
        goto ErrExit;
    }

    // The parent did not have END_OF_TABLE for its child pointer. If it was the last
    // record in the table, there is nothing more to do.
    if (ridParent == m_Schema.m_cRecs[tblParent])
        goto ErrExit;

    // The parent didn't have END_OF_TABLE, and there are more rows in parent table.
    // If the next parent record's child pointer is END_OF_TABLE, then all of the
    // remaining records are OK.
    IfFailGo(getRow(tblParent, ridParent+1, &pRow));
    ixChild = GetCol(tblParent, colParent, pRow);
    if (ixChild == END_OF_TABLE)
        goto ErrExit;

    // The next record was not END_OF_TABLE, so some adjustment will be required.
    // If it points to the actual END of the table, there are no more child records
    // and the child pointers can be adjusted to the new END of the table.
    if (ixChild == m_Schema.m_cRecs[tblChild])
    {
        for (ULONG i=m_Schema.m_cRecs[tblParent]; i>ridParent; --i)
        {
            IfFailGo(getRow(tblParent, i, &pRow));
            IfFailGo(PutCol(tblParent, colParent, pRow, ixChild+1));
        }
        goto ErrExit;
    }

    // The next record contained a pointer to some actual child data. That means that
    // this is an out-of-order insertion. We must create an indirect table.
    // Convert any END_OF_TABLE to actual END of table value. Note that a record has
    // just been added to the child table, and not yet to the parent table, so the END
    // should currently point to the last valid record (instead of the usual first invalid
    // rid).
    IfFailGo(ConvertMarkerToEndOfTable(tblParent, colParent, m_Schema.m_cRecs[tblChild], m_Schema.m_cRecs[tblParent]));
    // Create the indirect table.
    IfFailGo(CreateIndirectTable(tblChild));
    hr = S_FALSE;

ErrExit:
    return hr;
} // CMiniMdRW::AddChildRowDirectForParent
5774
5775//*****************************************************************************
5776// Starting with some location, convert special END_OF_TABLE values into
5777// actual end of table values (count of records + 1).
5778//*****************************************************************************
5779__checkReturn
5780HRESULT
5781CMiniMdRW::ConvertMarkerToEndOfTable(
5782 ULONG tblParent, // Parent table to convert.
5783 ULONG colParent, // Column in parent table.
5784 ULONG ixEnd, // Value to store to child pointer.
5785 RID ridParent) // Rid of parent row to start with (work down).
5786{
5787 HRESULT hr;
5788 void *pRow; // A row in the parent table.
5789 RID ixChild; // Rid of a child record.
5790
5791 for (; ridParent > 0; --ridParent)
5792 {
5793 IfFailGo(getRow(tblParent, ridParent, &pRow));
5794 ixChild = GetCol(tblParent, colParent, pRow);
5795 // Finished when rows no longer have special value.
5796 if (ixChild != END_OF_TABLE)
5797 break;
5798 IfFailGo(PutCol(tblParent, colParent, pRow, ixEnd));
5799 }
5800 // Success.
5801 hr = S_OK;
5802
5803ErrExit:
5804 return hr;
5805} // CMiniMdRW::ConvertMarkerToEndOfTable
5806
5807//*****************************************************************************
5808// Given a Table ID this routine creates the corresponding pointer table with
5809// the entries in the given Table ID less one. It doesn't create the last
5810// entry by default, since its the last entry that caused the Indirect table to
5811// be required in most cases and will need to inserted at the appropriate location
5812// with AddChildRowIndirectForParent() function. So, be VERY CAREFUL when using this function!
5813//*****************************************************************************
5814__checkReturn
5815HRESULT
5816CMiniMdRW::CreateIndirectTable(
5817 ULONG ixTbl, // Given Table.
5818 BOOL bOneLess /* = true */) // if true, create one entry less.
5819{
5820 void *pRecord;
5821 ULONG cRecords;
5822 HRESULT hr = S_OK;
5823
5824 if (m_OptionValue.m_ErrorIfEmitOutOfOrder)
5825 {
5826 //<TODO> Can we use some bit fields and reduce the code size here??
5827 //</TODO>
5828 if (ixTbl == TBL_Field && ( m_OptionValue.m_ErrorIfEmitOutOfOrder & MDFieldOutOfOrder ) )
5829 {
5830 _ASSERTE(!"Out of order emit of field token!");
5831 return CLDB_E_RECORD_OUTOFORDER;
5832 }
5833 else if (ixTbl == TBL_Method && ( m_OptionValue.m_ErrorIfEmitOutOfOrder & MDMethodOutOfOrder ) )
5834 {
5835 _ASSERTE(!"Out of order emit of method token!");
5836 return CLDB_E_RECORD_OUTOFORDER;
5837 }
5838 else if (ixTbl == TBL_Param && ( m_OptionValue.m_ErrorIfEmitOutOfOrder & MDParamOutOfOrder ) )
5839 {
5840 _ASSERTE(!"Out of order emit of param token!");
5841 return CLDB_E_RECORD_OUTOFORDER;
5842 }
5843 else if (ixTbl == TBL_Property && ( m_OptionValue.m_ErrorIfEmitOutOfOrder & MDPropertyOutOfOrder ) )
5844 {
5845 _ASSERTE(!"Out of order emit of property token!");
5846 return CLDB_E_RECORD_OUTOFORDER;
5847 }
5848 else if (ixTbl == TBL_Event && ( m_OptionValue.m_ErrorIfEmitOutOfOrder & MDEventOutOfOrder ) )
5849 {
5850 _ASSERTE(!"Out of order emit of event token!");
5851 return CLDB_E_RECORD_OUTOFORDER;
5852 }
5853 }
5854
5855 _ASSERTE(! HasIndirectTable(ixTbl));
5856
5857 cRecords = GetCountRecs(ixTbl);
5858 if (bOneLess)
5859 cRecords--;
5860
5861 // Create one less than the number of records in the given table.
5862 for (ULONG i = 1; i <= cRecords ; i++)
5863 {
5864 RID nRowIndex_Ignore;
5865 IfFailGo(AddRecord(g_PtrTableIxs[ixTbl].m_ixtbl, &pRecord, &nRowIndex_Ignore));
5866 IfFailGo(PutCol(g_PtrTableIxs[ixTbl].m_ixtbl, g_PtrTableIxs[ixTbl].m_ixcol, pRecord, i));
5867 }
5868ErrExit:
5869 return hr;
5870} // CMiniMdRW::CreateIndirectTable
5871
5872//---------------------------------------------------------------------------------------
5873//
5874// The new paramter may not have been emitted in sequence order. So
5875// check the current parameter and move it up in the indirect table until
5876// we find the right home.
5877//
__checkReturn
HRESULT
CMiniMdRW::FixParamSequence(
    RID md)     // Rid of method with new parameter.
{
    HRESULT hr;
    MethodRec * pMethod;
    IfFailRet(GetMethodRecord(md, &pMethod));
    // [ixStart, ixEnd) bounds the method's param list; the newly added param
    // is the last entry of that range.
    RID ixStart = getParamListOfMethod(pMethod);
    RID ixEnd;
    IfFailRet(getEndParamListOfMethod(md, &ixEnd));
    // Number of positions the new param must move up to restore ascending
    // sequence order.
    int iSlots = 0;

    // Param table should not be empty at this point.
    _ASSERTE(ixEnd > ixStart);

    // Get a pointer to the new guy.
    // Resolve the new param's real rid through the ParamPtr indirection
    // table if one exists; otherwise the list index is the rid itself.
    RID ridNew;
    ParamPtrRec * pNewParamPtr = NULL;
    if (HasIndirectTable(TBL_Param))
    {
        IfFailRet(GetParamPtrRecord(--ixEnd, &pNewParamPtr));
        ridNew = GetCol(TBL_ParamPtr, ParamPtrRec::COL_Param, pNewParamPtr);
    }
    else
    {
        ridNew = --ixEnd;
    }

    ParamRec * pNewParam;
    IfFailRet(GetParamRecord(ridNew, &pNewParam));

    // Walk the list forward looking for the insert point.
    // (The walk runs backwards from the end, counting in iSlots how many
    // existing params have a larger sequence number than the new one.)
    for (; ixStart < ixEnd; --ixEnd)
    {
        // Get the current parameter record.
        RID ridOld;
        if (HasIndirectTable(TBL_Param))
        {
            ParamPtrRec * pParamPtr;
            IfFailRet(GetParamPtrRecord(ixEnd - 1, &pParamPtr));
            ridOld = GetCol(TBL_ParamPtr, ParamPtrRec::COL_Param, pParamPtr);
        }
        else
        {
            ridOld = ixEnd - 1;
        }

        ParamRec * pParamRec;
        IfFailRet(GetParamRecord(ridOld, &pParamRec));

        // If the new record belongs before this existing record, slide
        // all of the old stuff down.
        if (pNewParam->GetSequence() < pParamRec->GetSequence())
        {
            ++iSlots;
        }
        else
        {
            break;
        }
    }

    // If the item is out of order, move everything down one slot and
    // copy the new guy into the new location. Because the heap can be
    // split, this must be done carefully.
    //<TODO>@Future: one could write a more complicated but faster routine that
    // copies blocks within heaps.</TODO>
    if (iSlots)
    {
        RID endRid;
        // Create an indirect table if there isn't one already. This is because
        // we can't change tokens that have been handed out, in this case the
        // param tokens.
        if (!HasIndirectTable(TBL_Param))
        {
            IfFailRet(CreateIndirectTable(TBL_Param, false));
            IfFailRet(getEndParamListOfMethod(md, &endRid));
            IfFailRet(GetParamPtrRecord(endRid - 1, &pNewParamPtr));
        }
        // Save a stack copy of the new param's ParamPtr row, because the
        // shifting below overwrites the rows in place.
        int cbCopy = m_TableDefs[TBL_ParamPtr].m_cbRec;
        void * pbBackup = _alloca(cbCopy);
        memcpy(pbBackup, pNewParamPtr, cbCopy);

        // Shift each of the iSlots preceding ParamPtr rows down one slot.
        IfFailRet(getEndParamListOfMethod(md, &endRid));
        for (ixEnd = endRid - 1; iSlots; iSlots--, --ixEnd)
        {
            ParamPtrRec * pTo;
            IfFailRet(GetParamPtrRecord(ixEnd, &pTo));
            ParamPtrRec * pFrom;
            IfFailRet(GetParamPtrRecord(ixEnd - 1, &pFrom));
            memcpy(pTo, pFrom, cbCopy);
        }

        // Drop the saved new-param row into the hole opened by the shift.
        ParamPtrRec * pTo;
        IfFailRet(GetParamPtrRecord(ixEnd, &pTo));
        memcpy(pTo, pbBackup, cbCopy);
    }
    return S_OK;
} // CMiniMdRW::FixParamSequence
5978
5979//---------------------------------------------------------------------------------------
5980//
5981// Given a MethodDef and its parent TypeDef, add the MethodDef to the parent,
5982// adjusting the MethodPtr table if it exists or if it needs to be created.
5983//
5984__checkReturn
5985HRESULT
5986CMiniMdRW::AddMethodToTypeDef(
5987 RID td, // The TypeDef to which to add the Method.
5988 RID md) // MethodDef to add to TypeDef.
5989{
5990 HRESULT hr;
5991 void * pPtr;
5992
5993 // Add direct if possible.
5994 IfFailGo(AddChildRowDirectForParent(TBL_TypeDef, TypeDefRec::COL_MethodList, TBL_Method, td));
5995
5996 // If couldn't add direct...
5997 if (hr == S_FALSE)
5998 { // Add indirect.
5999 IfFailGo(AddChildRowIndirectForParent(TBL_TypeDef, TypeDefRec::COL_MethodList, TBL_MethodPtr, td, &pPtr));
6000 hr = PutCol(TBL_MethodPtr, MethodPtrRec::COL_Method, pPtr, md);
6001
6002 // Add the <md, td> to the method parent lookup table.
6003 IfFailGo(AddMethodToLookUpTable(TokenFromRid(md, mdtMethodDef), td) );
6004 }
6005ErrExit:
6006 return hr;
6007} // CMiniMdRW::AddMethodToTypeDef
6008
6009//*****************************************************************************
6010// Given a FieldDef and its parent TypeDef, add the FieldDef to the parent,
6011// adjusting the FieldPtr table if it exists or if it needs to be created.
6012//*****************************************************************************
6013__checkReturn
6014HRESULT
6015CMiniMdRW::AddFieldToTypeDef(
6016 RID td, // The TypeDef to which to add the Field.
6017 RID md) // FieldDef to add to TypeDef.
6018{
6019 HRESULT hr;
6020 void *pPtr;
6021
6022 // Add direct if possible.
6023 IfFailGo(AddChildRowDirectForParent(TBL_TypeDef, TypeDefRec::COL_FieldList, TBL_Field, td));
6024
6025 // If couldn't add direct...
6026 if (hr == S_FALSE)
6027 { // Add indirect.
6028 IfFailGo(AddChildRowIndirectForParent(TBL_TypeDef, TypeDefRec::COL_FieldList, TBL_FieldPtr, td, &pPtr));
6029 hr = PutCol(TBL_FieldPtr, FieldPtrRec::COL_Field, pPtr, md);
6030
6031 // Add the <md, td> to the field parent lookup table.
6032 IfFailGo(AddFieldToLookUpTable(TokenFromRid(md, mdtFieldDef), td));
6033 }
6034ErrExit:
6035 return hr;
6036} // CMiniMdRW::AddFieldToTypeDef
6037
6038//*****************************************************************************
6039// Given a Param and its parent Method, add the Param to the parent,
6040// adjusting the ParamPtr table if there is an indirect table.
6041//*****************************************************************************
6042__checkReturn
6043HRESULT
6044CMiniMdRW::AddParamToMethod(
6045 RID md, // The MethodDef to which to add the Param.
6046 RID pd) // Param to add to MethodDef.
6047{
6048 HRESULT hr;
6049 void *pPtr;
6050
6051 IfFailGo(AddChildRowDirectForParent(TBL_Method, MethodRec::COL_ParamList, TBL_Param, md));
6052 if (hr == S_FALSE)
6053 {
6054 IfFailGo(AddChildRowIndirectForParent(TBL_Method, MethodRec::COL_ParamList, TBL_ParamPtr, md, &pPtr));
6055 IfFailGo(PutCol(TBL_ParamPtr, ParamPtrRec::COL_Param, pPtr, pd));
6056
6057 // Add the <pd, md> to the field parent lookup table.
6058 IfFailGo(AddParamToLookUpTable(TokenFromRid(pd, mdtParamDef), md));
6059 }
6060 IfFailGo(FixParamSequence(md));
6061
6062ErrExit:
6063 return hr;
6064} // CMiniMdRW::AddParamToMethod
6065
6066//*****************************************************************************
6067// Given a Property and its parent PropertyMap, add the Property to the parent,
6068// adjusting the PropertyPtr table.
6069//*****************************************************************************
6070__checkReturn
6071HRESULT
6072CMiniMdRW::AddPropertyToPropertyMap(
6073 RID pmd, // The PropertyMap to which to add the Property.
6074 RID pd) // Property to add to PropertyMap.
6075{
6076 HRESULT hr;
6077 void *pPtr;
6078
6079 IfFailGo(AddChildRowDirectForParent(TBL_PropertyMap, PropertyMapRec::COL_PropertyList,
6080 TBL_Property, pmd));
6081 if (hr == S_FALSE)
6082 {
6083 IfFailGo(AddChildRowIndirectForParent(TBL_PropertyMap, PropertyMapRec::COL_PropertyList,
6084 TBL_PropertyPtr, pmd, &pPtr));
6085 hr = PutCol(TBL_PropertyPtr, PropertyPtrRec::COL_Property, pPtr, pd);
6086 }
6087
6088
6089ErrExit:
6090 return hr;
6091} // CMiniMdRW::AddPropertyToPropertyMap
6092
6093//*****************************************************************************
6094// Given a Event and its parent EventMap, add the Event to the parent,
6095// adjusting the EventPtr table.
6096//*****************************************************************************
6097__checkReturn
6098HRESULT
6099CMiniMdRW::AddEventToEventMap(
6100 ULONG emd, // The EventMap to which to add the Event.
6101 RID ed) // Event to add to EventMap.
6102{
6103 HRESULT hr;
6104 void *pPtr;
6105
6106 IfFailGo(AddChildRowDirectForParent(TBL_EventMap, EventMapRec::COL_EventList,
6107 TBL_Event, emd));
6108 if (hr == S_FALSE)
6109 {
6110 IfFailGo(AddChildRowIndirectForParent(TBL_EventMap, EventMapRec::COL_EventList,
6111 TBL_EventPtr, emd, &pPtr));
6112 hr = PutCol(TBL_EventPtr, EventPtrRec::COL_Event, pPtr, ed);
6113 }
6114ErrExit:
6115 return hr;
6116} // CMiniMdRW::AddEventToEventMap
6117
6118//*****************************************************************************
6119// Find helper for a constant. This will trigger constant table to be sorted if it is not.
6120//*****************************************************************************
6121__checkReturn
6122HRESULT
6123CMiniMdRW::FindConstantHelper( // return index to the constant table
6124 mdToken tkParent, // Parent token.
6125 RID *pFoundRid)
6126{
6127 _ASSERTE(TypeFromToken(tkParent) != 0);
6128
6129 // If sorted, use the faster lookup
6130 if (IsSorted(TBL_Constant))
6131 {
6132 return FindConstantFor(RidFromToken(tkParent), TypeFromToken(tkParent), pFoundRid);
6133 }
6134 return GenericFindWithHash(TBL_Constant, ConstantRec::COL_Parent, tkParent, pFoundRid);
6135} // CMiniMdRW::FindConstantHelper
6136
6137//*****************************************************************************
6138// Find helper for a FieldMarshal. This will trigger FieldMarshal table to be sorted if it is not.
6139//*****************************************************************************
6140__checkReturn
6141HRESULT
6142CMiniMdRW::FindFieldMarshalHelper( // return index to the field marshal table
6143 mdToken tkParent, // Parent token. Can be a FieldDef or ParamDef.
6144 RID *pFoundRid)
6145{
6146 _ASSERTE(TypeFromToken(tkParent) != 0);
6147
6148 // If sorted, use the faster lookup
6149 if (IsSorted(TBL_FieldMarshal))
6150 {
6151 return FindFieldMarshalFor(RidFromToken(tkParent), TypeFromToken(tkParent), pFoundRid);
6152 }
6153 return GenericFindWithHash(TBL_FieldMarshal, FieldMarshalRec::COL_Parent, tkParent, pFoundRid);
6154} // CMiniMdRW::FindFieldMarshalHelper
6155
6156
6157//*****************************************************************************
6158// Find helper for a method semantics.
6159// This will look up methodsemantics based on its status!
6160// Can return out of memory error because of the enumerator.
6161//*****************************************************************************
__checkReturn
HRESULT
CMiniMdRW::FindMethodSemanticsHelper(
    mdToken       tkAssociate,  // Event or property token
    HENUMInternal *phEnum)      // fill in the enum
{
    ULONG ridStart, ridEnd;
    ULONG index;
    MethodSemanticsRec *pMethodSemantics;
    HRESULT hr = NOERROR;
    CLookUpHash *pHashTable = m_pLookUpHashs[TBL_MethodSemantics];

    _ASSERTE(TypeFromToken(tkAssociate) != 0);

    if (IsSorted(TBL_MethodSemantics))
    {
        // Sorted table: the matches form a contiguous rid range, so a single
        // range query plus a simple enum suffices.
        IfFailGo(getAssociatesForToken(tkAssociate, &ridEnd, &ridStart));
        HENUMInternal::InitSimpleEnum(0, ridStart, ridEnd, phEnum);
    }
    else if (pHashTable)
    {
        TOKENHASHENTRY *p;
        ULONG iHash;
        int pos;

        // Hash the data.
        HENUMInternal::InitDynamicArrayEnum(phEnum);
        iHash = HashToken(tkAssociate);

        // Go through every entry in the hash chain looking for ours.
        for (p = pHashTable->FindFirst(iHash, pos);
             p;
             p = pHashTable->FindNext(pos))
        {
            // Hash chains can contain collisions; verify the association
            // column really matches before adding the rid to the enum.
            IfFailGo(GetMethodSemanticsRecord(p->tok, &pMethodSemantics));
            if (getAssociationOfMethodSemantics(pMethodSemantics) == tkAssociate)
            {
                IfFailGo( HENUMInternal::AddElementToEnum(phEnum, p->tok) );
            }
        }
    }
    else
    {
        // linear search
        // No sort and no hash: scan the whole MethodSemantics table.
        HENUMInternal::InitDynamicArrayEnum(phEnum);
        for (index = 1; index <= getCountMethodSemantics(); index++)
        {
            IfFailGo(GetMethodSemanticsRecord(index, &pMethodSemantics));
            if (getAssociationOfMethodSemantics(pMethodSemantics) == tkAssociate)
            {
                IfFailGo( HENUMInternal::AddElementToEnum(phEnum, index) );
            }
        }
    }
ErrExit:
    return hr;
} // CMiniMdRW::FindMethodSemanticsHelper
6219
6220
6221//*****************************************************************************
6222// Find helper for a method semantics given a associate and semantics.
6223// This will look up methodsemantics based on its status!
6224// Return CLDB_E_RECORD_NOTFOUND if cannot find the matching one
6225//*****************************************************************************
__checkReturn
HRESULT
CMiniMdRW::FindAssociateHelper(
    mdToken tkAssociate,    // Event or property token
    DWORD   dwSemantics,    // [IN] the given associate semantics (setter, getter, testdefault, reset)
    RID     *pRid)          // [OUT] return matching row index here
{
    ULONG ridStart, ridEnd;
    ULONG index;
    MethodSemanticsRec *pMethodSemantics;
    HRESULT hr = NOERROR;
    CLookUpHash *pHashTable = m_pLookUpHashs[TBL_MethodSemantics];

    _ASSERTE(TypeFromToken(tkAssociate) != 0);

    if (pHashTable)
    {
        TOKENHASHENTRY *p;
        ULONG iHash;
        int pos;

        // Hash the data.
        iHash = HashToken(tkAssociate);

        // Go through every entry in the hash chain looking for ours.
        for (p = pHashTable->FindFirst(iHash, pos);
             p;
             p = pHashTable->FindNext(pos))
        {
            // A match requires both the semantics flag and the association
            // token to agree (the hash is only on the association token).
            IfFailGo(GetMethodSemanticsRecord(p->tok, &pMethodSemantics));
            if (pMethodSemantics->GetSemantic() == dwSemantics && getAssociationOfMethodSemantics(pMethodSemantics) == tkAssociate)
            {
                *pRid = p->tok;
                goto ErrExit;
            }
        }
    }
    else
    {
        if (IsSorted(TBL_MethodSemantics))
        {
            // Sorted table: narrow the scan to the contiguous range of rows
            // associated with this token.
            IfFailGo(getAssociatesForToken(tkAssociate, &ridEnd, &ridStart));
        }
        else
        {
            // Worst case: scan the entire table.
            ridStart = 1;
            ridEnd = getCountMethodSemantics() + 1;
        }

        for (index = ridStart; index < ridEnd ; index++)
        {
            IfFailGo(GetMethodSemanticsRecord(index, &pMethodSemantics));
            if (pMethodSemantics->GetSemantic() == dwSemantics && getAssociationOfMethodSemantics(pMethodSemantics) == tkAssociate)
            {
                *pRid = index;
                goto ErrExit;
            }
        }
    }
    // Fell through every candidate without a match.
    hr = CLDB_E_RECORD_NOTFOUND;
ErrExit:
    return hr;
} // CMiniMdRW::FindAssociateHelper
6289
6290
6291//*****************************************************************************
6292// Find helper for a MethodImpl.
6293// This will trigger MethodImpl table to be sorted if it is not.
6294//*****************************************************************************
__checkReturn
HRESULT
CMiniMdRW::FindMethodImplHelper(
    mdTypeDef     td,       // TypeDef token for the Class.
    HENUMInternal *phEnum)  // fill in the enum
{
    ULONG ridStart, ridEnd;
    ULONG index;
    MethodImplRec *pMethodImpl;
    HRESULT hr = NOERROR;
    CLookUpHash *pHashTable = m_pLookUpHashs[TBL_MethodImpl];

    _ASSERTE(TypeFromToken(td) == mdtTypeDef);

    if (IsSorted(TBL_MethodImpl))
    {
        // Sorted table: the class's MethodImpl rows form a contiguous rid
        // range, so a single range query plus a simple enum suffices.
        IfFailGo(getMethodImplsForClass(RidFromToken(td), &ridEnd, &ridStart));
        HENUMInternal::InitSimpleEnum(0, ridStart, ridEnd, phEnum);
    }
    else if (pHashTable)
    {
        TOKENHASHENTRY *p;
        ULONG iHash;
        int pos;

        // Hash the data.
        HENUMInternal::InitDynamicArrayEnum(phEnum);
        iHash = HashToken(td);

        // Go through every entry in the hash chain looking for ours.
        for (p = pHashTable->FindFirst(iHash, pos);
             p;
             p = pHashTable->FindNext(pos))
        {
            // Hash chains can contain collisions; verify the Class column
            // really matches before adding the rid to the enum.
            IfFailGo(GetMethodImplRecord(p->tok, &pMethodImpl));
            if (getClassOfMethodImpl(pMethodImpl) == td)
            {
                IfFailGo( HENUMInternal::AddElementToEnum(phEnum, p->tok) );
            }
        }
    }
    else
    {
        // linear search
        // No sort and no hash: scan the whole MethodImpl table.
        HENUMInternal::InitDynamicArrayEnum(phEnum);
        for (index = 1; index <= getCountMethodImpls(); index++)
        {
            IfFailGo(GetMethodImplRecord(index, &pMethodImpl));
            if (getClassOfMethodImpl(pMethodImpl) == td)
            {
                IfFailGo( HENUMInternal::AddElementToEnum(phEnum, index) );
            }
        }
    }
ErrExit:
    return hr;
} // CMiniMdRW::FindMethodImplHelper
6352
6353
6354//*****************************************************************************
6355// Find helper for a GenericParam.
6356// This will trigger GenericParam table to be sorted if it is not.
6357//*****************************************************************************
__checkReturn
HRESULT
CMiniMdRW::FindGenericParamHelper(
    mdToken       tkOwner,      // Token for the GenericParams' owner.
    HENUMInternal *phEnum)      // fill in the enum
{
    HRESULT hr = NOERROR;
    ULONG ridStart, ridEnd;   // Start, end of range of tokens.
    ULONG index;              // A loop counter.
    GenericParamRec *pGenericParam;
    CLookUpHash *pHashTable = m_pLookUpHashs[TBL_GenericParam];

    if (IsSorted(TBL_GenericParam))
    {
        // Sorted table: binary-search on the coded Owner column. The owner
        // token must first be encoded as a TypeOrMethodDef coded index,
        // because that is how the column is stored.
        mdToken tk;
        tk = encodeToken(RidFromToken(tkOwner), TypeFromToken(tkOwner), mdtTypeOrMethodDef, lengthof(mdtTypeOrMethodDef));
        IfFailGo(SearchTableForMultipleRows(TBL_GenericParam,
                            _COLDEF(GenericParam,Owner),
                            tk,
                            &ridEnd,
                            &ridStart));
        HENUMInternal::InitSimpleEnum(mdtGenericParam, ridStart, ridEnd, phEnum);
    }
    else if (pHashTable)
    {
        TOKENHASHENTRY *p;
        ULONG iHash;
        int pos;

        // Hash the data.
        HENUMInternal::InitDynamicArrayEnum(phEnum);
        iHash = HashToken(tkOwner);

        // Go through every entry in the hash chain looking for ours.
        for (p = pHashTable->FindFirst(iHash, pos);
             p;
             p = pHashTable->FindNext(pos))
        {
            // Verify the owner really matches (hash chains may collide).
            IfFailGo(GetGenericParamRecord(p->tok, &pGenericParam));
            if (getOwnerOfGenericParam(pGenericParam) == tkOwner)
            {
                IfFailGo( HENUMInternal::AddElementToEnum(phEnum, TokenFromRid(p->tok, mdtGenericParam)) );
            }
        }
    }
    else
    {
        // linear search
        // No sort and no hash: scan the whole GenericParam table.
        HENUMInternal::InitDynamicArrayEnum(phEnum);
        for (index = 1; index <= getCountGenericParams(); index++)
        {
            IfFailGo(GetGenericParamRecord(index, &pGenericParam));
            if (getOwnerOfGenericParam(pGenericParam) == tkOwner)
            {
                IfFailGo( HENUMInternal::AddElementToEnum(phEnum, TokenFromRid(index, mdtGenericParam)) );
            }
        }
    }
ErrExit:
    return hr;
} // CMiniMdRW::FindGenericParamHelper
6419
6420
6421//*****************************************************************************
6422// Find helper for a GenericParamConstraint.
6423// This will trigger GenericParamConstraint table to be sorted if it is not.
6424//*****************************************************************************
__checkReturn
HRESULT
CMiniMdRW::FindGenericParamConstraintHelper(
    mdGenericParam tkParam,     // Token for the GenericParam
    HENUMInternal  *phEnum)     // fill in the enum
{
    HRESULT hr = NOERROR;
    ULONG ridStart, ridEnd;   // Start, end of range of tokens.
    ULONG index;              // A loop counter.
    GenericParamConstraintRec *pConstraint;
    CLookUpHash *pHashTable = m_pLookUpHashs[TBL_GenericParamConstraint];
    RID ridParam = RidFromToken(tkParam);
    _ASSERTE(TypeFromToken(tkParam) == mdtGenericParam);

    // Extract the rid part of the token for comparison below.  Be sure
    //  that the column is a RID column, so that getGPCFGP() returns a RID.
    _ASSERTE(IsRidType(m_TableDefs[TBL_GenericParamConstraint].m_pColDefs[GenericParamConstraintRec::COL_Owner].m_Type));

    if (IsSorted(TBL_GenericParamConstraint))
    {
        // Sorted table: the constraints for this param form a contiguous rid
        // range, so a single range query plus a simple enum suffices.
        IfFailGo(getGenericParamConstraintsForGenericParam(ridParam, &ridEnd, &ridStart));
        HENUMInternal::InitSimpleEnum(mdtGenericParamConstraint, ridStart, ridEnd, phEnum);
    }
    else if (pHashTable)
    {
        TOKENHASHENTRY *p;
        ULONG iHash;
        int pos;

        // Hash the data.
        HENUMInternal::InitDynamicArrayEnum(phEnum);
        iHash = HashToken(tkParam);

        // Go through every entry in the hash chain looking for ours.
        for (p = pHashTable->FindFirst(iHash, pos);
             p;
             p = pHashTable->FindNext(pos))
        {
            // Verify the owner really matches (hash chains may collide).
            IfFailGo(GetGenericParamConstraintRecord(p->tok, &pConstraint));
            if (getOwnerOfGenericParamConstraint(pConstraint) == tkParam)
            {
                IfFailGo( HENUMInternal::AddElementToEnum(phEnum, TokenFromRid(p->tok, mdtGenericParamConstraint)) );
            }
        }
    }
    else
    {
        // linear search
        // No sort and no hash: scan the whole GenericParamConstraint table.
        HENUMInternal::InitDynamicArrayEnum(phEnum);
        for (index = 1; index <= getCountGenericParamConstraints(); index++)
        {
            IfFailGo(GetGenericParamConstraintRecord(index, &pConstraint));
            if (getOwnerOfGenericParamConstraint(pConstraint) == tkParam)
            {
                IfFailGo( HENUMInternal::AddElementToEnum(phEnum, TokenFromRid(index, mdtGenericParamConstraint)) );
            }
        }
    }
ErrExit:
    return hr;
} // CMiniMdRW::FindGenericParamConstraintHelper
6486
6487
6488//*****************************************************************************
6489// Find helper for a ClassLayout. This will trigger ClassLayout table to be sorted if it is not.
6490//*****************************************************************************
6491__checkReturn
6492HRESULT
6493CMiniMdRW::FindClassLayoutHelper( // return index to the ClassLayout table
6494 mdTypeDef tkParent, // Parent token.
6495 RID *pFoundRid)
6496{
6497 _ASSERTE(TypeFromToken(tkParent) == mdtTypeDef);
6498
6499 // If sorted, use the faster lookup
6500 if (IsSorted(TBL_ClassLayout))
6501 {
6502 return FindClassLayoutFor(RidFromToken(tkParent), pFoundRid);
6503 }
6504 return GenericFindWithHash(TBL_ClassLayout, ClassLayoutRec::COL_Parent, tkParent, pFoundRid);
6505} // CMiniMdRW::FindClassLayoutHelper
6506
6507//*****************************************************************************
6508// Find helper for a FieldLayout. This will trigger FieldLayout table to be sorted if it is not.
6509//*****************************************************************************
6510__checkReturn
6511HRESULT
6512CMiniMdRW::FindFieldLayoutHelper( // return index to the FieldLayout table
6513 mdFieldDef tkField, // Field RID.
6514 RID *pFoundRid)
6515{
6516 _ASSERTE(TypeFromToken(tkField) == mdtFieldDef);
6517
6518 // If sorted, use the faster lookup
6519 if (IsSorted(TBL_FieldLayout))
6520 {
6521 return FindFieldLayoutFor(RidFromToken(tkField), pFoundRid);
6522 }
6523 return GenericFindWithHash(TBL_FieldLayout, FieldLayoutRec::COL_Field, tkField, pFoundRid);
6524} // CMiniMdRW::FindFieldLayoutHelper
6525
6526//*****************************************************************************
6527// Find helper for a ImplMap. This will trigger ImplMap table to be sorted if it is not.
6528//*****************************************************************************
6529__checkReturn
6530HRESULT
6531CMiniMdRW::FindImplMapHelper( // return index to the ImplMap table
6532 mdToken tk, // Member forwarded token.
6533 RID *pFoundRid)
6534{
6535 _ASSERTE(TypeFromToken(tk) != 0);
6536
6537 // If sorted, use the faster lookup
6538 if (IsSorted(TBL_ImplMap))
6539 {
6540 return FindImplMapFor(RidFromToken(tk), TypeFromToken(tk), pFoundRid);
6541 }
6542 return GenericFindWithHash(TBL_ImplMap, ImplMapRec::COL_MemberForwarded, tk, pFoundRid);
6543} // CMiniMdRW::FindImplMapHelper
6544
6545
6546//*****************************************************************************
6547// Find helper for a FieldRVA. This will trigger FieldRVA table to be sorted if it is not.
6548//*****************************************************************************
6549__checkReturn
6550HRESULT
6551CMiniMdRW::FindFieldRVAHelper( // return index to the FieldRVA table
6552 mdFieldDef tkField, // Field token.
6553 RID *pFoundRid)
6554{
6555 _ASSERTE(TypeFromToken(tkField) == mdtFieldDef);
6556
6557 // If sorted, use the faster lookup
6558 if (IsSorted(TBL_FieldRVA))
6559 {
6560 return FindFieldRVAFor(RidFromToken(tkField), pFoundRid);
6561 }
6562 return GenericFindWithHash(TBL_FieldRVA, FieldRVARec::COL_Field, tkField, pFoundRid);
6563} // CMiniMdRW::FindFieldRVAHelper
6564
6565//*****************************************************************************
6566// Find helper for a NestedClass. This will trigger NestedClass table to be sorted if it is not.
6567//*****************************************************************************
6568__checkReturn
6569HRESULT
6570CMiniMdRW::FindNestedClassHelper( // return index to the NestedClass table
6571 mdTypeDef tkClass, // NestedClass RID.
6572 RID *pFoundRid)
6573{
6574 // If sorted, use the faster lookup
6575 if (IsSorted(TBL_NestedClass))
6576 {
6577 return FindNestedClassFor(RidFromToken(tkClass), pFoundRid);
6578 }
6579 return GenericFindWithHash(TBL_NestedClass, NestedClassRec::COL_NestedClass, tkClass, pFoundRid);
6580} // CMiniMdRW::FindNestedClassHelper
6581
6582
6583//*************************************************************************
6584// generic find helper with hash table
6585//*************************************************************************
__checkReturn
HRESULT
CMiniMdRW::GenericFindWithHash( // Return code.
    ULONG   ixTbl,              // Table with hash
    ULONG   ixCol,              // col that we hash.
    mdToken tkTarget,           // token to be find in the hash
    RID    *pFoundRid)          // [OUT] matching rid, or 0 if no match (S_OK either way).
{
    HRESULT hr;
    ULONG index;
    mdToken tkHash;
    BYTE * pRec;

    // Partial check -- only one rid for table 0, so if type is 0, rid should be 1.
    _ASSERTE(TypeFromToken(tkTarget) != 0 || RidFromToken(tkTarget) == 1);

    if (m_pLookUpHashs[ixTbl] == NULL)
    {
        // Lazily build the hash on first lookup (only happens once the table
        // is big enough; see GenericBuildHashTable).
        // Just ignore the returned error - the hash is either created or not
        (void)GenericBuildHashTable(ixTbl, ixCol);
    }

    CLookUpHash * pHashTable = m_pLookUpHashs[ixTbl];
    if (pHashTable != NULL)
    {
        TOKENHASHENTRY *p;
        ULONG iHash;
        int pos;

        // Hash the data.
        iHash = HashToken(tkTarget);

        // Go through every entry in the hash chain looking for ours.
        for (p = pHashTable->FindFirst(iHash, pos);
             p;
             p = pHashTable->FindNext(pos))
        {
            IfFailRet(m_Tables[ixTbl].GetRecord(p->tok, &pRec));

            // get the column value that we will hash
            tkHash = GetToken(ixTbl, ixCol, pRec);
            if (tkHash == tkTarget)
            {
                // found the match
                *pFoundRid = p->tok;
                return S_OK;
            }
        }
    }
    else
    {
        // linear search
        // Hash could not be built (small table or out of memory): scan.
        for (index = 1; index <= GetCountRecs(ixTbl); index++)
        {
            IfFailRet(m_Tables[ixTbl].GetRecord(index, &pRec));
            tkHash = GetToken(ixTbl, ixCol, pRec);
            if (tkHash == tkTarget)
            {
                // found the match
                *pFoundRid = index;
                return S_OK;
            }
        }
    }
    // Not found is not an error: report rid 0 with S_OK.
    *pFoundRid = 0;
    return S_OK;
} // CMiniMdRW::GenericFindWithHash
6653
6654//*************************************************************************
6655// Build a hash table for the specified table if the size exceed the thresholds.
6656//*************************************************************************
__checkReturn
HRESULT
CMiniMdRW::GenericBuildHashTable(
    ULONG ixTbl,    // Table with hash.
    ULONG ixCol)    // Column that we hash.
{
    HRESULT hr = S_OK;
    BYTE *pRec;
    mdToken tkHash;
    ULONG iHash;
    TOKENHASHENTRY *pEntry;

    // If the hash table hasn't been built it, see if it should get faulted in.
    if (m_pLookUpHashs[ixTbl] == NULL)
    {
        ULONG ridEnd = GetCountRecs(ixTbl);

        //<TODO>@FUTURE: we need to init the size of the hash table corresponding to the current
        // size of table in E&C's case.
        //</TODO>
        // Only build a hash once the table is big enough to pay for it.
        // Avoid prefast warning with "if (ridEnd + 1 > INDEX_ROW_COUNT_THRESHOLD)"
        if (ridEnd > INDEX_ROW_COUNT_THRESHOLD - 1)
        {
            // Create a new hash.
            NewHolder<CLookUpHash> pHashTable = new (nothrow) CLookUpHash;
            IfNullGo(pHashTable);
            IfFailGo(pHashTable->NewInit(
                g_HashSize[GetMetaDataSizeIndex(&m_OptionValue)]));

            // Scan every entry already in the table, add it to the hash.
            for (ULONG index = 1; index <= ridEnd; index++)
            {
                IfFailGo(m_Tables[ixTbl].GetRecord(index, &pRec));

                // get the column value that we will hash
                tkHash = GetToken(ixTbl, ixCol, pRec);

                // hash the value
                iHash = HashToken(tkHash);

                // Each hash entry stores the rid of the row whose column
                // hashed to this bucket.
                pEntry = pHashTable->Add(iHash);
                IfNullGo(pEntry);
                pEntry->tok = index;

            }

            // Publish the new table atomically; if another thread raced us
            // and installed one first, ours is released by the holder.
            if (InterlockedCompareExchangeT<CLookUpHash *>(
                &m_pLookUpHashs[ixTbl],
                pHashTable,
                NULL) == NULL)
            {   // We won the initializaion race
                pHashTable.SuppressRelease();
            }
        }
    }
ErrExit:
    return hr;
} // CMiniMdRW::GenericBuildHashTable
6715
6716//*************************************************************************
6717// Add a rid from a table into a hash. We will hash on the ixCol of the ixTbl.
6718//*************************************************************************
__checkReturn
HRESULT
CMiniMdRW::GenericAddToHash(
    ULONG ixTbl,    // Table with hash
    ULONG ixCol,    // column that we hash by calling HashToken.
    RID   rid)      // Token of new guy into the ixTbl.
{
    HRESULT hr = S_OK;
    CLookUpHash *pHashTable = m_pLookUpHashs[ixTbl];
    void *pRec;
    mdToken tkHash;
    ULONG iHash;
    TOKENHASHENTRY *pEntry;

    // If the hash table hasn't been built it, see if it should get faulted in.
    if (pHashTable == NULL)
    {
        // Building the table scans every existing row, so the new row is
        // picked up as part of the build; no separate Add is needed.
        IfFailGo(GenericBuildHashTable(ixTbl, ixCol));
    }
    else
    {
        // Adding into hash table has to be protected by write-lock
        INDEBUG(Debug_CheckIsLockedForWrite();)

        IfFailGo(m_Tables[ixTbl].GetRecord(rid, reinterpret_cast<BYTE **>(&pRec)));

        // Hash the row's column value and record the rid under that bucket.
        tkHash = GetToken(ixTbl, ixCol, pRec);
        iHash = HashToken(tkHash);
        pEntry = pHashTable->Add(iHash);
        IfNullGo(pEntry);
        pEntry->tok = rid;
    }

ErrExit:
    return hr;
} // CMiniMdRW::GenericAddToHash
6755
6756
6757//*****************************************************************************
6758// look up a table by a col given col value is ulVal.
6759//*****************************************************************************
__checkReturn
HRESULT
CMiniMdRW::LookUpTableByCol(
    ULONG ulVal, // Value for which to search.
    VirtualSort *pVSTable, // A VirtualSort on the table, if any.
    RID *pRidStart, // Put RID of first match here.
    RID *pRidEnd) // [OPTIONAL] Put RID of end match here.
{
    HRESULT hr = NOERROR;
    ULONG ixTbl;
    ULONG ixCol;

    _ASSERTE(pVSTable != NULL);
    ixTbl = pVSTable->m_ixTbl;
    ixCol = pVSTable->m_ixCol;
    if (IsSorted(ixTbl))
    {
        // Table itself is sorted so we don't need to build a virtual sort table.
        // Binary search on the table directly.
        //
        // NOTE(review): pRidEnd is passed before pRidStart here - presumably the
        // helper takes (end, start) in that order; verify against its declaration.
        IfFailGo(SearchTableForMultipleRows(
            ixTbl,
            m_TableDefs[ixTbl].m_pColDefs[ixCol],
            ulVal,
            pRidEnd,
            pRidStart));
    }
    else
    {
        if (!pVSTable->m_isMapValid)
        {
            // Map is stale or was never built; rebuilding mutates shared state,
            // so the write-lock must be held.
            INDEBUG(Debug_CheckIsLockedForWrite();)

            int iCount;

            // build the parallel VirtualSort table
            if (pVSTable->m_pMap == NULL)
            {
                // the first time that we build the VS table. We need to allocate the TOKENMAP
                pVSTable->m_pMap = new (nothrow) TOKENMAP;
                IfNullGo(pVSTable->m_pMap);
            }

            // ensure the look up table is big enough (one slot per record plus
            // the unused element 0)
            iCount = pVSTable->m_pMap->Count();
            if (pVSTable->m_pMap->AllocateBlock(m_Schema.m_cRecs[ixTbl] + 1 - iCount) == 0)
            {
                IfFailGo(E_OUTOFMEMORY);
            }

            // now build the table
            // Element 0 of m_pMap will never be used, its just being initialized anyway.
            // Seed with the identity permutation; Sort() below reorders it by column value.
            for (ULONG i = 0; i <= m_Schema.m_cRecs[ixTbl]; i++)
            {
                *(pVSTable->m_pMap->Get(i)) = i;
            }
            // sort the table
            IfFailGo(pVSTable->Sort());
        }
        // binary search on the LookUp
        {
            void *pRow; // Row from a table.
            ULONG val; // Value from a row.
            CMiniColDef *pCol;
            int lo,hi,mid=0; // binary search indices.
            RID ridEnd, ridBegin;

            pCol = m_TableDefs[ixTbl].m_pColDefs;

            // Start with entire table.
            lo = 1;
            hi = GetCountRecs( ixTbl );
            // While there are rows in the range...
            while ( lo <= hi )
            { // Look at the one in the middle.
                mid = (lo + hi) / 2;
                // Indirect through the virtual sort: probe the record that
                // occupies sorted position 'mid'.
                IfFailGo(getRow(
                    ixTbl,
                    (UINT32)*(pVSTable->m_pMap->Get(mid)),
                    &pRow));
                val = getIX( pRow, pCol[ixCol] );

                // If equal to the target, done.
                if ( val == ulVal )
                    break;
                // If middle item is too small, search the top half.
                if ( val < ulVal )
                    lo = mid + 1;
                else // but if middle is to big, search bottom half.
                    hi = mid - 1;
            }
            if ( lo > hi )
            {
                // Didn't find anything that matched.
                *pRidStart = 0;
                if (pRidEnd) *pRidEnd = 0;
                goto ErrExit;
            }


            // Now mid is pointing to one of the several records that match the search.
            // Find the beginning and find the end.
            ridBegin = mid;

            // End will be at least one larger than found record.
            ridEnd = ridBegin + 1;

            // Search back to start of group.
            for (;;)
            {
                if (ridBegin <= 1)
                {
                    break;
                }
                IfFailGo(getRow(
                    ixTbl,
                    (UINT32)*(pVSTable->m_pMap->Get(ridBegin-1)),
                    &pRow));
                if (getIX(pRow, pCol[ixCol]) != ulVal)
                {
                    break;
                }
                --ridBegin;
            }

            // If desired, search forward to end of group.
            if (pRidEnd != NULL)
            {
                for (;;)
                {
                    if (ridEnd > GetCountRecs(ixTbl))
                    {
                        break;
                    }
                    IfFailGo(getRow(
                        ixTbl,
                        (UINT32)*(pVSTable->m_pMap->Get(ridEnd)),
                        &pRow));
                    if (getIX(pRow, pCol[ixCol]) != ulVal)
                    {
                        break;
                    }
                    ++ridEnd;
                }
                // ridEnd is exclusive: one past the last matching sorted position.
                *pRidEnd = ridEnd;
            }
            // NOTE: on the virtual-sort path the returned rids are positions in
            // the sort, not raw table rids; callers map back (see
            // Impl_SearchTableRW's use of GetRidFromVirtualSort).
            *pRidStart = ridBegin;
        }
    }

    // fall through
ErrExit:
    return hr;
} // CMiniMdRW::LookUpTableByCol
6914
6915__checkReturn
6916HRESULT
6917CMiniMdRW::Impl_SearchTableRW(
6918 ULONG ixTbl, // Table to search.
6919 ULONG ixCol, // Column to search.
6920 ULONG ulTarget, // Value to search for.
6921 RID *pFoundRid)
6922{
6923 HRESULT hr = S_OK;
6924 RID iRid; // The resulting RID.
6925 RID iRidEnd; // Unused.
6926
6927 // Look up.
6928 hr = LookUpTableByCol(ulTarget, m_pVS[ixTbl], &iRid, &iRidEnd);
6929 if (FAILED(hr))
6930 {
6931 iRid = 0;
6932 }
6933 else // Convert to real RID.
6934 {
6935 iRid = GetRidFromVirtualSort(ixTbl, iRid);
6936 }
6937
6938 *pFoundRid = iRid;
6939 return S_OK;
6940} // CMiniMdRW::Impl_SearchTableRW
6941
6942//*****************************************************************************
6943// Search a table for the row containing the given key value.
6944// EG. Constant table has pointer back to Param or Field.
6945//*****************************************************************************
6946__checkReturn
6947HRESULT
6948CMiniMdRW::vSearchTable( // RID of matching row, or 0.
6949 ULONG ixTbl, // Table to search.
6950 CMiniColDef sColumn, // Sorted key column, containing search value.
6951 ULONG ulTarget, // Target for search.
6952 RID *pRid)
6953{
6954 HRESULT hr;
6955 void *pRow; // Row from a table.
6956 ULONG val; // Value from a row.
6957
6958 int lo,mid,hi; // binary search indices.
6959
6960 // Binary search requires sorted table.
6961 // @todo GENERICS: why is IsSorted not true for mdtGenericParam?
6962 // _ASSERTE(IsSorted(ixTbl));
6963
6964 // Start with entire table.
6965 lo = 1;
6966 hi = GetCountRecs(ixTbl);
6967 // While there are rows in the range...
6968 while (lo <= hi)
6969 { // Look at the one in the middle.
6970 mid = (lo + hi) / 2;
6971 IfFailRet(getRow(ixTbl, mid, &pRow));
6972 val = getIX(pRow, sColumn);
6973 // If equal to the target, done.
6974 if (val == ulTarget)
6975 {
6976 *pRid = mid;
6977 return S_OK;
6978 }
6979 // If middle item is too small, search the top half.
6980 if (val < ulTarget || val == END_OF_TABLE)
6981 lo = mid + 1;
6982 else // but if middle is to big, search bottom half.
6983 hi = mid - 1;
6984 }
6985 // Didn't find anything that matched.
6986
6987 // @todo GENERICS: Work around for refEmit feature. Remove once table is sorted.
6988 if (ixTbl == TBL_GenericParam && !IsSorted(ixTbl))
6989 {
6990 for (int i = 1; i <= (int)GetCountRecs(ixTbl); i ++)
6991 {
6992 IfFailRet(getRow(ixTbl, i, &pRow));
6993 if (getIX(pRow, sColumn) == ulTarget)
6994 {
6995 *pRid = i;
6996 return S_OK;
6997 }
6998 }
6999 }
7000
7001 *pRid = 0;
7002 return S_OK;
7003} // CMiniMdRW::vSearchTable
7004
7005//*****************************************************************************
7006// Search a table for the highest-RID row containing a value that is less than
7007// or equal to the target value. EG. TypeDef points to first Field, but if
7008// a TypeDef has no fields, it points to first field of next TypeDef.
7009// This is complicated by the possible presence of columns containing
7010// END_OF_TABLE values, which are not necessarily in greater than
7011// other values. However, this invalid-rid value will occur only at the
7012// end of the table.
7013//*****************************************************************************
__checkReturn
HRESULT
CMiniMdRW::vSearchTableNotGreater( // RID of matching row, or 0.
    ULONG ixTbl, // Table to search.
    CMiniColDef sColumn, // the column def containing search value
    ULONG ulTarget, // target for search
    RID *pRid)
{
    HRESULT hr;
    void *pRow; // Row from a table.
    ULONG cRecs; // Rows in the table.
    ULONG val = 0; // Value from a table.
    ULONG lo,mid=0,hi; // binary search indices.

    cRecs = GetCountRecs(ixTbl);

    // Start with entire table.
    lo = 1;
    hi = cRecs;
    // If no recs, return.
    if (lo > hi)
    {
        *pRid = 0;
        return S_OK;
    }
    // While there are rows in the range...
    while (lo <= hi)
    { // Look at the one in the middle.
        mid = (lo + hi) / 2;
        IfFailRet(getRow(ixTbl, mid, &pRow));
        val = getIX(pRow, sColumn);
        // If equal to the target, done searching.
        if (val == ulTarget)
            break;
        // If middle item is too small, search the top half.
        // (END_OF_TABLE sentinels at the tail are treated as "too big" so the
        // search backs away from them.)
        if (val < ulTarget && val != END_OF_TABLE)
            lo = mid + 1;
        else // but if middle is to big, search bottom half.
            hi = mid - 1;
    }
    // May or may not have found anything that matched. Mid will be close, but may
    // be to high or too low. It should point to the highest acceptable
    // record.

    // If the value is greater than the target, back up just until the value is
    // less than or equal to the target. SHOULD only be one step.
    if (val > ulTarget || val == END_OF_TABLE)
    {
        while (val > ulTarget || val == END_OF_TABLE)
        {
            // The assert documents the "one step at most" expectation; the
            // mid == 1 check below keeps release builds safe regardless.
            _ASSERTE(mid > 1);
            // If no recs match, return.
            if (mid == 1)
            {
                *pRid = 0;
                return S_OK;
            }
            --mid;
            IfFailRet(getRow(ixTbl, mid, &pRow));
            val = getIX(pRow, sColumn);
        }
    }
    else
    {
        // Value is less than or equal to the target. As long as the next
        // record is also acceptable, move forward.
        while (mid < cRecs)
        {
            // There is another record. Get its value.
            IfFailRet(getRow(ixTbl, mid+1, &pRow));
            val = getIX(pRow, sColumn);
            // If that record is too high, stop.
            if (val > ulTarget || val == END_OF_TABLE)
                break;
            mid++;
        }
    }

    // Return the value that's just less than the target.
    *pRid = mid;
    return S_OK;
} // CMiniMdRW::vSearchTableNotGreater
7096
7097//---------------------------------------------------------------------------------------
7098//
7099// Create MemberRef hash table.
7100//
7101__checkReturn
7102HRESULT
7103CMiniMdRW::CreateMemberRefHash()
7104{
7105 HRESULT hr = S_OK;
7106
7107 if (m_pMemberRefHash == NULL)
7108 {
7109 ULONG ridEnd = getCountMemberRefs();
7110 if (ridEnd + 1 > INDEX_ROW_COUNT_THRESHOLD)
7111 {
7112 // Create a new hash.
7113 NewHolder<CMemberRefHash> pMemberRefHash = new (nothrow) CMemberRefHash();
7114 IfNullGo(pMemberRefHash);
7115 IfFailGo(pMemberRefHash->NewInit(
7116 g_HashSize[GetMetaDataSizeIndex(&m_OptionValue)]));
7117
7118 // Scan every entry already in the table, add it to the hash.
7119 for (ULONG index = 1; index <= ridEnd; index++)
7120 {
7121 MemberRefRec * pMemberRef;
7122 IfFailGo(GetMemberRefRecord(index, &pMemberRef));
7123
7124 LPCSTR szMemberRefName;
7125 IfFailGo(getNameOfMemberRef(pMemberRef, &szMemberRefName));
7126 ULONG iHash = HashMemberRef(
7127 getClassOfMemberRef(pMemberRef),
7128 szMemberRefName);
7129
7130 TOKENHASHENTRY * pEntry = pMemberRefHash->Add(iHash);
7131 IfNullGo(pEntry);
7132 pEntry->tok = TokenFromRid(index, mdtMemberRef);
7133 }
7134
7135 if (InterlockedCompareExchangeT<CMemberRefHash *>(&m_pMemberRefHash, pMemberRefHash, NULL) == NULL)
7136 { // We won the initialization race
7137 pMemberRefHash.SuppressRelease();
7138 }
7139 }
7140 }
7141
7142ErrExit:
7143 return hr;
7144} // CMiniMdRW::CreateMemberRefHash
7145
7146//---------------------------------------------------------------------------------------
7147//
7148// Add a new MemberRef to the hash table.
7149//
7150__checkReturn
7151HRESULT
7152CMiniMdRW::AddMemberRefToHash(
7153 mdMemberRef mr) // Token of new guy.
7154{
7155 HRESULT hr = S_OK;
7156
7157 // If the hash exists, we will add to it - requires write-lock
7158 INDEBUG(Debug_CheckIsLockedForWrite();)
7159
7160 // If the hash table hasn't been built it, see if it should get faulted in.
7161 if (m_pMemberRefHash == NULL)
7162 {
7163 IfFailGo(CreateMemberRefHash());
7164 }
7165 else
7166 {
7167 MemberRefRec * pMemberRef;
7168 IfFailGo(GetMemberRefRecord(RidFromToken(mr), &pMemberRef));
7169
7170 LPCSTR szMemberRefName;
7171 IfFailGo(getNameOfMemberRef(pMemberRef, &szMemberRefName));
7172 ULONG iHash = HashMemberRef(
7173 getClassOfMemberRef(pMemberRef),
7174 szMemberRefName);
7175
7176 TOKENHASHENTRY * pEntry = m_pMemberRefHash->Add(iHash);
7177 IfNullGo(pEntry);
7178 pEntry->tok = TokenFromRid(RidFromToken(mr), mdtMemberRef);
7179 }
7180
7181ErrExit:
7182 return hr;
7183} // CMiniMdRW::AddMemberRefToHash
7184
7185//---------------------------------------------------------------------------------------
7186//
7187// If the hash is built, search for the item. Ignore token *ptkMemberRef.
7188//
7189CMiniMdRW::HashSearchResult
7190CMiniMdRW::FindMemberRefFromHash(
7191 mdToken tkParent, // Parent token.
7192 LPCUTF8 szName, // Name of item.
7193 PCCOR_SIGNATURE pvSigBlob, // Signature.
7194 ULONG cbSigBlob, // Size of signature.
7195 mdMemberRef * ptkMemberRef) // IN: Ignored token. OUT: Return if found.
7196{
7197 // If the table is there, look for the item in the chain of items.
7198 if (m_pMemberRefHash != NULL)
7199 {
7200 TOKENHASHENTRY * p;
7201 ULONG iHash;
7202 int pos;
7203
7204 // Hash the data.
7205 iHash = HashMemberRef(tkParent, szName);
7206
7207 // Go through every entry in the hash chain looking for ours.
7208 for (p = m_pMemberRefHash->FindFirst(iHash, pos);
7209 p != NULL;
7210 p = m_pMemberRefHash->FindNext(pos))
7211 {
7212 if ((CompareMemberRefs(p->tok, tkParent, szName, pvSigBlob, cbSigBlob) == S_OK)
7213 && (*ptkMemberRef != p->tok))
7214 {
7215 *ptkMemberRef = p->tok;
7216 return Found;
7217 }
7218 }
7219
7220 return NotFound;
7221 }
7222 else
7223 {
7224 return NoTable;
7225 }
7226} // CMiniMdRW::FindMemberRefFromHash
7227
7228//*****************************************************************************
7229// Check a given mr token to see if this one is a match.
7230//*****************************************************************************
7231__checkReturn
7232HRESULT
7233CMiniMdRW::CompareMemberRefs( // S_OK match, S_FALSE no match.
7234 mdMemberRef mr, // Token to check.
7235 mdToken tkPar, // Parent token.
7236 LPCUTF8 szNameUtf8, // Name of item.
7237 PCCOR_SIGNATURE pvSigBlob, // Signature.
7238 ULONG cbSigBlob) // Size of signature.
7239{
7240 HRESULT hr;
7241 MemberRefRec *pMemberRef;
7242 LPCUTF8 szNameUtf8Tmp;
7243 PCCOR_SIGNATURE pvSigBlobTmp;
7244 ULONG cbSigBlobTmp;
7245
7246 IfFailRet(GetMemberRefRecord(RidFromToken(mr), &pMemberRef));
7247 if (!IsNilToken(tkPar))
7248 {
7249 // If caller specifies the tkPar and tkPar doesn't match,
7250 // try the next memberref.
7251 //
7252 if (tkPar != getClassOfMemberRef(pMemberRef))
7253 return S_FALSE;
7254 }
7255
7256 IfFailRet(getNameOfMemberRef(pMemberRef, &szNameUtf8Tmp));
7257 if (strcmp(szNameUtf8Tmp, szNameUtf8) == 0)
7258 {
7259 if (pvSigBlob == NULL)
7260 {
7261 return S_OK;
7262 }
7263
7264 // Name matched. Now check the signature if caller supplies signature
7265 //
7266 if ((cbSigBlob != 0) && (pvSigBlob != NULL))
7267 {
7268 IfFailRet(getSignatureOfMemberRef(pMemberRef, &pvSigBlobTmp, &cbSigBlobTmp));
7269 if ((cbSigBlobTmp == cbSigBlob) &&
7270 (memcmp(pvSigBlob, pvSigBlobTmp, cbSigBlob) == 0))
7271 {
7272 return S_OK;
7273 }
7274 }
7275 }
7276 return S_FALSE;
7277} // CMiniMdRW::CompareMemberRefs
7278
7279
7280//*****************************************************************************
7281// Add a new memberdef to the hash table.
7282//*****************************************************************************
7283__checkReturn
7284HRESULT
7285CMiniMdRW::AddMemberDefToHash(
7286 mdToken tkMember, // Token of new guy. It can be MethodDef or FieldDef
7287 mdToken tkParent) // Parent token.
7288{
7289 HRESULT hr = S_OK;
7290 ULONG iHash;
7291 MEMBERDEFHASHENTRY * pEntry;
7292
7293 // If the hash exists, we will add to it - requires write-lock
7294 INDEBUG(Debug_CheckIsLockedForWrite();)
7295
7296 // If the hash table hasn't been built it, see if it should get faulted in.
7297 if (m_pMemberDefHash == NULL)
7298 {
7299 IfFailGo(CreateMemberDefHash());
7300 }
7301 else
7302 {
7303 LPCSTR szName;
7304 if (TypeFromToken(tkMember) == mdtMethodDef)
7305 {
7306 MethodRec * pMethodRecord;
7307 IfFailGo(GetMethodRecord(RidFromToken(tkMember), &pMethodRecord));
7308 IfFailGo(getNameOfMethod(pMethodRecord, &szName));
7309 }
7310 else
7311 {
7312 _ASSERTE(TypeFromToken(tkMember) == mdtFieldDef);
7313 FieldRec * pFieldRecord;
7314 IfFailGo(GetFieldRecord(RidFromToken(tkMember), &pFieldRecord));
7315 IfFailGo(getNameOfField(pFieldRecord, &szName));
7316 }
7317
7318 iHash = HashMemberDef(tkParent, szName);
7319
7320 pEntry = m_pMemberDefHash->Add(iHash);
7321 IfNullGo(pEntry);
7322 pEntry->tok = tkMember;
7323 pEntry->tkParent = tkParent;
7324 }
7325
7326ErrExit:
7327 return hr;
7328} // CMiniMdRW::AddMemberDefToHash
7329
7330
7331//*****************************************************************************
7332// Create MemberDef Hash
7333//*****************************************************************************
__checkReturn
HRESULT
CMiniMdRW::CreateMemberDefHash()
{
    HRESULT hr = S_OK;
    ULONG iHash;
    MEMBERDEFHASHENTRY * pEntry;

    // If the hash table hasn't been built it, see if it should get faulted in.
    if (m_pMemberDefHash == NULL)
    {
        ULONG ridMethod = getCountMethods();
        ULONG ridField = getCountFields();
        ULONG iType;
        ULONG ridStart;
        ULONG ridEnd;
        TypeDefRec * pRec;
        MethodRec * pMethod;
        FieldRec * pField;

        // Only pay for a hash once the member tables are big enough to benefit.
        if ((ridMethod + ridField + 1) > INDEX_ROW_COUNT_THRESHOLD)
        {
            // Create a new hash.
            NewHolder<CMemberDefHash> pMemberDefHash = new (nothrow) CMemberDefHash();
            IfNullGo(pMemberDefHash);
            IfFailGo(pMemberDefHash->NewInit(
                g_HashSize[GetMetaDataSizeIndex(&m_OptionValue)]));

            // Walk every TypeDef so each member is hashed with its parent type.
            for (iType = 1; iType <= getCountTypeDefs(); iType++)
            {
                IfFailGo(GetTypeDefRecord(iType, &pRec));
                ridStart = getMethodListOfTypeDef(pRec);
                IfFailGo(getEndMethodListOfTypeDef(iType, &ridEnd));

                // add all of the methods of this typedef into hash table
                for (; ridStart < ridEnd; ridStart++)
                {
                    // Map the method-list index through the (possibly indirect)
                    // MethodPtr table to the real Method rid.
                    RID methodRid;
                    IfFailGo(GetMethodRid(ridStart, &methodRid));
                    IfFailGo(GetMethodRecord(methodRid, &pMethod));
                    LPCSTR szMethodName;
                    IfFailGo(getNameOfMethod(pMethod, &szMethodName));
                    iHash = HashMemberDef(TokenFromRid(iType, mdtTypeDef), szMethodName);

                    pEntry = pMemberDefHash->Add(iHash);
                    // NOTE(review): this loop reports failure via OutOfMemory()
                    // while the field loop below uses IfNullGo - presumably
                    // equivalent; consider unifying.
                    if (pEntry == NULL)
                        IfFailGo(OutOfMemory());
                    pEntry->tok = TokenFromRid(methodRid, mdtMethodDef);
                    pEntry->tkParent = TokenFromRid(iType, mdtTypeDef);
                }

                // add all of the fields of this typedef into hash table
                ridStart = getFieldListOfTypeDef(pRec);
                IfFailGo(getEndFieldListOfTypeDef(iType, &ridEnd));

                // Scan every entry already in the Method table, add it to the hash.
                for (; ridStart < ridEnd; ridStart++)
                {
                    RID fieldRid;
                    IfFailGo(GetFieldRid(ridStart, &fieldRid));
                    IfFailGo(GetFieldRecord(fieldRid, &pField));
                    LPCSTR szFieldName;
                    IfFailGo(getNameOfField(pField, &szFieldName));
                    iHash = HashMemberDef(TokenFromRid(iType, mdtTypeDef), szFieldName);

                    pEntry = pMemberDefHash->Add(iHash);
                    IfNullGo(pEntry);
                    pEntry->tok = TokenFromRid(fieldRid, mdtFieldDef);
                    pEntry->tkParent = TokenFromRid(iType, mdtTypeDef);
                }
            }

            // Publish atomically; if another thread won the race the holder
            // releases our copy on exit.
            if (InterlockedCompareExchangeT<CMemberDefHash *>(&m_pMemberDefHash, pMemberDefHash, NULL) == NULL)
            { // We won the initialization race
                pMemberDefHash.SuppressRelease();
            }
        }
    }
ErrExit:
    return hr;
} // CMiniMdRW::CreateMemberDefHash
7415
7416//---------------------------------------------------------------------------------------
7417//
7418// If the hash is built, search for the item. Ignore token *ptkMember.
7419//
7420CMiniMdRW::HashSearchResult
7421CMiniMdRW::FindMemberDefFromHash(
7422 mdToken tkParent, // Parent token.
7423 LPCUTF8 szName, // Name of item.
7424 PCCOR_SIGNATURE pvSigBlob, // Signature.
7425 ULONG cbSigBlob, // Size of signature.
7426 mdToken * ptkMember) // IN: Ignored token. OUT: Return if found. It can be MethodDef or FieldDef
7427{
7428 // check to see if we need to create hash table
7429 if (m_pMemberDefHash == NULL)
7430 {
7431 // Ignore the failure - the hash won't be created in the worst case
7432 (void)CreateMemberDefHash();
7433 }
7434
7435 // If the table is there, look for the item in the chain of items.
7436 if (m_pMemberDefHash != NULL)
7437 {
7438 MEMBERDEFHASHENTRY * pEntry;
7439 ULONG iHash;
7440 int pos;
7441
7442 // Hash the data.
7443 iHash = HashMemberDef(tkParent, szName);
7444
7445 // Go through every entry in the hash chain looking for ours.
7446 for (pEntry = m_pMemberDefHash->FindFirst(iHash, pos);
7447 pEntry != NULL;
7448 pEntry = m_pMemberDefHash->FindNext(pos))
7449 {
7450 if ((CompareMemberDefs(pEntry->tok, pEntry->tkParent, tkParent, szName, pvSigBlob, cbSigBlob) == S_OK)
7451 && (pEntry->tok != *ptkMember))
7452 {
7453 *ptkMember = pEntry->tok;
7454 return Found;
7455 }
7456 }
7457
7458 return NotFound;
7459 }
7460 else
7461 {
7462 return NoTable;
7463 }
7464} // CMiniMdRW::FindMemberDefFromHash
7465
7466
7467//*****************************************************************************
7468// Check a given memberDef token to see if this one is a match.
7469//*****************************************************************************
7470__checkReturn
7471HRESULT
7472CMiniMdRW::CompareMemberDefs( // S_OK match, S_FALSE no match.
7473 mdToken tkMember, // Token to check. It can be MethodDef or FieldDef
7474 mdToken tkParent, // Parent token recorded in the hash entry
7475 mdToken tkPar, // Parent token.
7476 LPCUTF8 szNameUtf8, // Name of item.
7477 PCCOR_SIGNATURE pvSigBlob, // Signature.
7478 ULONG cbSigBlob) // Size of signature.
7479{
7480 HRESULT hr;
7481 MethodRec *pMethod;
7482 FieldRec *pField;
7483 LPCUTF8 szNameUtf8Tmp;
7484 PCCOR_SIGNATURE pvSigBlobTmp;
7485 ULONG cbSigBlobTmp;
7486 bool bPrivateScope;
7487
7488 if (TypeFromToken(tkMember) == mdtMethodDef)
7489 {
7490 IfFailGo(GetMethodRecord(RidFromToken(tkMember), &pMethod));
7491 IfFailGo(getNameOfMethod(pMethod, &szNameUtf8Tmp));
7492 IfFailGo(getSignatureOfMethod(pMethod, &pvSigBlobTmp, &cbSigBlobTmp));
7493 bPrivateScope = IsMdPrivateScope(getFlagsOfMethod(pMethod));
7494 }
7495 else
7496 {
7497 _ASSERTE(TypeFromToken(tkMember) == mdtFieldDef);
7498 IfFailGo(GetFieldRecord(RidFromToken(tkMember), &pField));
7499 IfFailGo(getNameOfField(pField, &szNameUtf8Tmp));
7500 IfFailGo(getSignatureOfField(pField, &pvSigBlobTmp, &cbSigBlobTmp));
7501 bPrivateScope = IsFdPrivateScope(getFlagsOfField(pField));
7502 }
7503 if (bPrivateScope || (tkPar != tkParent))
7504 {
7505 return S_FALSE;
7506 }
7507
7508 if (strcmp(szNameUtf8Tmp, szNameUtf8) == 0)
7509 {
7510 if (pvSigBlob == NULL)
7511 {
7512 return S_OK;
7513 }
7514
7515 // Name matched. Now check the signature if caller supplies signature
7516 //
7517 if ((cbSigBlob != 0) && (pvSigBlob != NULL))
7518 {
7519 if ((cbSigBlobTmp == cbSigBlob) &&
7520 (memcmp(pvSigBlob, pvSigBlobTmp, cbSigBlob) == 0))
7521 {
7522 return S_OK;
7523 }
7524 }
7525 }
7526 hr = S_FALSE;
7527ErrExit:
7528 return hr;
7529} // CMiniMdRW::CompareMemberDefs
7530
7531//*****************************************************************************
7532// Add a new NamedItem to the hash table.
7533//*****************************************************************************
__checkReturn
HRESULT
CMiniMdRW::AddNamedItemToHash(
    ULONG ixTbl, // Table with the new item.
    mdToken tk, // Token of new guy.
    LPCUTF8 szName, // Name of item.
    mdToken tkParent) // Token of parent, if any.
{
    HRESULT hr = S_OK;
    BYTE *pNamedItem; // A named item record.
    LPCUTF8 szItem; // Name of the item.
    mdToken tkPar = 0; // Parent token of the item.
    ULONG iHash; // A named item's hash value.
    TOKENHASHENTRY *pEntry; // New hash entry.

    // If the hash table hasn't been built it, see if it should get faulted in.
    if (m_pNamedItemHash == NULL)
    {
        ULONG ridEnd = GetCountRecs(ixTbl);
        // Range check avoiding prefast warning with: "if (ridEnd + 1 > INDEX_ROW_COUNT_THRESHOLD)"
        if (ridEnd > (INDEX_ROW_COUNT_THRESHOLD - 1))
        {
            // This assert causes Dev11 #65887, turn it on when the bug is fixed
            //INDEBUG(Debug_CheckIsLockedForWrite();)

            // OutputDebugStringA("Creating TypeRef hash\n");
            // Create a new hash.
            // NOTE(review): unlike the MemberRef/MemberDef hashes this one is
            // stored directly, without an interlocked publish - presumably
            // covered by the writer lock; verify.
            m_pNamedItemHash = new (nothrow) CMetaDataHashBase;
            IfNullGo(m_pNamedItemHash);
            IfFailGo(m_pNamedItemHash->NewInit(
                g_HashSize[GetMetaDataSizeIndex(&m_OptionValue)]));

            // Scan every entry already in the table, add it to the hash.
            for (ULONG index = 1; index <= ridEnd; index++)
            {
                IfFailGo(m_Tables[ixTbl].GetRecord(index, &pNamedItem));
                IfFailGo(getString(GetCol(ixTbl, g_TblIndex[ixTbl].m_iName, pNamedItem), &szItem));
                // Tables without a parent column leave tkPar at its initial 0.
                if (g_TblIndex[ixTbl].m_iParent != (ULONG) -1)
                    tkPar = GetToken(ixTbl, g_TblIndex[ixTbl].m_iParent, pNamedItem);

                iHash = HashNamedItem(tkPar, szItem);

                pEntry = m_pNamedItemHash->Add(iHash);
                IfNullGo(pEntry);
                pEntry->tok = TokenFromRid(index, g_TblIndex[ixTbl].m_Token);
            }
        }
    }
    else
    {
        // Hash already exists: add just the one new record.
        tk = RidFromToken(tk);
        IfFailGo(m_Tables[ixTbl].GetRecord(tk, &pNamedItem));
        IfFailGo(getString(GetCol(ixTbl, g_TblIndex[ixTbl].m_iName, pNamedItem), &szItem));
        if (g_TblIndex[ixTbl].m_iParent != (ULONG)-1)
            tkPar = GetToken(ixTbl, g_TblIndex[ixTbl].m_iParent, pNamedItem);

        iHash = HashNamedItem(tkPar, szItem);

        pEntry = m_pNamedItemHash->Add(iHash);
        IfNullGo(pEntry);
        pEntry->tok = TokenFromRid(tk, g_TblIndex[ixTbl].m_Token);
    }

ErrExit:
    return hr;
} // CMiniMdRW::AddNamedItemToHash
7600
7601//*****************************************************************************
7602// If the hash is built, search for the item.
7603//*****************************************************************************
7604CMiniMdRW::HashSearchResult
7605CMiniMdRW::FindNamedItemFromHash(
7606 ULONG ixTbl, // Table with the item.
7607 LPCUTF8 szName, // Name of item.
7608 mdToken tkParent, // Token of parent, if any.
7609 mdToken * ptk) // Return if found.
7610{
7611 // If the table is there, look for the item in the chain of items.
7612 if (m_pNamedItemHash != NULL)
7613 {
7614 TOKENHASHENTRY *p; // Hash entry from chain.
7615 ULONG iHash; // Item's hash value.
7616 int pos; // Position in hash chain.
7617 mdToken type; // Type of the item being sought.
7618
7619 type = g_TblIndex[ixTbl].m_Token;
7620
7621 // Hash the data.
7622 iHash = HashNamedItem(tkParent, szName);
7623
7624 // Go through every entry in the hash chain looking for ours.
7625 for (p = m_pNamedItemHash->FindFirst(iHash, pos);
7626 p != NULL;
7627 p = m_pNamedItemHash->FindNext(pos))
7628 { // Check that the item is from the right table.
7629 if (TypeFromToken(p->tok) != (ULONG)type)
7630 {
7631 //<TODO>@FUTURE: if using the named item hash for multiple tables, remove
7632 // this check. Until then, debugging aid.</TODO>
7633 _ASSERTE(!"Table mismatch in hash chain");
7634 continue;
7635 }
7636 // Item is in the right table, do the deeper check.
7637 if (CompareNamedItems(ixTbl, p->tok, szName, tkParent) == S_OK)
7638 {
7639 *ptk = p->tok;
7640 return Found;
7641 }
7642 }
7643
7644 return NotFound;
7645 }
7646 else
7647 {
7648 return NoTable;
7649 }
7650} // CMiniMdRW::FindNamedItemFromHash
7651
7652//*****************************************************************************
7653// Check a given mr token to see if this one is a match.
7654//*****************************************************************************
7655__checkReturn
7656HRESULT
7657CMiniMdRW::CompareNamedItems( // S_OK match, S_FALSE no match.
7658 ULONG ixTbl, // Table with the item.
7659 mdToken tk, // Token to check.
7660 LPCUTF8 szName, // Name of item.
7661 mdToken tkParent) // Token of parent, if any.
7662{
7663 HRESULT hr;
7664 BYTE *pNamedItem; // Item to check.
7665 LPCUTF8 szNameUtf8Tmp; // Name of item to check.
7666
7667 // Get the record.
7668 IfFailRet(m_Tables[ixTbl].GetRecord(RidFromToken(tk), &pNamedItem));
7669
7670 // Name is cheaper to get than coded token parent, and fails pretty quickly.
7671 IfFailRet(getString(GetCol(ixTbl, g_TblIndex[ixTbl].m_iName, pNamedItem), &szNameUtf8Tmp));
7672 if (strcmp(szNameUtf8Tmp, szName) != 0)
7673 return S_FALSE;
7674
7675 // Name matched, try parent, if any.
7676 if (g_TblIndex[ixTbl].m_iParent != (ULONG)-1)
7677 {
7678 mdToken tkPar = GetToken(ixTbl, g_TblIndex[ixTbl].m_iParent, pNamedItem);
7679 if (tkPar != tkParent)
7680 return S_FALSE;
7681 }
7682
7683 // Made it to here, so everything matched.
7684 return S_OK;
7685} // CMiniMdRW::CompareNamedItems
7686
7687//*****************************************************************************
7688// Add <md, td> entry to the MethodDef map look up table
7689//*****************************************************************************
7690__checkReturn
7691HRESULT
7692CMiniMdRW::AddMethodToLookUpTable(
7693 mdMethodDef md,
7694 mdTypeDef td)
7695{
7696 HRESULT hr = NOERROR;
7697 mdToken *ptk;
7698 _ASSERTE((TypeFromToken(md) == mdtMethodDef) && HasIndirectTable(TBL_Method));
7699
7700 if (m_pMethodMap != NULL)
7701 {
7702 // Only add to the lookup table if it has been built already by demand.
7703 //
7704 // The first entry in the map is a dummy entry.
7705 // The i'th index entry of the map is the td for methoddef of i.
7706 // We do expect the methoddef tokens are all added when the map exist.
7707 //
7708 _ASSERTE(RidFromToken(md) == (ULONG)m_pMethodMap->Count());
7709 INDEBUG(Debug_CheckIsLockedForWrite();)
7710 ptk = m_pMethodMap->Append();
7711 IfNullGo(ptk);
7712 *ptk = td;
7713 }
7714ErrExit:
7715 return hr;
7716} // CMiniMdRW::AddMethodToLookUpTable
7717
7718//*****************************************************************************
7719// Add <fd, td> entry to the FieldDef map look up table
7720//*****************************************************************************
7721__checkReturn
7722HRESULT
7723CMiniMdRW::AddFieldToLookUpTable(
7724 mdFieldDef fd,
7725 mdTypeDef td)
7726{
7727 HRESULT hr = NOERROR;
7728 mdToken *ptk;
7729 _ASSERTE((TypeFromToken(fd) == mdtFieldDef) && HasIndirectTable(TBL_Field));
7730 if (m_pFieldMap != NULL)
7731 {
7732 // Only add to the lookup table if it has been built already by demand.
7733 //
7734 // The first entry in the map is a dummy entry.
7735 // The i'th index entry of the map is the td for fielddef of i.
7736 // We do expect the fielddef tokens are all added when the map exist.
7737 //
7738 _ASSERTE(RidFromToken(fd) == (ULONG)m_pFieldMap->Count());
7739 ptk = m_pFieldMap->Append();
7740 IfNullGo(ptk);
7741 *ptk = td;
7742 }
7743
7744ErrExit:
7745 return hr;
7746} // CMiniMdRW::AddFieldToLookUpTable
7747
7748//*****************************************************************************
7749// Add <pr, td> entry to the Property map look up table
7750//*****************************************************************************
7751__checkReturn
7752HRESULT
7753CMiniMdRW::AddPropertyToLookUpTable(
7754 mdProperty pr,
7755 mdTypeDef td)
7756{
7757 HRESULT hr = NOERROR;
7758 mdToken *ptk;
7759 _ASSERTE((TypeFromToken(pr) == mdtProperty) && HasIndirectTable(TBL_Property));
7760
7761 if (m_pPropertyMap != NULL)
7762 {
7763 // Only add to the lookup table if it has been built already by demand.
7764 //
7765 // The first entry in the map is a dummy entry.
7766 // The i'th index entry of the map is the td for property of i.
7767 // We do expect the property tokens are all added when the map exist.
7768 //
7769 _ASSERTE(RidFromToken(pr) == (ULONG)m_pPropertyMap->Count());
7770 ptk = m_pPropertyMap->Append();
7771 IfNullGo(ptk);
7772 *ptk = td;
7773 }
7774ErrExit:
7775 return hr;
7776} // CMiniMdRW::AddPropertyToLookUpTable
7777
7778//*****************************************************************************
7779// Add <ev, td> entry to the Event map look up table
7780//*****************************************************************************
7781__checkReturn
7782HRESULT
7783CMiniMdRW::AddEventToLookUpTable(
7784 mdEvent ev,
7785 mdTypeDef td)
7786{
7787 HRESULT hr = NOERROR;
7788 mdToken *ptk;
7789 _ASSERTE((TypeFromToken(ev) == mdtEvent) && HasIndirectTable(TBL_Event));
7790
7791 if (m_pEventMap != NULL)
7792 {
7793 // Only add to the lookup table if it has been built already by demand.
7794 //
7795 // now add to the EventMap table
7796 _ASSERTE(RidFromToken(ev) == (ULONG)m_pEventMap->Count());
7797 ptk = m_pEventMap->Append();
7798 IfNullGo(ptk);
7799 *ptk = td;
7800 }
7801ErrExit:
7802 return hr;
7803} // CMiniMdRW::AddEventToLookUpTable
7804
7805//*****************************************************************************
7806// Add <pd, md> entry to the Param map look up table
7807//*****************************************************************************
7808__checkReturn
7809HRESULT
7810CMiniMdRW::AddParamToLookUpTable(
7811 mdParamDef pd,
7812 mdMethodDef md)
7813{
7814 HRESULT hr = NOERROR;
7815 mdToken *ptk;
7816 _ASSERTE((TypeFromToken(pd) == mdtParamDef) && HasIndirectTable(TBL_Param));
7817
7818 if (m_pParamMap != NULL)
7819 {
7820 // Only add to the lookup table if it has been built already by demand.
7821 //
7822 // now add to the EventMap table
7823 _ASSERTE(RidFromToken(pd) == (ULONG)m_pParamMap->Count());
7824 ptk = m_pParamMap->Append();
7825 IfNullGo(ptk);
7826 *ptk = md;
7827 }
7828ErrExit:
7829 return hr;
7830} // CMiniMdRW::AddParamToLookUpTable
7831
7832//*****************************************************************************
7833// Find parent for a method token. This will use the lookup table if there is an
7834// intermediate table. Or it will use FindMethodOfParent helper
7835//*****************************************************************************
__checkReturn
HRESULT
CMiniMdRW::FindParentOfMethodHelper(
    mdMethodDef md,     // [IN] the methoddef token
    mdTypeDef *ptd)     // [OUT] the parent token
{
    HRESULT hr = NOERROR;
    if (HasIndirectTable(TBL_Method))
    {
        // With a MethodPtr indirection table in place there is no direct way
        // to map a Method rid back to its owning TypeDef, so a reverse-lookup
        // map (Method rid -> TypeDef rid) is built lazily, once, and cached.
        if (m_pMethodMap == NULL)
        {
            ULONG indexTd;
            ULONG indexMd;
            ULONG ridStart;
            ULONG ridEnd;
            TypeDefRec * pTypeDefRec;
            MethodPtrRec * pMethodPtrRec;

            // build the MethodMap table
            NewHolder<TOKENMAP> pMethodMap = new (nothrow) TOKENMAP;
            IfNullGo(pMethodMap);
            // One extra slot: index 0 is a dummy so map indices line up with
            // 1-based rids.
            ULONG nAllocateSize;
            if (!ClrSafeInt<ULONG>::addition(m_Schema.m_cRecs[TBL_Method], 1, nAllocateSize))
            {
                IfFailGo(COR_E_OVERFLOW);
            }
            if (pMethodMap->AllocateBlock(nAllocateSize) == 0)
                IfFailGo(E_OUTOFMEMORY);
            // Walk every TypeDef's method list; each MethodPtr entry in the
            // list names the real Method rid owned by that TypeDef.
            for (indexTd = 1; indexTd <= m_Schema.m_cRecs[TBL_TypeDef]; indexTd++)
            {
                IfFailGo(GetTypeDefRecord(indexTd, &pTypeDefRec));
                ridStart = getMethodListOfTypeDef(pTypeDefRec);
                IfFailGo(getEndMethodListOfTypeDef(indexTd, &ridEnd));

                for (indexMd = ridStart; indexMd < ridEnd; indexMd++)
                {
                    IfFailGo(GetMethodPtrRecord(indexMd, &pMethodPtrRec));
                    PREFIX_ASSUME(pMethodMap->Get(getMethodOfMethodPtr(pMethodPtrRec)) != NULL);
                    *(pMethodMap->Get(getMethodOfMethodPtr(pMethodPtrRec))) = indexTd;
                }
            }
            // Publish the map atomically; if another thread published first,
            // the NewHolder releases our copy and we use the winner's map.
            if (InterlockedCompareExchangeT<TOKENMAP *>(
                &m_pMethodMap,
                pMethodMap,
                NULL) == NULL)
            { // We won the initialization race
                pMethodMap.SuppressRelease();
            }
        }
        *ptd = *(m_pMethodMap->Get(RidFromToken(md)));
    }
    else
    {
        // No indirection table: delegate to FindParentOfMethod, which
        // searches the Method lists of the TypeDef table directly.
        IfFailGo(FindParentOfMethod(RidFromToken(md), (RID *)ptd));
    }
    // *ptd currently holds a raw rid; turn it into a full mdTypeDef token.
    RidToToken(*ptd, mdtTypeDef);
ErrExit:
    return hr;
} // CMiniMdRW::FindParentOfMethodHelper
7895
7896//*****************************************************************************
7897// Find parent for a field token. This will use the lookup table if there is an
7898// intermediate table. Or it will use FindFieldOfParent helper
7899//*****************************************************************************
__checkReturn
HRESULT
CMiniMdRW::FindParentOfFieldHelper(
    mdFieldDef fd,      // [IN] fielddef token
    mdTypeDef *ptd)     // [OUT] parent token
{
    HRESULT hr = NOERROR;
    if (HasIndirectTable(TBL_Field))
    {
        // With a FieldPtr indirection table in place there is no direct way
        // to map a Field rid back to its owning TypeDef, so a reverse-lookup
        // map (Field rid -> TypeDef rid) is built lazily, once, and cached.
        if (m_pFieldMap == NULL)
        {
            ULONG indexTd;
            ULONG indexFd;
            ULONG ridStart, ridEnd;
            TypeDefRec *pTypeDefRec;
            FieldPtrRec *pFieldPtrRec;

            // build the FieldMap table
            NewHolder<TOKENMAP> pFieldMap = new (nothrow) TOKENMAP;
            IfNullGo(pFieldMap);
            // One extra slot: index 0 is a dummy so map indices line up with
            // 1-based rids.
            ULONG nAllocateSize;
            if (!ClrSafeInt<ULONG>::addition(m_Schema.m_cRecs[TBL_Field], 1, nAllocateSize))
            {
                IfFailGo(COR_E_OVERFLOW);
            }
            if (pFieldMap->AllocateBlock(nAllocateSize) == 0)
                IfFailGo(E_OUTOFMEMORY);
            // Walk every TypeDef's field list; each FieldPtr entry in the
            // list names the real Field rid owned by that TypeDef.
            for (indexTd = 1; indexTd<= m_Schema.m_cRecs[TBL_TypeDef]; indexTd++)
            {
                IfFailGo(GetTypeDefRecord(indexTd, &pTypeDefRec));
                ridStart = getFieldListOfTypeDef(pTypeDefRec);
                IfFailGo(getEndFieldListOfTypeDef(indexTd, &ridEnd));

                for (indexFd = ridStart; indexFd < ridEnd; indexFd++)
                {
                    IfFailGo(GetFieldPtrRecord(indexFd, &pFieldPtrRec));
                    PREFIX_ASSUME(pFieldMap->Get(getFieldOfFieldPtr(pFieldPtrRec)) != NULL);
                    *(pFieldMap->Get(getFieldOfFieldPtr(pFieldPtrRec))) = indexTd;
                }
            }
            // Publish the map atomically; if another thread published first,
            // the NewHolder releases our copy and we use the winner's map.
            if (InterlockedCompareExchangeT<TOKENMAP *>(
                &m_pFieldMap,
                pFieldMap,
                NULL) == NULL)
            { // We won the initialization race
                pFieldMap.SuppressRelease();
            }
        }
        *ptd = *(m_pFieldMap->Get(RidFromToken(fd)));
    }
    else
    {
        // No indirection table: delegate to FindParentOfField, which
        // searches the Field lists of the TypeDef table directly.
        IfFailGo(FindParentOfField(RidFromToken(fd), (RID *)ptd));
    }
    // *ptd currently holds a raw rid; turn it into a full mdTypeDef token.
    RidToToken(*ptd, mdtTypeDef);
ErrExit:
    return hr;
} // CMiniMdRW::FindParentOfFieldHelper
7958
7959//*****************************************************************************
7960// Find parent for a property token. This will use the lookup table if there is an
7961// intermediate table.
7962//*****************************************************************************
__checkReturn
HRESULT
CMiniMdRW::FindParentOfPropertyHelper(
    mdProperty pr,      // [IN] property token
    mdTypeDef *ptd)     // [OUT] owning TypeDef token
{
    HRESULT hr = NOERROR;
    if (HasIndirectTable(TBL_Property))
    {
        // With a PropertyPtr indirection table in place, build (lazily, once)
        // a reverse-lookup map: Property rid -> owning TypeDef rid, derived
        // from the PropertyMap table.
        if (m_pPropertyMap == NULL)
        {
            ULONG indexMap;
            ULONG indexPr;
            ULONG ridStart, ridEnd;
            PropertyMapRec *pPropertyMapRec;
            PropertyPtrRec *pPropertyPtrRec;

            // build the PropertyMap table
            NewHolder<TOKENMAP> pPropertyMap = new (nothrow) TOKENMAP;
            IfNullGo(pPropertyMap);
            // One extra slot: index 0 is a dummy so map indices line up with
            // 1-based rids.
            ULONG nAllocateSize;
            if (!ClrSafeInt<ULONG>::addition(m_Schema.m_cRecs[TBL_Property], 1, nAllocateSize))
            {
                IfFailGo(COR_E_OVERFLOW);
            }
            if (pPropertyMap->AllocateBlock(nAllocateSize) == 0)
                IfFailGo( E_OUTOFMEMORY );
            // Each PropertyMap row pairs a parent TypeDef with a range of
            // PropertyPtr rows; record the parent for every property in range.
            for (indexMap = 1; indexMap<= m_Schema.m_cRecs[TBL_PropertyMap]; indexMap++)
            {
                IfFailGo(GetPropertyMapRecord(indexMap, &pPropertyMapRec));
                ridStart = getPropertyListOfPropertyMap(pPropertyMapRec);
                IfFailGo(getEndPropertyListOfPropertyMap(indexMap, &ridEnd));

                for (indexPr = ridStart; indexPr < ridEnd; indexPr++)
                {
                    IfFailGo(GetPropertyPtrRecord(indexPr, &pPropertyPtrRec));
                    mdToken *tok = pPropertyMap->Get(getPropertyOfPropertyPtr(pPropertyPtrRec));
                    PREFIX_ASSUME(tok != NULL);
                    *tok = getParentOfPropertyMap(pPropertyMapRec);
                }
            }
            // Publish the map atomically; if another thread published first,
            // the NewHolder releases our copy and we use the winner's map.
            if (InterlockedCompareExchangeT<TOKENMAP *>(
                &m_pPropertyMap,
                pPropertyMap,
                NULL) == NULL)
            { // We won the initialization race
                pPropertyMap.SuppressRelease();
            }
        }
        *ptd = *(m_pPropertyMap->Get(RidFromToken(pr)));
    }
    else
    {
        // No indirection table: find the PropertyMap row covering this
        // property and read its parent directly.
        RID ridPropertyMap;
        PropertyMapRec *pRec;

        IfFailGo(FindPropertyMapParentOfProperty(RidFromToken(pr), &ridPropertyMap));
        IfFailGo(GetPropertyMapRecord(ridPropertyMap, &pRec));
        *ptd = getParentOfPropertyMap(pRec);
    }
    // *ptd currently holds a raw rid; turn it into a full mdTypeDef token.
    RidToToken(*ptd, mdtTypeDef);
ErrExit:
    return hr;
} // CMiniMdRW::FindParentOfPropertyHelper
8027
8028//*****************************************************************************
8029// Find parent for an Event token. This will use the lookup table if there is an
8030// intermediate table.
8031//*****************************************************************************
__checkReturn
HRESULT
CMiniMdRW::FindParentOfEventHelper(
    mdEvent ev,         // [IN] event token
    mdTypeDef *ptd)     // [OUT] owning TypeDef token
{
    HRESULT hr = NOERROR;
    if (HasIndirectTable(TBL_Event))
    {
        // With an EventPtr indirection table in place, build (lazily, once)
        // a reverse-lookup map: Event rid -> owning TypeDef rid, derived
        // from the EventMap table.
        if (m_pEventMap == NULL)
        {
            ULONG indexMap;
            ULONG indexEv;
            ULONG ridStart, ridEnd;
            EventMapRec *pEventMapRec;
            EventPtrRec *pEventPtrRec;

            // build the EventMap table
            NewHolder<TOKENMAP> pEventMap = new (nothrow) TOKENMAP;
            IfNullGo(pEventMap);
            // One extra slot: index 0 is a dummy so map indices line up with
            // 1-based rids.
            ULONG nAllocateSize;
            if (!ClrSafeInt<ULONG>::addition(m_Schema.m_cRecs[TBL_Event], 1, nAllocateSize))
            {
                IfFailGo(COR_E_OVERFLOW);
            }
            if (pEventMap->AllocateBlock(nAllocateSize) == 0)
                IfFailGo(E_OUTOFMEMORY);
            // Each EventMap row pairs a parent TypeDef with a range of
            // EventPtr rows; record the parent for every event in range.
            for (indexMap = 1; indexMap<= m_Schema.m_cRecs[TBL_EventMap]; indexMap++)
            {
                IfFailGo(GetEventMapRecord(indexMap, &pEventMapRec));
                ridStart = getEventListOfEventMap(pEventMapRec);
                IfFailGo(getEndEventListOfEventMap(indexMap, &ridEnd));

                for (indexEv = ridStart; indexEv < ridEnd; indexEv++)
                {
                    IfFailGo(GetEventPtrRecord(indexEv, &pEventPtrRec));
                    mdToken* tok = pEventMap->Get(getEventOfEventPtr(pEventPtrRec));
                    PREFIX_ASSUME(tok != NULL);
                    *tok = getParentOfEventMap(pEventMapRec);
                }
            }
            // Publish the map atomically; if another thread published first,
            // the NewHolder releases our copy and we use the winner's map.
            if (InterlockedCompareExchangeT<TOKENMAP *>(
                &m_pEventMap,
                pEventMap,
                NULL) == NULL)
            { // We won the initialization race
                pEventMap.SuppressRelease();
            }
        }
        *ptd = *(m_pEventMap->Get(RidFromToken(ev)));
    }
    else
    {
        // No indirection table: find the EventMap row covering this event
        // and read its parent directly.
        RID ridEventMap;
        EventMapRec *pRec;

        IfFailGo(FindEventMapParentOfEvent(RidFromToken(ev), &ridEventMap));
        IfFailGo(GetEventMapRecord(ridEventMap, &pRec));
        *ptd = getParentOfEventMap(pRec);
    }
    // *ptd currently holds a raw rid; turn it into a full mdTypeDef token.
    RidToToken(*ptd, mdtTypeDef);
ErrExit:
    return hr;
} // CMiniMdRW::FindParentOfEventHelper
8096
8097//*****************************************************************************
8098// Find parent for a ParamDef token. This will use the lookup table if there is an
8099// intermediate table.
8100//*****************************************************************************
__checkReturn
HRESULT
CMiniMdRW::FindParentOfParamHelper(
    mdParamDef pd,      // [IN] param token
    mdMethodDef *pmd)   // [OUT] owning MethodDef token
{
    HRESULT hr = NOERROR;
    if (HasIndirectTable(TBL_Param))
    {
        // With a ParamPtr indirection table in place there is no direct way
        // to map a Param rid back to its owning MethodDef, so a reverse-lookup
        // map (Param rid -> Method rid) is built lazily, once, and cached.
        if (m_pParamMap == NULL)
        {
            ULONG indexMd;
            ULONG indexPd;
            ULONG ridStart, ridEnd;
            MethodRec *pMethodRec;
            ParamPtrRec *pParamPtrRec;

            // build the ParamMap table
            NewHolder<TOKENMAP> pParamMap = new (nothrow) TOKENMAP;
            IfNullGo(pParamMap);
            // One extra slot: index 0 is a dummy so map indices line up with
            // 1-based rids.
            ULONG nAllocateSize;
            if (!ClrSafeInt<ULONG>::addition(m_Schema.m_cRecs[TBL_Param], 1, nAllocateSize))
            {
                IfFailGo(COR_E_OVERFLOW);
            }
            if (pParamMap->AllocateBlock(nAllocateSize) == 0)
                IfFailGo(E_OUTOFMEMORY);
            // Walk every Method's param list; each ParamPtr entry in the
            // list names the real Param rid owned by that Method.
            for (indexMd = 1; indexMd<= m_Schema.m_cRecs[TBL_Method]; indexMd++)
            {
                IfFailGo(GetMethodRecord(indexMd, &pMethodRec));
                ridStart = getParamListOfMethod(pMethodRec);
                IfFailGo(getEndParamListOfMethod(indexMd, &ridEnd));

                for (indexPd = ridStart; indexPd < ridEnd; indexPd++)
                {
                    IfFailGo(GetParamPtrRecord(indexPd, &pParamPtrRec));
                    PREFIX_ASSUME(pParamMap->Get(getParamOfParamPtr(pParamPtrRec)) != NULL);
                    *(pParamMap->Get(getParamOfParamPtr(pParamPtrRec))) = indexMd;
                }
            }
            // Publish the map atomically; if another thread published first,
            // the NewHolder releases our copy and we use the winner's map.
            if (InterlockedCompareExchangeT<TOKENMAP *>(
                &m_pParamMap,
                pParamMap,
                NULL) == NULL)
            { // We won the initialization race
                pParamMap.SuppressRelease();
            }
        }
        *pmd = *(m_pParamMap->Get(RidFromToken(pd)));
    }
    else
    {
        // No indirection table: delegate to FindParentOfParam, which
        // searches the Param lists of the Method table directly.
        IfFailGo(FindParentOfParam(RidFromToken(pd), (RID *)pmd));
    }
    // *pmd currently holds a raw rid; turn it into a full mdMethodDef token.
    RidToToken(*pmd, mdtMethodDef);
ErrExit:
    return hr;
} // CMiniMdRW::FindParentOfParamHelper
8159
8160
8161//******************************************************************************
8162// Add an entry in the ENC Log table.
8163//******************************************************************************
8164__checkReturn
8165HRESULT
8166CMiniMdRW::UpdateENCLogHelper(
8167 mdToken tk, // Token to be added to the ENCLog table.
8168 CMiniMdRW::eDeltaFuncs funccode) // Specifies the optional function code..
8169{
8170 ENCLogRec *pRecord;
8171 RID iRecord;
8172 HRESULT hr = S_OK;
8173
8174 // @todo - MD can't handle anything other than functions right now
8175 /* if (TypeFromToken(tk) != mdtMethodDef)
8176 {
8177 _ASSERTE(!"Trying to do something that we can't do");
8178 return S_OK;
8179 }
8180 */
8181 IfFailGo(AddENCLogRecord(&pRecord, &iRecord));
8182 pRecord->SetToken(tk);
8183 pRecord->SetFuncCode(funccode);
8184
8185ErrExit:
8186 return hr;
8187} // CMiniMdRW::UpdateENCLogHelper
8188
8189__checkReturn
8190HRESULT
8191CMiniMdRW::UpdateENCLogHelper2(
8192 ULONG ixTbl, // Table being updated.
8193 ULONG iRid, // Record within table.
8194 CMiniMdRW::eDeltaFuncs funccode) // Specifies the optional function code..
8195{
8196 ENCLogRec *pRecord;
8197 RID iRecord;
8198 HRESULT hr = S_OK;
8199
8200 IfFailGo(AddENCLogRecord(&pRecord, &iRecord));
8201 pRecord->SetToken(RecIdFromRid(iRid, ixTbl));
8202 pRecord->SetFuncCode(funccode);
8203
8204ErrExit:
8205 return hr;
8206} // CMiniMdRW::UpdateENCLogHelper2
8207
8208__checkReturn
8209HRESULT
8210CMiniMdRW::ResetENCLog()
8211{
8212#ifdef FEATURE_METADATA_EMIT
8213 HRESULT hr = S_OK;
8214 ModuleRec * pMod;
8215
8216 // Get the module record.
8217 IfFailGo(GetModuleRecord(1, &pMod));
8218
8219
8220 // Reset the pool deltas
8221 m_StringHeap.StartNewEnCSession();
8222 m_BlobHeap.StartNewEnCSession();
8223 m_UserStringHeap.StartNewEnCSession();
8224
8225 // Clear the ENCLog
8226 m_Tables[TBL_ENCLog].Delete();
8227 m_Schema.m_cRecs[TBL_ENCLog] = 0;
8228
8229ErrExit:
8230 return hr;
8231#else //!FEATURE_METADATA_EMIT
8232 return S_OK;
8233#endif //!FEATURE_METADATA_EMIT
8234} // CMiniMdRW::ResetENCLog
8235
8236// ----------------------------------------------------------------------------
8237// Workaround for compiler performance issue VSW 584653 for 2.0 RTM.
8238// Get the table's VirtualSort validity state.
8239bool
8240CMiniMdRW::IsTableVirtualSorted(ULONG ixTbl)
8241{
8242 _ASSERTE(ixTbl < m_TblCount);
8243
8244 if (m_pVS[ixTbl] == NULL)
8245 {
8246 return false;
8247 }
8248 return m_pVS[ixTbl]->m_isMapValid;
8249} // CMiniMdRW::IsTableVirtualSorted
8250
8251// ----------------------------------------------------------------------------
8252// Workaround for compiler performance issue VSW 584653 for 2.0 RTM.
8253//
8254// Validate table's VirtualSort after adding one record into the table.
8255// Returns new VirtualSort validity state in *pfIsTableVirtualSortValid.
8256// Assumptions:
8257// Table's VirtualSort was valid before adding the record to the table.
8258// The caller must ensure validity of VirtualSort by calling to
8259// IsTableVirtualSorted or by using the returned state from previous
8260// call to this method.
__checkReturn
HRESULT
CMiniMdRW::ValidateVirtualSortAfterAddRecord(
    ULONG ixTbl,                        // Table that just had one record appended.
    bool *pfIsTableVirtualSortValid)    // [OUT] New validity state of the VirtualSort.
{
    _ASSERTE(ixTbl < m_TblCount);

    HRESULT hr;
    VirtualSort *pVS = m_pVS[ixTbl];

    // VirtualSort was valid (had to exist)
    _ASSERTE(pVS != NULL);
    // Adding record invalidated VirtualSort
    _ASSERTE(!pVS->m_isMapValid);
    // Only 1 record was added into table (VirtualSort has 1 bogus element)
    _ASSERTE(m_Schema.m_cRecs[ixTbl] == (ULONG)pVS->m_pMap->Count());

    // Append 1 element into VirtualSort
    mdToken *pAddedVSToken = pVS->m_pMap->Append();
    if (pAddedVSToken == NULL)
    { // There's not enough memory
        // Do not handle OOM now, just leave the VirtualSort invalidated, the
        // next allocation will take care of OOM or the VirtualSort will be
        // resorted when needed (as it was before this performance workaround)
        *pfIsTableVirtualSortValid = false;
        return S_OK;
    }

    // Initialize added element: it maps to itself (the new, last rid),
    // because map index i corresponds to rid i (index 0 is a dummy).
    int iLastElementIndex = pVS->m_pMap->Count() - 1;
    *pAddedVSToken = iLastElementIndex;
    // Check if the added element extends the VirtualSort (keeps sorting).
    // NOTE(review): the fast path requires at least 3 elements (index > 2);
    // smaller tables simply stay invalidated and get fully resorted later --
    // presumably intentional conservatism, confirm before changing.
    if (iLastElementIndex > 2)
    {
        int nCompareResult;
        IfFailRet(pVS->Compare(
            iLastElementIndex - 1,
            iLastElementIndex,
            &nCompareResult));
        if (nCompareResult < 0)
        { // VirtualSort was extended - the added element is bigger than
          // previously last element in VirtualSort

            // Validate VirtualSort as it is still sorted and covers all elements
            // of the MetaData table
            pVS->m_isMapValid = true;
            *pfIsTableVirtualSortValid = true;
            return S_OK;
        }
    }
    // The added element doesn't extend VirtualSort - it is not sorted

    // Keep the VirtualSort invalidated, therefore next binary search will
    // force its recreation and resorting (as it did before this performance
    // workaround)
    *pfIsTableVirtualSortValid = false;
    return S_OK;
} // CMiniMdRW::ValidateVirtualSortAfterAddRecord
8320
8321#ifdef _DEBUG
8322
8323// ----------------------------------------------------------------------------
// Debug-only check that the current thread holds the writer lock (when a
// lock object has been registered at all) before MetaData is mutated.
void
CMiniMdRW::Debug_CheckIsLockedForWrite()
{
    // If this assert fires, then we are trying to modify MetaData that is not locked for write
    _ASSERTE((dbg_m_pLock == NULL) || dbg_m_pLock->Debug_IsLockedForWrite());
}
8330
8331#endif //_DEBUG
8332
8333//*****************************************************************************
8334//
8335// Sort the whole RID table
8336//
8337//*****************************************************************************
8338__checkReturn
8339HRESULT
8340VirtualSort::Sort()
8341{
8342 m_isMapValid = true;
8343 // Note that m_pMap stores an additional bogus element at count 0. This is
8344 // just so we can align the index in m_pMap with the Rids which are 1 based.
8345 return SortRange(1, m_pMap->Count() - 1);
8346} // VirtualSort::Sort
8347
8348//*****************************************************************************
8349//
8350// Sort the range from iLeft to iRight
8351//
8352//*****************************************************************************
// Quicksort over the map indices [iLeft, iRight].  The bigger partition is
// handled iteratively (tail-call converted into the outer loop) so recursion
// depth is bounded by O(log n).
__checkReturn
HRESULT
VirtualSort::SortRange(
    int iLeft,
    int iRight)
{
    HRESULT hr;
    int iLast;

    for (;;)
    {
        // if less than two elements you're done.
        if (iLeft >= iRight)
        {
            return S_OK;
        }

        // The mid-element is the pivot, move it to the left.
        Swap(iLeft, (iLeft+iRight)/2);
        iLast = iLeft;

        // move everything that is smaller than the pivot to the left.
        for (int i = iLeft+1; i <= iRight; i++)
        {
            int nCompareResult;
            IfFailRet(Compare(i, iLeft, &nCompareResult));
            if (nCompareResult < 0)
            {
                Swap(i, ++iLast);
            }
        }

        // Put the pivot to the point where it is in between smaller and larger elements.
        Swap(iLeft, iLast);

        // Sort each partition: recurse on the smaller one, loop on the bigger
        // one (keeps worst-case stack depth logarithmic).
        int iLeftLast = iLast - 1;
        int iRightFirst = iLast + 1;
        if (iLeftLast - iLeft < iRight - iRightFirst)
        {   // Left partition is smaller, sort it recursively
            IfFailRet(SortRange(iLeft, iLeftLast));
            // Tail call to sort the right (bigger) partition
            iLeft = iRightFirst;
            //iRight = iRight;
            continue;
        }
        else
        {   // Right partition is smaller, sort it recursively
            IfFailRet(SortRange(iRightFirst, iRight));
            // Tail call to sort the left (bigger) partition
            //iLeft = iLeft;
            iRight = iLeftLast;
            continue;
        }
    }
} // VirtualSort::SortRange
8409
8410//*****************************************************************************
8411//
8412// Compare two RID base on the m_ixTbl's m_ixCol
8413//
8414//*****************************************************************************
8415__checkReturn
8416HRESULT
8417VirtualSort::Compare(
8418 RID iLeft, // First item to compare.
8419 RID iRight, // Second item to compare.
8420 int *pnResult) // -1, 0, or 1
8421{
8422 HRESULT hr;
8423 RID ridLeft = *(m_pMap->Get(iLeft));
8424 RID ridRight = *(m_pMap->Get(iRight));
8425 void *pRow; // Row from a table.
8426 ULONG valRight, valLeft; // Value from a row.
8427
8428 IfFailRet(m_pMiniMd->getRow(m_ixTbl, ridLeft, &pRow));
8429 valLeft = m_pMiniMd->getIX(pRow, m_pMiniMd->m_TableDefs[m_ixTbl].m_pColDefs[m_ixCol]);
8430 IfFailRet(m_pMiniMd->getRow(m_ixTbl, ridRight, &pRow));
8431 valRight = m_pMiniMd->getIX(pRow, m_pMiniMd->m_TableDefs[m_ixTbl].m_pColDefs[m_ixCol]);
8432
8433 if (valLeft < valRight)
8434 {
8435 *pnResult = -1;
8436 return S_OK;
8437 }
8438 if (valLeft > valRight)
8439 {
8440 *pnResult = 1;
8441 return S_OK;
8442 }
8443 // Values are equal -- preserve existing ordering.
8444 if (ridLeft < ridRight)
8445 {
8446 *pnResult = -1;
8447 return S_OK;
8448 }
8449 if (ridLeft > ridRight)
8450 {
8451 *pnResult = 1;
8452 return S_OK;
8453 }
8454 // Comparing an item to itself?
8455 _ASSERTE(!"Comparing an item to itself in sort");
8456
8457 *pnResult = 0;
8458 return S_OK;
8459} // VirtualSort::Compare
8460
8461//*****************************************************************************
8462//
8463// Initialization function
8464//
8465//*****************************************************************************
8466void VirtualSort::Init( //
8467 ULONG ixTbl, // Table index.
8468 ULONG ixCol, // Column index.
8469 CMiniMdRW *pMiniMd) // MiniMD with data.
8470{
8471 m_pMap = NULL;
8472 m_isMapValid = false;
8473 m_ixTbl = ixTbl;
8474 m_ixCol = ixCol;
8475 m_pMiniMd = pMiniMd;
8476} // VirtualSort::Init
8477
8478
8479//*****************************************************************************
8480//
8481// Uninitialization function
8482//
8483//*****************************************************************************
8484void VirtualSort::Uninit()
8485{
8486 if ( m_pMap )
8487 delete m_pMap;
8488 m_pMap = NULL;
8489 m_isMapValid = false;
8490} // VirtualSort::Uninit
8491
8492
8493//*****************************************************************************
8494//
8495// Mark a token
8496//
8497//*****************************************************************************
8498HRESULT FilterTable::MarkToken(
8499 mdToken tk, // token to be marked as to keep
8500 DWORD bitToMark) // bit flag to set in the keep table
8501{
8502 HRESULT hr = NOERROR;
8503 RID rid = RidFromToken(tk);
8504
8505 if ( (Count() == 0) || ((RID)(Count() -1)) < rid )
8506 {
8507 // grow table
8508 IfFailGo( AllocateBlock( rid + 1 - Count() ) );
8509 }
8510
8511#ifdef _DEBUG
8512 if ( (*Get(rid)) & bitToMark )
8513 {
8514 // global TypeDef could be marked more than once so don't assert if token is mdtTypeDef
8515 if (TypeFromToken(tk) != mdtTypeDef)
8516 _ASSERTE(!"Token has been Marked");
8517 }
8518#endif //_DEBUG
8519
8520 // set the keep bit
8521 *Get(rid) = (*Get(rid)) | bitToMark;
8522ErrExit:
8523 return hr;
8524} // FilterTable::MarkToken
8525
8526
8527//*****************************************************************************
8528//
8529// Unmark a token
8530//
8531//*****************************************************************************
8532HRESULT FilterTable::UnmarkToken(
8533 mdToken tk, // token to be unmarked as deleted.
8534 DWORD bitToMark) // bit flag to unset in the keep table
8535{
8536 RID rid = RidFromToken(tk);
8537
8538 if ( (Count() == 0) || ((RID)(Count() -1)) < rid )
8539 {
8540 // unmarking should not have grown table. It currently only support dropping the transient CAs.
8541 _ASSERTE(!"BAD state!");
8542 }
8543
8544#ifdef _DEBUG
8545 if ( (*Get(rid)) & bitToMark )
8546 {
8547 // global TypeDef could be marked more than once so don't assert if token is mdtTypeDef
8548 if (TypeFromToken(tk) != mdtTypeDef)
8549 _ASSERTE(!"Token has been Marked");
8550 }
8551#endif //_DEBUG
8552
8553 // unset the keep bit
8554 *Get(rid) = (*Get(rid)) & ~bitToMark;
8555 return NOERROR;
8556} // FilterTable::MarkToken
8557
8558
8559//*****************************************************************************
8560//
8561// Mark an UserString token
8562//
8563//*****************************************************************************
8564HRESULT FilterTable::MarkUserString(
8565 mdString str)
8566{
8567 int high, low, mid;
8568
8569 low = 0;
8570 high = m_daUserStringMarker->Count() - 1;
8571 while (low <= high)
8572 {
8573 mid = (high + low) / 2;
8574 if ((m_daUserStringMarker->Get(mid))->m_tkString > (DWORD) str)
8575 {
8576 high = mid - 1;
8577 }
8578 else if ((m_daUserStringMarker->Get(mid))->m_tkString < (DWORD) str)
8579 {
8580 low = mid + 1;
8581 }
8582 else
8583 {
8584 (m_daUserStringMarker->Get(mid))->m_fMarked = true;
8585 return NOERROR;
8586 }
8587 }
8588 _ASSERTE(!"Bad Token!");
8589 return NOERROR;
8590} // FilterTable::MarkUserString
8591
8592//*****************************************************************************
8593//
8594// Mark a UserString token that was added since our last MarkAll/UnMarkAll
8595//
8596//*****************************************************************************
8597HRESULT FilterTable::MarkNewUserString(mdString str)
8598{
8599 FilterUserStringEntry *pItem = m_daUserStringMarker->Append();
8600
8601 if (pItem == NULL)
8602 return E_OUTOFMEMORY;
8603
8604 pItem->m_tkString = str;
8605 pItem->m_fMarked = true;
8606
8607 return S_OK;
8608} // FilterTable::MarkNewUserString
8609
8610//*****************************************************************************
8611//
8612// Unmarking from 1 to ulSize for all tokens.
8613//
8614//*****************************************************************************
8615HRESULT FilterTable::UnmarkAll(
8616 CMiniMdRW *pMiniMd,
8617 ULONG ulSize)
8618{
8619 HRESULT hr;
8620
8621 S_UINT32 nAllocateSize = S_UINT32(ulSize) + S_UINT32(1);
8622 if (nAllocateSize.IsOverflow())
8623 {
8624 IfFailGo(COR_E_OVERFLOW);
8625 }
8626 if (!AllocateBlock(nAllocateSize.Value()))
8627 {
8628 IfFailGo(E_OUTOFMEMORY);
8629 }
8630 memset(Get(0), 0, nAllocateSize.Value() * sizeof(DWORD));
8631
8632 // unmark all of the user string
8633 m_daUserStringMarker = new (nothrow) CDynArray<FilterUserStringEntry>();
8634 IfNullGo(m_daUserStringMarker);
8635
8636 for (UINT32 nIndex = 0; ;)
8637 {
8638 MetaData::DataBlob userString;
8639 UINT32 nNextIndex;
8640 hr = pMiniMd->GetUserStringAndNextIndex(
8641 nIndex,
8642 &userString,
8643 &nNextIndex);
8644 IfFailGo(hr);
8645 if (hr == S_FALSE)
8646 { // We reached the last user string
8647 hr = S_OK;
8648 break;
8649 }
8650 _ASSERTE(hr == S_OK);
8651
8652 // Skip empty strings
8653 if (userString.IsEmpty())
8654 {
8655 nIndex = nNextIndex;
8656 continue;
8657 }
8658 FilterUserStringEntry *pItem = m_daUserStringMarker->Append();
8659 pItem->m_tkString = TokenFromRid(nIndex, mdtString);
8660 pItem->m_fMarked = false;
8661
8662 // Process next user string in the heap
8663 nIndex = nNextIndex;
8664 }
8665
8666ErrExit:
8667 return hr;
8668} // FilterTable::UnmarkAll
8669
8670
8671
8672//*****************************************************************************
8673//
8674// Marking from 1 to ulSize for all tokens.
8675//
8676//*****************************************************************************
8677HRESULT FilterTable::MarkAll(
8678 CMiniMdRW *pMiniMd,
8679 ULONG ulSize)
8680{
8681 HRESULT hr = S_OK;
8682
8683 S_UINT32 nAllocateSize = S_UINT32(ulSize) + S_UINT32(1);
8684 if (nAllocateSize.IsOverflow())
8685 {
8686 IfFailGo(COR_E_OVERFLOW);
8687 }
8688 if (!AllocateBlock(nAllocateSize.Value()))
8689 {
8690 IfFailGo(E_OUTOFMEMORY);
8691 }
8692 memset(Get(0), 0xFFFFFFFF, nAllocateSize.Value() * sizeof(DWORD));
8693
8694 // mark all of the user string
8695 m_daUserStringMarker = new (nothrow) CDynArray<FilterUserStringEntry>();
8696 IfNullGo(m_daUserStringMarker);
8697
8698 for (UINT32 nIndex = 0; ;)
8699 {
8700 MetaData::DataBlob userString;
8701 UINT32 nNextIndex;
8702 hr = pMiniMd->GetUserStringAndNextIndex(
8703 nIndex,
8704 &userString,
8705 &nNextIndex);
8706 IfFailGo(hr);
8707 if (hr == S_FALSE)
8708 { // We reached the last user string
8709 hr = S_OK;
8710 break;
8711 }
8712 _ASSERTE(hr == S_OK);
8713
8714 // Skip empty strings
8715 if (userString.IsEmpty())
8716 {
8717 nIndex = nNextIndex;
8718 continue;
8719 }
8720 FilterUserStringEntry *pItem = m_daUserStringMarker->Append();
8721 pItem->m_tkString = TokenFromRid(nIndex, mdtString);
8722 pItem->m_fMarked = true;
8723
8724 // Process next user string in the heap
8725 nIndex = nNextIndex;
8726 }
8727
8728ErrExit:
8729 return hr;
8730} // FilterTable::MarkAll
8731
8732//*****************************************************************************
8733//
8734// return true if a token is marked. Otherwise return false.
8735//
8736//*****************************************************************************
8737bool FilterTable::IsTokenMarked(
8738 mdToken tk, // Token to inquiry
8739 DWORD bitMarked) // bit flag to check in the deletion table
8740{
8741 RID rid = RidFromToken(tk);
8742
8743 //<TODO>@FUTURE: inconsistency!!!
8744 // If caller unmarked everything while the module has 2 typedef and 10 methodef.
8745 // We will have 11 rows in the FilterTable. Then user add the 3 typedef, it is
8746 // considered unmarked unless we mark it when we do DefineTypeDef. However, if user
8747 // add another MethodDef, it will be considered marked unless we unmarked.....
8748 // Maybe the solution is not to support DefineXXXX if you use the filter interface??</TODO>
8749
8750 if ( (Count() == 0) || ((RID)(Count() - 1)) < rid )
8751 {
8752 // If UnmarkAll has never been called or tk is added after UnmarkAll,
8753 // tk is considered marked.
8754 //
8755 return true;
8756 }
8757 return ( (*Get(rid)) & bitMarked ? true : false);
8758} // FilterTable::IsTokenMarked
8759
8760
8761//*****************************************************************************
8762//
8763// return true if a token is marked. Otherwise return false.
8764//
8765//*****************************************************************************
// Dispatch on the token's type to the per-table marked-bit accessor.
// Unrecognized token types assert and report unmarked.
bool FilterTable::IsTokenMarked(
    mdToken tk) // Token to inquiry
{

    switch ( TypeFromToken(tk) )
    {
    case mdtTypeRef:
        return IsTypeRefMarked(tk);
    case mdtTypeDef:
        return IsTypeDefMarked(tk);
    case mdtFieldDef:
        return IsFieldMarked(tk);
    case mdtMethodDef:
        return IsMethodMarked(tk);
    case mdtParamDef:
        return IsParamMarked(tk);
    case mdtMemberRef:
        return IsMemberRefMarked(tk);
    case mdtCustomAttribute:
        return IsCustomAttributeMarked(tk);
    case mdtPermission:
        return IsDeclSecurityMarked(tk);
    case mdtSignature:
        return IsSignatureMarked(tk);
    case mdtEvent:
        return IsEventMarked(tk);
    case mdtProperty:
        return IsPropertyMarked(tk);
    case mdtModuleRef:
        return IsModuleRefMarked(tk);
    case mdtTypeSpec:
        return IsTypeSpecMarked(tk);
    case mdtInterfaceImpl:
        return IsInterfaceImplMarked(tk);
    case mdtMethodSpec:
        return IsMethodSpecMarked(tk);
    case mdtString:
        // User strings are tracked separately in m_daUserStringMarker.
        return IsUserStringMarked(tk);
    default:
        _ASSERTE(!"Bad token type!");
        break;
    }
    return false;
} // FilterTable::IsTokenMarked
8810
8811//*****************************************************************************
8812//
8813// return true if an UserString is marked.
8814//
8815//*****************************************************************************
8816bool FilterTable::IsUserStringMarked(mdString str)
8817{
8818 int low, mid, high, count;
8819
8820 // if m_daUserStringMarker is not created, UnmarkAll has never been called
8821 if (m_daUserStringMarker == NULL)
8822 return true;
8823
8824 low = 0;
8825 count = m_daUserStringMarker->Count();
8826
8827 if (count == 0)
8828 {
8829 // No strings are marked.
8830 return false;
8831 }
8832
8833 high = m_daUserStringMarker->Count() - 1;
8834
8835 while (low <= high)
8836 {
8837 mid = (high + low) / 2;
8838 if ((m_daUserStringMarker->Get(mid))->m_tkString > (DWORD) str)
8839 {
8840 high = mid - 1;
8841 }
8842 else if ((m_daUserStringMarker->Get(mid))->m_tkString < (DWORD) str)
8843 {
8844 low = mid + 1;
8845 }
8846 else
8847 {
8848 return (m_daUserStringMarker->Get(mid))->m_fMarked;
8849 }
8850 }
8851 _ASSERTE(!"Bad Token!");
8852 return false;
8853} // FilterTable::IsUserStringMarked
8854
8855
8856
8857//*****************************************************************************
8858//
8859// destructor
8860//
8861//*****************************************************************************
8862FilterTable::~FilterTable()
8863{
8864 if (m_daUserStringMarker)
8865 delete m_daUserStringMarker;
8866 Clear();
8867} // FilterTable::~FilterTable
8868
8869