1 | // Licensed to the .NET Foundation under one or more agreements. |
2 | // The .NET Foundation licenses this file to you under the MIT license. |
3 | // See the LICENSE file in the project root for more information. |
4 | //***************************************************************************** |
5 | // RecordPool.cpp -- Implementation of record heaps. |
6 | // |
7 | |
8 | // |
9 | //***************************************************************************** |
10 | #include "stdafx.h" |
11 | #include <recordpool.h> |
12 | |
13 | #define RECORDPOOL_GROW_FACTOR 8 |
14 | #define RECORDPOOL_GROW_MAX 2048 |
15 | #define RECORDPOOL_GROW_MINROWS 2 |
16 | #define RECORDPOOL_GROW_DEFAULTROWS 16 |
17 | |
18 | HRESULT |
19 | RecordPool::InitNew( |
20 | UINT32 cbRec, // Record size. |
21 | UINT32 cRecsInit) // Initial guess of count of record. |
22 | { |
23 | HRESULT hr; |
24 | S_UINT32 cbGrow; // Initial grow size of the pool. |
25 | |
26 | // Size of each record is fixed. |
27 | m_cbRec = cbRec; |
28 | |
29 | if (cRecsInit > 0) |
30 | { |
31 | cbGrow = S_UINT32(cbRec) * S_UINT32(cRecsInit); |
32 | } |
33 | else |
34 | { |
35 | cbGrow = S_UINT32(cbRec) * S_UINT32(RECORDPOOL_GROW_DEFAULTROWS); |
36 | } |
37 | if (cbGrow.IsOverflow()) |
38 | { |
39 | Debug_ReportInternalError("Growing record pool overflowed." ); |
40 | return CLDB_E_INTERNALERROR; |
41 | } |
42 | |
43 | m_ulGrowInc = cbGrow.Value(); |
44 | |
45 | IfFailRet(StgPool::InitNew()); |
46 | |
47 | // If there is an initial size for the record pool, grow to that now. |
48 | if (cRecsInit > 0) |
49 | { |
50 | if (!Grow(cbGrow.Value())) |
51 | return E_OUTOFMEMORY; |
52 | } |
53 | |
54 | return S_OK; |
55 | } // RecordPool::InitNew |
56 | |
57 | //***************************************************************************** |
58 | // Load a Record heap from persisted memory. If a copy of the data is made |
59 | // (so that it may be updated), then a new hash table is generated which can |
// be used to eliminate duplicates with new Records.
61 | //***************************************************************************** |
62 | HRESULT |
63 | RecordPool::InitOnMem( |
64 | ULONG cbRec, // Record size. |
65 | void *pData, // Predefined data. |
66 | ULONG iSize, // Size of data. |
67 | BOOL fReadOnly) // true if append is forbidden. |
68 | { |
69 | HRESULT hr; |
70 | m_cbRec = cbRec; |
71 | |
72 | // Let base class init our memory structure. |
73 | IfFailRet(StgPool::InitOnMem(pData, iSize, fReadOnly)); |
74 | |
75 | // For init on existing mem case. |
76 | if ((pData != NULL) && (iSize != 0)) |
77 | { |
78 | // If we are doing an update in place don't make a copy |
79 | // If we cannot update, then we don't need a hash table. |
80 | if (fReadOnly) |
81 | return S_OK; |
82 | |
83 | // Other wise copy the memory to do the update |
84 | IfFailRet(TakeOwnershipOfInitMem()); |
85 | } |
86 | |
87 | return S_OK; |
88 | } // RecordPool::InitOnMem |
89 | |
90 | //***************************************************************************** |
91 | // Allocate memory if we don't have any, or grow what we have. If successful, |
92 | // then at least iRequired bytes will be allocated. |
93 | //***************************************************************************** |
bool RecordPool::Grow(      // true if successful.
    ULONG iRequired)        // Min required bytes to allocate.
{
    // Let the base pool allocate (or extend to) at least iRequired bytes.
    if (!StgPool::Grow(iRequired))
        return false;

    // Zero everything from the next free location to the end of the segment,
    // so records handed out by AddRecord start out zero-initialized.
    memset(GetNextLocation(), 0, GetCbSegAvailable());

    return true;
} // bool RecordPool::Grow()
106 | |
107 | //***************************************************************************** |
108 | // The Record will be added to the pool. The index of the Record in the pool |
109 | // is returned in *piIndex. If the Record is already in the pool, then the |
110 | // index will be to the existing copy of the Record. |
111 | //***************************************************************************** |
112 | HRESULT |
113 | RecordPool::AddRecord( |
114 | BYTE **ppRecord, |
115 | UINT32 *pnIndex) // Return 1-based index of Record here. |
116 | { |
117 | _ASSERTE(pnIndex != NULL); |
118 | |
119 | // Space on heap for new Record? |
120 | if (m_cbRec > GetCbSegAvailable()) |
121 | { |
122 | if (!Grow(m_cbRec)) |
123 | { |
124 | *ppRecord = NULL; |
125 | return E_OUTOFMEMORY; |
126 | } |
127 | } |
128 | |
129 | // Records should be aligned on record boundaries. |
130 | _ASSERTE((GetNextOffset() % m_cbRec) == 0); |
131 | |
132 | // Copy the Record to the heap. |
133 | *ppRecord = GetNextLocation(); |
134 | |
135 | // Give the 1-based index back to caller. |
136 | *pnIndex = (GetNextOffset() / m_cbRec) + 1; |
137 | |
138 | // Update heap counters. |
139 | SegAllocate(m_cbRec); |
140 | |
141 | return S_OK; |
142 | } // RecordPool::AddRecord |
143 | |
144 | //***************************************************************************** |
145 | // Insert a Record into the pool. The index of the Record before which to |
146 | // insert is specified. Shifts all records down. Return a pointer to the |
147 | // new record. |
148 | //***************************************************************************** |
HRESULT
RecordPool::InsertRecord(
    UINT32 nIndex,      // [IN] Insert record before this.
    BYTE **ppRecord)    // [OUT] Pointer to the newly emptied slot at nIndex.
{
    HRESULT hr;
    StgPoolSeg *pCurSeg;    // Current segment.
    StgPoolSeg *pPrevSeg;   // Previous segment.
    BYTE *pSegEnd;      // Last record in a segment.
    BYTE *pFrom;        // A copy/move source.
    ULONG cbMove;       // Bytes to move.
    BYTE *pNew;         // New record.

    // Inserting before (Count()+1) is just an append; no shifting needed.
    if (nIndex == (Count() + 1))
    {
        UINT32 nNewIndex_Ignore;
        return AddRecord(ppRecord, &nNewIndex_Ignore);
    }

    // If past end or before beginning, invalid (indices are 1-based).
    if ((nIndex > Count()) || (nIndex == 0))
    {
        Debug_ReportError("Invalid index passed for inserting record." );
        return CLDB_E_INDEX_NOTFOUND;
    }

    // This code works by allocating a new record at the end.
    // The last record is moved to the new end record.
    // Working backwards through the chained segments,
    // shift the segment by one record, so the empty record
    // is at the start of the segment instead of the end.
    // copy the last record of the previous segment to the
    // newly emptied first record of the current segment.
    // When the segment containing the insert point is finally
    // reached, its last record is empty (from above loop), so
    // shift from the insertion point to the end-1 by one record.

    // Capture the current tail segment and a pointer to the current last
    // record BEFORE appending, since AddRecord may open a new segment.
    // (Segments are chained, not reallocated, so pSegEnd stays valid —
    // presumably guaranteed by StgPool; verify if changing Grow.)
    pCurSeg = m_pCurSeg;
    IfFailRet(GetRecord(Count(), &pSegEnd));
    _ASSERTE(hr == S_OK);

    // Add an empty record to the end of the heap.
    {
        UINT32 nLastRecordIndex_Ignore;
        IfFailRet(AddRecord(&pNew, &nLastRecordIndex_Ignore));
    }

    // Copy the current last record to the new record.
    memcpy(pNew, pSegEnd, m_cbRec);

    // While the insert location is prior to the current segment,
    while (nIndex < GetIndexForRecord(pCurSeg->m_pSegData))
    {
        // Shift the segment up by one record.  memmove handles the
        // overlapping source/destination ranges.
        cbMove = (ULONG)(pSegEnd - pCurSeg->m_pSegData);
        memmove(pCurSeg->m_pSegData + m_cbRec, pCurSeg->m_pSegData, cbMove);

        // Find the previous segment (the chain is singly linked, so walk
        // forward from the head until the next pointer hits pCurSeg).
        pPrevSeg = this;
        while (pPrevSeg->m_pNextSeg != pCurSeg)
        {
            pPrevSeg = pPrevSeg->m_pNextSeg;
        }

        // Copy the final record of the previous segment to the start of this one.
        pSegEnd = pPrevSeg->m_pSegData+pPrevSeg->m_cbSegNext-m_cbRec;
        memcpy(pCurSeg->m_pSegData, pSegEnd, m_cbRec);

        // Make the previous segment the current segment.
        pCurSeg = pPrevSeg;
    }

    // Shift at the insert location, forward by one.  pSegEnd now points to
    // the last record of the segment containing the insert point.
    IfFailRet(GetRecord(nIndex, &pFrom));
    _ASSERTE(hr == S_OK);

    cbMove = (ULONG)(pSegEnd - pFrom);
    memmove(pFrom + m_cbRec, pFrom, cbMove);

    // The slot at nIndex is now free; hand it back to the caller.
    *ppRecord = pFrom;

    return S_OK;
} // RecordPool::InsertRecord
234 | |
235 | //***************************************************************************** |
236 | // Return a pointer to a Record given an index previously handed out by |
237 | // AddRecord or FindRecord. |
238 | //***************************************************************************** |
239 | HRESULT |
240 | RecordPool::GetRecord( |
241 | UINT32 nIndex, // 1-based index of Record in pool. |
242 | BYTE **ppRecord) |
243 | { |
244 | MetaData::DataBlob record; |
245 | |
246 | if (nIndex == 0) |
247 | { |
248 | Debug_ReportError("Invalid index 0 passed." ); |
249 | *ppRecord = NULL; |
250 | return CLDB_E_INDEX_NOTFOUND; |
251 | } |
252 | |
253 | // Convert to 0-based internal form, defer to implementation. |
254 | HRESULT hr = GetData((nIndex - 1) * m_cbRec, &record); |
255 | if (FAILED(hr)) |
256 | { |
257 | *ppRecord = NULL; |
258 | return hr; |
259 | } |
260 | _ASSERTE(record.ContainsData(m_cbRec)); |
261 | *ppRecord = record.GetDataPointer(); |
262 | |
263 | return hr; |
264 | } // RecordPool::GetRecord |
265 | |
266 | //***************************************************************************** |
267 | // Return the first record in a pool, and set up a context for fast |
268 | // iterating through the pool. Note that this scheme does pretty minimal |
269 | // error checking. |
270 | //***************************************************************************** |
271 | void *RecordPool::GetFirstRecord( // Pointer to Record in pool. |
272 | void **pContext) // Store context here. |
273 | { |
274 | StgPoolSeg **ppSeg = reinterpret_cast<StgPoolSeg**>(pContext); |
275 | |
276 | *ppSeg = static_cast<StgPoolSeg*>(this); |
277 | return (*ppSeg)->m_pSegData; |
278 | } // void *RecordPool::GetFirstRecord() |
279 | |
280 | //***************************************************************************** |
281 | // Given a pointer to a record, return a pointer to the next record. |
282 | // Note that this scheme does pretty minimal error checking. In particular, |
283 | // this will let the caller walk off of the end of valid data in the last |
284 | // segment. |
285 | //***************************************************************************** |
286 | void *RecordPool::GetNextRecord( // Pointer to Record in pool. |
287 | void *pRecord, // Current record. |
288 | void **pContext) // Stored context here. |
289 | { |
290 | BYTE *pbRec = reinterpret_cast<BYTE*>(pRecord); |
291 | StgPoolSeg **ppSeg = reinterpret_cast<StgPoolSeg**>(pContext); |
292 | |
293 | // Get the next record. |
294 | pbRec += m_cbRec; |
295 | |
296 | // Is the next record outside of the current segment? |
297 | if (static_cast<ULONG>(pbRec - (*ppSeg)->m_pSegData) >= (*ppSeg)->m_cbSegSize) |
298 | { |
299 | // Better be exactly one past current segment. |
300 | _ASSERTE(static_cast<ULONG>(pbRec - (*ppSeg)->m_pSegData) == (*ppSeg)->m_cbSegSize); |
301 | // Switch the context pointer. |
302 | *ppSeg = (*ppSeg)->m_pNextSeg; |
303 | // Next record is start of next segment. |
304 | if (*ppSeg) |
305 | return (*ppSeg)->m_pSegData; |
306 | else |
307 | return 0; |
308 | } |
309 | |
310 | return pbRec; |
311 | } // void *RecordPool::GetNextRecord() |
312 | |
313 | //***************************************************************************** |
314 | // Given a pointer to a record, determine the index corresponding to the |
315 | // record. |
316 | //***************************************************************************** |
317 | ULONG RecordPool::GetIndexForRecord( // 1-based index of Record in pool. |
318 | const void *pvRecord) // Pointer to Record in pool. |
319 | { |
320 | ULONG iPrev = 0; // cumulative index of previous segments. |
321 | const StgPoolSeg *pSeg = this; |
322 | const BYTE *pRecord = reinterpret_cast<const BYTE*>(pvRecord); |
323 | const BYTE *pSegData = NULL; |
324 | ULONG ulSegSize; |
325 | for (;;) |
326 | { // Does the current segment contain the record? |
327 | pSegData = pSeg->GetSegData(); |
328 | ulSegSize = pSeg->GetSegSize(); |
329 | if (pRecord >= pSegData && pRecord < pSegData + ulSegSize) |
330 | { // The pointer should be to the start of a record. |
331 | _ASSERTE(((pRecord - pSegData) % m_cbRec) == 0); |
332 | return (ULONG)(1 + iPrev + (pRecord - pSegData) / m_cbRec); |
333 | } |
334 | _ASSERTE((ulSegSize % m_cbRec) == 0); |
335 | iPrev += ulSegSize / m_cbRec; |
336 | pSeg = pSeg->GetNextSeg(); |
337 | // If out of data, didn't find the record. |
338 | if (pSeg == 0) |
339 | return 0; |
340 | } |
341 | } // ULONG RecordPool::GetIndexForRecord() |
342 | |
343 | //***************************************************************************** |
344 | // Given a purported pointer to a record, determine if the pointer is valid. |
345 | //***************************************************************************** |
346 | int RecordPool::IsValidPointerForRecord(// true or false. |
347 | const void *pvRecord) // Pointer to Record in pool. |
348 | { |
349 | const StgPoolSeg *pSeg; |
350 | const BYTE *pRecord = reinterpret_cast<const BYTE*>(pvRecord); |
351 | const BYTE *pSegData = NULL; |
352 | for (pSeg = this; (pSeg); pSeg = pSeg->GetNextSeg()) |
353 | { // Does the current segment contain the record? |
354 | pSegData = pSeg->GetSegData(); |
355 | if ((pRecord >= pSegData) && (pRecord < pSegData + pSeg->GetSegSize())) |
356 | { // The pointer should be to the start of a record. |
357 | return (((pRecord - pSegData) % m_cbRec) == 0); |
358 | } |
359 | _ASSERTE((pSeg->GetSegSize() % m_cbRec) == 0); |
360 | } |
361 | return 0; |
362 | } // int RecordPool::IsValidPointerForRecord() |
363 | |
364 | //***************************************************************************** |
365 | // Replace the contents of this pool with those from another pool. The other |
366 | // pool loses ownership of the memory. |
367 | //***************************************************************************** |
HRESULT RecordPool::ReplaceContents(
    RecordPool *pOther)     // The other record pool.
{
    // Release any memory currently held.
    Uninit();

    // Grab the new data: a shallow member-wise copy, so this pool takes
    // over pOther's segment chain and bookkeeping wholesale.
    *this = *pOther;

    // If the other pool's curseg pointed to itself, make this pool point to itself.
    // (A pool with only one segment uses itself as the current segment.)
    if (pOther->m_pCurSeg == pOther)
        m_pCurSeg = this;

    // Fix the other pool so it won't free the memory that this one
    // just hijacked: point its data at the shared zero page and sever
    // its chain before letting it clean itself up.
    pOther->m_pSegData = (BYTE*)m_zeros;
    pOther->m_pNextSeg = 0;
    pOther->Uninit();

    return S_OK;
} // HRESULT RecordPool::ReplaceContents()
389 | |