1 | // Licensed to the .NET Foundation under one or more agreements. |
2 | // The .NET Foundation licenses this file to you under the MIT license. |
3 | // See the LICENSE file in the project root for more information. |
4 | |
5 | #include "strike.h" |
6 | #include "util.h" |
7 | |
8 | #include "sos.h" |
9 | |
10 | |
11 | #ifdef _ASSERTE |
12 | #undef _ASSERTE |
13 | #endif |
14 | |
15 | #define _ASSERTE(a) {;} |
16 | |
17 | #include "gcdesc.h" |
18 | |
19 | |
20 | #undef _ASSERTE |
21 | |
22 | namespace sos |
23 | { |
24 | template <class T> |
25 | static bool MemOverlap(T beg1, T end1, // first range |
26 | T beg2, T end2) // second range |
27 | { |
28 | if (beg2 >= beg1 && beg2 <= end1) // second range starts within first range |
29 | return true; |
30 | else if (end2 >= beg1 && end2 <= end1) // second range ends within first range |
31 | return true; |
32 | else if (beg1 >= beg2 && beg1 <= end2) // first range starts within second range |
33 | return true; |
34 | else if (end1 >= beg2 && end1 <= end2) // first range ends within second range |
35 | return true; |
36 | else |
37 | return false; |
38 | } |
39 | |
40 | |
    // Wraps the object at 'addr'. The method table, size, and type name are
    // all resolved lazily on first use. Throws sos::Exception if the address
    // is not pointer aligned (objects are always ALIGNCONST aligned).
    Object::Object(TADDR addr)
        : mAddress(addr), mMT(0), mSize(~0), mPointers(false), mMTData(0), mTypeName(0)
    {
        if ((mAddress & ~ALIGNCONST) != mAddress)
            sos::Throw<Exception>("Object %p is misaligned." , mAddress);
    }
47 | |
    // Wraps the object at 'addr' with an already-known method table. The low
    // two bits of 'mt' are GC mark/pinning bits, not part of the pointer, so
    // they are masked off before caching.
    Object::Object(TADDR addr, TADDR mt)
        : mAddress(addr), mMT(mt & ~3), mSize(~0), mPointers(false), mMTData(0), mTypeName(0)
    {
        if ((mAddress & ~ALIGNCONST) != mAddress)
            sos::Throw<Exception>("Object %p is misaligned." , mAddress);
    }
54 | |
55 | |
    // "Copy" constructor with transfer-of-ownership semantics: the cached
    // heap allocations (mMTData, mTypeName) are stolen from 'rhs' rather
    // than duplicated, so 'rhs' is reset to avoid a double free. The stolen
    // members are mutable, which is why this compiles against a const ref.
    Object::Object(const Object &rhs)
        : mAddress(rhs.mAddress), mMT(rhs.mMT), mSize(rhs.mSize), mPointers(rhs.mPointers), mMTData(rhs.mMTData), mTypeName(rhs.mTypeName)
    {
        rhs.mMTData = 0;
        rhs.mTypeName = 0;
    }
62 | |
63 | const Object &Object::operator=(TADDR addr) |
64 | { |
65 | if (mMTData) |
66 | delete mMTData; |
67 | |
68 | if (mTypeName) |
69 | delete mTypeName; |
70 | |
71 | mAddress = addr; |
72 | mMT = 0; |
73 | mSize = ~0; |
74 | mMTData = 0; |
75 | mTypeName = 0; |
76 | |
77 | return *this; |
78 | } |
79 | |
80 | bool Object::(ULONG &) const |
81 | { |
82 | struct |
83 | { |
84 | #ifdef _WIN64 |
85 | ULONG _alignpad; |
86 | #endif |
87 | ULONG SyncBlockValue; // the Index and the Bits |
88 | }; |
89 | |
90 | ObjectHeader ; |
91 | |
92 | if (SUCCEEDED(rvCache->Read(TO_TADDR(GetAddress() - sizeof(ObjectHeader)), &header, sizeof(ObjectHeader), NULL))) |
93 | { |
94 | outHeader = header.SyncBlockValue; |
95 | return true; |
96 | } |
97 | |
98 | return false; |
99 | } |
100 | |
101 | |
102 | ULONG Object::() const |
103 | { |
104 | ULONG toReturn = 0; |
105 | if (!TryGetHeader(toReturn)) |
106 | sos::Throw<DataRead>("Failed to get header for object %p." , GetAddress()); |
107 | |
108 | return toReturn; |
109 | } |
110 | |
    // Returns the object's method table, reading it lazily from the first
    // pointer-sized slot of the object. Throws DataRead if the read fails,
    // HeapCorruption if the slot contains NULL.
    TADDR Object::GetMT() const
    {
        if (mMT == NULL)
        {
            TADDR temp;
            if (FAILED(MOVE(temp, mAddress)))
                sos::Throw<DataRead>("Object %s has an invalid method table." , DMLListNearObj(mAddress));

            if (temp == NULL)
                sos::Throw<HeapCorruption>("Object %s has an invalid method table." , DMLListNearObj(mAddress));

            // Mask off the low two bits, which the GC uses as mark bits and
            // are not part of the method table pointer.
            mMT = temp & ~3;
        }

        return mMT;
    }
127 | |
    // Returns the element type handle for array objects, or NULL when the
    // (already known) method table says this is not an array.
    TADDR Object::GetComponentMT() const
    {
        if (mMT != NULL && mMT != sos::MethodTable::GetArrayMT())
            return NULL;

        DacpObjectData objData;
        if (FAILED(objData.Request(g_sos, TO_CDADDR(mAddress))))
            sos::Throw<DataRead>("Failed to request object data for %s." , DMLListNearObj(mAddress));

        // The object data request also gives us the method table; cache it
        // if we didn't have it yet (masking the GC mark bits).
        if (mMT == NULL)
            mMT = TO_TADDR(objData.MethodTable) & ~3;

        return TO_TADDR(objData.ElementTypeHandle);
    }
142 | |
    // Returns the object's type name, building and caching it on first use.
    // Never returns NULL: a placeholder is returned if name creation failed.
    const WCHAR *Object::GetTypeName() const
    {
        if (mTypeName == NULL)
            mTypeName = CreateMethodTableName(GetMT(), GetComponentMT());


        if (mTypeName == NULL)
            return W("<error>" );

        return mTypeName;
    }
154 | |
    // Lazily requests the DAC method table data for this object and caches
    // it in mMTData. On failure the partially-constructed cache is released
    // before throwing DataRead, so mMTData is never left dangling.
    void Object::FillMTData() const
    {
        if (mMTData == NULL)
        {
            mMTData = new DacpMethodTableData;
            if (FAILED(mMTData->Request(g_sos, GetMT())))
            {
                delete mMTData;
                mMTData = NULL;
                sos::Throw<DataRead>("Could not request method table data for object %p (MethodTable: %p)." , mAddress, mMT);
            }
        }
    }
168 | |
169 | |
    // Computes the object's size and whether it contains GC pointers,
    // caching per-method-table information in g_special_mtCache so that
    // repeated walks over objects of the same type only hit the target once.
    void Object::CalculateSizeAndPointers() const
    {
        TADDR mt = GetMT();
        MethodTableInfo* info = g_special_mtCache.Lookup((DWORD_PTR)mt);
        if (!info->IsInitialized())
        {
            // this is the first time we see this method table, so we need to get the information
            // from the target
            FillMTData();

            info->BaseSize = mMTData->BaseSize;
            info->ComponentSize = mMTData->ComponentSize;
            info->bContainsPointers = mMTData->bContainsPointers;

            // The following request doesn't work on older runtimes. For those, the
            // objects would just look like non-collectible, which is acceptable.
            DacpMethodTableCollectibleData mtcd;
            if (SUCCEEDED(mtcd.Request(g_sos, GetMT())))
            {
                info->bCollectible = mtcd.bCollectible;
                info->LoaderAllocatorObjectHandle = TO_TADDR(mtcd.LoaderAllocatorObjectHandle);
            }
        }

        // mSize == ~0 means the size has not been computed for this object yet.
        if (mSize == (size_t)~0)
        {
            mSize = info->BaseSize;
            if (info->ComponentSize)
            {
                // this is an array, so the size has to include the size of the components. We read the number
                // of components from the target and multiply by the component size to get the size.
                mSize += info->ComponentSize * GetNumComponents(GetAddress());
            }

            // On x64 we do an optimization to save 4 bytes in almost every string we create.
#ifdef _WIN64
            // Pad to min object size if necessary
            if (mSize < min_obj_size)
                mSize = min_obj_size;
#endif // _WIN64
        }

        mPointers = info->bContainsPointers != FALSE;
    }
214 | |
215 | size_t Object::GetSize() const |
216 | { |
217 | if (mSize == (size_t)~0) // poison value |
218 | { |
219 | CalculateSizeAndPointers(); |
220 | } |
221 | |
222 | SOS_Assert(mSize != (size_t)~0); |
223 | return mSize; |
224 | } |
225 | |
226 | |
227 | bool Object::HasPointers() const |
228 | { |
229 | if (mSize == (size_t)~0) |
230 | CalculateSizeAndPointers(); |
231 | |
232 | SOS_Assert(mSize != (size_t)~0); |
233 | return mPointers; |
234 | } |
235 | |
236 | |
237 | bool Object::VerifyMemberFields(TADDR pMT, TADDR obj) |
238 | { |
239 | WORD numInstanceFields = 0; |
240 | return VerifyMemberFields(pMT, obj, numInstanceFields); |
241 | } |
242 | |
243 | |
244 | bool Object::VerifyMemberFields(TADDR pMT, TADDR obj, WORD &numInstanceFields) |
245 | { |
246 | DacpMethodTableData vMethTable; |
247 | if (FAILED(vMethTable.Request(g_sos, pMT))) |
248 | return false; |
249 | |
250 | // Recursively verify the parent (this updates numInstanceFields) |
251 | if (vMethTable.ParentMethodTable) |
252 | { |
253 | if (!VerifyMemberFields(TO_TADDR(vMethTable.ParentMethodTable), obj, numInstanceFields)) |
254 | return false; |
255 | } |
256 | |
257 | DacpMethodTableFieldData vMethodTableFields; |
258 | |
259 | // Verify all fields on the object. |
260 | CLRDATA_ADDRESS dwAddr = vMethodTableFields.FirstField; |
261 | DacpFieldDescData vFieldDesc; |
262 | |
263 | while (numInstanceFields < vMethodTableFields.wNumInstanceFields) |
264 | { |
265 | CheckInterrupt(); |
266 | |
267 | if (FAILED(vFieldDesc.Request(g_sos, dwAddr))) |
268 | return false; |
269 | |
270 | if (vFieldDesc.Type >= ELEMENT_TYPE_MAX) |
271 | return false; |
272 | |
273 | dwAddr = vFieldDesc.NextField; |
274 | |
275 | if (!vFieldDesc.bIsStatic) |
276 | { |
277 | numInstanceFields++; |
278 | TADDR dwTmp = TO_TADDR(obj + vFieldDesc.dwOffset + sizeof(BaseObject)); |
279 | if (vFieldDesc.Type == ELEMENT_TYPE_CLASS) |
280 | { |
281 | // Is it a valid object? |
282 | if (FAILED(MOVE(dwTmp, dwTmp))) |
283 | return false; |
284 | |
285 | if (dwTmp != NULL) |
286 | { |
287 | DacpObjectData objData; |
288 | if (FAILED(objData.Request(g_sos, TO_CDADDR(dwTmp)))) |
289 | return false; |
290 | } |
291 | } |
292 | } |
293 | } |
294 | |
295 | return true; |
296 | } |
297 | |
    bool MethodTable::IsZombie(TADDR addr)
    {
        // Zombie objects are objects that reside in an unloaded AppDomain.
        // They are recognized purely by the placeholder name the runtime
        // reports for their method table.
        MethodTable mt = addr;
        return _wcscmp(mt.GetName(), W("<Unloaded Type>" )) == 0;
    }
304 | |
305 | void MethodTable::Clear() |
306 | { |
307 | if (mName) |
308 | { |
309 | delete [] mName; |
310 | mName = NULL; |
311 | } |
312 | } |
313 | |
    // Returns the type name for this method table, building and caching it
    // on first use. Never returns NULL: a placeholder is returned when name
    // creation fails.
    const WCHAR *MethodTable::GetName() const
    {
        if (mName == NULL)
            mName = CreateMethodTableName(mMT);

        if (mName == NULL)
            return W("<error>" );

        return mName;
    }
324 | |
    // Returns true if 'address' appears to be a valid object. When
    // 'verifyFields' is set, additionally walks the instance fields to look
    // for corruption -- skipped for free objects and for types living in
    // unloaded AppDomains, which have no meaningful field data.
    bool Object::IsValid(TADDR address, bool verifyFields)
    {
        DacpObjectData objectData;
        if (FAILED(objectData.Request(g_sos, TO_CDADDR(address))))
            return false;

        if (verifyFields &&
            objectData.MethodTable != g_special_usefulGlobals.FreeMethodTable &&
            !MethodTable::IsZombie(TO_TADDR(objectData.MethodTable)))
        {
            return VerifyMemberFields(TO_TADDR(objectData.MethodTable), address);
        }

        return true;
    }
340 | |
341 | bool Object::GetThinLock(ThinLockInfo &out) const |
342 | { |
343 | ULONG = GetHeader(); |
344 | if (header & (BIT_SBLK_IS_HASH_OR_SYNCBLKINDEX | BIT_SBLK_SPIN_LOCK)) |
345 | { |
346 | return false; |
347 | } |
348 | |
349 | out.ThreadId = header & SBLK_MASK_LOCK_THREADID; |
350 | out.Recursion = (header & SBLK_MASK_LOCK_RECLEVEL) >> SBLK_RECLEVEL_SHIFT; |
351 | |
352 | CLRDATA_ADDRESS threadPtr = NULL; |
353 | if (g_sos->GetThreadFromThinlockID(out.ThreadId, &threadPtr) != S_OK) |
354 | { |
355 | out.ThreadPtr = NULL; |
356 | } |
357 | else |
358 | { |
359 | out.ThreadPtr = TO_TADDR(threadPtr); |
360 | } |
361 | |
362 | return out.ThreadId != 0 && out.ThreadPtr != NULL; |
363 | } |
364 | |
    // Copies up to 'size' characters of this string object's contents into
    // 'buffer'. The object must be a string. Returns false on read failure.
    bool Object::GetStringData(__out_ecount(size) WCHAR *buffer, size_t size) const
    {
        SOS_Assert(IsString());
        SOS_Assert(buffer);
        SOS_Assert(size > 0);

        return SUCCEEDED(g_sos->GetObjectStringData(mAddress, (ULONG32)size, buffer, NULL));
    }
373 | |
    // Returns the character length of this string object, read directly from
    // the string object's layout. The object must be a string.
    size_t Object::GetStringLength() const
    {
        SOS_Assert(IsString());

        strobjInfo stInfo;
        if (FAILED(MOVE(stInfo, mAddress)))
            sos::Throw<DataRead>("Failed to read object data at %p." , mAddress);

        // We get the method table for free here, if we don't have it already.
        SOS_Assert((mMT == NULL) || (mMT == TO_TADDR(stInfo.methodTable)));
        if (mMT == NULL)
            mMT = TO_TADDR(stInfo.methodTable);

        return (size_t)stInfo.m_StringLength;
    }
389 | |
390 | |
    // Builds a reference iterator for 'obj'; the object's GCDesc is read
    // from the target during Init.
    RefIterator::RefIterator(TADDR obj, LinearReadCache *cache)
        : mCache(cache), mGCDesc(0), mArrayOfVC(false), mDone(false), mBuffer(0), mCurrSeries(0), mLoaderAllocatorObjectHandle(0),
          i(0), mCount(0), mCurr(0), mStop(0), mObject(obj), mObjSize(0)
    {
        Init();
    }
397 | |
    // Builds a reference iterator for 'obj' with a caller-supplied GCDesc,
    // avoiding the target read that Init would otherwise perform.
    RefIterator::RefIterator(TADDR obj, CGCDesc *desc, bool arrayOfVC, LinearReadCache *cache)
        : mCache(cache), mGCDesc(desc), mArrayOfVC(arrayOfVC), mDone(false), mBuffer(0), mCurrSeries(0), mLoaderAllocatorObjectHandle(0),
          i(0), mCount(0), mCurr(0), mStop(0), mObject(obj), mObjSize(0)
    {
        Init();
    }
404 | |
405 | RefIterator::~RefIterator() |
406 | { |
407 | if (mBuffer) |
408 | delete [] mBuffer; |
409 | } |
410 | |
    // Advances to the next object reference. Regular GCDesc series
    // references are walked first; for collectible types the LoaderAllocator
    // handle is then reported as one final reference.
    const RefIterator &RefIterator::operator++()
    {
        if (mDone)
            Throw<Exception>("Attempt to move past the end of the iterator." );

        if (mCurr == mLoaderAllocatorObjectHandle)
        {
            // The mLoaderAllocatorObjectHandle is always the last reference returned
            mDone = true;
            return *this;
        }

        if (!mArrayOfVC)
        {
            // Plain object: step through the pointer slots of the current
            // series, then move to the next (lower) series of the GCDesc.
            mCurr += sizeof(TADDR);
            if (mCurr >= mStop)
            {
                mCurrSeries--;
                if (mCurrSeries < mGCDesc->GetLowestSeries())
                {
                    mDone = true;
                }
                else
                {
                    mCurr = mObject + mCurrSeries->GetSeriesOffset();
                    mStop = mCurr + mCurrSeries->GetSeriesSize() + mObjSize;
                }
            }
        }
        else
        {
            // Array of value classes: val_serie describes alternating runs
            // of pointers (nptrs) and gaps (skip) repeated per element.
            mCurr += sizeof(TADDR);
            if (mCurr >= mStop)
            {
                int i_last = i;
                i--;

                // Wrap back to the first entry once the series is exhausted
                // (mCount holds the terminal index -- see Init).
                if (i == mCount)
                    i = 0;

                mCurr += mCurrSeries->val_serie[i_last].skip;
                mStop = mCurr + mCurrSeries->val_serie[i].nptrs * sizeof(TADDR);
            }

            if (mCurr >= mObject + mObjSize - plug_skew)
                mDone = true;
        }

        if (mDone && mLoaderAllocatorObjectHandle != NULL)
        {
            // The iteration over all regular object references is done, but there is one more
            // reference for collectible types - the LoaderAllocator for GC
            mCurr = mLoaderAllocatorObjectHandle;
            mDone = false;
        }

        return *this;
    }
469 | |
    // Dereferences the iterator: reads and returns the pointer value stored
    // in the current reference slot of the object.
    TADDR RefIterator::operator*() const
    {
        return ReadPointer(mCurr);
    }
474 | |
    // Returns the offset of the current reference slot from the start of
    // the object.
    TADDR RefIterator::GetOffset() const
    {
        return mCurr - mObject;
    }
479 | |
    // Reads the object's GCDesc (when it contains pointers) and positions
    // the iterator on the first reference. Collectible types additionally
    // report their LoaderAllocator handle as a reference, even when the
    // object holds no regular pointers.
    void RefIterator::Init()
    {
        TADDR mt = ReadPointer(mObject);
        BOOL bContainsPointers = FALSE;
        BOOL bCollectible = FALSE;
        TADDR loaderAllocatorObjectHandle;

        if (!GetSizeEfficient(mObject, mt, FALSE, mObjSize, bContainsPointers))
            Throw<DataRead>("Failed to get size of object." );

        if (!GetCollectibleDataEfficient(mt, bCollectible, loaderAllocatorObjectHandle))
            Throw<DataRead>("Failed to get collectible info of object." );

        if (!bContainsPointers && !bCollectible)
        {
            mDone = true;
            return;
        }

        if (bContainsPointers)
        {
            if (!mGCDesc)
            {
                // The GCDesc is laid out immediately *before* the method
                // table; the series count sits in the slot just below mt.
                int entries = 0;

                if (FAILED(MOVE(entries, mt-sizeof(TADDR))))
                    Throw<DataRead>("Failed to request number of entries." );

                // array of vc?
                if (entries < 0)
                {
                    // A negative count marks an array of value classes.
                    entries = -entries;
                    mArrayOfVC = true;
                }
                else
                {
                    mArrayOfVC = false;
                }

                size_t slots = 1 + entries * sizeof(CGCDescSeries)/sizeof(TADDR);

                ArrayHolder<TADDR> buffer = new TADDR[slots];

                ULONG fetched = 0;
                CLRDATA_ADDRESS address = TO_CDADDR(mt - slots*sizeof(TADDR));
                if (FAILED(g_ExtData->ReadVirtual(address, buffer, (ULONG)(slots*sizeof(TADDR)), &fetched)))
                    Throw<DataRead>("Failed to request GCDesc." );

                // Keep the raw buffer alive (freed in the destructor); the
                // CGCDesc pointer anchors at its end since the GCDesc grows
                // downward from the method table.
                mBuffer = buffer.Detach();
                mGCDesc = (CGCDesc*)(mBuffer + slots);
            }

            mCurrSeries = mGCDesc->GetHighestSeries();

            if (!mArrayOfVC)
            {
                mCurr = mObject + mCurrSeries->GetSeriesOffset();
                mStop = mCurr + mCurrSeries->GetSeriesSize() + mObjSize;
            }
            else
            {
                i = 0;
                mCurr = mObject + mCurrSeries->startoffset;
                mStop = mCurr + mCurrSeries->val_serie[i].nptrs * sizeof(TADDR);
                mCount = (int)mGCDesc->GetNumSeries();
            }

            // An empty first series means there is nothing at this position;
            // advance to find the first real reference (or the end).
            if (mCurr == mStop)
                operator++();
            else if (mCurr >= mObject + mObjSize - plug_skew)
                mDone = true;
        }
        else
        {
            mDone = true;
        }

        if (bCollectible)
        {
            mLoaderAllocatorObjectHandle = loaderAllocatorObjectHandle;
            if (mDone)
            {
                // There are no object references, but there is still a reference for
                // collectible types - the LoaderAllocator for GC
                mCurr = mLoaderAllocatorObjectHandle;
                mDone = false;
            }
        }
    }
569 | |
570 | |
    // Sentinel bounds meaning "walk the entire heap" when passed to WalkHeap.
    const TADDR GCHeap::HeapStart = 0;
    const TADDR GCHeap::HeapEnd = ~0;
573 | |
    // Starts a heap walk over the address range [start, stop], beginning at
    // the first (max generation) segment of the first heap.
    ObjectIterator::ObjectIterator(const DacpGcHeapDetails *heap, int numHeaps, TADDR start, TADDR stop)
        : bLarge(false), mCurrObj(0), mLastObj(0), mStart(start), mEnd(stop), mSegmentEnd(0), mHeaps(heap),
          mNumHeaps(numHeaps), mCurrHeap(0)
    {
        mAllocInfo.Init();
        SOS_Assert(numHeaps > 0);

        TADDR segStart = TO_TADDR(mHeaps[0].generation_table[GetMaxGeneration()].start_segment);
        if (FAILED(mSegment.Request(g_sos, segStart, mHeaps[0])))
            sos::Throw<DataRead>("Could not request segment data at %p." , segStart);

        mCurrObj = mStart < TO_TADDR(mSegment.mem) ? TO_TADDR(mSegment.mem) : mStart;
        // The ephemeral segment is only valid up to alloc_allocated; other
        // segments are valid up to their 'allocated' mark.
        mSegmentEnd = (segStart == TO_TADDR(mHeaps[0].ephemeral_heap_segment)) ?
                            TO_TADDR(mHeaps[0].alloc_allocated) :
                            TO_TADDR(mSegment.allocated);

        CheckSegmentRange();
    }
592 | |
    // Advances to the next segment. Visiting order per heap: all regular
    // segments, then the large object segments (generation max+1); then on
    // to the next heap. Returns false when all segments are exhausted.
    bool ObjectIterator::NextSegment()
    {
        if (mCurrHeap >= mNumHeaps)
            return false;

        TADDR next = TO_TADDR(mSegment.next);
        if (next == NULL)
        {
            if (bLarge)
            {
                // Finished the large object segments too: move on to the
                // next heap's regular segments.
                mCurrHeap++;
                if (mCurrHeap == mNumHeaps)
                    return false;

                bLarge = false;
                next = TO_TADDR(mHeaps[mCurrHeap].generation_table[GetMaxGeneration()].start_segment);
            }
            else
            {
                // Regular segments exhausted: switch to this heap's large
                // object segment chain.
                bLarge = true;
                next = TO_TADDR(mHeaps[mCurrHeap].generation_table[GetMaxGeneration()+1].start_segment);
            }
        }

        SOS_Assert(next != NULL);
        if (FAILED(mSegment.Request(g_sos, next, mHeaps[mCurrHeap])))
            sos::Throw<DataRead>("Failed to request segment data at %p." , next);

        mLastObj = 0;
        mCurrObj = mStart < TO_TADDR(mSegment.mem) ? TO_TADDR(mSegment.mem) : mStart;
        // Ephemeral segments are only valid up to alloc_allocated.
        mSegmentEnd = (next == TO_TADDR(mHeaps[mCurrHeap].ephemeral_heap_segment)) ?
                            TO_TADDR(mHeaps[mCurrHeap].alloc_allocated) :
                            TO_TADDR(mSegment.allocated);
        return CheckSegmentRange();
    }
628 | |
    // Skips segments until one overlaps the requested [mStart, mEnd] range,
    // then positions mCurrObj on the first aligned object at or after
    // mStart. Returns false when no remaining segment overlaps the range.
    bool ObjectIterator::CheckSegmentRange()
    {
        CheckInterrupt();

        while (!MemOverlap(mStart, mEnd, TO_TADDR(mSegment.mem), mSegmentEnd))
            if (!NextSegment())
                return false;

        // At this point we know that the current segment contains objects in
        // the correct range. However, there's no telling if the user gave us
        // a starting address that corresponds to an object. If mStart is a
        // valid object, then we'll just start there. If it's not we'll need
        // to walk the segment from the beginning to find the first aligned
        // object on or after mStart.
        if (mCurrObj == mStart && !Object::IsValid(mStart))
        {
            // It's possible mCurrObj will equal mStart after this. That's fine.
            // It means that the starting object is corrupt (and we'll figure
            // that when the user calls GetNext), or IsValid was wrong.
            mLastObj = 0;
            mCurrObj = TO_TADDR(mSegment.mem);
            while (mCurrObj < mStart)
                MoveToNextObject();
        }

        return true;
    }
656 | |
657 | |
658 | |
    // Returns the object at the iterator's current position.
    const Object &ObjectIterator::operator*() const
    {
        AssertSanity();
        return mCurrObj;
    }
664 | |
665 | |
    // Member access to the object at the iterator's current position.
    const Object *ObjectIterator::operator->() const
    {
        AssertSanity();
        return &mCurrObj;
    }
671 | |
    //Object ObjectIterator::GetNext()
    // Advances to the next object on the heap. Must not be called once the
    // iterator is exhausted (asserts). May throw if the current object's
    // method table cannot be read; see MoveToNextObjectCarefully for a
    // version that recovers by skipping to the next segment.
    const ObjectIterator &ObjectIterator::operator++()
    {
        CheckInterrupt();

        // Assert we aren't done walking the heap.
        SOS_Assert(*this);
        AssertSanity();

        MoveToNextObject();
        return *this;
    }
684 | |
    // Advances past the current object like operator++, but tolerates a
    // corrupt current object by skipping to the next segment instead of
    // propagating the exception.
    void ObjectIterator::MoveToNextObjectCarefully()
    {
        CheckInterrupt();

        SOS_Assert(*this);
        AssertSanity();

        // Move to NextObject won't generally throw unless it fails to request the
        // MethodTable of the object. At which point we won't know how large the
        // current object is, nor how to move past it. In this case we'll simply
        // move to the next segment if possible to continue iterating from there.
        try
        {
            MoveToNextObject();
        }
        catch(const sos::Exception &)
        {
            NextSegment();
        }
    }
705 | |
    void ObjectIterator::AssertSanity() const
    {
        // Assert that we are in a sane state. Function which call this assume two things:
        //   1. That the current object is within the segment bounds.
        //   2. That the current object is within the requested memory range.
        SOS_Assert(mCurrObj >= TO_TADDR(mSegment.mem));
        SOS_Assert(mCurrObj <= TO_TADDR(mSegmentEnd - Align(min_obj_size)));

        SOS_Assert(mCurrObj >= mStart);
        SOS_Assert(mCurrObj <= mEnd);
    }
717 | |
    // Steps mCurrObj past the current object, skipping over unused tails of
    // allocation contexts, and rolls over to the next segment when the end
    // of this one (or of the requested range) is reached.
    void ObjectIterator::MoveToNextObject()
    {
        // Object::GetSize can be unaligned, so we must align it ourselves.
        size_t size = (bLarge ? AlignLarge(mCurrObj.GetSize()) : Align(mCurrObj.GetSize()));

        mLastObj = mCurrObj;
        mCurrObj = mCurrObj.GetAddress() + size;

        if (!bLarge)
        {
            // Is this the end of an allocation context? We need to know this because there can be
            // allocated memory at the end of an allocation context that doesn't yet contain any objects.
            // This happens because we actually allocate a minimum amount of memory (the allocation quantum)
            // whenever we need to get more memory. Typically, a single allocation request won't fill this
            // block, so we'll fulfill subsequent requests out of the remainder of the block until it's
            // depleted.
            int i;
            for (i = 0; i < mAllocInfo.num; i ++)
            {
                if (mCurrObj == TO_TADDR(mAllocInfo.array[i].alloc_ptr)) // end of objects in this context
                {
                    // Set mCurrObj to point after the context (alloc_limit is the end of the allocation context).
                    mCurrObj = TO_TADDR(mAllocInfo.array[i].alloc_limit) + Align(min_obj_size);
                    break;
                }
            }

            // We also need to look at the gen0 alloc context.
            if (mCurrObj == TO_TADDR(mHeaps[mCurrHeap].generation_table[0].allocContextPtr))
                mCurrObj = TO_TADDR(mHeaps[mCurrHeap].generation_table[0].allocContextLimit) + Align(min_obj_size);
        }

        if (mCurrObj > mEnd || mCurrObj >= mSegmentEnd)
            NextSegment();
    }
753 | |
    // Starts iteration at sync block index 1 (sync block indices are
    // 1-based) and queries the total count from the target.
    SyncBlkIterator::SyncBlkIterator()
        : mCurr(1), mTotal(0)
    {
        // If DacpSyncBlockData::Request fails with the call "1", then it means
        // there are no SyncBlocks in the process.
        DacpSyncBlockData syncBlockData;
        if (SUCCEEDED(syncBlockData.Request(g_sos, 1)))
            mTotal = syncBlockData.SyncBlockCount;

        mSyncBlk = mCurr;
    }
765 | |
    // Requests the GC heap data and the per-heap details: one entry for
    // workstation GC, one per heap for server GC. Throws DataRead on any
    // failed request.
    GCHeap::GCHeap()
    {
        if (FAILED(mHeapData.Request(g_sos)))
            sos::Throw<DataRead>("Failed to request GC heap data." );

        if (mHeapData.bServerMode)
        {
            mNumHeaps = mHeapData.HeapCount;
            DWORD dwAllocSize = 0;
            // Guard the alloca size computation against overflow.
            if (!ClrSafeInt<DWORD>::multiply(sizeof(CLRDATA_ADDRESS), mNumHeaps, dwAllocSize))
                sos::Throw<Exception>("Failed to get GCHeaps: Integer overflow." );

            CLRDATA_ADDRESS *heapAddrs = (CLRDATA_ADDRESS*)alloca(dwAllocSize);
            if (FAILED(g_sos->GetGCHeapList(mNumHeaps, heapAddrs, NULL)))
                sos::Throw<DataRead>("Failed to get GCHeaps." );

            mHeaps = new DacpGcHeapDetails[mNumHeaps];

            for (int i = 0; i < mNumHeaps; i++)
                if (FAILED(mHeaps[i].Request(g_sos, heapAddrs[i])))
                    sos::Throw<DataRead>("Failed to get GC heap details at %p." , heapAddrs[i]);
        }
        else
        {
            // Workstation GC: a single heap, requested without an address.
            mHeaps = new DacpGcHeapDetails[1];
            mNumHeaps = 1;

            if (FAILED(mHeaps[0].Request(g_sos)))
                sos::Throw<DataRead>("Failed to request GC details data." );
        }
    }
797 | |
    // Returns an iterator over all heap objects in [start, stop]; pass
    // HeapStart/HeapEnd to walk everything.
    ObjectIterator GCHeap::WalkHeap(TADDR start, TADDR stop) const
    {
        return ObjectIterator(mHeaps, mNumHeaps, start, stop);
    }
802 | |
803 | bool GCHeap::AreGCStructuresValid() const |
804 | { |
805 | return mHeapData.bGcStructuresValid != FALSE; |
806 | } |
807 | |
808 | // SyncBlk class |
    // Default-constructs an unbound SyncBlk (index 0 is invalid; the
    // getters assert against it).
    SyncBlk::SyncBlk()
        : mIndex(0)
    {
    }
813 | |
    // Binds to the sync block at 'index' and immediately requests its data.
    SyncBlk::SyncBlk(int index)
        : mIndex(index)
    {
        Init();
    }
819 | |
    // Rebinds to the sync block at 'index' and refreshes the cached data.
    const SyncBlk &SyncBlk::operator=(int index)
    {
        mIndex = index;
        Init();

        return *this;
    }
827 | |
    // Requests the DAC data for the current sync block index; throws
    // DataRead on failure.
    void SyncBlk::Init()
    {
        if (FAILED(mData.Request(g_sos, mIndex)))
            sos::Throw<DataRead>("Failed to request SyncBlk at index %d." , mIndex);
    }
833 | |
    // Address of the sync block itself. Valid only on a bound (non-zero
    // index) SyncBlk.
    TADDR SyncBlk::GetAddress() const
    {
        SOS_Assert(mIndex);
        return TO_TADDR(mData.SyncBlockPointer);
    }
839 | |
    // Address of the managed object this sync block belongs to.
    TADDR SyncBlk::GetObject() const
    {
        SOS_Assert(mIndex);
        return TO_TADDR(mData.Object);
    }
845 | |
    // The 1-based sync block index this object is bound to (0 if unbound).
    int SyncBlk::GetIndex() const
    {
        return mIndex;
    }
850 | |
    // Whether this sync block slot is on the free list (not in use).
    bool SyncBlk::IsFree() const
    {
        SOS_Assert(mIndex);
        return mData.bFree != FALSE;
    }
856 | |
    // The monitor's held count as reported by the DAC.
    unsigned int SyncBlk::GetMonitorHeldCount() const
    {
        SOS_Assert(mIndex);
        return mData.MonitorHeld;
    }
862 | |
    // The monitor's recursion (re-entry) level.
    unsigned int SyncBlk::GetRecursion() const
    {
        SOS_Assert(mIndex);
        return mData.Recursion;
    }
868 | |
    // COM interop flags for this sync block; always 0 when the runtime was
    // built without COM interop support.
    DWORD SyncBlk::GetCOMFlags() const
    {
        SOS_Assert(mIndex);
#ifdef FEATURE_COMINTEROP
        return mData.COMFlags;
#else
        return 0;
#endif
    }
878 | |
    // Number of additional threads waiting on this sync block's monitor.
    unsigned int SyncBlk::GetAdditionalThreadCount() const
    {
        SOS_Assert(mIndex);
        return mData.AdditionalThreadCount;
    }
884 | |
    // Address of the Thread object currently holding this sync block's
    // monitor (NULL when unowned).
    TADDR SyncBlk::GetHoldingThread() const
    {
        SOS_Assert(mIndex);
        return TO_TADDR(mData.HoldingThread);
    }
890 | |
    // Address of the AppDomain associated with this sync block.
    TADDR SyncBlk::GetAppDomain() const
    {
        SOS_Assert(mIndex);
        return TO_TADDR(mData.appDomainPtr);
    }
896 | |
897 | void (TADDR addr, unsigned int size, __inout_ecount(size) WCHAR *buffer) |
898 | { |
899 | try |
900 | { |
901 | sos::Object obj(addr); |
902 | TADDR mtAddr = obj.GetMT(); |
903 | bool isArray = sos::MethodTable::IsArrayMT(mtAddr); |
904 | bool isString = obj.IsString(); |
905 | |
906 | sos::MethodTable mt(isArray ? obj.GetComponentMT() : mtAddr); |
907 | |
908 | if (isArray) |
909 | { |
910 | swprintf_s(buffer, size, W("%s[]" ), mt.GetName()); |
911 | } |
912 | else if (isString) |
913 | { |
914 | WCHAR str[32]; |
915 | obj.GetStringData(str, _countof(str)); |
916 | |
917 | _snwprintf_s(buffer, size, _TRUNCATE, W("%s: \"%s\"" ), mt.GetName(), str); |
918 | } |
919 | else |
920 | { |
921 | _snwprintf_s(buffer, size, _TRUNCATE, W("%s" ), mt.GetName()); |
922 | } |
923 | } |
924 | catch (const sos::Exception &e) |
925 | { |
926 | int len = MultiByteToWideChar(CP_ACP, 0, e.what(), -1, NULL, 0); |
927 | |
928 | ArrayHolder<WCHAR> tmp = new WCHAR[len]; |
929 | MultiByteToWideChar(CP_ACP, 0, e.what(), -1, (WCHAR*)tmp, len); |
930 | |
931 | swprintf_s(buffer, size, W("<invalid object: '%s'>" ), (WCHAR*)tmp); |
932 | } |
933 | } |
934 | } |
935 | |