// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
//

// Perf counter and ETW event support for the GC, plus related GCHeap entry points.

#include "gcpriv.h"

#ifndef DACCESS_COMPILE

COUNTER_ONLY(PERF_COUNTER_TIMER_PRECISION g_TotalTimeInGC = 0);
COUNTER_ONLY(PERF_COUNTER_TIMER_PRECISION g_TotalTimeSinceLastGCEnd = 0);

#if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE)
size_t g_GenerationSizes[NUMBERGENERATIONS];
size_t g_GenerationPromotedSizes[NUMBERGENERATIONS];
#endif // ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE

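// Called when a GC is starting: updates the allocation-related perf counters and fires
// the GCStart_V2 event together with a GCGenerationRange event for each generation.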
void GCHeap::UpdatePreGCCounters()
{
#if defined(ENABLE_PERF_COUNTERS)
#ifdef MULTIPLE_HEAPS
    gc_heap* hp = 0;
#else
    gc_heap* hp = pGenGCHeap;
#endif //MULTIPLE_HEAPS

    size_t allocation_0 = 0;
    size_t allocation_3 = 0;

    // Publish perf stats
    g_TotalTimeInGC = GET_CYCLE_COUNT();

#ifdef MULTIPLE_HEAPS
    int hn = 0;
    for (hn = 0; hn < gc_heap::n_heaps; hn++)
    {
        hp = gc_heap::g_heaps [hn];

        allocation_0 +=
            dd_desired_allocation (hp->dynamic_data_of (0)) -
            dd_new_allocation (hp->dynamic_data_of (0));
        allocation_3 +=
            dd_desired_allocation (hp->dynamic_data_of (max_generation+1)) -
            dd_new_allocation (hp->dynamic_data_of (max_generation+1));
    }
#else
    allocation_0 =
        dd_desired_allocation (hp->dynamic_data_of (0)) -
        dd_new_allocation (hp->dynamic_data_of (0));
    allocation_3 =
        dd_desired_allocation (hp->dynamic_data_of (max_generation+1)) -
        dd_new_allocation (hp->dynamic_data_of (max_generation+1));

#endif //MULTIPLE_HEAPS

    GetPerfCounters().m_GC.cbAlloc += allocation_0;
    GetPerfCounters().m_GC.cbAlloc += allocation_3;
    GetPerfCounters().m_GC.cbLargeAlloc += allocation_3;

#ifdef _PREFAST_
    // Prefix complains about dereferencing hp in the workstation build even though we only
    // access static members this way; there is no known way to silence it except this workaround:
    PREFIX_ASSUME( hp != NULL);
#endif //_PREFAST_
    if (hp->settings.reason == reason_induced IN_STRESS_HEAP( && !hp->settings.stress_induced))
    {
        COUNTER_ONLY(GetPerfCounters().m_GC.cInducedGCs++);
    }

    GetPerfCounters().m_Security.timeRTchecks = 0;
    GetPerfCounters().m_Security.timeRTchecksBase = 1; // To avoid divide by zero

#endif //ENABLE_PERF_COUNTERS

#ifdef MULTIPLE_HEAPS
    // Take the settings from the first heap.
    gc_mechanisms *pSettings = &gc_heap::g_heaps[0]->settings;
#else
    gc_mechanisms *pSettings = &gc_heap::settings;
#endif //MULTIPLE_HEAPS

    uint32_t count = (uint32_t)pSettings->gc_index;
    uint32_t depth = (uint32_t)pSettings->condemned_generation;
    uint32_t reason = (uint32_t)pSettings->reason;
    gc_etw_type type = gc_etw_type_ngc;
    if (pSettings->concurrent)
    {
        type = gc_etw_type_bgc;
    }
#ifdef BACKGROUND_GC
    else if (depth < max_generation && pSettings->background_p)
    {
        type = gc_etw_type_fgc;
    }
#endif // BACKGROUND_GC

    FIRE_EVENT(GCStart_V2, count, depth, reason, static_cast<uint32_t>(type));
    g_theGCHeap->DiagDescrGenerations([](void*, int generation, uint8_t* rangeStart, uint8_t* rangeEnd, uint8_t* rangeEndReserved)
    {
        uint64_t range = static_cast<uint64_t>(rangeEnd - rangeStart);
        uint64_t rangeReserved = static_cast<uint64_t>(rangeEndReserved - rangeStart);
        FIRE_EVENT(GCGenerationRange, generation, rangeStart, range, rangeReserved);
    }, nullptr);
}

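// Called when a GC has finished: recomputes per-generation size and promotion statistics,
// fires the GCEnd_V1 and GCHeapStats_V1 events, and refreshes the GC perf counters
// (heap sizes, collection counts, handle/sync block/pinned object counts, and time in GC).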
void GCHeap::UpdatePostGCCounters()
{
    totalSurvivedSize = gc_heap::get_total_survived_size();

    //
    // The following is for instrumentation.
    //
    // Calculate the common ones for ETW and perf counters.
#if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE)
#ifdef MULTIPLE_HEAPS
    // Take the first heap.
    gc_heap* hp1 = gc_heap::g_heaps[0];
    gc_mechanisms *pSettings = &hp1->settings;
#else
    gc_heap* hp1 = pGenGCHeap;
    gc_mechanisms *pSettings = &gc_heap::settings;
#endif //MULTIPLE_HEAPS

    int condemned_gen = pSettings->condemned_generation;

    memset (g_GenerationSizes, 0, sizeof (g_GenerationSizes));
    memset (g_GenerationPromotedSizes, 0, sizeof (g_GenerationPromotedSizes));

    size_t total_num_gc_handles = g_dwHandles;
    uint32_t total_num_sync_blocks = GCToEEInterface::GetActiveSyncBlockCount();

    // Note: this is used only for the perf counters, for legacy reasons. What the perf
    // counters report as "gen0 size" has always really been the gen0 budget, which made
    // (some) sense at the time. For backward compatibility we keep calculating it the same
    // way. For ETW we report the true gen0 size (the gen0 budget is also reported in a
    // separate event).
    size_t youngest_budget = 0;

    size_t promoted_finalization_mem = 0;
    size_t total_num_pinned_objects = gc_heap::get_total_pinned_objects();

#ifndef FEATURE_REDHAWK
    // if a max gen garbage collection was performed, resync the GC Handle counter;
    // if threads are currently suspended, we do not need to obtain a lock on each handle table
    if (condemned_gen == max_generation)
        total_num_gc_handles = HndCountAllHandles(!IsGCInProgress());
#endif //FEATURE_REDHAWK

    // per generation calculation.
    for (int gen_index = 0; gen_index <= (max_generation+1); gen_index++)
    {
#ifdef MULTIPLE_HEAPS
        int hn = 0;
        for (hn = 0; hn < gc_heap::n_heaps; hn++)
        {
            gc_heap* hp = gc_heap::g_heaps[hn];
#else
        gc_heap* hp = pGenGCHeap;
        {
#endif //MULTIPLE_HEAPS
            dynamic_data* dd = hp->dynamic_data_of (gen_index);

            if (gen_index == 0)
            {
                youngest_budget += dd_desired_allocation (hp->dynamic_data_of (gen_index));
            }

            g_GenerationSizes[gen_index] += hp->generation_size (gen_index);

            if (gen_index <= condemned_gen)
            {
                g_GenerationPromotedSizes[gen_index] += dd_promoted_size (dd);
            }

            if ((gen_index == (max_generation+1)) && (condemned_gen == max_generation))
            {
                g_GenerationPromotedSizes[gen_index] += dd_promoted_size (dd);
            }

            if (gen_index == 0)
            {
                promoted_finalization_mem += dd_freach_previous_promotion (dd);
            }
#ifdef MULTIPLE_HEAPS
        }
#else
        }
#endif //MULTIPLE_HEAPS
    }
#endif //ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE

#ifdef FEATURE_EVENT_TRACE
    g_theGCHeap->DiagDescrGenerations([](void*, int generation, uint8_t* rangeStart, uint8_t* rangeEnd, uint8_t* rangeEndReserved)
    {
        uint64_t range = static_cast<uint64_t>(rangeEnd - rangeStart);
        uint64_t rangeReserved = static_cast<uint64_t>(rangeEndReserved - rangeStart);
        FIRE_EVENT(GCGenerationRange, generation, rangeStart, range, rangeReserved);
    }, nullptr);

    FIRE_EVENT(GCEnd_V1, static_cast<uint32_t>(pSettings->gc_index), condemned_gen);

#ifdef SIMPLE_DPRINTF
    dprintf (2, ("GC#%d: 0: %Id(%Id); 1: %Id(%Id); 2: %Id(%Id); 3: %Id(%Id)",
        pSettings->gc_index,
        g_GenerationSizes[0], g_GenerationPromotedSizes[0],
        g_GenerationSizes[1], g_GenerationPromotedSizes[1],
        g_GenerationSizes[2], g_GenerationPromotedSizes[2],
        g_GenerationSizes[3], g_GenerationPromotedSizes[3]));
#endif //SIMPLE_DPRINTF

    FIRE_EVENT(GCHeapStats_V1,
        g_GenerationSizes[0], g_GenerationPromotedSizes[0],
        g_GenerationSizes[1], g_GenerationPromotedSizes[1],
        g_GenerationSizes[2], g_GenerationPromotedSizes[2],
        g_GenerationSizes[3], g_GenerationPromotedSizes[3],
        promoted_finalization_mem,
        GetFinalizablePromotedCount(),
        static_cast<uint32_t>(total_num_pinned_objects),
        total_num_sync_blocks,
        static_cast<uint32_t>(total_num_gc_handles));
#endif // FEATURE_EVENT_TRACE

#if defined(ENABLE_PERF_COUNTERS)
    for (int gen_index = 0; gen_index <= (max_generation+1); gen_index++)
    {
        _ASSERTE(FitsIn<size_t>(g_GenerationSizes[gen_index]));
        _ASSERTE(FitsIn<size_t>(g_GenerationPromotedSizes[gen_index]));

        if (gen_index == (max_generation+1))
        {
            GetPerfCounters().m_GC.cLrgObjSize = static_cast<size_t>(g_GenerationSizes[gen_index]);
        }
        else
        {
            GetPerfCounters().m_GC.cGenHeapSize[gen_index] = ((gen_index == 0) ?
                youngest_budget :
                static_cast<size_t>(g_GenerationSizes[gen_index]));
        }

        // the perf counters only count the promoted size for gen0 and gen1.
        if (gen_index < max_generation)
        {
            GetPerfCounters().m_GC.cbPromotedMem[gen_index] = static_cast<size_t>(g_GenerationPromotedSizes[gen_index]);
        }

        if (gen_index <= max_generation)
        {
            GetPerfCounters().m_GC.cGenCollections[gen_index] =
                dd_collection_count (hp1->dynamic_data_of (gen_index));
        }
    }

    // Committed and reserved memory
    {
        size_t committed_mem = 0;
        size_t reserved_mem = 0;
#ifdef MULTIPLE_HEAPS
        int hn = 0;
        for (hn = 0; hn < gc_heap::n_heaps; hn++)
        {
            gc_heap* hp = gc_heap::g_heaps [hn];
#else
        gc_heap* hp = pGenGCHeap;
        {
#endif //MULTIPLE_HEAPS
            heap_segment* seg = generation_start_segment (hp->generation_of (max_generation));
            while (seg)
            {
                committed_mem += heap_segment_committed (seg) - heap_segment_mem (seg);
                reserved_mem += heap_segment_reserved (seg) - heap_segment_mem (seg);
                seg = heap_segment_next (seg);
            }
            // Do the same for the large object segments.
            seg = generation_start_segment (hp->generation_of (max_generation + 1));
            while (seg)
            {
                committed_mem += heap_segment_committed (seg) -
                                 heap_segment_mem (seg);
                reserved_mem += heap_segment_reserved (seg) -
                                heap_segment_mem (seg);
                seg = heap_segment_next (seg);
            }
#ifdef MULTIPLE_HEAPS
        }
#else
        }
#endif //MULTIPLE_HEAPS

        GetPerfCounters().m_GC.cTotalCommittedBytes = committed_mem;
        GetPerfCounters().m_GC.cTotalReservedBytes = reserved_mem;
    }

    // Use the finalization promotion figures computed during the per-generation pass above.
    GetPerfCounters().m_GC.cbPromotedFinalizationMem = promoted_finalization_mem;
    GetPerfCounters().m_GC.cSurviveFinalize = GetFinalizablePromotedCount();

    // Compute Time in GC
    PERF_COUNTER_TIMER_PRECISION _currentPerfCounterTimer = GET_CYCLE_COUNT();

    g_TotalTimeInGC = _currentPerfCounterTimer - g_TotalTimeInGC;
    PERF_COUNTER_TIMER_PRECISION _timeInGCBase = (_currentPerfCounterTimer - g_TotalTimeSinceLastGCEnd);

    if (_timeInGCBase < g_TotalTimeInGC)
        g_TotalTimeInGC = 0;    // Unlikely except on some SMP machines; we could ensure that
                                // _timeInGCBase >= g_TotalTimeInGC by setting affinity in GET_CYCLE_COUNT.

    while (_timeInGCBase > UINT_MAX)
    {
        _timeInGCBase = _timeInGCBase >> 8;
        g_TotalTimeInGC = g_TotalTimeInGC >> 8;
    }

    // Update Total Time
    GetPerfCounters().m_GC.timeInGC = (uint32_t)g_TotalTimeInGC;
    GetPerfCounters().m_GC.timeInGCBase = (uint32_t)_timeInGCBase;

    if (!GetPerfCounters().m_GC.cProcessID)
        GetPerfCounters().m_GC.cProcessID = (size_t)GetCurrentProcessId();

    g_TotalTimeSinceLastGCEnd = _currentPerfCounterTimer;

    GetPerfCounters().m_GC.cPinnedObj = total_num_pinned_objects;
    GetPerfCounters().m_GC.cHandles = total_num_gc_handles;
    GetPerfCounters().m_GC.cSinkBlocks = total_num_sync_blocks;
#endif //ENABLE_PERF_COUNTERS
}

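// Approximate size of live objects: bytes that survived the last GC plus bytes
// allocated since then.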
size_t GCHeap::GetCurrentObjSize()
{
    return (totalSurvivedSize + gc_heap::get_total_allocated());
}

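// Returns the start time recorded for the last GC that collected the given generation.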
size_t GCHeap::GetLastGCStartTime(int generation)
{
#ifdef MULTIPLE_HEAPS
    gc_heap* hp = gc_heap::g_heaps[0];
#else
    gc_heap* hp = pGenGCHeap;
#endif //MULTIPLE_HEAPS

    return dd_time_clock (hp->dynamic_data_of (generation));
}

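// Returns the elapsed time recorded for the last GC that collected the given generation.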
size_t GCHeap::GetLastGCDuration(int generation)
{
#ifdef MULTIPLE_HEAPS
    gc_heap* hp = gc_heap::g_heaps[0];
#else
    gc_heap* hp = pGenGCHeap;
#endif //MULTIPLE_HEAPS

    return dd_gc_elapsed_time (hp->dynamic_data_of (generation));
}

size_t GetHighPrecisionTimeStamp();

size_t GCHeap::GetNow()
{
    return GetHighPrecisionTimeStamp();
}

bool GCHeap::IsGCInProgressHelper (bool bConsiderGCStart)
{
    return GcInProgress || (bConsiderGCStart ? VolatileLoad(&gc_heap::gc_started) : FALSE);
}

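// Blocks until the GC in progress (if any) completes. When bConsiderGCStart is true,
// this also waits out a GC that has been signalled as started but is not yet marked
// as in progress.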
uint32_t GCHeap::WaitUntilGCComplete(bool bConsiderGCStart)
{
    if (bConsiderGCStart)
    {
        if (gc_heap::gc_started)
        {
            gc_heap::wait_for_gc_done();
        }
    }

    uint32_t dwWaitResult = NOERROR;

    if (GcInProgress)
    {
        ASSERT( WaitForGCEvent->IsValid() );

#ifdef DETECT_DEADLOCK
        // wait for GC to complete
    BlockAgain:
        dwWaitResult = WaitForGCEvent->Wait(DETECT_DEADLOCK_TIMEOUT, FALSE );

        if (dwWaitResult == WAIT_TIMEOUT)
        {
            // Even in retail, stop in the debugger if available.
            GCToOSInterface::DebugBreak();
            goto BlockAgain;
        }

#else //DETECT_DEADLOCK

        dwWaitResult = WaitForGCEvent->Wait(INFINITE, FALSE );

#endif //DETECT_DEADLOCK
    }

    return dwWaitResult;
}

void GCHeap::SetGCInProgress(bool fInProgress)
{
    GcInProgress = fInProgress;
}

void GCHeap::SetWaitForGCEvent()
{
    WaitForGCEvent->Set();
}

void GCHeap::ResetWaitForGCEvent()
{
    WaitForGCEvent->Reset();
}

void GCHeap::WaitUntilConcurrentGCComplete()
{
#ifdef BACKGROUND_GC
    if (pGenGCHeap->settings.concurrent)
        pGenGCHeap->background_gc_wait();
#endif //BACKGROUND_GC
}

bool GCHeap::IsConcurrentGCInProgress()
{
#ifdef BACKGROUND_GC
    return !!pGenGCHeap->settings.concurrent;
#else
    return false;
#endif //BACKGROUND_GC
}

#ifdef FEATURE_EVENT_TRACE
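// Fires the allocation tick event; gen_number 0 indicates a small object heap
// allocation, anything else is reported as a large object heap allocation.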
void gc_heap::fire_etw_allocation_event (size_t allocation_amount, int gen_number, uint8_t* object_address)
{
    gc_etw_alloc_kind kind = (gen_number == 0) ? gc_etw_alloc_soh : gc_etw_alloc_loh;
    FIRE_EVENT(GCAllocationTick_V3, static_cast<uint64_t>(allocation_amount), kind, heap_number, object_address);
}

void gc_heap::fire_etw_pin_object_event (uint8_t* object, uint8_t** ppObject)
{
    FIRE_EVENT(PinObjectAtGCTime, object, ppObject);
}
#endif // FEATURE_EVENT_TRACE

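// Waits on the given event. Unless no_mode_change is set, the thread is switched to
// preemptive mode for the duration of the wait so that it does not block a GC suspension.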
uint32_t gc_heap::user_thread_wait (GCEvent *event, BOOL no_mode_change, int time_out_ms)
{
    bool bToggleGC = false;
    uint32_t dwWaitResult = NOERROR;

    if (!no_mode_change)
    {
        bToggleGC = GCToEEInterface::EnablePreemptiveGC();
    }

    dwWaitResult = event->Wait(time_out_ms, FALSE);

    if (bToggleGC)
    {
        GCToEEInterface::DisablePreemptiveGC();
    }

    return dwWaitResult;
}

#ifdef BACKGROUND_GC
// Wait for the background GC to finish.
uint32_t gc_heap::background_gc_wait (alloc_wait_reason awr, int time_out_ms)
{
    dprintf(2, ("Waiting for the background GC to finish"));
    assert (background_gc_done_event.IsValid());
    fire_alloc_wait_event_begin (awr);
    uint32_t dwRet = user_thread_wait (&background_gc_done_event, FALSE, time_out_ms);
    fire_alloc_wait_event_end (awr);
    dprintf(2, ("Done waiting for the background GC"));

    return dwRet;
}

// Wait for the background GC to finish sweeping large objects.
void gc_heap::background_gc_wait_lh (alloc_wait_reason awr)
{
    dprintf(2, ("Waiting for the background sweep of the large object heap to finish"));
    assert (gc_lh_block_event.IsValid());
    fire_alloc_wait_event_begin (awr);
    user_thread_wait (&gc_lh_block_event, FALSE);
    fire_alloc_wait_event_end (awr);
    dprintf(2, ("Done waiting for the background large object sweep"));
}

#endif //BACKGROUND_GC


/******************************************************************************/
IGCHeapInternal* CreateGCHeap()
{
    // The same GCHeap class services both workstation (wks) and server (svr) GC.
    return new(nothrow) GCHeap();
}

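// Fires a GCCreateSegment_V1 event for every small object heap segment (including
// read-only segments) and every large object heap segment on each heap.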
void GCHeap::DiagTraceGCSegments()
{
#ifdef FEATURE_EVENT_TRACE
    heap_segment* seg = 0;
#ifdef MULTIPLE_HEAPS
    // walk segments in each heap
    for (int i = 0; i < gc_heap::n_heaps; i++)
    {
        gc_heap* h = gc_heap::g_heaps [i];
#else
    {
        gc_heap* h = pGenGCHeap;
#endif //MULTIPLE_HEAPS

        for (seg = generation_start_segment (h->generation_of (max_generation)); seg != 0; seg = heap_segment_next(seg))
        {
            uint8_t* address = heap_segment_mem (seg);
            size_t size = heap_segment_reserved (seg) - heap_segment_mem (seg);
            gc_etw_segment_type type = heap_segment_read_only_p (seg) ? gc_etw_segment_read_only_heap : gc_etw_segment_small_object_heap;
            FIRE_EVENT(GCCreateSegment_V1, address, size, static_cast<uint32_t>(type));
        }

        // large obj segments
        for (seg = generation_start_segment (h->generation_of (max_generation+1)); seg != 0; seg = heap_segment_next(seg))
        {
            uint8_t* address = heap_segment_mem (seg);
            size_t size = heap_segment_reserved (seg) - heap_segment_mem (seg);
            FIRE_EVENT(GCCreateSegment_V1, address, size, static_cast<uint32_t>(gc_etw_segment_large_object_heap));
        }
    }
#endif // FEATURE_EVENT_TRACE
}

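// Invokes fn once for each generation's memory range; used by the profiler and by the
// ETW generation range events above.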
void GCHeap::DiagDescrGenerations (gen_walk_fn fn, void *context)
{
    pGenGCHeap->descr_generations_to_profiler(fn, context);
}

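// Wraps a caller-supplied memory region containing pre-constructed ("frozen") objects
// in a read-only heap segment and inserts it into the heap; returns NULL on failure.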
segment_handle GCHeap::RegisterFrozenSegment(segment_info *pseginfo)
{
#ifdef FEATURE_BASICFREEZE
    heap_segment * seg = new (nothrow) heap_segment;
    if (!seg)
    {
        return NULL;
    }

    uint8_t* base_mem = (uint8_t*)pseginfo->pvMem;
    heap_segment_mem(seg) = base_mem + pseginfo->ibFirstObject;
    heap_segment_allocated(seg) = base_mem + pseginfo->ibAllocated;
    heap_segment_committed(seg) = base_mem + pseginfo->ibCommit;
    heap_segment_reserved(seg) = base_mem + pseginfo->ibReserved;
    heap_segment_next(seg) = 0;
    heap_segment_used(seg) = heap_segment_allocated(seg);
    heap_segment_plan_allocated(seg) = 0;
    seg->flags = heap_segment_flags_readonly;

#if defined (MULTIPLE_HEAPS) && !defined (ISOLATED_HEAPS)
    gc_heap* heap = gc_heap::g_heaps[0];
    heap_segment_heap(seg) = heap;
#else
    gc_heap* heap = pGenGCHeap;
#endif //MULTIPLE_HEAPS && !ISOLATED_HEAPS

    if (heap->insert_ro_segment(seg) == FALSE)
    {
        delete seg;
        return NULL;
    }

    return reinterpret_cast< segment_handle >(seg);
#else
    assert(!"Should not call GCHeap::RegisterFrozenSegment without FEATURE_BASICFREEZE defined!");
    return NULL;
#endif // FEATURE_BASICFREEZE
}

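// Removes a read-only segment previously registered with RegisterFrozenSegment.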
void GCHeap::UnregisterFrozenSegment(segment_handle seg)
{
#ifdef FEATURE_BASICFREEZE
#if defined (MULTIPLE_HEAPS) && !defined (ISOLATED_HEAPS)
    gc_heap* heap = gc_heap::g_heaps[0];
#else
    gc_heap* heap = pGenGCHeap;
#endif //MULTIPLE_HEAPS && !ISOLATED_HEAPS

    heap->remove_ro_segment(reinterpret_cast<heap_segment*>(seg));
#else
    assert(!"Should not call GCHeap::UnregisterFrozenSegment without FEATURE_BASICFREEZE defined!");
#endif // FEATURE_BASICFREEZE
}

bool GCHeap::RuntimeStructuresValid()
{
    return GCScan::GetGcRuntimeStructuresValid();
}

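// Adjusts the global count of pending suspensions (g_fSuspensionPending).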
void GCHeap::SetSuspensionPending(bool fSuspensionPending)
{
    if (fSuspensionPending)
    {
        Interlocked::Increment(&g_fSuspensionPending);
    }
    else
    {
        Interlocked::Decrement(&g_fSuspensionPending);
    }
}

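// The next two methods update the keyword/level filtering state for the default and
// private GC event providers respectively.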
void GCHeap::ControlEvents(GCEventKeyword keyword, GCEventLevel level)
{
    GCEventStatus::Set(GCEventProvider_Default, keyword, level);
}

void GCHeap::ControlPrivateEvents(GCEventKeyword keyword, GCEventLevel level)
{
    GCEventStatus::Set(GCEventProvider_Private, keyword, level);
}

#endif // !DACCESS_COMPILE