| 1 | // Copyright (c) Microsoft. All rights reserved. |
| 2 | // Licensed under the MIT license. See LICENSE file in the project root for full license information. |
| 3 | // |
| 8 | // optimize for speed |
| 9 | |
| 10 | |
| 11 | #ifndef _DEBUG |
| 12 | #ifdef _MSC_VER |
| 13 | #pragma optimize( "t", on ) |
| 14 | #endif |
| 15 | #endif |
| 16 | #define inline __forceinline |
| 17 | |
| 18 | #include "gc.h" |
| 19 | |
| 20 | //#define DT_LOG |
| 21 | |
| 22 | #include "gcrecord.h" |
| 23 | |
| 24 | #ifdef _MSC_VER |
| 25 | #pragma warning(disable:4293) |
| 26 | #pragma warning(disable:4477) |
| 27 | #endif //_MSC_VER |
| 28 | |
| 29 | inline void FATAL_GC_ERROR() |
| 30 | { |
| 31 | #ifndef DACCESS_COMPILE |
| 32 | GCToOSInterface::DebugBreak(); |
| 33 | #endif // DACCESS_COMPILE |
| 34 | _ASSERTE(!"Fatal Error in GC." ); |
| 35 | GCToEEInterface::HandleFatalError(COR_E_EXECUTIONENGINE); |
| 36 | } |
| 37 | |
| 38 | #ifdef _MSC_VER |
| 39 | #pragma inline_depth(20) |
| 40 | #endif |
| 41 | |
| 42 | /* the following section defines the optional features */ |
| 43 | |
| 44 | // FEATURE_STRUCTALIGN was added by Midori. In CLR we are not interested |
| 45 | // in supporting custom alignments on LOH. Currently FEATURE_LOH_COMPACTION |
| 46 | // and FEATURE_STRUCTALIGN are mutually exclusive. It shouldn't be much |
| 47 | // work to make FEATURE_STRUCTALIGN not apply to LOH so they can be both |
| 48 | // turned on. |
| 49 | #define FEATURE_LOH_COMPACTION |
| 50 | |
| 51 | #ifdef FEATURE_64BIT_ALIGNMENT |
| 52 | // We need the following feature as part of keeping 64-bit types aligned in the GC heap. |
| 53 | #define RESPECT_LARGE_ALIGNMENT //used to keep "double" objects aligned during |
| 54 | //relocation |
| 55 | #endif //FEATURE_64BIT_ALIGNMENT |
| 56 | |
| 57 | #define SHORT_PLUGS //used to keep ephemeral plugs short so they fit better into the oldest generation free items |
| 58 | |
| 59 | #ifdef SHORT_PLUGS |
| 60 | #define DESIRED_PLUG_LENGTH (1000) |
| 61 | #endif //SHORT_PLUGS |
| 62 | |
| 63 | #define FEATURE_PREMORTEM_FINALIZATION |
| 64 | #define GC_HISTORY |
| 65 | |
| 66 | #ifndef FEATURE_REDHAWK |
| 67 | #define HEAP_ANALYZE |
| 68 | #define COLLECTIBLE_CLASS |
| 69 | #endif // !FEATURE_REDHAWK |
| 70 | |
| 71 | #ifdef HEAP_ANALYZE |
| 72 | #define initial_internal_roots (1024*16) |
| 73 | #endif // HEAP_ANALYZE |
| 74 | |
#define MARK_LIST //use a sorted list to speed up the plan phase
| 76 | |
| 77 | #define BACKGROUND_GC //concurrent background GC (requires WRITE_WATCH) |
| 78 | |
| 79 | #ifdef SERVER_GC |
| 80 | #define MH_SC_MARK //scalable marking |
| 81 | //#define SNOOP_STATS //diagnostic |
| 82 | #define PARALLEL_MARK_LIST_SORT //do the sorting and merging of the multiple mark lists in server gc in parallel |
| 83 | #endif //SERVER_GC |
| 84 | |
//This is used to mark a type as volatile only when scalable marking is used.
| 86 | #if defined (SERVER_GC) && defined (MH_SC_MARK) |
| 87 | #define SERVER_SC_MARK_VOLATILE(x) VOLATILE(x) |
| 88 | #else //SERVER_GC&&MH_SC_MARK |
| 89 | #define SERVER_SC_MARK_VOLATILE(x) x |
| 90 | #endif //SERVER_GC&&MH_SC_MARK |
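// Usage sketch (hypothetical field name): a declaration such as
//   SERVER_SC_MARK_VOLATILE(uint8_t*) next_object;
// is marked volatile only in server builds with scalable marking, and is
// a plain uint8_t* otherwise.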
| 91 | |
| 92 | //#define MULTIPLE_HEAPS //Allow multiple heaps for servers |
| 93 | |
| 94 | #define INTERIOR_POINTERS //Allow interior pointers in the code manager |
| 95 | |
#define CARD_BUNDLE //enable card bundle feature. (requires WRITE_WATCH)
| 97 | |
| 98 | // If this is defined we use a map for segments in order to find the heap for |
| 99 | // a segment fast. But it does use more memory as we have to cover the whole |
| 100 | // heap range and for each entry we allocate a struct of 5 ptr-size words |
| 101 | // (3 for WKS as there's only one heap). |
| 102 | #define SEG_MAPPING_TABLE |
| 103 | |
| 104 | // If allocating the heap mapping table for the available VA consumes too |
| 105 | // much memory, you can enable this to allocate only the portion that |
| 106 | // corresponds to rw segments and grow it when needed in grow_brick_card_table. |
| 107 | // However in heap_of you will need to always compare the address with |
| 108 | // g_lowest/highest before you can look at the heap mapping table. |
| 109 | #define GROWABLE_SEG_MAPPING_TABLE |
| 110 | |
| 111 | #ifdef BACKGROUND_GC |
| 112 | #define MARK_ARRAY //Mark bit in an array |
| 113 | #endif //BACKGROUND_GC |
| 114 | |
| 115 | #if defined(BACKGROUND_GC) || defined (CARD_BUNDLE) || defined(FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP) |
| 116 | #define WRITE_WATCH //Write Watch feature |
#endif //BACKGROUND_GC || CARD_BUNDLE || FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
| 118 | |
| 119 | #ifdef WRITE_WATCH |
| 120 | #define array_size 100 |
| 121 | #endif //WRITE_WATCH |
| 122 | |
| 123 | //#define SHORT_PLUGS //keep plug short |
| 124 | |
| 125 | #define FFIND_OBJECT //faster find_object, slower allocation |
#define FFIND_DECAY 7 //Number of GCs for which fast find will be active
| 127 | |
| 128 | #ifndef MAX_LONGPATH |
| 129 | #define MAX_LONGPATH 1024 |
| 130 | #endif // MAX_LONGPATH |
| 131 | |
| 132 | //#define DEBUG_WRITE_WATCH //Additional debug for write watch |
| 133 | |
| 134 | //#define STRESS_PINNING //Stress pinning by pinning randomly |
| 135 | |
| 136 | //#define TRACE_GC //debug trace gc operation |
| 137 | //#define SIMPLE_DPRINTF |
| 138 | |
| 139 | //#define TIME_GC //time allocation and garbage collection |
| 140 | //#define TIME_WRITE_WATCH //time GetWriteWatch and ResetWriteWatch calls |
| 141 | //#define COUNT_CYCLES //Use cycle counter for timing |
| 142 | //#define JOIN_STATS //amount of time spent in the join |
| 143 | //also, see TIME_SUSPEND in switches.h. |
| 144 | |
| 145 | //#define SYNCHRONIZATION_STATS |
| 146 | //#define SEG_REUSE_STATS |
| 147 | |
| 148 | #if defined (SYNCHRONIZATION_STATS) || defined (STAGE_STATS) |
| 149 | #define BEGIN_TIMING(x) \ |
| 150 | int64_t x##_start; \ |
| 151 | x##_start = GCToOSInterface::QueryPerformanceCounter() |
| 152 | |
| 153 | #define END_TIMING(x) \ |
| 154 | int64_t x##_end; \ |
| 155 | x##_end = GCToOSInterface::QueryPerformanceCounter(); \ |
| 156 | x += x##_end - x##_start |
| 157 | |
| 158 | #else |
| 159 | #define BEGIN_TIMING(x) |
| 160 | #define END_TIMING(x) |
| 161 | #define BEGIN_TIMING_CYCLES(x) |
| 162 | #define END_TIMING_CYCLES(x) |
| 163 | #endif //SYNCHRONIZATION_STATS || STAGE_STATS |
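// A minimal usage sketch of the timing macros above (hypothetical names):
// the accumulator must be an int64_t already in scope, since END_TIMING
// adds the elapsed performance counts to it.
//
//   int64_t suspend_time = 0;
//   ...
//   BEGIN_TIMING (suspend_time);
//   do_timed_work();            // whatever is being measured
//   END_TIMING (suspend_time);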
| 164 | |
| 165 | /* End of optional features */ |
| 166 | |
| 167 | #ifdef GC_CONFIG_DRIVEN |
| 168 | void GCLogConfig (const char *fmt, ... ); |
| 169 | #define cprintf(x) {GCLogConfig x;} |
| 170 | #endif //GC_CONFIG_DRIVEN |
| 171 | |
| 172 | #ifdef _DEBUG |
| 173 | #define TRACE_GC |
| 174 | #endif |
| 175 | |
| 176 | // For the bestfit algorithm when we relocate ephemeral generations into an |
| 177 | // existing gen2 segment. |
| 178 | // We recorded sizes from 2^6, 2^7, 2^8...up to 2^30 (1GB). So that's 25 sizes total. |
| 179 | #define MIN_INDEX_POWER2 6 |
| 180 | |
| 181 | #ifdef SERVER_GC |
| 182 | |
| 183 | #ifdef BIT64 |
| 184 | #define MAX_INDEX_POWER2 30 |
| 185 | #else |
| 186 | #define MAX_INDEX_POWER2 26 |
| 187 | #endif // BIT64 |
| 188 | |
| 189 | #else //SERVER_GC |
| 190 | |
| 191 | #ifdef BIT64 |
| 192 | #define MAX_INDEX_POWER2 28 |
| 193 | #else |
| 194 | #define MAX_INDEX_POWER2 24 |
| 195 | #endif // BIT64 |
| 196 | |
| 197 | #endif //SERVER_GC |
| 198 | |
| 199 | #define MAX_NUM_BUCKETS (MAX_INDEX_POWER2 - MIN_INDEX_POWER2 + 1) |
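// For example, on 64-bit server GC this works out to 30 - 6 + 1 = 25
// buckets, matching the "25 sizes total" note above; on 32-bit
// workstation GC it is 24 - 6 + 1 = 19 buckets.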
| 200 | |
| 201 | #define MAX_NUM_FREE_SPACES 200 |
| 202 | #define MIN_NUM_FREE_SPACES 5 |
| 203 | |
| 204 | //Please leave these definitions intact. |
| 205 | // hosted api |
| 206 | #ifdef memcpy |
| 207 | #undef memcpy |
| 208 | #endif //memcpy |
| 209 | |
| 210 | #ifdef FEATURE_STRUCTALIGN |
| 211 | #define REQD_ALIGN_DCL ,int requiredAlignment |
| 212 | #define REQD_ALIGN_ARG ,requiredAlignment |
| 213 | #define REQD_ALIGN_AND_OFFSET_DCL ,int requiredAlignment,size_t alignmentOffset |
| 214 | #define REQD_ALIGN_AND_OFFSET_DEFAULT_DCL ,int requiredAlignment=DATA_ALIGNMENT,size_t alignmentOffset=0 |
| 215 | #define REQD_ALIGN_AND_OFFSET_ARG ,requiredAlignment,alignmentOffset |
| 216 | #else // FEATURE_STRUCTALIGN |
| 217 | #define REQD_ALIGN_DCL |
| 218 | #define REQD_ALIGN_ARG |
| 219 | #define REQD_ALIGN_AND_OFFSET_DCL |
| 220 | #define REQD_ALIGN_AND_OFFSET_DEFAULT_DCL |
| 221 | #define REQD_ALIGN_AND_OFFSET_ARG |
| 222 | #endif // FEATURE_STRUCTALIGN |
| 223 | |
| 224 | #ifdef MULTIPLE_HEAPS |
| 225 | #define THREAD_NUMBER_DCL ,int thread |
| 226 | #define THREAD_NUMBER_ARG ,thread |
| 227 | #define THREAD_NUMBER_FROM_CONTEXT int thread = sc->thread_number; |
| 228 | #define THREAD_FROM_HEAP int thread = heap_number; |
| 229 | #define HEAP_FROM_THREAD gc_heap* hpt = gc_heap::g_heaps[thread]; |
| 230 | #else |
| 231 | #define THREAD_NUMBER_DCL |
| 232 | #define THREAD_NUMBER_ARG |
| 233 | #define THREAD_NUMBER_FROM_CONTEXT |
| 234 | #define THREAD_FROM_HEAP |
| 235 | #define HEAP_FROM_THREAD gc_heap* hpt = 0; |
| 236 | #endif //MULTIPLE_HEAPS |
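// A sketch of how these macros are used (hypothetical function name):
// a declaration written as
//
//   void do_per_heap_work (uint8_t* o THREAD_NUMBER_DCL);
//
// expands to
//
//   void do_per_heap_work (uint8_t* o, int thread);   // MULTIPLE_HEAPS
//   void do_per_heap_work (uint8_t* o);               // otherwise
//
// and the call site passes THREAD_NUMBER_ARG, so the extra parameter only
// exists in the multi-heap build.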
| 237 | |
| 238 | //These constants are ordered |
| 239 | const int policy_sweep = 0; |
| 240 | const int policy_compact = 1; |
| 241 | const int policy_expand = 2; |
| 242 | |
| 243 | #ifdef TRACE_GC |
| 244 | #define SEG_REUSE_LOG_0 7 |
| 245 | #define SEG_REUSE_LOG_1 (SEG_REUSE_LOG_0 + 1) |
| 246 | #define DT_LOG_0 (SEG_REUSE_LOG_1 + 1) |
| 247 | #define BGC_LOG (DT_LOG_0 + 1) |
| 248 | #define GTC_LOG (DT_LOG_0 + 2) |
| 249 | #define GC_TABLE_LOG (DT_LOG_0 + 3) |
| 250 | #define JOIN_LOG (DT_LOG_0 + 4) |
| 251 | #define SPINLOCK_LOG (DT_LOG_0 + 5) |
| 252 | #define SNOOP_LOG (DT_LOG_0 + 6) |
| 253 | |
| 254 | #ifndef DACCESS_COMPILE |
| 255 | |
| 256 | #ifdef SIMPLE_DPRINTF |
| 257 | |
| 258 | //#define dprintf(l,x) {if (trace_gc && ((l<=print_level)||gc_heap::settings.concurrent)) {printf ("\n");printf x ; fflush(stdout);}} |
| 259 | void GCLog (const char *fmt, ... ); |
| 260 | //#define dprintf(l,x) {if (trace_gc && (l<=print_level)) {GCLog x;}} |
| 261 | //#define dprintf(l,x) {if ((l==SEG_REUSE_LOG_0) || (l==SEG_REUSE_LOG_1) || (trace_gc && (l<=3))) {GCLog x;}} |
| 262 | //#define dprintf(l,x) {if (l == DT_LOG_0) {GCLog x;}} |
| 263 | //#define dprintf(l,x) {if (trace_gc && ((l <= 2) || (l == BGC_LOG) || (l==GTC_LOG))) {GCLog x;}} |
| 264 | //#define dprintf(l,x) {if ((l == 1) || (l == 2222)) {GCLog x;}} |
| 265 | #define dprintf(l,x) {if ((l <= 1) || (l == GTC_LOG)) {GCLog x;}} |
| 266 | //#define dprintf(l,x) {if ((l==GTC_LOG) || (l <= 1)) {GCLog x;}} |
| 267 | //#define dprintf(l,x) {if (trace_gc && ((l <= print_level) || (l==GTC_LOG))) {GCLog x;}} |
| 268 | //#define dprintf(l,x) {if (l==GTC_LOG) {printf ("\n");printf x ; fflush(stdout);}} |
| 269 | #else //SIMPLE_DPRINTF |
| 270 | // Nobody used the logging mechanism that used to be here. If we find ourselves |
| 271 | // wanting to inspect GC logs on unmodified builds, we can use this define here |
| 272 | // to do so. |
| 273 | #define dprintf(l, x) |
| 274 | |
| 275 | #endif //SIMPLE_DPRINTF |
| 276 | |
| 277 | #else //DACCESS_COMPILE |
| 278 | #define dprintf(l,x) |
| 279 | #endif //DACCESS_COMPILE |
| 280 | #else //TRACE_GC |
| 281 | #define dprintf(l,x) |
| 282 | #endif //TRACE_GC |
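// Usage sketch (hypothetical message and variables): the second macro
// argument is a complete, parenthesized printf-style argument list, so
// calls are written with double parentheses:
//
//   dprintf (GTC_LOG, ("h%d: condemning gen%d", heap_number, n));
//
// which expands to a GCLog call when the level is enabled, and to nothing
// when TRACE_GC is off.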
| 283 | |
| 284 | #if !defined(FEATURE_REDHAWK) && !defined(BUILD_AS_STANDALONE) |
| 285 | #undef assert |
| 286 | #define assert _ASSERTE |
| 287 | #undef ASSERT |
| 288 | #define ASSERT _ASSERTE |
#endif // !FEATURE_REDHAWK && !BUILD_AS_STANDALONE
| 290 | |
| 291 | struct GCDebugSpinLock { |
| 292 | VOLATILE(int32_t) lock; // -1 if free, 0 if held |
| 293 | #ifdef _DEBUG |
| 294 | VOLATILE(Thread *) holding_thread; // -1 if no thread holds the lock. |
| 295 | VOLATILE(BOOL) released_by_gc_p; // a GC thread released the lock. |
| 296 | #endif |
| 297 | #if defined (SYNCHRONIZATION_STATS) |
| 298 | // number of times we went into SwitchToThread in enter_spin_lock. |
| 299 | unsigned int num_switch_thread; |
| 300 | // number of times we went into WaitLonger. |
| 301 | unsigned int num_wait_longer; |
| 302 | // number of times we went to calling SwitchToThread in WaitLonger. |
| 303 | unsigned int num_switch_thread_w; |
| 304 | // number of times we went to calling DisablePreemptiveGC in WaitLonger. |
| 305 | unsigned int num_disable_preemptive_w; |
| 306 | #endif |
| 307 | |
| 308 | GCDebugSpinLock() |
| 309 | : lock(-1) |
| 310 | #ifdef _DEBUG |
| 311 | , holding_thread((Thread*) -1) |
| 312 | #endif |
| 313 | #if defined (SYNCHRONIZATION_STATS) |
| 314 | , num_switch_thread(0), num_wait_longer(0), num_switch_thread_w(0), num_disable_preemptive_w(0) |
| 315 | #endif |
| 316 | { |
| 317 | } |
| 318 | |
| 319 | #if defined (SYNCHRONIZATION_STATS) |
| 320 | void init() |
| 321 | { |
| 322 | num_switch_thread = 0; |
| 323 | num_wait_longer = 0; |
| 324 | num_switch_thread_w = 0; |
| 325 | num_disable_preemptive_w = 0; |
| 326 | } |
| 327 | #endif |
| 328 | }; |
| 329 | typedef GCDebugSpinLock GCSpinLock; |
| 330 | |
| 331 | class mark; |
| 332 | class heap_segment; |
class CObjectHeader;
| 334 | class l_heap; |
| 335 | class sorted_table; |
| 336 | class c_synchronize; |
| 337 | class seg_free_spaces; |
| 338 | class gc_heap; |
| 339 | |
| 340 | #ifdef BACKGROUND_GC |
| 341 | class exclusive_sync; |
| 342 | class recursive_gc_sync; |
| 343 | #endif //BACKGROUND_GC |
| 344 | |
| 345 | // The following 2 modes are of the same format as in clr\src\bcl\system\runtime\gcsettings.cs |
| 346 | // make sure you change that one if you change this one! |
| 347 | enum gc_pause_mode |
| 348 | { |
| 349 | pause_batch = 0, //We are not concerned about pause length |
| 350 | pause_interactive = 1, //We are running an interactive app |
| 351 | pause_low_latency = 2, //short pauses are essential |
| 352 | //avoid long pauses from blocking full GCs unless running out of memory |
| 353 | pause_sustained_low_latency = 3, |
| 354 | pause_no_gc = 4 |
| 355 | }; |
| 356 | |
| 357 | enum gc_loh_compaction_mode |
| 358 | { |
| 359 | loh_compaction_default = 1, // the default mode, don't compact LOH. |
| 360 | loh_compaction_once = 2, // only compact once the next time a blocking full GC happens. |
| 361 | loh_compaction_auto = 4 // GC decides when to compact LOH, to be implemented. |
| 362 | }; |
| 363 | |
| 364 | enum set_pause_mode_status |
| 365 | { |
| 366 | set_pause_mode_success = 0, |
| 367 | set_pause_mode_no_gc = 1 // NoGCRegion is in progress, can't change pause mode. |
| 368 | }; |
| 369 | |
| 370 | /* |
Latency modes required the user to have specific GC knowledge (e.g., budget, full blocking GC).
We are trying to move away from them, as it makes a lot more sense for users to tell
us which of the perf aspects that matter to them is the most important.
| 374 | |
| 375 | In general there are 3 such aspects: |
| 376 | |
| 377 | + memory footprint |
| 378 | + throughput |
+ pause predictability
| 380 | |
| 381 | Currently the following levels are supported. We may (and will likely) add more |
| 382 | in the future. |
| 383 | |
| 384 | +----------+--------------------+---------------------------------------+ |
| Level    | Optimization Goals | Latency Characteristics               |
| 386 | +==========+====================+=======================================+ |
| 387 | | 0 | memory footprint | pauses can be long and more frequent | |
| 388 | +----------+--------------------+---------------------------------------+ |
| 389 | | 1 | balanced | pauses are more predictable and more | |
| 390 | | | | frequent. the longest pauses are | |
|          |                    | shorter than in level 0.              |
| 392 | +----------+--------------------+---------------------------------------+ |
| 393 | */ |
| 394 | enum gc_latency_level |
| 395 | { |
| 396 | latency_level_first = 0, |
latency_level_memory_footprint = latency_level_first,
| 398 | latency_level_balanced = 1, |
| 399 | latency_level_last = latency_level_balanced, |
| 400 | latency_level_default = latency_level_balanced |
| 401 | }; |
| 402 | |
| 403 | enum gc_tuning_point |
| 404 | { |
| 405 | tuning_deciding_condemned_gen, |
| 406 | tuning_deciding_full_gc, |
| 407 | tuning_deciding_compaction, |
| 408 | tuning_deciding_expansion, |
| 409 | tuning_deciding_promote_ephemeral |
| 410 | }; |
| 411 | |
| 412 | #if defined(TRACE_GC) && defined(BACKGROUND_GC) |
| 413 | static const char * const str_bgc_state[] = |
| 414 | { |
| 415 | "not_in_process" , |
| 416 | "mark_handles" , |
| 417 | "mark_stack" , |
| 418 | "revisit_soh" , |
| 419 | "revisit_loh" , |
| 420 | "overflow_soh" , |
| 421 | "overflow_loh" , |
| 422 | "final_marking" , |
| 423 | "sweep_soh" , |
| 424 | "sweep_loh" , |
| 425 | "plan_phase" |
| 426 | }; |
| 427 | #endif // defined(TRACE_GC) && defined(BACKGROUND_GC) |
| 428 | |
| 429 | enum allocation_state |
| 430 | { |
| 431 | a_state_start = 0, |
| 432 | a_state_can_allocate, |
| 433 | a_state_cant_allocate, |
| 434 | a_state_try_fit, |
| 435 | a_state_try_fit_new_seg, |
| 436 | a_state_try_fit_new_seg_after_cg, |
| 437 | a_state_try_fit_no_seg, |
| 438 | a_state_try_fit_after_cg, |
| 439 | a_state_try_fit_after_bgc, |
| 440 | a_state_try_free_full_seg_in_bgc, |
| 441 | a_state_try_free_after_bgc, |
| 442 | a_state_try_seg_end, |
| 443 | a_state_acquire_seg, |
| 444 | a_state_acquire_seg_after_cg, |
| 445 | a_state_acquire_seg_after_bgc, |
| 446 | a_state_check_and_wait_for_bgc, |
| 447 | a_state_trigger_full_compact_gc, |
| 448 | a_state_trigger_ephemeral_gc, |
| 449 | a_state_trigger_2nd_ephemeral_gc, |
| 450 | a_state_check_retry_seg, |
| 451 | a_state_max |
| 452 | }; |
| 453 | |
| 454 | enum gc_type |
| 455 | { |
| 456 | gc_type_compacting = 0, |
| 457 | gc_type_blocking = 1, |
| 458 | #ifdef BACKGROUND_GC |
| 459 | gc_type_background = 2, |
| 460 | #endif //BACKGROUND_GC |
| 461 | gc_type_max = 3 |
| 462 | }; |
| 463 | |
| 464 | //encapsulates the mechanism for the current gc |
| 465 | class gc_mechanisms |
| 466 | { |
| 467 | public: |
| 468 | VOLATILE(size_t) gc_index; // starts from 1 for the first GC, like dd_collection_count |
| 469 | int condemned_generation; |
| 470 | BOOL promotion; |
| 471 | BOOL compaction; |
| 472 | BOOL loh_compaction; |
| 473 | BOOL heap_expansion; |
| 474 | uint32_t concurrent; |
| 475 | BOOL demotion; |
| 476 | BOOL card_bundles; |
| 477 | int gen0_reduction_count; |
| 478 | BOOL should_lock_elevation; |
| 479 | int elevation_locked_count; |
| 480 | BOOL elevation_reduced; |
| 481 | BOOL minimal_gc; |
| 482 | gc_reason reason; |
| 483 | gc_pause_mode pause_mode; |
| 484 | BOOL found_finalizers; |
| 485 | |
| 486 | #ifdef BACKGROUND_GC |
| 487 | BOOL background_p; |
| 488 | bgc_state b_state; |
| 489 | BOOL allocations_allowed; |
| 490 | #endif //BACKGROUND_GC |
| 491 | |
| 492 | #ifdef STRESS_HEAP |
| 493 | BOOL stress_induced; |
| 494 | #endif // STRESS_HEAP |
| 495 | |
| 496 | // These are opportunistically set |
| 497 | uint32_t entry_memory_load; |
| 498 | uint32_t exit_memory_load; |
| 499 | |
| 500 | void init_mechanisms(); //for each GC |
| 501 | void first_init(); // for the life of the EE |
| 502 | |
| 503 | void record (gc_history_global* history); |
| 504 | }; |
| 505 | |
// This is a compact version of gc_mechanisms that we use to save in the history.
| 507 | class gc_mechanisms_store |
| 508 | { |
| 509 | public: |
| 510 | size_t gc_index; |
| 511 | bool promotion; |
| 512 | bool compaction; |
| 513 | bool loh_compaction; |
| 514 | bool heap_expansion; |
| 515 | bool concurrent; |
| 516 | bool demotion; |
| 517 | bool card_bundles; |
| 518 | bool should_lock_elevation; |
| 519 | int condemned_generation : 8; |
| 520 | int gen0_reduction_count : 8; |
| 521 | int elevation_locked_count : 8; |
| 522 | gc_reason reason : 8; |
| 523 | gc_pause_mode pause_mode : 8; |
| 524 | #ifdef BACKGROUND_GC |
| 525 | bgc_state b_state : 8; |
| 526 | #endif //BACKGROUND_GC |
| 527 | bool found_finalizers; |
| 528 | |
| 529 | #ifdef BACKGROUND_GC |
| 530 | bool background_p; |
| 531 | #endif //BACKGROUND_GC |
| 532 | |
| 533 | #ifdef STRESS_HEAP |
| 534 | bool stress_induced; |
| 535 | #endif // STRESS_HEAP |
| 536 | |
| 537 | #ifdef BIT64 |
| 538 | uint32_t entry_memory_load; |
| 539 | #endif // BIT64 |
| 540 | |
| 541 | void store (gc_mechanisms* gm) |
| 542 | { |
| 543 | gc_index = gm->gc_index; |
| 544 | condemned_generation = gm->condemned_generation; |
| 545 | promotion = (gm->promotion != 0); |
| 546 | compaction = (gm->compaction != 0); |
| 547 | loh_compaction = (gm->loh_compaction != 0); |
| 548 | heap_expansion = (gm->heap_expansion != 0); |
| 549 | concurrent = (gm->concurrent != 0); |
| 550 | demotion = (gm->demotion != 0); |
| 551 | card_bundles = (gm->card_bundles != 0); |
| 552 | gen0_reduction_count = gm->gen0_reduction_count; |
| 553 | should_lock_elevation = (gm->should_lock_elevation != 0); |
| 554 | elevation_locked_count = gm->elevation_locked_count; |
| 555 | reason = gm->reason; |
| 556 | pause_mode = gm->pause_mode; |
| 557 | found_finalizers = (gm->found_finalizers != 0); |
| 558 | |
| 559 | #ifdef BACKGROUND_GC |
| 560 | background_p = (gm->background_p != 0); |
| 561 | b_state = gm->b_state; |
| 562 | #endif //BACKGROUND_GC |
| 563 | |
| 564 | #ifdef STRESS_HEAP |
| 565 | stress_induced = (gm->stress_induced != 0); |
| 566 | #endif // STRESS_HEAP |
| 567 | |
| 568 | #ifdef BIT64 |
| 569 | entry_memory_load = gm->entry_memory_load; |
| 570 | #endif // BIT64 |
| 571 | } |
| 572 | }; |
| 573 | |
| 574 | #ifdef GC_STATS |
| 575 | |
// GC-specific statistics, tracking counts and timings for GCs occurring in the system.
| 577 | // This writes the statistics to a file every 60 seconds, if a file is specified in |
| 578 | // COMPlus_GcMixLog |
| 579 | |
| 580 | struct GCStatistics |
| 581 | : public StatisticsBase |
| 582 | { |
| 583 | // initialized to the contents of COMPlus_GcMixLog, or NULL, if not present |
| 584 | static char* logFileName; |
| 585 | static FILE* logFile; |
| 586 | |
| 587 | // number of times we executed a background GC, a foreground GC, or a |
| 588 | // non-concurrent GC |
| 589 | int cntBGC, cntFGC, cntNGC; |
| 590 | |
| 591 | // min, max, and total time spent performing BGCs, FGCs, NGCs |
| 592 | // (BGC time includes everything between the moment the BGC starts until |
// it completes, i.e. the times of all FGCs occurring concurrently)
| 594 | MinMaxTot bgc, fgc, ngc; |
| 595 | |
| 596 | // number of times we executed a compacting GC (sweeping counts can be derived) |
| 597 | int cntCompactNGC, cntCompactFGC; |
| 598 | |
| 599 | // count of reasons |
| 600 | int cntReasons[reason_max]; |
| 601 | |
| 602 | // count of condemned generation, by NGC and FGC: |
| 603 | int cntNGCGen[max_generation+1]; |
| 604 | int cntFGCGen[max_generation]; |
| 605 | |
| 606 | /////////////////////////////////////////////////////////////////////////////////////////////// |
| 607 | // Internal mechanism: |
| 608 | |
| 609 | virtual void Initialize(); |
| 610 | virtual void DisplayAndUpdate(); |
| 611 | |
| 612 | // Public API |
| 613 | |
| 614 | static BOOL Enabled() |
| 615 | { return logFileName != NULL; } |
| 616 | |
| 617 | void AddGCStats(const gc_mechanisms& settings, size_t timeInMSec); |
| 618 | }; |
| 619 | |
| 620 | extern GCStatistics g_GCStatistics; |
| 621 | extern GCStatistics g_LastGCStatistics; |
| 622 | |
| 623 | #endif // GC_STATS |
| 624 | |
| 625 | typedef DPTR(class heap_segment) PTR_heap_segment; |
| 626 | typedef DPTR(class gc_heap) PTR_gc_heap; |
| 627 | typedef DPTR(PTR_gc_heap) PTR_PTR_gc_heap; |
| 628 | #ifdef FEATURE_PREMORTEM_FINALIZATION |
| 629 | typedef DPTR(class CFinalize) PTR_CFinalize; |
| 630 | #endif // FEATURE_PREMORTEM_FINALIZATION |
| 631 | |
| 632 | //------------------------------------- |
| 633 | //generation free list. It is an array of free lists bucketed by size, starting at sizes lower than first_bucket_size |
| 634 | //and doubling each time. The last bucket (index == num_buckets) is for largest sizes with no limit |
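// For example (hypothetical sizes): with first_bucket_size = 256 and four
// buckets, bucket 0 holds free items smaller than 256 bytes, bucket 1 holds
// [256, 512), bucket 2 holds [512, 1024), and the last bucket holds
// everything larger with no upper limit.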
| 635 | |
#define MAX_BUCKET_COUNT (13) //Max number of buckets for the small generations.
| 637 | class alloc_list |
| 638 | { |
| 639 | uint8_t* head; |
| 640 | uint8_t* tail; |
| 641 | |
| 642 | size_t damage_count; |
| 643 | public: |
| 644 | #ifdef FL_VERIFICATION |
| 645 | size_t item_count; |
| 646 | #endif //FL_VERIFICATION |
| 647 | |
| 648 | uint8_t*& alloc_list_head () { return head;} |
| 649 | uint8_t*& alloc_list_tail () { return tail;} |
| 650 | size_t& alloc_list_damage_count(){ return damage_count; } |
| 651 | alloc_list() |
| 652 | { |
| 653 | head = 0; |
| 654 | tail = 0; |
| 655 | damage_count = 0; |
| 656 | } |
| 657 | }; |
| 658 | |
| 659 | |
| 660 | class allocator |
| 661 | { |
| 662 | size_t num_buckets; |
| 663 | size_t frst_bucket_size; |
| 664 | alloc_list first_bucket; |
| 665 | alloc_list* buckets; |
| 666 | alloc_list& alloc_list_of (unsigned int bn); |
| 667 | size_t& alloc_list_damage_count_of (unsigned int bn); |
| 668 | |
| 669 | public: |
| 670 | allocator (unsigned int num_b, size_t fbs, alloc_list* b); |
| 671 | allocator() |
| 672 | { |
| 673 | num_buckets = 1; |
| 674 | frst_bucket_size = SIZE_T_MAX; |
| 675 | } |
| 676 | unsigned int number_of_buckets() {return (unsigned int)num_buckets;} |
| 677 | |
| 678 | size_t first_bucket_size() {return frst_bucket_size;} |
| 679 | uint8_t*& alloc_list_head_of (unsigned int bn) |
| 680 | { |
| 681 | return alloc_list_of (bn).alloc_list_head(); |
| 682 | } |
| 683 | uint8_t*& alloc_list_tail_of (unsigned int bn) |
| 684 | { |
| 685 | return alloc_list_of (bn).alloc_list_tail(); |
| 686 | } |
| 687 | void clear(); |
| 688 | BOOL discard_if_no_fit_p() |
| 689 | { |
| 690 | return (num_buckets == 1); |
| 691 | } |
| 692 | |
| 693 | // This is when we know there's nothing to repair because this free |
| 694 | // list has never gone through plan phase. Right now it's only used |
| 695 | // by the background ephemeral sweep when we copy the local free list |
| 696 | // to gen0's free list. |
| 697 | // |
| 698 | // We copy head and tail manually (vs together like copy_to_alloc_list) |
| 699 | // since we need to copy tail first because when we get the free items off |
| 700 | // of each bucket we check head first. We also need to copy the |
| 701 | // smaller buckets first so when gen0 allocation needs to thread |
// smaller items back that bucket is guaranteed to have been fully
| 703 | // copied. |
| 704 | void copy_with_no_repair (allocator* allocator_to_copy) |
| 705 | { |
| 706 | assert (num_buckets == allocator_to_copy->number_of_buckets()); |
| 707 | for (unsigned int i = 0; i < num_buckets; i++) |
| 708 | { |
| 709 | alloc_list* al = &(allocator_to_copy->alloc_list_of (i)); |
| 710 | alloc_list_tail_of(i) = al->alloc_list_tail(); |
| 711 | alloc_list_head_of(i) = al->alloc_list_head(); |
| 712 | } |
| 713 | } |
| 714 | |
| 715 | void unlink_item (unsigned int bucket_number, uint8_t* item, uint8_t* previous_item, BOOL use_undo_p); |
| 716 | void thread_item (uint8_t* item, size_t size); |
void thread_item_front (uint8_t* item, size_t size);
| 718 | void thread_free_item (uint8_t* free_item, uint8_t*& head, uint8_t*& tail); |
| 719 | void copy_to_alloc_list (alloc_list* toalist); |
| 720 | void copy_from_alloc_list (alloc_list* fromalist); |
| 721 | void commit_alloc_list_changes(); |
| 722 | }; |
| 723 | |
| 724 | #define NUM_GEN_POWER2 (20) |
| 725 | #define BASE_GEN_SIZE (1*512) |
| 726 | |
// group the frequently used ones together (need instrumentation on accessors)
| 728 | class generation |
| 729 | { |
| 730 | public: |
| 731 | // Don't move these first two fields without adjusting the references |
| 732 | // from the __asm in jitinterface.cpp. |
| 733 | alloc_context allocation_context; |
| 734 | PTR_heap_segment start_segment; |
| 735 | uint8_t* allocation_start; |
| 736 | heap_segment* allocation_segment; |
| 737 | uint8_t* allocation_context_start_region; |
| 738 | allocator free_list_allocator; |
| 739 | size_t free_list_allocated; |
| 740 | size_t end_seg_allocated; |
| 741 | BOOL allocate_end_seg_p; |
| 742 | size_t condemned_allocated; |
| 743 | size_t free_list_space; |
| 744 | size_t free_obj_space; |
| 745 | size_t allocation_size; |
| 746 | uint8_t* plan_allocation_start; |
| 747 | size_t plan_allocation_start_size; |
| 748 | |
| 749 | // this is the pinned plugs that got allocated into this gen. |
| 750 | size_t pinned_allocated; |
| 751 | size_t pinned_allocation_compact_size; |
| 752 | size_t pinned_allocation_sweep_size; |
| 753 | int gen_num; |
| 754 | |
| 755 | #ifdef FREE_USAGE_STATS |
| 756 | size_t gen_free_spaces[NUM_GEN_POWER2]; |
| 757 | // these are non pinned plugs only |
| 758 | size_t gen_plugs[NUM_GEN_POWER2]; |
| 759 | size_t gen_current_pinned_free_spaces[NUM_GEN_POWER2]; |
| 760 | size_t pinned_free_obj_space; |
| 761 | // this is what got allocated into the pinned free spaces. |
| 762 | size_t allocated_in_pinned_free; |
| 763 | size_t allocated_since_last_pin; |
| 764 | #endif //FREE_USAGE_STATS |
| 765 | }; |
| 766 | |
| 767 | static_assert(offsetof(dac_generation, allocation_context) == offsetof(generation, allocation_context), "DAC generation offset mismatch" ); |
| 768 | static_assert(offsetof(dac_generation, start_segment) == offsetof(generation, start_segment), "DAC generation offset mismatch" ); |
| 769 | static_assert(offsetof(dac_generation, allocation_start) == offsetof(generation, allocation_start), "DAC generation offset mismatch" ); |
| 770 | |
| 771 | // static data remains the same after it's initialized. |
| 772 | // It's per generation. |
| 773 | // TODO: for gen_time_tuning, we should put the multipliers in static data. |
| 774 | struct static_data |
| 775 | { |
| 776 | size_t min_size; |
| 777 | size_t max_size; |
| 778 | size_t fragmentation_limit; |
| 779 | float fragmentation_burden_limit; |
| 780 | float limit; |
| 781 | float max_limit; |
| 782 | size_t time_clock; // time after which to collect generation, in performance counts (see QueryPerformanceCounter) |
size_t gc_clock; // number of GCs after which to collect generation
| 784 | }; |
| 785 | |
| 786 | // The dynamic data fields are grouped into 3 categories: |
| 787 | // |
| 788 | // calculated logical data (like desired_allocation) |
| 789 | // physical data (like fragmentation) |
| 790 | // const data (sdata), initialized at the beginning |
| 791 | class dynamic_data |
| 792 | { |
| 793 | public: |
| 794 | ptrdiff_t new_allocation; |
| 795 | ptrdiff_t gc_new_allocation; // new allocation at beginning of gc |
| 796 | float surv; |
| 797 | size_t desired_allocation; |
| 798 | |
| 799 | // # of bytes taken by objects (ie, not free space) at the beginning |
| 800 | // of the GC. |
| 801 | size_t begin_data_size; |
| 802 | // # of bytes taken by survived objects after mark. |
| 803 | size_t survived_size; |
| 804 | // # of bytes taken by survived pinned plugs after mark. |
| 805 | size_t pinned_survived_size; |
| 806 | size_t artificial_pinned_survived_size; |
| 807 | size_t added_pinned_size; |
| 808 | |
| 809 | #ifdef SHORT_PLUGS |
| 810 | size_t padding_size; |
| 811 | #endif //SHORT_PLUGS |
| 812 | #if defined (RESPECT_LARGE_ALIGNMENT) || defined (FEATURE_STRUCTALIGN) |
| 813 | // # of plugs that are not pinned plugs. |
| 814 | size_t num_npinned_plugs; |
| 815 | #endif //RESPECT_LARGE_ALIGNMENT || FEATURE_STRUCTALIGN |
| 816 | //total object size after a GC, ie, doesn't include fragmentation |
| 817 | size_t current_size; |
| 818 | size_t collection_count; |
| 819 | size_t promoted_size; |
| 820 | size_t freach_previous_promotion; |
| 821 | size_t fragmentation; //fragmentation when we don't compact |
| 822 | size_t gc_clock; //gc# when last GC happened |
| 823 | size_t time_clock; //time when last gc started |
| 824 | size_t gc_elapsed_time; // Time it took for the gc to complete |
| 825 | float gc_speed; // speed in bytes/msec for the gc to complete |
| 826 | |
| 827 | size_t min_size; |
| 828 | |
| 829 | static_data* sdata; |
| 830 | }; |
| 831 | |
| 832 | #define ro_in_entry 0x1 |
| 833 | |
| 834 | #ifdef SEG_MAPPING_TABLE |
| 835 | // Note that I am storing both h0 and seg0, even though in Server GC you can get to |
| 836 | // the heap* from the segment info. This is because heap_of needs to be really fast |
| 837 | // and we would not want yet another indirection. |
| 838 | struct seg_mapping |
| 839 | { |
| 840 | // if an address is > boundary it belongs to h1; else h0. |
| 841 | // since we init h0 and h1 to 0, if we get 0 it means that |
| 842 | // address doesn't exist on managed segments. And heap_of |
| 843 | // would just return heap0 which is what it does now. |
| 844 | uint8_t* boundary; |
| 845 | #ifdef MULTIPLE_HEAPS |
| 846 | gc_heap* h0; |
| 847 | gc_heap* h1; |
| 848 | #endif //MULTIPLE_HEAPS |
// You could have an address that's in between 2 segments and
| 850 | // this would return a seg, the caller then will use |
| 851 | // in_range_for_segment to determine if it's on that seg. |
| 852 | heap_segment* seg0; // this is what the seg for h0 is. |
| 853 | heap_segment* seg1; // this is what the seg for h1 is. |
| 854 | // Note that when frozen objects are used we mask seg1 |
| 855 | // with 0x1 to indicate that there is a ro segment for |
| 856 | // this entry. |
| 857 | }; |
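// A minimal sketch (not the actual heap_of implementation) of how an entry
// is consulted once it has been looked up for an address, in the
// MULTIPLE_HEAPS case:
//
//   gc_heap* heap_of_sketch (seg_mapping* entry, uint8_t* addr)
//   {
//       return (addr > entry->boundary) ? entry->h1 : entry->h0;
//   }
//
// A result of 0 means the address is not on any managed segment, in which
// case heap_of falls back to heap 0 as described above.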
| 858 | #endif //SEG_MAPPING_TABLE |
| 859 | |
| 860 | // alignment helpers |
| 861 | //Alignment constant for allocation |
| 862 | #define ALIGNCONST (DATA_ALIGNMENT-1) |
| 863 | |
| 864 | inline |
| 865 | size_t Align (size_t nbytes, int alignment=ALIGNCONST) |
| 866 | { |
| 867 | return (nbytes + alignment) & ~alignment; |
| 868 | } |
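// For example, assuming DATA_ALIGNMENT is 8 (so ALIGNCONST == 7),
// Align (13) == (13 + 7) & ~7 == 16, while sizes that are already 8-byte
// aligned come back unchanged.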
| 869 | |
| 870 | //return alignment constant for small object heap vs large object heap |
| 871 | inline |
| 872 | int get_alignment_constant (BOOL small_object_p) |
| 873 | { |
| 874 | #ifdef FEATURE_STRUCTALIGN |
| 875 | // If any objects on the large object heap require 8-byte alignment, |
| 876 | // the compiler will tell us so. Let's not guess an alignment here. |
| 877 | return ALIGNCONST; |
| 878 | #else // FEATURE_STRUCTALIGN |
| 879 | return small_object_p ? ALIGNCONST : 7; |
| 880 | #endif // FEATURE_STRUCTALIGN |
| 881 | } |
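// For example, Align (size, get_alignment_constant (FALSE)) rounds a large
// object allocation up to an 8-byte boundary even on 32-bit (where,
// assuming DATA_ALIGNMENT is 4, ALIGNCONST itself is only 3).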
| 882 | |
| 883 | struct etw_opt_info |
| 884 | { |
| 885 | size_t desired_allocation; |
| 886 | size_t new_allocation; |
| 887 | int gen_number; |
| 888 | }; |
| 889 | |
| 890 | // Note, I am not removing the ones that are no longer used |
| 891 | // because the older versions of the runtime still use them |
| 892 | // and ETW interprets them. |
| 893 | enum alloc_wait_reason |
| 894 | { |
| 895 | // When we don't care about firing an event for |
| 896 | // this. |
| 897 | awr_ignored = -1, |
| 898 | |
| 899 | // when we detect we are in low memory |
| 900 | awr_low_memory = 0, |
| 901 | |
| 902 | // when we detect the ephemeral segment is too full |
| 903 | awr_low_ephemeral = 1, |
| 904 | |
| 905 | // we've given out too much budget for gen0. |
| 906 | awr_gen0_alloc = 2, |
| 907 | |
| 908 | // we've given out too much budget for loh. |
| 909 | awr_loh_alloc = 3, |
| 910 | |
| 911 | // this event is really obsolete - it's for pre-XP |
| 912 | // OSs where low mem notification is not supported. |
| 913 | awr_alloc_loh_low_mem = 4, |
| 914 | |
// we ran out of VM space to reserve on loh.
| 916 | awr_loh_oos = 5, |
| 917 | |
| 918 | // ran out of space when allocating a small object |
| 919 | awr_gen0_oos_bgc = 6, |
| 920 | |
| 921 | // ran out of space when allocating a large object |
| 922 | awr_loh_oos_bgc = 7, |
| 923 | |
| 924 | // waiting for BGC to let FGC happen |
| 925 | awr_fgc_wait_for_bgc = 8, |
| 926 | |
| 927 | // wait for bgc to finish to get loh seg. |
| 928 | // no longer used with the introduction of loh msl. |
| 929 | awr_get_loh_seg = 9, |
| 930 | |
| 931 | // we don't allow loh allocation during bgc planning. |
| 932 | // no longer used with the introduction of loh msl. |
| 933 | awr_loh_alloc_during_plan = 10, |
| 934 | |
| 935 | // we don't allow too much loh allocation during bgc. |
| 936 | awr_loh_alloc_during_bgc = 11 |
| 937 | }; |
| 938 | |
| 939 | struct alloc_thread_wait_data |
| 940 | { |
| 941 | int awr; |
| 942 | }; |
| 943 | |
| 944 | enum msl_take_state |
| 945 | { |
| 946 | mt_get_large_seg = 0, |
| 947 | mt_bgc_loh_sweep, |
| 948 | mt_wait_bgc, |
| 949 | mt_block_gc, |
| 950 | mt_clr_mem, |
| 951 | mt_clr_large_mem, |
| 952 | mt_t_eph_gc, |
| 953 | mt_t_full_gc, |
| 954 | mt_alloc_small, |
| 955 | mt_alloc_large, |
| 956 | mt_alloc_small_cant, |
| 957 | mt_alloc_large_cant, |
| 958 | mt_try_alloc, |
| 959 | mt_try_budget |
| 960 | }; |
| 961 | |
| 962 | enum msl_enter_state |
| 963 | { |
| 964 | me_acquire, |
| 965 | me_release |
| 966 | }; |
| 967 | |
| 968 | struct spinlock_info |
| 969 | { |
| 970 | msl_enter_state enter_state; |
| 971 | msl_take_state take_state; |
| 972 | EEThreadId thread_id; |
| 973 | bool loh_p; |
| 974 | }; |
| 975 | |
| 976 | #define HS_CACHE_LINE_SIZE 128 |
| 977 | |
| 978 | #ifdef SNOOP_STATS |
| 979 | struct snoop_stats_data |
| 980 | { |
| 981 | int heap_index; |
| 982 | |
| 983 | // total number of objects that we called |
| 984 | // gc_mark on. |
| 985 | size_t objects_checked_count; |
| 986 | // total number of time we called gc_mark |
| 987 | // on a 0 reference. |
| 988 | size_t zero_ref_count; |
| 989 | // total objects actually marked. |
| 990 | size_t objects_marked_count; |
| 991 | // number of objects written to the mark stack because |
| 992 | // of mark_stolen. |
| 993 | size_t stolen_stack_count; |
| 994 | // number of objects pushed onto the mark stack because |
| 995 | // of the partial mark code path. |
| 996 | size_t partial_stack_count; |
| 997 | // number of objects pushed onto the mark stack because |
| 998 | // of the non partial mark code path. |
| 999 | size_t normal_stack_count; |
| 1000 | // number of references marked without mark stack. |
| 1001 | size_t non_stack_count; |
| 1002 | |
| 1003 | // number of times we detect next heap's mark stack |
| 1004 | // is not busy. |
| 1005 | size_t stack_idle_count; |
| 1006 | |
| 1007 | // number of times we do switch to thread. |
| 1008 | size_t switch_to_thread_count; |
| 1009 | |
| 1010 | // number of times we are checking if the next heap's |
| 1011 | // mark stack is busy. |
| 1012 | size_t check_level_count; |
| 1013 | // number of times next stack is busy and level is |
| 1014 | // at the bottom. |
| 1015 | size_t busy_count; |
| 1016 | // how many interlocked exchange operations we did |
| 1017 | size_t interlocked_count; |
// number of times parent objects were stolen
| 1019 | size_t partial_mark_parent_count; |
// number of times we look at a normal stolen entry,
| 1021 | // or the beginning/ending PM pair. |
| 1022 | size_t stolen_or_pm_count; |
| 1023 | // number of times we see 2 for the entry. |
| 1024 | size_t stolen_entry_count; |
| 1025 | // number of times we see a PM entry that's not ready. |
| 1026 | size_t pm_not_ready_count; |
| 1027 | // number of stolen normal marked objects and partial mark children. |
| 1028 | size_t normal_count; |
| 1029 | // number of times the bottom of mark stack was cleared. |
| 1030 | size_t stack_bottom_clear_count; |
| 1031 | }; |
| 1032 | #endif //SNOOP_STATS |
| 1033 | |
| 1034 | struct no_gc_region_info |
| 1035 | { |
| 1036 | size_t soh_allocation_size; |
| 1037 | size_t loh_allocation_size; |
| 1038 | size_t started; |
| 1039 | size_t num_gcs; |
| 1040 | size_t num_gcs_induced; |
| 1041 | start_no_gc_region_status start_status; |
| 1042 | gc_pause_mode saved_pause_mode; |
| 1043 | size_t saved_gen0_min_size; |
| 1044 | size_t saved_gen3_min_size; |
| 1045 | BOOL minimal_gc_p; |
| 1046 | }; |
| 1047 | |
| 1048 | // if you change these, make sure you update them for sos (strike.cpp) as well. |
| 1049 | // |
| 1050 | // !!!NOTE!!! |
| 1051 | // Right now I am only recording data from blocking GCs. When recording from BGC, |
| 1052 | // it should have its own copy just like gc_data_per_heap. |
| 1053 | // for BGCs we will have a very different set of datapoints to record. |
| 1054 | enum interesting_data_point |
| 1055 | { |
| 1056 | idp_pre_short = 0, |
| 1057 | idp_post_short = 1, |
| 1058 | idp_merged_pin = 2, |
| 1059 | idp_converted_pin = 3, |
| 1060 | idp_pre_pin = 4, |
| 1061 | idp_post_pin = 5, |
| 1062 | idp_pre_and_post_pin = 6, |
| 1063 | idp_pre_short_padded = 7, |
| 1064 | idp_post_short_padded = 8, |
| 1065 | max_idp_count |
| 1066 | }; |
| 1067 | |
//class definition of the internal gc_heap class
| 1069 | class gc_heap |
| 1070 | { |
| 1071 | friend class GCHeap; |
| 1072 | #ifdef FEATURE_PREMORTEM_FINALIZATION |
| 1073 | friend class CFinalize; |
| 1074 | #endif // FEATURE_PREMORTEM_FINALIZATION |
| 1075 | friend struct ::alloc_context; |
| 1076 | friend void ProfScanRootsHelper(Object** object, ScanContext *pSC, uint32_t dwFlags); |
| 1077 | friend void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForEtw, BOOL fShouldWalkHeapObjectsForEtw); |
| 1078 | friend class t_join; |
| 1079 | friend class gc_mechanisms; |
| 1080 | friend class seg_free_spaces; |
| 1081 | |
| 1082 | #ifdef BACKGROUND_GC |
| 1083 | friend class exclusive_sync; |
| 1084 | friend class recursive_gc_sync; |
| 1085 | #endif //BACKGROUND_GC |
| 1086 | |
| 1087 | #if defined (WRITE_BARRIER_CHECK) && !defined (SERVER_GC) |
| 1088 | friend void checkGCWriteBarrier(); |
| 1089 | friend void initGCShadow(); |
| 1090 | #endif //defined (WRITE_BARRIER_CHECK) && !defined (SERVER_GC) |
| 1091 | |
| 1092 | friend void PopulateDacVars(GcDacVars *gcDacVars); |
| 1093 | |
| 1094 | #ifdef MULTIPLE_HEAPS |
| 1095 | typedef void (gc_heap::* card_fn) (uint8_t**, int); |
| 1096 | #define call_fn(fn) (this->*fn) |
| 1097 | #define __this this |
| 1098 | #else |
| 1099 | typedef void (* card_fn) (uint8_t**); |
| 1100 | #define call_fn(fn) (*fn) |
| 1101 | #define __this (gc_heap*)0 |
| 1102 | #endif |
| 1103 | |
| 1104 | public: |
| 1105 | |
| 1106 | #ifdef TRACE_GC |
| 1107 | PER_HEAP |
| 1108 | void print_free_list (int gen, heap_segment* seg); |
| 1109 | #endif // TRACE_GC |
| 1110 | |
| 1111 | #ifdef SYNCHRONIZATION_STATS |
| 1112 | |
| 1113 | PER_HEAP_ISOLATED |
| 1114 | void init_sync_stats() |
| 1115 | { |
| 1116 | #ifdef MULTIPLE_HEAPS |
| 1117 | for (int i = 0; i < gc_heap::n_heaps; i++) |
| 1118 | { |
| 1119 | gc_heap::g_heaps[i]->init_heap_sync_stats(); |
| 1120 | } |
| 1121 | #else //MULTIPLE_HEAPS |
| 1122 | init_heap_sync_stats(); |
| 1123 | #endif //MULTIPLE_HEAPS |
| 1124 | } |
| 1125 | |
| 1126 | PER_HEAP_ISOLATED |
| 1127 | void print_sync_stats(unsigned int gc_count_during_log) |
| 1128 | { |
// bad/good gl acquire counts are cumulative over the log interval (because the numbers are too small)
| 1130 | // min/max msl_acquire is the min/max during the log interval, not each GC. |
| 1131 | // Threads is however many allocation threads for the last GC. |
| 1132 | // num of msl acquired, avg_msl, high and low are all for each GC. |
| 1133 | printf("%2s%2s%10s%10s%12s%6s%4s%8s( st, wl, stw, dpw)\n" , |
| 1134 | "H" , "T" , "good_sus" , "bad_sus" , "avg_msl" , "high" , "low" , "num_msl" ); |
| 1135 | |
| 1136 | #ifdef MULTIPLE_HEAPS |
| 1137 | for (int i = 0; i < gc_heap::n_heaps; i++) |
| 1138 | { |
| 1139 | gc_heap::g_heaps[i]->print_heap_sync_stats(i, gc_count_during_log); |
| 1140 | } |
| 1141 | #else //MULTIPLE_HEAPS |
| 1142 | print_heap_sync_stats(0, gc_count_during_log); |
| 1143 | #endif //MULTIPLE_HEAPS |
| 1144 | } |
| 1145 | |
| 1146 | #endif //SYNCHRONIZATION_STATS |
| 1147 | |
| 1148 | PER_HEAP |
| 1149 | void verify_soh_segment_list(); |
| 1150 | PER_HEAP |
| 1151 | void verify_mark_array_cleared (heap_segment* seg); |
| 1152 | PER_HEAP |
| 1153 | void verify_mark_array_cleared(); |
| 1154 | PER_HEAP |
| 1155 | void verify_seg_end_mark_array_cleared(); |
| 1156 | PER_HEAP |
| 1157 | void verify_partial(); |
| 1158 | |
| 1159 | #ifdef VERIFY_HEAP |
| 1160 | PER_HEAP |
| 1161 | void verify_free_lists(); |
| 1162 | PER_HEAP |
| 1163 | void verify_heap (BOOL begin_gc_p); |
| 1164 | #endif //VERIFY_HEAP |
| 1165 | |
| 1166 | PER_HEAP_ISOLATED |
| 1167 | void fire_per_heap_hist_event (gc_history_per_heap* current_gc_data_per_heap, int heap_num); |
| 1168 | |
| 1169 | PER_HEAP_ISOLATED |
| 1170 | void fire_pevents(); |
| 1171 | |
| 1172 | #ifdef FEATURE_BASICFREEZE |
| 1173 | static void walk_read_only_segment(heap_segment *seg, void *pvContext, object_callback_func pfnMethodTable, object_callback_func pfnObjRef); |
| 1174 | #endif |
| 1175 | |
| 1176 | static |
| 1177 | heap_segment* make_heap_segment (uint8_t* new_pages, |
| 1178 | size_t size, |
| 1179 | int h_number); |
| 1180 | static |
| 1181 | l_heap* make_large_heap (uint8_t* new_pages, size_t size, BOOL managed); |
| 1182 | |
| 1183 | static |
| 1184 | gc_heap* make_gc_heap( |
| 1185 | #if defined (MULTIPLE_HEAPS) |
| 1186 | GCHeap* vm_heap, |
| 1187 | int heap_number |
| 1188 | #endif //MULTIPLE_HEAPS |
| 1189 | ); |
| 1190 | |
| 1191 | static |
| 1192 | void destroy_gc_heap(gc_heap* heap); |
| 1193 | |
| 1194 | static |
| 1195 | HRESULT initialize_gc (size_t segment_size, |
| 1196 | size_t heap_size |
| 1197 | #ifdef MULTIPLE_HEAPS |
| 1198 | , unsigned number_of_heaps |
| 1199 | #endif //MULTIPLE_HEAPS |
| 1200 | ); |
| 1201 | |
| 1202 | static |
| 1203 | void shutdown_gc(); |
| 1204 | |
| 1205 | PER_HEAP |
| 1206 | CObjectHeader* allocate (size_t jsize, |
| 1207 | alloc_context* acontext); |
| 1208 | |
| 1209 | #ifdef MULTIPLE_HEAPS |
| 1210 | static void balance_heaps (alloc_context* acontext); |
| 1211 | static |
| 1212 | gc_heap* balance_heaps_loh (alloc_context* acontext, size_t size); |
| 1213 | static |
| 1214 | void gc_thread_stub (void* arg); |
| 1215 | #endif //MULTIPLE_HEAPS |
| 1216 | |
| 1217 | CObjectHeader* try_fast_alloc (size_t jsize); |
| 1218 | |
| 1219 | // For LOH allocations we only update the alloc_bytes_loh in allocation |
| 1220 | // context - we don't actually use the ptr/limit from it so I am |
| 1221 | // making this explicit by not passing in the alloc_context. |
| 1222 | PER_HEAP |
| 1223 | CObjectHeader* allocate_large_object (size_t size, int64_t& alloc_bytes); |
| 1224 | |
| 1225 | #ifdef FEATURE_STRUCTALIGN |
| 1226 | PER_HEAP |
| 1227 | uint8_t* pad_for_alignment_large (uint8_t* newAlloc, int requiredAlignment, size_t size); |
| 1228 | #endif // FEATURE_STRUCTALIGN |
| 1229 | |
| 1230 | PER_HEAP_ISOLATED |
| 1231 | void do_pre_gc(); |
| 1232 | |
| 1233 | PER_HEAP_ISOLATED |
| 1234 | void do_post_gc(); |
| 1235 | |
| 1236 | PER_HEAP |
| 1237 | BOOL expand_soh_with_minimal_gc(); |
| 1238 | |
| 1239 | // EE is always suspended when this method is called. |
| 1240 | // returning FALSE means we actually didn't do a GC. This happens |
| 1241 | // when we figured that we needed to do a BGC. |
| 1242 | PER_HEAP |
| 1243 | void garbage_collect (int n); |
| 1244 | |
// Since we don't want to waste a join just to do this, I am
| 1246 | // doing this at the last join in gc1. |
| 1247 | PER_HEAP_ISOLATED |
| 1248 | void pm_full_gc_init_or_clear(); |
| 1249 | |
| 1250 | // This does a GC when pm_trigger_full_gc is set |
| 1251 | PER_HEAP |
| 1252 | void garbage_collect_pm_full_gc(); |
| 1253 | |
| 1254 | PER_HEAP_ISOLATED |
| 1255 | bool is_pm_ratio_exceeded(); |
| 1256 | |
| 1257 | PER_HEAP |
| 1258 | void init_records(); |
| 1259 | |
| 1260 | static |
| 1261 | uint32_t* make_card_table (uint8_t* start, uint8_t* end); |
| 1262 | |
| 1263 | static |
| 1264 | void set_fgm_result (failure_get_memory f, size_t s, BOOL loh_p); |
| 1265 | |
| 1266 | static |
| 1267 | int grow_brick_card_tables (uint8_t* start, |
| 1268 | uint8_t* end, |
| 1269 | size_t size, |
| 1270 | heap_segment* new_seg, |
| 1271 | gc_heap* hp, |
| 1272 | BOOL loh_p); |
| 1273 | |
| 1274 | PER_HEAP |
| 1275 | BOOL is_mark_set (uint8_t* o); |
| 1276 | |
| 1277 | #ifdef FEATURE_BASICFREEZE |
| 1278 | PER_HEAP_ISOLATED |
| 1279 | bool frozen_object_p(Object* obj); |
| 1280 | #endif // FEATURE_BASICFREEZE |
| 1281 | |
| 1282 | protected: |
| 1283 | |
| 1284 | PER_HEAP_ISOLATED |
| 1285 | void walk_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p); |
| 1286 | |
| 1287 | PER_HEAP |
| 1288 | void walk_heap_per_heap (walk_fn fn, void* context, int gen_number, BOOL walk_large_object_heap_p); |
| 1289 | |
| 1290 | struct walk_relocate_args |
| 1291 | { |
| 1292 | uint8_t* last_plug; |
| 1293 | BOOL is_shortened; |
| 1294 | mark* pinned_plug_entry; |
| 1295 | void* profiling_context; |
| 1296 | record_surv_fn fn; |
| 1297 | }; |
| 1298 | |
| 1299 | PER_HEAP |
| 1300 | void walk_survivors (record_surv_fn fn, void* context, walk_surv_type type); |
| 1301 | |
| 1302 | PER_HEAP |
| 1303 | void walk_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, |
| 1304 | walk_relocate_args* args); |
| 1305 | |
| 1306 | PER_HEAP |
| 1307 | void walk_relocation (void* profiling_context, record_surv_fn fn); |
| 1308 | |
| 1309 | PER_HEAP |
| 1310 | void walk_relocation_in_brick (uint8_t* tree, walk_relocate_args* args); |
| 1311 | |
| 1312 | PER_HEAP |
| 1313 | void walk_finalize_queue (fq_walk_fn fn); |
| 1314 | |
| 1315 | #if defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE) |
| 1316 | PER_HEAP |
| 1317 | void walk_survivors_for_bgc (void* profiling_context, record_surv_fn fn); |
| 1318 | #endif // defined(BACKGROUND_GC) && defined(FEATURE_EVENT_TRACE) |
| 1319 | |
| 1320 | // used in blocking GCs after plan phase so this walks the plugs. |
| 1321 | PER_HEAP |
| 1322 | void walk_survivors_relocation (void* profiling_context, record_surv_fn fn); |
| 1323 | PER_HEAP |
| 1324 | void walk_survivors_for_loh (void* profiling_context, record_surv_fn fn); |
| 1325 | |
| 1326 | PER_HEAP |
| 1327 | int generation_to_condemn (int n, |
| 1328 | BOOL* blocking_collection_p, |
| 1329 | BOOL* elevation_requested_p, |
| 1330 | BOOL check_only_p); |
| 1331 | |
| 1332 | PER_HEAP_ISOLATED |
| 1333 | int joined_generation_to_condemn (BOOL should_evaluate_elevation, |
| 1334 | int initial_gen, |
| 1335 | int current_gen, |
| 1336 | BOOL* blocking_collection |
| 1337 | STRESS_HEAP_ARG(int n_original)); |
| 1338 | |
| 1339 | PER_HEAP |
| 1340 | size_t min_reclaim_fragmentation_threshold (uint32_t num_heaps); |
| 1341 | |
| 1342 | PER_HEAP_ISOLATED |
| 1343 | uint64_t min_high_fragmentation_threshold (uint64_t available_mem, uint32_t num_heaps); |
| 1344 | |
| 1345 | PER_HEAP |
| 1346 | void concurrent_print_time_delta (const char* msg); |
| 1347 | PER_HEAP |
| 1348 | void free_list_info (int gen_num, const char* msg); |
| 1349 | |
| 1350 | // in svr GC on entry and exit of this method, the GC threads are not |
| 1351 | // synchronized |
| 1352 | PER_HEAP |
| 1353 | void gc1(); |
| 1354 | |
| 1355 | PER_HEAP_ISOLATED |
| 1356 | void save_data_for_no_gc(); |
| 1357 | |
| 1358 | PER_HEAP_ISOLATED |
| 1359 | void restore_data_for_no_gc(); |
| 1360 | |
| 1361 | PER_HEAP_ISOLATED |
| 1362 | void update_collection_counts_for_no_gc(); |
| 1363 | |
| 1364 | PER_HEAP_ISOLATED |
| 1365 | BOOL should_proceed_with_gc(); |
| 1366 | |
| 1367 | PER_HEAP_ISOLATED |
| 1368 | void record_gcs_during_no_gc(); |
| 1369 | |
| 1370 | PER_HEAP |
| 1371 | BOOL find_loh_free_for_no_gc(); |
| 1372 | |
| 1373 | PER_HEAP |
| 1374 | BOOL find_loh_space_for_no_gc(); |
| 1375 | |
| 1376 | PER_HEAP |
| 1377 | BOOL commit_loh_for_no_gc (heap_segment* seg); |
| 1378 | |
| 1379 | PER_HEAP_ISOLATED |
| 1380 | start_no_gc_region_status prepare_for_no_gc_region (uint64_t total_size, |
| 1381 | BOOL loh_size_known, |
| 1382 | uint64_t loh_size, |
| 1383 | BOOL disallow_full_blocking); |
| 1384 | |
| 1385 | PER_HEAP |
| 1386 | BOOL loh_allocated_for_no_gc(); |
| 1387 | |
| 1388 | PER_HEAP_ISOLATED |
| 1389 | void release_no_gc_loh_segments(); |
| 1390 | |
| 1391 | PER_HEAP_ISOLATED |
| 1392 | void thread_no_gc_loh_segments(); |
| 1393 | |
| 1394 | PER_HEAP |
| 1395 | void check_and_set_no_gc_oom(); |
| 1396 | |
| 1397 | PER_HEAP |
| 1398 | void allocate_for_no_gc_after_gc(); |
| 1399 | |
| 1400 | PER_HEAP |
| 1401 | void set_loh_allocations_for_no_gc(); |
| 1402 | |
| 1403 | PER_HEAP |
| 1404 | void set_soh_allocations_for_no_gc(); |
| 1405 | |
| 1406 | PER_HEAP |
| 1407 | void prepare_for_no_gc_after_gc(); |
| 1408 | |
| 1409 | PER_HEAP_ISOLATED |
| 1410 | void set_allocations_for_no_gc(); |
| 1411 | |
| 1412 | PER_HEAP_ISOLATED |
| 1413 | BOOL should_proceed_for_no_gc(); |
| 1414 | |
| 1415 | PER_HEAP_ISOLATED |
| 1416 | start_no_gc_region_status get_start_no_gc_region_status(); |
| 1417 | |
| 1418 | PER_HEAP_ISOLATED |
| 1419 | end_no_gc_region_status end_no_gc_region(); |
| 1420 | |
| 1421 | PER_HEAP_ISOLATED |
| 1422 | void handle_failure_for_no_gc(); |
| 1423 | |
| 1424 | PER_HEAP |
| 1425 | void fire_etw_allocation_event (size_t allocation_amount, int gen_number, uint8_t* object_address); |
| 1426 | |
| 1427 | PER_HEAP |
| 1428 | void fire_etw_pin_object_event (uint8_t* object, uint8_t** ppObject); |
| 1429 | |
| 1430 | PER_HEAP |
| 1431 | size_t limit_from_size (size_t size, size_t room, int gen_number, |
| 1432 | int align_const); |
| 1433 | PER_HEAP |
| 1434 | int try_allocate_more_space (alloc_context* acontext, size_t jsize, |
| 1435 | int alloc_generation_number); |
| 1436 | PER_HEAP |
| 1437 | BOOL allocate_more_space (alloc_context* acontext, size_t jsize, |
| 1438 | int alloc_generation_number); |
| 1439 | |
| 1440 | PER_HEAP |
| 1441 | size_t get_full_compact_gc_count(); |
| 1442 | |
| 1443 | PER_HEAP |
| 1444 | BOOL short_on_end_of_seg (int gen_number, |
| 1445 | heap_segment* seg, |
| 1446 | int align_const); |
| 1447 | |
| 1448 | PER_HEAP |
| 1449 | BOOL a_fit_free_list_p (int gen_number, |
| 1450 | size_t size, |
| 1451 | alloc_context* acontext, |
| 1452 | int align_const); |
| 1453 | |
| 1454 | #ifdef BACKGROUND_GC |
| 1455 | PER_HEAP |
| 1456 | void wait_for_background (alloc_wait_reason awr, bool loh_p); |
| 1457 | |
| 1458 | PER_HEAP |
| 1459 | void wait_for_bgc_high_memory (alloc_wait_reason awr, bool loh_p); |
| 1460 | |
| 1461 | PER_HEAP |
| 1462 | void bgc_loh_alloc_clr (uint8_t* alloc_start, |
| 1463 | size_t size, |
| 1464 | alloc_context* acontext, |
| 1465 | int align_const, |
| 1466 | int lock_index, |
| 1467 | BOOL check_used_p, |
| 1468 | heap_segment* seg); |
| 1469 | #endif //BACKGROUND_GC |
| 1470 | |
| 1471 | #ifdef BACKGROUND_GC |
| 1472 | PER_HEAP |
| 1473 | void bgc_track_loh_alloc(); |
| 1474 | |
| 1475 | PER_HEAP |
| 1476 | void bgc_untrack_loh_alloc(); |
| 1477 | |
| 1478 | PER_HEAP |
| 1479 | BOOL bgc_loh_should_allocate(); |
| 1480 | #endif //BACKGROUND_GC |
| 1481 | |
| 1482 | #define max_saved_spinlock_info 48 |
| 1483 | |
| 1484 | #ifdef SPINLOCK_HISTORY |
| 1485 | PER_HEAP |
| 1486 | int spinlock_info_index; |
| 1487 | |
| 1488 | PER_HEAP |
| 1489 | spinlock_info last_spinlock_info[max_saved_spinlock_info + 8]; |
| 1490 | #endif //SPINLOCK_HISTORY |
| 1491 | |
| 1492 | PER_HEAP |
| 1493 | void add_saved_spinlock_info ( |
| 1494 | bool loh_p, |
| 1495 | msl_enter_state enter_state, |
| 1496 | msl_take_state take_state); |
| 1497 | |
| 1498 | PER_HEAP |
| 1499 | void trigger_gc_for_alloc (int gen_number, gc_reason reason, |
| 1500 | GCSpinLock* spin_lock, bool loh_p, |
| 1501 | msl_take_state take_state); |
| 1502 | |
| 1503 | PER_HEAP |
| 1504 | BOOL a_fit_free_list_large_p (size_t size, |
| 1505 | alloc_context* acontext, |
| 1506 | int align_const); |
| 1507 | |
| 1508 | PER_HEAP |
| 1509 | BOOL a_fit_segment_end_p (int gen_number, |
| 1510 | heap_segment* seg, |
| 1511 | size_t size, |
| 1512 | alloc_context* acontext, |
| 1513 | int align_const, |
| 1514 | BOOL* commit_failed_p); |
| 1515 | PER_HEAP |
| 1516 | BOOL loh_a_fit_segment_end_p (int gen_number, |
| 1517 | size_t size, |
| 1518 | alloc_context* acontext, |
| 1519 | int align_const, |
| 1520 | BOOL* commit_failed_p, |
| 1521 | oom_reason* oom_r); |
| 1522 | PER_HEAP |
| 1523 | BOOL loh_get_new_seg (generation* gen, |
| 1524 | size_t size, |
| 1525 | int align_const, |
| 1526 | BOOL* commit_failed_p, |
| 1527 | oom_reason* oom_r); |
| 1528 | |
| 1529 | PER_HEAP_ISOLATED |
| 1530 | size_t get_large_seg_size (size_t size); |
| 1531 | |
| 1532 | PER_HEAP |
| 1533 | BOOL retry_full_compact_gc (size_t size); |
| 1534 | |
| 1535 | PER_HEAP |
| 1536 | BOOL check_and_wait_for_bgc (alloc_wait_reason awr, |
| 1537 | BOOL* did_full_compact_gc, |
| 1538 | bool loh_p); |
| 1539 | |
| 1540 | PER_HEAP |
| 1541 | BOOL trigger_full_compact_gc (gc_reason gr, |
| 1542 | oom_reason* oom_r, |
| 1543 | bool loh_p); |
| 1544 | |
| 1545 | PER_HEAP |
| 1546 | BOOL trigger_ephemeral_gc (gc_reason gr); |
| 1547 | |
| 1548 | PER_HEAP |
| 1549 | BOOL soh_try_fit (int gen_number, |
| 1550 | size_t size, |
| 1551 | alloc_context* acontext, |
| 1552 | int align_const, |
| 1553 | BOOL* commit_failed_p, |
| 1554 | BOOL* short_seg_end_p); |
| 1555 | PER_HEAP |
| 1556 | BOOL loh_try_fit (int gen_number, |
| 1557 | size_t size, |
| 1558 | alloc_context* acontext, |
| 1559 | int align_const, |
| 1560 | BOOL* commit_failed_p, |
| 1561 | oom_reason* oom_r); |
| 1562 | |
| 1563 | PER_HEAP |
| 1564 | BOOL allocate_small (int gen_number, |
| 1565 | size_t size, |
| 1566 | alloc_context* acontext, |
| 1567 | int align_const); |
| 1568 | |
| 1569 | #ifdef RECORD_LOH_STATE |
| 1570 | #define max_saved_loh_states 12 |
| 1571 | PER_HEAP |
| 1572 | int loh_state_index; |
| 1573 | |
| 1574 | struct loh_state_info |
| 1575 | { |
| 1576 | allocation_state alloc_state; |
| 1577 | EEThreadId thread_id; |
| 1578 | }; |
| 1579 | |
| 1580 | PER_HEAP |
| 1581 | loh_state_info last_loh_states[max_saved_loh_states]; |
| 1582 | PER_HEAP |
| 1583 | void add_saved_loh_state (allocation_state loh_state_to_save, EEThreadId thread_id); |
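// A hedged sketch (not part of the class): loh_state_index together with the
// fixed-size last_loh_states array suggests a small circular history, so
// add_saved_loh_state plausibly does wrap-around bookkeeping like:
//
//     last_loh_states[loh_state_index].alloc_state = loh_state_to_save;
//     last_loh_states[loh_state_index].thread_id   = thread_id;
//     loh_state_index = (loh_state_index + 1) % max_saved_loh_states;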
| 1584 | #endif //RECORD_LOH_STATE |
| 1585 | PER_HEAP |
| 1586 | BOOL allocate_large (int gen_number, |
| 1587 | size_t size, |
| 1588 | alloc_context* acontext, |
| 1589 | int align_const); |
| 1590 | |
| 1591 | PER_HEAP_ISOLATED |
| 1592 | int init_semi_shared(); |
| 1593 | PER_HEAP |
| 1594 | int init_gc_heap (int heap_number); |
| 1595 | PER_HEAP |
| 1596 | void self_destroy(); |
| 1597 | PER_HEAP_ISOLATED |
| 1598 | void destroy_semi_shared(); |
| 1599 | PER_HEAP |
| 1600 | void repair_allocation_contexts (BOOL repair_p); |
| 1601 | PER_HEAP |
| 1602 | void fix_allocation_contexts (BOOL for_gc_p); |
| 1603 | PER_HEAP |
| 1604 | void fix_youngest_allocation_area (BOOL for_gc_p); |
| 1605 | PER_HEAP |
| 1606 | void fix_allocation_context (alloc_context* acontext, BOOL for_gc_p, |
| 1607 | int align_const); |
| 1608 | PER_HEAP |
| 1609 | void fix_large_allocation_area (BOOL for_gc_p); |
| 1610 | PER_HEAP |
| 1611 | void fix_older_allocation_area (generation* older_gen); |
| 1612 | PER_HEAP |
| 1613 | void set_allocation_heap_segment (generation* gen); |
| 1614 | PER_HEAP |
| 1615 | void reset_allocation_pointers (generation* gen, uint8_t* start); |
| 1616 | PER_HEAP |
| 1617 | int object_gennum (uint8_t* o); |
| 1618 | PER_HEAP |
| 1619 | int object_gennum_plan (uint8_t* o); |
| 1620 | PER_HEAP_ISOLATED |
| 1621 | void init_heap_segment (heap_segment* seg); |
| 1622 | PER_HEAP |
| 1623 | void delete_heap_segment (heap_segment* seg, BOOL consider_hoarding=FALSE); |
| 1624 | #ifdef FEATURE_BASICFREEZE |
| 1625 | PER_HEAP |
| 1626 | BOOL insert_ro_segment (heap_segment* seg); |
| 1627 | PER_HEAP |
| 1628 | void remove_ro_segment (heap_segment* seg); |
| 1629 | #endif //FEATURE_BASICFREEZE |
| 1630 | PER_HEAP |
| 1631 | BOOL set_ro_segment_in_range (heap_segment* seg); |
| 1632 | PER_HEAP |
| 1633 | BOOL unprotect_segment (heap_segment* seg); |
| 1634 | PER_HEAP |
| 1635 | heap_segment* soh_get_segment_to_expand(); |
| 1636 | PER_HEAP |
| 1637 | heap_segment* get_segment (size_t size, BOOL loh_p); |
| 1638 | PER_HEAP_ISOLATED |
| 1639 | void seg_mapping_table_add_segment (heap_segment* seg, gc_heap* hp); |
| 1640 | PER_HEAP_ISOLATED |
| 1641 | void seg_mapping_table_remove_segment (heap_segment* seg); |
| 1642 | PER_HEAP |
| 1643 | heap_segment* get_large_segment (size_t size, BOOL* did_full_compact_gc); |
| 1644 | PER_HEAP |
| 1645 | void thread_loh_segment (heap_segment* new_seg); |
| 1646 | PER_HEAP_ISOLATED |
| 1647 | heap_segment* get_segment_for_loh (size_t size |
| 1648 | #ifdef MULTIPLE_HEAPS |
| 1649 | , gc_heap* hp |
| 1650 | #endif //MULTIPLE_HEAPS |
| 1651 | ); |
| 1652 | PER_HEAP |
| 1653 | void reset_heap_segment_pages (heap_segment* seg); |
| 1654 | PER_HEAP |
void decommit_heap_segment_pages (heap_segment* seg, size_t);
| 1656 | PER_HEAP |
| 1657 | void decommit_heap_segment (heap_segment* seg); |
| 1658 | PER_HEAP |
| 1659 | void clear_gen0_bricks(); |
| 1660 | #ifdef BACKGROUND_GC |
| 1661 | PER_HEAP |
| 1662 | void rearrange_small_heap_segments(); |
| 1663 | #endif //BACKGROUND_GC |
| 1664 | PER_HEAP |
| 1665 | void rearrange_large_heap_segments(); |
| 1666 | PER_HEAP |
| 1667 | void rearrange_heap_segments(BOOL compacting); |
| 1668 | |
| 1669 | PER_HEAP_ISOLATED |
| 1670 | void reset_write_watch_for_gc_heap(void* base_address, size_t region_size); |
| 1671 | PER_HEAP_ISOLATED |
| 1672 | void get_write_watch_for_gc_heap(bool reset, void *base_address, size_t region_size, void** dirty_pages, uintptr_t* dirty_page_count_ref, bool is_runtime_suspended); |
| 1673 | |
| 1674 | PER_HEAP |
| 1675 | void switch_one_quantum(); |
| 1676 | PER_HEAP |
| 1677 | void reset_ww_by_chunk (uint8_t* start_address, size_t total_reset_size); |
| 1678 | PER_HEAP |
| 1679 | void switch_on_reset (BOOL concurrent_p, size_t* current_total_reset_size, size_t last_reset_size); |
| 1680 | PER_HEAP |
| 1681 | void reset_write_watch (BOOL concurrent_p); |
| 1682 | PER_HEAP |
| 1683 | void adjust_ephemeral_limits(); |
| 1684 | PER_HEAP |
| 1685 | void make_generation (generation& gen, heap_segment* seg, |
| 1686 | uint8_t* start, uint8_t* pointer); |
| 1687 | |
| 1688 | |
| 1689 | #define USE_PADDING_FRONT 1 |
| 1690 | #define USE_PADDING_TAIL 2 |
| 1691 | |
| 1692 | PER_HEAP |
| 1693 | BOOL size_fit_p (size_t size REQD_ALIGN_AND_OFFSET_DCL, uint8_t* alloc_pointer, uint8_t* alloc_limit, |
| 1694 | uint8_t* old_loc=0, int use_padding=USE_PADDING_TAIL); |
| 1695 | PER_HEAP |
| 1696 | BOOL a_size_fit_p (size_t size, uint8_t* alloc_pointer, uint8_t* alloc_limit, |
| 1697 | int align_const); |
| 1698 | |
| 1699 | PER_HEAP |
| 1700 | void handle_oom (int heap_num, oom_reason reason, size_t alloc_size, |
| 1701 | uint8_t* allocated, uint8_t* reserved); |
| 1702 | |
| 1703 | PER_HEAP |
| 1704 | size_t card_of ( uint8_t* object); |
| 1705 | PER_HEAP |
| 1706 | uint8_t* brick_address (size_t brick); |
| 1707 | PER_HEAP |
| 1708 | size_t brick_of (uint8_t* add); |
| 1709 | PER_HEAP |
| 1710 | uint8_t* card_address (size_t card); |
| 1711 | PER_HEAP |
| 1712 | size_t card_to_brick (size_t card); |
| 1713 | PER_HEAP |
| 1714 | void clear_card (size_t card); |
| 1715 | PER_HEAP |
| 1716 | void set_card (size_t card); |
| 1717 | PER_HEAP |
| 1718 | BOOL card_set_p (size_t card); |
| 1719 | PER_HEAP |
| 1720 | void card_table_set_bit (uint8_t* location); |
| 1721 | |
| 1722 | #ifdef CARD_BUNDLE |
| 1723 | PER_HEAP |
| 1724 | void update_card_table_bundle(); |
| 1725 | PER_HEAP |
| 1726 | void reset_card_table_write_watch(); |
| 1727 | PER_HEAP |
| 1728 | void card_bundle_clear(size_t cardb); |
| 1729 | PER_HEAP |
| 1730 | void card_bundle_set (size_t cardb); |
| 1731 | PER_HEAP |
| 1732 | void card_bundles_set (size_t start_cardb, size_t end_cardb); |
| 1733 | PER_HEAP |
| 1734 | void verify_card_bundle_bits_set(size_t first_card_word, size_t last_card_word); |
| 1735 | PER_HEAP |
| 1736 | void verify_card_bundles(); |
| 1737 | PER_HEAP |
| 1738 | BOOL card_bundle_set_p (size_t cardb); |
| 1739 | PER_HEAP |
| 1740 | BOOL find_card_dword (size_t& cardw, size_t cardw_end); |
| 1741 | PER_HEAP |
| 1742 | void enable_card_bundles(); |
| 1743 | PER_HEAP_ISOLATED |
| 1744 | BOOL card_bundles_enabled(); |
| 1745 | |
| 1746 | #endif //CARD_BUNDLE |
| 1747 | |
| 1748 | PER_HEAP |
| 1749 | BOOL find_card (uint32_t* card_table, size_t& card, |
| 1750 | size_t card_word_end, size_t& end_card); |
| 1751 | PER_HEAP |
| 1752 | BOOL grow_heap_segment (heap_segment* seg, uint8_t* high_address); |
| 1753 | PER_HEAP |
| 1754 | int grow_heap_segment (heap_segment* seg, uint8_t* high_address, uint8_t* old_loc, size_t size, BOOL pad_front_p REQD_ALIGN_AND_OFFSET_DCL); |
| 1755 | PER_HEAP |
| 1756 | void copy_brick_card_range (uint8_t* la, uint32_t* old_card_table, |
| 1757 | short* old_brick_table, |
| 1758 | heap_segment* seg, |
| 1759 | uint8_t* start, uint8_t* end); |
| 1760 | PER_HEAP |
| 1761 | void init_brick_card_range (heap_segment* seg); |
| 1762 | PER_HEAP |
| 1763 | void copy_brick_card_table_l_heap (); |
| 1764 | PER_HEAP |
| 1765 | void copy_brick_card_table(); |
| 1766 | PER_HEAP |
| 1767 | void clear_brick_table (uint8_t* from, uint8_t* end); |
| 1768 | PER_HEAP |
| 1769 | void set_brick (size_t index, ptrdiff_t val); |
| 1770 | PER_HEAP |
| 1771 | int get_brick_entry (size_t index); |
| 1772 | #ifdef MARK_ARRAY |
| 1773 | PER_HEAP |
| 1774 | unsigned int mark_array_marked (uint8_t* add); |
| 1775 | PER_HEAP |
| 1776 | void mark_array_set_marked (uint8_t* add); |
| 1777 | PER_HEAP |
| 1778 | BOOL is_mark_bit_set (uint8_t* add); |
| 1779 | PER_HEAP |
| 1780 | void gmark_array_set_marked (uint8_t* add); |
| 1781 | PER_HEAP |
| 1782 | void set_mark_array_bit (size_t mark_bit); |
| 1783 | PER_HEAP |
| 1784 | BOOL mark_array_bit_set (size_t mark_bit); |
| 1785 | PER_HEAP |
| 1786 | void mark_array_clear_marked (uint8_t* add); |
| 1787 | PER_HEAP |
| 1788 | void clear_mark_array (uint8_t* from, uint8_t* end, BOOL check_only=TRUE |
| 1789 | #ifdef FEATURE_BASICFREEZE |
| 1790 | , BOOL read_only=FALSE |
| 1791 | #endif // FEATURE_BASICFREEZE |
| 1792 | ); |
| 1793 | #ifdef BACKGROUND_GC |
| 1794 | PER_HEAP |
| 1795 | void seg_clear_mark_array_bits_soh (heap_segment* seg); |
| 1796 | PER_HEAP |
| 1797 | void clear_batch_mark_array_bits (uint8_t* start, uint8_t* end); |
| 1798 | PER_HEAP |
| 1799 | void bgc_clear_batch_mark_array_bits (uint8_t* start, uint8_t* end); |
| 1800 | PER_HEAP |
| 1801 | void clear_mark_array_by_objects (uint8_t* from, uint8_t* end, BOOL loh_p); |
| 1802 | #ifdef VERIFY_HEAP |
| 1803 | PER_HEAP |
| 1804 | void set_batch_mark_array_bits (uint8_t* start, uint8_t* end); |
| 1805 | PER_HEAP |
| 1806 | void check_batch_mark_array_bits (uint8_t* start, uint8_t* end); |
| 1807 | #endif //VERIFY_HEAP |
| 1808 | #endif //BACKGROUND_GC |
| 1809 | #endif //MARK_ARRAY |
| 1810 | |
| 1811 | PER_HEAP |
| 1812 | BOOL large_object_marked (uint8_t* o, BOOL clearp); |
| 1813 | |
| 1814 | #ifdef BACKGROUND_GC |
| 1815 | PER_HEAP |
| 1816 | BOOL background_allowed_p(); |
| 1817 | #endif //BACKGROUND_GC |
| 1818 | |
| 1819 | PER_HEAP_ISOLATED |
| 1820 | void send_full_gc_notification (int gen_num, BOOL due_to_alloc_p); |
| 1821 | |
| 1822 | PER_HEAP |
| 1823 | void check_for_full_gc (int gen_num, size_t size); |
| 1824 | |
| 1825 | PER_HEAP |
| 1826 | void adjust_limit (uint8_t* start, size_t limit_size, generation* gen, |
| 1827 | int gen_number); |
| 1828 | PER_HEAP |
| 1829 | void adjust_limit_clr (uint8_t* start, size_t limit_size, |
| 1830 | alloc_context* acontext, heap_segment* seg, |
| 1831 | int align_const, int gen_number); |
| 1832 | PER_HEAP |
| 1833 | void leave_allocation_segment (generation* gen); |
| 1834 | |
| 1835 | PER_HEAP |
| 1836 | void init_free_and_plug(); |
| 1837 | |
| 1838 | PER_HEAP |
| 1839 | void print_free_and_plug (const char* msg); |
| 1840 | |
| 1841 | PER_HEAP |
| 1842 | void add_gen_plug (int gen_number, size_t plug_size); |
| 1843 | |
| 1844 | PER_HEAP |
| 1845 | void add_gen_free (int gen_number, size_t free_size); |
| 1846 | |
| 1847 | PER_HEAP |
| 1848 | void add_item_to_current_pinned_free (int gen_number, size_t free_size); |
| 1849 | |
| 1850 | PER_HEAP |
| 1851 | void remove_gen_free (int gen_number, size_t free_size); |
| 1852 | |
| 1853 | PER_HEAP |
| 1854 | uint8_t* allocate_in_older_generation (generation* gen, size_t size, |
| 1855 | int from_gen_number, |
| 1856 | uint8_t* old_loc=0 |
| 1857 | REQD_ALIGN_AND_OFFSET_DEFAULT_DCL); |
| 1858 | PER_HEAP |
| 1859 | generation* ensure_ephemeral_heap_segment (generation* consing_gen); |
| 1860 | PER_HEAP |
| 1861 | uint8_t* allocate_in_condemned_generations (generation* gen, |
| 1862 | size_t size, |
| 1863 | int from_gen_number, |
| 1864 | #ifdef SHORT_PLUGS |
| 1865 | BOOL* convert_to_pinned_p=NULL, |
| 1866 | uint8_t* next_pinned_plug=0, |
| 1867 | heap_segment* current_seg=0, |
| 1868 | #endif //SHORT_PLUGS |
| 1869 | uint8_t* old_loc=0 |
| 1870 | REQD_ALIGN_AND_OFFSET_DEFAULT_DCL); |
| 1871 | #ifdef INTERIOR_POINTERS |
| 1872 | // Verifies that interior is actually in the range of seg; otherwise |
| 1873 | // returns 0. |
| 1874 | PER_HEAP_ISOLATED |
| 1875 | heap_segment* find_segment (uint8_t* interior, BOOL small_segment_only_p); |
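// A hedged usage sketch (illustrative only): callers probe a possibly
// interior pointer and treat a 0 result as "not in any GC segment";
// small_segment_only_p restricts the lookup to small object segments.
//
//     heap_segment* seg = find_segment (interior, FALSE);
//     if (seg == 0)
//     {
//         // interior does not point into the GC heap
//     }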
| 1876 | |
| 1877 | PER_HEAP |
| 1878 | heap_segment* find_segment_per_heap (uint8_t* interior, BOOL small_segment_only_p); |
| 1879 | |
| 1880 | PER_HEAP |
| 1881 | uint8_t* find_object_for_relocation (uint8_t* o, uint8_t* low, uint8_t* high); |
| 1882 | #endif //INTERIOR_POINTERS |
| 1883 | |
| 1884 | PER_HEAP_ISOLATED |
| 1885 | gc_heap* heap_of (uint8_t* object); |
| 1886 | |
| 1887 | PER_HEAP_ISOLATED |
| 1888 | gc_heap* heap_of_gc (uint8_t* object); |
| 1889 | |
| 1890 | PER_HEAP_ISOLATED |
| 1891 | size_t& promoted_bytes (int); |
| 1892 | |
| 1893 | PER_HEAP |
| 1894 | uint8_t* find_object (uint8_t* o, uint8_t* low); |
| 1895 | |
| 1896 | PER_HEAP |
| 1897 | dynamic_data* dynamic_data_of (int gen_number); |
| 1898 | PER_HEAP |
| 1899 | ptrdiff_t get_desired_allocation (int gen_number); |
| 1900 | PER_HEAP |
| 1901 | ptrdiff_t get_new_allocation (int gen_number); |
| 1902 | PER_HEAP |
| 1903 | ptrdiff_t get_allocation (int gen_number); |
| 1904 | PER_HEAP |
| 1905 | bool new_allocation_allowed (int gen_number); |
| 1906 | #ifdef BACKGROUND_GC |
| 1907 | PER_HEAP_ISOLATED |
| 1908 | void allow_new_allocation (int gen_number); |
| 1909 | PER_HEAP_ISOLATED |
| 1910 | void disallow_new_allocation (int gen_number); |
| 1911 | #endif //BACKGROUND_GC |
| 1912 | PER_HEAP |
| 1913 | void reset_pinned_queue(); |
| 1914 | PER_HEAP |
| 1915 | void reset_pinned_queue_bos(); |
| 1916 | PER_HEAP |
| 1917 | void set_allocator_next_pin (generation* gen); |
| 1918 | PER_HEAP |
| 1919 | void set_allocator_next_pin (uint8_t* alloc_pointer, uint8_t*& alloc_limit); |
| 1920 | PER_HEAP |
| 1921 | void enque_pinned_plug (generation* gen, uint8_t* plug, size_t len); |
| 1922 | PER_HEAP |
| 1923 | void enque_pinned_plug (uint8_t* plug, |
| 1924 | BOOL save_pre_plug_info_p, |
| 1925 | uint8_t* last_object_in_last_plug); |
| 1926 | PER_HEAP |
| 1927 | void merge_with_last_pinned_plug (uint8_t* last_pinned_plug, size_t plug_size); |
| 1928 | PER_HEAP |
| 1929 | void set_pinned_info (uint8_t* last_pinned_plug, |
| 1930 | size_t plug_len, |
| 1931 | uint8_t* alloc_pointer, |
| 1932 | uint8_t*& alloc_limit); |
| 1933 | PER_HEAP |
| 1934 | void set_pinned_info (uint8_t* last_pinned_plug, size_t plug_len, generation* gen); |
| 1935 | PER_HEAP |
| 1936 | void save_post_plug_info (uint8_t* last_pinned_plug, uint8_t* last_object_in_last_plug, uint8_t* post_plug); |
| 1937 | PER_HEAP |
| 1938 | size_t deque_pinned_plug (); |
| 1939 | PER_HEAP |
| 1940 | mark* pinned_plug_of (size_t bos); |
| 1941 | PER_HEAP |
| 1942 | mark* oldest_pin (); |
| 1943 | PER_HEAP |
| 1944 | mark* before_oldest_pin(); |
| 1945 | PER_HEAP |
| 1946 | BOOL pinned_plug_que_empty_p (); |
| 1947 | PER_HEAP |
| 1948 | void make_mark_stack (mark* arr); |
| 1949 | #ifdef MH_SC_MARK |
| 1950 | PER_HEAP |
| 1951 | int& mark_stack_busy(); |
| 1952 | PER_HEAP |
| 1953 | VOLATILE(uint8_t*)& ref_mark_stack (gc_heap* hp, int index); |
#endif //MH_SC_MARK
| 1955 | #ifdef BACKGROUND_GC |
| 1956 | PER_HEAP_ISOLATED |
| 1957 | size_t& bpromoted_bytes (int); |
| 1958 | PER_HEAP |
| 1959 | void make_background_mark_stack (uint8_t** arr); |
| 1960 | PER_HEAP |
| 1961 | void make_c_mark_list (uint8_t** arr); |
| 1962 | #endif //BACKGROUND_GC |
| 1963 | PER_HEAP |
| 1964 | generation* generation_of (int n); |
| 1965 | PER_HEAP |
| 1966 | BOOL gc_mark1 (uint8_t* o); |
| 1967 | PER_HEAP |
| 1968 | BOOL gc_mark (uint8_t* o, uint8_t* low, uint8_t* high); |
| 1969 | PER_HEAP |
| 1970 | uint8_t* mark_object(uint8_t* o THREAD_NUMBER_DCL); |
| 1971 | #ifdef HEAP_ANALYZE |
| 1972 | PER_HEAP |
| 1973 | void ha_mark_object_simple (uint8_t** o THREAD_NUMBER_DCL); |
| 1974 | #endif //HEAP_ANALYZE |
| 1975 | PER_HEAP |
| 1976 | void mark_object_simple (uint8_t** o THREAD_NUMBER_DCL); |
| 1977 | PER_HEAP |
| 1978 | void mark_object_simple1 (uint8_t* o, uint8_t* start THREAD_NUMBER_DCL); |
| 1979 | |
| 1980 | #ifdef MH_SC_MARK |
| 1981 | PER_HEAP |
| 1982 | void mark_steal (); |
| 1983 | #endif //MH_SC_MARK |
| 1984 | |
| 1985 | #ifdef BACKGROUND_GC |
| 1986 | |
| 1987 | PER_HEAP |
| 1988 | BOOL background_marked (uint8_t* o); |
| 1989 | PER_HEAP |
| 1990 | BOOL background_mark1 (uint8_t* o); |
| 1991 | PER_HEAP |
| 1992 | BOOL background_mark (uint8_t* o, uint8_t* low, uint8_t* high); |
| 1993 | PER_HEAP |
| 1994 | uint8_t* background_mark_object (uint8_t* o THREAD_NUMBER_DCL); |
| 1995 | PER_HEAP |
| 1996 | void background_mark_simple (uint8_t* o THREAD_NUMBER_DCL); |
| 1997 | PER_HEAP |
| 1998 | void background_mark_simple1 (uint8_t* o THREAD_NUMBER_DCL); |
| 1999 | PER_HEAP_ISOLATED |
| 2000 | void background_promote (Object**, ScanContext* , uint32_t); |
| 2001 | PER_HEAP |
| 2002 | BOOL background_object_marked (uint8_t* o, BOOL clearp); |
| 2003 | PER_HEAP |
| 2004 | void init_background_gc(); |
| 2005 | PER_HEAP |
| 2006 | uint8_t* background_next_end (heap_segment*, BOOL); |
// While we are in the LOH sweep we can't modify the segment list, so we
// mark segments as to-be-deleted and delete them at the next chance we get.
| 2010 | PER_HEAP |
| 2011 | void background_delay_delete_loh_segments(); |
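// Illustrative sketch of the deferred deletion described above (start_seg,
// segment_marked_for_delete_p and heap_segment_next are placeholder/assumed
// names used only for shape; delete_heap_segment is the member declared
// earlier in this class):
//
//     for (heap_segment* seg = start_seg; seg != 0; )
//     {
//         heap_segment* next_seg = heap_segment_next (seg);
//         if (segment_marked_for_delete_p (seg))
//             delete_heap_segment (seg);
//         seg = next_seg;
//     }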
| 2012 | PER_HEAP |
| 2013 | void generation_delete_heap_segment (generation*, |
| 2014 | heap_segment*, heap_segment*, heap_segment*); |
| 2015 | PER_HEAP |
| 2016 | void set_mem_verify (uint8_t*, uint8_t*, uint8_t); |
| 2017 | PER_HEAP |
| 2018 | void process_background_segment_end (heap_segment*, generation*, uint8_t*, |
| 2019 | heap_segment*, BOOL*); |
| 2020 | PER_HEAP |
| 2021 | void process_n_background_segments (heap_segment*, heap_segment*, generation* gen); |
| 2022 | PER_HEAP |
| 2023 | BOOL fgc_should_consider_object (uint8_t* o, |
| 2024 | heap_segment* seg, |
| 2025 | BOOL consider_bgc_mark_p, |
| 2026 | BOOL check_current_sweep_p, |
| 2027 | BOOL check_saved_sweep_p); |
| 2028 | PER_HEAP |
| 2029 | void should_check_bgc_mark (heap_segment* seg, |
| 2030 | BOOL* consider_bgc_mark_p, |
| 2031 | BOOL* check_current_sweep_p, |
| 2032 | BOOL* check_saved_sweep_p); |
| 2033 | PER_HEAP |
| 2034 | void background_ephemeral_sweep(); |
| 2035 | PER_HEAP |
| 2036 | void background_sweep (); |
| 2037 | PER_HEAP |
| 2038 | void background_mark_through_object (uint8_t* oo THREAD_NUMBER_DCL); |
| 2039 | PER_HEAP |
| 2040 | uint8_t* background_seg_end (heap_segment* seg, BOOL concurrent_p); |
| 2041 | PER_HEAP |
| 2042 | uint8_t* background_first_overflow (uint8_t* min_add, |
| 2043 | heap_segment* seg, |
| 2044 | BOOL concurrent_p, |
| 2045 | BOOL small_object_p); |
| 2046 | PER_HEAP |
| 2047 | void background_process_mark_overflow_internal (int condemned_gen_number, |
| 2048 | uint8_t* min_add, uint8_t* max_add, |
| 2049 | BOOL concurrent_p); |
| 2050 | PER_HEAP |
| 2051 | BOOL background_process_mark_overflow (BOOL concurrent_p); |
| 2052 | |
| 2053 | // for foreground GC to get hold of background structures containing refs |
| 2054 | PER_HEAP |
void scan_background_roots (promote_func* fn, int hn, ScanContext *pSC);
| 2057 | |
| 2058 | PER_HEAP |
| 2059 | BOOL bgc_mark_array_range (heap_segment* seg, |
| 2060 | BOOL whole_seg_p, |
| 2061 | uint8_t** range_beg, |
| 2062 | uint8_t** range_end); |
| 2063 | PER_HEAP |
| 2064 | void bgc_verify_mark_array_cleared (heap_segment* seg); |
| 2065 | PER_HEAP |
| 2066 | void verify_mark_bits_cleared (uint8_t* obj, size_t s); |
| 2067 | PER_HEAP |
| 2068 | void clear_all_mark_array(); |
| 2069 | #endif //BACKGROUND_GC |
| 2070 | |
| 2071 | PER_HEAP |
| 2072 | uint8_t* next_end (heap_segment* seg, uint8_t* f); |
| 2073 | PER_HEAP |
| 2074 | void mark_through_object (uint8_t* oo, BOOL mark_class_object_p THREAD_NUMBER_DCL); |
| 2075 | PER_HEAP |
| 2076 | BOOL process_mark_overflow (int condemned_gen_number); |
| 2077 | PER_HEAP |
| 2078 | void process_mark_overflow_internal (int condemned_gen_number, |
| 2079 | uint8_t* min_address, uint8_t* max_address); |
| 2080 | |
| 2081 | #ifdef SNOOP_STATS |
| 2082 | PER_HEAP |
| 2083 | void print_snoop_stat(); |
| 2084 | #endif //SNOOP_STATS |
| 2085 | |
| 2086 | #ifdef MH_SC_MARK |
| 2087 | |
| 2088 | PER_HEAP |
| 2089 | BOOL check_next_mark_stack (gc_heap* next_heap); |
| 2090 | |
| 2091 | #endif //MH_SC_MARK |
| 2092 | |
| 2093 | PER_HEAP |
| 2094 | void scan_dependent_handles (int condemned_gen_number, ScanContext *sc, BOOL initial_scan_p); |
| 2095 | |
| 2096 | PER_HEAP |
| 2097 | void mark_phase (int condemned_gen_number, BOOL mark_only_p); |
| 2098 | |
| 2099 | PER_HEAP |
| 2100 | void pin_object (uint8_t* o, uint8_t** ppObject, uint8_t* low, uint8_t* high); |
| 2101 | |
| 2102 | #if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE) |
| 2103 | PER_HEAP_ISOLATED |
| 2104 | size_t get_total_pinned_objects(); |
| 2105 | #endif //ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE |
| 2106 | |
| 2107 | PER_HEAP |
| 2108 | void reset_mark_stack (); |
| 2109 | PER_HEAP |
| 2110 | uint8_t* insert_node (uint8_t* new_node, size_t sequence_number, |
| 2111 | uint8_t* tree, uint8_t* last_node); |
| 2112 | PER_HEAP |
| 2113 | size_t update_brick_table (uint8_t* tree, size_t current_brick, |
| 2114 | uint8_t* x, uint8_t* plug_end); |
| 2115 | |
| 2116 | PER_HEAP |
| 2117 | void plan_generation_start (generation* gen, generation* consing_gen, uint8_t* next_plug_to_allocate); |
| 2118 | |
| 2119 | PER_HEAP |
| 2120 | void realloc_plan_generation_start (generation* gen, generation* consing_gen); |
| 2121 | |
| 2122 | PER_HEAP |
| 2123 | void plan_generation_starts (generation*& consing_gen); |
| 2124 | |
| 2125 | PER_HEAP |
| 2126 | void advance_pins_for_demotion (generation* gen); |
| 2127 | |
| 2128 | PER_HEAP |
| 2129 | void process_ephemeral_boundaries(uint8_t* x, int& active_new_gen_number, |
| 2130 | int& active_old_gen_number, |
| 2131 | generation*& consing_gen, |
| 2132 | BOOL& allocate_in_condemned); |
| 2133 | PER_HEAP |
| 2134 | void seg_clear_mark_bits (heap_segment* seg); |
| 2135 | PER_HEAP |
| 2136 | void sweep_ro_segments (heap_segment* start_seg); |
| 2137 | PER_HEAP |
| 2138 | void convert_to_pinned_plug (BOOL& last_npinned_plug_p, |
| 2139 | BOOL& last_pinned_plug_p, |
| 2140 | BOOL& pinned_plug_p, |
| 2141 | size_t ps, |
| 2142 | size_t& artificial_pinned_size); |
| 2143 | PER_HEAP |
| 2144 | void store_plug_gap_info (uint8_t* plug_start, |
| 2145 | uint8_t* plug_end, |
| 2146 | BOOL& last_npinned_plug_p, |
| 2147 | BOOL& last_pinned_plug_p, |
| 2148 | uint8_t*& last_pinned_plug, |
| 2149 | BOOL& pinned_plug_p, |
| 2150 | uint8_t* last_object_in_last_plug, |
| 2151 | BOOL& merge_with_last_pin_p, |
// this is only for verification purposes
| 2153 | size_t last_plug_len); |
| 2154 | PER_HEAP |
| 2155 | void plan_phase (int condemned_gen_number); |
| 2156 | |
| 2157 | PER_HEAP |
| 2158 | void record_interesting_data_point (interesting_data_point idp); |
| 2159 | |
| 2160 | #ifdef GC_CONFIG_DRIVEN |
| 2161 | PER_HEAP |
| 2162 | void record_interesting_info_per_heap(); |
| 2163 | PER_HEAP_ISOLATED |
| 2164 | void record_global_mechanisms(); |
| 2165 | PER_HEAP_ISOLATED |
| 2166 | BOOL should_do_sweeping_gc (BOOL compact_p); |
| 2167 | #endif //GC_CONFIG_DRIVEN |
| 2168 | |
| 2169 | #ifdef FEATURE_LOH_COMPACTION |
| 2170 | // plan_loh can allocate memory so it can fail. If it fails, we will |
| 2171 | // fall back to sweeping. |
| 2172 | PER_HEAP |
| 2173 | BOOL plan_loh(); |
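// A minimal caller-side sketch of the fallback described above (illustrative
// only; the surrounding plan-phase control flow is assumed, not quoted):
//
//     loh_compacted_p = plan_loh();   // may fail: plan_loh allocates memory
//     if (!loh_compacted_p)
//     {
//         // fall back to sweeping the large object heap
//     }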
| 2174 | |
| 2175 | PER_HEAP |
| 2176 | void compact_loh(); |
| 2177 | |
| 2178 | PER_HEAP |
| 2179 | void relocate_in_loh_compact(); |
| 2180 | |
| 2181 | PER_HEAP |
| 2182 | void walk_relocation_for_loh (void* profiling_context, record_surv_fn fn); |
| 2183 | |
| 2184 | PER_HEAP |
| 2185 | BOOL loh_enque_pinned_plug (uint8_t* plug, size_t len); |
| 2186 | |
| 2187 | PER_HEAP |
| 2188 | void loh_set_allocator_next_pin(); |
| 2189 | |
| 2190 | PER_HEAP |
| 2191 | BOOL loh_pinned_plug_que_empty_p(); |
| 2192 | |
| 2193 | PER_HEAP |
| 2194 | size_t loh_deque_pinned_plug(); |
| 2195 | |
| 2196 | PER_HEAP |
| 2197 | mark* loh_pinned_plug_of (size_t bos); |
| 2198 | |
| 2199 | PER_HEAP |
| 2200 | mark* loh_oldest_pin(); |
| 2201 | |
| 2202 | PER_HEAP |
| 2203 | BOOL loh_size_fit_p (size_t size, uint8_t* alloc_pointer, uint8_t* alloc_limit); |
| 2204 | |
| 2205 | PER_HEAP |
| 2206 | uint8_t* loh_allocate_in_condemned (uint8_t* old_loc, size_t size); |
| 2207 | |
| 2208 | PER_HEAP_ISOLATED |
| 2209 | BOOL loh_object_p (uint8_t* o); |
| 2210 | |
| 2211 | PER_HEAP_ISOLATED |
| 2212 | BOOL should_compact_loh(); |
| 2213 | |
| 2214 | // If the LOH compaction mode is just to compact once, |
| 2215 | // we need to see if we should reset it back to not compact. |
| 2216 | // We would only reset if every heap's LOH was compacted. |
| 2217 | PER_HEAP_ISOLATED |
| 2218 | void check_loh_compact_mode (BOOL all_heaps_compacted_p); |
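// A hedged sketch of the one-shot reset described above (the enumerator
// names loh_compaction_once and loh_compaction_default are assumed here):
//
//     if ((loh_compaction_mode == loh_compaction_once) && all_heaps_compacted_p)
//     {
//         loh_compaction_mode = loh_compaction_default;
//     }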
| 2219 | #endif //FEATURE_LOH_COMPACTION |
| 2220 | |
| 2221 | PER_HEAP |
| 2222 | void decommit_ephemeral_segment_pages (int condemned_gen_number); |
| 2223 | PER_HEAP |
| 2224 | void fix_generation_bounds (int condemned_gen_number, |
| 2225 | generation* consing_gen); |
| 2226 | PER_HEAP |
| 2227 | uint8_t* generation_limit (int gen_number); |
| 2228 | |
| 2229 | struct make_free_args |
| 2230 | { |
| 2231 | int free_list_gen_number; |
| 2232 | uint8_t* current_gen_limit; |
| 2233 | generation* free_list_gen; |
| 2234 | uint8_t* highest_plug; |
| 2235 | }; |
| 2236 | PER_HEAP |
| 2237 | uint8_t* allocate_at_end (size_t size); |
| 2238 | PER_HEAP |
| 2239 | BOOL ensure_gap_allocation (int condemned_gen_number); |
| 2240 | // make_free_lists is only called by blocking GCs. |
| 2241 | PER_HEAP |
| 2242 | void make_free_lists (int condemned_gen_number); |
| 2243 | PER_HEAP |
| 2244 | void make_free_list_in_brick (uint8_t* tree, make_free_args* args); |
| 2245 | PER_HEAP |
| 2246 | void thread_gap (uint8_t* gap_start, size_t size, generation* gen); |
| 2247 | PER_HEAP |
| 2248 | void loh_thread_gap_front (uint8_t* gap_start, size_t size, generation* gen); |
| 2249 | PER_HEAP |
| 2250 | void make_unused_array (uint8_t* x, size_t size, BOOL clearp=FALSE, BOOL resetp=FALSE); |
| 2251 | PER_HEAP |
| 2252 | void clear_unused_array (uint8_t* x, size_t size); |
| 2253 | PER_HEAP |
| 2254 | void relocate_address (uint8_t** old_address THREAD_NUMBER_DCL); |
| 2255 | struct relocate_args |
| 2256 | { |
| 2257 | uint8_t* last_plug; |
| 2258 | uint8_t* low; |
| 2259 | uint8_t* high; |
| 2260 | BOOL is_shortened; |
| 2261 | mark* pinned_plug_entry; |
| 2262 | }; |
| 2263 | |
| 2264 | PER_HEAP |
| 2265 | void reloc_survivor_helper (uint8_t** pval); |
| 2266 | PER_HEAP |
| 2267 | void check_class_object_demotion (uint8_t* obj); |
| 2268 | PER_HEAP |
| 2269 | void check_class_object_demotion_internal (uint8_t* obj); |
| 2270 | |
| 2271 | PER_HEAP |
| 2272 | void check_demotion_helper (uint8_t** pval, uint8_t* parent_obj); |
| 2273 | |
| 2274 | PER_HEAP |
| 2275 | void relocate_survivor_helper (uint8_t* plug, uint8_t* plug_end); |
| 2276 | |
| 2277 | PER_HEAP |
| 2278 | void verify_pins_with_post_plug_info (const char* msg); |
| 2279 | |
| 2280 | #ifdef COLLECTIBLE_CLASS |
| 2281 | PER_HEAP |
| 2282 | void unconditional_set_card_collectible (uint8_t* obj); |
| 2283 | #endif //COLLECTIBLE_CLASS |
| 2284 | |
| 2285 | PER_HEAP |
| 2286 | void relocate_shortened_survivor_helper (uint8_t* plug, uint8_t* plug_end, mark* pinned_plug_entry); |
| 2287 | |
| 2288 | PER_HEAP |
| 2289 | void relocate_obj_helper (uint8_t* x, size_t s); |
| 2290 | |
| 2291 | PER_HEAP |
| 2292 | void reloc_ref_in_shortened_obj (uint8_t** address_to_set_card, uint8_t** address_to_reloc); |
| 2293 | |
| 2294 | PER_HEAP |
| 2295 | void relocate_pre_plug_info (mark* pinned_plug_entry); |
| 2296 | |
| 2297 | PER_HEAP |
| 2298 | void relocate_shortened_obj_helper (uint8_t* x, size_t s, uint8_t* end, mark* pinned_plug_entry, BOOL is_pinned); |
| 2299 | |
| 2300 | PER_HEAP |
| 2301 | void relocate_survivors_in_plug (uint8_t* plug, uint8_t* plug_end, |
| 2302 | BOOL check_last_object_p, |
| 2303 | mark* pinned_plug_entry); |
| 2304 | PER_HEAP |
| 2305 | void relocate_survivors_in_brick (uint8_t* tree, relocate_args* args); |
| 2306 | |
| 2307 | PER_HEAP |
| 2308 | void update_oldest_pinned_plug(); |
| 2309 | |
| 2310 | PER_HEAP |
| 2311 | void relocate_survivors (int condemned_gen_number, |
| 2312 | uint8_t* first_condemned_address ); |
| 2313 | PER_HEAP |
| 2314 | void relocate_phase (int condemned_gen_number, |
| 2315 | uint8_t* first_condemned_address); |
| 2316 | |
| 2317 | struct compact_args |
| 2318 | { |
| 2319 | BOOL copy_cards_p; |
| 2320 | uint8_t* last_plug; |
| 2321 | ptrdiff_t last_plug_relocation; |
| 2322 | uint8_t* before_last_plug; |
| 2323 | size_t current_compacted_brick; |
| 2324 | BOOL is_shortened; |
| 2325 | mark* pinned_plug_entry; |
| 2326 | BOOL check_gennum_p; |
| 2327 | int src_gennum; |
| 2328 | |
| 2329 | void print() |
| 2330 | { |
| 2331 | dprintf (3, ("last plug: %Ix, last plug reloc: %Ix, before last: %Ix, b: %Ix" , |
| 2332 | last_plug, last_plug_relocation, before_last_plug, current_compacted_brick)); |
| 2333 | } |
| 2334 | }; |
| 2335 | |
| 2336 | PER_HEAP |
| 2337 | void copy_cards_range (uint8_t* dest, uint8_t* src, size_t len, BOOL copy_cards_p); |
| 2338 | PER_HEAP |
| 2339 | void gcmemcopy (uint8_t* dest, uint8_t* src, size_t len, BOOL copy_cards_p); |
| 2340 | PER_HEAP |
| 2341 | void compact_plug (uint8_t* plug, size_t size, BOOL check_last_object_p, compact_args* args); |
| 2342 | PER_HEAP |
| 2343 | void compact_in_brick (uint8_t* tree, compact_args* args); |
| 2344 | |
| 2345 | PER_HEAP |
| 2346 | mark* get_next_pinned_entry (uint8_t* tree, |
| 2347 | BOOL* has_pre_plug_info_p, |
| 2348 | BOOL* has_post_plug_info_p, |
| 2349 | BOOL deque_p=TRUE); |
| 2350 | |
| 2351 | PER_HEAP |
| 2352 | mark* get_oldest_pinned_entry (BOOL* has_pre_plug_info_p, BOOL* has_post_plug_info_p); |
| 2353 | |
| 2354 | PER_HEAP |
| 2355 | void recover_saved_pinned_info(); |
| 2356 | |
| 2357 | PER_HEAP |
void compact_phase (int condemned_gen_number, uint8_t* first_condemned_address,
BOOL clear_cards);
| 2360 | PER_HEAP |
| 2361 | void clear_cards (size_t start_card, size_t end_card); |
| 2362 | PER_HEAP |
| 2363 | void clear_card_for_addresses (uint8_t* start_address, uint8_t* end_address); |
| 2364 | PER_HEAP |
| 2365 | void copy_cards (size_t dst_card, size_t src_card, |
| 2366 | size_t end_card, BOOL nextp); |
| 2367 | PER_HEAP |
| 2368 | void copy_cards_for_addresses (uint8_t* dest, uint8_t* src, size_t len); |
| 2369 | |
| 2370 | #ifdef BACKGROUND_GC |
| 2371 | PER_HEAP |
| 2372 | void copy_mark_bits (size_t dst_mark_bit, size_t src_mark_bit, size_t end_mark_bit); |
| 2373 | PER_HEAP |
| 2374 | void copy_mark_bits_for_addresses (uint8_t* dest, uint8_t* src, size_t len); |
| 2375 | #endif //BACKGROUND_GC |
| 2376 | |
| 2377 | |
| 2378 | PER_HEAP |
| 2379 | BOOL ephemeral_pointer_p (uint8_t* o); |
| 2380 | PER_HEAP |
| 2381 | void fix_brick_to_highest (uint8_t* o, uint8_t* next_o); |
| 2382 | PER_HEAP |
| 2383 | uint8_t* find_first_object (uint8_t* start_address, uint8_t* first_object); |
| 2384 | PER_HEAP |
| 2385 | uint8_t* compute_next_boundary (uint8_t* low, int gen_number, BOOL relocating); |
| 2386 | PER_HEAP |
| 2387 | void keep_card_live (uint8_t* o, size_t& n_gen, |
| 2388 | size_t& cg_pointers_found); |
| 2389 | PER_HEAP |
| 2390 | void mark_through_cards_helper (uint8_t** poo, size_t& ngen, |
| 2391 | size_t& cg_pointers_found, |
| 2392 | card_fn fn, uint8_t* nhigh, |
| 2393 | uint8_t* next_boundary); |
| 2394 | |
| 2395 | PER_HEAP |
| 2396 | BOOL card_transition (uint8_t* po, uint8_t* end, size_t card_word_end, |
| 2397 | size_t& cg_pointers_found, |
| 2398 | size_t& n_eph, size_t& n_card_set, |
| 2399 | size_t& card, size_t& end_card, |
| 2400 | BOOL& foundp, uint8_t*& start_address, |
| 2401 | uint8_t*& limit, size_t& n_cards_cleared); |
| 2402 | PER_HEAP |
| 2403 | void mark_through_cards_for_segments (card_fn fn, BOOL relocating); |
| 2404 | |
| 2405 | PER_HEAP |
| 2406 | void repair_allocation_in_expanded_heap (generation* gen); |
| 2407 | PER_HEAP |
| 2408 | BOOL can_fit_in_spaces_p (size_t* ordered_blocks, int small_index, size_t* ordered_spaces, int big_index); |
| 2409 | PER_HEAP |
| 2410 | BOOL can_fit_blocks_p (size_t* ordered_blocks, int block_index, size_t* ordered_spaces, int* space_index); |
| 2411 | PER_HEAP |
| 2412 | BOOL can_fit_all_blocks_p (size_t* ordered_blocks, size_t* ordered_spaces, int count); |
| 2413 | #ifdef SEG_REUSE_STATS |
| 2414 | PER_HEAP |
| 2415 | size_t dump_buckets (size_t* ordered_indices, int count, size_t* total_size); |
| 2416 | #endif //SEG_REUSE_STATS |
| 2417 | PER_HEAP |
| 2418 | void build_ordered_free_spaces (heap_segment* seg); |
| 2419 | PER_HEAP |
| 2420 | void count_plug (size_t last_plug_size, uint8_t*& last_plug); |
| 2421 | PER_HEAP |
| 2422 | void count_plugs_in_brick (uint8_t* tree, uint8_t*& last_plug); |
| 2423 | PER_HEAP |
| 2424 | void build_ordered_plug_indices (); |
| 2425 | PER_HEAP |
| 2426 | void init_ordered_free_space_indices (); |
| 2427 | PER_HEAP |
| 2428 | void trim_free_spaces_indices (); |
| 2429 | PER_HEAP |
| 2430 | BOOL try_best_fit (BOOL end_of_segment_p); |
| 2431 | PER_HEAP |
| 2432 | BOOL best_fit (size_t free_space, size_t largest_free_space, size_t additional_space, BOOL* use_additional_space); |
| 2433 | PER_HEAP |
| 2434 | BOOL process_free_space (heap_segment* seg, |
| 2435 | size_t free_space, |
| 2436 | size_t min_free_size, |
| 2437 | size_t min_cont_size, |
| 2438 | size_t* total_free_space, |
| 2439 | size_t* largest_free_space); |
| 2440 | PER_HEAP |
| 2441 | size_t compute_eph_gen_starts_size(); |
| 2442 | PER_HEAP |
| 2443 | void compute_new_ephemeral_size(); |
| 2444 | PER_HEAP |
| 2445 | BOOL expand_reused_seg_p(); |
| 2446 | PER_HEAP |
| 2447 | BOOL can_expand_into_p (heap_segment* seg, size_t min_free_size, |
| 2448 | size_t min_cont_size, allocator* al); |
| 2449 | PER_HEAP |
| 2450 | uint8_t* allocate_in_expanded_heap (generation* gen, size_t size, |
| 2451 | BOOL& adjacentp, uint8_t* old_loc, |
| 2452 | #ifdef SHORT_PLUGS |
| 2453 | BOOL set_padding_on_saved_p, |
| 2454 | mark* pinned_plug_entry, |
| 2455 | #endif //SHORT_PLUGS |
| 2456 | BOOL consider_bestfit, int active_new_gen_number |
| 2457 | REQD_ALIGN_AND_OFFSET_DEFAULT_DCL); |
| 2458 | PER_HEAP |
| 2459 | void realloc_plug (size_t last_plug_size, uint8_t*& last_plug, |
| 2460 | generation* gen, uint8_t* start_address, |
| 2461 | unsigned int& active_new_gen_number, |
| 2462 | uint8_t*& last_pinned_gap, BOOL& leftp, |
| 2463 | BOOL shortened_p |
| 2464 | #ifdef SHORT_PLUGS |
| 2465 | , mark* pinned_plug_entry |
| 2466 | #endif //SHORT_PLUGS |
| 2467 | ); |
| 2468 | PER_HEAP |
| 2469 | void realloc_in_brick (uint8_t* tree, uint8_t*& last_plug, uint8_t* start_address, |
| 2470 | generation* gen, |
| 2471 | unsigned int& active_new_gen_number, |
| 2472 | uint8_t*& last_pinned_gap, BOOL& leftp); |
| 2473 | PER_HEAP |
| 2474 | void realloc_plugs (generation* consing_gen, heap_segment* seg, |
| 2475 | uint8_t* start_address, uint8_t* end_address, |
| 2476 | unsigned active_new_gen_number); |
| 2477 | |
| 2478 | PER_HEAP |
| 2479 | void set_expand_in_full_gc (int condemned_gen_number); |
| 2480 | |
| 2481 | PER_HEAP |
| 2482 | void verify_no_pins (uint8_t* start, uint8_t* end); |
| 2483 | |
| 2484 | PER_HEAP |
| 2485 | generation* expand_heap (int condemned_generation, |
| 2486 | generation* consing_gen, |
| 2487 | heap_segment* new_heap_segment); |
| 2488 | |
| 2489 | PER_HEAP |
| 2490 | void save_ephemeral_generation_starts(); |
| 2491 | |
| 2492 | PER_HEAP |
| 2493 | void set_static_data(); |
| 2494 | |
| 2495 | PER_HEAP_ISOLATED |
| 2496 | void init_static_data(); |
| 2497 | |
| 2498 | PER_HEAP |
| 2499 | bool init_dynamic_data (); |
| 2500 | PER_HEAP |
| 2501 | float surv_to_growth (float cst, float limit, float max_limit); |
| 2502 | PER_HEAP |
| 2503 | size_t desired_new_allocation (dynamic_data* dd, size_t out, |
| 2504 | int gen_number, int pass); |
| 2505 | |
| 2506 | PER_HEAP |
| 2507 | void trim_youngest_desired_low_memory(); |
| 2508 | |
| 2509 | PER_HEAP |
| 2510 | void decommit_ephemeral_segment_pages(); |
| 2511 | |
| 2512 | #ifdef BIT64 |
| 2513 | PER_HEAP_ISOLATED |
| 2514 | size_t trim_youngest_desired (uint32_t memory_load, |
| 2515 | size_t total_new_allocation, |
| 2516 | size_t total_min_allocation); |
| 2517 | PER_HEAP_ISOLATED |
| 2518 | size_t joined_youngest_desired (size_t new_allocation); |
| 2519 | #endif // BIT64 |
| 2520 | PER_HEAP_ISOLATED |
| 2521 | size_t get_total_heap_size (); |
| 2522 | PER_HEAP_ISOLATED |
| 2523 | size_t get_total_committed_size(); |
| 2524 | PER_HEAP_ISOLATED |
| 2525 | size_t get_total_fragmentation(); |
| 2526 | |
| 2527 | PER_HEAP_ISOLATED |
| 2528 | void get_memory_info (uint32_t* memory_load, |
| 2529 | uint64_t* available_physical=NULL, |
| 2530 | uint64_t* available_page_file=NULL); |
| 2531 | PER_HEAP |
| 2532 | size_t generation_size (int gen_number); |
| 2533 | PER_HEAP_ISOLATED |
| 2534 | size_t get_total_survived_size(); |
| 2535 | PER_HEAP |
| 2536 | size_t get_current_allocated(); |
| 2537 | PER_HEAP_ISOLATED |
| 2538 | size_t get_total_allocated(); |
| 2539 | PER_HEAP |
| 2540 | size_t current_generation_size (int gen_number); |
| 2541 | PER_HEAP |
| 2542 | size_t generation_plan_size (int gen_number); |
| 2543 | PER_HEAP |
| 2544 | void compute_promoted_allocation (int gen_number); |
| 2545 | PER_HEAP |
| 2546 | size_t compute_in (int gen_number); |
| 2547 | PER_HEAP |
| 2548 | void compute_new_dynamic_data (int gen_number); |
| 2549 | PER_HEAP |
| 2550 | gc_history_per_heap* get_gc_data_per_heap(); |
| 2551 | PER_HEAP |
| 2552 | size_t new_allocation_limit (size_t size, size_t free_size, int gen_number); |
| 2553 | PER_HEAP |
| 2554 | size_t generation_fragmentation (generation* gen, |
| 2555 | generation* consing_gen, |
| 2556 | uint8_t* end); |
| 2557 | PER_HEAP |
| 2558 | size_t generation_sizes (generation* gen); |
| 2559 | PER_HEAP |
| 2560 | size_t committed_size(); |
| 2561 | PER_HEAP |
| 2562 | size_t approximate_new_allocation(); |
| 2563 | PER_HEAP |
| 2564 | size_t end_space_after_gc(); |
| 2565 | PER_HEAP |
| 2566 | BOOL decide_on_compacting (int condemned_gen_number, |
| 2567 | size_t fragmentation, |
| 2568 | BOOL& should_expand); |
| 2569 | PER_HEAP |
| 2570 | BOOL ephemeral_gen_fit_p (gc_tuning_point tp); |
| 2571 | PER_HEAP |
| 2572 | void reset_large_object (uint8_t* o); |
| 2573 | PER_HEAP |
| 2574 | void sweep_large_objects (); |
| 2575 | PER_HEAP |
| 2576 | void relocate_in_large_objects (); |
| 2577 | PER_HEAP |
| 2578 | void mark_through_cards_for_large_objects (card_fn fn, BOOL relocating); |
| 2579 | PER_HEAP |
| 2580 | void descr_segment (heap_segment* seg); |
| 2581 | PER_HEAP |
| 2582 | void descr_generations (BOOL begin_gc_p); |
| 2583 | |
| 2584 | PER_HEAP_ISOLATED |
| 2585 | void descr_generations_to_profiler (gen_walk_fn fn, void *context); |
| 2586 | |
| 2587 | /*------------ Multiple non isolated heaps ----------------*/ |
| 2588 | #ifdef MULTIPLE_HEAPS |
| 2589 | PER_HEAP_ISOLATED |
| 2590 | BOOL create_thread_support (unsigned number_of_heaps); |
| 2591 | PER_HEAP_ISOLATED |
| 2592 | void destroy_thread_support (); |
| 2593 | PER_HEAP |
| 2594 | bool create_gc_thread(); |
| 2595 | PER_HEAP |
| 2596 | void gc_thread_function(); |
| 2597 | #ifdef MARK_LIST |
| 2598 | #ifdef PARALLEL_MARK_LIST_SORT |
| 2599 | PER_HEAP |
| 2600 | void sort_mark_list(); |
| 2601 | PER_HEAP |
| 2602 | void merge_mark_lists(); |
| 2603 | PER_HEAP |
| 2604 | void append_to_mark_list(uint8_t **start, uint8_t **end); |
| 2605 | #else //PARALLEL_MARK_LIST_SORT |
| 2606 | PER_HEAP_ISOLATED |
| 2607 | void combine_mark_lists(); |
| 2608 | #endif //PARALLEL_MARK_LIST_SORT |
#endif //MARK_LIST
| 2610 | #endif //MULTIPLE_HEAPS |
| 2611 | |
| 2612 | /*------------ End of Multiple non isolated heaps ---------*/ |
| 2613 | |
| 2614 | #ifndef SEG_MAPPING_TABLE |
| 2615 | PER_HEAP_ISOLATED |
| 2616 | heap_segment* segment_of (uint8_t* add, ptrdiff_t & delta, |
| 2617 | BOOL verify_p = FALSE); |
| 2618 | #endif //SEG_MAPPING_TABLE |
| 2619 | |
| 2620 | #ifdef BACKGROUND_GC |
| 2621 | |
| 2622 | //this is called by revisit.... |
| 2623 | PER_HEAP |
| 2624 | uint8_t* high_page (heap_segment* seg, BOOL concurrent_p); |
| 2625 | |
| 2626 | PER_HEAP |
| 2627 | void revisit_written_page (uint8_t* page, uint8_t* end, BOOL concurrent_p, |
| 2628 | heap_segment* seg, uint8_t*& last_page, |
| 2629 | uint8_t*& last_object, BOOL large_objects_p, |
| 2630 | size_t& num_marked_objects); |
| 2631 | PER_HEAP |
| 2632 | void revisit_written_pages (BOOL concurrent_p, BOOL reset_only_p=FALSE); |
| 2633 | |
| 2634 | PER_HEAP |
| 2635 | void concurrent_scan_dependent_handles (ScanContext *sc); |
| 2636 | |
| 2637 | PER_HEAP_ISOLATED |
| 2638 | void suspend_EE (); |
| 2639 | |
| 2640 | PER_HEAP_ISOLATED |
| 2641 | void bgc_suspend_EE (); |
| 2642 | |
| 2643 | PER_HEAP_ISOLATED |
| 2644 | void restart_EE (); |
| 2645 | |
| 2646 | PER_HEAP |
| 2647 | void background_verify_mark (Object*& object, ScanContext* sc, uint32_t flags); |
| 2648 | |
| 2649 | PER_HEAP |
| 2650 | void background_scan_dependent_handles (ScanContext *sc); |
| 2651 | |
| 2652 | PER_HEAP |
| 2653 | void allow_fgc(); |
| 2654 | |
| 2655 | // Restores BGC settings if necessary. |
| 2656 | PER_HEAP_ISOLATED |
| 2657 | void recover_bgc_settings(); |
| 2658 | |
| 2659 | PER_HEAP |
| 2660 | void save_bgc_data_per_heap(); |
| 2661 | |
| 2662 | PER_HEAP |
| 2663 | BOOL should_commit_mark_array(); |
| 2664 | |
| 2665 | PER_HEAP |
| 2666 | void clear_commit_flag(); |
| 2667 | |
| 2668 | PER_HEAP_ISOLATED |
| 2669 | void clear_commit_flag_global(); |
| 2670 | |
| 2671 | PER_HEAP_ISOLATED |
| 2672 | void verify_mark_array_cleared (heap_segment* seg, uint32_t* mark_array_addr); |
| 2673 | |
| 2674 | PER_HEAP_ISOLATED |
| 2675 | void verify_mark_array_cleared (uint8_t* begin, uint8_t* end, uint32_t* mark_array_addr); |
| 2676 | |
| 2677 | PER_HEAP_ISOLATED |
| 2678 | BOOL commit_mark_array_by_range (uint8_t* begin, |
| 2679 | uint8_t* end, |
| 2680 | uint32_t* mark_array_addr); |
| 2681 | |
| 2682 | PER_HEAP_ISOLATED |
| 2683 | BOOL commit_mark_array_new_seg (gc_heap* hp, |
| 2684 | heap_segment* seg, |
| 2685 | uint32_t* new_card_table = 0, |
| 2686 | uint8_t* new_lowest_address = 0); |
| 2687 | |
| 2688 | PER_HEAP_ISOLATED |
| 2689 | BOOL commit_mark_array_with_check (heap_segment* seg, uint32_t* mark_array_addr); |
| 2690 | |
| 2691 | // commit the portion of the mark array that corresponds to |
| 2692 | // this segment (from beginning to reserved). |
| 2693 | // seg and heap_segment_reserved (seg) are guaranteed to be |
| 2694 | // page aligned. |
| 2695 | PER_HEAP_ISOLATED |
| 2696 | BOOL commit_mark_array_by_seg (heap_segment* seg, uint32_t* mark_array_addr); |
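// A hedged sketch of the whole-segment commit described above, reusing
// commit_mark_array_by_range declared earlier (heap_segment_reserved is the
// accessor named in the comment; treating (uint8_t*)seg as the "beginning"
// is an assumption):
//
//     return commit_mark_array_by_range ((uint8_t*)seg,
//                                        heap_segment_reserved (seg),
//                                        mark_array_addr);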
| 2697 | |
| 2698 | // During BGC init, we commit the mark array for all in range |
| 2699 | // segments whose mark array hasn't been committed or fully |
| 2700 | // committed. All rw segments are in range, only ro segments |
| 2701 | // can be partial in range. |
| 2702 | PER_HEAP |
| 2703 | BOOL commit_mark_array_bgc_init (uint32_t* mark_array_addr); |
| 2704 | |
| 2705 | PER_HEAP |
| 2706 | BOOL commit_new_mark_array (uint32_t* new_mark_array); |
| 2707 | |
| 2708 | // We need to commit all segments that intersect with the bgc |
| 2709 | // range. If a segment is only partially in range, we still |
| 2710 | // should commit the mark array for the whole segment as |
| 2711 | // we will set the mark array commit flag for this segment. |
| 2712 | PER_HEAP_ISOLATED |
| 2713 | BOOL commit_new_mark_array_global (uint32_t* new_mark_array); |
| 2714 | |
| 2715 | // We can't decommit the first and the last page in the mark array |
| 2716 | // if the beginning and ending don't happen to be page aligned. |
| 2717 | PER_HEAP |
| 2718 | void decommit_mark_array_by_seg (heap_segment* seg); |
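// Illustrative sketch of the page-alignment constraint described above: the
// decommit range is shrunk inward so a partially covered first or last page
// stays committed. mark_array_begin/mark_array_end, align_on_page and
// align_lower_page are assumed names used only to show the shape, and the
// decommit call is hedged as the OS interface decommit.
//
//     uint8_t* start = align_on_page (mark_array_begin);    // round up
//     uint8_t* end   = align_lower_page (mark_array_end);   // round down
//     if (start < end)
//         GCToOSInterface::VirtualDecommit (start, (size_t)(end - start));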
| 2719 | |
| 2720 | PER_HEAP |
| 2721 | void background_mark_phase(); |
| 2722 | |
| 2723 | PER_HEAP |
| 2724 | void background_drain_mark_list (int thread); |
| 2725 | |
| 2726 | PER_HEAP |
| 2727 | void background_grow_c_mark_list(); |
| 2728 | |
| 2729 | PER_HEAP_ISOLATED |
| 2730 | void background_promote_callback(Object** object, ScanContext* sc, uint32_t flags); |
| 2731 | |
| 2732 | PER_HEAP |
| 2733 | void mark_absorb_new_alloc(); |
| 2734 | |
| 2735 | PER_HEAP |
| 2736 | void restart_vm(); |
| 2737 | |
| 2738 | PER_HEAP |
| 2739 | BOOL prepare_bgc_thread(gc_heap* gh); |
| 2740 | PER_HEAP |
| 2741 | BOOL create_bgc_thread(gc_heap* gh); |
| 2742 | PER_HEAP_ISOLATED |
| 2743 | BOOL create_bgc_threads_support (int number_of_heaps); |
| 2744 | PER_HEAP |
| 2745 | BOOL create_bgc_thread_support(); |
| 2746 | PER_HEAP_ISOLATED |
| 2747 | int check_for_ephemeral_alloc(); |
| 2748 | PER_HEAP_ISOLATED |
| 2749 | void wait_to_proceed(); |
| 2750 | PER_HEAP_ISOLATED |
| 2751 | void fire_alloc_wait_event_begin (alloc_wait_reason awr); |
| 2752 | PER_HEAP_ISOLATED |
| 2753 | void fire_alloc_wait_event_end (alloc_wait_reason awr); |
| 2754 | PER_HEAP |
| 2755 | void background_gc_wait_lh (alloc_wait_reason awr = awr_ignored); |
| 2756 | PER_HEAP |
| 2757 | uint32_t background_gc_wait (alloc_wait_reason awr = awr_ignored, int time_out_ms = INFINITE); |
| 2758 | PER_HEAP_ISOLATED |
| 2759 | void start_c_gc(); |
| 2760 | PER_HEAP |
| 2761 | void kill_gc_thread(); |
| 2762 | PER_HEAP |
| 2763 | void bgc_thread_function(); |
| 2764 | PER_HEAP_ISOLATED |
| 2765 | void do_background_gc(); |
| 2766 | static |
| 2767 | void bgc_thread_stub (void* arg); |
| 2768 | #endif //BACKGROUND_GC |
| 2769 | |
| 2770 | public: |
| 2771 | |
| 2772 | PER_HEAP_ISOLATED |
| 2773 | VOLATILE(bool) internal_gc_done; |
| 2774 | |
| 2775 | #ifdef BACKGROUND_GC |
| 2776 | PER_HEAP_ISOLATED |
| 2777 | uint32_t cm_in_progress; |
| 2778 | |
// Normally this is FALSE; we set it to TRUE at the end of the gen1 GC that
// we do right before the bgc starts.
| 2781 | PER_HEAP_ISOLATED |
| 2782 | BOOL dont_restart_ee_p; |
| 2783 | |
| 2784 | PER_HEAP_ISOLATED |
| 2785 | GCEvent bgc_start_event; |
| 2786 | #endif //BACKGROUND_GC |
| 2787 | |
| 2788 | // The variables in this block are known to the DAC and must come first |
| 2789 | // in the gc_heap class. |
| 2790 | |
| 2791 | // Keeps track of the highest address allocated by Alloc |
| 2792 | PER_HEAP |
| 2793 | uint8_t* alloc_allocated; |
| 2794 | |
| 2795 | // The ephemeral heap segment |
| 2796 | PER_HEAP |
| 2797 | heap_segment* ephemeral_heap_segment; |
| 2798 | |
| 2799 | // The finalize queue. |
| 2800 | PER_HEAP |
| 2801 | CFinalize* finalize_queue; |
| 2802 | |
| 2803 | // OOM info. |
| 2804 | PER_HEAP |
| 2805 | oom_history oom_info; |
| 2806 | |
| 2807 | // Interesting data, recorded per-heap. |
| 2808 | PER_HEAP |
| 2809 | size_t interesting_data_per_heap[max_idp_count]; |
| 2810 | |
| 2811 | PER_HEAP |
| 2812 | size_t compact_reasons_per_heap[max_compact_reasons_count]; |
| 2813 | |
| 2814 | PER_HEAP |
| 2815 | size_t expand_mechanisms_per_heap[max_expand_mechanisms_count]; |
| 2816 | |
| 2817 | PER_HEAP |
| 2818 | size_t interesting_mechanism_bits_per_heap[max_gc_mechanism_bits_count]; |
| 2819 | |
| 2820 | PER_HEAP |
| 2821 | uint8_t** internal_root_array; |
| 2822 | |
| 2823 | PER_HEAP |
| 2824 | size_t internal_root_array_index; |
| 2825 | |
| 2826 | PER_HEAP |
| 2827 | BOOL heap_analyze_success; |
| 2828 | |
| 2829 | // The generation table. Must always be last. |
| 2830 | PER_HEAP |
| 2831 | generation generation_table [NUMBERGENERATIONS + 1]; |
| 2832 | |
| 2833 | // End DAC zone |
| 2834 | |
| 2835 | PER_HEAP |
| 2836 | BOOL expanded_in_fgc; |
| 2837 | |
| 2838 | PER_HEAP_ISOLATED |
| 2839 | uint32_t wait_for_gc_done(int32_t timeOut = INFINITE); |
| 2840 | |
| 2841 | // Returns TRUE if the current thread used to be in cooperative mode |
| 2842 | // before calling this function. |
| 2843 | PER_HEAP_ISOLATED |
| 2844 | bool enable_preemptive (); |
| 2845 | PER_HEAP_ISOLATED |
| 2846 | void disable_preemptive (bool restore_cooperative); |
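// A minimal usage sketch of the pair above (illustrative only): capture the
// previous mode so it can be restored symmetrically afterwards.
//
//     bool was_cooperative = enable_preemptive ();
//     // ... block or wait while in preemptive mode ...
//     disable_preemptive (was_cooperative);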
| 2847 | |
| 2848 | /* ------------------- per heap members --------------------------*/ |
| 2849 | |
PER_HEAP
GCEvent gc_done_event;
| 2856 | |
| 2857 | PER_HEAP |
| 2858 | VOLATILE(int32_t) gc_done_event_lock; |
| 2859 | |
| 2860 | PER_HEAP |
| 2861 | VOLATILE(bool) gc_done_event_set; |
| 2862 | |
| 2863 | PER_HEAP |
| 2864 | void set_gc_done(); |
| 2865 | |
| 2866 | PER_HEAP |
| 2867 | void reset_gc_done(); |
| 2868 | |
| 2869 | PER_HEAP |
| 2870 | void enter_gc_done_event_lock(); |
| 2871 | |
| 2872 | PER_HEAP |
| 2873 | void exit_gc_done_event_lock(); |
| 2874 | |
| 2875 | PER_HEAP |
| 2876 | uint8_t* ephemeral_low; //lowest ephemeral address |
| 2877 | |
| 2878 | PER_HEAP |
| 2879 | uint8_t* ephemeral_high; //highest ephemeral address |
| 2880 | |
| 2881 | PER_HEAP |
| 2882 | uint32_t* card_table; |
| 2883 | |
| 2884 | PER_HEAP |
| 2885 | short* brick_table; |
| 2886 | |
| 2887 | #ifdef MARK_ARRAY |
| 2888 | PER_HEAP |
| 2889 | uint32_t* mark_array; |
| 2890 | #endif //MARK_ARRAY |
| 2891 | |
| 2892 | #ifdef CARD_BUNDLE |
| 2893 | PER_HEAP |
| 2894 | uint32_t* card_bundle_table; |
| 2895 | #endif //CARD_BUNDLE |
| 2896 | |
| 2897 | #if !defined(SEG_MAPPING_TABLE) || defined(FEATURE_BASICFREEZE) |
| 2898 | PER_HEAP_ISOLATED |
| 2899 | sorted_table* seg_table; |
| 2900 | #endif //!SEG_MAPPING_TABLE || FEATURE_BASICFREEZE |
| 2901 | |
| 2902 | PER_HEAP_ISOLATED |
| 2903 | VOLATILE(BOOL) gc_started; |
| 2904 | |
| 2905 | // The following 2 events are there to support the gen2 |
| 2906 | // notification feature which is only enabled if concurrent |
| 2907 | // GC is disabled. |
| 2908 | PER_HEAP_ISOLATED |
| 2909 | GCEvent full_gc_approach_event; |
| 2910 | |
| 2911 | PER_HEAP_ISOLATED |
| 2912 | GCEvent full_gc_end_event; |
| 2913 | |
| 2914 | // Full GC Notification percentages. |
| 2915 | PER_HEAP_ISOLATED |
| 2916 | uint32_t fgn_maxgen_percent; |
| 2917 | |
| 2918 | PER_HEAP_ISOLATED |
| 2919 | uint32_t fgn_loh_percent; |
| 2920 | |
| 2921 | PER_HEAP_ISOLATED |
| 2922 | VOLATILE(bool) full_gc_approach_event_set; |
| 2923 | |
| 2924 | #ifdef BACKGROUND_GC |
| 2925 | PER_HEAP_ISOLATED |
| 2926 | BOOL fgn_last_gc_was_concurrent; |
| 2927 | #endif //BACKGROUND_GC |
| 2928 | |
| 2929 | PER_HEAP |
| 2930 | size_t fgn_last_alloc; |
| 2931 | |
| 2932 | static uint32_t user_thread_wait (GCEvent *event, BOOL no_mode_change, int time_out_ms=INFINITE); |
| 2933 | |
| 2934 | static wait_full_gc_status full_gc_wait (GCEvent *event, int time_out_ms); |
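// A hedged sketch of how the gen2 notification events above might be
// consumed (illustrative; the user-thread plumbing is not shown in this
// header, and wait_full_gc_success is assumed to be the success enumerator):
//
//     wait_full_gc_status status = full_gc_wait (&full_gc_approach_event, time_out_ms);
//     if (status == wait_full_gc_success)
//     {
//         // a full GC is approaching; the caller can react before it starts
//     }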
| 2935 | |
| 2936 | PER_HEAP |
| 2937 | uint8_t* demotion_low; |
| 2938 | |
| 2939 | PER_HEAP |
| 2940 | uint8_t* demotion_high; |
| 2941 | |
| 2942 | PER_HEAP |
| 2943 | BOOL demote_gen1_p; |
| 2944 | |
| 2945 | PER_HEAP |
| 2946 | uint8_t* last_gen1_pin_end; |
| 2947 | |
| 2948 | PER_HEAP |
| 2949 | gen_to_condemn_tuning gen_to_condemn_reasons; |
| 2950 | |
| 2951 | PER_HEAP |
| 2952 | size_t etw_allocation_running_amount[2]; |
| 2953 | |
| 2954 | PER_HEAP |
| 2955 | int gc_policy; //sweep, compact, expand |
| 2956 | |
| 2957 | #ifdef MULTIPLE_HEAPS |
| 2958 | PER_HEAP_ISOLATED |
| 2959 | bool gc_thread_no_affinitize_p; |
| 2960 | |
| 2961 | PER_HEAP_ISOLATED |
| 2962 | GCEvent gc_start_event; |
| 2963 | |
| 2964 | PER_HEAP_ISOLATED |
| 2965 | GCEvent ee_suspend_event; |
| 2966 | |
| 2967 | PER_HEAP |
| 2968 | heap_segment* new_heap_segment; |
| 2969 | |
| 2970 | #define alloc_quantum_balance_units (16) |
| 2971 | |
| 2972 | PER_HEAP_ISOLATED |
| 2973 | size_t min_balance_threshold; |
| 2974 | #else //MULTIPLE_HEAPS |
| 2975 | |
| 2976 | PER_HEAP |
| 2977 | size_t allocation_running_time; |
| 2978 | |
| 2979 | PER_HEAP |
| 2980 | size_t allocation_running_amount; |
| 2981 | |
| 2982 | #endif //MULTIPLE_HEAPS |
| 2983 | |
| 2984 | PER_HEAP_ISOLATED |
| 2985 | gc_latency_level latency_level; |
| 2986 | |
| 2987 | PER_HEAP_ISOLATED |
| 2988 | gc_mechanisms settings; |
| 2989 | |
| 2990 | PER_HEAP_ISOLATED |
| 2991 | gc_history_global gc_data_global; |
| 2992 | |
| 2993 | PER_HEAP_ISOLATED |
| 2994 | size_t gc_last_ephemeral_decommit_time; |
| 2995 | |
| 2996 | PER_HEAP_ISOLATED |
| 2997 | size_t gc_gen0_desired_high; |
| 2998 | |
| 2999 | PER_HEAP |
| 3000 | size_t gen0_big_free_spaces; |
| 3001 | |
| 3002 | #ifdef SHORT_PLUGS |
| 3003 | PER_HEAP_ISOLATED |
| 3004 | double short_plugs_pad_ratio; |
| 3005 | #endif //SHORT_PLUGS |
| 3006 | |
| 3007 | #ifdef BIT64 |
| 3008 | PER_HEAP_ISOLATED |
| 3009 | size_t youngest_gen_desired_th; |
| 3010 | #endif //BIT64 |
| 3011 | |
| 3012 | PER_HEAP_ISOLATED |
| 3013 | uint32_t last_gc_memory_load; |
| 3014 | |
| 3015 | PER_HEAP_ISOLATED |
| 3016 | size_t last_gc_heap_size; |
| 3017 | |
| 3018 | PER_HEAP_ISOLATED |
| 3019 | size_t last_gc_fragmentation; |
| 3020 | |
| 3021 | PER_HEAP_ISOLATED |
| 3022 | uint32_t high_memory_load_th; |
| 3023 | |
| 3024 | PER_HEAP_ISOLATED |
| 3025 | uint32_t m_high_memory_load_th; |
| 3026 | |
| 3027 | PER_HEAP_ISOLATED |
| 3028 | uint32_t v_high_memory_load_th; |
| 3029 | |
| 3030 | PER_HEAP_ISOLATED |
| 3031 | uint64_t mem_one_percent; |
| 3032 | |
| 3033 | PER_HEAP_ISOLATED |
| 3034 | uint64_t total_physical_mem; |
| 3035 | |
| 3036 | PER_HEAP_ISOLATED |
| 3037 | uint64_t entry_available_physical_mem; |
| 3038 | |
| 3039 | PER_HEAP_ISOLATED |
| 3040 | size_t last_gc_index; |
| 3041 | |
| 3042 | #ifdef SEG_MAPPING_TABLE |
| 3043 | PER_HEAP_ISOLATED |
| 3044 | size_t min_segment_size; |
| 3045 | |
| 3046 | PER_HEAP_ISOLATED |
| 3047 | size_t min_segment_size_shr; |
| 3048 | #endif //SEG_MAPPING_TABLE |
| 3049 | |
| 3050 | // For SOH we always allocate segments of the same |
| 3051 | // size unless no_gc_region requires larger ones. |
| 3052 | PER_HEAP_ISOLATED |
| 3053 | size_t soh_segment_size; |
| 3054 | |
| 3055 | PER_HEAP_ISOLATED |
| 3056 | size_t min_loh_segment_size; |
| 3057 | |
| 3058 | PER_HEAP_ISOLATED |
| 3059 | size_t segment_info_size; |
| 3060 | |
| 3061 | PER_HEAP |
| 3062 | uint8_t* lowest_address; |
| 3063 | |
| 3064 | PER_HEAP |
| 3065 | uint8_t* highest_address; |
| 3066 | |
| 3067 | PER_HEAP |
| 3068 | BOOL ephemeral_promotion; |
| 3069 | PER_HEAP |
| 3070 | uint8_t* saved_ephemeral_plan_start[NUMBERGENERATIONS-1]; |
| 3071 | PER_HEAP |
| 3072 | size_t saved_ephemeral_plan_start_size[NUMBERGENERATIONS-1]; |
| 3073 | |
| 3074 | protected: |
| 3075 | #ifdef MULTIPLE_HEAPS |
| 3076 | PER_HEAP |
| 3077 | GCHeap* vm_heap; |
| 3078 | PER_HEAP |
| 3079 | int heap_number; |
| 3080 | PER_HEAP |
| 3081 | VOLATILE(int) alloc_context_count; |
| 3082 | #else //MULTIPLE_HEAPS |
| 3083 | #define vm_heap ((GCHeap*) g_theGCHeap) |
| 3084 | #define heap_number (0) |
| 3085 | #endif //MULTIPLE_HEAPS |
| 3086 | |
| 3087 | PER_HEAP |
| 3088 | size_t time_bgc_last; |
| 3089 | |
| 3090 | PER_HEAP |
| 3091 | uint8_t* gc_low; // lowest address being condemned |
| 3092 | |
| 3093 | PER_HEAP |
| 3094 | uint8_t* gc_high; //highest address being condemned |
| 3095 | |
| 3096 | PER_HEAP |
| 3097 | size_t mark_stack_tos; |
| 3098 | |
| 3099 | PER_HEAP |
| 3100 | size_t mark_stack_bos; |
| 3101 | |
| 3102 | PER_HEAP |
| 3103 | size_t mark_stack_array_length; |
| 3104 | |
| 3105 | PER_HEAP |
| 3106 | mark* mark_stack_array; |
| 3107 | |
| 3108 | #if defined (_DEBUG) && defined (VERIFY_HEAP) |
| 3109 | PER_HEAP |
| 3110 | BOOL verify_pinned_queue_p; |
| 3111 | #endif // _DEBUG && VERIFY_HEAP |
| 3112 | |
| 3113 | PER_HEAP |
| 3114 | uint8_t* oldest_pinned_plug; |
| 3115 | |
| 3116 | #if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE) |
| 3117 | PER_HEAP |
| 3118 | size_t num_pinned_objects; |
| 3119 | #endif //ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE |
| 3120 | |
| 3121 | #ifdef FEATURE_LOH_COMPACTION |
| 3122 | PER_HEAP |
| 3123 | size_t loh_pinned_queue_tos; |
| 3124 | |
| 3125 | PER_HEAP |
| 3126 | size_t loh_pinned_queue_bos; |
| 3127 | |
| 3128 | PER_HEAP |
| 3129 | size_t loh_pinned_queue_length; |
| 3130 | |
| 3131 | PER_HEAP_ISOLATED |
| 3132 | int loh_pinned_queue_decay; |
| 3133 | |
| 3134 | PER_HEAP |
| 3135 | mark* loh_pinned_queue; |
| 3136 | |
| 3137 | // This is for forced LOH compaction via the complus env var |
| 3138 | PER_HEAP_ISOLATED |
| 3139 | BOOL loh_compaction_always_p; |
| 3140 | |
| 3141 | // This is set by the user. |
| 3142 | PER_HEAP_ISOLATED |
| 3143 | gc_loh_compaction_mode loh_compaction_mode; |
| 3144 | |
| 3145 | // We may not compact LOH on every heap if we can't |
| 3146 | // grow the pinned queue. This is to indicate whether |
| 3147 | // this heap's LOH is compacted or not. So even if |
| 3148 | // settings.loh_compaction is TRUE this may not be TRUE. |
| 3149 | PER_HEAP |
| 3150 | BOOL loh_compacted_p; |
| 3151 | #endif //FEATURE_LOH_COMPACTION |
| 3152 | |
| 3153 | #ifdef BACKGROUND_GC |
| 3154 | |
| 3155 | PER_HEAP |
| 3156 | EEThreadId bgc_thread_id; |
| 3157 | |
| 3158 | #ifdef WRITE_WATCH |
| 3159 | PER_HEAP |
| 3160 | uint8_t* background_written_addresses [array_size+2]; |
| 3161 | #endif //WRITE_WATCH |
| 3162 | |
| 3163 | PER_HEAP_ISOLATED |
| 3164 | VOLATILE(c_gc_state) current_c_gc_state; //tells the large object allocator to |
| 3165 | //mark the object as new since the start of gc. |
| 3166 | |
| 3167 | PER_HEAP_ISOLATED |
| 3168 | gc_mechanisms saved_bgc_settings; |
| 3169 | |
| 3170 | PER_HEAP |
| 3171 | gc_history_per_heap bgc_data_per_heap; |
| 3172 | |
| 3173 | PER_HEAP |
BOOL bgc_thread_running; // TRUE while the gc thread is in its main loop
| 3175 | |
| 3176 | PER_HEAP_ISOLATED |
| 3177 | BOOL keep_bgc_threads_p; |
| 3178 | |
// This event is used by BGC threads to do something on
// one specific thread while the other BGC threads have to
// wait. It is different from a join because a join doesn't
// let you specify which thread should be doing the task
// while the other threads wait.
// For example, to make the BGC threads managed threads
// we need to create them on the thread that called
// SuspendEE, which is heap 0's thread.
| 3187 | PER_HEAP_ISOLATED |
| 3188 | GCEvent bgc_threads_sync_event; |
| 3189 | |
| 3190 | PER_HEAP |
| 3191 | Thread* bgc_thread; |
| 3192 | |
| 3193 | PER_HEAP |
| 3194 | CLRCriticalSection bgc_threads_timeout_cs; |
| 3195 | |
| 3196 | PER_HEAP_ISOLATED |
| 3197 | GCEvent background_gc_done_event; |
| 3198 | |
| 3199 | PER_HEAP_ISOLATED |
| 3200 | GCEvent ee_proceed_event; |
| 3201 | |
| 3202 | PER_HEAP |
| 3203 | GCEvent gc_lh_block_event; |
| 3204 | |
| 3205 | PER_HEAP_ISOLATED |
| 3206 | bool gc_can_use_concurrent; |
| 3207 | |
| 3208 | PER_HEAP_ISOLATED |
| 3209 | bool temp_disable_concurrent_p; |
| 3210 | |
| 3211 | PER_HEAP_ISOLATED |
| 3212 | BOOL do_ephemeral_gc_p; |
| 3213 | |
| 3214 | PER_HEAP_ISOLATED |
| 3215 | BOOL do_concurrent_p; |
| 3216 | |
| 3217 | PER_HEAP |
| 3218 | VOLATILE(bgc_state) current_bgc_state; |
| 3219 | |
| 3220 | struct gc_history |
| 3221 | { |
| 3222 | size_t gc_index; |
| 3223 | bgc_state current_bgc_state; |
| 3224 | uint32_t gc_time_ms; |
| 3225 | // This is in bytes per ms; consider breaking it |
| 3226 | // into the efficiency per phase. |
| 3227 | size_t gc_efficiency; |
| 3228 | uint8_t* eph_low; |
| 3229 | uint8_t* gen0_start; |
| 3230 | uint8_t* eph_high; |
| 3231 | uint8_t* bgc_highest; |
| 3232 | uint8_t* bgc_lowest; |
| 3233 | uint8_t* fgc_highest; |
| 3234 | uint8_t* fgc_lowest; |
| 3235 | uint8_t* g_highest; |
| 3236 | uint8_t* g_lowest; |
| 3237 | }; |
| 3238 | |
| 3239 | #define max_history_count 64 |
| 3240 | |
| 3241 | PER_HEAP |
| 3242 | int gchist_index_per_heap; |
| 3243 | |
| 3244 | PER_HEAP |
| 3245 | gc_history gchist_per_heap[max_history_count]; |
| 3246 | |
| 3247 | PER_HEAP_ISOLATED |
| 3248 | int gchist_index; |
| 3249 | |
| 3250 | PER_HEAP_ISOLATED |
| 3251 | gc_mechanisms_store gchist[max_history_count]; |
| 3252 | |
| 3253 | PER_HEAP |
| 3254 | void add_to_history_per_heap(); |
| 3255 | |
| 3256 | PER_HEAP_ISOLATED |
| 3257 | void add_to_history(); |
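
// Presumably (an assumption for illustration, not quoted from gc.cpp) these
// record into the circular buffers above, along the lines of:
//
//   gchist_per_heap[gchist_index_per_heap] = /* current per-heap history entry */;
//   gchist_index_per_heap = (gchist_index_per_heap + 1) % max_history_count;
//
// so only the most recent max_history_count (64) GCs are kept.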
| 3258 | |
| 3259 | PER_HEAP |
| 3260 | size_t total_promoted_bytes; |
| 3261 | |
| 3262 | PER_HEAP |
| 3263 | size_t bgc_overflow_count; |
| 3264 | |
| 3265 | PER_HEAP |
| 3266 | size_t bgc_begin_loh_size; |
| 3267 | PER_HEAP |
| 3268 | size_t end_loh_size; |
| 3269 | |
// We need to throttle the LOH allocations during BGC since we can't
// collect LOH when BGC is in progress.
// We allow the LOH heap size to double during a BGC. For every
// 10% increase we make the LOH allocating thread sleep for one more
// ms. So if we are already 30% over the original heap size, the thread
// will sleep for 3 ms.
| 3276 | PER_HEAP |
| 3277 | uint32_t bgc_alloc_spin_loh; |
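
// Illustrative sketch of the throttling math described above (not the GC's
// actual code; the names below are the fields on this class):
//
//   size_t   grown_percent = (bgc_loh_size_increased * 100) / bgc_begin_loh_size;
//   uint32_t sleep_ms      = (uint32_t)(grown_percent / 10);  // 30% over => 3 ms
//
// A user thread allocating on the LOH while a BGC is in progress would then
// sleep for roughly sleep_ms before retrying.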
| 3278 | |
// This includes what we allocate at the end of the segment - allocating
// from the free list doesn't increase the heap size.
| 3281 | PER_HEAP |
| 3282 | size_t bgc_loh_size_increased; |
| 3283 | |
| 3284 | PER_HEAP |
| 3285 | size_t bgc_loh_allocated_in_free; |
| 3286 | |
| 3287 | PER_HEAP |
| 3288 | size_t background_soh_alloc_count; |
| 3289 | |
| 3290 | PER_HEAP |
| 3291 | size_t background_loh_alloc_count; |
| 3292 | |
| 3293 | PER_HEAP |
| 3294 | VOLATILE(int32_t) loh_alloc_thread_count; |
| 3295 | |
| 3296 | PER_HEAP |
| 3297 | uint8_t** background_mark_stack_tos; |
| 3298 | |
| 3299 | PER_HEAP |
| 3300 | uint8_t** background_mark_stack_array; |
| 3301 | |
| 3302 | PER_HEAP |
| 3303 | size_t background_mark_stack_array_length; |
| 3304 | |
| 3305 | PER_HEAP |
| 3306 | uint8_t* background_min_overflow_address; |
| 3307 | |
| 3308 | PER_HEAP |
| 3309 | uint8_t* background_max_overflow_address; |
| 3310 | |
| 3311 | // We can't process the soh range concurrently so we |
| 3312 | // wait till final mark to process it. |
| 3313 | PER_HEAP |
| 3314 | BOOL processed_soh_overflow_p; |
| 3315 | |
| 3316 | PER_HEAP |
| 3317 | uint8_t* background_min_soh_overflow_address; |
| 3318 | |
| 3319 | PER_HEAP |
| 3320 | uint8_t* background_max_soh_overflow_address; |
| 3321 | |
| 3322 | PER_HEAP |
| 3323 | heap_segment* saved_overflow_ephemeral_seg; |
| 3324 | |
| 3325 | PER_HEAP |
| 3326 | heap_segment* saved_sweep_ephemeral_seg; |
| 3327 | |
| 3328 | PER_HEAP |
| 3329 | uint8_t* saved_sweep_ephemeral_start; |
| 3330 | |
| 3331 | PER_HEAP |
| 3332 | uint8_t* background_saved_lowest_address; |
| 3333 | |
| 3334 | PER_HEAP |
| 3335 | uint8_t* background_saved_highest_address; |
| 3336 | |
| 3337 | // This is used for synchronization between the bgc thread |
| 3338 | // for this heap and the user threads allocating on this |
| 3339 | // heap. |
| 3340 | PER_HEAP |
| 3341 | exclusive_sync* bgc_alloc_lock; |
| 3342 | |
| 3343 | #ifdef SNOOP_STATS |
| 3344 | PER_HEAP |
| 3345 | snoop_stats_data snoop_stat; |
| 3346 | #endif //SNOOP_STATS |
| 3347 | |
| 3348 | |
| 3349 | PER_HEAP |
| 3350 | uint8_t** c_mark_list; |
| 3351 | |
| 3352 | PER_HEAP |
| 3353 | size_t c_mark_list_length; |
| 3354 | |
| 3355 | PER_HEAP |
| 3356 | size_t c_mark_list_index; |
| 3357 | #endif //BACKGROUND_GC |
| 3358 | |
| 3359 | #ifdef MARK_LIST |
| 3360 | PER_HEAP |
| 3361 | uint8_t** mark_list; |
| 3362 | |
| 3363 | PER_HEAP_ISOLATED |
| 3364 | size_t mark_list_size; |
| 3365 | |
| 3366 | PER_HEAP |
| 3367 | uint8_t** mark_list_end; |
| 3368 | |
| 3369 | PER_HEAP |
| 3370 | uint8_t** mark_list_index; |
| 3371 | |
| 3372 | PER_HEAP_ISOLATED |
| 3373 | uint8_t** g_mark_list; |
| 3374 | #ifdef PARALLEL_MARK_LIST_SORT |
| 3375 | PER_HEAP_ISOLATED |
| 3376 | uint8_t** g_mark_list_copy; |
| 3377 | PER_HEAP |
| 3378 | uint8_t*** mark_list_piece_start; |
PER_HEAP
uint8_t*** mark_list_piece_end;
| 3380 | #endif //PARALLEL_MARK_LIST_SORT |
| 3381 | #endif //MARK_LIST |
| 3382 | |
| 3383 | PER_HEAP |
| 3384 | uint8_t* min_overflow_address; |
| 3385 | |
| 3386 | PER_HEAP |
| 3387 | uint8_t* max_overflow_address; |
| 3388 | |
| 3389 | #ifndef MULTIPLE_HEAPS |
| 3390 | PER_HEAP |
| 3391 | uint8_t* shigh; //keeps track of the highest marked object |
| 3392 | |
| 3393 | PER_HEAP |
| 3394 | uint8_t* slow; //keeps track of the lowest marked object |
| 3395 | #endif //MULTIPLE_HEAPS |
| 3396 | |
| 3397 | PER_HEAP |
| 3398 | size_t allocation_quantum; |
| 3399 | |
| 3400 | PER_HEAP |
| 3401 | size_t alloc_contexts_used; |
| 3402 | |
| 3403 | PER_HEAP_ISOLATED |
| 3404 | no_gc_region_info current_no_gc_region_info; |
| 3405 | |
| 3406 | PER_HEAP |
| 3407 | size_t soh_allocation_no_gc; |
| 3408 | |
| 3409 | PER_HEAP |
| 3410 | size_t loh_allocation_no_gc; |
| 3411 | |
| 3412 | PER_HEAP |
| 3413 | bool no_gc_oom_p; |
| 3414 | |
| 3415 | PER_HEAP |
| 3416 | heap_segment* saved_loh_segment_no_gc; |
| 3417 | |
| 3418 | PER_HEAP_ISOLATED |
| 3419 | BOOL proceed_with_gc_p; |
| 3420 | |
| 3421 | #define youngest_generation (generation_of (0)) |
| 3422 | #define large_object_generation (generation_of (max_generation+1)) |
| 3423 | |
// The more_space_lock and gc_lock are used for 3 purposes:
| 3425 | // |
| 3426 | // 1) to coordinate threads that exceed their quantum (UP & MP) (more_space_lock_soh) |
| 3427 | // 2) to synchronize allocations of large objects (more_space_lock_loh) |
| 3428 | // 3) to synchronize the GC itself (gc_lock) |
| 3429 | // |
| 3430 | PER_HEAP_ISOLATED |
| 3431 | GCSpinLock gc_lock; //lock while doing GC |
| 3432 | |
| 3433 | PER_HEAP |
| 3434 | GCSpinLock more_space_lock_soh; //lock while allocating more space for soh |
| 3435 | |
| 3436 | PER_HEAP |
| 3437 | GCSpinLock more_space_lock_loh; |
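
// A rough sketch of how these locks relate (pseudocode for illustration, not
// the actual allocation path): a user thread that runs out of its allocation
// context takes the per-heap more_space_lock_soh (or more_space_lock_loh for
// large objects); only if getting more space truly requires a collection does
// it go on to take the process-wide gc_lock and trigger a GC.
//
//   enter more_space_lock_soh
//     try to satisfy the request from the free list / end of segment
//     if that fails:
//       enter gc_lock          // one thread per process does this
//       garbage_collect (...)
//       leave gc_lock
//   leave more_space_lock_soh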
| 3438 | |
| 3439 | #ifdef SYNCHRONIZATION_STATS |
| 3440 | |
| 3441 | PER_HEAP |
| 3442 | unsigned int good_suspension; |
| 3443 | |
| 3444 | PER_HEAP |
| 3445 | unsigned int bad_suspension; |
| 3446 | |
| 3447 | // Number of times when msl_acquire is > 200 cycles. |
| 3448 | PER_HEAP |
| 3449 | unsigned int num_high_msl_acquire; |
| 3450 | |
| 3451 | // Number of times when msl_acquire is < 200 cycles. |
| 3452 | PER_HEAP |
| 3453 | unsigned int num_low_msl_acquire; |
| 3454 | |
| 3455 | // Number of times the more_space_lock is acquired. |
| 3456 | PER_HEAP |
| 3457 | unsigned int num_msl_acquired; |
| 3458 | |
| 3459 | // Total cycles it takes to acquire the more_space_lock. |
| 3460 | PER_HEAP |
| 3461 | uint64_t total_msl_acquire; |
| 3462 | |
| 3463 | PER_HEAP |
| 3464 | void init_heap_sync_stats() |
| 3465 | { |
| 3466 | good_suspension = 0; |
| 3467 | bad_suspension = 0; |
| 3468 | num_msl_acquired = 0; |
| 3469 | total_msl_acquire = 0; |
| 3470 | num_high_msl_acquire = 0; |
| 3471 | num_low_msl_acquire = 0; |
more_space_lock_soh.init();
more_space_lock_loh.init();
| 3473 | gc_lock.init(); |
| 3474 | } |
| 3475 | |
| 3476 | PER_HEAP |
| 3477 | void print_heap_sync_stats(unsigned int heap_num, unsigned int gc_count_during_log) |
| 3478 | { |
| 3479 | printf("%2d%2d%10u%10u%12u%6u%4u%8u(%4u,%4u,%4u,%4u)\n" , |
| 3480 | heap_num, |
| 3481 | alloc_contexts_used, |
| 3482 | good_suspension, |
| 3483 | bad_suspension, |
| 3484 | (unsigned int)(total_msl_acquire / gc_count_during_log), |
| 3485 | num_high_msl_acquire / gc_count_during_log, |
| 3486 | num_low_msl_acquire / gc_count_during_log, |
| 3487 | num_msl_acquired / gc_count_during_log, |
more_space_lock_soh.num_switch_thread / gc_count_during_log,
more_space_lock_soh.num_wait_longer / gc_count_during_log,
more_space_lock_soh.num_switch_thread_w / gc_count_during_log,
more_space_lock_soh.num_disable_preemptive_w / gc_count_during_log);
| 3492 | } |
| 3493 | |
| 3494 | #endif //SYNCHRONIZATION_STATS |
| 3495 | |
| 3496 | #define NUM_LOH_ALIST (7) |
| 3497 | #define BASE_LOH_ALIST (64*1024) |
| 3498 | PER_HEAP |
| 3499 | alloc_list loh_alloc_list[NUM_LOH_ALIST-1]; |
| 3500 | |
| 3501 | #define NUM_GEN2_ALIST (12) |
| 3502 | #ifdef BIT64 |
| 3503 | #define BASE_GEN2_ALIST (1*256) |
| 3504 | #else |
| 3505 | #define BASE_GEN2_ALIST (1*128) |
| 3506 | #endif // BIT64 |
| 3507 | PER_HEAP |
| 3508 | alloc_list gen2_alloc_list[NUM_GEN2_ALIST-1]; |
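
// A sketch of the bucketing implied by the BASE/NUM macros above (an
// assumption for illustration, not the allocator's actual code): free items
// are binned by size into roughly power-of-two buckets starting at the BASE
// size, with the last bucket catching everything larger. For gen2 on 64-bit:
//
//   int    bucket = 0;
//   size_t limit  = BASE_GEN2_ALIST;            // 256 bytes
//   while ((bucket < (NUM_GEN2_ALIST - 2)) && (size >= limit))
//   {
//       limit <<= 1;
//       bucket++;
//   }
//   // e.g. size 300 -> bucket 1, size 10000 -> bucket 6
//
// The LOH buckets follow the same idea with BASE_LOH_ALIST (64 KB).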
| 3509 | |
| 3510 | //------------------------------------------ |
| 3511 | |
| 3512 | PER_HEAP |
| 3513 | dynamic_data dynamic_data_table [NUMBERGENERATIONS+1]; |
| 3514 | |
| 3515 | PER_HEAP |
| 3516 | gc_history_per_heap gc_data_per_heap; |
| 3517 | |
| 3518 | PER_HEAP |
| 3519 | size_t maxgen_pinned_compact_before_advance; |
| 3520 | |
| 3521 | // dynamic tuning. |
| 3522 | PER_HEAP |
| 3523 | BOOL dt_low_ephemeral_space_p (gc_tuning_point tp); |
| 3524 | // if elevate_p is FALSE, it means we are determining fragmentation for a generation |
| 3525 | // to see if we should condemn this gen; otherwise it means we are determining if |
| 3526 | // we should elevate to doing max_gen from an ephemeral gen. |
| 3527 | PER_HEAP |
| 3528 | BOOL dt_high_frag_p (gc_tuning_point tp, int gen_number, BOOL elevate_p=FALSE); |
| 3529 | PER_HEAP |
| 3530 | BOOL |
| 3531 | dt_estimate_reclaim_space_p (gc_tuning_point tp, int gen_number); |
| 3532 | PER_HEAP |
| 3533 | BOOL dt_estimate_high_frag_p (gc_tuning_point tp, int gen_number, uint64_t available_mem); |
| 3534 | PER_HEAP |
| 3535 | BOOL dt_low_card_table_efficiency_p (gc_tuning_point tp); |
| 3536 | |
| 3537 | PER_HEAP |
int generation_skip_ratio; // in %
| 3539 | |
| 3540 | PER_HEAP |
| 3541 | BOOL gen0_bricks_cleared; |
| 3542 | #ifdef FFIND_OBJECT |
| 3543 | PER_HEAP |
| 3544 | int gen0_must_clear_bricks; |
| 3545 | #endif //FFIND_OBJECT |
| 3546 | |
| 3547 | PER_HEAP_ISOLATED |
| 3548 | bool maxgen_size_inc_p; |
| 3549 | |
| 3550 | PER_HEAP_ISOLATED |
| 3551 | size_t full_gc_counts[gc_type_max]; |
| 3552 | |
// the # of bytes allocated on the LOH since the last full compacting GC.
| 3554 | PER_HEAP |
| 3555 | uint64_t loh_alloc_since_cg; |
| 3556 | |
| 3557 | PER_HEAP |
| 3558 | BOOL elevation_requested; |
| 3559 | |
| 3560 | // if this is TRUE, we should always guarantee that we do a |
| 3561 | // full compacting GC before we OOM. |
| 3562 | PER_HEAP |
| 3563 | BOOL last_gc_before_oom; |
| 3564 | |
| 3565 | PER_HEAP_ISOLATED |
| 3566 | BOOL should_expand_in_full_gc; |
| 3567 | |
// When we decide whether we should expand the heap or not, we are
// fine NOT expanding if we find enough free space in gen0's free
// list or at the end of seg, and we check this in decide_on_compacting.
// This is an expensive check so we just record the fact here so the
// allocator doesn't need to check again.
| 3573 | PER_HEAP |
| 3574 | BOOL sufficient_gen0_space_p; |
| 3575 | |
| 3576 | #ifdef MULTIPLE_HEAPS |
| 3577 | PER_HEAP |
| 3578 | bool gen0_allocated_after_gc_p; |
| 3579 | #endif //MULTIPLE_HEAPS |
| 3580 | |
| 3581 | // A provisional mode means we could change our mind in the middle of a GC |
| 3582 | // and want to do a different GC instead. |
| 3583 | // |
| 3584 | // Right now there's only one such case which is in the middle of a gen1 |
| 3585 | // GC we want to do a blocking gen2 instead. If/When we have more we should |
| 3586 | // have an enum that tells us which case in this provisional mode |
| 3587 | // we are in. |
| 3588 | // |
| 3589 | // When this mode is triggered, our current (only) condition says |
| 3590 | // we have high fragmentation in gen2 even after we do a compacting |
| 3591 | // full GC which is an indication of heavy pinning in gen2. In this |
// case we never do BGCs; we just do gen0 or gen1 GCs until a
// gen1 GC needs to increase the gen2 size, in which case we finish up
// the current gen1 as a sweeping GC and immediately do a compacting
// full GC instead (without restarting the EE).
| 3596 | PER_HEAP_ISOLATED |
| 3597 | bool provisional_mode_triggered; |
| 3598 | |
| 3599 | PER_HEAP_ISOLATED |
| 3600 | bool pm_trigger_full_gc; |
| 3601 | |
| 3602 | // For testing only BEG |
// pm_stress_on currently means (since we just have one mode) we
// randomly turn the mode on, and after a random # of NGC2s we
// turn it off.
// NOTE that this means concurrent GC will be disabled so we can
// simulate how this mode is supposed to be used.
| 3608 | PER_HEAP_ISOLATED |
| 3609 | bool pm_stress_on; |
| 3610 | |
| 3611 | PER_HEAP_ISOLATED |
| 3612 | size_t provisional_triggered_gc_count; |
| 3613 | |
| 3614 | PER_HEAP_ISOLATED |
| 3615 | size_t provisional_off_gc_count; |
| 3616 | // For testing only END |
| 3617 | |
| 3618 | PER_HEAP_ISOLATED |
| 3619 | size_t num_provisional_triggered; |
| 3620 | |
| 3621 | #ifdef BACKGROUND_GC |
| 3622 | PER_HEAP_ISOLATED |
| 3623 | size_t ephemeral_fgc_counts[max_generation]; |
| 3624 | |
| 3625 | PER_HEAP_ISOLATED |
| 3626 | BOOL alloc_wait_event_p; |
| 3627 | |
| 3628 | PER_HEAP |
| 3629 | uint8_t* next_sweep_obj; |
| 3630 | |
| 3631 | PER_HEAP |
| 3632 | uint8_t* current_sweep_pos; |
| 3633 | |
| 3634 | #endif //BACKGROUND_GC |
| 3635 | |
| 3636 | PER_HEAP |
| 3637 | fgm_history fgm_result; |
| 3638 | |
| 3639 | PER_HEAP_ISOLATED |
| 3640 | size_t eph_gen_starts_size; |
| 3641 | |
| 3642 | #ifdef GC_CONFIG_DRIVEN |
| 3643 | PER_HEAP_ISOLATED |
| 3644 | size_t time_init; |
| 3645 | |
| 3646 | PER_HEAP_ISOLATED |
| 3647 | size_t time_since_init; |
| 3648 | |
| 3649 | // 0 stores compacting GCs; |
| 3650 | // 1 stores sweeping GCs; |
| 3651 | PER_HEAP_ISOLATED |
| 3652 | size_t compact_or_sweep_gcs[2]; |
| 3653 | |
| 3654 | PER_HEAP |
| 3655 | size_t interesting_data_per_gc[max_idp_count]; |
| 3656 | #endif //GC_CONFIG_DRIVEN |
| 3657 | |
| 3658 | PER_HEAP |
| 3659 | BOOL ro_segments_in_range; |
| 3660 | |
| 3661 | #ifdef BACKGROUND_GC |
| 3662 | PER_HEAP |
| 3663 | heap_segment* freeable_small_heap_segment; |
| 3664 | #endif //BACKGROUND_GC |
| 3665 | |
| 3666 | PER_HEAP |
| 3667 | heap_segment* freeable_large_heap_segment; |
| 3668 | |
| 3669 | PER_HEAP_ISOLATED |
| 3670 | heap_segment* segment_standby_list; |
| 3671 | |
| 3672 | PER_HEAP |
| 3673 | size_t ordered_free_space_indices[MAX_NUM_BUCKETS]; |
| 3674 | |
| 3675 | PER_HEAP |
| 3676 | size_t saved_ordered_free_space_indices[MAX_NUM_BUCKETS]; |
| 3677 | |
| 3678 | PER_HEAP |
| 3679 | size_t ordered_plug_indices[MAX_NUM_BUCKETS]; |
| 3680 | |
| 3681 | PER_HEAP |
| 3682 | size_t saved_ordered_plug_indices[MAX_NUM_BUCKETS]; |
| 3683 | |
| 3684 | PER_HEAP |
| 3685 | BOOL ordered_plug_indices_init; |
| 3686 | |
| 3687 | PER_HEAP |
| 3688 | BOOL use_bestfit; |
| 3689 | |
| 3690 | PER_HEAP |
| 3691 | uint8_t* bestfit_first_pin; |
| 3692 | |
| 3693 | PER_HEAP |
| 3694 | BOOL commit_end_of_seg; |
| 3695 | |
| 3696 | PER_HEAP |
| 3697 | size_t max_free_space_items; // dynamically adjusted. |
| 3698 | |
| 3699 | PER_HEAP |
| 3700 | size_t free_space_buckets; |
| 3701 | |
| 3702 | PER_HEAP |
| 3703 | size_t free_space_items; |
| 3704 | |
| 3705 | // -1 means we are using all the free |
| 3706 | // spaces we have (not including |
| 3707 | // end of seg space). |
| 3708 | PER_HEAP |
| 3709 | int trimmed_free_space_index; |
| 3710 | |
| 3711 | PER_HEAP |
| 3712 | size_t total_ephemeral_plugs; |
| 3713 | |
| 3714 | PER_HEAP |
| 3715 | seg_free_spaces* bestfit_seg; |
| 3716 | |
// Note: we know this from the plan phase.
// total_ephemeral_plugs actually has the same value,
// but while we are calculating it we also store
// info on how big the plugs are for best fit, which we
// don't do in the plan phase.
| 3722 | // TODO: get rid of total_ephemeral_plugs. |
| 3723 | PER_HEAP |
| 3724 | size_t total_ephemeral_size; |
| 3725 | |
| 3726 | public: |
| 3727 | |
| 3728 | #ifdef HEAP_ANALYZE |
| 3729 | |
| 3730 | PER_HEAP_ISOLATED |
| 3731 | BOOL heap_analyze_enabled; |
| 3732 | |
| 3733 | PER_HEAP |
| 3734 | size_t internal_root_array_length; |
| 3735 | |
| 3736 | // next two fields are used to optimize the search for the object |
| 3737 | // enclosing the current reference handled by ha_mark_object_simple. |
| 3738 | PER_HEAP |
| 3739 | uint8_t* current_obj; |
| 3740 | |
| 3741 | PER_HEAP |
| 3742 | size_t current_obj_size; |
| 3743 | |
| 3744 | #endif //HEAP_ANALYZE |
| 3745 | |
| 3746 | /* ----------------------- global members ----------------------- */ |
| 3747 | public: |
| 3748 | |
| 3749 | PER_HEAP |
| 3750 | int condemned_generation_num; |
| 3751 | |
| 3752 | PER_HEAP |
| 3753 | BOOL blocking_collection; |
| 3754 | |
| 3755 | #ifdef MULTIPLE_HEAPS |
| 3756 | static |
| 3757 | int n_heaps; |
| 3758 | |
| 3759 | static |
| 3760 | gc_heap** g_heaps; |
| 3761 | |
| 3762 | static |
| 3763 | size_t* g_promoted; |
| 3764 | #ifdef BACKGROUND_GC |
| 3765 | static |
| 3766 | size_t* g_bpromoted; |
| 3767 | #endif //BACKGROUND_GC |
| 3768 | #ifdef MH_SC_MARK |
| 3769 | PER_HEAP_ISOLATED |
| 3770 | int* g_mark_stack_busy; |
| 3771 | #endif //MH_SC_MARK |
| 3772 | #else |
| 3773 | static |
| 3774 | size_t g_promoted; |
| 3775 | #ifdef BACKGROUND_GC |
| 3776 | static |
| 3777 | size_t g_bpromoted; |
| 3778 | #endif //BACKGROUND_GC |
| 3779 | #endif //MULTIPLE_HEAPS |
| 3780 | |
| 3781 | static |
| 3782 | size_t reserved_memory; |
| 3783 | static |
| 3784 | size_t reserved_memory_limit; |
| 3785 | static |
| 3786 | BOOL g_low_memory_status; |
| 3787 | |
| 3788 | protected: |
| 3789 | PER_HEAP |
| 3790 | void update_collection_counts (); |
| 3791 | }; // class gc_heap |
| 3792 | |
| 3793 | #define ASSERT_OFFSETS_MATCH(field) \ |
| 3794 | static_assert(offsetof(dac_gc_heap, field) == offsetof(gc_heap, field), #field " offset mismatch") |
| 3795 | |
| 3796 | #ifdef MULTIPLE_HEAPS |
| 3797 | ASSERT_OFFSETS_MATCH(alloc_allocated); |
| 3798 | ASSERT_OFFSETS_MATCH(ephemeral_heap_segment); |
| 3799 | ASSERT_OFFSETS_MATCH(finalize_queue); |
| 3800 | ASSERT_OFFSETS_MATCH(oom_info); |
| 3801 | ASSERT_OFFSETS_MATCH(interesting_data_per_heap); |
| 3802 | ASSERT_OFFSETS_MATCH(compact_reasons_per_heap); |
| 3803 | ASSERT_OFFSETS_MATCH(expand_mechanisms_per_heap); |
| 3804 | ASSERT_OFFSETS_MATCH(interesting_mechanism_bits_per_heap); |
| 3805 | ASSERT_OFFSETS_MATCH(internal_root_array); |
| 3806 | ASSERT_OFFSETS_MATCH(internal_root_array_index); |
| 3807 | ASSERT_OFFSETS_MATCH(heap_analyze_success); |
| 3808 | ASSERT_OFFSETS_MATCH(generation_table); |
| 3809 | #endif // MULTIPLE_HEAPS |
| 3810 | |
| 3811 | #ifdef FEATURE_PREMORTEM_FINALIZATION |
| 3812 | class CFinalize |
| 3813 | { |
| 3814 | #ifdef DACCESS_COMPILE |
| 3815 | friend class ::ClrDataAccess; |
| 3816 | #endif // DACCESS_COMPILE |
| 3817 | |
| 3818 | friend class CFinalizeStaticAsserts; |
| 3819 | |
| 3820 | private: |
| 3821 | |
// To add a segment, adjust this count and add a constant for it below.
static const int ExtraSegCount = 2;
| 3824 | static const int FinalizerListSeg = NUMBERGENERATIONS+1; |
| 3825 | static const int CriticalFinalizerListSeg = NUMBERGENERATIONS; |
| 3826 | //Does not correspond to a segment |
| 3827 | static const int FreeList = NUMBERGENERATIONS+ExtraSegCount; |
| 3828 | |
| 3829 | PTR_PTR_Object m_FillPointers[NUMBERGENERATIONS+ExtraSegCount]; |
| 3830 | PTR_PTR_Object m_Array; |
| 3831 | PTR_PTR_Object m_EndArray; |
| 3832 | size_t m_PromotedCount; |
| 3833 | |
| 3834 | VOLATILE(int32_t) lock; |
| 3835 | #ifdef _DEBUG |
| 3836 | EEThreadId lockowner_threadid; |
| 3837 | #endif // _DEBUG |
| 3838 | |
| 3839 | BOOL GrowArray(); |
| 3840 | void MoveItem (Object** fromIndex, |
| 3841 | unsigned int fromSeg, |
| 3842 | unsigned int toSeg); |
| 3843 | |
| 3844 | inline PTR_PTR_Object& SegQueue (unsigned int Seg) |
| 3845 | { |
| 3846 | return (Seg ? m_FillPointers [Seg-1] : m_Array); |
| 3847 | } |
| 3848 | inline PTR_PTR_Object& SegQueueLimit (unsigned int Seg) |
| 3849 | { |
| 3850 | return m_FillPointers [Seg]; |
| 3851 | } |
| 3852 | |
BOOL IsSegEmpty (unsigned int i)
{
    ASSERT ((int)i < FreeList);
    return (SegQueueLimit (i) == SegQueue (i));
}
| 3859 | |
| 3860 | BOOL FinalizeSegForAppDomain (void *pDomain, |
| 3861 | BOOL fRunFinalizers, |
| 3862 | unsigned int Seg); |
| 3863 | |
| 3864 | public: |
| 3865 | ~CFinalize(); |
| 3866 | bool Initialize(); |
| 3867 | void EnterFinalizeLock(); |
| 3868 | void LeaveFinalizeLock(); |
| 3869 | bool RegisterForFinalization (int gen, Object* obj, size_t size=0); |
| 3870 | Object* GetNextFinalizableObject (BOOL only_non_critical=FALSE); |
| 3871 | BOOL ScanForFinalization (promote_func* fn, int gen,BOOL mark_only_p, gc_heap* hp); |
| 3872 | void RelocateFinalizationData (int gen, gc_heap* hp); |
| 3873 | void WalkFReachableObjects (fq_walk_fn fn); |
| 3874 | void GcScanRoots (promote_func* fn, int hn, ScanContext *pSC); |
| 3875 | void UpdatePromotedGenerations (int gen, BOOL gen_0_empty_p); |
| 3876 | size_t GetPromotedCount(); |
| 3877 | |
| 3878 | //Methods used by the shutdown code to call every finalizer |
| 3879 | void SetSegForShutDown(BOOL fHasLock); |
| 3880 | size_t GetNumberFinalizableObjects(); |
| 3881 | void DiscardNonCriticalObjects(); |
| 3882 | |
| 3883 | //Methods used by the app domain unloading call to finalize objects in an app domain |
| 3884 | bool FinalizeAppDomain (void *pDomain, bool fRunFinalizers); |
| 3885 | |
| 3886 | void CheckFinalizerObjects(); |
| 3887 | }; |
| 3888 | |
| 3889 | class CFinalizeStaticAsserts { |
| 3890 | static_assert(dac_finalize_queue::ExtraSegCount == CFinalize::ExtraSegCount, "ExtraSegCount mismatch" ); |
| 3891 | static_assert(offsetof(dac_finalize_queue, m_FillPointers) == offsetof(CFinalize, m_FillPointers), "CFinalize layout mismatch" ); |
| 3892 | }; |
| 3893 | #endif // FEATURE_PREMORTEM_FINALIZATION |
| 3894 | |
| 3895 | inline |
| 3896 | size_t& dd_begin_data_size (dynamic_data* inst) |
| 3897 | { |
| 3898 | return inst->begin_data_size; |
| 3899 | } |
| 3900 | inline |
| 3901 | size_t& dd_survived_size (dynamic_data* inst) |
| 3902 | { |
| 3903 | return inst->survived_size; |
| 3904 | } |
| 3905 | #if defined (RESPECT_LARGE_ALIGNMENT) || defined (FEATURE_STRUCTALIGN) |
| 3906 | inline |
| 3907 | size_t& dd_num_npinned_plugs(dynamic_data* inst) |
| 3908 | { |
| 3909 | return inst->num_npinned_plugs; |
| 3910 | } |
| 3911 | #endif //RESPECT_LARGE_ALIGNMENT || FEATURE_STRUCTALIGN |
| 3912 | inline |
| 3913 | size_t& dd_pinned_survived_size (dynamic_data* inst) |
| 3914 | { |
| 3915 | return inst->pinned_survived_size; |
| 3916 | } |
| 3917 | inline |
| 3918 | size_t& dd_added_pinned_size (dynamic_data* inst) |
| 3919 | { |
| 3920 | return inst->added_pinned_size; |
| 3921 | } |
| 3922 | inline |
| 3923 | size_t& dd_artificial_pinned_survived_size (dynamic_data* inst) |
| 3924 | { |
| 3925 | return inst->artificial_pinned_survived_size; |
| 3926 | } |
| 3927 | #ifdef SHORT_PLUGS |
| 3928 | inline |
| 3929 | size_t& dd_padding_size (dynamic_data* inst) |
| 3930 | { |
| 3931 | return inst->padding_size; |
| 3932 | } |
| 3933 | #endif //SHORT_PLUGS |
| 3934 | inline |
| 3935 | size_t& dd_current_size (dynamic_data* inst) |
| 3936 | { |
| 3937 | return inst->current_size; |
| 3938 | } |
| 3939 | inline |
| 3940 | float& dd_surv (dynamic_data* inst) |
| 3941 | { |
| 3942 | return inst->surv; |
| 3943 | } |
| 3944 | inline |
| 3945 | size_t& dd_freach_previous_promotion (dynamic_data* inst) |
| 3946 | { |
| 3947 | return inst->freach_previous_promotion; |
| 3948 | } |
| 3949 | inline |
| 3950 | size_t& dd_desired_allocation (dynamic_data* inst) |
| 3951 | { |
| 3952 | return inst->desired_allocation; |
| 3953 | } |
| 3954 | inline |
| 3955 | size_t& dd_collection_count (dynamic_data* inst) |
| 3956 | { |
| 3957 | return inst->collection_count; |
| 3958 | } |
| 3959 | inline |
| 3960 | size_t& dd_promoted_size (dynamic_data* inst) |
| 3961 | { |
| 3962 | return inst->promoted_size; |
| 3963 | } |
| 3964 | inline |
| 3965 | float& dd_limit (dynamic_data* inst) |
| 3966 | { |
| 3967 | return inst->sdata->limit; |
| 3968 | } |
| 3969 | inline |
| 3970 | float& dd_max_limit (dynamic_data* inst) |
| 3971 | { |
| 3972 | return inst->sdata->max_limit; |
| 3973 | } |
| 3974 | inline |
| 3975 | size_t& dd_max_size (dynamic_data* inst) |
| 3976 | { |
| 3977 | return inst->sdata->max_size; |
| 3978 | } |
| 3979 | inline |
| 3980 | size_t& dd_min_size (dynamic_data* inst) |
| 3981 | { |
| 3982 | return inst->min_size; |
| 3983 | } |
| 3984 | inline |
| 3985 | ptrdiff_t& dd_new_allocation (dynamic_data* inst) |
| 3986 | { |
| 3987 | return inst->new_allocation; |
| 3988 | } |
| 3989 | inline |
| 3990 | ptrdiff_t& dd_gc_new_allocation (dynamic_data* inst) |
| 3991 | { |
| 3992 | return inst->gc_new_allocation; |
| 3993 | } |
| 3994 | inline |
| 3995 | size_t& dd_fragmentation_limit (dynamic_data* inst) |
| 3996 | { |
| 3997 | return inst->sdata->fragmentation_limit; |
| 3998 | } |
| 3999 | inline |
| 4000 | float& dd_fragmentation_burden_limit (dynamic_data* inst) |
| 4001 | { |
| 4002 | return inst->sdata->fragmentation_burden_limit; |
| 4003 | } |
| 4004 | inline |
| 4005 | float dd_v_fragmentation_burden_limit (dynamic_data* inst) |
| 4006 | { |
| 4007 | return (min (2*dd_fragmentation_burden_limit (inst), 0.75f)); |
| 4008 | } |
| 4009 | inline |
| 4010 | size_t& dd_fragmentation (dynamic_data* inst) |
| 4011 | { |
| 4012 | return inst->fragmentation; |
| 4013 | } |
| 4014 | inline |
| 4015 | size_t& dd_gc_clock (dynamic_data* inst) |
| 4016 | { |
| 4017 | return inst->gc_clock; |
| 4018 | } |
| 4019 | inline |
| 4020 | size_t& dd_time_clock (dynamic_data* inst) |
| 4021 | { |
| 4022 | return inst->time_clock; |
| 4023 | } |
| 4024 | |
| 4025 | inline |
| 4026 | size_t& dd_gc_clock_interval (dynamic_data* inst) |
| 4027 | { |
| 4028 | return inst->sdata->gc_clock; |
| 4029 | } |
| 4030 | inline |
| 4031 | size_t& dd_time_clock_interval (dynamic_data* inst) |
| 4032 | { |
| 4033 | return inst->sdata->time_clock; |
| 4034 | } |
| 4035 | |
| 4036 | inline |
| 4037 | size_t& dd_gc_elapsed_time (dynamic_data* inst) |
| 4038 | { |
| 4039 | return inst->gc_elapsed_time; |
| 4040 | } |
| 4041 | |
| 4042 | inline |
| 4043 | float& dd_gc_speed (dynamic_data* inst) |
| 4044 | { |
| 4045 | return inst->gc_speed; |
| 4046 | } |
| 4047 | |
| 4048 | inline |
| 4049 | alloc_context* generation_alloc_context (generation* inst) |
| 4050 | { |
| 4051 | return &(inst->allocation_context); |
| 4052 | } |
| 4053 | |
| 4054 | inline |
| 4055 | uint8_t*& generation_allocation_start (generation* inst) |
| 4056 | { |
| 4057 | return inst->allocation_start; |
| 4058 | } |
| 4059 | inline |
| 4060 | uint8_t*& generation_allocation_pointer (generation* inst) |
| 4061 | { |
| 4062 | return inst->allocation_context.alloc_ptr; |
| 4063 | } |
| 4064 | inline |
| 4065 | uint8_t*& generation_allocation_limit (generation* inst) |
| 4066 | { |
| 4067 | return inst->allocation_context.alloc_limit; |
| 4068 | } |
| 4069 | inline |
| 4070 | allocator* generation_allocator (generation* inst) |
| 4071 | { |
| 4072 | return &inst->free_list_allocator; |
| 4073 | } |
| 4074 | |
| 4075 | inline |
| 4076 | PTR_heap_segment& generation_start_segment (generation* inst) |
| 4077 | { |
| 4078 | return inst->start_segment; |
| 4079 | } |
| 4080 | inline |
| 4081 | heap_segment*& generation_allocation_segment (generation* inst) |
| 4082 | { |
| 4083 | return inst->allocation_segment; |
| 4084 | } |
| 4085 | inline |
| 4086 | uint8_t*& generation_plan_allocation_start (generation* inst) |
| 4087 | { |
| 4088 | return inst->plan_allocation_start; |
| 4089 | } |
| 4090 | inline |
| 4091 | size_t& generation_plan_allocation_start_size (generation* inst) |
| 4092 | { |
| 4093 | return inst->plan_allocation_start_size; |
| 4094 | } |
| 4095 | inline |
| 4096 | uint8_t*& generation_allocation_context_start_region (generation* inst) |
| 4097 | { |
| 4098 | return inst->allocation_context_start_region; |
| 4099 | } |
| 4100 | inline |
| 4101 | size_t& generation_free_list_space (generation* inst) |
| 4102 | { |
| 4103 | return inst->free_list_space; |
| 4104 | } |
| 4105 | inline |
| 4106 | size_t& generation_free_obj_space (generation* inst) |
| 4107 | { |
| 4108 | return inst->free_obj_space; |
| 4109 | } |
| 4110 | inline |
| 4111 | size_t& generation_allocation_size (generation* inst) |
| 4112 | { |
| 4113 | return inst->allocation_size; |
| 4114 | } |
| 4115 | |
| 4116 | inline |
| 4117 | size_t& generation_pinned_allocated (generation* inst) |
| 4118 | { |
| 4119 | return inst->pinned_allocated; |
| 4120 | } |
| 4121 | inline |
| 4122 | size_t& generation_pinned_allocation_sweep_size (generation* inst) |
| 4123 | { |
| 4124 | return inst->pinned_allocation_sweep_size; |
| 4125 | } |
| 4126 | inline |
| 4127 | size_t& generation_pinned_allocation_compact_size (generation* inst) |
| 4128 | { |
| 4129 | return inst->pinned_allocation_compact_size; |
| 4130 | } |
| 4131 | inline |
| 4132 | size_t& generation_free_list_allocated (generation* inst) |
| 4133 | { |
| 4134 | return inst->free_list_allocated; |
| 4135 | } |
| 4136 | inline |
| 4137 | size_t& generation_end_seg_allocated (generation* inst) |
| 4138 | { |
| 4139 | return inst->end_seg_allocated; |
| 4140 | } |
| 4141 | inline |
| 4142 | BOOL& generation_allocate_end_seg_p (generation* inst) |
| 4143 | { |
| 4144 | return inst->allocate_end_seg_p; |
| 4145 | } |
| 4146 | inline |
| 4147 | size_t& generation_condemned_allocated (generation* inst) |
| 4148 | { |
| 4149 | return inst->condemned_allocated; |
| 4150 | } |
| 4151 | #ifdef FREE_USAGE_STATS |
| 4152 | inline |
| 4153 | size_t& generation_pinned_free_obj_space (generation* inst) |
| 4154 | { |
| 4155 | return inst->pinned_free_obj_space; |
| 4156 | } |
| 4157 | inline |
| 4158 | size_t& generation_allocated_in_pinned_free (generation* inst) |
| 4159 | { |
| 4160 | return inst->allocated_in_pinned_free; |
| 4161 | } |
| 4162 | inline |
| 4163 | size_t& generation_allocated_since_last_pin (generation* inst) |
| 4164 | { |
| 4165 | return inst->allocated_since_last_pin; |
| 4166 | } |
| 4167 | #endif //FREE_USAGE_STATS |
| 4168 | inline |
| 4169 | float generation_allocator_efficiency (generation* inst) |
| 4170 | { |
| 4171 | if ((generation_free_list_allocated (inst) + generation_free_obj_space (inst)) != 0) |
| 4172 | { |
| 4173 | return ((float) (generation_free_list_allocated (inst)) / (float)(generation_free_list_allocated (inst) + generation_free_obj_space (inst))); |
| 4174 | } |
| 4175 | else |
| 4176 | return 0; |
| 4177 | } |
| 4178 | inline |
| 4179 | size_t generation_unusable_fragmentation (generation* inst) |
| 4180 | { |
| 4181 | return (size_t)(generation_free_obj_space (inst) + |
| 4182 | (1.0f-generation_allocator_efficiency(inst))*generation_free_list_space (inst)); |
| 4183 | } |
| 4184 | |
| 4185 | #define plug_skew sizeof(ObjHeader) |
| 4186 | // We always use USE_PADDING_TAIL when fitting so items on the free list should be |
| 4187 | // twice the min_obj_size. |
| 4188 | #define min_free_list (2*min_obj_size) |
| 4189 | struct plug |
| 4190 | { |
| 4191 | uint8_t * skew[plug_skew / sizeof(uint8_t *)]; |
| 4192 | }; |
| 4193 | |
| 4194 | class pair |
| 4195 | { |
| 4196 | public: |
| 4197 | short left; |
| 4198 | short right; |
| 4199 | }; |
| 4200 | |
//Note that these encode the fact that plug_skew is a multiple of uint8_t*.
// Each new field is prepended to the prior struct.
| 4203 | |
| 4204 | struct plug_and_pair |
| 4205 | { |
| 4206 | pair m_pair; |
| 4207 | plug m_plug; |
| 4208 | }; |
| 4209 | |
| 4210 | struct plug_and_reloc |
| 4211 | { |
| 4212 | ptrdiff_t reloc; |
| 4213 | pair m_pair; |
| 4214 | plug m_plug; |
| 4215 | }; |
| 4216 | |
| 4217 | struct plug_and_gap |
| 4218 | { |
| 4219 | ptrdiff_t gap; |
| 4220 | ptrdiff_t reloc; |
| 4221 | union |
| 4222 | { |
| 4223 | pair m_pair; |
| 4224 | int lr; //for clearing the entire pair in one instruction |
| 4225 | }; |
| 4226 | plug m_plug; |
| 4227 | }; |
| 4228 | |
| 4229 | struct gap_reloc_pair |
| 4230 | { |
| 4231 | size_t gap; |
| 4232 | size_t reloc; |
| 4233 | pair m_pair; |
| 4234 | }; |
| 4235 | |
| 4236 | #define min_pre_pin_obj_size (sizeof (gap_reloc_pair) + min_obj_size) |
| 4237 | |
| 4238 | struct DECLSPEC_ALIGN(8) aligned_plug_and_gap |
| 4239 | { |
| 4240 | plug_and_gap plugandgap; |
| 4241 | }; |
| 4242 | |
| 4243 | struct loh_obj_and_pad |
| 4244 | { |
| 4245 | ptrdiff_t reloc; |
| 4246 | plug m_plug; |
| 4247 | }; |
| 4248 | |
| 4249 | struct loh_padding_obj |
| 4250 | { |
| 4251 | uint8_t* mt; |
| 4252 | size_t len; |
| 4253 | ptrdiff_t reloc; |
| 4254 | plug m_plug; |
| 4255 | }; |
| 4256 | #define loh_padding_obj_size (sizeof(loh_padding_obj)) |
| 4257 | |
| 4258 | //flags description |
| 4259 | #define heap_segment_flags_readonly 1 |
| 4260 | #define heap_segment_flags_inrange 2 |
| 4261 | #define heap_segment_flags_unmappable 4 |
| 4262 | #define heap_segment_flags_loh 8 |
| 4263 | #ifdef BACKGROUND_GC |
| 4264 | #define heap_segment_flags_swept 16 |
| 4265 | #define heap_segment_flags_decommitted 32 |
| 4266 | #define heap_segment_flags_ma_committed 64 |
| 4267 | // for segments whose mark array is only partially committed. |
| 4268 | #define heap_segment_flags_ma_pcommitted 128 |
| 4269 | #define heap_segment_flags_loh_delete 256 |
| 4270 | #endif //BACKGROUND_GC |
| 4271 | |
| 4272 | //need to be careful to keep enough pad items to fit a relocation node |
| 4273 | //padded to QuadWord before the plug_skew |
| 4274 | |
| 4275 | class heap_segment |
| 4276 | { |
| 4277 | public: |
| 4278 | uint8_t* allocated; |
| 4279 | uint8_t* committed; |
| 4280 | uint8_t* reserved; |
| 4281 | uint8_t* used; |
| 4282 | uint8_t* mem; |
| 4283 | size_t flags; |
| 4284 | PTR_heap_segment next; |
| 4285 | uint8_t* background_allocated; |
| 4286 | #ifdef MULTIPLE_HEAPS |
| 4287 | gc_heap* heap; |
| 4288 | #endif //MULTIPLE_HEAPS |
| 4289 | uint8_t* plan_allocated; |
| 4290 | uint8_t* saved_bg_allocated; |
| 4291 | |
| 4292 | #ifdef _MSC_VER |
| 4293 | // Disable this warning - we intentionally want __declspec(align()) to insert padding for us |
| 4294 | #pragma warning(disable:4324) // structure was padded due to __declspec(align()) |
| 4295 | #endif |
| 4296 | aligned_plug_and_gap padandplug; |
| 4297 | #ifdef _MSC_VER |
| 4298 | #pragma warning(default:4324) // structure was padded due to __declspec(align()) |
| 4299 | #endif |
| 4300 | }; |
| 4301 | |
| 4302 | static_assert(offsetof(dac_heap_segment, allocated) == offsetof(heap_segment, allocated), "DAC heap segment layout mismatch" ); |
| 4303 | static_assert(offsetof(dac_heap_segment, committed) == offsetof(heap_segment, committed), "DAC heap segment layout mismatch" ); |
| 4304 | static_assert(offsetof(dac_heap_segment, reserved) == offsetof(heap_segment, reserved), "DAC heap segment layout mismatch" ); |
| 4305 | static_assert(offsetof(dac_heap_segment, used) == offsetof(heap_segment, used), "DAC heap segment layout mismatch" ); |
| 4306 | static_assert(offsetof(dac_heap_segment, mem) == offsetof(heap_segment, mem), "DAC heap segment layout mismatch" ); |
| 4307 | static_assert(offsetof(dac_heap_segment, flags) == offsetof(heap_segment, flags), "DAC heap segment layout mismatch" ); |
| 4308 | static_assert(offsetof(dac_heap_segment, next) == offsetof(heap_segment, next), "DAC heap segment layout mismatch" ); |
| 4309 | static_assert(offsetof(dac_heap_segment, background_allocated) == offsetof(heap_segment, background_allocated), "DAC heap segment layout mismatch" ); |
| 4310 | #ifdef MULTIPLE_HEAPS |
| 4311 | static_assert(offsetof(dac_heap_segment, heap) == offsetof(heap_segment, heap), "DAC heap segment layout mismatch" ); |
| 4312 | #endif // MULTIPLE_HEAPS |
| 4313 | |
| 4314 | inline |
| 4315 | uint8_t*& heap_segment_reserved (heap_segment* inst) |
| 4316 | { |
| 4317 | return inst->reserved; |
| 4318 | } |
| 4319 | inline |
| 4320 | uint8_t*& heap_segment_committed (heap_segment* inst) |
| 4321 | { |
| 4322 | return inst->committed; |
| 4323 | } |
| 4324 | inline |
| 4325 | uint8_t*& heap_segment_used (heap_segment* inst) |
| 4326 | { |
| 4327 | return inst->used; |
| 4328 | } |
| 4329 | inline |
| 4330 | uint8_t*& heap_segment_allocated (heap_segment* inst) |
| 4331 | { |
| 4332 | return inst->allocated; |
| 4333 | } |
| 4334 | |
| 4335 | inline |
| 4336 | BOOL heap_segment_read_only_p (heap_segment* inst) |
| 4337 | { |
| 4338 | return ((inst->flags & heap_segment_flags_readonly) != 0); |
| 4339 | } |
| 4340 | |
| 4341 | inline |
| 4342 | BOOL heap_segment_in_range_p (heap_segment* inst) |
| 4343 | { |
| 4344 | return (!(inst->flags & heap_segment_flags_readonly) || |
| 4345 | ((inst->flags & heap_segment_flags_inrange) != 0)); |
| 4346 | } |
| 4347 | |
| 4348 | inline |
| 4349 | BOOL heap_segment_unmappable_p (heap_segment* inst) |
| 4350 | { |
| 4351 | return (!(inst->flags & heap_segment_flags_readonly) || |
| 4352 | ((inst->flags & heap_segment_flags_unmappable) != 0)); |
| 4353 | } |
| 4354 | |
| 4355 | inline |
| 4356 | BOOL heap_segment_loh_p (heap_segment * inst) |
| 4357 | { |
| 4358 | return !!(inst->flags & heap_segment_flags_loh); |
| 4359 | } |
| 4360 | |
| 4361 | #ifdef BACKGROUND_GC |
| 4362 | inline |
| 4363 | BOOL heap_segment_decommitted_p (heap_segment * inst) |
| 4364 | { |
| 4365 | return !!(inst->flags & heap_segment_flags_decommitted); |
| 4366 | } |
| 4367 | #endif //BACKGROUND_GC |
| 4368 | |
| 4369 | inline |
| 4370 | PTR_heap_segment & heap_segment_next (heap_segment* inst) |
| 4371 | { |
| 4372 | return inst->next; |
| 4373 | } |
| 4374 | inline |
| 4375 | uint8_t*& heap_segment_mem (heap_segment* inst) |
| 4376 | { |
| 4377 | return inst->mem; |
| 4378 | } |
| 4379 | inline |
| 4380 | uint8_t*& heap_segment_plan_allocated (heap_segment* inst) |
| 4381 | { |
| 4382 | return inst->plan_allocated; |
| 4383 | } |
| 4384 | |
| 4385 | #ifdef BACKGROUND_GC |
| 4386 | inline |
| 4387 | uint8_t*& heap_segment_background_allocated (heap_segment* inst) |
| 4388 | { |
| 4389 | return inst->background_allocated; |
| 4390 | } |
| 4391 | inline |
| 4392 | uint8_t*& heap_segment_saved_bg_allocated (heap_segment* inst) |
| 4393 | { |
| 4394 | return inst->saved_bg_allocated; |
| 4395 | } |
| 4396 | #endif //BACKGROUND_GC |
| 4397 | |
| 4398 | #ifdef MULTIPLE_HEAPS |
| 4399 | inline |
| 4400 | gc_heap*& heap_segment_heap (heap_segment* inst) |
| 4401 | { |
| 4402 | return inst->heap; |
| 4403 | } |
| 4404 | #endif //MULTIPLE_HEAPS |
| 4405 | |
| 4406 | inline |
| 4407 | generation* gc_heap::generation_of (int n) |
| 4408 | { |
| 4409 | assert (((n <= max_generation+1) && (n >= 0))); |
| 4410 | return &generation_table [ n ]; |
| 4411 | } |
| 4412 | |
| 4413 | inline |
| 4414 | dynamic_data* gc_heap::dynamic_data_of (int gen_number) |
| 4415 | { |
| 4416 | return &dynamic_data_table [ gen_number ]; |
| 4417 | } |
| 4418 | |
| 4419 | #define GC_PAGE_SIZE 0x1000 |
| 4420 | |
| 4421 | #define card_word_width ((size_t)32) |
| 4422 | |
| 4423 | // |
// The value of card_size is determined empirically according to the average size of an object.
// In the code we also rely on the assumption that one card_table entry (uint32_t) covers at least an entire OS page.
| 4426 | // |
| 4427 | #if defined (BIT64) |
| 4428 | #define card_size ((size_t)(2*GC_PAGE_SIZE/card_word_width)) |
| 4429 | #else |
| 4430 | #define card_size ((size_t)(GC_PAGE_SIZE/card_word_width)) |
| 4431 | #endif // BIT64 |
| 4432 | |
| 4433 | inline |
| 4434 | size_t card_word (size_t card) |
| 4435 | { |
| 4436 | return card / card_word_width; |
| 4437 | } |
| 4438 | |
| 4439 | inline |
| 4440 | unsigned card_bit (size_t card) |
| 4441 | { |
| 4442 | return (unsigned)(card % card_word_width); |
| 4443 | } |
| 4444 | |
| 4445 | inline |
| 4446 | size_t gcard_of (uint8_t* object) |
| 4447 | { |
| 4448 | return (size_t)(object) / card_size; |
| 4449 | } |
| 4450 | |
| 4451 | inline |
| 4452 | void YieldProcessorScalingFactor() |
| 4453 | { |
| 4454 | unsigned int n = g_yieldProcessorScalingFactor; |
| 4455 | _ASSERTE(n != 0); |
| 4456 | do |
| 4457 | { |
| 4458 | YieldProcessor(); |
| 4459 | } while (--n != 0); |
| 4460 | } |
| 4461 | |