/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Arguments.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentRefine.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/g1/heterogeneousHeapRegionManager.hpp"
#include "memory/allocation.hpp"
#include "utilities/bitMap.inline.hpp"

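// Checker installed on the master free list. It asserts the MT safety
// protocol documented in check_mt_safety() below and that only free
// regions are ever linked onto the list.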
class MasterFreeRegionListChecker : public HeapRegionSetChecker {
public:
  void check_mt_safety() {
    // Master Free List MT safety protocol:
    // (a) If we're at a safepoint, operations on the master free list
    // should be invoked by either the VM thread (which will serialize
    // them) or by the GC workers while holding the
    // FreeList_lock.
    // (b) If we're not at a safepoint, operations on the master free
    // list should be invoked while holding the Heap_lock.

    if (SafepointSynchronize::is_at_safepoint()) {
      guarantee(Thread::current()->is_VM_thread() ||
                FreeList_lock->owned_by_self(), "master free list MT safety protocol at a safepoint");
    } else {
      guarantee(Heap_lock->owned_by_self(), "master free list MT safety protocol outside a safepoint");
    }
  }
  bool is_correct_type(HeapRegion* hr) { return hr->is_free(); }
  const char* get_description() { return "Free Regions"; }
};

HeapRegionManager::HeapRegionManager() :
  _bot_mapper(NULL),
  _cardtable_mapper(NULL),
  _card_counts_mapper(NULL),
  _available_map(mtGC),
  _num_committed(0),
  _allocated_heapregions_length(0),
  _regions(), _heap_mapper(NULL),
  _prev_bitmap_mapper(NULL),
  _next_bitmap_mapper(NULL),
  _free_list("Free list", new MasterFreeRegionListChecker())
{ }

HeapRegionManager* HeapRegionManager::create_manager(G1CollectedHeap* heap) {
  if (G1Arguments::is_heterogeneous_heap()) {
    return new HeterogeneousHeapRegionManager((uint)(G1Arguments::heap_max_size_bytes() / HeapRegion::GrainBytes) /*heap size as num of regions*/);
  }
  return new HeapRegionManager();
}

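// Records the mappers backing the heap itself and each auxiliary GC data
// structure (marking bitmaps, block offset table, card table and card
// counts), and sizes the region table to cover the full reserved heap.
// No regions are committed here; that happens lazily via expand_at() and
// make_regions_available().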
void HeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage,
                                   G1RegionToSpaceMapper* prev_bitmap,
                                   G1RegionToSpaceMapper* next_bitmap,
                                   G1RegionToSpaceMapper* bot,
                                   G1RegionToSpaceMapper* cardtable,
                                   G1RegionToSpaceMapper* card_counts) {
  _allocated_heapregions_length = 0;

  _heap_mapper = heap_storage;

  _prev_bitmap_mapper = prev_bitmap;
  _next_bitmap_mapper = next_bitmap;

  _bot_mapper = bot;
  _cardtable_mapper = cardtable;

  _card_counts_mapper = card_counts;

  MemRegion reserved = heap_storage->reserved();
  _regions.initialize(reserved.start(), reserved.end(), HeapRegion::GrainBytes);

  _available_map.initialize(_regions.length());
}

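// Returns true iff the region at the given index is committed and its
// HeapRegion object is ready for use.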
bool HeapRegionManager::is_available(uint region) const {
  return _available_map.at(region);
}

#ifdef ASSERT
bool HeapRegionManager::is_free(HeapRegion* hr) const {
  return _free_list.contains(hr);
}
#endif

HeapRegion* HeapRegionManager::new_heap_region(uint hrm_index) {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  HeapWord* bottom = g1h->bottom_addr_for_region(hrm_index);
  MemRegion mr(bottom, bottom + HeapRegion::GrainWords);
  assert(reserved().contains(mr), "invariant");
  return g1h->new_heap_region(hrm_index, mr);
}

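// Commits the heap storage for num_regions regions starting at index,
// together with the corresponding slices of every auxiliary data structure.
// If pretouch_gang is non-NULL it is used to pretouch the newly committed
// pages in parallel.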
void HeapRegionManager::commit_regions(uint index, size_t num_regions, WorkGang* pretouch_gang) {
  guarantee(num_regions > 0, "Must commit more than zero regions");
  guarantee(_num_committed + num_regions <= max_length(), "Cannot commit more than the maximum amount of regions");

  _num_committed += (uint)num_regions;

  _heap_mapper->commit_regions(index, num_regions, pretouch_gang);

  // Also commit auxiliary data
  _prev_bitmap_mapper->commit_regions(index, num_regions, pretouch_gang);
  _next_bitmap_mapper->commit_regions(index, num_regions, pretouch_gang);

  _bot_mapper->commit_regions(index, num_regions, pretouch_gang);
  _cardtable_mapper->commit_regions(index, num_regions, pretouch_gang);

  _card_counts_mapper->commit_regions(index, num_regions, pretouch_gang);
}

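// Uncommits num_regions regions starting at start, together with the
// corresponding slices of every auxiliary data structure, and clears the
// range in the available map.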
void HeapRegionManager::uncommit_regions(uint start, size_t num_regions) {
  guarantee(num_regions >= 1, "Need to specify at least one region to uncommit, tried to uncommit zero regions at %u", start);
  guarantee(_num_committed >= num_regions, "pre-condition");

  // Print before uncommitting.
  if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
    for (uint i = start; i < start + num_regions; i++) {
      HeapRegion* hr = at(i);
      G1CollectedHeap::heap()->hr_printer()->uncommit(hr);
    }
  }

  _num_committed -= (uint)num_regions;

  _available_map.par_clear_range(start, start + num_regions, BitMap::unknown_range);
  _heap_mapper->uncommit_regions(start, num_regions);

  // Also uncommit auxiliary data
  _prev_bitmap_mapper->uncommit_regions(start, num_regions);
  _next_bitmap_mapper->uncommit_regions(start, num_regions);

  _bot_mapper->uncommit_regions(start, num_regions);
  _cardtable_mapper->uncommit_regions(start, num_regions);

  _card_counts_mapper->uncommit_regions(start, num_regions);
}

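// Commits the given range of regions, lazily constructs HeapRegion objects
// for any index used for the first time, marks the range as available and
// puts each region on the free list. The storestore barrier below publishes
// a fully constructed HeapRegion before its entry in the region table
// becomes visible to concurrent readers.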
void HeapRegionManager::make_regions_available(uint start, uint num_regions, WorkGang* pretouch_gang) {
  guarantee(num_regions > 0, "No point in calling this for zero regions");
  commit_regions(start, num_regions, pretouch_gang);
  for (uint i = start; i < start + num_regions; i++) {
    if (_regions.get_by_index(i) == NULL) {
      HeapRegion* new_hr = new_heap_region(i);
      OrderAccess::storestore();
      _regions.set_by_index(i, new_hr);
      _allocated_heapregions_length = MAX2(_allocated_heapregions_length, i + 1);
    }
  }

  _available_map.par_set_range(start, start + num_regions, BitMap::unknown_range);

  for (uint i = start; i < start + num_regions; i++) {
    assert(is_available(i), "Just made region %u available but is apparently not.", i);
    HeapRegion* hr = at(i);
    if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
      G1CollectedHeap::heap()->hr_printer()->commit(hr);
    }
    HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(i);
    MemRegion mr(bottom, bottom + HeapRegion::GrainWords);

    hr->initialize(mr);
    insert_into_free_list(at(i));
  }
}

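// Reports the memory taken up by the auxiliary data structures as a
// MemoryUsage: committed auxiliary memory is reported as "used", while
// reserved auxiliary memory is reported as both "committed" and "max".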
MemoryUsage HeapRegionManager::get_auxiliary_data_memory_usage() const {
  size_t used_sz =
    _prev_bitmap_mapper->committed_size() +
    _next_bitmap_mapper->committed_size() +
    _bot_mapper->committed_size() +
    _cardtable_mapper->committed_size() +
    _card_counts_mapper->committed_size();

  size_t committed_sz =
    _prev_bitmap_mapper->reserved_size() +
    _next_bitmap_mapper->reserved_size() +
    _bot_mapper->reserved_size() +
    _cardtable_mapper->reserved_size() +
    _card_counts_mapper->reserved_size();

  return MemoryUsage(0, used_sz, committed_sz, committed_sz);
}

uint HeapRegionManager::expand_by(uint num_regions, WorkGang* pretouch_workers) {
  return expand_at(0, num_regions, pretouch_workers);
}

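// Makes up to num_regions regions available, starting the search at start
// and working through runs of uncommitted regions found by
// find_unavailable_from_idx(). Returns the number of regions actually
// expanded, which may be less than requested if the reserved heap runs out
// of uncommitted regions first.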
uint HeapRegionManager::expand_at(uint start, uint num_regions, WorkGang* pretouch_workers) {
  if (num_regions == 0) {
    return 0;
  }

  uint cur = start;
  uint idx_last_found = 0;
  uint num_last_found = 0;

  uint expanded = 0;

  while (expanded < num_regions &&
         (num_last_found = find_unavailable_from_idx(cur, &idx_last_found)) > 0) {
    uint to_expand = MIN2(num_regions - expanded, num_last_found);
    make_regions_available(idx_last_found, to_expand, pretouch_workers);
    expanded += to_expand;
    cur = idx_last_found + num_last_found + 1;
  }

  verify_optional();
  return expanded;
}

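// Searches for a run of num contiguous candidate regions. With empty_only,
// only committed and empty regions qualify; otherwise uncommitted regions
// are candidates as well (the caller is then expected to commit them).
// Returns the index of the first region of the run, or G1_NO_HRM_INDEX if
// no such run exists.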
uint HeapRegionManager::find_contiguous(size_t num, bool empty_only) {
  uint found = 0;
  size_t length_found = 0;
  uint cur = 0;

  while (length_found < num && cur < max_length()) {
    HeapRegion* hr = _regions.get_by_index(cur);
    if ((!empty_only && !is_available(cur)) || (is_available(cur) && hr != NULL && hr->is_empty())) {
      // This region is a potential candidate for allocation into.
      length_found++;
    } else {
      // This region is not a candidate. The next region is the next possible one.
      found = cur + 1;
      length_found = 0;
    }
    cur++;
  }

  if (length_found == num) {
    for (uint i = found; i < (found + num); i++) {
      HeapRegion* hr = _regions.get_by_index(i);
      // sanity check
      guarantee((!empty_only && !is_available(i)) || (is_available(i) && hr != NULL && hr->is_empty()),
                "Found region sequence starting at " UINT32_FORMAT ", length " SIZE_FORMAT
                " that is not empty at " UINT32_FORMAT ". Hr is " PTR_FORMAT, found, num, i, p2i(hr));
    }
    return found;
  } else {
    return G1_NO_HRM_INDEX;
  }
}

HeapRegion* HeapRegionManager::next_region_in_heap(const HeapRegion* r) const {
  guarantee(r != NULL, "Start region must be a valid region");
  guarantee(is_available(r->hrm_index()), "Trying to iterate starting from region %u which is not in the heap", r->hrm_index());
  for (uint i = r->hrm_index() + 1; i < _allocated_heapregions_length; i++) {
    HeapRegion* hr = _regions.get_by_index(i);
    if (is_available(i)) {
      return hr;
    }
  }
  return NULL;
}

void HeapRegionManager::iterate(HeapRegionClosure* blk) const {
  uint len = max_length();

  for (uint i = 0; i < len; i++) {
    if (!is_available(i)) {
      continue;
    }
    guarantee(at(i) != NULL, "Tried to access region %u that has a NULL HeapRegion*", i);
    bool res = blk->do_heap_region(at(i));
    if (res) {
      blk->set_incomplete();
      return;
    }
  }
}

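// Finds the first run of uncommitted regions at or after start_idx. Stores
// the start of the run in *res_idx and returns its length; returns 0 if
// every region from start_idx to the end of the reserved heap is already
// committed.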
uint HeapRegionManager::find_unavailable_from_idx(uint start_idx, uint* res_idx) const {
  guarantee(res_idx != NULL, "checking");
  guarantee(start_idx <= (max_length() + 1), "checking");

  uint num_regions = 0;

  uint cur = start_idx;
  while (cur < max_length() && is_available(cur)) {
    cur++;
  }
  if (cur == max_length()) {
    return num_regions;
  }
  *res_idx = cur;
  while (cur < max_length() && !is_available(cur)) {
    cur++;
  }
  num_regions = cur - *res_idx;
#ifdef ASSERT
  for (uint i = *res_idx; i < (*res_idx + num_regions); i++) {
    assert(!is_available(i), "just checking");
  }
  assert(cur == max_length() || num_regions == 0 || is_available(cur),
         "The region at the current position %u must be available or at the end of the heap.", cur);
#endif
  return num_regions;
}

uint HeapRegionManager::find_highest_free(bool* expanded) {
  // Loop downwards from the highest region index, looking for an
  // entry which is either free or not yet committed. If not yet
  // committed, expand_at that index.
  uint curr = max_length() - 1;
  while (true) {
    HeapRegion* hr = _regions.get_by_index(curr);
    if (hr == NULL || !is_available(curr)) {
      uint res = expand_at(curr, 1, NULL);
      if (res == 1) {
        *expanded = true;
        return curr;
      }
    } else {
      if (hr->is_free()) {
        *expanded = false;
        return curr;
      }
    }
    if (curr == 0) {
      return G1_NO_HRM_INDEX;
    }
    curr--;
  }
}

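// Commits (if needed) and allocates the regions covering the given memory
// range. Returns false if any region in the range is committed but not
// free; on success, *commit_count reports how many regions had to be newly
// committed.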
bool HeapRegionManager::allocate_containing_regions(MemRegion range, size_t* commit_count, WorkGang* pretouch_workers) {
  size_t commits = 0;
  uint start_index = (uint)_regions.get_index_by_address(range.start());
  uint last_index = (uint)_regions.get_index_by_address(range.last());

  // Ensure that each G1 region in the range is free, returning false if not.
  // Commit those that are not yet available, and keep count.
  for (uint curr_index = start_index; curr_index <= last_index; curr_index++) {
    if (!is_available(curr_index)) {
      commits++;
      expand_at(curr_index, 1, pretouch_workers);
    }
    HeapRegion* curr_region = _regions.get_by_index(curr_index);
    if (!curr_region->is_free()) {
      return false;
    }
  }

  allocate_free_regions_starting_at(start_index, (last_index - start_index) + 1);
  *commit_count = commits;
  return true;
}

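// Iterates over committed regions starting at start_index and wrapping
// around, claiming each region through the HeapRegionClaimer before
// applying the closure so that no two workers process the same region.
// A worker stops early once its closure returns true from do_heap_region().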
void HeapRegionManager::par_iterate(HeapRegionClosure* blk, HeapRegionClaimer* hrclaimer, const uint start_index) const {
  // Every worker will actually look at all regions, skipping over regions that
  // are currently not committed.
  // This also (potentially) iterates over regions newly allocated during GC. This
  // is no problem except for some extra work.
  const uint n_regions = hrclaimer->n_regions();
  for (uint count = 0; count < n_regions; count++) {
    const uint index = (start_index + count) % n_regions;
    assert(index < n_regions, "sanity");
    // Skip over unavailable regions
    if (!is_available(index)) {
      continue;
    }
    HeapRegion* r = _regions.get_by_index(index);
    // We'll ignore regions already claimed.
    // However, if the iteration is specified as concurrent, the values for
    // is_starts_humongous and is_continues_humongous cannot be trusted,
    // and we should just blindly iterate over regions regardless of their
    // humongous status.
    if (hrclaimer->is_region_claimed(index)) {
      continue;
    }
    // OK, try to claim it
    if (!hrclaimer->claim_region(index)) {
      continue;
    }
    bool res = blk->do_heap_region(r);
    if (res) {
      return;
    }
  }
}

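// Uncommits up to num_regions_to_remove regions, scanning backwards from
// the highest allocated index so that the heap shrinks from the top. Only
// runs of free, empty regions are removed; returns the number of regions
// actually uncommitted.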
uint HeapRegionManager::shrink_by(uint num_regions_to_remove) {
  assert(length() > 0, "the region sequence should not be empty");
  assert(length() <= _allocated_heapregions_length, "invariant");
  assert(_allocated_heapregions_length > 0, "we should have at least one region committed");
  assert(num_regions_to_remove < length(), "We should never remove all regions");

  if (num_regions_to_remove == 0) {
    return 0;
  }

  uint removed = 0;
  uint cur = _allocated_heapregions_length - 1;
  uint idx_last_found = 0;
  uint num_last_found = 0;

  while ((removed < num_regions_to_remove) &&
         (num_last_found = find_empty_from_idx_reverse(cur, &idx_last_found)) > 0) {
    uint to_remove = MIN2(num_regions_to_remove - removed, num_last_found);

    shrink_at(idx_last_found + num_last_found - to_remove, to_remove);

    cur = idx_last_found;
    removed += to_remove;
  }

  verify_optional();

  return removed;
}

void HeapRegionManager::shrink_at(uint index, size_t num_regions) {
#ifdef ASSERT
  for (uint i = index; i < (index + num_regions); i++) {
    assert(is_available(i), "Expected available region at index %u", i);
    assert(at(i)->is_empty(), "Expected empty region at index %u", i);
    assert(at(i)->is_free(), "Expected free region at index %u", i);
  }
#endif
  uncommit_regions(index, num_regions);
}

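// Scans backwards from start_idx for the highest-indexed run of committed,
// empty regions. Stores the first index of the run in *res_idx and returns
// its length, or 0 if no such region is found.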
uint HeapRegionManager::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const {
  guarantee(start_idx < _allocated_heapregions_length, "checking");
  guarantee(res_idx != NULL, "checking");

  uint num_regions_found = 0;

  jlong cur = start_idx;
  while (cur != -1 && !(is_available(cur) && at(cur)->is_empty())) {
    cur--;
  }
  if (cur == -1) {
    return num_regions_found;
  }
  jlong old_cur = cur;
  // cur indexes the first empty region
  while (cur != -1 && is_available(cur) && at(cur)->is_empty()) {
    cur--;
  }
  *res_idx = cur + 1;
  num_regions_found = old_cur - cur;

#ifdef ASSERT
  for (uint i = *res_idx; i < (*res_idx + num_regions_found); i++) {
    assert(at(i)->is_empty(), "just checking");
  }
#endif
  return num_regions_found;
}

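// Checks the internal consistency of the manager: the committed region
// count, the contiguity of committed regions' address ranges, each region's
// index, and that no HeapRegion object exists at or above
// _allocated_heapregions_length.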
void HeapRegionManager::verify() {
  guarantee(length() <= _allocated_heapregions_length,
            "invariant: _length: %u _allocated_length: %u",
            length(), _allocated_heapregions_length);
  guarantee(_allocated_heapregions_length <= max_length(),
            "invariant: _allocated_length: %u _max_length: %u",
            _allocated_heapregions_length, max_length());

  bool prev_committed = true;
  uint num_committed = 0;
  HeapWord* prev_end = heap_bottom();
  for (uint i = 0; i < _allocated_heapregions_length; i++) {
    if (!is_available(i)) {
      prev_committed = false;
      continue;
    }
    num_committed++;
    HeapRegion* hr = _regions.get_by_index(i);
    guarantee(hr != NULL, "invariant: i: %u", i);
    guarantee(!prev_committed || hr->bottom() == prev_end,
              "invariant i: %u " HR_FORMAT " prev_end: " PTR_FORMAT,
              i, HR_FORMAT_PARAMS(hr), p2i(prev_end));
    guarantee(hr->hrm_index() == i,
              "invariant: i: %u hrm_index(): %u", i, hr->hrm_index());
    // Asserts will fire if i is >= _length
    HeapWord* addr = hr->bottom();
    guarantee(addr_to_region(addr) == hr, "sanity");
    // We cannot check whether the region is part of a particular set: at the time
    // this method may be called, we have only completed allocation of the regions,
    // but not yet put them into a region set.
    prev_committed = true;
    prev_end = hr->end();
  }
  for (uint i = _allocated_heapregions_length; i < max_length(); i++) {
    guarantee(_regions.get_by_index(i) == NULL, "invariant i: %u", i);
  }

  guarantee(num_committed == _num_committed, "Found %u committed regions, but should be %u", num_committed, _num_committed);
  _free_list.verify();
}

#ifndef PRODUCT
void HeapRegionManager::verify_optional() {
  verify();
}
#endif // PRODUCT

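// HeapRegionClaimer hands out regions to parallel GC workers so that each
// region is processed exactly once. A minimal usage sketch (illustrative
// only; "MyRegionClosure" and "worker_id" are assumed to be supplied by the
// calling parallel task, and are not defined in this file):
//
//   HeapRegionClaimer hrclaimer(n_workers);
//   // In each worker, with worker_id in [0, n_workers):
//   MyRegionClosure blk;
//   G1CollectedHeap::heap()->_hrm->par_iterate(&blk, &hrclaimer,
//                                              hrclaimer.offset_for_worker(worker_id));
//
// Each worker starts at a different offset and wraps around, so contention
// on the claim CAS is concentrated only where workers' ranges meet.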
HeapRegionClaimer::HeapRegionClaimer(uint n_workers) :
    _n_workers(n_workers), _n_regions(G1CollectedHeap::heap()->_hrm->_allocated_heapregions_length), _claims(NULL) {
  assert(n_workers > 0, "Need at least one worker.");
  uint* new_claims = NEW_C_HEAP_ARRAY(uint, _n_regions, mtGC);
  memset(new_claims, Unclaimed, sizeof(*_claims) * _n_regions);
  _claims = new_claims;
}

HeapRegionClaimer::~HeapRegionClaimer() {
  if (_claims != NULL) {
    FREE_C_HEAP_ARRAY(uint, _claims);
  }
}

uint HeapRegionClaimer::offset_for_worker(uint worker_id) const {
  assert(worker_id < _n_workers, "Invalid worker_id.");
  return _n_regions * worker_id / _n_workers;
}

bool HeapRegionClaimer::is_region_claimed(uint region_index) const {
  assert(region_index < _n_regions, "Invalid index.");
  return _claims[region_index] == Claimed;
}

bool HeapRegionClaimer::claim_region(uint region_index) {
  assert(region_index < _n_regions, "Invalid index.");
  uint old_val = Atomic::cmpxchg(Claimed, &_claims[region_index], Unclaimed);
  return old_val == Unclaimed;
}