/*
 * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/g1/g1HotCardCache.hpp"
#include "runtime/atomic.hpp"

G1HotCardCache::G1HotCardCache(G1CollectedHeap* g1h):
  _g1h(g1h), _use_cache(false), _card_counts(g1h),
  _hot_cache(NULL), _hot_cache_size(0), _hot_cache_par_chunk_size(0),
  _hot_cache_idx(0), _hot_cache_par_claimed_idx(0)
{}

void G1HotCardCache::initialize(G1RegionToSpaceMapper* card_counts_storage) {
  if (default_use_cache()) {
    _use_cache = true;

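    // The cache size is a power of two (2^G1ConcRSLogCacheSize) so that
    // insert() can wrap indices into the backing array with a simple mask.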
    _hot_cache_size = (size_t)1 << G1ConcRSLogCacheSize;
    _hot_cache = ArrayAllocator<CardValue*>::allocate(_hot_cache_size, mtGC);

    reset_hot_cache_internal();

    // For refining the cards in the hot cache in parallel
    _hot_cache_par_chunk_size = ClaimChunkSize;
    _hot_cache_par_claimed_idx = 0;

    _card_counts.initialize(card_counts_storage);
  }
}

G1HotCardCache::~G1HotCardCache() {
  if (default_use_cache()) {
    assert(_hot_cache != NULL, "hot card cache should have been allocated");
    ArrayAllocator<CardValue*>::free(_hot_cache, _hot_cache_size);
    _hot_cache = NULL;
  }
}

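// Record a reference to card_ptr. If the card is not (yet) hot it is handed
// back to the caller for immediate refinement. Otherwise it is stored in the
// cache, and the entry it displaces (NULL if the slot was empty) is returned
// to be refined in its place.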
CardTable::CardValue* G1HotCardCache::insert(CardValue* card_ptr) {
  uint count = _card_counts.add_card_count(card_ptr);
  if (!_card_counts.is_hot(count)) {
    // The card is not hot so do not store it in the cache;
    // return it for immediate refining.
    return card_ptr;
  }
  // Otherwise, the card is hot.
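  // Claim the next slot in the cache: the unmasked index increases
  // monotonically, and masking with (_hot_cache_size - 1) wraps it into the
  // power-of-two sized backing array, which is treated as a ring buffer.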
  size_t index = Atomic::add(1u, &_hot_cache_idx) - 1;
  size_t masked_index = index & (_hot_cache_size - 1);
  CardValue* current_ptr = _hot_cache[masked_index];

  // Try to store the new card pointer into the cache. Compare-and-swap to
  // guard against the unlikely event of a race in which another card pointer
  // has already been written to this slot. In that case we return card_ptr
  // rather than starting over; this should be OK since card_ptr will likely
  // already be the older card when/if this ever happens.
  CardValue* previous_ptr = Atomic::cmpxchg(card_ptr,
                                            &_hot_cache[masked_index],
                                            current_ptr);
  return (previous_ptr == current_ptr) ? previous_ptr : card_ptr;
}

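// Refine, via the given closure, all cards currently stored in the hot card
// cache. Multiple worker threads may call this at the same time; each claims
// disjoint chunks of the cache, so every cached card is processed once.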
void G1HotCardCache::drain(G1CardTableEntryClosure* cl, uint worker_i) {
  assert(default_use_cache(), "Drain only necessary if we use the hot card cache.");

  assert(_hot_cache != NULL, "hot card cache should have been allocated");
  assert(!use_cache(), "cache should be disabled");

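  // Workers repeatedly claim chunks of ClaimChunkSize entries by atomically
  // advancing _hot_cache_par_claimed_idx; claimed chunks never overlap, so no
  // cached card is refined more than once.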
  while (_hot_cache_par_claimed_idx < _hot_cache_size) {
    size_t end_idx = Atomic::add(_hot_cache_par_chunk_size,
                                 &_hot_cache_par_claimed_idx);
    size_t start_idx = end_idx - _hot_cache_par_chunk_size;
    // The current worker has successfully claimed the chunk [start_idx..end_idx)
    end_idx = MIN2(end_idx, _hot_cache_size);
    for (size_t i = start_idx; i < end_idx; i++) {
      CardValue* card_ptr = _hot_cache[i];
      if (card_ptr != NULL) {
        bool result = cl->do_card_ptr(card_ptr, worker_i);
        assert(result, "Closure should always return true");
      } else {
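        // Cards fill the cache in slot order, so the first NULL entry implies
        // the rest of this chunk has never been written; stop scanning it.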
        break;
      }
    }
  }

  // The existing entries in the hot card cache, which were just refined
  // above, are discarded prior to re-enabling the cache near the end of the GC.
}

void G1HotCardCache::reset_card_counts(HeapRegion* hr) {
  _card_counts.clear_region(hr);
}