/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1BLOCKOFFSETTABLE_HPP
#define SHARE_GC_G1_G1BLOCKOFFSETTABLE_HPP

#include "gc/g1/g1RegionToSpaceMapper.hpp"
#include "gc/shared/blockOffsetTable.hpp"
#include "memory/memRegion.hpp"
#include "memory/virtualspace.hpp"
#include "utilities/globalDefinitions.hpp"

// Forward declarations
class G1BlockOffsetTable;
class G1ContiguousSpace;
// This implementation of "G1BlockOffsetTable" divides the covered region
// into "N"-word subregions (where "N" = 2^"LogN"). An array with an entry
// for each such subregion indicates how far back one must go to find the
// start of the chunk that includes the first word of the subregion.
//
// Each G1BlockOffsetTablePart is owned by a G1ContiguousSpace.
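//
// A small worked example (an illustrative sketch; the numbers assume the
// default 512-byte cards, i.e. BOTConstants::LogN == 9, which is 64 heap
// words per card on a 64-bit VM):
//
//   card boundaries:  c0               c1               c2
//                     |----------------|----------------|-------------
//   blocks:           [ A (48 words)  ][ B (90 words)              ] ...
//   _offset_array:        [ 0 ]            [ 16 ]
//
// Entry 0 is 0 because a block (A) starts exactly at c0. B starts 48 words
// past c0, so entry 1 holds 16: from c1, walk 16 words back to reach B's
// start. A query for an address in card 1 reads entry 1, backs up to B's
// start, and then walks forward block by block. Back-distances of more
// than one card are stored in an encoded form (see BOTConstants).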

class G1BlockOffsetTable: public CHeapObj<mtGC> {
  friend class G1BlockOffsetTablePart;
  friend class VMStructs;

private:
  // The reserved region covered by the table.
  MemRegion _reserved;

  // Array for keeping offsets for retrieving object start fast given an
  // address.
  volatile u_char* _offset_array;  // byte array keeping backwards offsets

  void check_offset(size_t offset, const char* msg) const {
    assert(offset <= BOTConstants::N_words,
           "%s - offset: " SIZE_FORMAT ", N_words: %u",
           msg, offset, BOTConstants::N_words);
  }

  // Bounds checking accessors:
  // For performance these have to devolve to array accesses in product builds.
  inline u_char offset_array(size_t index) const;

  inline void set_offset_array_raw(size_t index, u_char offset);
  inline void set_offset_array(size_t index, u_char offset);

  inline void set_offset_array(size_t index, HeapWord* high, HeapWord* low);

  inline void set_offset_array(size_t left, size_t right, u_char offset);

  bool is_card_boundary(HeapWord* p) const;

  void check_index(size_t index, const char* msg) const NOT_DEBUG_RETURN;

public:

  // Return the number of slots needed for an offset array
  // that covers mem_region_words words.
  static size_t compute_size(size_t mem_region_words) {
    size_t number_of_slots = (mem_region_words / BOTConstants::N_words);
    return ReservedSpace::allocation_align_size_up(number_of_slots);
  }
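
  // For example (a sketch assuming 64 words per card and HeapWordSize == 8,
  // i.e. a 64-bit VM with default BOTConstants): a 1 MB region is 131072
  // words, so it needs 131072 / 64 = 2048 one-byte slots, which
  // allocation_align_size_up then rounds up to the allocation granularity.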

  // Returns how many bytes of the heap a single byte of the BOT corresponds to.
  static size_t heap_map_factor() {
    return BOTConstants::N_bytes;
  }

  // Initialize the Block Offset Table to cover the memory region passed
  // in the heap parameter.
  G1BlockOffsetTable(MemRegion heap, G1RegionToSpaceMapper* storage);

  // Return the appropriate index into "_offset_array" for "p".
  inline size_t index_for(const void* p) const;
  inline size_t index_for_raw(const void* p) const;

  // Return the address indicating the start of the region corresponding to
  // "index" in "_offset_array".
  inline HeapWord* address_for_index(size_t index) const;
  // Variant of address_for_index that does not check the index for validity.
  inline HeapWord* address_for_index_raw(size_t index) const {
    return _reserved.start() + (index << BOTConstants::LogN_words);
  }
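
  // With the default 64-word cards of a 64-bit VM, index_for_raw is the
  // inverse of address_for_index_raw: it maps "p" to
  // (p - _reserved.start()) >> LogN_words. For example, the words at
  // _reserved.start() + 0..63 all map to index 0, and address_for_index(1)
  // returns _reserved.start() + 64.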
};

class G1BlockOffsetTablePart {
  friend class G1BlockOffsetTable;
  friend class VMStructs;
private:
  // Allocation boundary at which the offset array must be updated.
  HeapWord* _next_offset_threshold;
  size_t    _next_offset_index;      // index corresponding to that boundary

  // Indicates if an object can span into this G1BlockOffsetTablePart.
  debug_only(bool _object_can_span;)

  // This is the global BlockOffsetTable.
  G1BlockOffsetTable* _bot;

  // The space that owns this subregion.
  G1ContiguousSpace* _space;

  // Sets the entries corresponding to the cards starting at "start" and
  // ending at "end" to point back to the card before "start": the interval
  // [start, end) is right-open.
  void set_remainder_to_point_to_start(HeapWord* start, HeapWord* end);
  // Same as above, except that the args here are a card _index_ interval
  // that is closed: [start_index, end_index]
  void set_remainder_to_point_to_start_incl(size_t start, size_t end);
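  // For example, set_remainder_to_point_to_start_incl(5, 7) updates the
  // entries for cards 5, 6 and 7.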

  // Zero out the entry for _bottom (offset will be zero). Does not check
  // for availability of the memory first.
  void zero_bottom_entry_raw();
  // Variant of initialize_threshold that does not check for availability
  // of the memory first.
  HeapWord* initialize_threshold_raw();

  inline size_t block_size(const HeapWord* p) const;

  // Returns the address of a block whose start is at most "addr".
  // If "has_max_index" is true, assumes "max_index" is the last valid one
  // in the array.
  inline HeapWord* block_at_or_preceding(const void* addr,
                                         bool has_max_index,
                                         size_t max_index) const;

  // "q" is a block boundary that is <= "addr"; "n" is the address of the
  // next block (or the end of the space.) Return the address of the
  // beginning of the block that contains "addr". Does so without side
  // effects (see, e.g., spec of block_start.)
  inline HeapWord* forward_to_block_containing_addr_const(HeapWord* q, HeapWord* n,
                                                          const void* addr) const;

  // "q" is a block boundary that is <= "addr"; return the address of the
  // beginning of the block that contains "addr". May have side effects
  // on "this", by updating imprecise entries.
  inline HeapWord* forward_to_block_containing_addr(HeapWord* q,
                                                    const void* addr);

  // "q" is a block boundary that is <= "addr"; "n" is the address of the
  // next block (or the end of the space.) Return the address of the
  // beginning of the block that contains "addr". May have side effects
  // on "this", by updating imprecise entries.
  HeapWord* forward_to_block_containing_addr_slow(HeapWord* q,
                                                  HeapWord* n,
                                                  const void* addr);

  // Requires that "*threshold_" be the first array entry boundary at or
  // above "blk_start", and that "*index_" be the corresponding array
  // index. If the block starts at or crosses "*threshold_", records
  // "blk_start" as the appropriate block start for the array index
  // starting at "*threshold_", and for any other indices crossed by the
  // block. Updates "*threshold_" and "*index_" to correspond to the first
  // index after the block end.
  void alloc_block_work(HeapWord** threshold_, size_t* index_,
                        HeapWord* blk_start, HeapWord* blk_end);

  void check_all_cards(size_t left_card, size_t right_card) const;

public:
  // The elements of the array are initialized to zero.
  G1BlockOffsetTablePart(G1BlockOffsetTable* array, G1ContiguousSpace* gsp);

  void verify() const;

  // Returns the address of the start of the block containing "addr", or
  // else "null" if it is covered by no block. (May have side effects,
  // namely updating of shared array entries that "point" too far
  // backwards. This can occur, for example, when lab allocation is used
  // in a space covered by the table.)
  inline HeapWord* block_start(const void* addr);
  // Same as above, but does not have any of the possible side effects
  // discussed above.
  inline HeapWord* block_start_const(const void* addr) const;
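
  // In outline, a lookup works as follows (a sketch only, ignoring the
  // encoded multi-card back-skips; the actual implementation lives in
  // g1BlockOffsetTable.inline.hpp):
  //
  //   size_t index = _bot->index_for(addr);
  //   HeapWord* q = _bot->address_for_index(index) - _bot->offset_array(index);
  //   while (q + block_size(q) <= addr) {
  //     q += block_size(q);   // walk forward, block by block
  //   }
  //   // q now points at the start of the block containing addr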

  // Initialize the threshold to reflect the first boundary after the
  // bottom of the covered region.
  HeapWord* initialize_threshold();

  void reset_bot() {
    zero_bottom_entry_raw();
    initialize_threshold_raw();
  }

  // Return the next threshold, the point at which the table should be
  // updated.
  HeapWord* threshold() const { return _next_offset_threshold; }

  // These must be guaranteed to work properly (i.e., do nothing)
  // when "blk_start" ("blk" for second version) is "NULL". In this
  // implementation, that's true because NULL is represented as 0, and thus
  // never exceeds the "_next_offset_threshold".
  void alloc_block(HeapWord* blk_start, HeapWord* blk_end) {
    if (blk_end > _next_offset_threshold) {
      alloc_block_work(&_next_offset_threshold, &_next_offset_index, blk_start, blk_end);
    }
  }
  void alloc_block(HeapWord* blk, size_t size) {
    alloc_block(blk, blk + size);
  }
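
  // Typical use (a sketch; "bot_part" is just an illustrative name): after
  // the owning space allocates "size" words at "addr" it calls
  //
  //   bot_part->alloc_block(addr, size);
  //
  // which does nothing until an allocation first crosses
  // _next_offset_threshold, keeping BOT maintenance off the common
  // allocation path.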

  void set_for_starts_humongous(HeapWord* obj_top, size_t fill_size);
  void set_object_can_span(bool can_span) NOT_DEBUG_RETURN;

  void print_on(outputStream* out) PRODUCT_RETURN;
};

#endif // SHARE_GC_G1_G1BLOCKOFFSETTABLE_HPP