/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OOPS_MARKOOP_HPP
#define SHARE_OOPS_MARKOOP_HPP

#include "oops/oop.hpp"

// The markOop describes the header of an object.
//
// Note that the mark is not a real oop but just a word.
// It is placed in the oop hierarchy for historical reasons.
//
// Bit-format of an object header (most significant first, big endian layout below):
//
//  32 bits:
//  --------
//             hash:25 ------------>| age:4    biased_lock:1 lock:2 (normal object)
//             JavaThread*:23 epoch:2 age:4    biased_lock:1 lock:2 (biased object)
//             size:32 ------------------------------------------>| (CMS free block)
//             PromotedObject*:29 ---------->| promo_bits:3 ----->| (CMS promoted object)
//
//  64 bits:
//  --------
//  unused:25 hash:31 -->| unused:1   age:4    biased_lock:1 lock:2 (normal object)
//  JavaThread*:54 epoch:2 unused:1   age:4    biased_lock:1 lock:2 (biased object)
//  PromotedObject*:61 --------------------->| promo_bits:3 ----->| (CMS promoted object)
//  size:64 ----------------------------------------------------->| (CMS free block)
//
//  unused:25 hash:31 -->| cms_free:1 age:4    biased_lock:1 lock:2 (COOPs && normal object)
//  JavaThread*:54 epoch:2 cms_free:1 age:4    biased_lock:1 lock:2 (COOPs && biased object)
//  narrowOop:32 unused:24 cms_free:1 unused:4 promo_bits:3 ----->| (COOPs && CMS promoted object)
//  unused:21 size:35 -->| cms_free:1 unused:7 ------------------>| (COOPs && CMS free block)
//
//  - hash contains the identity hash value: largest value is
//    31 bits, see os::random().  Also, 64-bit vm's require
//    a hash value no bigger than 32 bits because they will not
//    properly generate a mask larger than that: see library_call.cpp
//    and c1_CodePatterns_sparc.cpp.
//
//  - the biased lock pattern is used to bias a lock toward a given
//    thread. When this pattern is set in the low three bits, the lock
//    is either biased toward a given thread or "anonymously" biased,
//    indicating that it is possible for it to be biased. When the
//    lock is biased toward a given thread, locking and unlocking can
//    be performed by that thread without using atomic operations.
//    When a lock's bias is revoked, it reverts back to the normal
//    locking scheme described below.
//
//    Note that we are overloading the meaning of the "unlocked" state
//    of the header. Because we steal a bit from the age we can
//    guarantee that the bias pattern will never be seen for a truly
//    unlocked object.
//
//    Note also that the biased state contains the age bits normally
//    contained in the object header. Large increases in scavenge
//    times were seen when these bits were absent and an arbitrary age
//    assigned to all biased objects, because they tended to consume a
//    significant fraction of the eden semispaces and were not
//    promoted promptly, causing an increase in the amount of copying
//    performed. The runtime system aligns all JavaThread* pointers to
//    a very large value (currently 128 bytes (32bVM) or 256 bytes (64bVM))
//    to make room for the age bits & the epoch bits (used in support of
//    biased locking), and for the CMS "freeness" bit in the 64bVM (+COOPs).
//
//    [JavaThread* | epoch | age | 1 | 01]       lock is biased toward given thread
//    [0           | epoch | age | 1 | 01]       lock is anonymously biased
//
//  - the two lock bits are used to describe three states: locked/unlocked and monitor.
//
//    [ptr             | 00]  locked             ptr points to real header on stack
//    [header      | 0 | 01]  unlocked           regular object header
//    [ptr             | 10]  monitor            inflated lock (header is swapped out)
//    [ptr             | 11]  marked             used by markSweep to mark an object
//                                               not valid at any other time
//
//    We assume that stack/thread pointers have the lowest two bits cleared.
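//
//    Worked example (illustrative only; 64-bit layout, hypothetical values):
//    a neutral header for an object with identity hash 0x1234ABCD and age 5 is
//
//        (0x1234ABCD << 8) | (5 << 3) | 1  ==  0x1234ABCD29
//
//    which decodes as lock = 01 (unlocked), biased_lock = 0, age = 5,
//    hash = 0x1234ABCD, with the remaining high bits unused.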

class BasicLock;
class ObjectMonitor;
class JavaThread;

class markOopDesc: public oopDesc {
 private:
  // Conversion
  uintptr_t value() const { return (uintptr_t) this; }

 public:
  // Constants
  enum { age_bits                 = 4,
         lock_bits                = 2,
         biased_lock_bits         = 1,
         max_hash_bits            = BitsPerWord - age_bits - lock_bits - biased_lock_bits,
         hash_bits                = max_hash_bits > 31 ? 31 : max_hash_bits,
         cms_bits                 = LP64_ONLY(1) NOT_LP64(0),
         epoch_bits               = 2
  };

  // The biased locking code currently requires that the age bits be
  // contiguous to the lock bits.
  enum { lock_shift               = 0,
         biased_lock_shift        = lock_bits,
         age_shift                = lock_bits + biased_lock_bits,
         cms_shift                = age_shift + age_bits,
         hash_shift               = cms_shift + cms_bits,
         epoch_shift              = hash_shift
  };
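
  // For orientation (illustrative restatement; these values follow directly
  // from the constants above): in the 64-bit VM, lock_shift == 0,
  // biased_lock_shift == 2, age_shift == 3, cms_shift == 7, and
  // hash_shift == epoch_shift == 8; max_hash_bits == 64 - 4 - 2 - 1 == 57,
  // so hash_bits is clamped to 31, matching the 64-bit diagram above.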

  enum { lock_mask                = right_n_bits(lock_bits),
         lock_mask_in_place       = lock_mask << lock_shift,
         biased_lock_mask         = right_n_bits(lock_bits + biased_lock_bits),
         biased_lock_mask_in_place= biased_lock_mask << lock_shift,
         biased_lock_bit_in_place = 1 << biased_lock_shift,
         age_mask                 = right_n_bits(age_bits),
         age_mask_in_place        = age_mask << age_shift,
         epoch_mask               = right_n_bits(epoch_bits),
         epoch_mask_in_place      = epoch_mask << epoch_shift,
         cms_mask                 = right_n_bits(cms_bits),
         cms_mask_in_place        = cms_mask << cms_shift
  };

  const static uintptr_t hash_mask = right_n_bits(hash_bits);
  const static uintptr_t hash_mask_in_place = hash_mask << hash_shift;

  // Alignment of JavaThread pointers encoded in object header required by biased locking
  enum { biased_lock_alignment    = 2 << (epoch_shift + epoch_bits)
  };

  enum { locked_value             = 0,
         unlocked_value           = 1,
         monitor_value            = 2,
         marked_value             = 3,
         biased_lock_pattern      = 5
  };
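
  // Illustrative sketch (comment only, not additional API): classifying the
  // low bits of a raw header word 'bits' (a hypothetical uintptr_t) with the
  // constants above, assuming lock_shift == 0 as the accessors below do.
  //
  //   if ((bits & biased_lock_mask_in_place) == biased_lock_pattern) {
  //     // 101: biased toward some thread, or anonymously biased
  //   } else {
  //     switch (bits & lock_mask_in_place) {
  //       case locked_value:   break;  // 00: stack-locked; header holds a BasicLock*
  //       case unlocked_value: break;  // 01: neutral (unlocked) header
  //       case monitor_value:  break;  // 10: inflated; header holds an ObjectMonitor*
  //       case marked_value:   break;  // 11: GC mark
  //     }
  //   }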

  enum { no_hash                  = 0 };  // no hash value assigned

  enum { no_hash_in_place         = (address_word)no_hash << hash_shift,
         no_lock_in_place         = unlocked_value
  };

  enum { max_age                  = age_mask };

  enum { max_bias_epoch           = epoch_mask };

  // Biased Locking accessors.
  // These must be checked by all code which calls into the
  // ObjectSynchronizer and other code. The biasing is not understood
  // by the lower-level CAS-based locking code, although the runtime
  // fixes up biased locks to be compatible with it when a bias is
  // revoked.
  bool has_bias_pattern() const {
    return (mask_bits(value(), biased_lock_mask_in_place) == biased_lock_pattern);
  }
  JavaThread* biased_locker() const {
    assert(has_bias_pattern(), "should not call this otherwise");
    return (JavaThread*) ((intptr_t) (mask_bits(value(), ~(biased_lock_mask_in_place | age_mask_in_place | epoch_mask_in_place))));
  }
  // Indicates that the mark has the bias bit set but that it has not
  // yet been biased toward a particular thread
  bool is_biased_anonymously() const {
    return (has_bias_pattern() && (biased_locker() == NULL));
  }
  // Indicates epoch in which this bias was acquired. If the epoch
  // changes due to too many bias revocations occurring, the biases
  // from the previous epochs are all considered invalid.
  int bias_epoch() const {
    assert(has_bias_pattern(), "should not call this otherwise");
    return (mask_bits(value(), epoch_mask_in_place) >> epoch_shift);
  }
  markOop set_bias_epoch(int epoch) {
    assert(has_bias_pattern(), "should not call this otherwise");
    assert((epoch & (~epoch_mask)) == 0, "epoch overflow");
    return markOop(mask_bits(value(), ~epoch_mask_in_place) | (epoch << epoch_shift));
  }
  markOop incr_bias_epoch() {
    return set_bias_epoch((1 + bias_epoch()) & epoch_mask);
  }
  // Prototype mark for initialization
  static markOop biased_locking_prototype() {
    return markOop( biased_lock_pattern );
  }
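
  // Illustrative round trip (comment only; 'thread' is a hypothetical, suitably
  // aligned JavaThread*): a header built with encode(thread, 0, 1), declared
  // further down in this class, satisfies the accessors above.
  //
  //   markOop m = markOopDesc::encode(thread, /* age */ 0, /* bias_epoch */ 1);
  //   assert(m->has_bias_pattern(), "low three bits form the bias pattern");
  //   assert(m->biased_locker() == thread, "thread bits do not overlap the masked bits");
  //   assert(m->bias_epoch() == 1, "epoch is stored at epoch_shift");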

  // lock accessors (note that these assume lock_shift == 0)
  bool is_locked()   const {
    return (mask_bits(value(), lock_mask_in_place) != unlocked_value);
  }
  bool is_unlocked() const {
    return (mask_bits(value(), biased_lock_mask_in_place) == unlocked_value);
  }
  bool is_marked()   const {
    return (mask_bits(value(), lock_mask_in_place) == marked_value);
  }
  bool is_neutral()  const { return (mask_bits(value(), biased_lock_mask_in_place) == unlocked_value); }

  // Special temporary state of the markOop while being inflated.
  // Code that looks at the mark outside a lock needs to take this into account.
  bool is_being_inflated() const { return (value() == 0); }

  // Distinguished markword value - used when inflating over
  // an existing stacklock.  0 indicates the markword is "BUSY".
  // Lockword mutators that use a LD...CAS idiom should always
  // check for and avoid overwriting a 0 value installed by some
  // other thread.  (They should spin or block instead.  The 0 value
  // is transient and *should* be short-lived).
  static markOop INFLATING() { return (markOop) 0; }    // inflate-in-progress
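
  // Illustrative sketch of the LD...CAS idiom described above (comment only,
  // simplified; the real callers live in the synchronizer code):
  //
  //   markOop mark = obj->mark();
  //   if (mark == markOopDesc::INFLATING()) {
  //     // another thread is installing an ObjectMonitor: spin/yield and re-read
  //   } else {
  //     // CAS a new value over 'mark'; on failure re-read the header, and never
  //     // overwrite a 0 (INFLATING) value observed on the re-read
  //   }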

  // Should this header be preserved during GC?
  inline bool must_be_preserved(oop obj_containing_mark) const;
  inline bool must_be_preserved_with_bias(oop obj_containing_mark) const;

  // Should this header (including its age bits) be preserved in the
  // case of a promotion failure during scavenge?
  // Note that we special case this situation. We want to avoid
  // calling BiasedLocking::preserve_marks()/restore_marks() (which
  // decrease the number of mark words that need to be preserved
  // during GC) during each scavenge. During scavenges in which there
  // is no promotion failure, we actually don't need to call the above
  // routines at all, since we don't mutate and re-initialize the
  // marks of promoted objects using init_mark(). However, during
  // scavenges which result in promotion failure, we do re-initialize
  // the mark words of objects, meaning that we should have called
  // these mark word preservation routines. Currently there's no good
  // place in which to call them in any of the scavengers (although
  // guarded by appropriate locks we could make one), but the
  // observation is that promotion failures are quite rare and
  // reducing the number of mark words preserved during them isn't a
  // high priority.
  inline bool must_be_preserved_for_promotion_failure(oop obj_containing_mark) const;
  inline bool must_be_preserved_with_bias_for_promotion_failure(oop obj_containing_mark) const;

  // Should this header be preserved during a scavenge where CMS is
  // the old generation?
  // (This is basically the same body as must_be_preserved_for_promotion_failure(),
  // but takes the Klass* as argument instead)
  inline bool must_be_preserved_for_cms_scavenge(Klass* klass_of_obj_containing_mark) const;
  inline bool must_be_preserved_with_bias_for_cms_scavenge(Klass* klass_of_obj_containing_mark) const;

  // WARNING: The following routines are used EXCLUSIVELY by
  // synchronization functions. They are not really gc safe.
  // They must get updated if the markOop layout gets changed.
  markOop set_unlocked() const {
    return markOop(value() | unlocked_value);
  }
  bool has_locker() const {
    return ((value() & lock_mask_in_place) == locked_value);
  }
  BasicLock* locker() const {
    assert(has_locker(), "check");
    return (BasicLock*) value();
  }
  bool has_monitor() const {
    return ((value() & monitor_value) != 0);
  }
  ObjectMonitor* monitor() const {
    assert(has_monitor(), "check");
    // Use xor instead of &~ to provide one extra tag-bit check.
    return (ObjectMonitor*) (value() ^ monitor_value);
  }
  bool has_displaced_mark_helper() const {
    return ((value() & unlocked_value) == 0);
  }
  markOop displaced_mark_helper() const {
    assert(has_displaced_mark_helper(), "check");
    intptr_t ptr = (value() & ~monitor_value);
    return *(markOop*)ptr;
  }
  void set_displaced_mark_helper(markOop m) const {
    assert(has_displaced_mark_helper(), "check");
    intptr_t ptr = (value() & ~monitor_value);
    *(markOop*)ptr = m;
  }
  markOop copy_set_hash(intptr_t hash) const {
    intptr_t tmp = value() & (~hash_mask_in_place);
    tmp |= ((hash & hash_mask) << hash_shift);
    return (markOop)tmp;
  }
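
  // Illustrative use of copy_set_hash() (comment only, simplified from the
  // identity-hash handling in the runtime):
  //
  //   markOop mark = obj->mark();
  //   if (mark->is_neutral() && mark->has_no_hash()) {
  //     markOop new_mark = mark->copy_set_hash(hash);
  //     // install new_mark with a CAS so that a competing locker or
  //     // hash installer is never silently overwritten
  //   }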

  // Only used to be stored into a BasicLock as an indicator
  // that the lock is using a heavyweight monitor.
  static markOop unused_mark() {
    return (markOop) marked_value;
  }
  // The following two functions create the markOop to be
  // stored into the object header; they encode monitor info.
  static markOop encode(BasicLock* lock) {
    return (markOop) lock;
  }
  static markOop encode(ObjectMonitor* monitor) {
    intptr_t tmp = (intptr_t) monitor;
    return (markOop) (tmp | monitor_value);
  }
  static markOop encode(JavaThread* thread, uint age, int bias_epoch) {
    intptr_t tmp = (intptr_t) thread;
    assert(UseBiasedLocking && ((tmp & (epoch_mask_in_place | age_mask_in_place | biased_lock_mask_in_place)) == 0), "misaligned JavaThread pointer");
    assert(age <= max_age, "age too large");
    assert(bias_epoch <= max_bias_epoch, "bias epoch too large");
    return (markOop) (tmp | (bias_epoch << epoch_shift) | (age << age_shift) | biased_lock_pattern);
  }

  // used to encode pointers during GC
  markOop clear_lock_bits() { return markOop(value() & ~lock_mask_in_place); }

  // age operations
  markOop set_marked()   { return markOop((value() & ~lock_mask_in_place) | marked_value); }
  markOop set_unmarked() { return markOop((value() & ~lock_mask_in_place) | unlocked_value); }

  uint    age()               const { return mask_bits(value() >> age_shift, age_mask); }
  markOop set_age(uint v) const {
    assert((v & ~age_mask) == 0, "shouldn't overflow age field");
    return markOop((value() & ~age_mask_in_place) | (((uintptr_t)v & age_mask) << age_shift));
  }
  markOop incr_age()          const { return age() == max_age ? markOop(this) : set_age(age() + 1); }

  // hash operations
  intptr_t hash() const {
    return mask_bits(value() >> hash_shift, hash_mask);
  }

  bool has_no_hash() const {
    return hash() == no_hash;
  }

  // Prototype mark for initialization
  static markOop prototype() {
    return markOop( no_hash_in_place | no_lock_in_place );
  }

  // Helper function for restoration of unmarked mark oops during GC
  static inline markOop prototype_for_object(oop obj);

  // Debugging
  void print_on(outputStream* st) const;

  // Prepare address of oop for placement into mark
  inline static markOop encode_pointer_as_mark(void* p) { return markOop(p)->set_marked(); }

  // Recover address of oop from encoded form used in mark
  inline void* decode_pointer() { if (UseBiasedLocking && has_bias_pattern()) return NULL; return clear_lock_bits(); }
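
  // Illustrative round trip for the two GC helpers above (comment only,
  // assuming 'p' has its low two bits clear, as object addresses do):
  //
  //   markOop m = markOopDesc::encode_pointer_as_mark(p);   // low bits become 11 (marked)
  //   assert(m->is_marked(), "encoded pointer looks like a marked header");
  //   void* q = m->decode_pointer();                         // strips the lock bits again
  //   assert(q == p, "round trip recovers the original pointer");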

  // These markOops indicate cms free chunk blocks and not objects.
  // In 64 bit, the markOop is set to distinguish them from oops.
  // These are defined in 32 bit mode for vmStructs.
  const static uintptr_t cms_free_chunk_pattern  = 0x1;

  // Constants for the size field.
  enum { size_shift                = cms_shift + cms_bits,
         size_bits                 = 35    // need for compressed oops 32G
  };
  // These values are too big for Win64
  const static uintptr_t size_mask = LP64_ONLY(right_n_bits(size_bits))
                                     NOT_LP64(0);
  const static uintptr_t size_mask_in_place =
                                     (address_word)size_mask << size_shift;

#ifdef _LP64
  static markOop cms_free_prototype() {
    return markOop(((intptr_t)prototype() & ~cms_mask_in_place) |
                   ((cms_free_chunk_pattern & cms_mask) << cms_shift));
  }
  uintptr_t cms_encoding() const {
    return mask_bits(value() >> cms_shift, cms_mask);
  }
  bool is_cms_free_chunk() const {
    return is_neutral() &&
           (cms_encoding() & cms_free_chunk_pattern) == cms_free_chunk_pattern;
  }

  size_t get_size() const       { return (size_t)(value() >> size_shift); }
  static markOop set_size_and_free(size_t size) {
    assert((size & ~size_mask) == 0, "shouldn't overflow size field");
    return markOop(((intptr_t)cms_free_prototype() & ~size_mask_in_place) |
                   (((intptr_t)size & size_mask) << size_shift));
  }
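
  // Illustrative property of the two helpers above (comment only, 64-bit build,
  // hypothetical chunk size):
  //
  //   markOop m = markOopDesc::set_size_and_free(128);
  //   assert(m->is_cms_free_chunk(), "cms_free bit set and header is neutral");
  //   assert(m->get_size() == 128, "size is stored at size_shift");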
#endif // _LP64
};

#endif // SHARE_OOPS_MARKOOP_HPP