/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OOPS_OOP_INLINE_HPP
#define SHARE_OOPS_OOP_INLINE_HPP

#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/arrayKlass.hpp"
#include "oops/arrayOop.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/markOop.inline.hpp"
#include "oops/oop.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"

// Implementation of all inlined member functions defined in oop.hpp.
// We need a separate file to avoid circular references.

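// The mark word accessors come in two flavors: the plain versions go through
// the Access API, so any barriers required by the active collector are
// applied, while the _raw versions read and write the _mark field directly
// and are intended for GC-internal callers that know no barrier is needed.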
markOop oopDesc::mark() const {
  return HeapAccess<MO_VOLATILE>::load_at(as_oop(), mark_offset_in_bytes());
}

markOop oopDesc::mark_raw() const {
  return _mark;
}

markOop* oopDesc::mark_addr_raw() const {
  return (markOop*) &_mark;
}

void oopDesc::set_mark(volatile markOop m) {
  HeapAccess<MO_VOLATILE>::store_at(as_oop(), mark_offset_in_bytes(), m);
}

void oopDesc::set_mark_raw(volatile markOop m) {
  _mark = m;
}

void oopDesc::set_mark_raw(HeapWord* mem, markOop m) {
  *(markOop*)(((char*)mem) + mark_offset_in_bytes()) = m;
}

void oopDesc::release_set_mark(markOop m) {
  HeapAccess<MO_RELEASE>::store_at(as_oop(), mark_offset_in_bytes(), m);
}

markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
  return HeapAccess<>::atomic_cmpxchg_at(new_mark, as_oop(), mark_offset_in_bytes(), old_mark);
}

markOop oopDesc::cas_set_mark_raw(markOop new_mark, markOop old_mark, atomic_memory_order order) {
  return Atomic::cmpxchg(new_mark, &_mark, old_mark, order);
}

void oopDesc::init_mark() {
  set_mark(markOopDesc::prototype_for_object(this));
}

void oopDesc::init_mark_raw() {
  set_mark_raw(markOopDesc::prototype_for_object(this));
}

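// With UseCompressedClassPointers the klass is stored in the _metadata union
// as a 32-bit narrowKlass that must be decoded on every read; otherwise the
// union holds a plain Klass*. klass_or_null() tolerates a zero
// (not-yet-installed) klass field, while klass() uses decode_not_null() and
// so requires the field to be set.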
Klass* oopDesc::klass() const {
  if (UseCompressedClassPointers) {
    return CompressedKlassPointers::decode_not_null(_metadata._compressed_klass);
  } else {
    return _metadata._klass;
  }
}

Klass* oopDesc::klass_or_null() const volatile {
  if (UseCompressedClassPointers) {
    return CompressedKlassPointers::decode(_metadata._compressed_klass);
  } else {
    return _metadata._klass;
  }
}

Klass* oopDesc::klass_or_null_acquire() const volatile {
  if (UseCompressedClassPointers) {
    // Workaround for non-const load_acquire parameter.
    const volatile narrowKlass* addr = &_metadata._compressed_klass;
    volatile narrowKlass* xaddr = const_cast<volatile narrowKlass*>(addr);
    return CompressedKlassPointers::decode(OrderAccess::load_acquire(xaddr));
  } else {
    return OrderAccess::load_acquire(&_metadata._klass);
  }
}

Klass** oopDesc::klass_addr(HeapWord* mem) {
  // Only used internally and with CMS; does not work with
  // UseCompressedClassPointers.
  assert(!UseCompressedClassPointers, "only supported with uncompressed klass pointers");
  ByteSize offset = byte_offset_of(oopDesc, _metadata._klass);
  return (Klass**) (((char*)mem) + in_bytes(offset));
}

narrowKlass* oopDesc::compressed_klass_addr(HeapWord* mem) {
  assert(UseCompressedClassPointers, "only called with compressed klass pointers");
  ByteSize offset = byte_offset_of(oopDesc, _metadata._compressed_klass);
  return (narrowKlass*) (((char*)mem) + in_bytes(offset));
}

Klass** oopDesc::klass_addr() {
  return klass_addr((HeapWord*)this);
}

narrowKlass* oopDesc::compressed_klass_addr() {
  return compressed_klass_addr((HeapWord*)this);
}

#define CHECK_SET_KLASS(k)                                                \
  do {                                                                    \
    assert(Universe::is_bootstrapping() || k != NULL, "NULL Klass");      \
    assert(Universe::is_bootstrapping() || k->is_klass(), "not a Klass"); \
  } while (0)

void oopDesc::set_klass(Klass* k) {
  CHECK_SET_KLASS(k);
  if (UseCompressedClassPointers) {
    *compressed_klass_addr() = CompressedKlassPointers::encode_not_null(k);
  } else {
    *klass_addr() = k;
  }
}

void oopDesc::release_set_klass(HeapWord* mem, Klass* klass) {
  CHECK_SET_KLASS(klass);
  if (UseCompressedClassPointers) {
    OrderAccess::release_store(compressed_klass_addr(mem),
                               CompressedKlassPointers::encode_not_null(klass));
  } else {
    OrderAccess::release_store(klass_addr(mem), klass);
  }
}
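// release_set_klass is the publication step when initializing a freshly
// allocated object: the release store ensures that a concurrent reader who
// observes the klass also observes all stores made to the object before the
// klass was installed (the matching read is klass_or_null_acquire()).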

#undef CHECK_SET_KLASS

int oopDesc::klass_gap() const {
  return *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes());
}

void oopDesc::set_klass_gap(HeapWord* mem, int v) {
  if (UseCompressedClassPointers) {
    *(int*)(((char*)mem) + klass_gap_offset_in_bytes()) = v;
  }
}

void oopDesc::set_klass_gap(int v) {
  set_klass_gap((HeapWord*)this, v);
}

void oopDesc::set_klass_to_list_ptr(oop k) {
  // This is only to be used during GC, for from-space objects, so no
  // barrier is needed.
  if (UseCompressedClassPointers) {
    _metadata._compressed_klass = (narrowKlass)CompressedOops::encode(k);  // may be null (parnew overflow handling)
  } else {
    _metadata._klass = (Klass*)(address)k;
  }
}

oop oopDesc::list_ptr_from_klass() {
  // This is only to be used during GC, for from-space objects.
  if (UseCompressedClassPointers) {
    return CompressedOops::decode((narrowOop)_metadata._compressed_klass);
  } else {
    // Special case for GC
    return (oop)(address)_metadata._klass;
  }
}
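// During a scavenge the klass slot of a from-space object is no longer
// needed, so the two helpers above reuse it to thread objects onto a work
// list (e.g. the ParNew overflow list): the value stored is an encoded oop
// rather than a Klass*, which is why the casts deliberately bypass the
// usual typing.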

bool oopDesc::is_a(Klass* k) const {
  return klass()->is_subtype_of(k);
}

int oopDesc::size() {
  return size_given_klass(klass());
}

int oopDesc::size_given_klass(Klass* klass) {
  int lh = klass->layout_helper();
  int s;

  // lh is now a value computed at class initialization that may hint
  // at the size.  For instances, this is positive and equal to the
  // size.  For arrays, this is negative and provides log2 of the
  // array element size.  For other oops, it is zero and thus requires
  // a virtual call.
  //
  // We go to all this trouble because the size computation is at the
  // heart of phase 2 of mark-compaction, and called for every object,
  // alive or dead.  So the speed here is equal in importance to the
  // speed of allocation.

  if (lh > Klass::_lh_neutral_value) {
    if (!Klass::layout_helper_needs_slow_path(lh)) {
      s = lh >> LogHeapWordSize;  // deliver size scaled by wordSize
    } else {
      s = klass->oop_size(this);
    }
  } else if (lh <= Klass::_lh_neutral_value) {
    // The most common case is instances; fall through if so.
    if (lh < Klass::_lh_neutral_value) {
      // Second most common case is arrays. Fetch the array length, scale it
      // by the element size (a shift by log2_element_size), add the header
      // size, and align to the object alignment.
      size_t size_in_bytes;
      size_t array_length = (size_t) ((arrayOop)this)->length();
      size_in_bytes = array_length << Klass::layout_helper_log2_element_size(lh);
      size_in_bytes += Klass::layout_helper_header_size(lh);

      // This code could be simplified, but by keeping array_header_in_bytes
      // in units of bytes and doing it this way we can round up just once,
      // skipping the intermediate round to HeapWordSize.
      s = (int)(align_up(size_in_bytes, MinObjAlignmentInBytes) / HeapWordSize);

      // ParNew (used by CMS), UseParallelGC and UseG1GC can change the length field
      // of an "old copy" of an object array in the young gen so it indicates
      // the grey portion of an already copied array. This will cause the first
      // disjunct below to fail if the two comparands are computed across such
      // a concurrent change.
      // ParNew also runs with promotion labs (which look like int
      // filler arrays) which are subject to changing their declared size
      // when finally retiring a PLAB; this also can cause the first disjunct
      // to fail for another worker thread that is concurrently walking the block
      // offset table. Both these invariant failures are benign for their
      // current uses; we relax the assertion checking to cover these two cases below:
      //   is_objArray() && is_forwarded()   // covers first scenario above
      //   || is_typeArray()                 // covers second scenario above
      // If and when UseParallelGC uses the same obj array oop stealing/chunking
      // technique, we will need to suitably modify the assertion.
      assert((s == klass->oop_size(this)) ||
             (Universe::heap()->is_gc_active() &&
              ((is_typeArray() && UseConcMarkSweepGC) ||
               (is_objArray() && is_forwarded() && (UseConcMarkSweepGC || UseParallelGC || UseG1GC)))),
             "wrong array object size");
    } else {
      // Must be zero, so bite the bullet and take the virtual call.
      s = klass->oop_size(this);
    }
  }

  assert(s > 0, "Oop size must be greater than zero, not %d", s);
  assert(is_object_aligned(s), "Oop size is not properly aligned: %d", s);
  return s;
}
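// Worked example (illustrative; the exact header size depends on the build):
// for a jint array the layout helper encodes log2_element_size == 2, so a
// length-10 array occupies header_size + (10 << 2) bytes, which is then
// rounded up to MinObjAlignmentInBytes and converted to HeapWords.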

bool oopDesc::is_instance()  const { return klass()->is_instance_klass();  }
bool oopDesc::is_array()     const { return klass()->is_array_klass();     }
bool oopDesc::is_objArray()  const { return klass()->is_objArray_klass();  }
bool oopDesc::is_typeArray() const { return klass()->is_typeArray_klass(); }

void* oopDesc::field_addr_raw(int offset) const { return reinterpret_cast<void*>(cast_from_oop<intptr_t>(as_oop()) + offset); }
void* oopDesc::field_addr(int offset)     const { return Access<>::resolve(as_oop())->field_addr_raw(offset); }

template <class T>
T* oopDesc::obj_field_addr_raw(int offset) const { return (T*) field_addr_raw(offset); }

template <typename T>
size_t oopDesc::field_offset(T* p) const { return pointer_delta((void*)p, (void*)this, 1); }

template <DecoratorSet decorators>
inline oop  oopDesc::obj_field_access(int offset) const { return HeapAccess<decorators>::oop_load_at(as_oop(), offset); }
inline oop  oopDesc::obj_field(int offset) const { return HeapAccess<>::oop_load_at(as_oop(), offset); }

inline void oopDesc::obj_field_put(int offset, oop value) { HeapAccess<>::oop_store_at(as_oop(), offset, value); }

inline jbyte oopDesc::byte_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
inline void  oopDesc::byte_field_put(int offset, jbyte value) { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jchar oopDesc::char_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
inline void  oopDesc::char_field_put(int offset, jchar value) { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jboolean oopDesc::bool_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
inline void     oopDesc::bool_field_put(int offset, jboolean value) { HeapAccess<>::store_at(as_oop(), offset, jboolean(value & 1)); }

inline jshort oopDesc::short_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
inline void   oopDesc::short_field_put(int offset, jshort value) { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jint oopDesc::int_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
inline jint oopDesc::int_field_raw(int offset) const { return RawAccess<>::load_at(as_oop(), offset); }
inline void oopDesc::int_field_put(int offset, jint value) { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jlong oopDesc::long_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
inline void  oopDesc::long_field_put(int offset, jlong value) { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jfloat oopDesc::float_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
inline void   oopDesc::float_field_put(int offset, jfloat value) { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jdouble oopDesc::double_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
inline void    oopDesc::double_field_put(int offset, jdouble value) { HeapAccess<>::store_at(as_oop(), offset, value); }
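// Example use of the field accessors (illustrative; 'value_offset' is a
// hypothetical field offset, e.g. obtained from a fieldDescriptor):
//   jint v = obj->int_field(value_offset);    // load through GC barriers
//   obj->int_field_put(value_offset, v + 1);  // store through GC barriers
// The _raw and RawAccess variants bypass the barriers and are only safe when
// the caller knows the collector does not need to observe the access.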

bool oopDesc::is_locked() const {
  return mark()->is_locked();
}

bool oopDesc::is_unlocked() const {
  return mark()->is_unlocked();
}

bool oopDesc::has_bias_pattern() const {
  return mark()->has_bias_pattern();
}

bool oopDesc::has_bias_pattern_raw() const {
  return mark_raw()->has_bias_pattern();
}

// Used only for markSweep, scavenging
bool oopDesc::is_gc_marked() const {
  return mark_raw()->is_marked();
}

// Used by scavengers
bool oopDesc::is_forwarded() const {
  // The extra heap check is needed since the obj might be locked, in which case the
  // mark would point to a stack location and have the sentinel bit cleared
  return mark_raw()->is_marked();
}

// Used by scavengers
void oopDesc::forward_to(oop p) {
  assert(check_obj_alignment(p),
         "forwarding to something not aligned");
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  assert(!is_archived_object(oop(this)) &&
         !is_archived_object(p),
         "forwarding archive object");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversible");
  set_mark_raw(m);
}

// Used by parallel scavengers
bool oopDesc::cas_forward_to(oop p, markOop compare, atomic_memory_order order) {
  assert(check_obj_alignment(p),
         "forwarding to something not aligned");
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversible");
  return cas_set_mark_raw(m, compare, order) == compare;
}

oop oopDesc::forward_to_atomic(oop p, markOop compare, atomic_memory_order order) {
  // CMS forwards some non-heap value into the mark oop to reserve oops during
  // promotion, so the next two asserts do not hold.
  assert(UseConcMarkSweepGC || check_obj_alignment(p),
         "forwarding to something not aligned");
  assert(UseConcMarkSweepGC || Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversible");
  markOop old_mark = cas_set_mark_raw(m, compare, order);
  if (old_mark == compare) {
    return NULL;
  } else {
    return (oop)old_mark->decode_pointer();
  }
}
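// Sketch of a parallel-scavenge forwarding sequence built on the helpers
// above (illustrative only, not a verbatim collector loop):
//   markOop m = old_obj->mark_raw();
//   oop copy = /* allocate in to-space and copy old_obj */;
//   oop winner = old_obj->forward_to_atomic(copy, m, memory_order_relaxed);
//   if (winner != NULL) {
//     // Lost the installation race; discard 'copy' and use 'winner'.
//   }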

// Note that the forwardee is not the same thing as the displaced_mark.
// The forwardee is used when copying during scavenge and mark-sweep.
// It does need to clear the low two locking- and GC-related bits.
oop oopDesc::forwardee() const {
  return (oop) mark_raw()->decode_pointer();
}

// Note that the forwardee is not the same thing as the displaced_mark.
// The forwardee is used when copying during scavenge and mark-sweep.
// It does need to clear the low two locking- and GC-related bits.
oop oopDesc::forwardee_acquire() const {
  markOop m = OrderAccess::load_acquire(&_mark);
  return (oop) m->decode_pointer();
}

// The following method needs to be MT safe.
uint oopDesc::age() const {
  assert(!is_forwarded(), "Attempt to read age from forwarded mark");
  if (has_displaced_mark_raw()) {
    return displaced_mark_raw()->age();
  } else {
    return mark_raw()->age();
  }
}

void oopDesc::incr_age() {
  assert(!is_forwarded(), "Attempt to increment age of forwarded mark");
  if (has_displaced_mark_raw()) {
    set_displaced_mark_raw(displaced_mark_raw()->incr_age());
  } else {
    set_mark_raw(mark_raw()->incr_age());
  }
}
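// When the header is stack-locked or points to an inflated monitor, the real
// mark word (and with it the age bits) lives in the displaced header, which
// is why age() and incr_age() indirect through the displaced mark when one
// is present.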

template <typename OopClosureType>
void oopDesc::oop_iterate(OopClosureType* cl) {
  OopIteratorClosureDispatch::oop_oop_iterate(cl, this, klass());
}

template <typename OopClosureType>
void oopDesc::oop_iterate(OopClosureType* cl, MemRegion mr) {
  OopIteratorClosureDispatch::oop_oop_iterate(cl, this, klass(), mr);
}

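// In the oop_iterate_size variants below, the size is computed before the
// closure is dispatched: the closure may overwrite or forward the object
// (e.g. when used for evacuation), after which its size could no longer be
// read reliably.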
template <typename OopClosureType>
int oopDesc::oop_iterate_size(OopClosureType* cl) {
  Klass* k = klass();
  int size = size_given_klass(k);
  OopIteratorClosureDispatch::oop_oop_iterate(cl, this, k);
  return size;
}

template <typename OopClosureType>
int oopDesc::oop_iterate_size(OopClosureType* cl, MemRegion mr) {
  Klass* k = klass();
  int size = size_given_klass(k);
  OopIteratorClosureDispatch::oop_oop_iterate(cl, this, k, mr);
  return size;
}

template <typename OopClosureType>
void oopDesc::oop_iterate_backwards(OopClosureType* cl) {
  OopIteratorClosureDispatch::oop_oop_iterate_backwards(cl, this, klass());
}

bool oopDesc::is_instanceof_or_null(oop obj, Klass* klass) {
  return obj == NULL || obj->klass()->is_subtype_of(klass);
}

intptr_t oopDesc::identity_hash() {
  // Fast case; if the object is unlocked and the hash value is set, no locking is needed.
  // Note: The mark must be read into a local variable to avoid concurrent updates.
  markOop mrk = mark();
  if (mrk->is_unlocked() && !mrk->has_no_hash()) {
    return mrk->hash();
  } else if (mrk->is_marked()) {
    return mrk->hash();
  } else {
    return slow_identity_hash();
  }
}
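// The slow path installs a hash under synchronization, which may require
// inflating a stack lock to a full monitor; the mrk->is_marked() case accepts
// a header whose low bits are set for GC marking but whose hash bits are
// still valid.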

bool oopDesc::has_displaced_mark_raw() const {
  return mark_raw()->has_displaced_mark_helper();
}

markOop oopDesc::displaced_mark_raw() const {
  return mark_raw()->displaced_mark_helper();
}

void oopDesc::set_displaced_mark_raw(markOop m) {
  mark_raw()->set_displaced_mark_helper(m);
}

#endif // SHARE_OOPS_OOP_INLINE_HPP