1/*
2 * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25#ifndef SHARE_GC_PARALLEL_PSPARALLELCOMPACT_INLINE_HPP
26#define SHARE_GC_PARALLEL_PSPARALLELCOMPACT_INLINE_HPP
27
28#include "gc/parallel/parallelScavengeHeap.hpp"
29#include "gc/parallel/parMarkBitMap.inline.hpp"
30#include "gc/parallel/psParallelCompact.hpp"
31#include "gc/shared/collectedHeap.hpp"
32#include "oops/access.inline.hpp"
33#include "oops/compressedOops.inline.hpp"
34#include "oops/klass.hpp"
35#include "oops/oop.inline.hpp"
36
37inline bool PSParallelCompact::is_marked(oop obj) {
38 return mark_bitmap()->is_marked(obj);
39}
40
41inline double PSParallelCompact::normal_distribution(double density) {
42 assert(_dwl_initialized, "uninitialized");
43 const double squared_term = (density - _dwl_mean) / _dwl_std_dev;
44 return _dwl_first_term * exp(-0.5 * squared_term * squared_term);
45}
46
47inline bool PSParallelCompact::dead_space_crosses_boundary(const RegionData* region,
48 idx_t bit) {
49 assert(bit > 0, "cannot call this for the first bit/region");
50 assert(_summary_data.region_to_addr(region) == _mark_bitmap.bit_to_addr(bit),
51 "sanity check");
52
53 // Dead space crosses the boundary if (1) a partial object does not extend
54 // onto the region, (2) an object does not start at the beginning of the
55 // region, and (3) an object does not end at the end of the prior region.
56 return region->partial_obj_size() == 0 &&
57 !_mark_bitmap.is_obj_beg(bit) &&
58 !_mark_bitmap.is_obj_end(bit - 1);
59}
60
61inline bool PSParallelCompact::is_in(HeapWord* p, HeapWord* beg_addr, HeapWord* end_addr) {
62 return p >= beg_addr && p < end_addr;
63}
64
65inline bool PSParallelCompact::is_in(oop* p, HeapWord* beg_addr, HeapWord* end_addr) {
66 return is_in((HeapWord*)p, beg_addr, end_addr);
67}
68
69inline MutableSpace* PSParallelCompact::space(SpaceId id) {
70 assert(id < last_space_id, "id out of range");
71 return _space_info[id].space();
72}
73
74inline HeapWord* PSParallelCompact::new_top(SpaceId id) {
75 assert(id < last_space_id, "id out of range");
76 return _space_info[id].new_top();
77}
78
79inline HeapWord* PSParallelCompact::dense_prefix(SpaceId id) {
80 assert(id < last_space_id, "id out of range");
81 return _space_info[id].dense_prefix();
82}
83
84inline ObjectStartArray* PSParallelCompact::start_array(SpaceId id) {
85 assert(id < last_space_id, "id out of range");
86 return _space_info[id].start_array();
87}
88
#ifdef ASSERT
// Debug-only sanity check on a compaction destination:  an object may only
// move toward lower addresses within its space (or into another space), and
// both the old and new addresses must be object-aligned.
inline void PSParallelCompact::check_new_location(HeapWord* old_addr, HeapWord* new_addr) {
  const bool moved_left = new_addr <= old_addr;
  assert(moved_left || space_id(old_addr) != space_id(new_addr),
         "must move left or to a different space");
  assert(is_object_aligned(old_addr) && is_object_aligned(new_addr),
         "checking alignment");
}
#endif // ASSERT
97
98inline bool PSParallelCompact::mark_obj(oop obj) {
99 const int obj_size = obj->size();
100 if (mark_bitmap()->mark_obj(obj, obj_size)) {
101 _summary_data.add_obj(obj, obj_size);
102 return true;
103 } else {
104 return false;
105 }
106}
107
108template <class T>
109inline void PSParallelCompact::adjust_pointer(T* p, ParCompactionManager* cm) {
110 T heap_oop = RawAccess<>::oop_load(p);
111 if (!CompressedOops::is_null(heap_oop)) {
112 oop obj = CompressedOops::decode_not_null(heap_oop);
113 assert(ParallelScavengeHeap::heap()->is_in(obj), "should be in heap");
114
115 oop new_obj = (oop)summary_data().calc_new_pointer(obj, cm);
116 assert(new_obj != NULL, // is forwarding ptr?
117 "should be forwarded");
118 // Just always do the update unconditionally?
119 if (new_obj != NULL) {
120 assert(ParallelScavengeHeap::heap()->is_in_reserved(new_obj),
121 "should be in object space");
122 RawAccess<IS_NOT_NULL>::oop_store(p, new_obj);
123 }
124 }
125}
126
127class PCAdjustPointerClosure: public BasicOopIterateClosure {
128public:
129 PCAdjustPointerClosure(ParCompactionManager* cm) {
130 assert(cm != NULL, "associate ParCompactionManage should not be NULL");
131 _cm = cm;
132 }
133 template <typename T> void do_oop_nv(T* p) { PSParallelCompact::adjust_pointer(p, _cm); }
134 virtual void do_oop(oop* p) { do_oop_nv(p); }
135 virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
136
137 // This closure provides its own oop verification code.
138 debug_only(virtual bool should_verify_oops() { return false; })
139 virtual ReferenceIterationMode reference_iteration_mode() { return DO_FIELDS; }
140private:
141 ParCompactionManager* _cm;
142};
143
144#endif // SHARE_GC_PARALLEL_PSPARALLELCOMPACT_INLINE_HPP
145