/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHARED_SPACE_HPP
#define SHARE_GC_SHARED_SPACE_HPP

#include "gc/shared/blockOffsetTable.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/workgroup.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.hpp"
#include "memory/memRegion.hpp"
#include "oops/markOop.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"

// A space is an abstraction for the "storage units" backing
// up the generation abstraction. It includes specific
// implementations for keeping track of free and used space,
// for iterating over objects and free blocks, etc.

// Forward decls.
class Space;
class BlockOffsetArray;
class BlockOffsetArrayContigSpace;
class Generation;
class CompactibleSpace;
class BlockOffsetTable;
class CardTableRS;
class DirtyCardToOopClosure;

// A Space describes a heap area. Class Space is an abstract
// base class.
//
// Space supports allocation and size computation, and provides GC support.
//
// Invariant: bottom() and end() are on page_size boundaries and
// bottom() <= top() <= end()
// top() is inclusive and end() is exclusive.
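//
// An illustrative sketch of that invariant for a contiguous space
// (not to scale):
//
//   bottom()                    top()                     end()
//      |      allocated objects   |       free space        |
//      v                          v                         v
//      +--------------------------+-------------------------+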
class Space: public CHeapObj<mtGC> {
  friend class VMStructs;
 protected:
  HeapWord* _bottom;
  HeapWord* _end;

  // Used in support of save_marks()
  HeapWord* _saved_mark_word;

  // A sequential tasks done structure. This supports
  // parallel GC, where we have threads dynamically
  // claiming sub-tasks from a larger parallel task.
  SequentialSubTasksDone _par_seq_tasks;

  Space():
    _bottom(NULL), _end(NULL) { }

 public:
  // Accessors
  HeapWord* bottom() const         { return _bottom; }
  HeapWord* end() const            { return _end;    }
  virtual void set_bottom(HeapWord* value) { _bottom = value; }
  virtual void set_end(HeapWord* value)    { _end = value; }

  virtual HeapWord* saved_mark_word() const { return _saved_mark_word; }

  void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; }

  // Returns true if this object has been allocated since a
  // generation's "save_marks" call.
  virtual bool obj_allocated_since_save_marks(const oop obj) const {
    return (HeapWord*)obj >= saved_mark_word();
  }

  virtual MemRegionClosure* preconsumptionDirtyCardClosure() const {
    return NULL;
  }

  // Returns a subregion of the space containing only the allocated objects in
  // the space.
  virtual MemRegion used_region() const = 0;

  // Returns a region that is guaranteed to contain (at least) all objects
  // allocated at the time of the last call to "save_marks".  If the space
  // initializes its DirtyCardToOopClosures, specifying the "contig" option
  // (that is, if the space is contiguous), then this region must contain only
  // such objects: the memregion will be from the bottom of the region to the
  // saved mark.  Otherwise, the "obj_allocated_since_save_marks" method of
  // the space must distinguish between objects in the region allocated before
  // and after the call to save marks.
  MemRegion used_region_at_save_marks() const {
    return MemRegion(bottom(), saved_mark_word());
  }
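
  // An illustrative (hypothetical) use of the save-marks protocol: note the
  // current allocation point, then later distinguish the objects allocated
  // after that point:
  //
  //   sp->set_saved_mark_word(sp->used_region().end());  // "save marks"
  //   ... allocation proceeds ...
  //   if (sp->obj_allocated_since_save_marks(obj)) {
  //     // obj was allocated after the save-marks call
  //   }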

  // Initialization.
  // "initialize" should be called once on a space, before it is used for
  // any purpose.  The "mr" argument gives the bounds of the space, and
  // the "clear_space" argument should be true unless the memory in "mr" is
  // known to be zeroed.
  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);

  // The "clear" method must be called on a region that may have
  // had allocation performed in it, but is now to be considered empty.
  virtual void clear(bool mangle_space);

  // For detecting GC bugs.  Should only be called at GC boundaries, since
  // some unused space may be used as scratch space during GC's.
  // We also call this when expanding a space to satisfy an allocation
  // request. See bug #4668531
  virtual void mangle_unused_area() = 0;
  virtual void mangle_unused_area_complete() = 0;

  // Testers
  bool is_empty() const  { return used() == 0; }
  bool not_empty() const { return used() > 0; }

  // Returns true iff the space contains the given address as part
  // of an allocated object.  For certain kinds of spaces, this
  // might be a potentially expensive operation.  To prevent
  // performance problems on account of its inadvertent use in
  // product jvms, we restrict its use to assertion checks only.
  bool is_in(const void* p) const {
    return used_region().contains(p);
  }
  bool is_in(oop obj) const {
    return is_in((void*)obj);
  }

  // Returns true iff the given reserved memory of the space contains the
  // given address.
  bool is_in_reserved(const void* p) const { return _bottom <= p && p < _end; }

  // Returns true iff the given block is not allocated.
  virtual bool is_free_block(const HeapWord* p) const = 0;

  // Test whether p is double-aligned
  static bool is_aligned(void* p) {
    return ::is_aligned(p, sizeof(double));
  }

  // Size computations.  Sizes are in bytes.
  size_t capacity() const     { return byte_size(bottom(), end()); }
  virtual size_t used() const = 0;
  virtual size_t free() const = 0;

  // Iterate over all the ref-containing fields of all objects in the
  // space, calling "cl.do_oop" on each.  Fields in objects allocated by
  // applications of the closure are not included in the iteration.
  virtual void oop_iterate(OopIterateClosure* cl);

  // Iterate over all objects in the space, calling "cl.do_object" on
  // each.  Objects allocated by applications of the closure are not
  // included in the iteration.
  virtual void object_iterate(ObjectClosure* blk) = 0;
  // Similar to object_iterate() except only iterates over
  // objects whose internal references point to objects in the space.
  virtual void safe_object_iterate(ObjectClosure* blk) = 0;

  // Create and return a new dirty card to oop closure. Can be
  // overridden to return the appropriate type of closure
  // depending on the type of space in which the closure will
  // operate. ResourceArea allocated.
  virtual DirtyCardToOopClosure* new_dcto_cl(OopIterateClosure* cl,
                                             CardTable::PrecisionStyle precision,
                                             HeapWord* boundary,
                                             bool parallel);

  // If "p" is in the space, returns the address of the start of the
  // "block" that contains "p".  We say "block" instead of "object" since
  // some heaps may not pack objects densely; a chunk may either be an
  // object or a non-object.  If "p" is not in the space, return NULL.
  virtual HeapWord* block_start_const(const void* p) const = 0;

  // The non-const version may have benevolent side effects on the data
  // structure supporting these calls, possibly speeding up future calls.
  // The default implementation, however, is simply to call the const
  // version.
  virtual HeapWord* block_start(const void* p);

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const = 0;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object and the object is alive.
  virtual bool obj_is_alive(const HeapWord* addr) const;
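
  // An illustrative sketch of walking a space block by block with these
  // primitives (assumes a safepoint and a Space* "sp" whose used_region()
  // is meaningful):
  //
  //   MemRegion mr = sp->used_region();
  //   HeapWord* p = mr.start();
  //   while (p < mr.end()) {
  //     if (sp->block_is_obj(p) && sp->obj_is_alive(p)) {
  //       // p is the start of a live object
  //     }
  //     p += sp->block_size(p);   // advance to the next block
  //   }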

  // Allocation (return NULL if full).  Assumes the caller has established
  // mutually exclusive access to the space.
  virtual HeapWord* allocate(size_t word_size) = 0;

  // Allocation (return NULL if full).  Enforces mutual exclusion internally.
  virtual HeapWord* par_allocate(size_t word_size) = 0;
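
  // Illustrative use (hypothetical caller): allocation returns NULL on
  // failure rather than expanding or collecting, so callers must check:
  //
  //   HeapWord* obj = sp->par_allocate(word_size);
  //   if (obj == NULL) {
  //     // fall back, e.g. expand the space or trigger a collection
  //   }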

#if INCLUDE_SERIALGC
  // Mark-sweep-compact support: all spaces can update pointers to objects
  // moving as a part of compaction.
  virtual void adjust_pointers() = 0;
#endif

  virtual void print() const;
  virtual void print_on(outputStream* st) const;
  virtual void print_short() const;
  virtual void print_short_on(outputStream* st) const;


  // Accessor for parallel sequential tasks.
  SequentialSubTasksDone* par_seq_tasks() { return &_par_seq_tasks; }

  // If "this" is a ContiguousSpace, return it, else return NULL.
  virtual ContiguousSpace* toContiguousSpace() {
    return NULL;
  }

  // Debugging
  virtual void verify() const = 0;
};

// A MemRegionClosure (ResourceObj) whose "do_MemRegion" function applies an
// OopClosure to (the addresses of) all the ref-containing fields that could
// be modified by virtue of the given MemRegion being dirty. (Note that
// because of the imprecise nature of the write barrier, this may iterate
// over oops beyond the region.)
// This base type for dirty card to oop closures handles memory regions
// in non-contiguous spaces with no boundaries, and should be sub-classed
// to support other space types. See ContiguousSpaceDCTOC for a sub-class
// that works with ContiguousSpaces.

class DirtyCardToOopClosure: public MemRegionClosureRO {
protected:
  OopIterateClosure* _cl;
  Space* _sp;
  CardTable::PrecisionStyle _precision;
  HeapWord* _boundary;          // If non-NULL, process only non-NULL oops
                                // pointing below boundary.
  HeapWord* _min_done;          // ObjHeadPreciseArray precision requires
                                // a downwards traversal; this is the
                                // lowest location already done (or,
                                // alternatively, the lowest address that
                                // shouldn't be done again).  NULL means infinity.
  NOT_PRODUCT(HeapWord* _last_bottom;)
  NOT_PRODUCT(HeapWord* _last_explicit_min_done;)

  // Get the actual top of the area on which the closure will
  // operate, given where the top is assumed to be (the end of the
  // memory region passed to do_MemRegion) and where the object
  // at the top is assumed to start. For example, an object may
  // start at the top but actually extend past the assumed top,
  // in which case the top becomes the end of the object.
  virtual HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  // Walk the given memory region from bottom to (actual) top
  // looking for objects and applying the oop closure (_cl) to
  // them. The base implementation of this treats the area as
  // blocks, where a block may or may not be an object. Sub-
  // classes should override this to provide more accurate
  // or possibly more efficient walking.
  virtual void walk_mem_region(MemRegion mr, HeapWord* bottom, HeapWord* top);

public:
  DirtyCardToOopClosure(Space* sp, OopIterateClosure* cl,
                        CardTable::PrecisionStyle precision,
                        HeapWord* boundary) :
    _cl(cl), _sp(sp), _precision(precision), _boundary(boundary),
    _min_done(NULL) {
    NOT_PRODUCT(_last_bottom = NULL);
    NOT_PRODUCT(_last_explicit_min_done = NULL);
  }

  void do_MemRegion(MemRegion mr);

  void set_min_done(HeapWord* min_done) {
    _min_done = min_done;
    NOT_PRODUCT(_last_explicit_min_done = _min_done);
  }
#ifndef PRODUCT
  void set_last_bottom(HeapWord* last_bottom) {
    _last_bottom = last_bottom;
  }
#endif
};
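
// An illustrative (hypothetical) use, e.g. from a card-table scan, with
// a caller-supplied closure "oop_cl" and a dirty region "dirty_mr":
//
//   DirtyCardToOopClosure* dcto =
//     sp->new_dcto_cl(&oop_cl, CardTable::ObjHeadPreciseArray,
//                     NULL /* boundary */, false /* parallel */);
//   dcto->do_MemRegion(dirty_mr);  // applies oop_cl to refs on dirty cards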

// A structure to represent a point at which objects are being copied
// during compaction.
class CompactPoint : public StackObj {
public:
  Generation* gen;
  CompactibleSpace* space;
  HeapWord* threshold;

  CompactPoint(Generation* g = NULL) :
    gen(g), space(NULL), threshold(0) {}
};

// A space that supports compaction operations.  This is usually, but not
// necessarily, a space that is normally contiguous.  But, for example, a
// free-list-based space whose normal collection is a mark-sweep without
// compaction could still support compaction in full GC's.
//
// The compaction operations are implemented by the
// scan_and_{adjust_pointers,compact,forward} function templates.
// The following non-virtual auxiliary functions are used by these function
// templates:
// - scan_limit()
// - scanned_block_is_obj()
// - scanned_block_size()
// - adjust_obj_size()
// - obj_size()
// These functions are to be used exclusively by the scan_and_* function templates,
// and must be defined for all (non-abstract) subclasses of CompactibleSpace.
//
// NOTE: Any subclass of CompactibleSpace wanting to change/define the behavior
// of any of the auxiliary functions must also override the corresponding
// prepare_for_compaction/adjust_pointers/compact functions using them;
// otherwise, such changes will not be picked up and will have no effect on
// the compaction operations (see the sketch below the list of dependencies).
//
// This translates to the following dependencies:
// Overrides/definitions of
//  - scan_limit
//  - scanned_block_is_obj
//  - scanned_block_size
// require override/definition of prepare_for_compaction().
// Similar dependencies exist between
//  - adjust_obj_size  and adjust_pointers()
//  - obj_size         and compact().
//
// Additionally, this also means that changes to block_size() or block_is_obj() that
// should be effective during the compaction operations must provide a corresponding
// definition of scanned_block_size/scanned_block_is_obj respectively.
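//
// An illustrative sketch (hypothetical subclass) of how the static binding
// works: prepare_for_compaction() instantiates scan_and_forward() with the
// concrete space type, so the non-virtual auxiliary functions defined on
// that type are the ones picked up (a friend declaration is needed if they
// are private, as in ContiguousSpace below):
//
//   class MySpace : public CompactibleSpace {
//     inline HeapWord* scan_limit() const                       { ... }
//     inline bool scanned_block_is_obj(const HeapWord* p) const { ... }
//     inline size_t scanned_block_size(const HeapWord* p) const { ... }
//   public:
//     virtual void prepare_for_compaction(CompactPoint* cp) {
//       scan_and_forward(this, cp);  // binds the functions above
//     }
//   };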
class CompactibleSpace: public Space {
  friend class VMStructs;
  friend class CompactibleFreeListSpace;
private:
  HeapWord* _compaction_top;
  CompactibleSpace* _next_compaction_space;

  // Auxiliary functions for scan_and_{forward,adjust_pointers,compact} support.
  inline size_t adjust_obj_size(size_t size) const {
    return size;
  }

  inline size_t obj_size(const HeapWord* addr) const;

  template <class SpaceType>
  static inline void verify_up_to_first_dead(SpaceType* space) NOT_DEBUG_RETURN;

  template <class SpaceType>
  static inline void clear_empty_region(SpaceType* space);

public:
  CompactibleSpace() :
   _compaction_top(NULL), _next_compaction_space(NULL) {}

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  // Used temporarily during a compaction phase to hold the value
  // top should have when compaction is complete.
  HeapWord* compaction_top() const { return _compaction_top; }

  void set_compaction_top(HeapWord* value) {
    assert(value == NULL || (value >= bottom() && value <= end()),
      "should point inside space");
    _compaction_top = value;
  }

  // Perform operations on the space needed after a compaction
  // has been performed.
  virtual void reset_after_compaction() = 0;

  // Returns the next space (in the current generation) to be compacted in
  // the global compaction order.  It is also used to select the next
  // space into which to compact.

  virtual CompactibleSpace* next_compaction_space() const {
    return _next_compaction_space;
  }

  void set_next_compaction_space(CompactibleSpace* csp) {
    _next_compaction_space = csp;
  }

#if INCLUDE_SERIALGC
  // MarkSweep support phase2

  // Start the process of compaction of the current space: compute
  // post-compaction addresses, and insert forwarding pointers.  The fields
  // "cp->gen" and "cp->compaction_space" are the generation and space into
  // which we are currently compacting.  This call updates "cp" as necessary,
  // and leaves the "compaction_top" of the final value of
  // "cp->compaction_space" up-to-date.  Offset tables may be updated in
  // this phase as if the final copy had occurred; if so, "cp->threshold"
  // indicates when the next such action should be taken.
  virtual void prepare_for_compaction(CompactPoint* cp) = 0;
  // MarkSweep support phase3
  virtual void adjust_pointers();
  // MarkSweep support phase4
  virtual void compact();
#endif // INCLUDE_SERIALGC

  // The maximum percentage of objects that can be dead in the compacted
  // live part of a compacted space ("deadwood" support.)
  virtual size_t allowed_dead_ratio() const { return 0; }

  // Some contiguous spaces may maintain some data structures that should
  // be updated whenever an allocation crosses a boundary.  This function
  // returns the first such boundary.
  // (The default implementation returns the end of the space, so the
  // boundary is never crossed.)
  virtual HeapWord* initialize_threshold() { return end(); }

  // "q" is an object of the given "size" that should be forwarded;
  // "cp" names the generation ("gen") and the containing space ("this",
  // which must also equal "cp->space").  "compact_top" is where in "this"
  // the next object should be forwarded to.  If there is room in "this" for
  // the object, insert an appropriate forwarding pointer in "q".
  // If not, go to the next compaction space (there must
  // be one, since compaction must succeed -- we go to the first space of
  // the previous generation if necessary, updating "cp"), reset compact_top
  // and then forward.  In either case, returns the new value of "compact_top".
  // If the forwarding crosses "cp->threshold", invokes the "cross_threshold"
  // function of the then-current compaction space, and updates
  // "cp->threshold" accordingly.
  virtual HeapWord* forward(oop q, size_t size, CompactPoint* cp,
                            HeapWord* compact_top);

  // Return a size with adjustments as required of the space.
  virtual size_t adjust_object_size_v(size_t size) const { return size; }

  void set_first_dead(HeapWord* value) { _first_dead = value; }
  void set_end_of_live(HeapWord* value) { _end_of_live = value; }

protected:
  // Used during compaction.
  HeapWord* _first_dead;
  HeapWord* _end_of_live;

  // Minimum size of a free block.
  virtual size_t minimum_free_block_size() const { return 0; }

  // This function is invoked when an allocation of an object covering
  // "start" to "end" crosses the threshold; returns the next
  // threshold.  (The default implementation does nothing.)
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* the_end) {
    return end();
  }

  // Below are template functions for scan_and_* algorithms (avoiding virtual calls).
  // The space argument should be a subclass of CompactibleSpace, implementing
  // scan_limit(), scanned_block_is_obj(), and scanned_block_size(),
  // and possibly also overriding obj_size(), and adjust_obj_size().
  // These functions should avoid virtual calls whenever possible.

#if INCLUDE_SERIALGC
  // Frequently calls adjust_obj_size().
  template <class SpaceType>
  static inline void scan_and_adjust_pointers(SpaceType* space);
#endif

  // Frequently calls obj_size().
  template <class SpaceType>
  static inline void scan_and_compact(SpaceType* space);

  // Frequently calls scanned_block_is_obj() and scanned_block_size().
  // Requires the scan_limit() function.
  template <class SpaceType>
  static inline void scan_and_forward(SpaceType* space, CompactPoint* cp);
};

class GenSpaceMangler;

// A space in which the free area is contiguous.  It therefore supports
// faster allocation and compaction.
class ContiguousSpace: public CompactibleSpace {
  friend class VMStructs;
  // Allow scan_and_forward function to call (private) overrides for auxiliary functions on this class
  template <typename SpaceType>
  friend void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp);

 private:
  // Auxiliary functions for scan_and_forward support.
  // See comments for CompactibleSpace for more information.
  inline HeapWord* scan_limit() const {
    return top();
  }

  inline bool scanned_block_is_obj(const HeapWord* addr) const {
    return true; // Always true, since scan_limit is top
  }

  inline size_t scanned_block_size(const HeapWord* addr) const;

 protected:
  HeapWord* _top;
  HeapWord* _concurrent_iteration_safe_limit;
  // A helper for mangling the unused area of the space in debug builds.
  GenSpaceMangler* _mangler;

  GenSpaceMangler* mangler() { return _mangler; }

  // Allocation helpers (return NULL if full).
  inline HeapWord* allocate_impl(size_t word_size);
  inline HeapWord* par_allocate_impl(size_t word_size);
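
  // An illustrative sketch of the lock-free bump-pointer scheme behind
  // par_allocate_impl() (the real implementation lives in the inline
  // header; this is only the shape of it):
  //
  //   do {
  //     HeapWord* obj = top();
  //     if (pointer_delta(end(), obj) < word_size) return NULL;  // full
  //     HeapWord* new_top = obj + word_size;
  //     // Publish the new top only if no other thread got there first.
  //     if (Atomic::cmpxchg(new_top, top_addr(), obj) == obj) return obj;
  //   } while (true);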

 public:
  ContiguousSpace();
  ~ContiguousSpace();

  virtual void initialize(MemRegion mr, bool clear_space, bool mangle_space);
  virtual void clear(bool mangle_space);

  // Accessors
  HeapWord* top() const         { return _top; }
  void set_top(HeapWord* value) { _top = value; }

  void set_saved_mark()   { _saved_mark_word = top();    }
  void reset_saved_mark() { _saved_mark_word = bottom(); }

  bool saved_mark_at_top() const { return saved_mark_word() == top(); }

  // In debug mode mangle (write it with a particular bit
  // pattern) the unused part of a space.

  // Used to save an address in a space for later use during mangling.
  void set_top_for_allocations(HeapWord* v) PRODUCT_RETURN;
  // Used to save the space's current top for later use during mangling.
  void set_top_for_allocations() PRODUCT_RETURN;

  // Mangle regions in the space from the current top up to the
  // previously mangled part of the space.
  void mangle_unused_area() PRODUCT_RETURN;
  // Mangle [top, end)
  void mangle_unused_area_complete() PRODUCT_RETURN;

  // Do some sparse checking on the area that should have been mangled.
  void check_mangled_unused_area(HeapWord* limit) PRODUCT_RETURN;
  // Check the complete area that should have been mangled.
  // This code may be NULL depending on the macro DEBUG_MANGLING.
  void check_mangled_unused_area_complete() PRODUCT_RETURN;

  // Size computations: sizes in bytes.
  size_t capacity() const { return byte_size(bottom(), end()); }
  size_t used() const     { return byte_size(bottom(), top()); }
  size_t free() const     { return byte_size(top(),    end()); }

  virtual bool is_free_block(const HeapWord* p) const;

  // In a contiguous space we have a more obvious bound on what parts
  // contain objects.
  MemRegion used_region() const { return MemRegion(bottom(), top()); }

  // Allocation (return NULL if full)
  virtual HeapWord* allocate(size_t word_size);
  virtual HeapWord* par_allocate(size_t word_size);
  HeapWord* allocate_aligned(size_t word_size);

  // Iteration
  void oop_iterate(OopIterateClosure* cl);
  void object_iterate(ObjectClosure* blk);
  // For contiguous spaces this method will iterate safely over objects
  // in the space (i.e., between bottom and top) when at a safepoint.
  void safe_object_iterate(ObjectClosure* blk);

  // Iterate over as many initialized objects in the space as possible,
  // calling "cl.do_object_careful" on each. Return NULL if all objects
  // in the space (at the start of the iteration) were iterated over.
  // Return an address indicating the extent of the iteration in the
  // event that the iteration had to return because of finding an
  // uninitialized object in the space, or if the closure "cl"
  // signaled early termination.
  HeapWord* object_iterate_careful(ObjectClosureCareful* cl);
  HeapWord* concurrent_iteration_safe_limit() {
    assert(_concurrent_iteration_safe_limit <= top(),
           "_concurrent_iteration_safe_limit update missed");
    return _concurrent_iteration_safe_limit;
  }
  // Changes the safe limit. All objects from bottom() to the new
  // limit should be properly initialized.
  void set_concurrent_iteration_safe_limit(HeapWord* new_limit) {
    assert(new_limit <= top(), "uninitialized objects in the safe range");
    _concurrent_iteration_safe_limit = new_limit;
  }

  // In support of parallel oop_iterate.
  template <typename OopClosureType>
  void par_oop_iterate(MemRegion mr, OopClosureType* blk);

  // Compaction support
  virtual void reset_after_compaction() {
    assert(compaction_top() >= bottom() && compaction_top() <= end(), "should point inside space");
    set_top(compaction_top());
    // set new iteration safe limit
    set_concurrent_iteration_safe_limit(compaction_top());
  }

  // Override.
  DirtyCardToOopClosure* new_dcto_cl(OopIterateClosure* cl,
                                     CardTable::PrecisionStyle precision,
                                     HeapWord* boundary,
                                     bool parallel);

  // Apply "blk->do_oop" to the addresses of all reference fields in objects
  // starting with the _saved_mark_word, which was noted during a generation's
  // save_marks and is required to denote the head of an object.
  // Fields in objects allocated by applications of the closure
  // *are* included in the iteration.
  // Updates _saved_mark_word to point to just after the last object
  // iterated over.
  template <typename OopClosureType>
  void oop_since_save_marks_iterate(OopClosureType* blk);
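
  // An illustrative (hypothetical) use: scan only the objects allocated
  // since the last save-marks call, e.g. newly promoted objects:
  //
  //   sp->set_saved_mark();                    // note the current top
  //   ... objects are copied/allocated into sp ...
  //   sp->oop_since_save_marks_iterate(&cl);   // visits only the new
  //                                            // objects, advancing the
  //                                            // saved mark as it goes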

  // Same as object_iterate, but starting from "mark", which is required
  // to denote the start of an object.  Objects allocated by
  // applications of the closure *are* included in the iteration.
  virtual void object_iterate_from(HeapWord* mark, ObjectClosure* blk);

  // Very inefficient implementation.
  virtual HeapWord* block_start_const(const void* p) const;
  size_t block_size(const HeapWord* p) const;
  // If a block is in the allocated area, it is an object.
  bool block_is_obj(const HeapWord* p) const { return p < top(); }

  // Addresses for inlined allocation
  HeapWord** top_addr() { return &_top; }
  HeapWord** end_addr() { return &_end; }

#if INCLUDE_SERIALGC
  // Overrides for more efficient compaction support.
  void prepare_for_compaction(CompactPoint* cp);
#endif

  virtual void print_on(outputStream* st) const;

  // Checked dynamic downcasts.
  virtual ContiguousSpace* toContiguousSpace() {
    return this;
  }

  // Debugging
  virtual void verify() const;

  // Used to increase collection frequency.  "factor" of 0 means entire
  // space.
  void allocate_temporary_filler(int factor);
};


// A dirty card to oop closure that does filtering.
// It knows how to filter out objects that are outside of the _boundary.
class FilteringDCTOC : public DirtyCardToOopClosure {
protected:
  // Override.
  void walk_mem_region(MemRegion mr,
                       HeapWord* bottom, HeapWord* top);

  // Walk the given memory region, from bottom to top, applying
  // the given oop closure to (possibly) all objects found. The
  // given oop closure may or may not be the same as the oop
  // closure with which this closure was created, as it may
  // be a filtering closure which makes use of the _boundary.
  // We offer two signatures, so the FilteringClosure static type is
  // apparent.
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       OopIterateClosure* cl) = 0;
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl) = 0;

public:
  FilteringDCTOC(Space* sp, OopIterateClosure* cl,
                 CardTable::PrecisionStyle precision,
                 HeapWord* boundary) :
    DirtyCardToOopClosure(sp, cl, precision, boundary) {}
};

// A dirty card to oop closure for contiguous spaces
// (ContiguousSpace and sub-classes).
// It is a FilteringDCTOC, as defined above, and it knows:
//
// 1. That the actual top of any area in a memory region
//    contained by the space is bounded by the end of the contiguous
//    region of the space.
// 2. That the space is really made up of objects and not just
//    blocks.

class ContiguousSpaceDCTOC : public FilteringDCTOC {
protected:
  // Overrides.
  HeapWord* get_actual_top(HeapWord* top, HeapWord* top_obj);

  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       OopIterateClosure* cl);
  virtual void walk_mem_region_with_cl(MemRegion mr,
                                       HeapWord* bottom, HeapWord* top,
                                       FilteringClosure* cl);

public:
  ContiguousSpaceDCTOC(ContiguousSpace* sp, OopIterateClosure* cl,
                       CardTable::PrecisionStyle precision,
                       HeapWord* boundary) :
    FilteringDCTOC(sp, cl, precision, boundary)
  {}
};

// A ContigSpace that supports an efficient "block_start" operation via
// a BlockOffsetArray (whose BlockOffsetSharedArray may be shared with
// other spaces).  This is the abstract base class for old generation
// (tenured) spaces.

class OffsetTableContigSpace: public ContiguousSpace {
  friend class VMStructs;
 protected:
  BlockOffsetArrayContigSpace _offsets;
  Mutex _par_alloc_lock;

 public:
  // Constructor
  OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                         MemRegion mr);

  void set_bottom(HeapWord* value);
  void set_end(HeapWord* value);

  void clear(bool mangle_space);

  inline HeapWord* block_start_const(const void* p) const;

  // Add offset table update.
  virtual inline HeapWord* allocate(size_t word_size);
  inline HeapWord* par_allocate(size_t word_size);

  // MarkSweep support phase3
  virtual HeapWord* initialize_threshold();
  virtual HeapWord* cross_threshold(HeapWord* start, HeapWord* end);

  virtual void print_on(outputStream* st) const;

  // Debugging
  void verify() const;
};


// Class TenuredSpace is used by TenuredGeneration

class TenuredSpace: public OffsetTableContigSpace {
  friend class VMStructs;
 protected:
  // Mark sweep support
  size_t allowed_dead_ratio() const;
 public:
  // Constructor
  TenuredSpace(BlockOffsetSharedArray* sharedOffsetArray,
               MemRegion mr) :
    OffsetTableContigSpace(sharedOffsetArray, mr) {}
};
#endif // SHARE_GC_SHARED_SPACE_HPP