Thu, 22 Sep 2011 10:57:37 -0700
6484982: G1: process references during evacuation pauses
Summary: G1 now uses two reference processors - one is used by concurrent marking and the other is used by STW GCs (both full and incremental evacuation pauses). In an evacuation pause, the reference processor is embedded into the closures used to scan objects. Doing so causes reference objects to be 'discovered' by the reference processor. At the end of the evacuation pause, these discovered reference objects are processed - preserving (and copying) referent objects (and their reachable graphs) as appropriate.
Reviewed-by: ysr, jwilhelm, brutisso, stefank, tonyp
1 /*
2 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONLAB_HPP
26 #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONLAB_HPP
28 #include "gc_implementation/parallelScavenge/objectStartArray.hpp"
29 #include "memory/allocation.hpp"
31 //
32 // PSPromotionLAB is a parallel scavenge promotion lab. This class acts very
33 // much like a MutableSpace. We couldn't embed a MutableSpace, though, as
34 // it has a considerable number of asserts and invariants that are violated.
35 //
37 class ObjectStartArray;
39 class PSPromotionLAB : public CHeapObj {
40 protected:
41 static size_t filler_header_size;
43 enum LabState {
44 needs_flush,
45 flushed,
46 zero_size
47 };
49 HeapWord* _top;
50 HeapWord* _bottom;
51 HeapWord* _end;
52 LabState _state;
54 void set_top(HeapWord* value) { _top = value; }
55 void set_bottom(HeapWord* value) { _bottom = value; }
56 void set_end(HeapWord* value) { _end = value; }
58 // The shared initialize code invokes this.
59 debug_only(virtual bool lab_is_valid(MemRegion lab) { return false; });
61 PSPromotionLAB() : _top(NULL), _bottom(NULL), _end(NULL) { }
63 public:
64 // Filling and flushing.
65 void initialize(MemRegion lab);
67 virtual void flush();
69 // Accessors
70 HeapWord* bottom() const { return _bottom; }
71 HeapWord* end() const { return _end; }
72 HeapWord* top() const { return _top; }
74 bool is_flushed() { return _state == flushed; }
76 bool unallocate_object(oop obj);
78 // Returns a subregion containing all objects in this space.
79 MemRegion used_region() { return MemRegion(bottom(), top()); }
81 // Boolean querries.
82 bool is_empty() const { return used() == 0; }
83 bool not_empty() const { return used() > 0; }
84 bool contains(const void* p) const { return _bottom <= p && p < _end; }
86 // Size computations. Sizes are in bytes.
87 size_t capacity() const { return byte_size(bottom(), end()); }
88 size_t used() const { return byte_size(bottom(), top()); }
89 size_t free() const { return byte_size(top(), end()); }
90 };
92 class PSYoungPromotionLAB : public PSPromotionLAB {
93 public:
94 PSYoungPromotionLAB() { }
96 // Not MT safe
97 HeapWord* allocate(size_t size) {
98 // Can't assert this, when young fills, we keep the LAB around, but flushed.
99 // assert(_state != flushed, "Sanity");
100 HeapWord* obj = top();
101 HeapWord* new_top = obj + size;
102 // The 'new_top>obj' check is needed to detect overflow of obj+size.
103 if (new_top > obj && new_top <= end()) {
104 set_top(new_top);
105 assert(is_object_aligned((intptr_t)obj) && is_object_aligned((intptr_t)new_top),
106 "checking alignment");
107 return obj;
108 }
110 return NULL;
111 }
113 debug_only(virtual bool lab_is_valid(MemRegion lab));
114 };
116 class PSOldPromotionLAB : public PSPromotionLAB {
117 private:
118 ObjectStartArray* _start_array;
120 public:
121 PSOldPromotionLAB() : _start_array(NULL) { }
122 PSOldPromotionLAB(ObjectStartArray* start_array) : _start_array(start_array) { }
124 void set_start_array(ObjectStartArray* start_array) { _start_array = start_array; }
126 void flush();
128 // Not MT safe
129 HeapWord* allocate(size_t size) {
130 // Cannot test for this now that we're doing promotion failures
131 // assert(_state != flushed, "Sanity");
132 assert(_start_array != NULL, "Sanity");
133 HeapWord* obj = top();
134 HeapWord* new_top = obj + size;
135 // The 'new_top>obj' check is needed to detect overflow of obj+size.
136 if (new_top > obj && new_top <= end()) {
137 set_top(new_top);
138 assert(is_object_aligned((intptr_t)obj) && is_object_aligned((intptr_t)new_top),
139 "checking alignment");
140 _start_array->allocate_block(obj);
141 return obj;
142 }
144 return NULL;
145 }
147 debug_only(virtual bool lab_is_valid(MemRegion lab));
148 };
150 #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPROMOTIONLAB_HPP