Thu, 22 Sep 2011 10:57:37 -0700
6484982: G1: process references during evacuation pauses
Summary: G1 now uses two reference processors - one is used by concurrent marking and the other is used by STW GCs (both full and incremental evacuation pauses). In an evacuation pause, the reference processor is embedded into the closures used to scan objects. Doing so causes reference objects to be 'discovered' by the reference processor. At the end of the evacuation pause, these discovered reference objects are processed - preserving (and copying) referent objects (and their reachable graphs) as appropriate.
Reviewed-by: ysr, jwilhelm, brutisso, stefank, tonyp
1 /*
2 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "classfile/systemDictionary.hpp"
27 #include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
28 #include "gc_implementation/parallelScavenge/objectStartArray.hpp"
29 #include "gc_implementation/parallelScavenge/parMarkBitMap.hpp"
30 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
31 #include "gc_implementation/parallelScavenge/psCompactionManager.hpp"
32 #include "gc_implementation/parallelScavenge/psOldGen.hpp"
33 #include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
34 #include "oops/objArrayKlass.inline.hpp"
35 #include "oops/oop.hpp"
36 #include "oops/oop.inline.hpp"
37 #include "oops/oop.pcgc.inline.hpp"
38 #include "utilities/stack.inline.hpp"
// Class-wide (static) state shared by all ParCompactionManager instances.
// Everything here is NULL until ParCompactionManager::initialize() runs
// (_old_gen and _start_array are also refreshed by the constructor).

// Cached pointer to the heap's old generation (set in the constructor).
PSOldGen* ParCompactionManager::_old_gen = NULL;
// Per-thread managers, indexed by GC worker id; initialize() allocates one
// extra slot at the end for the VMThread's manager.
ParCompactionManager** ParCompactionManager::_manager_array = NULL;
// Queue set over the per-manager marking stacks, enabling work stealing.
OopTaskQueueSet* ParCompactionManager::_stack_array = NULL;
// Queue set over the per-manager objArray chunk stacks (work stealing).
ParCompactionManager::ObjArrayTaskQueueSet*
ParCompactionManager::_objarray_queues = NULL;
// Cached old-gen object start array (set in the constructor).
ObjectStartArray* ParCompactionManager::_start_array = NULL;
// The marking bitmap passed to initialize().
ParMarkBitMap* ParCompactionManager::_mark_bitmap = NULL;
// Queue set over the per-manager region stacks (work stealing).
RegionTaskQueueSet* ParCompactionManager::_region_array = NULL;
49 ParCompactionManager::ParCompactionManager() :
50 _action(CopyAndUpdate) {
52 ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
53 assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
55 _old_gen = heap->old_gen();
56 _start_array = old_gen()->start_array();
58 marking_stack()->initialize();
59 _objarray_stack.initialize();
60 region_stack()->initialize();
61 }
63 void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
64 assert(PSParallelCompact::gc_task_manager() != NULL,
65 "Needed for initialization");
67 _mark_bitmap = mbm;
69 uint parallel_gc_threads = PSParallelCompact::gc_task_manager()->workers();
71 assert(_manager_array == NULL, "Attempt to initialize twice");
72 _manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads+1 );
73 guarantee(_manager_array != NULL, "Could not allocate manager_array");
75 _stack_array = new OopTaskQueueSet(parallel_gc_threads);
76 guarantee(_stack_array != NULL, "Could not allocate stack_array");
77 _objarray_queues = new ObjArrayTaskQueueSet(parallel_gc_threads);
78 guarantee(_objarray_queues != NULL, "Could not allocate objarray_queues");
79 _region_array = new RegionTaskQueueSet(parallel_gc_threads);
80 guarantee(_region_array != NULL, "Could not allocate region_array");
82 // Create and register the ParCompactionManager(s) for the worker threads.
83 for(uint i=0; i<parallel_gc_threads; i++) {
84 _manager_array[i] = new ParCompactionManager();
85 guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
86 stack_array()->register_queue(i, _manager_array[i]->marking_stack());
87 _objarray_queues->register_queue(i, &_manager_array[i]->_objarray_stack);
88 region_array()->register_queue(i, _manager_array[i]->region_stack());
89 }
91 // The VMThread gets its own ParCompactionManager, which is not available
92 // for work stealing.
93 _manager_array[parallel_gc_threads] = new ParCompactionManager();
94 guarantee(_manager_array[parallel_gc_threads] != NULL,
95 "Could not create ParCompactionManager");
96 assert(PSParallelCompact::gc_task_manager()->workers() != 0,
97 "Not initialized?");
98 }
100 bool ParCompactionManager::should_update() {
101 assert(action() != NotValid, "Action is not set");
102 return (action() == ParCompactionManager::Update) ||
103 (action() == ParCompactionManager::CopyAndUpdate) ||
104 (action() == ParCompactionManager::UpdateAndCopy);
105 }
107 bool ParCompactionManager::should_copy() {
108 assert(action() != NotValid, "Action is not set");
109 return (action() == ParCompactionManager::Copy) ||
110 (action() == ParCompactionManager::CopyAndUpdate) ||
111 (action() == ParCompactionManager::UpdateAndCopy);
112 }
114 bool ParCompactionManager::should_verify_only() {
115 assert(action() != NotValid, "Action is not set");
116 return action() == ParCompactionManager::VerifyUpdate;
117 }
119 bool ParCompactionManager::should_reset_only() {
120 assert(action() != NotValid, "Action is not set");
121 return action() == ParCompactionManager::ResetObjects;
122 }
124 ParCompactionManager*
125 ParCompactionManager::gc_thread_compaction_manager(int index) {
126 assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range");
127 assert(_manager_array != NULL, "Sanity");
128 return _manager_array[index];
129 }
131 void ParCompactionManager::reset() {
132 for(uint i = 0; i < ParallelGCThreads + 1; i++) {
133 assert(manager_array(i)->revisit_klass_stack()->is_empty(), "sanity");
134 assert(manager_array(i)->revisit_mdo_stack()->is_empty(), "sanity");
135 }
136 }
138 void ParCompactionManager::follow_marking_stacks() {
139 do {
140 // Drain the overflow stack first, to allow stealing from the marking stack.
141 oop obj;
142 while (marking_stack()->pop_overflow(obj)) {
143 obj->follow_contents(this);
144 }
145 while (marking_stack()->pop_local(obj)) {
146 obj->follow_contents(this);
147 }
149 // Process ObjArrays one at a time to avoid marking stack bloat.
150 ObjArrayTask task;
151 if (_objarray_stack.pop_overflow(task)) {
152 objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint();
153 k->oop_follow_contents(this, task.obj(), task.index());
154 } else if (_objarray_stack.pop_local(task)) {
155 objArrayKlass* const k = (objArrayKlass*)task.obj()->blueprint();
156 k->oop_follow_contents(this, task.obj(), task.index());
157 }
158 } while (!marking_stacks_empty());
160 assert(marking_stacks_empty(), "Sanity");
161 }
163 void ParCompactionManager::drain_region_stacks() {
164 do {
165 // Drain overflow stack first so other threads can steal.
166 size_t region_index;
167 while (region_stack()->pop_overflow(region_index)) {
168 PSParallelCompact::fill_and_update_region(this, region_index);
169 }
171 while (region_stack()->pop_local(region_index)) {
172 PSParallelCompact::fill_and_update_region(this, region_index);
173 }
174 } while (!region_stack()->is_empty());
175 }