Thu, 09 Apr 2015 15:59:26 +0200
8066771: Refactor VM GC operations caused by allocation failure
Reviewed-by: brutisso, jmasa
/*
 * Copyright (c) 2002, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/mutableSpace.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/memRegion.hpp"
#include "memory/padded.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.psgc.inline.hpp"
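
// Mute gcc's printf-format warnings for this file; presumably this covers the
// hand-rolled "%x"/"%d" oop format used in the TraceScavenge output near the
// end of the file.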
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

PaddedEnd<PSPromotionManager>* PSPromotionManager::_manager_array = NULL;
OopStarTaskQueueSet*           PSPromotionManager::_stack_array_depth = NULL;
PSOldGen*                      PSPromotionManager::_old_gen = NULL;
MutableSpace*                  PSPromotionManager::_young_space = NULL;
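
// There is one PSPromotionManager per GC worker thread, plus one extra slot
// (index ParallelGCThreads) reserved for the VM thread. The padded array is
// set up once by initialize() below.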
void PSPromotionManager::initialize() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  _old_gen = heap->old_gen();
  _young_space = heap->young_gen()->to_space();

  // To prevent false sharing, we pad the PSPromotionManagers
  // and make sure that the first instance starts at a cache line.
  assert(_manager_array == NULL, "Attempt to initialize twice");
  _manager_array = PaddedArray<PSPromotionManager, mtGC>::create_unfreeable(ParallelGCThreads + 1);
  guarantee(_manager_array != NULL, "Could not initialize promotion manager");

  _stack_array_depth = new OopStarTaskQueueSet(ParallelGCThreads);
  guarantee(_stack_array_depth != NULL, "Could not initialize promotion manager");

  // Create and register the PSPromotionManager(s) for the worker threads.
  for(uint i=0; i<ParallelGCThreads; i++) {
    stack_array_depth()->register_queue(i, _manager_array[i].claimed_stack_depth());
  }
  // The VMThread gets its own PSPromotionManager, which is not available
  // for work stealing.
}

PSPromotionManager* PSPromotionManager::gc_thread_promotion_manager(int index) {
  assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range");
  assert(_manager_array != NULL, "Sanity");
  return &_manager_array[index];
}

PSPromotionManager* PSPromotionManager::vm_thread_promotion_manager() {
  assert(_manager_array != NULL, "Sanity");
  return &_manager_array[ParallelGCThreads];
}
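
// Called by the VM thread before the parallel scavenge starts: re-read the
// to-space pointer (the young generation may have been resized since the last
// GC) and reset the per-GC state of every manager, including the VM thread's.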
void PSPromotionManager::pre_scavenge() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  _young_space = heap->young_gen()->to_space();

  for(uint i=0; i<ParallelGCThreads+1; i++) {
    manager_array(i)->reset();
  }
}
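
// Called once all GC worker threads have finished: report any recorded
// promotion failures to the young GC tracer and flush each manager's
// promotion LABs. Returns true if any manager saw a promotion failure.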
bool PSPromotionManager::post_scavenge(YoungGCTracer& gc_tracer) {
  bool promotion_failure_occurred = false;

  TASKQUEUE_STATS_ONLY(if (PrintGCDetails && ParallelGCVerbose) print_stats());
  for (uint i = 0; i < ParallelGCThreads + 1; i++) {
    PSPromotionManager* manager = manager_array(i);
    assert(manager->claimed_stack_depth()->is_empty(), "should be empty");
    if (manager->_promotion_failed_info.has_failed()) {
      gc_tracer.report_promotion_failed(manager->_promotion_failed_info);
      promotion_failure_occurred = true;
    }
    manager->flush_labs();
  }
  return promotion_failure_occurred;
}

#if TASKQUEUE_STATS
void
PSPromotionManager::print_taskqueue_stats(uint i) const {
  tty->print("%3u ", i);
  _claimed_stack_depth.stats.print();
  tty->cr();
}

void
PSPromotionManager::print_local_stats(uint i) const {
  #define FMT " " SIZE_FORMAT_W(10)
  tty->print_cr("%3u" FMT FMT FMT FMT, i, _masked_pushes, _masked_steals,
                _arrays_chunked, _array_chunks_processed);
  #undef FMT
}

static const char* const pm_stats_hdr[] = {
  "    --------masked-------     arrays      array",
  "thr       push      steal    chunked     chunks",
  "--- ---------- ---------- ---------- ----------"
};

void
PSPromotionManager::print_stats() {
  tty->print_cr("== GC Tasks Stats, GC %3d",
                Universe::heap()->total_collections());

  tty->print("thr "); TaskQueueStats::print_header(1); tty->cr();
  tty->print("--- "); TaskQueueStats::print_header(2); tty->cr();
  for (uint i = 0; i < ParallelGCThreads + 1; ++i) {
    manager_array(i)->print_taskqueue_stats(i);
  }

  const uint hlines = sizeof(pm_stats_hdr) / sizeof(pm_stats_hdr[0]);
  for (uint i = 0; i < hlines; ++i) tty->print_cr("%s", pm_stats_hdr[i]);
  for (uint i = 0; i < ParallelGCThreads + 1; ++i) {
    manager_array(i)->print_local_stats(i);
  }
}

void
PSPromotionManager::reset_stats() {
  claimed_stack_depth()->stats.reset();
  _masked_pushes = _masked_steals = 0;
  _arrays_chunked = _array_chunks_processed = 0;
}
#endif // TASKQUEUE_STATS
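
// The managers themselves are default-constructed in place inside the padded
// array created by initialize(); all per-GC state is (re)set through reset().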
PSPromotionManager::PSPromotionManager() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // We set the old lab's start array.
  _old_lab.set_start_array(old_gen()->start_array());

  uint queue_size;
  claimed_stack_depth()->initialize();
  queue_size = claimed_stack_depth()->max_elems();

  _totally_drain = (ParallelGCThreads == 1) || (GCDrainStackTargetSize == 0);
  if (_totally_drain) {
    _target_stack_size = 0;
  } else {
    // Don't let the target stack size be more than 1/4 of the entries.
    _target_stack_size = (uint) MIN2((uint) GCDrainStackTargetSize,
                                     (uint) (queue_size / 4));
  }

  _array_chunk_size = ParGCArrayScanChunk;
  // Let's choose 1.5x the chunk size.
  _min_array_size_for_chunking = 3 * _array_chunk_size / 2;

  reset();
}
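
// Prepare this manager for the next scavenge: position empty (zero-sized)
// LABs at the current tops of to-space and the old generation's object space,
// and clear the promotion-failure and statistics state.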
void PSPromotionManager::reset() {
  assert(stacks_empty(), "reset of non-empty stack");

  // We need to get an assert in here to make sure the labs are always flushed.

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Do not prefill the LABs; that would only waste heap space.
  HeapWord* lab_base = young_space()->top();
  _young_lab.initialize(MemRegion(lab_base, (size_t)0));
  _young_gen_is_full = false;

  lab_base = old_gen()->object_space()->top();
  _old_lab.initialize(MemRegion(lab_base, (size_t)0));
  _old_gen_is_full = false;

  _promotion_failed_info.reset();

  TASKQUEUE_STATS_ONLY(reset_stats());
}
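
// Drain this manager's depth-first stack. The overflow stack is always
// emptied first (it cannot be stolen from); the local queue is then drained
// either completely or only down to _target_stack_size, so that other threads
// can still find work to steal.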
void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
  totally_drain = totally_drain || _totally_drain;

#ifdef ASSERT
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  MutableSpace* to_space = heap->young_gen()->to_space();
  MutableSpace* old_space = heap->old_gen()->object_space();
#endif /* ASSERT */

  OopStarTaskQueue* const tq = claimed_stack_depth();
  do {
    StarTask p;

    // Drain overflow stack first, so other threads can steal from
    // claimed stack while we work.
    while (tq->pop_overflow(p)) {
      process_popped_location_depth(p);
    }

    if (totally_drain) {
      while (tq->pop_local(p)) {
        process_popped_location_depth(p);
      }
    } else {
      while (tq->size() > _target_stack_size && tq->pop_local(p)) {
        process_popped_location_depth(p);
      }
    }
  } while ((totally_drain && !tq->taskqueue_empty()) || !tq->overflow_empty());

  assert(!totally_drain || tq->taskqueue_empty(), "Sanity");
  assert(totally_drain || tq->size() <= _target_stack_size, "Sanity");
  assert(tq->overflow_empty(), "Sanity");
}

void PSPromotionManager::flush_labs() {
  assert(stacks_empty(), "Attempt to flush lab with live stack");

  // If either promotion lab fills up, we can flush the
  // lab but not refill it, so check first.
  assert(!_young_lab.is_flushed() || _young_gen_is_full, "Sanity");
  if (!_young_lab.is_flushed())
    _young_lab.flush();

  assert(!_old_lab.is_flushed() || _old_gen_is_full, "Sanity");
  if (!_old_lab.is_flushed())
    _old_lab.flush();

  // Let PSScavenge know if we overflowed
  if (_young_gen_is_full) {
    PSScavenge::set_survivor_overflow(true);
  }
}
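
// Scan the element fields [start, end) of the promoted array copy, claiming
// or forwarding any referents that still need to be scavenged.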
template <class T> void PSPromotionManager::process_array_chunk_work(
                                                 oop obj,
                                                 int start, int end) {
  assert(start <= end, "invariant");
  T* const base      = (T*)objArrayOop(obj)->base();
  T* p               = base + start;
  T* const chunk_end = base + end;
  while (p < chunk_end) {
    if (PSScavenge::should_scavenge(p)) {
      claim_or_forward_depth(p);
    }
    ++p;
  }
}
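
// Large objArrays are scavenged in chunks of _array_chunk_size elements
// rather than all at once. The length field of the already-forwarded old copy
// acts as a cursor: each call scans the current tail chunk and, while enough
// elements remain, pushes the old array back onto the stack as a masked
// ("chunked") oop; the final call restores the real length from the new copy.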
void PSPromotionManager::process_array_chunk(oop old) {
  assert(PSChunkLargeArrays, "invariant");
  assert(old->is_objArray(), "invariant");
  assert(old->is_forwarded(), "invariant");

  TASKQUEUE_STATS_ONLY(++_array_chunks_processed);

  oop const obj = old->forwardee();

  int start;
  int const end = arrayOop(old)->length();
  if (end > (int) _min_array_size_for_chunking) {
    // We'll chunk more.
    start = end - _array_chunk_size;
    assert(start > 0, "invariant");
    arrayOop(old)->set_length(start);
    push_depth(mask_chunked_array_oop(old));
    TASKQUEUE_STATS_ONLY(++_masked_pushes);
  } else {
    // This is the final chunk for this array.
    start = 0;
    int const actual_length = arrayOop(obj)->length();
    arrayOop(old)->set_length(actual_length);
  }

  if (UseCompressedOops) {
    process_array_chunk_work<narrowOop>(obj, start, end);
  } else {
    process_array_chunk_work<oop>(obj, start, end);
  }
}
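
// Handle an object that could not be copied: it stays where it is, forwarded
// to itself. The CAS on the header decides which thread "owns" the failure;
// the winner records it, pushes the object's contents so its referents are
// still scavenged, and lets PSScavenge preserve the original mark word so it
// can be restored after the scavenge.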
oop PSPromotionManager::oop_promotion_failed(oop obj, markOop obj_mark) {
  assert(_old_gen_is_full || PromotionFailureALot, "Sanity");

  // Attempt to CAS in the header.
  // This tests if the header is still the same as when
  // this started. If it is the same (i.e., no forwarding
  // pointer has been installed), then this thread owns it.
  if (obj->cas_forward_to(obj, obj_mark)) {
    // We won any races; we "own" this object.
    assert(obj == obj->forwardee(), "Sanity");

    _promotion_failed_info.register_copy_failure(obj->size());

    obj->push_contents(this);

    // Save the mark if needed.
    PSScavenge::oop_promotion_failed(obj, obj_mark);
  } else {
    // We lost; someone else "owns" this object.
    guarantee(obj->is_forwarded(), "Object must be forwarded if the cas failed.");

    // No unallocation to worry about.
    obj = obj->forwardee();
  }

#ifndef PRODUCT
  if (TraceScavenge) {
    gclog_or_tty->print_cr("{%s %s 0x%x (%d)}",
                           "promotion-failure",
                           obj->klass()->internal_name(),
                           (void *)obj, obj->size());
  }
#endif

  return obj;
}