Sat, 23 Nov 2013 12:25:13 +0100
8028128: Add a type safe alternative for working with counter based data
Reviewed-by: dholmes, egahlin
/*
 * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/mutableSpace.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/memRegion.hpp"
#include "memory/padded.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.psgc.inline.hpp"
PaddedEnd<PSPromotionManager>* PSPromotionManager::_manager_array = NULL;
OopStarTaskQueueSet*           PSPromotionManager::_stack_array_depth = NULL;
PSOldGen*                      PSPromotionManager::_old_gen = NULL;
MutableSpace*                  PSPromotionManager::_young_space = NULL;
void PSPromotionManager::initialize() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  _old_gen = heap->old_gen();
  _young_space = heap->young_gen()->to_space();

  // To prevent false sharing, we pad the PSPromotionManagers
  // and make sure that the first instance starts at a cache line.
  assert(_manager_array == NULL, "Attempt to initialize twice");
  _manager_array = PaddedArray<PSPromotionManager, mtGC>::create_unfreeable(ParallelGCThreads + 1);
  guarantee(_manager_array != NULL, "Could not initialize promotion manager");

  _stack_array_depth = new OopStarTaskQueueSet(ParallelGCThreads);
  guarantee(_stack_array_depth != NULL, "Could not initialize promotion manager");

  // Create and register the PSPromotionManager(s) for the worker threads.
  for(uint i=0; i<ParallelGCThreads; i++) {
    stack_array_depth()->register_queue(i, _manager_array[i].claimed_stack_depth());
  }
  // The VMThread gets its own PSPromotionManager, which is not available
  // for work stealing.
}
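
// Per-thread accessors: GC worker thread i uses slot i of the manager array;
// the VM thread uses the extra slot at index ParallelGCThreads, whose queue
// is never registered with the stack array and so is not subject to stealing.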
PSPromotionManager* PSPromotionManager::gc_thread_promotion_manager(int index) {
  assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range");
  assert(_manager_array != NULL, "Sanity");
  return &_manager_array[index];
}
PSPromotionManager* PSPromotionManager::vm_thread_promotion_manager() {
  assert(_manager_array != NULL, "Sanity");
  return &_manager_array[ParallelGCThreads];
}
void PSPromotionManager::pre_scavenge() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  _young_space = heap->young_gen()->to_space();

  for(uint i=0; i<ParallelGCThreads+1; i++) {
    manager_array(i)->reset();
  }
}
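
// Runs after all scavenge work has completed. Every manager (workers plus the
// VM thread's) must have an empty claimed stack; any recorded promotion
// failures are reported to the GC tracer and the promotion LABs are flushed.
// Returns true if any manager observed a promotion failure.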
bool PSPromotionManager::post_scavenge(YoungGCTracer& gc_tracer) {
  bool promotion_failure_occurred = false;

  TASKQUEUE_STATS_ONLY(if (PrintGCDetails && ParallelGCVerbose) print_stats());
  for (uint i = 0; i < ParallelGCThreads + 1; i++) {
    PSPromotionManager* manager = manager_array(i);
    assert(manager->claimed_stack_depth()->is_empty(), "should be empty");
    if (manager->_promotion_failed_info.has_failed()) {
      gc_tracer.report_promotion_failed(manager->_promotion_failed_info);
      promotion_failure_occurred = true;
    }
    manager->flush_labs();
  }
  return promotion_failure_occurred;
}
#if TASKQUEUE_STATS
void
PSPromotionManager::print_taskqueue_stats(uint i) const {
  tty->print("%3u ", i);
  _claimed_stack_depth.stats.print();
  tty->cr();
}

void
PSPromotionManager::print_local_stats(uint i) const {
#define FMT " " SIZE_FORMAT_W(10)
  tty->print_cr("%3u" FMT FMT FMT FMT, i, _masked_pushes, _masked_steals,
                _arrays_chunked, _array_chunks_processed);
#undef FMT
}

static const char* const pm_stats_hdr[] = {
  "    --------masked-------     arrays      array",
  "thr       push      steal    chunked     chunks",
  "--- ---------- ---------- ---------- ----------"
};

void
PSPromotionManager::print_stats() {
  tty->print_cr("== GC Tasks Stats, GC %3d",
                Universe::heap()->total_collections());

  tty->print("thr "); TaskQueueStats::print_header(1); tty->cr();
  tty->print("--- "); TaskQueueStats::print_header(2); tty->cr();
  for (uint i = 0; i < ParallelGCThreads + 1; ++i) {
    manager_array(i)->print_taskqueue_stats(i);
  }

  const uint hlines = sizeof(pm_stats_hdr) / sizeof(pm_stats_hdr[0]);
  for (uint i = 0; i < hlines; ++i) tty->print_cr(pm_stats_hdr[i]);
  for (uint i = 0; i < ParallelGCThreads + 1; ++i) {
    manager_array(i)->print_local_stats(i);
  }
}

void
PSPromotionManager::reset_stats() {
  claimed_stack_depth()->stats.reset();
  _masked_pushes = _masked_steals = 0;
  _arrays_chunked = _array_chunks_processed = 0;
}
#endif // TASKQUEUE_STATS
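
// Each manager owns its own depth-first claimed stack and a pair of promotion
// LABs (young and old). The constructor only sizes the queue and the drain
// target; all per-GC state is (re)established in reset().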
PSPromotionManager::PSPromotionManager() {
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // We set the old lab's start array.
  _old_lab.set_start_array(old_gen()->start_array());

  uint queue_size;
  claimed_stack_depth()->initialize();
  queue_size = claimed_stack_depth()->max_elems();

  _totally_drain = (ParallelGCThreads == 1) || (GCDrainStackTargetSize == 0);
  if (_totally_drain) {
    _target_stack_size = 0;
  } else {
    // don't let the target stack size be more than 1/4 of the entries
    _target_stack_size = (uint) MIN2((uint) GCDrainStackTargetSize,
                                     (uint) (queue_size / 4));
  }

  _array_chunk_size = ParGCArrayScanChunk;
  // let's choose 1.5x the chunk size
  _min_array_size_for_chunking = 3 * _array_chunk_size / 2;

  reset();
}
void PSPromotionManager::reset() {
  assert(stacks_empty(), "reset of non-empty stack");

  // We need to get an assert in here to make sure the labs are always flushed.

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Do not prefill the LABs; this avoids heap wastage.
  HeapWord* lab_base = young_space()->top();
  _young_lab.initialize(MemRegion(lab_base, (size_t)0));
  _young_gen_is_full = false;

  lab_base = old_gen()->object_space()->top();
  _old_lab.initialize(MemRegion(lab_base, (size_t)0));
  _old_gen_is_full = false;

  _promotion_failed_info.reset();

  TASKQUEUE_STATS_ONLY(reset_stats());
}
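
// Drain this manager's depth-first claimed stack. When totally_drain is set
// the local queue is emptied completely; otherwise entries are popped only
// down to _target_stack_size so that other workers still have work to steal.
// The overflow stack is always emptied.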
void PSPromotionManager::drain_stacks_depth(bool totally_drain) {
  totally_drain = totally_drain || _totally_drain;

#ifdef ASSERT
  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  MutableSpace* to_space = heap->young_gen()->to_space();
  MutableSpace* old_space = heap->old_gen()->object_space();
#endif /* ASSERT */

  OopStarTaskQueue* const tq = claimed_stack_depth();
  do {
    StarTask p;

    // Drain overflow stack first, so other threads can steal from
    // claimed stack while we work.
    while (tq->pop_overflow(p)) {
      process_popped_location_depth(p);
    }

    if (totally_drain) {
      while (tq->pop_local(p)) {
        process_popped_location_depth(p);
      }
    } else {
      while (tq->size() > _target_stack_size && tq->pop_local(p)) {
        process_popped_location_depth(p);
      }
    }
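
    // Loop until the overflow stack is empty and, when totally draining,
    // until the local queue is empty as well. Note that '&&' binds tighter
    // than '||' in the condition below.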
  } while (totally_drain && !tq->taskqueue_empty() || !tq->overflow_empty());

  assert(!totally_drain || tq->taskqueue_empty(), "Sanity");
  assert(totally_drain || tq->size() <= _target_stack_size, "Sanity");
  assert(tq->overflow_empty(), "Sanity");
}
void PSPromotionManager::flush_labs() {
  assert(stacks_empty(), "Attempt to flush lab with live stack");

  // If either promotion lab fills up, we can flush the
  // lab but not refill it, so check first.
  assert(!_young_lab.is_flushed() || _young_gen_is_full, "Sanity");
  if (!_young_lab.is_flushed())
    _young_lab.flush();

  assert(!_old_lab.is_flushed() || _old_gen_is_full, "Sanity");
  if (!_old_lab.is_flushed())
    _old_lab.flush();

  // Let PSScavenge know if we overflowed
  if (_young_gen_is_full) {
    PSScavenge::set_survivor_overflow(true);
  }
}
template <class T> void PSPromotionManager::process_array_chunk_work(
                                                 oop obj,
                                                 int start, int end) {
  assert(start <= end, "invariant");
  T* const base      = (T*)objArrayOop(obj)->base();
  T* p               = base + start;
  T* const chunk_end = base + end;
  while (p < chunk_end) {
    if (PSScavenge::should_scavenge(p)) {
      claim_or_forward_depth(p);
    }
    ++p;
  }
}
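
// Scan one chunk of a large objArray. The forwardee (obj) keeps the real
// length, while the length field of the old, forwarded copy is used as a
// cursor: while enough elements remain, the cursor is moved back by
// _array_chunk_size and the old oop is re-pushed with the chunked-array mask
// bit set; the final chunk restores the real length before scanning.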
void PSPromotionManager::process_array_chunk(oop old) {
  assert(PSChunkLargeArrays, "invariant");
  assert(old->is_objArray(), "invariant");
  assert(old->is_forwarded(), "invariant");

  TASKQUEUE_STATS_ONLY(++_array_chunks_processed);

  oop const obj = old->forwardee();

  int start;
  int const end = arrayOop(old)->length();
  if (end > (int) _min_array_size_for_chunking) {
    // we'll chunk more
    start = end - _array_chunk_size;
    assert(start > 0, "invariant");
    arrayOop(old)->set_length(start);
    push_depth(mask_chunked_array_oop(old));
    TASKQUEUE_STATS_ONLY(++_masked_pushes);
  } else {
    // this is the final chunk for this array
    start = 0;
    int const actual_length = arrayOop(obj)->length();
    arrayOop(old)->set_length(actual_length);
  }

  if (UseCompressedOops) {
    process_array_chunk_work<narrowOop>(obj, start, end);
  } else {
    process_array_chunk_work<oop>(obj, start, end);
  }
}
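
// Promotion of obj failed, so the object stays in place. A CAS self-forwards
// the object so that exactly one thread takes ownership of it, records the
// failure, and pushes its contents for scanning; losing threads simply follow
// the forwarding pointer that the winner installed.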
oop PSPromotionManager::oop_promotion_failed(oop obj, markOop obj_mark) {
  assert(_old_gen_is_full || PromotionFailureALot, "Sanity");

  // Attempt to CAS in the header.
  // This tests if the header is still the same as when
  // this started.  If it is the same (i.e., no forwarding
  // pointer has been installed), then this thread owns
  // it.
  if (obj->cas_forward_to(obj, obj_mark)) {
    // We won any races, we "own" this object.
    assert(obj == obj->forwardee(), "Sanity");

    _promotion_failed_info.register_copy_failure(obj->size());

    obj->push_contents(this);

    // Save the mark if needed
    PSScavenge::oop_promotion_failed(obj, obj_mark);
  } else {
    // We lost, someone else "owns" this object.
    guarantee(obj->is_forwarded(), "Object must be forwarded if the cas failed.");

    // No unallocation to worry about.
    obj = obj->forwardee();
  }

#ifndef PRODUCT
  if (TraceScavenge) {
    gclog_or_tty->print_cr("{%s %s 0x%x (%d)}",
                           "promotion-failure",
                           obj->klass()->internal_name(),
                           (void *)obj, obj->size());
  }
#endif

  return obj;
}