Thu, 13 Jun 2013 22:02:40 -0700
8014431: cleanup warnings indicated by the -Wunused-value compiler option on linux
Reviewed-by: dholmes, coleenp
Contributed-by: jeremymanson@google.com, calvin.cheung@oracle.com
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/cardTableRS.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/genMarkSweep.hpp"
#include "memory/genOopClosures.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/generation.hpp"
#include "memory/generation.inline.hpp"
#include "memory/space.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/copy.hpp"
#include "utilities/events.hpp"

Generation::Generation(ReservedSpace rs, size_t initial_size, int level) :
  _level(level),
  _ref_processor(NULL) {
  if (!_virtual_space.initialize(rs, initial_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "object heap");
  }
  // Mangle all of the initial generation.
  if (ZapUnusedHeapArea) {
    MemRegion mangle_region((HeapWord*)_virtual_space.low(),
                            (HeapWord*)_virtual_space.high());
    SpaceMangler::mangle_region(mangle_region);
  }
  _reserved = MemRegion((HeapWord*)_virtual_space.low_boundary(),
                        (HeapWord*)_virtual_space.high_boundary());
}

GenerationSpec* Generation::spec() {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  assert(0 <= level() && level() < gch->_n_gens, "Bad gen level");
  return gch->_gen_specs[level()];
}

size_t Generation::max_capacity() const {
  return reserved().byte_size();
}

void Generation::print_heap_change(size_t prev_used) const {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}

// By default we get a single-threaded default reference processor;
// generations needing multi-threaded reference processing or discovery override this method.
void Generation::ref_processor_init() {
  assert(_ref_processor == NULL, "a reference processor already exists");
  assert(!_reserved.is_empty(), "empty generation?");
  _ref_processor = new ReferenceProcessor(_reserved);    // a vanilla reference processor
  if (_ref_processor == NULL) {
    vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
  }
}

void Generation::print() const { print_on(tty); }

void Generation::print_on(outputStream* st) const {
  st->print(" %-20s", name());
  st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
            capacity()/K, used()/K);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               _virtual_space.low_boundary(),
               _virtual_space.high(),
               _virtual_space.high_boundary());
}

void Generation::print_summary_info() { print_summary_info_on(tty); }

void Generation::print_summary_info_on(outputStream* st) {
  StatRecord* sr = stat_record();
  double time = sr->accumulated_time.seconds();
  st->print_cr("[Accumulated GC generation %d time %3.7f secs, "
               "%d GC's, avg GC time %3.7f]",
               level(), time, sr->invocations,
               sr->invocations > 0 ? time / sr->invocations : 0.0);
}

// Utility iterator classes

class GenerationIsInReservedClosure : public SpaceClosure {
 public:
  const void* _p;
  Space* sp;
  virtual void do_space(Space* s) {
    if (sp == NULL) {
      if (s->is_in_reserved(_p)) sp = s;
    }
  }
  GenerationIsInReservedClosure(const void* p) : _p(p), sp(NULL) {}
};

class GenerationIsInClosure : public SpaceClosure {
 public:
  const void* _p;
  Space* sp;
  virtual void do_space(Space* s) {
    if (sp == NULL) {
      if (s->is_in(_p)) sp = s;
    }
  }
  GenerationIsInClosure(const void* p) : _p(p), sp(NULL) {}
};

bool Generation::is_in(const void* p) const {
  GenerationIsInClosure blk(p);
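  // Cast away const (same idiom as space_containing() and block_start() below)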
  ((Generation*)this)->space_iterate(&blk);
  return blk.sp != NULL;
}

DefNewGeneration* Generation::as_DefNewGeneration() {
  assert((kind() == Generation::DefNew) ||
         (kind() == Generation::ParNew) ||
         (kind() == Generation::ASParNew),
         "Wrong youngest generation type");
  return (DefNewGeneration*) this;
}

Generation* Generation::next_gen() const {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  int next = level() + 1;
  if (next < gch->_n_gens) {
    return gch->_gens[next];
  } else {
    return NULL;
  }
}

size_t Generation::max_contiguous_available() const {
  // The largest number of contiguous free words in this or any higher generation.
  size_t max = 0;
  for (const Generation* gen = this; gen != NULL; gen = gen->next_gen()) {
    size_t avail = gen->contiguous_available();
    if (avail > max) {
      max = avail;
    }
  }
  return max;
}
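
// Used (e.g. by the young generation before a scavenge) as a worst-case
// check: the attempt is only considered safe if the caller's entire
// potential promotion volume fits in the contiguous space counted above.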
bool Generation::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  size_t available = max_contiguous_available();
  bool   res = (available >= max_promotion_in_bytes);
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr(
      "Generation: promo attempt is%s safe: available(" SIZE_FORMAT ") %s max_promo(" SIZE_FORMAT ")",
      res? "":" not", available, res? ">=":"<",
      max_promotion_in_bytes);
  }
  return res;
}

// Ignores "ref" and calls allocate().
oop Generation::promote(oop obj, size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");

#ifndef PRODUCT
  if (Universe::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  HeapWord* result = allocate(obj_size, false);
  if (result != NULL) {
    Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
    return oop(result);
  } else {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    return gch->handle_failed_promotion(this, obj, obj_size);
  }
}

oop Generation::par_promote(int thread_num,
                            oop obj, markOop m, size_t word_sz) {
  // Could do a bad general impl here that gets a lock.  But no.
  ShouldNotCallThis();
  return NULL;
}

void Generation::par_promote_alloc_undo(int thread_num,
                                        HeapWord* obj, size_t word_sz) {
  // Could do a bad general impl here that gets a lock.  But no.
  guarantee(false, "No good general implementation.");
}

Space* Generation::space_containing(const void* p) const {
  GenerationIsInReservedClosure blk(p);
  // Cast away const
  ((Generation*)this)->space_iterate(&blk);
  return blk.sp;
}

// Some of these are mediocre general implementations.  Should be
// overridden to get better performance.

class GenerationBlockStartClosure : public SpaceClosure {
 public:
  const void* _p;
  HeapWord* _start;
  virtual void do_space(Space* s) {
    if (_start == NULL && s->is_in_reserved(_p)) {
      _start = s->block_start(_p);
    }
  }
  GenerationBlockStartClosure(const void* p) { _p = p; _start = NULL; }
};

HeapWord* Generation::block_start(const void* p) const {
  GenerationBlockStartClosure blk(p);
  // Cast away const
  ((Generation*)this)->space_iterate(&blk);
  return blk._start;
}

class GenerationBlockSizeClosure : public SpaceClosure {
 public:
  const HeapWord* _p;
  size_t size;
  virtual void do_space(Space* s) {
    if (size == 0 && s->is_in_reserved(_p)) {
      size = s->block_size(_p);
    }
  }
  GenerationBlockSizeClosure(const HeapWord* p) { _p = p; size = 0; }
};

size_t Generation::block_size(const HeapWord* p) const {
  GenerationBlockSizeClosure blk(p);
  // Cast away const
  ((Generation*)this)->space_iterate(&blk);
  assert(blk.size > 0, "seems reasonable");
  return blk.size;
}

class GenerationBlockIsObjClosure : public SpaceClosure {
 public:
  const HeapWord* _p;
  bool is_obj;
  virtual void do_space(Space* s) {
    if (!is_obj && s->is_in_reserved(_p)) {
      is_obj |= s->block_is_obj(_p);
    }
  }
  GenerationBlockIsObjClosure(const HeapWord* p) { _p = p; is_obj = false; }
};

bool Generation::block_is_obj(const HeapWord* p) const {
  GenerationBlockIsObjClosure blk(p);
  // Cast away const
  ((Generation*)this)->space_iterate(&blk);
  return blk.is_obj;
}

class GenerationOopIterateClosure : public SpaceClosure {
 public:
  ExtendedOopClosure* cl;
  MemRegion mr;
  virtual void do_space(Space* s) {
    s->oop_iterate(mr, cl);
  }
  GenerationOopIterateClosure(ExtendedOopClosure* _cl, MemRegion _mr) :
    cl(_cl), mr(_mr) {}
};

void Generation::oop_iterate(ExtendedOopClosure* cl) {
  GenerationOopIterateClosure blk(cl, _reserved);
  space_iterate(&blk);
}

void Generation::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
  GenerationOopIterateClosure blk(cl, mr);
  space_iterate(&blk);
}

void Generation::younger_refs_in_space_iterate(Space* sp,
                                               OopsInGenClosure* cl) {
  GenRemSet* rs = SharedHeap::heap()->rem_set();
  rs->younger_refs_in_space_iterate(sp, cl);
}

class GenerationObjIterateClosure : public SpaceClosure {
 private:
  ObjectClosure* _cl;
 public:
  virtual void do_space(Space* s) {
    s->object_iterate(_cl);
  }
  GenerationObjIterateClosure(ObjectClosure* cl) : _cl(cl) {}
};

void Generation::object_iterate(ObjectClosure* cl) {
  GenerationObjIterateClosure blk(cl);
  space_iterate(&blk);
}

class GenerationSafeObjIterateClosure : public SpaceClosure {
 private:
  ObjectClosure* _cl;
 public:
  virtual void do_space(Space* s) {
    s->safe_object_iterate(_cl);
  }
  GenerationSafeObjIterateClosure(ObjectClosure* cl) : _cl(cl) {}
};

void Generation::safe_object_iterate(ObjectClosure* cl) {
  GenerationSafeObjIterateClosure blk(cl);
  space_iterate(&blk);
}

void Generation::prepare_for_compaction(CompactPoint* cp) {
  // Generic implementation, can be specialized
  CompactibleSpace* space = first_compaction_space();
  while (space != NULL) {
    space->prepare_for_compaction(cp);
    space = space->next_compaction_space();
  }
}

class AdjustPointersClosure: public SpaceClosure {
 public:
  void do_space(Space* sp) {
    sp->adjust_pointers();
  }
};

void Generation::adjust_pointers() {
  // Note that this is done over all spaces, not just the compactible
  // ones.
  AdjustPointersClosure blk;
  space_iterate(&blk, true);
}

void Generation::compact() {
  CompactibleSpace* sp = first_compaction_space();
  while (sp != NULL) {
    sp->compact();
    sp = sp->next_compaction_space();
  }
}

CardGeneration::CardGeneration(ReservedSpace rs, size_t initial_byte_size,
                               int level,
                               GenRemSet* remset) :
  Generation(rs, initial_byte_size, level), _rs(remset),
  _shrink_factor(0), _min_heap_delta_bytes(), _capacity_at_prologue(),
  _used_at_prologue()
{
  HeapWord* start = (HeapWord*)rs.base();
  size_t reserved_byte_size = rs.size();
  assert((uintptr_t(start) & 3) == 0, "bad alignment");
  assert((reserved_byte_size & 3) == 0, "bad alignment");
  MemRegion reserved_mr(start, heap_word_size(reserved_byte_size));
  _bts = new BlockOffsetSharedArray(reserved_mr,
                                    heap_word_size(initial_byte_size));
  MemRegion committed_mr(start, heap_word_size(initial_byte_size));
  _rs->resize_covered_region(committed_mr);
  if (_bts == NULL)
    vm_exit_during_initialization("Could not allocate a BlockOffsetArray");

  // Verify that the start and end of this generation is the start of a card.
  // If this wasn't true, a single card could span more than one generation,
  // which would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
  guarantee(_rs->is_aligned(reserved_mr.start()), "generation must be card aligned");
  if (reserved_mr.end() != Universe::heap()->reserved_region().end()) {
    // Don't check at the very end of the heap as we'll assert that we're probing off
    // the end if we try.
    guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned");
  }
  _min_heap_delta_bytes = MinHeapDeltaBytes;
  _capacity_at_prologue = initial_byte_size;
  _used_at_prologue = 0;
}

bool CardGeneration::expand(size_t bytes, size_t expand_bytes) {
  assert_locked_or_safepoint(Heap_lock);
  if (bytes == 0) {
    return true;  // That's what grow_by(0) would return
  }
  size_t aligned_bytes = ReservedSpace::page_align_size_up(bytes);
  if (aligned_bytes == 0) {
    // The alignment caused the number of bytes to wrap.  An expand_by(0) will
    // return true with the implication that an expansion was done when it
    // was not.  A call to expand implies a best effort to expand by "bytes"
    // but not a guarantee.  Align down to give a best effort.  This is likely
    // the most that the generation can expand since it has some capacity to
    // start with.
    aligned_bytes = ReservedSpace::page_align_size_down(bytes);
  }
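  // Try the larger (hinted) expansion first, then fall back to the minimal
  // aligned request, and finally to whatever uncommitted space remains.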
  size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
  bool success = false;
  if (aligned_expand_bytes > aligned_bytes) {
    success = grow_by(aligned_expand_bytes);
  }
  if (!success) {
    success = grow_by(aligned_bytes);
  }
  if (!success) {
    success = grow_to_reserved();
  }
  if (PrintGC && Verbose) {
    if (success && GC_locker::is_active_and_needs_gc()) {
      gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
    }
  }

  return success;
}

// No young generation references, clear this generation's cards.
void CardGeneration::clear_remembered_set() {
  _rs->clear(reserved());
}

// Objects in this generation may have moved, invalidate this
// generation's cards.
void CardGeneration::invalidate_remembered_set() {
  _rs->invalidate(used_region());
}

void CardGeneration::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  size_t current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

  // We don't have floating point command-line arguments
  // Note:  argument processing ensures that MinHeapFreeRatio < 100.
  const double minimum_free_percentage = MinHeapFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;
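  // For example, with the default MinHeapFreeRatio of 40 the generation may be
  // at most 60% used, so minimum_desired_capacity below works out to roughly
  // used_after_gc / 0.60 (illustrative arithmetic, not an extra computation).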

  // Compute some numbers about the state of the heap.
  const size_t used_after_gc = used();
  const size_t capacity_after_gc = capacity();

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity = (size_t)MIN2(min_tmp, double(max_uintx));
  // Don't shrink less than the initial generation size
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  spec()->init_size());
  assert(used_after_gc <= minimum_desired_capacity, "sanity check");

  if (PrintGC && Verbose) {
    const size_t free_after_gc = free();
    const double free_percentage = ((double)free_after_gc) / capacity_after_gc;
    gclog_or_tty->print_cr("TenuredGeneration::compute_new_size: ");
    gclog_or_tty->print_cr("  "
                  "  minimum_free_percentage: %6.2f"
                  "  maximum_used_percentage: %6.2f",
                  minimum_free_percentage,
                  maximum_used_percentage);
    gclog_or_tty->print_cr("  "
                  "   free_after_gc   : %6.1fK"
                  "   used_after_gc   : %6.1fK"
                  "   capacity_after_gc   : %6.1fK",
                  free_after_gc / (double) K,
                  used_after_gc / (double) K,
                  capacity_after_gc / (double) K);
    gclog_or_tty->print_cr("  "
                  "   free_percentage: %6.2f",
                  free_percentage);
  }

  if (capacity_after_gc < minimum_desired_capacity) {
    // If we have less free space than we want then expand
    size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
    // Don't expand unless it's significant
    if (expand_bytes >= _min_heap_delta_bytes) {
      expand(expand_bytes, 0);  // safe if expansion fails
    }
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("    expanding:"
                    "  minimum_desired_capacity: %6.1fK"
                    "  expand_bytes: %6.1fK"
                    "  _min_heap_delta_bytes: %6.1fK",
                    minimum_desired_capacity / (double) K,
                    expand_bytes / (double) K,
                    _min_heap_delta_bytes / (double) K);
    }
    return;
  }

  // No expansion, now see if we want to shrink
  size_t shrink_bytes = 0;
  // We would never want to shrink more than this
  size_t max_shrink_bytes = capacity_after_gc - minimum_desired_capacity;

  if (MaxHeapFreeRatio < 100) {
    const double maximum_free_percentage = MaxHeapFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    spec()->init_size());
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("  "
                    "  maximum_free_percentage: %6.2f"
                    "  minimum_used_percentage: %6.2f",
                    maximum_free_percentage,
                    minimum_used_percentage);
      gclog_or_tty->print_cr("  "
                    "  _capacity_at_prologue: %6.1fK"
                    "  minimum_desired_capacity: %6.1fK"
                    "  maximum_desired_capacity: %6.1fK",
                    _capacity_at_prologue / (double) K,
                    minimum_desired_capacity / (double) K,
                    maximum_desired_capacity / (double) K);
    }
    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_after_gc > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_bytes = capacity_after_gc - maximum_desired_capacity;
      // We don't want to shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the heap up again for the next phase.  So we
      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call.  But if we recompute
      // size without shrinking, it goes back to 0%.
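      // Note: dividing by 100 before multiplying by the factor sacrifices a
      // little precision but avoids overflowing size_t for large capacities.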
      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
      assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
      if (current_shrink_factor == 0) {
        _shrink_factor = 10;
      } else {
        _shrink_factor = MIN2(current_shrink_factor * 4, (size_t) 100);
      }
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr("  "
                      "  shrinking:"
                      "  initSize: %.1fK"
                      "  maximum_desired_capacity: %.1fK",
                      spec()->init_size() / (double) K,
                      maximum_desired_capacity / (double) K);
        gclog_or_tty->print_cr("  "
                      "  shrink_bytes: %.1fK"
                      "  current_shrink_factor: %d"
                      "  new shrink factor: %d"
                      "  _min_heap_delta_bytes: %.1fK",
                      shrink_bytes / (double) K,
                      current_shrink_factor,
                      _shrink_factor,
                      _min_heap_delta_bytes / (double) K);
      }
    }
  }

  if (capacity_after_gc > _capacity_at_prologue) {
    // We might have expanded for promotions, in which case we might want to
    // take back that expansion if there's room after GC.  That keeps us from
    // stretching the heap with promotions when there's plenty of room.
    size_t expansion_for_promotion = capacity_after_gc - _capacity_at_prologue;
    expansion_for_promotion = MIN2(expansion_for_promotion, max_shrink_bytes);
    // We have two shrinking computations, take the largest
    shrink_bytes = MAX2(shrink_bytes, expansion_for_promotion);
    assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("  "
                    "  aggressive shrinking:"
                    "  _capacity_at_prologue: %.1fK"
                    "  capacity_after_gc: %.1fK"
                    "  expansion_for_promotion: %.1fK"
                    "  shrink_bytes: %.1fK",
                    capacity_after_gc / (double) K,
                    _capacity_at_prologue / (double) K,
                    expansion_for_promotion / (double) K,
                    shrink_bytes / (double) K);
    }
  }
  // Don't shrink unless it's significant
  if (shrink_bytes >= _min_heap_delta_bytes) {
    shrink(shrink_bytes);
  }
}

// Currently nothing to do.
void CardGeneration::prepare_for_verify() {}

void OneContigSpaceCardGeneration::collect(bool   full,
                                           bool   clear_all_soft_refs,
                                           size_t size,
                                           bool   is_tlab) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  SpecializationStats::clear();
  // Temporarily expand the span of our ref processor, so
  // refs discovery is over the entire heap, not just this generation
  ReferenceProcessorSpanMutator
    x(ref_processor(), gch->reserved_region());

  STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
  gc_timer->register_gc_start(os::elapsed_counter());

  SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());

  GenMarkSweep::invoke_at_safepoint(_level, ref_processor(), clear_all_soft_refs);

  gc_timer->register_gc_end(os::elapsed_counter());

  gc_tracer->report_gc_end(os::elapsed_counter(), gc_timer->time_partitions());

  SpecializationStats::print();
}

HeapWord*
OneContigSpaceCardGeneration::expand_and_allocate(size_t word_size,
                                                  bool is_tlab,
                                                  bool parallel) {
  assert(!is_tlab, "OneContigSpaceCardGeneration does not support TLAB allocation");
  if (parallel) {
    MutexLocker x(ParGCRareEvent_lock);
    HeapWord* result = NULL;
    size_t byte_size = word_size * HeapWordSize;
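    // Retry loop: expand, then attempt the allocation; give up only once the
    // remaining uncommitted space could not cover the request anyway.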
    while (true) {
      expand(byte_size, _min_heap_delta_bytes);
      if (GCExpandToAllocateDelayMillis > 0) {
        os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
      }
      result = _the_space->par_allocate(word_size);
      if (result != NULL) {
        return result;
      } else {
        // If there's not enough expansion space available, give up.
        if (_virtual_space.uncommitted_size() < byte_size) {
          return NULL;
        }
        // else try again
      }
    }
  } else {
    expand(word_size*HeapWordSize, _min_heap_delta_bytes);
    return _the_space->allocate(word_size);
  }
}

bool OneContigSpaceCardGeneration::expand(size_t bytes, size_t expand_bytes) {
  GCMutexLocker x(ExpandHeap_lock);
  return CardGeneration::expand(bytes, expand_bytes);
}

void OneContigSpaceCardGeneration::shrink(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  size_t size = ReservedSpace::page_align_size_down(bytes);
  if (size > 0) {
    shrink_by(size);
  }
}

size_t OneContigSpaceCardGeneration::capacity() const {
  return _the_space->capacity();
}

size_t OneContigSpaceCardGeneration::used() const {
  return _the_space->used();
}

size_t OneContigSpaceCardGeneration::free() const {
  return _the_space->free();
}

MemRegion OneContigSpaceCardGeneration::used_region() const {
  return the_space()->used_region();
}

size_t OneContigSpaceCardGeneration::unsafe_max_alloc_nogc() const {
  return _the_space->free();
}

size_t OneContigSpaceCardGeneration::contiguous_available() const {
  return _the_space->free() + _virtual_space.uncommitted_size();
}

bool OneContigSpaceCardGeneration::grow_by(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  bool result = _virtual_space.expand_by(bytes);
  if (result) {
    size_t new_word_size =
       heap_word_size(_virtual_space.committed_size());
    MemRegion mr(_the_space->bottom(), new_word_size);
    // Expand card table
    Universe::heap()->barrier_set()->resize_covered_region(mr);
    // Expand shared block offset array
    _bts->resize(new_word_size);

    // Fix for bug #4668531
    if (ZapUnusedHeapArea) {
      MemRegion mangle_region(_the_space->end(),
                              (HeapWord*)_virtual_space.high());
      SpaceMangler::mangle_region(mangle_region);
    }

    // Expand space -- also expands space's BOT
    // (which uses (part of) shared array above)
    _the_space->set_end((HeapWord*)_virtual_space.high());

    // update the space and generation capacity counters
    update_counters();

    if (Verbose && PrintGC) {
      size_t new_mem_size = _virtual_space.committed_size();
      size_t old_mem_size = new_mem_size - bytes;
      gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
                      SIZE_FORMAT "K to " SIZE_FORMAT "K",
                      name(), old_mem_size/K, bytes/K, new_mem_size/K);
    }
  }
  return result;
}

bool OneContigSpaceCardGeneration::grow_to_reserved() {
  assert_locked_or_safepoint(ExpandHeap_lock);
  bool success = true;
  const size_t remaining_bytes = _virtual_space.uncommitted_size();
  if (remaining_bytes > 0) {
    success = grow_by(remaining_bytes);
    DEBUG_ONLY(if (!success) warning("grow to reserved failed");)
  }
  return success;
}

void OneContigSpaceCardGeneration::shrink_by(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  // Shrink committed space
  _virtual_space.shrink_by(bytes);
  // Shrink space; this also shrinks the space's BOT
  _the_space->set_end((HeapWord*) _virtual_space.high());
  size_t new_word_size = heap_word_size(_the_space->capacity());
  // Shrink the shared block offset array
  _bts->resize(new_word_size);
  MemRegion mr(_the_space->bottom(), new_word_size);
  // Shrink the card table
  Universe::heap()->barrier_set()->resize_covered_region(mr);

  if (Verbose && PrintGC) {
    size_t new_mem_size = _virtual_space.committed_size();
    size_t old_mem_size = new_mem_size + bytes;
    gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  name(), old_mem_size/K, new_mem_size/K);
  }
}

// Currently nothing to do.
void OneContigSpaceCardGeneration::prepare_for_verify() {}

// Override for a card-table generation with one contiguous
// space.  NOTE: For reasons that are lost in the fog of history,
// this code is used when you iterate over perm gen objects,
// even when one uses CDS, where the perm gen has a couple of
// other spaces; this is because CompactingPermGenGen derives
// from OneContigSpaceCardGeneration.  This should be cleaned up,
// see CR 6897789.
void OneContigSpaceCardGeneration::object_iterate(ObjectClosure* blk) {
  _the_space->object_iterate(blk);
}

void OneContigSpaceCardGeneration::space_iterate(SpaceClosure* blk,
                                                 bool usedOnly) {
  blk->do_space(_the_space);
}

void OneContigSpaceCardGeneration::object_iterate_since_last_GC(ObjectClosure* blk) {
  // Deal with delayed initialization of _the_space,
  // and lack of initialization of _last_gc.
  if (_last_gc.space() == NULL) {
    assert(the_space() != NULL, "shouldn't be NULL");
    _last_gc = the_space()->bottom_mark();
  }
  the_space()->object_iterate_from(_last_gc, blk);
}

void OneContigSpaceCardGeneration::younger_refs_iterate(OopsInGenClosure* blk) {
  blk->set_generation(this);
  younger_refs_in_space_iterate(_the_space, blk);
  blk->reset_generation();
}

void OneContigSpaceCardGeneration::save_marks() {
  _the_space->set_saved_mark();
}

void OneContigSpaceCardGeneration::reset_saved_marks() {
  _the_space->reset_saved_mark();
}

bool OneContigSpaceCardGeneration::no_allocs_since_save_marks() {
  return _the_space->saved_mark_at_top();
}

#define OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix) \
                                                                           \
void OneContigSpaceCardGeneration::                                        \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {             \
  blk->set_generation(this);                                               \
  _the_space->oop_since_save_marks_iterate##nv_suffix(blk);                \
  blk->reset_generation();                                                 \
  save_marks();                                                            \
}
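
// ALL_SINCE_SAVE_MARKS_CLOSURES expands the macro above once per closure
// type, with nv_suffix selecting the virtual (_v) or non-virtual (_nv)
// variant of oop_since_save_marks_iterate.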
ALL_SINCE_SAVE_MARKS_CLOSURES(OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN)

#undef OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN

void OneContigSpaceCardGeneration::gc_epilogue(bool full) {
  _last_gc = WaterMark(the_space(), the_space()->top());

  // update the generation and space performance counters
  update_counters();
  if (ZapUnusedHeapArea) {
    the_space()->check_mangled_unused_area_complete();
  }
}

void OneContigSpaceCardGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  the_space()->set_top_for_allocations();
}

void OneContigSpaceCardGeneration::verify() {
  the_space()->verify();
}

void OneContigSpaceCardGeneration::print_on(outputStream* st) const {
  Generation::print_on(st);
  st->print(" the");
  the_space()->print_on(st);
}