|
1 /* |
|
2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. |
|
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
|
4 * |
|
5 * This code is free software; you can redistribute it and/or modify it |
|
6 * under the terms of the GNU General Public License version 2 only, as |
|
7 * published by the Free Software Foundation. |
|
8 * |
|
9 * This code is distributed in the hope that it will be useful, but WITHOUT |
|
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
|
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
|
12 * version 2 for more details (a copy is included in the LICENSE file that |
|
13 * accompanied this code). |
|
14 * |
|
15 * You should have received a copy of the GNU General Public License version |
|
16 * 2 along with this work; if not, write to the Free Software Foundation, |
|
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
|
18 * |
|
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
|
20 * or visit www.oracle.com if you need additional information or have any |
|
21 * questions. |
|
22 * |
|
23 */ |
|
24 |
|
25 #include "precompiled.hpp" |
|
26 #include "gc_implementation/shared/gcTimer.hpp" |
|
27 #include "gc_implementation/shared/gcTrace.hpp" |
|
28 #include "gc_implementation/shared/spaceDecorator.hpp" |
|
29 #include "gc_interface/collectedHeap.inline.hpp" |
|
30 #include "memory/allocation.inline.hpp" |
|
31 #include "memory/blockOffsetTable.inline.hpp" |
|
32 #include "memory/cardTableRS.hpp" |
|
33 #include "memory/gcLocker.inline.hpp" |
|
34 #include "memory/genCollectedHeap.hpp" |
|
35 #include "memory/genMarkSweep.hpp" |
|
36 #include "memory/genOopClosures.hpp" |
|
37 #include "memory/genOopClosures.inline.hpp" |
|
38 #include "memory/generation.hpp" |
|
39 #include "memory/generation.inline.hpp" |
|
40 #include "memory/space.inline.hpp" |
|
41 #include "oops/oop.inline.hpp" |
|
42 #include "runtime/java.hpp" |
|
43 #include "utilities/copy.hpp" |
|
44 #include "utilities/events.hpp" |
|
45 |
|
46 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC |
|
47 |
|
// Construct a generation at the given level, committing 'initial_size'
// bytes of the reserved space 'rs'.  Exits the VM if the initial commit
// fails.  The reference processor is created later, in ref_processor_init().
Generation::Generation(ReservedSpace rs, size_t initial_size, int level) :
  _level(level),
  _ref_processor(NULL) {
  if (!_virtual_space.initialize(rs, initial_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "object heap");
  }
  // Mangle all of the initial generation.
  if (ZapUnusedHeapArea) {
    MemRegion mangle_region((HeapWord*)_virtual_space.low(),
                            (HeapWord*)_virtual_space.high());
    SpaceMangler::mangle_region(mangle_region);
  }
  // _reserved covers the whole reserved range, not just the committed part.
  _reserved = MemRegion((HeapWord*)_virtual_space.low_boundary(),
                        (HeapWord*)_virtual_space.high_boundary());
}
|
64 |
|
65 GenerationSpec* Generation::spec() { |
|
66 GenCollectedHeap* gch = GenCollectedHeap::heap(); |
|
67 assert(0 <= level() && level() < gch->_n_gens, "Bad gen level"); |
|
68 return gch->_gen_specs[level()]; |
|
69 } |
|
70 |
|
71 size_t Generation::max_capacity() const { |
|
72 return reserved().byte_size(); |
|
73 } |
|
74 |
|
// Log the "prev->now(capacity)" transition for this generation; in bytes
// when Verbose, otherwise in kilobytes.
void Generation::print_heap_change(size_t prev_used) const {
  if (PrintGCDetails && Verbose) {
    gclog_or_tty->print(" "  SIZE_FORMAT
                        "->" SIZE_FORMAT
                        "("  SIZE_FORMAT ")",
                        prev_used, used(), capacity());
  } else {
    gclog_or_tty->print(" "  SIZE_FORMAT "K"
                        "->" SIZE_FORMAT "K"
                        "("  SIZE_FORMAT "K)",
                        prev_used / K, used() / K, capacity() / K);
  }
}
|
88 |
|
// By default we get a single threaded default reference processor;
// generations needing multi-threaded refs processing or discovery override this method.
void Generation::ref_processor_init() {
  assert(_ref_processor == NULL, "a reference processor already exists");
  assert(!_reserved.is_empty(), "empty generation?");
  _ref_processor = new ReferenceProcessor(_reserved);    // a vanilla reference processor
  if (_ref_processor == NULL) {
    vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
  }
}
|
99 |
|
// Convenience wrapper: print this generation to the default tty stream.
void Generation::print() const { print_on(tty); }
|
101 |
|
// Print name, totals, and the [low_boundary, high, high_boundary) triple of
// the backing virtual space (committed end is 'high'; reserved end is
// 'high_boundary').
void Generation::print_on(outputStream* st)  const {
  st->print(" %-20s", name());
  st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
             capacity()/K, used()/K);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
              _virtual_space.low_boundary(),
              _virtual_space.high(),
              _virtual_space.high_boundary());
}
|
111 |
|
// Convenience wrapper: print accumulated GC statistics to the default tty.
void Generation::print_summary_info() { print_summary_info_on(tty); }
|
113 |
|
// Print accumulated collection time and invocation count for this
// generation, guarding the average against division by zero invocations.
void Generation::print_summary_info_on(outputStream* st) {
  StatRecord* sr = stat_record();
  double time = sr->accumulated_time.seconds();
  st->print_cr("[Accumulated GC generation %d time %3.7f secs, "
               "%d GC's, avg GC time %3.7f]",
               level(), time, sr->invocations,
               sr->invocations > 0 ? time / sr->invocations : 0.0);
}
|
122 |
|
123 // Utility iterator classes |
|
124 |
|
// Space closure that records the first space whose *reserved* region
// contains _p; once a match is found, later spaces are ignored.
class GenerationIsInReservedClosure : public SpaceClosure {
 public:
  const void* _p;   // address being looked up
  Space* sp;        // result: containing space, or NULL if none found
  virtual void do_space(Space* s) {
    if (sp == NULL) {
      if (s->is_in_reserved(_p)) sp = s;
    }
  }
  GenerationIsInReservedClosure(const void* p) : _p(p), sp(NULL) {}
};
|
136 |
|
// Space closure that records the first space whose *allocated* part
// contains _p (s->is_in, stricter than is_in_reserved).
class GenerationIsInClosure : public SpaceClosure {
 public:
  const void* _p;   // address being looked up
  Space* sp;        // result: containing space, or NULL if none found
  virtual void do_space(Space* s) {
    if (sp == NULL) {
      if (s->is_in(_p)) sp = s;
    }
  }
  GenerationIsInClosure(const void* p) : _p(p), sp(NULL) {}
};
|
148 |
|
149 bool Generation::is_in(const void* p) const { |
|
150 GenerationIsInClosure blk(p); |
|
151 ((Generation*)this)->space_iterate(&blk); |
|
152 return blk.sp != NULL; |
|
153 } |
|
154 |
|
// Downcast to DefNewGeneration; only valid for the young-generation kinds
// listed in the assert (DefNew and its ParNew variants share the layout).
DefNewGeneration* Generation::as_DefNewGeneration() {
  assert((kind() == Generation::DefNew) ||
         (kind() == Generation::ParNew) ||
         (kind() == Generation::ASParNew),
    "Wrong youngest generation type");
  return (DefNewGeneration*) this;
}
|
162 |
|
163 Generation* Generation::next_gen() const { |
|
164 GenCollectedHeap* gch = GenCollectedHeap::heap(); |
|
165 int next = level() + 1; |
|
166 if (next < gch->_n_gens) { |
|
167 return gch->_gens[next]; |
|
168 } else { |
|
169 return NULL; |
|
170 } |
|
171 } |
|
172 |
|
173 size_t Generation::max_contiguous_available() const { |
|
174 // The largest number of contiguous free words in this or any higher generation. |
|
175 size_t max = 0; |
|
176 for (const Generation* gen = this; gen != NULL; gen = gen->next_gen()) { |
|
177 size_t avail = gen->contiguous_available(); |
|
178 if (avail > max) { |
|
179 max = avail; |
|
180 } |
|
181 } |
|
182 return max; |
|
183 } |
|
184 |
|
// Conservative check: a promotion of up to 'max_promotion_in_bytes' is
// considered safe iff that many bytes are contiguously available in this
// or any older generation.
bool Generation::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  size_t available = max_contiguous_available();
  bool   res = (available >= max_promotion_in_bytes);
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr(
      "Generation: promo attempt is%s safe: available("SIZE_FORMAT") %s max_promo("SIZE_FORMAT")",
      res? "":" not", available, res? ">=":"<",
      max_promotion_in_bytes);
  }
  return res;
}
|
196 |
|
// Ignores "ref" and calls allocate().
// Promote 'obj' into this generation by allocating obj_size words and
// copying; on allocation failure, defer to the heap's failed-promotion
// handler (which may return NULL, signalling promotion failure).
oop Generation::promote(oop obj, size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");

#ifndef PRODUCT
  // Test hook: deliberately fail some promotions in debug builds.
  if (Universe::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  HeapWord* result = allocate(obj_size, false);
  if (result != NULL) {
    Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
    return oop(result);
  } else {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    return gch->handle_failed_promotion(this, obj, obj_size);
  }
}
|
216 |
|
// Parallel promotion is not supported by the base class; generations that
// support parallel GC override this.
oop Generation::par_promote(int thread_num,
                            oop obj, markOop m, size_t word_sz) {
  // Could do a bad general impl here that gets a lock.  But no.
  ShouldNotCallThis();
  return NULL;
}
|
223 |
|
// Undo of a parallel promotion allocation is likewise unsupported here.
void Generation::par_promote_alloc_undo(int thread_num,
                                        HeapWord* obj, size_t word_sz) {
  // Could do a bad general impl here that gets a lock.  But no.
  guarantee(false, "No good general implementation.");
}
|
229 |
|
230 Space* Generation::space_containing(const void* p) const { |
|
231 GenerationIsInReservedClosure blk(p); |
|
232 // Cast away const |
|
233 ((Generation*)this)->space_iterate(&blk); |
|
234 return blk.sp; |
|
235 } |
|
236 |
|
237 // Some of these are mediocre general implementations. Should be |
|
238 // overridden to get better performance. |
|
239 |
|
// Space closure for block_start(): delegates to the first space whose
// reserved region contains _p and records that space's block start.
class GenerationBlockStartClosure : public SpaceClosure {
 public:
  const void* _p;     // query address
  HeapWord* _start;   // result: block start, or NULL if no space matched
  virtual void do_space(Space* s) {
    if (_start == NULL && s->is_in_reserved(_p)) {
      _start = s->block_start(_p);
    }
  }
  GenerationBlockStartClosure(const void* p) { _p = p; _start = NULL; }
};
|
251 |
|
252 HeapWord* Generation::block_start(const void* p) const { |
|
253 GenerationBlockStartClosure blk(p); |
|
254 // Cast away const |
|
255 ((Generation*)this)->space_iterate(&blk); |
|
256 return blk._start; |
|
257 } |
|
258 |
|
// Space closure for block_size(): records the block size reported by the
// first space whose reserved region contains _p (size 0 means "not found
// yet", so a genuine zero-sized block would not stop the scan).
class GenerationBlockSizeClosure : public SpaceClosure {
 public:
  const HeapWord* _p;  // query address
  size_t size;         // result: block size in words, 0 if no space matched
  virtual void do_space(Space* s) {
    if (size == 0 && s->is_in_reserved(_p)) {
      size = s->block_size(_p);
    }
  }
  GenerationBlockSizeClosure(const HeapWord* p) { _p = p; size = 0; }
};
|
270 |
|
271 size_t Generation::block_size(const HeapWord* p) const { |
|
272 GenerationBlockSizeClosure blk(p); |
|
273 // Cast away const |
|
274 ((Generation*)this)->space_iterate(&blk); |
|
275 assert(blk.size > 0, "seems reasonable"); |
|
276 return blk.size; |
|
277 } |
|
278 |
|
// Space closure for block_is_obj(): asks the first space whose reserved
// region contains _p whether the block at _p is a parseable object.
class GenerationBlockIsObjClosure : public SpaceClosure {
 public:
  const HeapWord* _p;  // query address
  bool is_obj;         // result; stays false if no space matched
  virtual void do_space(Space* s) {
    if (!is_obj && s->is_in_reserved(_p)) {
      is_obj |= s->block_is_obj(_p);
    }
  }
  GenerationBlockIsObjClosure(const HeapWord* p) { _p = p; is_obj = false; }
};
|
290 |
|
291 bool Generation::block_is_obj(const HeapWord* p) const { |
|
292 GenerationBlockIsObjClosure blk(p); |
|
293 // Cast away const |
|
294 ((Generation*)this)->space_iterate(&blk); |
|
295 return blk.is_obj; |
|
296 } |
|
297 |
|
// Space closure that applies an oop closure to every space, restricted to
// the memory region 'mr'.
class GenerationOopIterateClosure : public SpaceClosure {
 public:
  ExtendedOopClosure* cl;  // oop closure to apply
  MemRegion mr;            // region to which iteration is restricted
  virtual void do_space(Space* s) {
    s->oop_iterate(mr, cl);
  }
  GenerationOopIterateClosure(ExtendedOopClosure* _cl, MemRegion _mr) :
    cl(_cl), mr(_mr) {}
};
|
308 |
|
309 void Generation::oop_iterate(ExtendedOopClosure* cl) { |
|
310 GenerationOopIterateClosure blk(cl, _reserved); |
|
311 space_iterate(&blk); |
|
312 } |
|
313 |
|
314 void Generation::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) { |
|
315 GenerationOopIterateClosure blk(cl, mr); |
|
316 space_iterate(&blk); |
|
317 } |
|
318 |
|
// Delegate to the shared remembered set to visit references in 'sp' that
// may point into younger generations.
void Generation::younger_refs_in_space_iterate(Space* sp,
                                               OopsInGenClosure* cl) {
  GenRemSet* rs = SharedHeap::heap()->rem_set();
  rs->younger_refs_in_space_iterate(sp, cl);
}
|
324 |
|
// Space closure that applies an object closure to every space.
class GenerationObjIterateClosure : public SpaceClosure {
 private:
  ObjectClosure* _cl;  // object closure to apply
 public:
  virtual void do_space(Space* s) {
    s->object_iterate(_cl);
  }
  GenerationObjIterateClosure(ObjectClosure* cl) : _cl(cl) {}
};
|
334 |
|
335 void Generation::object_iterate(ObjectClosure* cl) { |
|
336 GenerationObjIterateClosure blk(cl); |
|
337 space_iterate(&blk); |
|
338 } |
|
339 |
|
// Space closure that applies an object closure via the "safe" iteration
// variant of each space.
class GenerationSafeObjIterateClosure : public SpaceClosure {
 private:
  ObjectClosure* _cl;  // object closure to apply
 public:
  virtual void do_space(Space* s) {
    s->safe_object_iterate(_cl);
  }
  GenerationSafeObjIterateClosure(ObjectClosure* cl) : _cl(cl) {}
};
|
349 |
|
350 void Generation::safe_object_iterate(ObjectClosure* cl) { |
|
351 GenerationSafeObjIterateClosure blk(cl); |
|
352 space_iterate(&blk); |
|
353 } |
|
354 |
|
355 void Generation::prepare_for_compaction(CompactPoint* cp) { |
|
356 // Generic implementation, can be specialized |
|
357 CompactibleSpace* space = first_compaction_space(); |
|
358 while (space != NULL) { |
|
359 space->prepare_for_compaction(cp); |
|
360 space = space->next_compaction_space(); |
|
361 } |
|
362 } |
|
363 |
|
// Space closure that asks each space to adjust its interior pointers
// (mark-sweep pointer-adjustment phase).
class AdjustPointersClosure: public SpaceClosure {
 public:
  void do_space(Space* sp) {
    sp->adjust_pointers();
  }
};
|
370 |
|
// Adjust pointers in all spaces of this generation.
void Generation::adjust_pointers() {
  // Note that this is done over all spaces, not just the compactible
  // ones.
  AdjustPointersClosure blk;
  space_iterate(&blk, true);
}
|
377 |
|
378 void Generation::compact() { |
|
379 CompactibleSpace* sp = first_compaction_space(); |
|
380 while (sp != NULL) { |
|
381 sp->compact(); |
|
382 sp = sp->next_compaction_space(); |
|
383 } |
|
384 } |
|
385 |
|
// Construct a card-table-backed generation: sets up the shared block
// offset array over the full reserved region and resizes the remembered
// set to cover the initially committed region.
CardGeneration::CardGeneration(ReservedSpace rs, size_t initial_byte_size,
                               int level,
                               GenRemSet* remset) :
  Generation(rs, initial_byte_size, level), _rs(remset),
  _shrink_factor(0), _min_heap_delta_bytes(), _capacity_at_prologue(),
  _used_at_prologue()
{
  HeapWord* start = (HeapWord*)rs.base();
  size_t reserved_byte_size = rs.size();
  assert((uintptr_t(start) & 3) == 0, "bad alignment");
  assert((reserved_byte_size & 3) == 0, "bad alignment");
  MemRegion reserved_mr(start, heap_word_size(reserved_byte_size));
  // BOT spans the whole reserved region; only the initial part is committed.
  _bts = new BlockOffsetSharedArray(reserved_mr,
                                    heap_word_size(initial_byte_size));
  MemRegion committed_mr(start, heap_word_size(initial_byte_size));
  _rs->resize_covered_region(committed_mr);
  if (_bts == NULL)
    vm_exit_during_initialization("Could not allocate a BlockOffsetArray");

  // Verify that the start and end of this generation is the start of a card.
  // If this wasn't true, a single card could span more than on generation,
  // which would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
  guarantee(_rs->is_aligned(reserved_mr.start()), "generation must be card aligned");
  if (reserved_mr.end() != Universe::heap()->reserved_region().end()) {
    // Don't check at the very end of the heap as we'll assert that we're probing off
    // the end if we try.
    guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned");
  }
  // Sizing policy state, initialized here rather than in the init list above.
  _min_heap_delta_bytes = MinHeapDeltaBytes;
  _capacity_at_prologue = initial_byte_size;
  _used_at_prologue = 0;
}
|
419 |
|
// Try to grow the committed size.  Prefers the larger 'expand_bytes' hint
// when it exceeds the page-aligned request; falls back to the request
// itself, and finally to growing to the full reserved size.  Returns true
// if any growth succeeded (or bytes == 0).
bool CardGeneration::expand(size_t bytes, size_t expand_bytes) {
  assert_locked_or_safepoint(Heap_lock);
  if (bytes == 0) {
    return true;  // That's what grow_by(0) would return
  }
  size_t aligned_bytes  = ReservedSpace::page_align_size_up(bytes);
  if (aligned_bytes == 0){
    // The alignment caused the number of bytes to wrap.  An expand_by(0) will
    // return true with the implication that an expansion was done when it
    // was not.  A call to expand implies a best effort to expand by "bytes"
    // but not a guarantee.  Align down to give a best effort.  This is likely
    // the most that the generation can expand since it has some capacity to
    // start with.
    aligned_bytes = ReservedSpace::page_align_size_down(bytes);
  }
  size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
  bool success = false;
  if (aligned_expand_bytes > aligned_bytes) {
    success = grow_by(aligned_expand_bytes);
  }
  if (!success) {
    success = grow_by(aligned_bytes);
  }
  if (!success) {
    success = grow_to_reserved();
  }
  if (PrintGC && Verbose) {
    if (success && GC_locker::is_active_and_needs_gc()) {
      gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
    }
  }

  return success;
}
|
454 |
|
455 |
|
// No young generation references, clear this generation's cards.
void CardGeneration::clear_remembered_set() {
  _rs->clear(reserved());
}
|
460 |
|
461 |
|
// Objects in this generation may have moved, invalidate this
// generation's cards.
void CardGeneration::invalidate_remembered_set() {
  _rs->invalidate(used_region());
}
|
467 |
|
468 |
|
// Resize this generation after a collection, honoring MinHeapFreeRatio /
// MaxHeapFreeRatio.  Expansion happens immediately when free space is
// below the minimum; shrinking is damped across successive calls via
// _shrink_factor (0% -> 10% -> 40% -> 100%) so System.gc() bursts do not
// cause thrashing.
void CardGeneration::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  size_t current_shrink_factor = _shrink_factor;
  // Reset for next call; re-armed below only if we actually shrink.
  _shrink_factor = 0;

  // We don't have floating point command-line arguments
  // Note: argument processing ensures that MinHeapFreeRatio < 100.
  const double minimum_free_percentage = MinHeapFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  // Compute some numbers about the state of the heap.
  const size_t used_after_gc = used();
  const size_t capacity_after_gc = capacity();

  // Capacity needed so that used() stays below the maximum-used ratio,
  // clamped to max_uintx to avoid overflow on conversion back to size_t.
  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity = (size_t)MIN2(min_tmp, double(max_uintx));
  // Don't shrink less than the initial generation size
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  spec()->init_size());
  assert(used_after_gc <= minimum_desired_capacity, "sanity check");

  if (PrintGC && Verbose) {
    const size_t free_after_gc = free();
    const double free_percentage = ((double)free_after_gc) / capacity_after_gc;
    // NOTE(review): message says "TenuredGeneration" but this method lives
    // on CardGeneration -- label predates a refactoring; confirm before
    // relying on it in log scraping.
    gclog_or_tty->print_cr("TenuredGeneration::compute_new_size: ");
    gclog_or_tty->print_cr(" "
      " minimum_free_percentage: %6.2f"
      " maximum_used_percentage: %6.2f",
      minimum_free_percentage,
      maximum_used_percentage);
    gclog_or_tty->print_cr(" "
      " free_after_gc : %6.1fK"
      " used_after_gc : %6.1fK"
      " capacity_after_gc : %6.1fK",
      free_after_gc / (double) K,
      used_after_gc / (double) K,
      capacity_after_gc / (double) K);
    gclog_or_tty->print_cr(" "
      " free_percentage: %6.2f",
      free_percentage);
  }

  if (capacity_after_gc < minimum_desired_capacity) {
    // If we have less free space than we want then expand
    size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
    // Don't expand unless it's significant
    if (expand_bytes >= _min_heap_delta_bytes) {
      expand(expand_bytes, 0); // safe if expansion fails
    }
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("    expanding:"
        " minimum_desired_capacity: %6.1fK"
        " expand_bytes: %6.1fK"
        " _min_heap_delta_bytes: %6.1fK",
        minimum_desired_capacity / (double) K,
        expand_bytes / (double) K,
        _min_heap_delta_bytes / (double) K);
    }
    // Expanded: never shrink in the same cycle.
    return;
  }

  // No expansion, now see if we want to shrink
  size_t shrink_bytes = 0;
  // We would never want to shrink more than this
  size_t max_shrink_bytes = capacity_after_gc - minimum_desired_capacity;

  if (MaxHeapFreeRatio < 100) {
    const double maximum_free_percentage = MaxHeapFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    spec()->init_size());
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr(" "
        " maximum_free_percentage: %6.2f"
        " minimum_used_percentage: %6.2f",
        maximum_free_percentage,
        minimum_used_percentage);
      gclog_or_tty->print_cr(" "
        " _capacity_at_prologue: %6.1fK"
        " minimum_desired_capacity: %6.1fK"
        " maximum_desired_capacity: %6.1fK",
        _capacity_at_prologue / (double) K,
        minimum_desired_capacity / (double) K,
        maximum_desired_capacity / (double) K);
    }
    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_after_gc > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_bytes = capacity_after_gc - maximum_desired_capacity;
      // We don't want shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the heap up again for the next phase.  So we
      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call.  But if we recompute
      // size without shrinking, it goes back to 0%.
      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
      assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
      if (current_shrink_factor == 0) {
        _shrink_factor = 10;
      } else {
        _shrink_factor = MIN2(current_shrink_factor * 4, (size_t) 100);
      }
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr(" "
          " shrinking:"
          " initSize: %.1fK"
          " maximum_desired_capacity: %.1fK",
          spec()->init_size() / (double) K,
          maximum_desired_capacity / (double) K);
        gclog_or_tty->print_cr(" "
          " shrink_bytes: %.1fK"
          " current_shrink_factor: %d"
          " new shrink factor: %d"
          " _min_heap_delta_bytes: %.1fK",
          shrink_bytes / (double) K,
          current_shrink_factor,
          _shrink_factor,
          _min_heap_delta_bytes / (double) K);
      }
    }
  }

  if (capacity_after_gc > _capacity_at_prologue) {
    // We might have expanded for promotions, in which case we might want to
    // take back that expansion if there's room after GC.  That keeps us from
    // stretching the heap with promotions when there's plenty of room.
    size_t expansion_for_promotion = capacity_after_gc - _capacity_at_prologue;
    expansion_for_promotion = MIN2(expansion_for_promotion, max_shrink_bytes);
    // We have two shrinking computations, take the largest
    shrink_bytes = MAX2(shrink_bytes, expansion_for_promotion);
    assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr(" "
        " aggressive shrinking:"
        " _capacity_at_prologue: %.1fK"
        " capacity_after_gc: %.1fK"
        " expansion_for_promotion: %.1fK"
        " shrink_bytes: %.1fK",
        capacity_after_gc / (double) K,
        _capacity_at_prologue / (double) K,
        expansion_for_promotion / (double) K,
        shrink_bytes / (double) K);
    }
  }
  // Don't shrink unless it's significant
  if (shrink_bytes >= _min_heap_delta_bytes) {
    shrink(shrink_bytes);
  }
}
|
622 |
|
// Currently nothing to do.
void CardGeneration::prepare_for_verify() {}
|
625 |
|
626 |
|
// Run a full mark-sweep collection of this generation (and everything
// younger, via GenMarkSweep) at a safepoint, bracketed by GC timer and
// tracer reporting.  'full', 'size' and 'is_tlab' are unused here.
void OneContigSpaceCardGeneration::collect(bool   full,
                                           bool   clear_all_soft_refs,
                                           size_t size,
                                           bool   is_tlab) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  SpecializationStats::clear();
  // Temporarily expand the span of our ref processor, so
  // refs discovery is over the entire heap, not just this generation
  ReferenceProcessorSpanMutator
    x(ref_processor(), gch->reserved_region());

  STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
  gc_timer->register_gc_start();

  SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());

  GenMarkSweep::invoke_at_safepoint(_level, ref_processor(), clear_all_soft_refs);

  gc_timer->register_gc_end();

  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());

  SpecializationStats::print();
}
|
653 |
|
// Expand the generation and then allocate 'word_size' words.  The parallel
// path retries under ParGCRareEvent_lock (another thread may have expanded
// and/or allocated in between) until allocation succeeds or no further
// expansion is possible; the serial path expands once and allocates.
HeapWord*
OneContigSpaceCardGeneration::expand_and_allocate(size_t word_size,
                                                  bool is_tlab,
                                                  bool parallel) {
  assert(!is_tlab, "OneContigSpaceCardGeneration does not support TLAB allocation");
  if (parallel) {
    MutexLocker x(ParGCRareEvent_lock);
    HeapWord* result = NULL;
    size_t byte_size = word_size * HeapWordSize;
    while (true) {
      expand(byte_size, _min_heap_delta_bytes);
      if (GCExpandToAllocateDelayMillis > 0) {
        // Test/diagnostic delay between expansion and allocation.
        os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false);
      }
      result = _the_space->par_allocate(word_size);
      if ( result != NULL) {
        return result;
      } else {
        // If there's not enough expansion space available, give up.
        if (_virtual_space.uncommitted_size() < byte_size) {
          return NULL;
        }
        // else try again
      }
    }
  } else {
    expand(word_size*HeapWordSize, _min_heap_delta_bytes);
    return _the_space->allocate(word_size);
  }
}
|
684 |
|
// Expand under ExpandHeap_lock (GCMutexLocker skips locking when at a
// safepoint), then delegate to the CardGeneration expansion logic.
bool OneContigSpaceCardGeneration::expand(size_t bytes, size_t expand_bytes) {
  GCMutexLocker x(ExpandHeap_lock);
  return CardGeneration::expand(bytes, expand_bytes);
}
|
689 |
|
690 |
|
691 void OneContigSpaceCardGeneration::shrink(size_t bytes) { |
|
692 assert_locked_or_safepoint(ExpandHeap_lock); |
|
693 size_t size = ReservedSpace::page_align_size_down(bytes); |
|
694 if (size > 0) { |
|
695 shrink_by(size); |
|
696 } |
|
697 } |
|
698 |
|
699 |
|
// Committed capacity (bytes) of the single contiguous space.
size_t OneContigSpaceCardGeneration::capacity() const {
  return _the_space->capacity();
}
|
703 |
|
704 |
|
// Bytes in use in the single contiguous space.
size_t OneContigSpaceCardGeneration::used() const {
  return _the_space->used();
}
|
708 |
|
709 |
|
// Free bytes remaining in the committed part of the space.
size_t OneContigSpaceCardGeneration::free() const {
  return _the_space->free();
}
|
713 |
|
// The region [bottom, top) currently occupied by objects.
MemRegion OneContigSpaceCardGeneration::used_region() const {
  return the_space()->used_region();
}
|
717 |
|
// Largest allocation possible without a GC: the committed free space.
size_t OneContigSpaceCardGeneration::unsafe_max_alloc_nogc() const {
  return _the_space->free();
}
|
721 |
|
// Contiguous space available: committed free space plus whatever could
// still be committed from the reserved-but-uncommitted tail.
size_t OneContigSpaceCardGeneration::contiguous_available() const {
  return _the_space->free() + _virtual_space.uncommitted_size();
}
|
725 |
|
// Commit 'bytes' more of the reserved space.  On success, the card table
// and shared block offset array are resized BEFORE the space's end is
// advanced, so they always cover the space's extent.
bool OneContigSpaceCardGeneration::grow_by(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  bool result = _virtual_space.expand_by(bytes);
  if (result) {
    size_t new_word_size =
       heap_word_size(_virtual_space.committed_size());
    MemRegion mr(_the_space->bottom(), new_word_size);
    // Expand card table
    Universe::heap()->barrier_set()->resize_covered_region(mr);
    // Expand shared block offset array
    _bts->resize(new_word_size);

    // Fix for bug #4668531
    if (ZapUnusedHeapArea) {
      // Mangle only the newly committed range [old end, new high).
      MemRegion mangle_region(_the_space->end(),
                              (HeapWord*)_virtual_space.high());
      SpaceMangler::mangle_region(mangle_region);
    }

    // Expand space -- also expands space's BOT
    // (which uses (part of) shared array above)
    _the_space->set_end((HeapWord*)_virtual_space.high());

    // update the space and generation capacity counters
    update_counters();

    if (Verbose && PrintGC) {
      size_t new_mem_size = _virtual_space.committed_size();
      size_t old_mem_size = new_mem_size - bytes;
      gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by "
                      SIZE_FORMAT "K to " SIZE_FORMAT "K",
                      name(), old_mem_size/K, bytes/K, new_mem_size/K);
    }
  }
  return result;
}
|
762 |
|
763 |
|
764 bool OneContigSpaceCardGeneration::grow_to_reserved() { |
|
765 assert_locked_or_safepoint(ExpandHeap_lock); |
|
766 bool success = true; |
|
767 const size_t remaining_bytes = _virtual_space.uncommitted_size(); |
|
768 if (remaining_bytes > 0) { |
|
769 success = grow_by(remaining_bytes); |
|
770 DEBUG_ONLY(if (!success) warning("grow to reserved failed");) |
|
771 } |
|
772 return success; |
|
773 } |
|
774 |
|
// Uncommit 'bytes' from the high end.  Mirror of grow_by: the space's end
// is pulled back FIRST, then the BOT and card table are resized down to
// the new extent.
void OneContigSpaceCardGeneration::shrink_by(size_t bytes) {
  assert_locked_or_safepoint(ExpandHeap_lock);
  // Shrink committed space
  _virtual_space.shrink_by(bytes);
  // Shrink space; this also shrinks the space's BOT
  _the_space->set_end((HeapWord*) _virtual_space.high());
  size_t new_word_size = heap_word_size(_the_space->capacity());
  // Shrink the shared block offset array
  _bts->resize(new_word_size);
  MemRegion mr(_the_space->bottom(), new_word_size);
  // Shrink the card table
  Universe::heap()->barrier_set()->resize_covered_region(mr);

  if (Verbose && PrintGC) {
    size_t new_mem_size = _virtual_space.committed_size();
    size_t old_mem_size = new_mem_size + bytes;
    gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K",
                  name(), old_mem_size/K, new_mem_size/K);
  }
}
|
795 |
|
// Currently nothing to do.
void OneContigSpaceCardGeneration::prepare_for_verify() {}
|
798 |
|
799 |
|
// Override for a card-table generation with one contiguous
// space. NOTE: For reasons that are lost in the fog of history,
// this code is used when you iterate over perm gen objects,
// even when one uses CDS, where the perm gen has a couple of
// other spaces; this is because CompactingPermGenGen derives
// from OneContigSpaceCardGeneration. This should be cleaned up,
// see CR 6897789..
void OneContigSpaceCardGeneration::object_iterate(ObjectClosure* blk) {
  _the_space->object_iterate(blk);
}
|
810 |
|
// Only one space, so apply the closure to it; 'usedOnly' is irrelevant here.
void OneContigSpaceCardGeneration::space_iterate(SpaceClosure* blk,
                                                 bool usedOnly) {
  blk->do_space(_the_space);
}
|
815 |
|
// Visit references in this generation's single space that may point into
// younger generations, with the closure's generation set for the duration.
void OneContigSpaceCardGeneration::younger_refs_iterate(OopsInGenClosure* blk) {
  blk->set_generation(this);
  younger_refs_in_space_iterate(_the_space, blk);
  blk->reset_generation();
}
|
821 |
|
// Record the current allocation point of the space as the saved mark.
void OneContigSpaceCardGeneration::save_marks() {
  _the_space->set_saved_mark();
}
|
825 |
|
826 |
|
// Reset the space's saved mark.
void OneContigSpaceCardGeneration::reset_saved_marks() {
  _the_space->reset_saved_mark();
}
|
830 |
|
831 |
|
// True iff nothing has been allocated in the space since save_marks().
bool OneContigSpaceCardGeneration::no_allocs_since_save_marks() {
  return _the_space->saved_mark_at_top();
}
|
835 |
|
// Generate oop_since_save_marks_iterate##nv_suffix for each closure type:
// applies 'blk' to oops in objects allocated since the last save_marks(),
// then re-saves marks so a subsequent call only sees newer allocations.
// (Comments cannot go inside the macro body -- '//' would swallow the
// line-continuation backslashes.)
#define OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix)  \
                                                                            \
void OneContigSpaceCardGeneration::                                         \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
  blk->set_generation(this);                                                \
  _the_space->oop_since_save_marks_iterate##nv_suffix(blk);                 \
  blk->reset_generation();                                                  \
  save_marks();                                                             \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN)

#undef OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN
|
849 |
|
850 |
|
// Post-GC bookkeeping: remember the post-GC top as the last-GC watermark,
// refresh performance counters, and (in debug zap mode) verify mangling.
void OneContigSpaceCardGeneration::gc_epilogue(bool full) {
  _last_gc = WaterMark(the_space(), the_space()->top());

  // update the generation and space performance counters
  update_counters();
  if (ZapUnusedHeapArea) {
    the_space()->check_mangled_unused_area_complete();
  }
}
|
860 |
|
// Record the space's top for later mangled-area checking; only meaningful
// when heap zapping is enabled.
void OneContigSpaceCardGeneration::record_spaces_top() {
  assert(ZapUnusedHeapArea, "Not mangling unused space");
  the_space()->set_top_for_allocations();
}
|
865 |
|
// Verification delegates to the single space.
void OneContigSpaceCardGeneration::verify() {
  the_space()->verify();
}
|
869 |
|
// Print the generation header, then the single space's details.
void OneContigSpaceCardGeneration::print_on(outputStream* st)  const {
  Generation::print_on(st);
  st->print("   the");
  the_space()->print_on(st);
}