Thu, 26 Jun 2014 11:36:58 +0200
8047818: G1 HeapRegions can no longer be ContiguousSpaces
Summary: Change parent of G1OffsetTableContigSpace to CompactibleSpace, reimplement missing functionality
Reviewed-by: stefank, jmasa, tschatzl
duke@435 | 1 | /* |
drchase@6680 | 2 | * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. |
duke@435 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
duke@435 | 4 | * |
duke@435 | 5 | * This code is free software; you can redistribute it and/or modify it |
duke@435 | 6 | * under the terms of the GNU General Public License version 2 only, as |
duke@435 | 7 | * published by the Free Software Foundation. |
duke@435 | 8 | * |
duke@435 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
duke@435 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
duke@435 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
duke@435 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
duke@435 | 13 | * accompanied this code). |
duke@435 | 14 | * |
duke@435 | 15 | * You should have received a copy of the GNU General Public License version |
duke@435 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
duke@435 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
duke@435 | 18 | * |
trims@1907 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
trims@1907 | 20 | * or visit www.oracle.com if you need additional information or have any |
trims@1907 | 21 | * questions. |
duke@435 | 22 | * |
duke@435 | 23 | */ |
duke@435 | 24 | |
stefank@2314 | 25 | #include "precompiled.hpp" |
sla@5237 | 26 | #include "gc_implementation/shared/gcTimer.hpp" |
sla@5237 | 27 | #include "gc_implementation/shared/gcTrace.hpp" |
stefank@2314 | 28 | #include "gc_implementation/shared/spaceDecorator.hpp" |
stefank@2314 | 29 | #include "gc_interface/collectedHeap.inline.hpp" |
stefank@2314 | 30 | #include "memory/allocation.inline.hpp" |
stefank@2314 | 31 | #include "memory/blockOffsetTable.inline.hpp" |
stefank@2314 | 32 | #include "memory/cardTableRS.hpp" |
stefank@2314 | 33 | #include "memory/gcLocker.inline.hpp" |
stefank@2314 | 34 | #include "memory/genCollectedHeap.hpp" |
stefank@2314 | 35 | #include "memory/genMarkSweep.hpp" |
stefank@2314 | 36 | #include "memory/genOopClosures.hpp" |
stefank@2314 | 37 | #include "memory/genOopClosures.inline.hpp" |
stefank@2314 | 38 | #include "memory/generation.hpp" |
stefank@2314 | 39 | #include "memory/generation.inline.hpp" |
stefank@2314 | 40 | #include "memory/space.inline.hpp" |
stefank@2314 | 41 | #include "oops/oop.inline.hpp" |
stefank@2314 | 42 | #include "runtime/java.hpp" |
stefank@2314 | 43 | #include "utilities/copy.hpp" |
stefank@2314 | 44 | #include "utilities/events.hpp" |
duke@435 | 45 | |
drchase@6680 | 46 | PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC |
drchase@6680 | 47 | |
// Construct a generation at the given level of the generational heap,
// committing initial_size bytes of the passed ReservedSpace for it.
// Exits the VM during initialization if the commit fails.
Generation::Generation(ReservedSpace rs, size_t initial_size, int level) :
  _level(level),
  _ref_processor(NULL) {
  if (!_virtual_space.initialize(rs, initial_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "object heap");
  }
  // Mangle all of the initial generation.
  if (ZapUnusedHeapArea) {
    MemRegion mangle_region((HeapWord*)_virtual_space.low(),
                            (HeapWord*)_virtual_space.high());
    SpaceMangler::mangle_region(mangle_region);
  }
  // _reserved covers the whole reserved range, not just the committed part.
  _reserved = MemRegion((HeapWord*)_virtual_space.low_boundary(),
                        (HeapWord*)_virtual_space.high_boundary());
}
duke@435 | 64 | |
duke@435 | 65 | GenerationSpec* Generation::spec() { |
duke@435 | 66 | GenCollectedHeap* gch = GenCollectedHeap::heap(); |
duke@435 | 67 | assert(0 <= level() && level() < gch->_n_gens, "Bad gen level"); |
duke@435 | 68 | return gch->_gen_specs[level()]; |
duke@435 | 69 | } |
duke@435 | 70 | |
duke@435 | 71 | size_t Generation::max_capacity() const { |
duke@435 | 72 | return reserved().byte_size(); |
duke@435 | 73 | } |
duke@435 | 74 | |
duke@435 | 75 | void Generation::print_heap_change(size_t prev_used) const { |
duke@435 | 76 | if (PrintGCDetails && Verbose) { |
duke@435 | 77 | gclog_or_tty->print(" " SIZE_FORMAT |
duke@435 | 78 | "->" SIZE_FORMAT |
duke@435 | 79 | "(" SIZE_FORMAT ")", |
duke@435 | 80 | prev_used, used(), capacity()); |
duke@435 | 81 | } else { |
duke@435 | 82 | gclog_or_tty->print(" " SIZE_FORMAT "K" |
duke@435 | 83 | "->" SIZE_FORMAT "K" |
duke@435 | 84 | "(" SIZE_FORMAT "K)", |
duke@435 | 85 | prev_used / K, used() / K, capacity() / K); |
duke@435 | 86 | } |
duke@435 | 87 | } |
duke@435 | 88 | |
// By default we get a single threaded default reference processor;
// generations needing multi-threaded refs processing or discovery override this method.
void Generation::ref_processor_init() {
  assert(_ref_processor == NULL, "a reference processor already exists");
  assert(!_reserved.is_empty(), "empty generation?");
  // Discovery span is this generation's entire reserved region.
  _ref_processor = new ReferenceProcessor(_reserved);    // a vanilla reference processor
  if (_ref_processor == NULL) {
    vm_exit_during_initialization("Could not allocate ReferenceProcessor object");
  }
}
duke@435 | 99 | |
// Print a one-line summary of this generation to the default tty stream.
void Generation::print() const { print_on(tty); }
duke@435 | 101 | |
// Print name, total/used sizes in K, and the [low_boundary, high,
// high_boundary) addresses of this generation's virtual space to st.
void Generation::print_on(outputStream* st) const {
  st->print(" %-20s", name());
  st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
            capacity()/K, used()/K);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
              _virtual_space.low_boundary(),
              _virtual_space.high(),
              _virtual_space.high_boundary());
}
duke@435 | 111 | |
// Print accumulated GC statistics for this generation to the default tty stream.
void Generation::print_summary_info() { print_summary_info_on(tty); }
duke@435 | 113 | |
duke@435 | 114 | void Generation::print_summary_info_on(outputStream* st) { |
duke@435 | 115 | StatRecord* sr = stat_record(); |
duke@435 | 116 | double time = sr->accumulated_time.seconds(); |
duke@435 | 117 | st->print_cr("[Accumulated GC generation %d time %3.7f secs, " |
duke@435 | 118 | "%d GC's, avg GC time %3.7f]", |
duke@435 | 119 | level(), time, sr->invocations, |
duke@435 | 120 | sr->invocations > 0 ? time / sr->invocations : 0.0); |
duke@435 | 121 | } |
duke@435 | 122 | |
duke@435 | 123 | // Utility iterator classes |
duke@435 | 124 | |
duke@435 | 125 | class GenerationIsInReservedClosure : public SpaceClosure { |
duke@435 | 126 | public: |
duke@435 | 127 | const void* _p; |
duke@435 | 128 | Space* sp; |
duke@435 | 129 | virtual void do_space(Space* s) { |
duke@435 | 130 | if (sp == NULL) { |
duke@435 | 131 | if (s->is_in_reserved(_p)) sp = s; |
duke@435 | 132 | } |
duke@435 | 133 | } |
duke@435 | 134 | GenerationIsInReservedClosure(const void* p) : _p(p), sp(NULL) {} |
duke@435 | 135 | }; |
duke@435 | 136 | |
duke@435 | 137 | class GenerationIsInClosure : public SpaceClosure { |
duke@435 | 138 | public: |
duke@435 | 139 | const void* _p; |
duke@435 | 140 | Space* sp; |
duke@435 | 141 | virtual void do_space(Space* s) { |
duke@435 | 142 | if (sp == NULL) { |
duke@435 | 143 | if (s->is_in(_p)) sp = s; |
duke@435 | 144 | } |
duke@435 | 145 | } |
duke@435 | 146 | GenerationIsInClosure(const void* p) : _p(p), sp(NULL) {} |
duke@435 | 147 | }; |
duke@435 | 148 | |
duke@435 | 149 | bool Generation::is_in(const void* p) const { |
duke@435 | 150 | GenerationIsInClosure blk(p); |
duke@435 | 151 | ((Generation*)this)->space_iterate(&blk); |
duke@435 | 152 | return blk.sp != NULL; |
duke@435 | 153 | } |
duke@435 | 154 | |
// Checked downcast to DefNewGeneration; legal only for the young
// generation kinds (DefNew, ParNew, ASParNew).
DefNewGeneration* Generation::as_DefNewGeneration() {
  assert((kind() == Generation::DefNew) ||
         (kind() == Generation::ParNew) ||
         (kind() == Generation::ASParNew),
    "Wrong youngest generation type");
  return (DefNewGeneration*) this;
}
duke@435 | 162 | |
duke@435 | 163 | Generation* Generation::next_gen() const { |
duke@435 | 164 | GenCollectedHeap* gch = GenCollectedHeap::heap(); |
duke@435 | 165 | int next = level() + 1; |
duke@435 | 166 | if (next < gch->_n_gens) { |
duke@435 | 167 | return gch->_gens[next]; |
duke@435 | 168 | } else { |
duke@435 | 169 | return NULL; |
duke@435 | 170 | } |
duke@435 | 171 | } |
duke@435 | 172 | |
duke@435 | 173 | size_t Generation::max_contiguous_available() const { |
duke@435 | 174 | // The largest number of contiguous free words in this or any higher generation. |
duke@435 | 175 | size_t max = 0; |
duke@435 | 176 | for (const Generation* gen = this; gen != NULL; gen = gen->next_gen()) { |
duke@435 | 177 | size_t avail = gen->contiguous_available(); |
duke@435 | 178 | if (avail > max) { |
duke@435 | 179 | max = avail; |
duke@435 | 180 | } |
duke@435 | 181 | } |
duke@435 | 182 | return max; |
duke@435 | 183 | } |
duke@435 | 184 | |
// A promotion attempt is deemed safe if the largest contiguous free block
// in this or any higher generation can absorb the worst-case promotion
// volume (max_promotion_in_bytes).
bool Generation::promotion_attempt_is_safe(size_t max_promotion_in_bytes) const {
  size_t available = max_contiguous_available();
  bool   res = (available >= max_promotion_in_bytes);
  if (PrintGC && Verbose) {
    gclog_or_tty->print_cr(
      "Generation: promo attempt is%s safe: available("SIZE_FORMAT") %s max_promo("SIZE_FORMAT")",
      res? "":" not", available, res? ">=":"<",
      max_promotion_in_bytes);
  }
  return res;
}
duke@435 | 196 | |
// Ignores "ref" and calls allocate().
// Copies obj into this generation; on allocation failure, delegates to
// the heap's failed-promotion handler. Returns NULL if promotion fails
// (or is forced to fail under promotion stress testing).
oop Generation::promote(oop obj, size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");

#ifndef PRODUCT
  // Test hook: deliberately fail promotions in non-product builds.
  if (Universe::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  HeapWord* result = allocate(obj_size, false);
  if (result != NULL) {
    Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
    return oop(result);
  } else {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    return gch->handle_failed_promotion(this, obj, obj_size);
  }
}
duke@435 | 216 | |
// Parallel promotion is not supported by the generic Generation;
// generations that participate in parallel scavenge override this.
oop Generation::par_promote(int thread_num,
                            oop obj, markOop m, size_t word_sz) {
  // Could do a bad general impl here that gets a lock. But no.
  ShouldNotCallThis();
  return NULL;
}
duke@435 | 223 | |
// Undo of a parallel promotion allocation is likewise unsupported here;
// overriding generations supply a real implementation.
void Generation::par_promote_alloc_undo(int thread_num,
                                        HeapWord* obj, size_t word_sz) {
  // Could do a bad general impl here that gets a lock. But no.
  guarantee(false, "No good general implementation.");
}
duke@435 | 229 | |
duke@435 | 230 | Space* Generation::space_containing(const void* p) const { |
duke@435 | 231 | GenerationIsInReservedClosure blk(p); |
duke@435 | 232 | // Cast away const |
duke@435 | 233 | ((Generation*)this)->space_iterate(&blk); |
duke@435 | 234 | return blk.sp; |
duke@435 | 235 | } |
duke@435 | 236 | |
duke@435 | 237 | // Some of these are mediocre general implementations. Should be |
duke@435 | 238 | // overridden to get better performance. |
duke@435 | 239 | |
duke@435 | 240 | class GenerationBlockStartClosure : public SpaceClosure { |
duke@435 | 241 | public: |
duke@435 | 242 | const void* _p; |
duke@435 | 243 | HeapWord* _start; |
duke@435 | 244 | virtual void do_space(Space* s) { |
duke@435 | 245 | if (_start == NULL && s->is_in_reserved(_p)) { |
duke@435 | 246 | _start = s->block_start(_p); |
duke@435 | 247 | } |
duke@435 | 248 | } |
duke@435 | 249 | GenerationBlockStartClosure(const void* p) { _p = p; _start = NULL; } |
duke@435 | 250 | }; |
duke@435 | 251 | |
duke@435 | 252 | HeapWord* Generation::block_start(const void* p) const { |
duke@435 | 253 | GenerationBlockStartClosure blk(p); |
duke@435 | 254 | // Cast away const |
duke@435 | 255 | ((Generation*)this)->space_iterate(&blk); |
duke@435 | 256 | return blk._start; |
duke@435 | 257 | } |
duke@435 | 258 | |
duke@435 | 259 | class GenerationBlockSizeClosure : public SpaceClosure { |
duke@435 | 260 | public: |
duke@435 | 261 | const HeapWord* _p; |
duke@435 | 262 | size_t size; |
duke@435 | 263 | virtual void do_space(Space* s) { |
duke@435 | 264 | if (size == 0 && s->is_in_reserved(_p)) { |
duke@435 | 265 | size = s->block_size(_p); |
duke@435 | 266 | } |
duke@435 | 267 | } |
duke@435 | 268 | GenerationBlockSizeClosure(const HeapWord* p) { _p = p; size = 0; } |
duke@435 | 269 | }; |
duke@435 | 270 | |
duke@435 | 271 | size_t Generation::block_size(const HeapWord* p) const { |
duke@435 | 272 | GenerationBlockSizeClosure blk(p); |
duke@435 | 273 | // Cast away const |
duke@435 | 274 | ((Generation*)this)->space_iterate(&blk); |
duke@435 | 275 | assert(blk.size > 0, "seems reasonable"); |
duke@435 | 276 | return blk.size; |
duke@435 | 277 | } |
duke@435 | 278 | |
duke@435 | 279 | class GenerationBlockIsObjClosure : public SpaceClosure { |
duke@435 | 280 | public: |
duke@435 | 281 | const HeapWord* _p; |
duke@435 | 282 | bool is_obj; |
duke@435 | 283 | virtual void do_space(Space* s) { |
duke@435 | 284 | if (!is_obj && s->is_in_reserved(_p)) { |
duke@435 | 285 | is_obj |= s->block_is_obj(_p); |
duke@435 | 286 | } |
duke@435 | 287 | } |
duke@435 | 288 | GenerationBlockIsObjClosure(const HeapWord* p) { _p = p; is_obj = false; } |
duke@435 | 289 | }; |
duke@435 | 290 | |
duke@435 | 291 | bool Generation::block_is_obj(const HeapWord* p) const { |
duke@435 | 292 | GenerationBlockIsObjClosure blk(p); |
duke@435 | 293 | // Cast away const |
duke@435 | 294 | ((Generation*)this)->space_iterate(&blk); |
duke@435 | 295 | return blk.is_obj; |
duke@435 | 296 | } |
duke@435 | 297 | |
duke@435 | 298 | class GenerationOopIterateClosure : public SpaceClosure { |
duke@435 | 299 | public: |
mgerdin@6978 | 300 | ExtendedOopClosure* _cl; |
duke@435 | 301 | virtual void do_space(Space* s) { |
mgerdin@6978 | 302 | s->oop_iterate(_cl); |
duke@435 | 303 | } |
mgerdin@6978 | 304 | GenerationOopIterateClosure(ExtendedOopClosure* cl) : |
mgerdin@6978 | 305 | _cl(cl) {} |
duke@435 | 306 | }; |
duke@435 | 307 | |
coleenp@4037 | 308 | void Generation::oop_iterate(ExtendedOopClosure* cl) { |
mgerdin@6978 | 309 | GenerationOopIterateClosure blk(cl); |
duke@435 | 310 | space_iterate(&blk); |
duke@435 | 311 | } |
duke@435 | 312 | |
// Apply cl to references into younger generations found in sp, using the
// shared heap's remembered set to locate candidate cards.
void Generation::younger_refs_in_space_iterate(Space* sp,
                                               OopsInGenClosure* cl) {
  GenRemSet* rs = SharedHeap::heap()->rem_set();
  rs->younger_refs_in_space_iterate(sp, cl);
}
duke@435 | 318 | |
duke@435 | 319 | class GenerationObjIterateClosure : public SpaceClosure { |
duke@435 | 320 | private: |
duke@435 | 321 | ObjectClosure* _cl; |
duke@435 | 322 | public: |
duke@435 | 323 | virtual void do_space(Space* s) { |
duke@435 | 324 | s->object_iterate(_cl); |
duke@435 | 325 | } |
duke@435 | 326 | GenerationObjIterateClosure(ObjectClosure* cl) : _cl(cl) {} |
duke@435 | 327 | }; |
duke@435 | 328 | |
duke@435 | 329 | void Generation::object_iterate(ObjectClosure* cl) { |
duke@435 | 330 | GenerationObjIterateClosure blk(cl); |
duke@435 | 331 | space_iterate(&blk); |
duke@435 | 332 | } |
duke@435 | 333 | |
jmasa@952 | 334 | class GenerationSafeObjIterateClosure : public SpaceClosure { |
jmasa@952 | 335 | private: |
jmasa@952 | 336 | ObjectClosure* _cl; |
jmasa@952 | 337 | public: |
jmasa@952 | 338 | virtual void do_space(Space* s) { |
jmasa@952 | 339 | s->safe_object_iterate(_cl); |
jmasa@952 | 340 | } |
jmasa@952 | 341 | GenerationSafeObjIterateClosure(ObjectClosure* cl) : _cl(cl) {} |
jmasa@952 | 342 | }; |
jmasa@952 | 343 | |
jmasa@952 | 344 | void Generation::safe_object_iterate(ObjectClosure* cl) { |
jmasa@952 | 345 | GenerationSafeObjIterateClosure blk(cl); |
jmasa@952 | 346 | space_iterate(&blk); |
jmasa@952 | 347 | } |
jmasa@952 | 348 | |
duke@435 | 349 | void Generation::prepare_for_compaction(CompactPoint* cp) { |
duke@435 | 350 | // Generic implementation, can be specialized |
duke@435 | 351 | CompactibleSpace* space = first_compaction_space(); |
duke@435 | 352 | while (space != NULL) { |
duke@435 | 353 | space->prepare_for_compaction(cp); |
duke@435 | 354 | space = space->next_compaction_space(); |
duke@435 | 355 | } |
duke@435 | 356 | } |
duke@435 | 357 | |
duke@435 | 358 | class AdjustPointersClosure: public SpaceClosure { |
duke@435 | 359 | public: |
duke@435 | 360 | void do_space(Space* sp) { |
duke@435 | 361 | sp->adjust_pointers(); |
duke@435 | 362 | } |
duke@435 | 363 | }; |
duke@435 | 364 | |
// Mark-compact phase: update all pointers within this generation to point
// at the new locations of forwarded objects.
void Generation::adjust_pointers() {
  // Note that this is done over all spaces, not just the compactible
  // ones.
  AdjustPointersClosure blk;
  space_iterate(&blk, true);
}
duke@435 | 371 | |
duke@435 | 372 | void Generation::compact() { |
duke@435 | 373 | CompactibleSpace* sp = first_compaction_space(); |
duke@435 | 374 | while (sp != NULL) { |
duke@435 | 375 | sp->compact(); |
duke@435 | 376 | sp = sp->next_compaction_space(); |
duke@435 | 377 | } |
duke@435 | 378 | } |
duke@435 | 379 | |
// Construct a card-table-based generation: sets up the block offset
// shared array and resizes the remembered set's covered region to match
// the initially committed part of the generation.
CardGeneration::CardGeneration(ReservedSpace rs, size_t initial_byte_size,
                               int level,
                               GenRemSet* remset) :
  Generation(rs, initial_byte_size, level), _rs(remset),
  _shrink_factor(0), _min_heap_delta_bytes(), _capacity_at_prologue(),
  _used_at_prologue()
{
  HeapWord* start = (HeapWord*)rs.base();
  size_t reserved_byte_size = rs.size();
  assert((uintptr_t(start) & 3) == 0, "bad alignment");
  assert((reserved_byte_size & 3) == 0, "bad alignment");
  MemRegion reserved_mr(start, heap_word_size(reserved_byte_size));
  _bts = new BlockOffsetSharedArray(reserved_mr,
                                    heap_word_size(initial_byte_size));
  MemRegion committed_mr(start, heap_word_size(initial_byte_size));
  _rs->resize_covered_region(committed_mr);
  if (_bts == NULL)
    vm_exit_during_initialization("Could not allocate a BlockOffsetArray");

  // Verify that the start and end of this generation is the start of a card.
  // If this wasn't true, a single card could span more than one generation,
  // which would cause problems when we commit/uncommit memory, and when we
  // clear and dirty cards.
  guarantee(_rs->is_aligned(reserved_mr.start()), "generation must be card aligned");
  if (reserved_mr.end() != Universe::heap()->reserved_region().end()) {
    // Don't check at the very end of the heap as we'll assert that we're probing off
    // the end if we try.
    guarantee(_rs->is_aligned(reserved_mr.end()), "generation must be card aligned");
  }
  // Resizing-policy bookkeeping starts from the initial committed size.
  _min_heap_delta_bytes = MinHeapDeltaBytes;
  _capacity_at_prologue = initial_byte_size;
  _used_at_prologue = 0;
}
duke@435 | 413 | |
// Best-effort expansion of this generation by at least "bytes", preferring
// "expand_bytes" when that is larger. Falls back to growing by the smaller
// amount, then to growing to the full reserved size. Returns true if any
// growth succeeded (or if bytes == 0).
bool CardGeneration::expand(size_t bytes, size_t expand_bytes) {
  assert_locked_or_safepoint(Heap_lock);
  if (bytes == 0) {
    return true;  // That's what grow_by(0) would return
  }
  size_t aligned_bytes  = ReservedSpace::page_align_size_up(bytes);
  if (aligned_bytes == 0){
    // The alignment caused the number of bytes to wrap.  An expand_by(0) will
    // return true with the implication that an expansion was done when it
    // was not.  A call to expand implies a best effort to expand by "bytes"
    // but not a guarantee.  Align down to give a best effort.  This is likely
    // the most that the generation can expand since it has some capacity to
    // start with.
    aligned_bytes = ReservedSpace::page_align_size_down(bytes);
  }
  size_t aligned_expand_bytes = ReservedSpace::page_align_size_up(expand_bytes);
  bool success = false;
  // Try the larger preferred amount first, then the required amount,
  // then whatever is left of the reserved space.
  if (aligned_expand_bytes > aligned_bytes) {
    success = grow_by(aligned_expand_bytes);
  }
  if (!success) {
    success = grow_by(aligned_bytes);
  }
  if (!success) {
    success = grow_to_reserved();
  }
  if (PrintGC && Verbose) {
    if (success && GC_locker::is_active_and_needs_gc()) {
      gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
    }
  }

  return success;
}
jmasa@706 | 448 | |
duke@435 | 449 | |
duke@435 | 450 | // No young generation references, clear this generation's cards. |
duke@435 | 451 | void CardGeneration::clear_remembered_set() { |
duke@435 | 452 | _rs->clear(reserved()); |
duke@435 | 453 | } |
duke@435 | 454 | |
duke@435 | 455 | |
duke@435 | 456 | // Objects in this generation may have moved, invalidate this |
duke@435 | 457 | // generation's cards. |
duke@435 | 458 | void CardGeneration::invalidate_remembered_set() { |
duke@435 | 459 | _rs->invalidate(used_region()); |
duke@435 | 460 | } |
duke@435 | 461 | |
duke@435 | 462 | |
// Recompute this generation's committed size after a collection:
// expand if free space fell below MinHeapFreeRatio, otherwise consider
// shrinking toward MaxHeapFreeRatio. Shrinking is damped over successive
// calls via _shrink_factor (0% -> 10% -> 40% -> 100%) so that a System.gc()
// between application phases does not bounce the heap size.
void CardGeneration::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  size_t current_shrink_factor = _shrink_factor;
  // Reset for next time; reinstated below only if we actually shrink.
  _shrink_factor = 0;

  // We don't have floating point command-line arguments
  // Note: argument processing ensures that MinHeapFreeRatio < 100.
  const double minimum_free_percentage = MinHeapFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  // Compute some numbers about the state of the heap.
  const size_t used_after_gc = used();
  const size_t capacity_after_gc = capacity();

  // Smallest capacity that keeps used() within maximum_used_percentage,
  // clamped so the double->size_t conversion cannot overflow.
  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity = (size_t)MIN2(min_tmp, double(max_uintx));
  // Don't shrink less than the initial generation size
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  spec()->init_size());
  assert(used_after_gc <= minimum_desired_capacity, "sanity check");

  if (PrintGC && Verbose) {
    const size_t free_after_gc = free();
    const double free_percentage = ((double)free_after_gc) / capacity_after_gc;
    gclog_or_tty->print_cr("TenuredGeneration::compute_new_size: ");
    gclog_or_tty->print_cr(" "
      " minimum_free_percentage: %6.2f"
      " maximum_used_percentage: %6.2f",
      minimum_free_percentage,
      maximum_used_percentage);
    gclog_or_tty->print_cr(" "
      " free_after_gc : %6.1fK"
      " used_after_gc : %6.1fK"
      " capacity_after_gc : %6.1fK",
      free_after_gc / (double) K,
      used_after_gc / (double) K,
      capacity_after_gc / (double) K);
    gclog_or_tty->print_cr(" "
      " free_percentage: %6.2f",
      free_percentage);
  }

  if (capacity_after_gc < minimum_desired_capacity) {
    // If we have less free space than we want then expand
    size_t expand_bytes = minimum_desired_capacity - capacity_after_gc;
    // Don't expand unless it's significant
    if (expand_bytes >= _min_heap_delta_bytes) {
      expand(expand_bytes, 0); // safe if expansion fails
    }
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("    expanding:"
        " minimum_desired_capacity: %6.1fK"
        " expand_bytes: %6.1fK"
        " _min_heap_delta_bytes: %6.1fK",
        minimum_desired_capacity / (double) K,
        expand_bytes / (double) K,
        _min_heap_delta_bytes / (double) K);
    }
    // Expanded (or close enough): no shrinking considered this cycle.
    return;
  }

  // No expansion, now see if we want to shrink
  size_t shrink_bytes = 0;
  // We would never want to shrink more than this
  size_t max_shrink_bytes = capacity_after_gc - minimum_desired_capacity;

  if (MaxHeapFreeRatio < 100) {
    const double maximum_free_percentage = MaxHeapFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    // Largest capacity that keeps used() above minimum_used_percentage,
    // clamped against overflow and against the initial size.
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(max_uintx));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    spec()->init_size());
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr(" "
        " maximum_free_percentage: %6.2f"
        " minimum_used_percentage: %6.2f",
        maximum_free_percentage,
        minimum_used_percentage);
      gclog_or_tty->print_cr(" "
        " _capacity_at_prologue: %6.1fK"
        " minimum_desired_capacity: %6.1fK"
        " maximum_desired_capacity: %6.1fK",
        _capacity_at_prologue / (double) K,
        minimum_desired_capacity / (double) K,
        maximum_desired_capacity / (double) K);
    }
    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_after_gc > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_bytes = capacity_after_gc - maximum_desired_capacity;
      // We don't want shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the heap up again for the next phase.  So we
      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call.  But if we recompute
      // size without shrinking, it goes back to 0%.
      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
      assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
      if (current_shrink_factor == 0) {
        _shrink_factor = 10;
      } else {
        _shrink_factor = MIN2(current_shrink_factor * 4, (size_t) 100);
      }
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr(" "
          " shrinking:"
          " initSize: %.1fK"
          " maximum_desired_capacity: %.1fK",
          spec()->init_size() / (double) K,
          maximum_desired_capacity / (double) K);
        gclog_or_tty->print_cr(" "
          " shrink_bytes: %.1fK"
          " current_shrink_factor: %d"
          " new shrink factor: %d"
          " _min_heap_delta_bytes: %.1fK",
          shrink_bytes / (double) K,
          current_shrink_factor,
          _shrink_factor,
          _min_heap_delta_bytes / (double) K);
      }
    }
  }

  if (capacity_after_gc > _capacity_at_prologue) {
    // We might have expanded for promotions, in which case we might want to
    // take back that expansion if there's room after GC.  That keeps us from
    // stretching the heap with promotions when there's plenty of room.
    size_t expansion_for_promotion = capacity_after_gc - _capacity_at_prologue;
    expansion_for_promotion = MIN2(expansion_for_promotion, max_shrink_bytes);
    // We have two shrinking computations, take the largest
    shrink_bytes = MAX2(shrink_bytes, expansion_for_promotion);
    assert(shrink_bytes <= max_shrink_bytes, "invalid shrink size");
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr(" "
        " aggressive shrinking:"
        " _capacity_at_prologue: %.1fK"
        " capacity_after_gc: %.1fK"
        " expansion_for_promotion: %.1fK"
        " shrink_bytes: %.1fK",
        capacity_after_gc / (double) K,
        _capacity_at_prologue / (double) K,
        expansion_for_promotion / (double) K,
        shrink_bytes / (double) K);
    }
  }
  // Don't shrink unless it's significant
  if (shrink_bytes >= _min_heap_delta_bytes) {
    shrink(shrink_bytes);
  }
}
jmasa@4900 | 616 | |
// Currently nothing to do.
void CardGeneration::prepare_for_verify() {}
duke@435 | 619 | |
duke@435 | 620 | |
// Full collection of this generation via serial mark-sweep at a safepoint.
// The full/size/is_tlab parameters are part of the generic collect()
// interface; this implementation does not consult them directly.
void OneContigSpaceCardGeneration::collect(bool   full,
                                           bool   clear_all_soft_refs,
                                           size_t size,
                                           bool   is_tlab) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  SpecializationStats::clear();
  // Temporarily expand the span of our ref processor, so
  // refs discovery is over the entire heap, not just this generation
  ReferenceProcessorSpanMutator
    x(ref_processor(), gch->reserved_region());

  // Bracket the collection with GC timer/tracer events (start must be
  // registered before the tracer reports the GC start).
  STWGCTimer* gc_timer = GenMarkSweep::gc_timer();
  gc_timer->register_gc_start();

  SerialOldTracer* gc_tracer = GenMarkSweep::gc_tracer();
  gc_tracer->report_gc_start(gch->gc_cause(), gc_timer->gc_start());

  GenMarkSweep::invoke_at_safepoint(_level, ref_processor(), clear_all_soft_refs);

  gc_timer->register_gc_end();

  gc_tracer->report_gc_end(gc_timer->gc_end(), gc_timer->time_partitions());

  SpecializationStats::print();
}
duke@435 | 647 | |
duke@435 | 648 | HeapWord* |
duke@435 | 649 | OneContigSpaceCardGeneration::expand_and_allocate(size_t word_size, |
duke@435 | 650 | bool is_tlab, |
duke@435 | 651 | bool parallel) { |
duke@435 | 652 | assert(!is_tlab, "OneContigSpaceCardGeneration does not support TLAB allocation"); |
duke@435 | 653 | if (parallel) { |
duke@435 | 654 | MutexLocker x(ParGCRareEvent_lock); |
duke@435 | 655 | HeapWord* result = NULL; |
duke@435 | 656 | size_t byte_size = word_size * HeapWordSize; |
duke@435 | 657 | while (true) { |
duke@435 | 658 | expand(byte_size, _min_heap_delta_bytes); |
duke@435 | 659 | if (GCExpandToAllocateDelayMillis > 0) { |
duke@435 | 660 | os::sleep(Thread::current(), GCExpandToAllocateDelayMillis, false); |
duke@435 | 661 | } |
duke@435 | 662 | result = _the_space->par_allocate(word_size); |
duke@435 | 663 | if ( result != NULL) { |
duke@435 | 664 | return result; |
duke@435 | 665 | } else { |
duke@435 | 666 | // If there's not enough expansion space available, give up. |
duke@435 | 667 | if (_virtual_space.uncommitted_size() < byte_size) { |
duke@435 | 668 | return NULL; |
duke@435 | 669 | } |
duke@435 | 670 | // else try again |
duke@435 | 671 | } |
duke@435 | 672 | } |
duke@435 | 673 | } else { |
duke@435 | 674 | expand(word_size*HeapWordSize, _min_heap_delta_bytes); |
duke@435 | 675 | return _the_space->allocate(word_size); |
duke@435 | 676 | } |
duke@435 | 677 | } |
duke@435 | 678 | |
jmasa@706 | 679 | bool OneContigSpaceCardGeneration::expand(size_t bytes, size_t expand_bytes) { |
duke@435 | 680 | GCMutexLocker x(ExpandHeap_lock); |
jmasa@706 | 681 | return CardGeneration::expand(bytes, expand_bytes); |
duke@435 | 682 | } |
duke@435 | 683 | |
duke@435 | 684 | |
duke@435 | 685 | void OneContigSpaceCardGeneration::shrink(size_t bytes) { |
duke@435 | 686 | assert_locked_or_safepoint(ExpandHeap_lock); |
duke@435 | 687 | size_t size = ReservedSpace::page_align_size_down(bytes); |
duke@435 | 688 | if (size > 0) { |
duke@435 | 689 | shrink_by(size); |
duke@435 | 690 | } |
duke@435 | 691 | } |
duke@435 | 692 | |
duke@435 | 693 | |
duke@435 | 694 | size_t OneContigSpaceCardGeneration::capacity() const { |
duke@435 | 695 | return _the_space->capacity(); |
duke@435 | 696 | } |
duke@435 | 697 | |
duke@435 | 698 | |
duke@435 | 699 | size_t OneContigSpaceCardGeneration::used() const { |
duke@435 | 700 | return _the_space->used(); |
duke@435 | 701 | } |
duke@435 | 702 | |
duke@435 | 703 | |
duke@435 | 704 | size_t OneContigSpaceCardGeneration::free() const { |
duke@435 | 705 | return _the_space->free(); |
duke@435 | 706 | } |
duke@435 | 707 | |
duke@435 | 708 | MemRegion OneContigSpaceCardGeneration::used_region() const { |
duke@435 | 709 | return the_space()->used_region(); |
duke@435 | 710 | } |
duke@435 | 711 | |
duke@435 | 712 | size_t OneContigSpaceCardGeneration::unsafe_max_alloc_nogc() const { |
duke@435 | 713 | return _the_space->free(); |
duke@435 | 714 | } |
duke@435 | 715 | |
duke@435 | 716 | size_t OneContigSpaceCardGeneration::contiguous_available() const { |
duke@435 | 717 | return _the_space->free() + _virtual_space.uncommitted_size(); |
duke@435 | 718 | } |
duke@435 | 719 | |
duke@435 | 720 | bool OneContigSpaceCardGeneration::grow_by(size_t bytes) { |
duke@435 | 721 | assert_locked_or_safepoint(ExpandHeap_lock); |
duke@435 | 722 | bool result = _virtual_space.expand_by(bytes); |
duke@435 | 723 | if (result) { |
duke@435 | 724 | size_t new_word_size = |
duke@435 | 725 | heap_word_size(_virtual_space.committed_size()); |
duke@435 | 726 | MemRegion mr(_the_space->bottom(), new_word_size); |
duke@435 | 727 | // Expand card table |
duke@435 | 728 | Universe::heap()->barrier_set()->resize_covered_region(mr); |
duke@435 | 729 | // Expand shared block offset array |
duke@435 | 730 | _bts->resize(new_word_size); |
duke@435 | 731 | |
duke@435 | 732 | // Fix for bug #4668531 |
jmasa@698 | 733 | if (ZapUnusedHeapArea) { |
jmasa@698 | 734 | MemRegion mangle_region(_the_space->end(), |
jmasa@698 | 735 | (HeapWord*)_virtual_space.high()); |
jmasa@698 | 736 | SpaceMangler::mangle_region(mangle_region); |
jmasa@698 | 737 | } |
duke@435 | 738 | |
duke@435 | 739 | // Expand space -- also expands space's BOT |
duke@435 | 740 | // (which uses (part of) shared array above) |
duke@435 | 741 | _the_space->set_end((HeapWord*)_virtual_space.high()); |
duke@435 | 742 | |
duke@435 | 743 | // update the space and generation capacity counters |
duke@435 | 744 | update_counters(); |
duke@435 | 745 | |
duke@435 | 746 | if (Verbose && PrintGC) { |
duke@435 | 747 | size_t new_mem_size = _virtual_space.committed_size(); |
duke@435 | 748 | size_t old_mem_size = new_mem_size - bytes; |
duke@435 | 749 | gclog_or_tty->print_cr("Expanding %s from " SIZE_FORMAT "K by " |
duke@435 | 750 | SIZE_FORMAT "K to " SIZE_FORMAT "K", |
duke@435 | 751 | name(), old_mem_size/K, bytes/K, new_mem_size/K); |
duke@435 | 752 | } |
duke@435 | 753 | } |
duke@435 | 754 | return result; |
duke@435 | 755 | } |
duke@435 | 756 | |
duke@435 | 757 | |
duke@435 | 758 | bool OneContigSpaceCardGeneration::grow_to_reserved() { |
duke@435 | 759 | assert_locked_or_safepoint(ExpandHeap_lock); |
duke@435 | 760 | bool success = true; |
duke@435 | 761 | const size_t remaining_bytes = _virtual_space.uncommitted_size(); |
duke@435 | 762 | if (remaining_bytes > 0) { |
duke@435 | 763 | success = grow_by(remaining_bytes); |
duke@435 | 764 | DEBUG_ONLY(if (!success) warning("grow to reserved failed");) |
duke@435 | 765 | } |
duke@435 | 766 | return success; |
duke@435 | 767 | } |
duke@435 | 768 | |
duke@435 | 769 | void OneContigSpaceCardGeneration::shrink_by(size_t bytes) { |
duke@435 | 770 | assert_locked_or_safepoint(ExpandHeap_lock); |
duke@435 | 771 | // Shrink committed space |
duke@435 | 772 | _virtual_space.shrink_by(bytes); |
duke@435 | 773 | // Shrink space; this also shrinks the space's BOT |
duke@435 | 774 | _the_space->set_end((HeapWord*) _virtual_space.high()); |
duke@435 | 775 | size_t new_word_size = heap_word_size(_the_space->capacity()); |
duke@435 | 776 | // Shrink the shared block offset array |
duke@435 | 777 | _bts->resize(new_word_size); |
duke@435 | 778 | MemRegion mr(_the_space->bottom(), new_word_size); |
duke@435 | 779 | // Shrink the card table |
duke@435 | 780 | Universe::heap()->barrier_set()->resize_covered_region(mr); |
duke@435 | 781 | |
duke@435 | 782 | if (Verbose && PrintGC) { |
duke@435 | 783 | size_t new_mem_size = _virtual_space.committed_size(); |
duke@435 | 784 | size_t old_mem_size = new_mem_size + bytes; |
duke@435 | 785 | gclog_or_tty->print_cr("Shrinking %s from " SIZE_FORMAT "K to " SIZE_FORMAT "K", |
duke@435 | 786 | name(), old_mem_size/K, new_mem_size/K); |
duke@435 | 787 | } |
duke@435 | 788 | } |
duke@435 | 789 | |
duke@435 | 790 | // Currently nothing to do. |
duke@435 | 791 | void OneContigSpaceCardGeneration::prepare_for_verify() {} |
duke@435 | 792 | |
duke@435 | 793 | |
ysr@1486 | 794 | // Override for a card-table generation with one contiguous |
ysr@1486 | 795 | // space. NOTE: For reasons that are lost in the fog of history, |
ysr@1486 | 796 | // this code is used when you iterate over perm gen objects, |
ysr@1486 | 797 | // even when one uses CDS, where the perm gen has a couple of |
ysr@1486 | 798 | // other spaces; this is because CompactingPermGenGen derives |
ysr@1486 | 799 | // from OneContigSpaceCardGeneration. This should be cleaned up, |
ysr@1486 | 800 | // see CR 6897789.. |
duke@435 | 801 | void OneContigSpaceCardGeneration::object_iterate(ObjectClosure* blk) { |
duke@435 | 802 | _the_space->object_iterate(blk); |
duke@435 | 803 | } |
duke@435 | 804 | |
duke@435 | 805 | void OneContigSpaceCardGeneration::space_iterate(SpaceClosure* blk, |
duke@435 | 806 | bool usedOnly) { |
duke@435 | 807 | blk->do_space(_the_space); |
duke@435 | 808 | } |
duke@435 | 809 | |
duke@435 | 810 | void OneContigSpaceCardGeneration::younger_refs_iterate(OopsInGenClosure* blk) { |
duke@435 | 811 | blk->set_generation(this); |
duke@435 | 812 | younger_refs_in_space_iterate(_the_space, blk); |
duke@435 | 813 | blk->reset_generation(); |
duke@435 | 814 | } |
duke@435 | 815 | |
duke@435 | 816 | void OneContigSpaceCardGeneration::save_marks() { |
duke@435 | 817 | _the_space->set_saved_mark(); |
duke@435 | 818 | } |
duke@435 | 819 | |
duke@435 | 820 | |
duke@435 | 821 | void OneContigSpaceCardGeneration::reset_saved_marks() { |
duke@435 | 822 | _the_space->reset_saved_mark(); |
duke@435 | 823 | } |
duke@435 | 824 | |
duke@435 | 825 | |
duke@435 | 826 | bool OneContigSpaceCardGeneration::no_allocs_since_save_marks() { |
duke@435 | 827 | return _the_space->saved_mark_at_top(); |
duke@435 | 828 | } |
duke@435 | 829 | |
duke@435 | 830 | #define OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN(OopClosureType, nv_suffix) \ |
duke@435 | 831 | \ |
duke@435 | 832 | void OneContigSpaceCardGeneration:: \ |
duke@435 | 833 | oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \ |
duke@435 | 834 | blk->set_generation(this); \ |
duke@435 | 835 | _the_space->oop_since_save_marks_iterate##nv_suffix(blk); \ |
duke@435 | 836 | blk->reset_generation(); \ |
duke@435 | 837 | save_marks(); \ |
duke@435 | 838 | } |
duke@435 | 839 | |
duke@435 | 840 | ALL_SINCE_SAVE_MARKS_CLOSURES(OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN) |
duke@435 | 841 | |
duke@435 | 842 | #undef OneContig_SINCE_SAVE_MARKS_ITERATE_DEFN |
duke@435 | 843 | |
duke@435 | 844 | |
duke@435 | 845 | void OneContigSpaceCardGeneration::gc_epilogue(bool full) { |
duke@435 | 846 | _last_gc = WaterMark(the_space(), the_space()->top()); |
duke@435 | 847 | |
duke@435 | 848 | // update the generation and space performance counters |
duke@435 | 849 | update_counters(); |
jmasa@698 | 850 | if (ZapUnusedHeapArea) { |
jmasa@698 | 851 | the_space()->check_mangled_unused_area_complete(); |
jmasa@698 | 852 | } |
jmasa@698 | 853 | } |
jmasa@698 | 854 | |
jmasa@698 | 855 | void OneContigSpaceCardGeneration::record_spaces_top() { |
jmasa@698 | 856 | assert(ZapUnusedHeapArea, "Not mangling unused space"); |
jmasa@698 | 857 | the_space()->set_top_for_allocations(); |
duke@435 | 858 | } |
duke@435 | 859 | |
brutisso@3711 | 860 | void OneContigSpaceCardGeneration::verify() { |
brutisso@3711 | 861 | the_space()->verify(); |
duke@435 | 862 | } |
duke@435 | 863 | |
duke@435 | 864 | void OneContigSpaceCardGeneration::print_on(outputStream* st) const { |
duke@435 | 865 | Generation::print_on(st); |
duke@435 | 866 | st->print(" the"); |
duke@435 | 867 | the_space()->print_on(st); |
duke@435 | 868 | } |