src/share/vm/memory/defNewGeneration.cpp

changeset 791:1ee8caae33af
parent    782:60fb9c4db4e6
parent    704:850fdf70db2b
child     888:c96030fff130
--- a/src/share/vm/memory/defNewGeneration.cpp
+++ b/src/share/vm/memory/defNewGeneration.cpp
@@ -1,7 +1,7 @@
 /*
- * Copyright 2001-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2001-2008 Sun Microsystems, Inc. All Rights Reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
@@ -170,19 +170,29 @@
   _from_counters = new CSpaceCounters("s0", 1, _max_survivor_size, _from_space,
                                       _gen_counters);
   _to_counters = new CSpaceCounters("s1", 2, _max_survivor_size, _to_space,
                                     _gen_counters);
 
-  compute_space_boundaries(0);
+  compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle);
   update_counters();
   _next_gen = NULL;
   _tenuring_threshold = MaxTenuringThreshold;
   _pretenure_size_threshold_words = PretenureSizeThreshold >> LogHeapWordSize;
 }
 
-void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size) {
-  uintx alignment = GenCollectedHeap::heap()->collector_policy()->min_alignment();
+void DefNewGeneration::compute_space_boundaries(uintx minimum_eden_size,
+                                                bool clear_space,
+                                                bool mangle_space) {
+  uintx alignment =
+    GenCollectedHeap::heap()->collector_policy()->min_alignment();
+
+  // If the spaces are being cleared (only done at heap initialization
+  // currently), the survivor spaces need not be empty.
+  // Otherwise, no care is taken for used areas in the survivor spaces,
+  // so check.
+  assert(clear_space || (to()->is_empty() && from()->is_empty()),
+    "Initialization of the survivor spaces assumes these are empty");
 
   // Compute sizes
   uintx size = _virtual_space.committed_size();
   uintx survivor_size = compute_survivor_size(size, alignment);
   uintx eden_size = size - (2*survivor_size);
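The new clear_space/mangle_space parameters are passed with symbolic
SpaceDecorator names rather than bare true/false at the call sites. As a
rough sketch (the actual definitions live in spaceDecorator.hpp, not in this
changeset), the decorator is just a holder for named boolean constants:

    // Sketch only: named boolean constants, so call sites read as
    // compute_space_boundaries(0, SpaceDecorator::Clear, SpaceDecorator::Mangle)
    // rather than compute_space_boundaries(0, true, true).
    class SpaceDecorator {
     public:
      static const bool Clear      = true;
      static const bool DontClear  = false;
      static const bool Mangle     = true;
      static const bool DontMangle = false;
    };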
@@ -212,30 +222,49 @@
 
   MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)from_start);
   MemRegion fromMR((HeapWord*)from_start, (HeapWord*)to_start);
   MemRegion toMR  ((HeapWord*)to_start, (HeapWord*)to_end);
 
-  eden()->set_bounds(edenMR);
-  if (minimum_eden_size == 0) {
-    // The "minimum_eden_size" is really the amount of eden occupied by
-    // allocated objects -- if this is zero, then we can clear the space.
-    eden()->clear();
-  } else {
-    // Otherwise, we will not have cleared eden. This can cause newly
-    // expanded space not to be mangled if using ZapUnusedHeapArea.
-    // We explicitly do such mangling here.
-    if (ZapUnusedHeapArea) {
-      eden()->mangle_unused_area();
-    }
-  }
-  from()->initialize(fromMR, true /* clear */);
-  to()->initialize(  toMR, true /* clear */);
-  // Make sure we compact eden, then from.
+  // A minimum eden size implies that there is a part of eden that
+  // is being used and that affects the initialization of any
+  // newly formed eden.
+  bool live_in_eden = minimum_eden_size > 0;
+
+  // If not clearing the spaces, do some checking to verify that
+  // the spaces are already mangled.
+  if (!clear_space) {
+    // Must check mangling before the spaces are reshaped. Otherwise,
+    // the bottom or end of one space may have moved into another and
+    // a failure of the check may not correctly indicate which space
+    // is not properly mangled.
+    if (ZapUnusedHeapArea) {
+      HeapWord* limit = (HeapWord*) _virtual_space.high();
+      eden()->check_mangled_unused_area(limit);
+      from()->check_mangled_unused_area(limit);
+      to()->check_mangled_unused_area(limit);
+    }
+  }
+
+  // Reset the spaces for their new regions.
+  eden()->initialize(edenMR,
+                     clear_space && !live_in_eden,
+                     SpaceDecorator::Mangle);
+  // If clear_space and live_in_eden, we will not have cleared any
+  // portion of eden above its top. This can cause newly
+  // expanded space not to be mangled if using ZapUnusedHeapArea.
+  // We explicitly do such mangling here.
+  if (ZapUnusedHeapArea && clear_space && live_in_eden && mangle_space) {
+    eden()->mangle_unused_area();
+  }
+  from()->initialize(fromMR, clear_space, mangle_space);
+  to()->initialize(toMR, clear_space, mangle_space);
+
+  // Set next compaction spaces.
+  eden()->set_next_compaction_space(from());
   // The to-space is normally empty before a compaction so need
   // not be considered. The exception is during promotion
   // failure handling when to-space can contain live objects.
-  eden()->set_next_compaction_space(from());
   from()->set_next_compaction_space(NULL);
 }
 
 void DefNewGeneration::swap_spaces() {
   ContiguousSpace* s = from();
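The check_mangled_unused_area(limit) calls above verify, before the spaces
are reshaped, that every word between a space's top and the given limit
still carries the debug fill pattern. A plain-C++ sketch of that check, with
the 0xBAADBABE-style fill value and the helper name assumed rather than
taken from this changeset:

    #include <cassert>
    #include <cstdint>

    // Sketch: words in [top, limit) were mangled earlier and must not
    // have been written to since; any other value means some code used
    // the "unused" area without updating top.
    const uint32_t kBadHeapWord = 0xBAADBABE;  // assumed fill pattern
    inline void check_mangled(const uint32_t* top, const uint32_t* limit) {
      for (const uint32_t* p = top; p < limit; ++p) {
        assert(*p == kBadHeapWord && "unused area lost its mangle pattern");
      }
    }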
@@ -254,11 +283,20 @@
   }
 }
 
 bool DefNewGeneration::expand(size_t bytes) {
   MutexLocker x(ExpandHeap_lock);
+  HeapWord* prev_high = (HeapWord*) _virtual_space.high();
   bool success = _virtual_space.expand_by(bytes);
+  if (success && ZapUnusedHeapArea) {
+    // Mangle newly committed space immediately because it
+    // can be done here more simply than after the new
+    // spaces have been computed.
+    HeapWord* new_high = (HeapWord*) _virtual_space.high();
+    MemRegion mangle_region(prev_high, new_high);
+    SpaceMangler::mangle_region(mangle_region);
+  }
 
   // Do not attempt an expand-to-the-reserve size. The
   // request should properly observe the maximum size of
   // the generation so an expand-to-reserve should be
   // unnecessary. Also a second call to expand-to-reserve
@@ -266,11 +304,12 @@
   // For example if the first expand fails for unknown reasons,
   // but the second succeeds and expands the heap to its maximum
   // value.
   if (GC_locker::is_active()) {
     if (PrintGC && Verbose) {
-      gclog_or_tty->print_cr("Garbage collection disabled, expanded heap instead");
+      gclog_or_tty->print_cr("Garbage collection disabled, "
+        "expanded heap instead");
     }
   }
 
   return success;
 }
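SpaceMangler::mangle_region is what stamps the freshly committed range with
the bad-heap pattern. A minimal sketch of such a fill, assuming a
Copy::fill_to_words-style helper (signature assumed from context, not quoted
from this changeset):

    // Sketch: overwrite every word of the region with the debug value
    // so reads of never-allocated heap stand out immediately.
    void mangle_region_sketch(MemRegion mr) {
      Copy::fill_to_words(mr.start(), mr.word_size(), badHeapWord);
    }

Mangling at commit time keeps the invariant that everything above a space's
top is already mangled, which is what lets the later
check_mangled_unused_area calls stay cheap.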
@@ -330,20 +369,28 @@
     assert(change % alignment == 0, "just checking");
     _virtual_space.shrink_by(change);
     changed = true;
   }
   if (changed) {
-    compute_space_boundaries(eden()->used());
-    MemRegion cmr((HeapWord*)_virtual_space.low(), (HeapWord*)_virtual_space.high());
+    // The spaces have already been mangled at this point but
+    // may not have been cleared (set top = bottom) and should be.
+    // Mangling was done when the heap was being expanded.
+    compute_space_boundaries(eden()->used(),
+                             SpaceDecorator::Clear,
+                             SpaceDecorator::DontMangle);
+    MemRegion cmr((HeapWord*)_virtual_space.low(),
+                  (HeapWord*)_virtual_space.high());
     Universe::heap()->barrier_set()->resize_covered_region(cmr);
     if (Verbose && PrintGC) {
       size_t new_size_after = _virtual_space.committed_size();
       size_t eden_size_after = eden()->capacity();
       size_t survivor_size_after = from()->capacity();
-      gclog_or_tty->print("New generation size " SIZE_FORMAT "K->" SIZE_FORMAT "K [eden="
+      gclog_or_tty->print("New generation size " SIZE_FORMAT "K->"
+                          SIZE_FORMAT "K [eden="
                           SIZE_FORMAT "K,survivor=" SIZE_FORMAT "K]",
-                          new_size_before/K, new_size_after/K, eden_size_after/K, survivor_size_after/K);
+                          new_size_before/K, new_size_after/K,
+                          eden_size_after/K, survivor_size_after/K);
       if (WizardMode) {
         gclog_or_tty->print("[allowed " SIZE_FORMAT "K extra for %d threads]",
                             thread_increase_size/K, threads_count);
       }
       gclog_or_tty->cr();
@@ -484,11 +531,11 @@
   // These can be shared for all code paths
   IsAliveClosure is_alive(this);
   ScanWeakRefClosure scan_weak_ref(this);
 
   age_table()->clear();
-  to()->clear();
+  to()->clear(SpaceDecorator::Mangle);
 
   gch->rem_set()->prepare_for_younger_refs_iterate(false);
 
   assert(gch->no_allocs_since_save_marks(0),
          "save marks have not been newly set.");
@@ -529,12 +576,22 @@
   FastKeepAliveClosure keep_alive(this, &scan_weak_ref);
   ref_processor()->process_discovered_references(
     soft_ref_policy, &is_alive, &keep_alive, &evacuate_followers, NULL);
   if (!promotion_failed()) {
     // Swap the survivor spaces.
-    eden()->clear();
-    from()->clear();
+    eden()->clear(SpaceDecorator::Mangle);
+    from()->clear(SpaceDecorator::Mangle);
+    if (ZapUnusedHeapArea) {
+      // This is now done here because of the piece-meal mangling which
+      // can check for valid mangling at intermediate points in the
+      // collection(s). When a minor collection fails to collect
+      // sufficient space, resizing of the young generation can occur
+      // and redistribute the spaces in the young generation. Mangle
+      // here so that unzapped regions don't get distributed to
+      // other spaces.
+      to()->mangle_unused_area();
+    }
     swap_spaces();
 
     assert(to()->is_empty(), "to space should be empty now");
 
     // Set the desired survivor size to half the real survivor space
@@ -757,10 +814,19 @@
       sb->next = list;
       list = sb;
     }
   }
 
+void DefNewGeneration::reset_scratch() {
+  // If contributing scratch in to_space, mangle all of
+  // to_space if ZapUnusedHeapArea. This is needed because
+  // top is not maintained while using to-space as scratch.
+  if (ZapUnusedHeapArea) {
+    to()->mangle_unused_area_complete();
+  }
+}
+
 bool DefNewGeneration::collection_attempt_is_safe() {
   if (!to()->is_empty()) {
     return false;
   }
   if (_next_gen == NULL) {
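reset_scratch() uses mangle_unused_area_complete() rather than the
piece-meal mangle_unused_area() because top is not maintained while to-space
is lent out as scratch. A hypothetical sketch of the distinction between the
two entry points (method bodies assumed, not part of this changeset):

    // Sketch: words above the recorded allocation high-water mark are
    // still mangled from before, so the piece-meal variant only has to
    // remangle [top, high-water mark); the complete variant cannot
    // trust the mark and remangles everything above top.
    void SpaceSketch::mangle_unused_area() {          // piece-meal
      HeapWord* mangled_end = MIN2(top_for_allocations(), end());
      if (top() < mangled_end) {
        mangle_region(MemRegion(top(), mangled_end));
      }
    }
    void SpaceSketch::mangle_unused_area_complete() { // full remangle
      mangle_region(MemRegion(top(), end()));
    }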
@@ -810,14 +876,28 @@
     if (full) { // we seem to be running out of space
       set_should_allocate_from_space();
     }
   }
 
+  if (ZapUnusedHeapArea) {
+    eden()->check_mangled_unused_area_complete();
+    from()->check_mangled_unused_area_complete();
+    to()->check_mangled_unused_area_complete();
+  }
+
   // update the generation and space performance counters
   update_counters();
   gch->collector_policy()->counters()->update_counters();
 }
+
+void DefNewGeneration::record_spaces_top() {
+  assert(ZapUnusedHeapArea, "Not mangling unused space");
+  eden()->set_top_for_allocations();
+  to()->set_top_for_allocations();
+  from()->set_top_for_allocations();
+}
+
 
 void DefNewGeneration::update_counters() {
   if (UsePerfData) {
     _eden_counters->update_all();
     _from_counters->update_all();
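record_spaces_top() is the other half of the piece-meal scheme: it snapshots
each space's current top as the point below which mangling and checking must
assume dirtied data. A sketch of the bookkeeping it relies on (member and
setter shape assumed from the SpaceMangler naming used above):

    // Sketch: remember where allocation has reached so that piece-meal
    // mangling and the check_mangled_* verifiers know which part of
    // the space can still be trusted to hold the fill pattern.
    void SpaceManglerSketch::set_top_for_allocations(HeapWord* v) {
      _top_for_allocations = v;
    }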
