Tue, 26 Aug 2014 09:36:53 +0200
8054819: Rename HeapRegionSeq to HeapRegionManager
Reviewed-by: jwilhelm, jmasa
1.1 --- a/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1CollectedHeap.java Wed Aug 27 09:36:55 2014 +0200 1.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/G1CollectedHeap.java Tue Aug 26 09:36:53 2014 +0200 1.3 @@ -43,8 +43,8 @@ 1.4 // Mirror class for G1CollectedHeap. 1.5 1.6 public class G1CollectedHeap extends SharedHeap { 1.7 - // HeapRegionSeq _seq; 1.8 - static private long hrsFieldOffset; 1.9 + // HeapRegionManager _hrm; 1.10 + static private long hrmFieldOffset; 1.11 // MemRegion _g1_reserved; 1.12 static private long g1ReservedFieldOffset; 1.13 // size_t _summary_bytes_used; 1.14 @@ -67,7 +67,7 @@ 1.15 static private synchronized void initialize(TypeDataBase db) { 1.16 Type type = db.lookupType("G1CollectedHeap"); 1.17 1.18 - hrsFieldOffset = type.getField("_hrs").getOffset(); 1.19 + hrmFieldOffset = type.getField("_hrm").getOffset(); 1.20 summaryBytesUsedField = type.getCIntegerField("_summary_bytes_used"); 1.21 g1mmField = type.getAddressField("_g1mm"); 1.22 oldSetFieldOffset = type.getField("_old_set").getOffset(); 1.23 @@ -75,7 +75,7 @@ 1.24 } 1.25 1.26 public long capacity() { 1.27 - return hrs().capacity(); 1.28 + return hrm().capacity(); 1.29 } 1.30 1.31 public long used() { 1.32 @@ -83,13 +83,13 @@ 1.33 } 1.34 1.35 public long n_regions() { 1.36 - return hrs().length(); 1.37 + return hrm().length(); 1.38 } 1.39 1.40 - private HeapRegionSeq hrs() { 1.41 - Address hrsAddr = addr.addOffsetTo(hrsFieldOffset); 1.42 - return (HeapRegionSeq) VMObjectFactory.newObject(HeapRegionSeq.class, 1.43 - hrsAddr); 1.44 + private HeapRegionManager hrm() { 1.45 + Address hrmAddr = addr.addOffsetTo(hrmFieldOffset); 1.46 + return (HeapRegionManager) VMObjectFactory.newObject(HeapRegionManager.class, 1.47 + hrmAddr); 1.48 } 1.49 1.50 public G1MonitoringSupport g1mm() { 1.51 @@ -110,7 +110,7 @@ 1.52 } 1.53 1.54 private Iterator<HeapRegion> heapRegionIterator() { 1.55 - return hrs().heapRegionIterator(); 1.56 + return 
hrm().heapRegionIterator(); 1.57 } 1.58 1.59 public void heapRegionIterate(SpaceClosure scl) {
2.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 2.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionManager.java Tue Aug 26 09:36:53 2014 +0200 2.3 @@ -0,0 +1,88 @@ 2.4 +/* 2.5 + * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved. 2.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 2.7 + * 2.8 + * This code is free software; you can redistribute it and/or modify it 2.9 + * under the terms of the GNU General Public License version 2 only, as 2.10 + * published by the Free Software Foundation. 2.11 + * 2.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 2.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 2.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 2.15 + * version 2 for more details (a copy is included in the LICENSE file that 2.16 + * accompanied this code). 2.17 + * 2.18 + * You should have received a copy of the GNU General Public License version 2.19 + * 2 along with this work; if not, write to the Free Software Foundation, 2.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 2.21 + * 2.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 2.23 + * or visit www.oracle.com if you need additional information or have any 2.24 + * questions. 2.25 + * 2.26 + */ 2.27 + 2.28 +package sun.jvm.hotspot.gc_implementation.g1; 2.29 + 2.30 +import java.util.Iterator; 2.31 +import java.util.Observable; 2.32 +import java.util.Observer; 2.33 + 2.34 +import sun.jvm.hotspot.debugger.Address; 2.35 +import sun.jvm.hotspot.runtime.VM; 2.36 +import sun.jvm.hotspot.runtime.VMObject; 2.37 +import sun.jvm.hotspot.runtime.VMObjectFactory; 2.38 +import sun.jvm.hotspot.types.AddressField; 2.39 +import sun.jvm.hotspot.types.CIntegerField; 2.40 +import sun.jvm.hotspot.types.Type; 2.41 +import sun.jvm.hotspot.types.TypeDataBase; 2.42 + 2.43 +// Mirror class for HeapRegionManager. 
2.44 + 2.45 +public class HeapRegionManager extends VMObject { 2.46 + // G1HeapRegionTable _regions 2.47 + static private long regionsFieldOffset; 2.48 + // uint _num_committed 2.49 + static private CIntegerField numCommittedField; 2.50 + 2.51 + static { 2.52 + VM.registerVMInitializedObserver(new Observer() { 2.53 + public void update(Observable o, Object data) { 2.54 + initialize(VM.getVM().getTypeDataBase()); 2.55 + } 2.56 + }); 2.57 + } 2.58 + 2.59 + static private synchronized void initialize(TypeDataBase db) { 2.60 + Type type = db.lookupType("HeapRegionManager"); 2.61 + 2.62 + regionsFieldOffset = type.getField("_regions").getOffset(); 2.63 + numCommittedField = type.getCIntegerField("_num_committed"); 2.64 + } 2.65 + 2.66 + private G1HeapRegionTable regions() { 2.67 + Address regionsAddr = addr.addOffsetTo(regionsFieldOffset); 2.68 + return (G1HeapRegionTable) VMObjectFactory.newObject(G1HeapRegionTable.class, 2.69 + regionsAddr); 2.70 + } 2.71 + 2.72 + public long capacity() { 2.73 + return length() * HeapRegion.grainBytes(); 2.74 + } 2.75 + 2.76 + public long length() { 2.77 + return regions().length(); 2.78 + } 2.79 + 2.80 + public long committedLength() { 2.81 + return numCommittedField.getValue(addr); 2.82 + } 2.83 + 2.84 + public Iterator<HeapRegion> heapRegionIterator() { 2.85 + return regions().heapRegionIterator(length()); 2.86 + } 2.87 + 2.88 + public HeapRegionManager(Address addr) { 2.89 + super(addr); 2.90 + } 2.91 +}
3.1 --- a/agent/src/share/classes/sun/jvm/hotspot/gc_implementation/g1/HeapRegionSeq.java Wed Aug 27 09:36:55 2014 +0200 3.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 3.3 @@ -1,88 +0,0 @@ 3.4 -/* 3.5 - * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved. 3.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 3.7 - * 3.8 - * This code is free software; you can redistribute it and/or modify it 3.9 - * under the terms of the GNU General Public License version 2 only, as 3.10 - * published by the Free Software Foundation. 3.11 - * 3.12 - * This code is distributed in the hope that it will be useful, but WITHOUT 3.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 3.14 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 3.15 - * version 2 for more details (a copy is included in the LICENSE file that 3.16 - * accompanied this code). 3.17 - * 3.18 - * You should have received a copy of the GNU General Public License version 3.19 - * 2 along with this work; if not, write to the Free Software Foundation, 3.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 3.21 - * 3.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 3.23 - * or visit www.oracle.com if you need additional information or have any 3.24 - * questions. 3.25 - * 3.26 - */ 3.27 - 3.28 -package sun.jvm.hotspot.gc_implementation.g1; 3.29 - 3.30 -import java.util.Iterator; 3.31 -import java.util.Observable; 3.32 -import java.util.Observer; 3.33 - 3.34 -import sun.jvm.hotspot.debugger.Address; 3.35 -import sun.jvm.hotspot.runtime.VM; 3.36 -import sun.jvm.hotspot.runtime.VMObject; 3.37 -import sun.jvm.hotspot.runtime.VMObjectFactory; 3.38 -import sun.jvm.hotspot.types.AddressField; 3.39 -import sun.jvm.hotspot.types.CIntegerField; 3.40 -import sun.jvm.hotspot.types.Type; 3.41 -import sun.jvm.hotspot.types.TypeDataBase; 3.42 - 3.43 -// Mirror class for HeapRegionSeq. 
It essentially encapsulates the G1HeapRegionTable. 3.44 - 3.45 -public class HeapRegionSeq extends VMObject { 3.46 - // G1HeapRegionTable _regions 3.47 - static private long regionsFieldOffset; 3.48 - // uint _committed_length 3.49 - static private CIntegerField numCommittedField; 3.50 - 3.51 - static { 3.52 - VM.registerVMInitializedObserver(new Observer() { 3.53 - public void update(Observable o, Object data) { 3.54 - initialize(VM.getVM().getTypeDataBase()); 3.55 - } 3.56 - }); 3.57 - } 3.58 - 3.59 - static private synchronized void initialize(TypeDataBase db) { 3.60 - Type type = db.lookupType("HeapRegionSeq"); 3.61 - 3.62 - regionsFieldOffset = type.getField("_regions").getOffset(); 3.63 - numCommittedField = type.getCIntegerField("_num_committed"); 3.64 - } 3.65 - 3.66 - private G1HeapRegionTable regions() { 3.67 - Address regionsAddr = addr.addOffsetTo(regionsFieldOffset); 3.68 - return (G1HeapRegionTable) VMObjectFactory.newObject(G1HeapRegionTable.class, 3.69 - regionsAddr); 3.70 - } 3.71 - 3.72 - public long capacity() { 3.73 - return length() * HeapRegion.grainBytes(); 3.74 - } 3.75 - 3.76 - public long length() { 3.77 - return regions().length(); 3.78 - } 3.79 - 3.80 - public long committedLength() { 3.81 - return numCommittedField.getValue(addr); 3.82 - } 3.83 - 3.84 - public Iterator<HeapRegion> heapRegionIterator() { 3.85 - return regions().heapRegionIterator(length()); 3.86 - } 3.87 - 3.88 - public HeapRegionSeq(Address addr) { 3.89 - super(addr); 3.90 - } 3.91 -}
4.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp Wed Aug 27 09:36:55 2014 +0200 4.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp Tue Aug 26 09:36:53 2014 +0200 4.3 @@ -34,8 +34,8 @@ 4.4 #include "gc_implementation/g1/g1OopClosures.inline.hpp" 4.5 #include "gc_implementation/g1/g1RemSet.hpp" 4.6 #include "gc_implementation/g1/heapRegion.inline.hpp" 4.7 +#include "gc_implementation/g1/heapRegionManager.inline.hpp" 4.8 #include "gc_implementation/g1/heapRegionRemSet.hpp" 4.9 -#include "gc_implementation/g1/heapRegionSeq.inline.hpp" 4.10 #include "gc_implementation/g1/heapRegionSet.inline.hpp" 4.11 #include "gc_implementation/shared/vmGCOperations.hpp" 4.12 #include "gc_implementation/shared/gcTimer.hpp" 4.13 @@ -1408,7 +1408,7 @@ 4.14 void set_bit_for_region(HeapRegion* hr) { 4.15 assert(!hr->continuesHumongous(), "should have filtered those out"); 4.16 4.17 - BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index(); 4.18 + BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index(); 4.19 if (!hr->startsHumongous()) { 4.20 // Normal (non-humongous) case: just set the bit. 4.21 _region_bm->par_at_put(index, true); 4.22 @@ -1596,7 +1596,7 @@ 4.23 if (_verbose) { 4.24 gclog_or_tty->print_cr("Region %u: marked bytes mismatch: " 4.25 "expected: " SIZE_FORMAT ", actual: " SIZE_FORMAT, 4.26 - hr->hrs_index(), exp_marked_bytes, act_marked_bytes); 4.27 + hr->hrm_index(), exp_marked_bytes, act_marked_bytes); 4.28 } 4.29 failures += 1; 4.30 } 4.31 @@ -1605,7 +1605,7 @@ 4.32 // (which was just calculated) region bit maps. 4.33 // We're not OK if the bit in the calculated expected region 4.34 // bitmap is set and the bit in the actual region bitmap is not. 
4.35 - BitMap::idx_t index = (BitMap::idx_t) hr->hrs_index(); 4.36 + BitMap::idx_t index = (BitMap::idx_t) hr->hrm_index(); 4.37 4.38 bool expected = _exp_region_bm->at(index); 4.39 bool actual = _region_bm->at(index); 4.40 @@ -1613,7 +1613,7 @@ 4.41 if (_verbose) { 4.42 gclog_or_tty->print_cr("Region %u: region bitmap mismatch: " 4.43 "expected: %s, actual: %s", 4.44 - hr->hrs_index(), 4.45 + hr->hrm_index(), 4.46 BOOL_TO_STR(expected), BOOL_TO_STR(actual)); 4.47 } 4.48 failures += 1; 4.49 @@ -1634,7 +1634,7 @@ 4.50 if (_verbose) { 4.51 gclog_or_tty->print_cr("Region %u: card bitmap mismatch at " SIZE_FORMAT ": " 4.52 "expected: %s, actual: %s", 4.53 - hr->hrs_index(), i, 4.54 + hr->hrm_index(), i, 4.55 BOOL_TO_STR(expected), BOOL_TO_STR(actual)); 4.56 } 4.57 failures += 1; 4.58 @@ -3254,7 +3254,7 @@ 4.59 assert(limit_idx <= end_idx, "or else use atomics"); 4.60 4.61 // Aggregate the "stripe" in the count data associated with hr. 4.62 - uint hrs_index = hr->hrs_index(); 4.63 + uint hrm_index = hr->hrm_index(); 4.64 size_t marked_bytes = 0; 4.65 4.66 for (uint i = 0; i < _max_worker_id; i += 1) { 4.67 @@ -3263,7 +3263,7 @@ 4.68 4.69 // Fetch the marked_bytes in this region for task i and 4.70 // add it to the running total for this region. 4.71 - marked_bytes += marked_bytes_array[hrs_index]; 4.72 + marked_bytes += marked_bytes_array[hrm_index]; 4.73 4.74 // Now union the bitmaps[0,max_worker_id)[start_idx..limit_idx) 4.75 // into the global card bitmap.
5.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp Wed Aug 27 09:36:55 2014 +0200 5.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp Tue Aug 26 09:36:53 2014 +0200 5.3 @@ -86,7 +86,7 @@ 5.4 HeapWord* start = mr.start(); 5.5 HeapWord* end = mr.end(); 5.6 size_t region_size_bytes = mr.byte_size(); 5.7 - uint index = hr->hrs_index(); 5.8 + uint index = hr->hrm_index(); 5.9 5.10 assert(!hr->continuesHumongous(), "should not be HC region"); 5.11 assert(hr == g1h->heap_region_containing(start), "sanity");
6.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Wed Aug 27 09:36:55 2014 +0200 6.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Tue Aug 26 09:36:53 2014 +0200 6.3 @@ -528,9 +528,9 @@ 6.4 // again to allocate from it. 6.5 append_secondary_free_list(); 6.6 6.7 - assert(_hrs.num_free_regions() > 0, "if the secondary_free_list was not " 6.8 + assert(_hrm.num_free_regions() > 0, "if the secondary_free_list was not " 6.9 "empty we should have moved at least one entry to the free_list"); 6.10 - HeapRegion* res = _hrs.allocate_free_region(is_old); 6.11 + HeapRegion* res = _hrm.allocate_free_region(is_old); 6.12 if (G1ConcRegionFreeingVerbose) { 6.13 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : " 6.14 "allocated "HR_FORMAT" from secondary_free_list", 6.15 @@ -571,7 +571,7 @@ 6.16 } 6.17 } 6.18 6.19 - res = _hrs.allocate_free_region(is_old); 6.20 + res = _hrm.allocate_free_region(is_old); 6.21 6.22 if (res == NULL) { 6.23 if (G1ConcRegionFreeingVerbose) { 6.24 @@ -597,7 +597,7 @@ 6.25 // always expand the heap by an amount aligned to the heap 6.26 // region size, the free list should in theory not be empty. 6.27 // In either case allocate_free_region() will check for NULL. 
6.28 - res = _hrs.allocate_free_region(is_old); 6.29 + res = _hrm.allocate_free_region(is_old); 6.30 } else { 6.31 _expand_heap_after_alloc_failure = false; 6.32 } 6.33 @@ -609,7 +609,7 @@ 6.34 G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first, 6.35 uint num_regions, 6.36 size_t word_size) { 6.37 - assert(first != G1_NO_HRS_INDEX, "pre-condition"); 6.38 + assert(first != G1_NO_HRM_INDEX, "pre-condition"); 6.39 assert(isHumongous(word_size), "word_size should be humongous"); 6.40 assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition"); 6.41 6.42 @@ -747,7 +747,7 @@ 6.43 6.44 verify_region_sets_optional(); 6.45 6.46 - uint first = G1_NO_HRS_INDEX; 6.47 + uint first = G1_NO_HRM_INDEX; 6.48 uint obj_regions = (uint)(align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords); 6.49 6.50 if (obj_regions == 1) { 6.51 @@ -756,7 +756,7 @@ 6.52 // later. 6.53 HeapRegion* hr = new_region(word_size, true /* is_old */, false /* do_expand */); 6.54 if (hr != NULL) { 6.55 - first = hr->hrs_index(); 6.56 + first = hr->hrm_index(); 6.57 } 6.58 } else { 6.59 // We can't allocate humongous regions spanning more than one region while 6.60 @@ -772,18 +772,18 @@ 6.61 6.62 // Policy: Try only empty regions (i.e. already committed first). Maybe we 6.63 // are lucky enough to find some. 6.64 - first = _hrs.find_contiguous_only_empty(obj_regions); 6.65 - if (first != G1_NO_HRS_INDEX) { 6.66 - _hrs.allocate_free_regions_starting_at(first, obj_regions); 6.67 - } 6.68 - } 6.69 - 6.70 - if (first == G1_NO_HRS_INDEX) { 6.71 + first = _hrm.find_contiguous_only_empty(obj_regions); 6.72 + if (first != G1_NO_HRM_INDEX) { 6.73 + _hrm.allocate_free_regions_starting_at(first, obj_regions); 6.74 + } 6.75 + } 6.76 + 6.77 + if (first == G1_NO_HRM_INDEX) { 6.78 // Policy: We could not find enough regions for the humongous object in the 6.79 // free list. Look through the heap to find a mix of free and uncommitted regions. 6.80 // If so, try expansion. 
6.81 - first = _hrs.find_contiguous_empty_or_unavailable(obj_regions); 6.82 - if (first != G1_NO_HRS_INDEX) { 6.83 + first = _hrm.find_contiguous_empty_or_unavailable(obj_regions); 6.84 + if (first != G1_NO_HRM_INDEX) { 6.85 // We found something. Make sure these regions are committed, i.e. expand 6.86 // the heap. Alternatively we could do a defragmentation GC. 6.87 ergo_verbose1(ErgoHeapSizing, 6.88 @@ -792,7 +792,7 @@ 6.89 ergo_format_byte("allocation request"), 6.90 word_size * HeapWordSize); 6.91 6.92 - _hrs.expand_at(first, obj_regions); 6.93 + _hrm.expand_at(first, obj_regions); 6.94 g1_policy()->record_new_heap_size(num_regions()); 6.95 6.96 #ifdef ASSERT 6.97 @@ -802,14 +802,14 @@ 6.98 assert(is_on_master_free_list(hr), "sanity"); 6.99 } 6.100 #endif 6.101 - _hrs.allocate_free_regions_starting_at(first, obj_regions); 6.102 + _hrm.allocate_free_regions_starting_at(first, obj_regions); 6.103 } else { 6.104 // Policy: Potentially trigger a defragmentation GC. 6.105 } 6.106 } 6.107 6.108 HeapWord* result = NULL; 6.109 - if (first != G1_NO_HRS_INDEX) { 6.110 + if (first != G1_NO_HRM_INDEX) { 6.111 result = humongous_obj_allocate_initialize_regions(first, obj_regions, word_size); 6.112 assert(result != NULL, "it should always return a valid result"); 6.113 6.114 @@ -1244,7 +1244,7 @@ 6.115 : _hr_printer(hr_printer) { } 6.116 }; 6.117 6.118 -void G1CollectedHeap::print_hrs_post_compaction() { 6.119 +void G1CollectedHeap::print_hrm_post_compaction() { 6.120 PostCompactionPrinterClosure cl(hr_printer()); 6.121 heap_region_iterate(&cl); 6.122 } 6.123 @@ -1413,7 +1413,7 @@ 6.124 // that all the COMMIT / UNCOMMIT events are generated before 6.125 // the end GC event. 6.126 6.127 - print_hrs_post_compaction(); 6.128 + print_hrm_post_compaction(); 6.129 _hr_printer.end_gc(true /* full */, (size_t) total_collections()); 6.130 } 6.131 6.132 @@ -1486,7 +1486,7 @@ 6.133 // Update the number of full collections that have been completed. 
6.134 increment_old_marking_cycles_completed(false /* concurrent */); 6.135 6.136 - _hrs.verify_optional(); 6.137 + _hrm.verify_optional(); 6.138 verify_region_sets_optional(); 6.139 6.140 verify_after_gc(); 6.141 @@ -1730,7 +1730,7 @@ 6.142 ergo_format_byte("allocation request"), 6.143 word_size * HeapWordSize); 6.144 if (expand(expand_bytes)) { 6.145 - _hrs.verify_optional(); 6.146 + _hrm.verify_optional(); 6.147 verify_region_sets_optional(); 6.148 return attempt_allocation_at_safepoint(word_size, 6.149 false /* expect_null_mutator_alloc_region */); 6.150 @@ -1758,7 +1758,7 @@ 6.151 uint regions_to_expand = (uint)(aligned_expand_bytes / HeapRegion::GrainBytes); 6.152 assert(regions_to_expand > 0, "Must expand by at least one region"); 6.153 6.154 - uint expanded_by = _hrs.expand_by(regions_to_expand); 6.155 + uint expanded_by = _hrm.expand_by(regions_to_expand); 6.156 6.157 if (expanded_by > 0) { 6.158 size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes; 6.159 @@ -1771,7 +1771,7 @@ 6.160 // The expansion of the virtual storage space was unsuccessful. 6.161 // Let's see if it was because we ran out of swap. 6.162 if (G1ExitOnExpansionFailure && 6.163 - _hrs.available() >= regions_to_expand) { 6.164 + _hrm.available() >= regions_to_expand) { 6.165 // We had head room... 
6.166 vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion"); 6.167 } 6.168 @@ -1786,7 +1786,7 @@ 6.169 HeapRegion::GrainBytes); 6.170 uint num_regions_to_remove = (uint)(shrink_bytes / HeapRegion::GrainBytes); 6.171 6.172 - uint num_regions_removed = _hrs.shrink_by(num_regions_to_remove); 6.173 + uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove); 6.174 size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes; 6.175 6.176 ergo_verbose3(ErgoHeapSizing, 6.177 @@ -1819,7 +1819,7 @@ 6.178 shrink_helper(shrink_bytes); 6.179 rebuild_region_sets(true /* free_list_only */); 6.180 6.181 - _hrs.verify_optional(); 6.182 + _hrm.verify_optional(); 6.183 verify_region_sets_optional(); 6.184 } 6.185 6.186 @@ -2028,7 +2028,7 @@ 6.187 CMBitMap::mark_distance(), 6.188 mtGC); 6.189 6.190 - _hrs.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage); 6.191 + _hrm.initialize(heap_storage, prev_bitmap_storage, next_bitmap_storage, bot_storage, cardtable_storage, card_counts_storage); 6.192 g1_barrier_set()->initialize(cardtable_storage); 6.193 // Do later initialization work for concurrent refinement. 6.194 _cg1r->init(card_counts_storage); 6.195 @@ -2049,8 +2049,8 @@ 6.196 6.197 _g1h = this; 6.198 6.199 - _in_cset_fast_test.initialize(_hrs.reserved().start(), _hrs.reserved().end(), HeapRegion::GrainBytes); 6.200 - _humongous_is_live.initialize(_hrs.reserved().start(), _hrs.reserved().end(), HeapRegion::GrainBytes); 6.201 + _in_cset_fast_test.initialize(_hrm.reserved().start(), _hrm.reserved().end(), HeapRegion::GrainBytes); 6.202 + _humongous_is_live.initialize(_hrm.reserved().start(), _hrm.reserved().end(), HeapRegion::GrainBytes); 6.203 6.204 // Create the ConcurrentMark data structure and thread. 6.205 // (Must do this late, so that "max_regions" is defined.) 
6.206 @@ -2111,7 +2111,7 @@ 6.207 6.208 // Here we allocate the dummy HeapRegion that is required by the 6.209 // G1AllocRegion class. 6.210 - HeapRegion* dummy_region = _hrs.get_dummy_region(); 6.211 + HeapRegion* dummy_region = _hrm.get_dummy_region(); 6.212 6.213 // We'll re-use the same region whether the alloc region will 6.214 // require BOT updates or not and, if it doesn't, then a non-young 6.215 @@ -2228,14 +2228,14 @@ 6.216 } 6.217 6.218 size_t G1CollectedHeap::capacity() const { 6.219 - return _hrs.length() * HeapRegion::GrainBytes; 6.220 + return _hrm.length() * HeapRegion::GrainBytes; 6.221 } 6.222 6.223 void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) { 6.224 assert(!hr->continuesHumongous(), "pre-condition"); 6.225 hr->reset_gc_time_stamp(); 6.226 if (hr->startsHumongous()) { 6.227 - uint first_index = hr->hrs_index() + 1; 6.228 + uint first_index = hr->hrm_index() + 1; 6.229 uint last_index = hr->last_hc_index(); 6.230 for (uint i = first_index; i < last_index; i += 1) { 6.231 HeapRegion* chr = region_at(i); 6.232 @@ -2533,7 +2533,7 @@ 6.233 } 6.234 6.235 bool G1CollectedHeap::is_in(const void* p) const { 6.236 - if (_hrs.reserved().contains(p)) { 6.237 + if (_hrm.reserved().contains(p)) { 6.238 // Given that we know that p is in the reserved space, 6.239 // heap_region_containing_raw() should successfully 6.240 // return the containing region. 
6.241 @@ -2547,7 +2547,7 @@ 6.242 #ifdef ASSERT 6.243 bool G1CollectedHeap::is_in_exact(const void* p) const { 6.244 bool contains = reserved_region().contains(p); 6.245 - bool available = _hrs.is_available(addr_to_region((HeapWord*)p)); 6.246 + bool available = _hrm.is_available(addr_to_region((HeapWord*)p)); 6.247 if (contains && available) { 6.248 return true; 6.249 } else { 6.250 @@ -2614,7 +2614,7 @@ 6.251 } 6.252 6.253 void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const { 6.254 - _hrs.iterate(cl); 6.255 + _hrm.iterate(cl); 6.256 } 6.257 6.258 void 6.259 @@ -2622,7 +2622,7 @@ 6.260 uint worker_id, 6.261 uint num_workers, 6.262 jint claim_value) const { 6.263 - _hrs.par_iterate(cl, worker_id, num_workers, claim_value); 6.264 + _hrm.par_iterate(cl, worker_id, num_workers, claim_value); 6.265 } 6.266 6.267 class ResetClaimValuesClosure: public HeapRegionClosure { 6.268 @@ -2842,9 +2842,9 @@ 6.269 } 6.270 6.271 HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const { 6.272 - HeapRegion* result = _hrs.next_region_in_heap(from); 6.273 + HeapRegion* result = _hrm.next_region_in_heap(from); 6.274 while (result != NULL && result->isHumongous()) { 6.275 - result = _hrs.next_region_in_heap(result); 6.276 + result = _hrm.next_region_in_heap(result); 6.277 } 6.278 return result; 6.279 } 6.280 @@ -2904,7 +2904,7 @@ 6.281 } 6.282 6.283 size_t G1CollectedHeap::max_capacity() const { 6.284 - return _hrs.reserved().byte_size(); 6.285 + return _hrm.reserved().byte_size(); 6.286 } 6.287 6.288 jlong G1CollectedHeap::millis_since_last_gc() { 6.289 @@ -3433,9 +3433,9 @@ 6.290 st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K", 6.291 capacity()/K, used_unlocked()/K); 6.292 st->print(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")", 6.293 - _hrs.reserved().start(), 6.294 - _hrs.reserved().start() + _hrs.length() + HeapRegion::GrainWords, 6.295 - _hrs.reserved().end()); 6.296 + _hrm.reserved().start(), 6.297 + 
_hrm.reserved().start() + _hrm.length() + HeapRegion::GrainWords, 6.298 + _hrm.reserved().end()); 6.299 st->cr(); 6.300 st->print(" region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K); 6.301 uint young_regions = _young_list->length(); 6.302 @@ -3678,7 +3678,7 @@ 6.303 } 6.304 G1CollectedHeap* g1h = G1CollectedHeap::heap(); 6.305 6.306 - uint region_idx = r->hrs_index(); 6.307 + uint region_idx = r->hrm_index(); 6.308 bool is_candidate = !g1h->humongous_region_is_always_live(region_idx); 6.309 // Is_candidate already filters out humongous regions with some remembered set. 6.310 // This will not lead to humongous object that we mistakenly keep alive because 6.311 @@ -4200,7 +4200,7 @@ 6.312 // output from the concurrent mark thread interfering with this 6.313 // logging output either. 6.314 6.315 - _hrs.verify_optional(); 6.316 + _hrm.verify_optional(); 6.317 verify_region_sets_optional(); 6.318 6.319 TASKQUEUE_STATS_ONLY(if (ParallelGCVerbose) print_taskqueue_stats()); 6.320 @@ -6019,7 +6019,7 @@ 6.321 bool locked) { 6.322 assert(!hr->isHumongous(), "this is only for non-humongous regions"); 6.323 assert(!hr->is_empty(), "the region should not be empty"); 6.324 - assert(_hrs.is_available(hr->hrs_index()), "region should be committed"); 6.325 + assert(_hrm.is_available(hr->hrm_index()), "region should be committed"); 6.326 assert(free_list != NULL, "pre-condition"); 6.327 6.328 if (G1VerifyBitmaps) { 6.329 @@ -6050,7 +6050,7 @@ 6.330 hr->set_notHumongous(); 6.331 free_region(hr, free_list, par); 6.332 6.333 - uint i = hr->hrs_index() + 1; 6.334 + uint i = hr->hrm_index() + 1; 6.335 while (i < last_index) { 6.336 HeapRegion* curr_hr = region_at(i); 6.337 assert(curr_hr->continuesHumongous(), "invariant"); 6.338 @@ -6074,7 +6074,7 @@ 6.339 assert(list != NULL, "list can't be null"); 6.340 if (!list->is_empty()) { 6.341 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag); 6.342 - _hrs.insert_list_into_free_list(list); 6.343 + 
_hrm.insert_list_into_free_list(list); 6.344 } 6.345 } 6.346 6.347 @@ -6443,7 +6443,7 @@ 6.348 // While this cleanup is not strictly necessary to be done (or done instantly), 6.349 // given that their occurrence is very low, this saves us this additional 6.350 // complexity. 6.351 - uint region_idx = r->hrs_index(); 6.352 + uint region_idx = r->hrm_index(); 6.353 if (g1h->humongous_is_live(region_idx) || 6.354 g1h->humongous_region_is_always_live(region_idx)) { 6.355 6.356 @@ -6682,22 +6682,22 @@ 6.357 // this is that during a full GC string deduplication needs to know if 6.358 // a collected region was young or old when the full GC was initiated. 6.359 } 6.360 - _hrs.remove_all_free_regions(); 6.361 + _hrm.remove_all_free_regions(); 6.362 } 6.363 6.364 class RebuildRegionSetsClosure : public HeapRegionClosure { 6.365 private: 6.366 bool _free_list_only; 6.367 HeapRegionSet* _old_set; 6.368 - HeapRegionSeq* _hrs; 6.369 + HeapRegionManager* _hrm; 6.370 size_t _total_used; 6.371 6.372 public: 6.373 RebuildRegionSetsClosure(bool free_list_only, 6.374 - HeapRegionSet* old_set, HeapRegionSeq* hrs) : 6.375 + HeapRegionSet* old_set, HeapRegionManager* hrm) : 6.376 _free_list_only(free_list_only), 6.377 - _old_set(old_set), _hrs(hrs), _total_used(0) { 6.378 - assert(_hrs->num_free_regions() == 0, "pre-condition"); 6.379 + _old_set(old_set), _hrm(hrm), _total_used(0) { 6.380 + assert(_hrm->num_free_regions() == 0, "pre-condition"); 6.381 if (!free_list_only) { 6.382 assert(_old_set->is_empty(), "pre-condition"); 6.383 } 6.384 @@ -6710,7 +6710,7 @@ 6.385 6.386 if (r->is_empty()) { 6.387 // Add free regions to the free list 6.388 - _hrs->insert_into_free_list(r); 6.389 + _hrm->insert_into_free_list(r); 6.390 } else if (!_free_list_only) { 6.391 assert(!r->is_young(), "we should not come across young regions"); 6.392 6.393 @@ -6738,7 +6738,7 @@ 6.394 _young_list->empty_list(); 6.395 } 6.396 6.397 - RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrs); 6.398 + 
RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm); 6.399 heap_region_iterate(&cl); 6.400 6.401 if (!free_list_only) { 6.402 @@ -6928,7 +6928,7 @@ 6.403 private: 6.404 HeapRegionSet* _old_set; 6.405 HeapRegionSet* _humongous_set; 6.406 - HeapRegionSeq* _hrs; 6.407 + HeapRegionManager* _hrm; 6.408 6.409 public: 6.410 HeapRegionSetCount _old_count; 6.411 @@ -6937,8 +6937,8 @@ 6.412 6.413 VerifyRegionListsClosure(HeapRegionSet* old_set, 6.414 HeapRegionSet* humongous_set, 6.415 - HeapRegionSeq* hrs) : 6.416 - _old_set(old_set), _humongous_set(humongous_set), _hrs(hrs), 6.417 + HeapRegionManager* hrm) : 6.418 + _old_set(old_set), _humongous_set(humongous_set), _hrm(hrm), 6.419 _old_count(), _humongous_count(), _free_count(){ } 6.420 6.421 bool doHeapRegion(HeapRegion* hr) { 6.422 @@ -6949,19 +6949,19 @@ 6.423 if (hr->is_young()) { 6.424 // TODO 6.425 } else if (hr->startsHumongous()) { 6.426 - assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->hrs_index())); 6.427 + assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->hrm_index())); 6.428 _humongous_count.increment(1u, hr->capacity()); 6.429 } else if (hr->is_empty()) { 6.430 - assert(_hrs->is_free(hr), err_msg("Heap region %u is empty but not on the free list.", hr->hrs_index())); 6.431 + assert(_hrm->is_free(hr), err_msg("Heap region %u is empty but not on the free list.", hr->hrm_index())); 6.432 _free_count.increment(1u, hr->capacity()); 6.433 } else { 6.434 - assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrs_index())); 6.435 + assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrm_index())); 6.436 _old_count.increment(1u, hr->capacity()); 6.437 } 6.438 return false; 6.439 } 6.440 6.441 - void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, 
HeapRegionSeq* free_list) { 6.442 + void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) { 6.443 guarantee(old_set->length() == _old_count.length(), err_msg("Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count.length())); 6.444 guarantee(old_set->total_capacity_bytes() == _old_count.capacity(), err_msg("Old set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT, 6.445 old_set->total_capacity_bytes(), _old_count.capacity())); 6.446 @@ -6980,7 +6980,7 @@ 6.447 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); 6.448 6.449 // First, check the explicit lists. 6.450 - _hrs.verify(); 6.451 + _hrm.verify(); 6.452 { 6.453 // Given that a concurrent operation might be adding regions to 6.454 // the secondary free list we have to take the lock before 6.455 @@ -7011,9 +7011,9 @@ 6.456 // Finally, make sure that the region accounting in the lists is 6.457 // consistent with what we see in the heap. 6.458 6.459 - VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_hrs); 6.460 + VerifyRegionListsClosure cl(&_old_set, &_humongous_set, &_hrm); 6.461 heap_region_iterate(&cl); 6.462 - cl.verify_counts(&_old_set, &_humongous_set, &_hrs); 6.463 + cl.verify_counts(&_old_set, &_humongous_set, &_hrm); 6.464 } 6.465 6.466 // Optimized nmethod scanning
7.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Wed Aug 27 09:36:55 2014 +0200 7.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Tue Aug 26 09:36:53 2014 +0200 7.3 @@ -33,7 +33,7 @@ 7.4 #include "gc_implementation/g1/g1MonitoringSupport.hpp" 7.5 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" 7.6 #include "gc_implementation/g1/g1YCTypes.hpp" 7.7 -#include "gc_implementation/g1/heapRegionSeq.hpp" 7.8 +#include "gc_implementation/g1/heapRegionManager.hpp" 7.9 #include "gc_implementation/g1/heapRegionSet.hpp" 7.10 #include "gc_implementation/shared/hSpaceCounters.hpp" 7.11 #include "gc_implementation/shared/parGCAllocBuffer.hpp" 7.12 @@ -291,7 +291,7 @@ 7.13 G1RegionMappingChangedListener _listener; 7.14 7.15 // The sequence of all heap regions in the heap. 7.16 - HeapRegionSeq _hrs; 7.17 + HeapRegionManager _hrm; 7.18 7.19 // Alloc region used to satisfy mutator allocation requests. 7.20 MutatorAllocRegion _mutator_alloc_region; 7.21 @@ -429,7 +429,7 @@ 7.22 7.23 // If the HR printer is active, dump the state of the regions in the 7.24 // heap after a compaction. 7.25 - void print_hrs_post_compaction(); 7.26 + void print_hrm_post_compaction(); 7.27 7.28 double verify(bool guard, const char* msg); 7.29 void verify_before_gc(); 7.30 @@ -715,7 +715,7 @@ 7.31 // We register a region with the fast "in collection set" test. We 7.32 // simply set to true the array slot corresponding to this region. 7.33 void register_region_with_in_cset_fast_test(HeapRegion* r) { 7.34 - _in_cset_fast_test.set_in_cset(r->hrs_index()); 7.35 + _in_cset_fast_test.set_in_cset(r->hrm_index()); 7.36 } 7.37 7.38 // This is a fast test on whether a reference points into the 7.39 @@ -1171,17 +1171,17 @@ 7.40 // But G1CollectedHeap doesn't yet support this. 7.41 7.42 virtual bool is_maximal_no_gc() const { 7.43 - return _hrs.available() == 0; 7.44 + return _hrm.available() == 0; 7.45 } 7.46 7.47 // The current number of regions in the heap. 
7.48 - uint num_regions() const { return _hrs.length(); } 7.49 + uint num_regions() const { return _hrm.length(); } 7.50 7.51 // The max number of regions in the heap. 7.52 - uint max_regions() const { return _hrs.max_length(); } 7.53 + uint max_regions() const { return _hrm.max_length(); } 7.54 7.55 // The number of regions that are completely free. 7.56 - uint num_free_regions() const { return _hrs.num_free_regions(); } 7.57 + uint num_free_regions() const { return _hrm.num_free_regions(); } 7.58 7.59 // The number of regions that are not completely free. 7.60 uint num_used_regions() const { return num_regions() - num_free_regions(); } 7.61 @@ -1233,7 +1233,7 @@ 7.62 7.63 #ifdef ASSERT 7.64 bool is_on_master_free_list(HeapRegion* hr) { 7.65 - return _hrs.is_free(hr); 7.66 + return _hrm.is_free(hr); 7.67 } 7.68 #endif // ASSERT 7.69 7.70 @@ -1245,7 +1245,7 @@ 7.71 } 7.72 7.73 void append_secondary_free_list() { 7.74 - _hrs.insert_list_into_free_list(&_secondary_free_list); 7.75 + _hrm.insert_list_into_free_list(&_secondary_free_list); 7.76 } 7.77 7.78 void append_secondary_free_list_if_not_empty_with_lock() { 7.79 @@ -1356,13 +1356,13 @@ 7.80 // Return "TRUE" iff the given object address is in the reserved 7.81 // region of g1. 7.82 bool is_in_g1_reserved(const void* p) const { 7.83 - return _hrs.reserved().contains(p); 7.84 + return _hrm.reserved().contains(p); 7.85 } 7.86 7.87 // Returns a MemRegion that corresponds to the space that has been 7.88 // reserved for the heap 7.89 MemRegion g1_reserved() const { 7.90 - return _hrs.reserved(); 7.91 + return _hrm.reserved(); 7.92 } 7.93 7.94 virtual bool is_in_closed_subset(const void* p) const;
8.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp Wed Aug 27 09:36:55 2014 +0200 8.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp Tue Aug 26 09:36:53 2014 +0200 8.3 @@ -30,15 +30,15 @@ 8.4 #include "gc_implementation/g1/g1AllocRegion.inline.hpp" 8.5 #include "gc_implementation/g1/g1CollectorPolicy.hpp" 8.6 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp" 8.7 +#include "gc_implementation/g1/heapRegionManager.inline.hpp" 8.8 #include "gc_implementation/g1/heapRegionSet.inline.hpp" 8.9 -#include "gc_implementation/g1/heapRegionSeq.inline.hpp" 8.10 #include "runtime/orderAccess.inline.hpp" 8.11 #include "utilities/taskqueue.hpp" 8.12 8.13 // Inline functions for G1CollectedHeap 8.14 8.15 // Return the region with the given index. It assumes the index is valid. 8.16 -inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrs.at(index); } 8.17 +inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); } 8.18 8.19 inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const { 8.20 assert(is_in_reserved(addr), 8.21 @@ -48,7 +48,7 @@ 8.22 } 8.23 8.24 inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const { 8.25 - return _hrs.reserved().start() + index * HeapRegion::GrainWords; 8.26 + return _hrm.reserved().start() + index * HeapRegion::GrainWords; 8.27 } 8.28 8.29 template <class T> 8.30 @@ -57,7 +57,7 @@ 8.31 assert(is_in_g1_reserved((const void*) addr), 8.32 err_msg("Address "PTR_FORMAT" is outside of the heap ranging from ["PTR_FORMAT" to "PTR_FORMAT")", 8.33 p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end()))); 8.34 - return _hrs.addr_to_region((HeapWord*) addr); 8.35 + return _hrm.addr_to_region((HeapWord*) addr); 8.36 } 8.37 8.38 template <class T> 8.39 @@ -87,7 +87,7 @@ 8.40 } 8.41 8.42 inline bool G1CollectedHeap::obj_in_cs(oop obj) { 8.43 - HeapRegion* r = _hrs.addr_to_region((HeapWord*) obj); 8.44 + HeapRegion* r = 
_hrm.addr_to_region((HeapWord*) obj); 8.45 return r != NULL && r->in_collection_set(); 8.46 } 8.47
9.1 --- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp Wed Aug 27 09:36:55 2014 +0200 9.2 +++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp Tue Aug 26 09:36:53 2014 +0200 9.3 @@ -32,7 +32,7 @@ 9.4 #include "gc_implementation/g1/g1GCPhaseTimes.hpp" 9.5 #include "gc_implementation/g1/g1OopClosures.inline.hpp" 9.6 #include "gc_implementation/g1/g1RemSet.inline.hpp" 9.7 -#include "gc_implementation/g1/heapRegionSeq.inline.hpp" 9.8 +#include "gc_implementation/g1/heapRegionManager.inline.hpp" 9.9 #include "gc_implementation/g1/heapRegionRemSet.hpp" 9.10 #include "memory/iterator.hpp" 9.11 #include "oops/oop.inline.hpp"
10.1 --- a/src/share/vm/gc_implementation/g1/heapRegion.cpp Wed Aug 27 09:36:55 2014 +0200 10.2 +++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp Tue Aug 26 09:36:53 2014 +0200 10.3 @@ -29,7 +29,7 @@ 10.4 #include "gc_implementation/g1/g1OopClosures.inline.hpp" 10.5 #include "gc_implementation/g1/heapRegion.inline.hpp" 10.6 #include "gc_implementation/g1/heapRegionRemSet.hpp" 10.7 -#include "gc_implementation/g1/heapRegionSeq.inline.hpp" 10.8 +#include "gc_implementation/g1/heapRegionManager.inline.hpp" 10.9 #include "gc_implementation/shared/liveRange.hpp" 10.10 #include "memory/genOopClosures.inline.hpp" 10.11 #include "memory/iterator.hpp" 10.12 @@ -344,11 +344,11 @@ 10.13 return low; 10.14 } 10.15 10.16 -HeapRegion::HeapRegion(uint hrs_index, 10.17 +HeapRegion::HeapRegion(uint hrm_index, 10.18 G1BlockOffsetSharedArray* sharedOffsetArray, 10.19 MemRegion mr) : 10.20 G1OffsetTableContigSpace(sharedOffsetArray, mr), 10.21 - _hrs_index(hrs_index), 10.22 + _hrm_index(hrm_index), 10.23 _humongous_type(NotHumongous), _humongous_start_region(NULL), 10.24 _in_collection_set(false), 10.25 _next_in_special_set(NULL), _orig_end(NULL),
11.1 --- a/src/share/vm/gc_implementation/g1/heapRegion.hpp Wed Aug 27 09:36:55 2014 +0200 11.2 +++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp Tue Aug 26 09:36:53 2014 +0200 11.3 @@ -54,15 +54,15 @@ 11.4 11.5 #define HR_FORMAT "%u:(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]" 11.6 #define HR_FORMAT_PARAMS(_hr_) \ 11.7 - (_hr_)->hrs_index(), \ 11.8 + (_hr_)->hrm_index(), \ 11.9 (_hr_)->is_survivor() ? "S" : (_hr_)->is_young() ? "E" : \ 11.10 (_hr_)->startsHumongous() ? "HS" : \ 11.11 (_hr_)->continuesHumongous() ? "HC" : \ 11.12 !(_hr_)->is_empty() ? "O" : "F", \ 11.13 p2i((_hr_)->bottom()), p2i((_hr_)->top()), p2i((_hr_)->end()) 11.14 11.15 -// sentinel value for hrs_index 11.16 -#define G1_NO_HRS_INDEX ((uint) -1) 11.17 +// sentinel value for hrm_index 11.18 +#define G1_NO_HRM_INDEX ((uint) -1) 11.19 11.20 // A dirty card to oop closure for heap regions. It 11.21 // knows how to get the G1 heap and how to use the bitmap 11.22 @@ -234,7 +234,7 @@ 11.23 11.24 protected: 11.25 // The index of this region in the heap region sequence. 11.26 - uint _hrs_index; 11.27 + uint _hrm_index; 11.28 11.29 HumongousType _humongous_type; 11.30 // For a humongous region, region in which it starts. 11.31 @@ -330,7 +330,7 @@ 11.32 size_t _predicted_bytes_to_copy; 11.33 11.34 public: 11.35 - HeapRegion(uint hrs_index, 11.36 + HeapRegion(uint hrm_index, 11.37 G1BlockOffsetSharedArray* sharedOffsetArray, 11.38 MemRegion mr); 11.39 11.40 @@ -385,9 +385,9 @@ 11.41 inline HeapWord* par_allocate_no_bot_updates(size_t word_size); 11.42 inline HeapWord* allocate_no_bot_updates(size_t word_size); 11.43 11.44 - // If this region is a member of a HeapRegionSeq, the index in that 11.45 + // If this region is a member of a HeapRegionManager, the index in that 11.46 // sequence, otherwise -1. 11.47 - uint hrs_index() const { return _hrs_index; } 11.48 + uint hrm_index() const { return _hrm_index; } 11.49 11.50 // The number of bytes marked live in the region in the last marking phase. 
11.51 size_t marked_bytes() { return _prev_marked_bytes; } 11.52 @@ -458,7 +458,7 @@ 11.53 // with this HS region. 11.54 uint last_hc_index() const { 11.55 assert(startsHumongous(), "don't call this otherwise"); 11.56 - return hrs_index() + region_num(); 11.57 + return hrm_index() + region_num(); 11.58 } 11.59 11.60 // Same as Space::is_in_reserved, but will use the original size of the region. 11.61 @@ -813,7 +813,7 @@ 11.62 // HeapRegionClosure is used for iterating over regions. 11.63 // Terminates the iteration when the "doHeapRegion" method returns "true". 11.64 class HeapRegionClosure : public StackObj { 11.65 - friend class HeapRegionSeq; 11.66 + friend class HeapRegionManager; 11.67 friend class G1CollectedHeap; 11.68 11.69 bool _complete;
12.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 12.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionManager.cpp Tue Aug 26 09:36:53 2014 +0200 12.3 @@ -0,0 +1,450 @@ 12.4 +/* 12.5 + * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. 12.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 12.7 + * 12.8 + * This code is free software; you can redistribute it and/or modify it 12.9 + * under the terms of the GNU General Public License version 2 only, as 12.10 + * published by the Free Software Foundation. 12.11 + * 12.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 12.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12.15 + * version 2 for more details (a copy is included in the LICENSE file that 12.16 + * accompanied this code). 12.17 + * 12.18 + * You should have received a copy of the GNU General Public License version 12.19 + * 2 along with this work; if not, write to the Free Software Foundation, 12.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 12.21 + * 12.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 12.23 + * or visit www.oracle.com if you need additional information or have any 12.24 + * questions. 
12.25 + * 12.26 + */ 12.27 + 12.28 +#include "precompiled.hpp" 12.29 +#include "gc_implementation/g1/heapRegion.hpp" 12.30 +#include "gc_implementation/g1/heapRegionManager.inline.hpp" 12.31 +#include "gc_implementation/g1/heapRegionSet.inline.hpp" 12.32 +#include "gc_implementation/g1/g1CollectedHeap.inline.hpp" 12.33 +#include "gc_implementation/g1/concurrentG1Refine.hpp" 12.34 +#include "memory/allocation.hpp" 12.35 + 12.36 +void HeapRegionManager::initialize(G1RegionToSpaceMapper* heap_storage, 12.37 + G1RegionToSpaceMapper* prev_bitmap, 12.38 + G1RegionToSpaceMapper* next_bitmap, 12.39 + G1RegionToSpaceMapper* bot, 12.40 + G1RegionToSpaceMapper* cardtable, 12.41 + G1RegionToSpaceMapper* card_counts) { 12.42 + _allocated_heapregions_length = 0; 12.43 + 12.44 + _heap_mapper = heap_storage; 12.45 + 12.46 + _prev_bitmap_mapper = prev_bitmap; 12.47 + _next_bitmap_mapper = next_bitmap; 12.48 + 12.49 + _bot_mapper = bot; 12.50 + _cardtable_mapper = cardtable; 12.51 + 12.52 + _card_counts_mapper = card_counts; 12.53 + 12.54 + MemRegion reserved = heap_storage->reserved(); 12.55 + _regions.initialize(reserved.start(), reserved.end(), HeapRegion::GrainBytes); 12.56 + 12.57 + _available_map.resize(_regions.length(), false); 12.58 + _available_map.clear(); 12.59 +} 12.60 + 12.61 +bool HeapRegionManager::is_available(uint region) const { 12.62 + return _available_map.at(region); 12.63 +} 12.64 + 12.65 +#ifdef ASSERT 12.66 +bool HeapRegionManager::is_free(HeapRegion* hr) const { 12.67 + return _free_list.contains(hr); 12.68 +} 12.69 +#endif 12.70 + 12.71 +HeapRegion* HeapRegionManager::new_heap_region(uint hrm_index) { 12.72 + HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(hrm_index); 12.73 + MemRegion mr(bottom, bottom + HeapRegion::GrainWords); 12.74 + assert(reserved().contains(mr), "invariant"); 12.75 + return new HeapRegion(hrm_index, G1CollectedHeap::heap()->bot_shared(), mr); 12.76 +} 12.77 + 12.78 +void HeapRegionManager::commit_regions(uint 
index, size_t num_regions) { 12.79 + guarantee(num_regions > 0, "Must commit more than zero regions"); 12.80 + guarantee(_num_committed + num_regions <= max_length(), "Cannot commit more than the maximum amount of regions"); 12.81 + 12.82 + _num_committed += (uint)num_regions; 12.83 + 12.84 + _heap_mapper->commit_regions(index, num_regions); 12.85 + 12.86 + // Also commit auxiliary data 12.87 + _prev_bitmap_mapper->commit_regions(index, num_regions); 12.88 + _next_bitmap_mapper->commit_regions(index, num_regions); 12.89 + 12.90 + _bot_mapper->commit_regions(index, num_regions); 12.91 + _cardtable_mapper->commit_regions(index, num_regions); 12.92 + 12.93 + _card_counts_mapper->commit_regions(index, num_regions); 12.94 +} 12.95 + 12.96 +void HeapRegionManager::uncommit_regions(uint start, size_t num_regions) { 12.97 + guarantee(num_regions >= 1, err_msg("Need to specify at least one region to uncommit, tried to uncommit zero regions at %u", start)); 12.98 + guarantee(_num_committed >= num_regions, "pre-condition"); 12.99 + 12.100 + // Print before uncommitting. 
12.101 + if (G1CollectedHeap::heap()->hr_printer()->is_active()) { 12.102 + for (uint i = start; i < start + num_regions; i++) { 12.103 + HeapRegion* hr = at(i); 12.104 + G1CollectedHeap::heap()->hr_printer()->uncommit(hr->bottom(), hr->end()); 12.105 + } 12.106 + } 12.107 + 12.108 + _num_committed -= (uint)num_regions; 12.109 + 12.110 + _available_map.par_clear_range(start, start + num_regions, BitMap::unknown_range); 12.111 + _heap_mapper->uncommit_regions(start, num_regions); 12.112 + 12.113 + // Also uncommit auxiliary data 12.114 + _prev_bitmap_mapper->uncommit_regions(start, num_regions); 12.115 + _next_bitmap_mapper->uncommit_regions(start, num_regions); 12.116 + 12.117 + _bot_mapper->uncommit_regions(start, num_regions); 12.118 + _cardtable_mapper->uncommit_regions(start, num_regions); 12.119 + 12.120 + _card_counts_mapper->uncommit_regions(start, num_regions); 12.121 +} 12.122 + 12.123 +void HeapRegionManager::make_regions_available(uint start, uint num_regions) { 12.124 + guarantee(num_regions > 0, "No point in calling this for zero regions"); 12.125 + commit_regions(start, num_regions); 12.126 + for (uint i = start; i < start + num_regions; i++) { 12.127 + if (_regions.get_by_index(i) == NULL) { 12.128 + HeapRegion* new_hr = new_heap_region(i); 12.129 + _regions.set_by_index(i, new_hr); 12.130 + _allocated_heapregions_length = MAX2(_allocated_heapregions_length, i + 1); 12.131 + } 12.132 + } 12.133 + 12.134 + _available_map.par_set_range(start, start + num_regions, BitMap::unknown_range); 12.135 + 12.136 + for (uint i = start; i < start + num_regions; i++) { 12.137 + assert(is_available(i), err_msg("Just made region %u available but is apparently not.", i)); 12.138 + HeapRegion* hr = at(i); 12.139 + if (G1CollectedHeap::heap()->hr_printer()->is_active()) { 12.140 + G1CollectedHeap::heap()->hr_printer()->commit(hr->bottom(), hr->end()); 12.141 + } 12.142 + HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(i); 12.143 + MemRegion mr(bottom, 
bottom + HeapRegion::GrainWords); 12.144 + 12.145 + hr->initialize(mr); 12.146 + insert_into_free_list(at(i)); 12.147 + } 12.148 +} 12.149 + 12.150 +uint HeapRegionManager::expand_by(uint num_regions) { 12.151 + return expand_at(0, num_regions); 12.152 +} 12.153 + 12.154 +uint HeapRegionManager::expand_at(uint start, uint num_regions) { 12.155 + if (num_regions == 0) { 12.156 + return 0; 12.157 + } 12.158 + 12.159 + uint cur = start; 12.160 + uint idx_last_found = 0; 12.161 + uint num_last_found = 0; 12.162 + 12.163 + uint expanded = 0; 12.164 + 12.165 + while (expanded < num_regions && 12.166 + (num_last_found = find_unavailable_from_idx(cur, &idx_last_found)) > 0) { 12.167 + uint to_expand = MIN2(num_regions - expanded, num_last_found); 12.168 + make_regions_available(idx_last_found, to_expand); 12.169 + expanded += to_expand; 12.170 + cur = idx_last_found + num_last_found + 1; 12.171 + } 12.172 + 12.173 + verify_optional(); 12.174 + return expanded; 12.175 +} 12.176 + 12.177 +uint HeapRegionManager::find_contiguous(size_t num, bool empty_only) { 12.178 + uint found = 0; 12.179 + size_t length_found = 0; 12.180 + uint cur = 0; 12.181 + 12.182 + while (length_found < num && cur < max_length()) { 12.183 + HeapRegion* hr = _regions.get_by_index(cur); 12.184 + if ((!empty_only && !is_available(cur)) || (is_available(cur) && hr != NULL && hr->is_empty())) { 12.185 + // This region is a potential candidate for allocation into. 12.186 + length_found++; 12.187 + } else { 12.188 + // This region is not a candidate. The next region is the next possible one. 
12.189 + found = cur + 1; 12.190 + length_found = 0; 12.191 + } 12.192 + cur++; 12.193 + } 12.194 + 12.195 + if (length_found == num) { 12.196 + for (uint i = found; i < (found + num); i++) { 12.197 + HeapRegion* hr = _regions.get_by_index(i); 12.198 + // sanity check 12.199 + guarantee((!empty_only && !is_available(i)) || (is_available(i) && hr != NULL && hr->is_empty()), 12.200 + err_msg("Found region sequence starting at " UINT32_FORMAT ", length " SIZE_FORMAT 12.201 + " that is not empty at " UINT32_FORMAT ". Hr is " PTR_FORMAT, found, num, i, p2i(hr))); 12.202 + } 12.203 + return found; 12.204 + } else { 12.205 + return G1_NO_HRM_INDEX; 12.206 + } 12.207 +} 12.208 + 12.209 +HeapRegion* HeapRegionManager::next_region_in_heap(const HeapRegion* r) const { 12.210 + guarantee(r != NULL, "Start region must be a valid region"); 12.211 + guarantee(is_available(r->hrm_index()), err_msg("Trying to iterate starting from region %u which is not in the heap", r->hrm_index())); 12.212 + for (uint i = r->hrm_index() + 1; i < _allocated_heapregions_length; i++) { 12.213 + HeapRegion* hr = _regions.get_by_index(i); 12.214 + if (is_available(i)) { 12.215 + return hr; 12.216 + } 12.217 + } 12.218 + return NULL; 12.219 +} 12.220 + 12.221 +void HeapRegionManager::iterate(HeapRegionClosure* blk) const { 12.222 + uint len = max_length(); 12.223 + 12.224 + for (uint i = 0; i < len; i++) { 12.225 + if (!is_available(i)) { 12.226 + continue; 12.227 + } 12.228 + guarantee(at(i) != NULL, err_msg("Tried to access region %u that has a NULL HeapRegion*", i)); 12.229 + bool res = blk->doHeapRegion(at(i)); 12.230 + if (res) { 12.231 + blk->incomplete(); 12.232 + return; 12.233 + } 12.234 + } 12.235 +} 12.236 + 12.237 +uint HeapRegionManager::find_unavailable_from_idx(uint start_idx, uint* res_idx) const { 12.238 + guarantee(res_idx != NULL, "checking"); 12.239 + guarantee(start_idx <= (max_length() + 1), "checking"); 12.240 + 12.241 + uint num_regions = 0; 12.242 + 12.243 + uint cur = 
start_idx; 12.244 + while (cur < max_length() && is_available(cur)) { 12.245 + cur++; 12.246 + } 12.247 + if (cur == max_length()) { 12.248 + return num_regions; 12.249 + } 12.250 + *res_idx = cur; 12.251 + while (cur < max_length() && !is_available(cur)) { 12.252 + cur++; 12.253 + } 12.254 + num_regions = cur - *res_idx; 12.255 +#ifdef ASSERT 12.256 + for (uint i = *res_idx; i < (*res_idx + num_regions); i++) { 12.257 + assert(!is_available(i), "just checking"); 12.258 + } 12.259 + assert(cur == max_length() || num_regions == 0 || is_available(cur), 12.260 + err_msg("The region at the current position %u must be available or at the end of the heap.", cur)); 12.261 +#endif 12.262 + return num_regions; 12.263 +} 12.264 + 12.265 +uint HeapRegionManager::start_region_for_worker(uint worker_i, uint num_workers, uint num_regions) const { 12.266 + return num_regions * worker_i / num_workers; 12.267 +} 12.268 + 12.269 +void HeapRegionManager::par_iterate(HeapRegionClosure* blk, uint worker_id, uint num_workers, jint claim_value) const { 12.270 + const uint start_index = start_region_for_worker(worker_id, num_workers, _allocated_heapregions_length); 12.271 + 12.272 + // Every worker will actually look at all regions, skipping over regions that 12.273 + // are currently not committed. 12.274 + // This also (potentially) iterates over regions newly allocated during GC. This 12.275 + // is no problem except for some extra work. 
12.276 + for (uint count = 0; count < _allocated_heapregions_length; count++) { 12.277 + const uint index = (start_index + count) % _allocated_heapregions_length; 12.278 + assert(0 <= index && index < _allocated_heapregions_length, "sanity"); 12.279 + // Skip over unavailable regions 12.280 + if (!is_available(index)) { 12.281 + continue; 12.282 + } 12.283 + HeapRegion* r = _regions.get_by_index(index); 12.284 + // We'll ignore "continues humongous" regions (we'll process them 12.285 + // when we come across their corresponding "start humongous" 12.286 + // region) and regions already claimed. 12.287 + if (r->claim_value() == claim_value || r->continuesHumongous()) { 12.288 + continue; 12.289 + } 12.290 + // OK, try to claim it 12.291 + if (!r->claimHeapRegion(claim_value)) { 12.292 + continue; 12.293 + } 12.294 + // Success! 12.295 + if (r->startsHumongous()) { 12.296 + // If the region is "starts humongous" we'll iterate over its 12.297 + // "continues humongous" first; in fact we'll do them 12.298 + // first. The order is important. In one case, calling the 12.299 + // closure on the "starts humongous" region might de-allocate 12.300 + // and clear all its "continues humongous" regions and, as a 12.301 + // result, we might end up processing them twice. So, we'll do 12.302 + // them first (note: most closures will ignore them anyway) and 12.303 + // then we'll do the "starts humongous" region. 
12.304 + for (uint ch_index = index + 1; ch_index < index + r->region_num(); ch_index++) { 12.305 + HeapRegion* chr = _regions.get_by_index(ch_index); 12.306 + 12.307 + assert(chr->continuesHumongous(), "Must be humongous region"); 12.308 + assert(chr->humongous_start_region() == r, 12.309 + err_msg("Must work on humongous continuation of the original start region " 12.310 + PTR_FORMAT ", but is " PTR_FORMAT, p2i(r), p2i(chr))); 12.311 + assert(chr->claim_value() != claim_value, 12.312 + "Must not have been claimed yet because claiming of humongous continuation first claims the start region"); 12.313 + 12.314 + bool claim_result = chr->claimHeapRegion(claim_value); 12.315 + // We should always be able to claim it; no one else should 12.316 + // be trying to claim this region. 12.317 + guarantee(claim_result, "We should always be able to claim the continuesHumongous part of the humongous object"); 12.318 + 12.319 + bool res2 = blk->doHeapRegion(chr); 12.320 + if (res2) { 12.321 + return; 12.322 + } 12.323 + 12.324 + // Right now, this holds (i.e., no closure that actually 12.325 + // does something with "continues humongous" regions 12.326 + // clears them). We might have to weaken it in the future, 12.327 + // but let's leave these two asserts here for extra safety. 
12.328 + assert(chr->continuesHumongous(), "should still be the case"); 12.329 + assert(chr->humongous_start_region() == r, "sanity"); 12.330 + } 12.331 + } 12.332 + 12.333 + bool res = blk->doHeapRegion(r); 12.334 + if (res) { 12.335 + return; 12.336 + } 12.337 + } 12.338 +} 12.339 + 12.340 +uint HeapRegionManager::shrink_by(uint num_regions_to_remove) { 12.341 + assert(length() > 0, "the region sequence should not be empty"); 12.342 + assert(length() <= _allocated_heapregions_length, "invariant"); 12.343 + assert(_allocated_heapregions_length > 0, "we should have at least one region committed"); 12.344 + assert(num_regions_to_remove < length(), "We should never remove all regions"); 12.345 + 12.346 + if (num_regions_to_remove == 0) { 12.347 + return 0; 12.348 + } 12.349 + 12.350 + uint removed = 0; 12.351 + uint cur = _allocated_heapregions_length - 1; 12.352 + uint idx_last_found = 0; 12.353 + uint num_last_found = 0; 12.354 + 12.355 + while ((removed < num_regions_to_remove) && 12.356 + (num_last_found = find_empty_from_idx_reverse(cur, &idx_last_found)) > 0) { 12.357 + // Only allow uncommit from the end of the heap. 
12.358 + if ((idx_last_found + num_last_found) != _allocated_heapregions_length) { 12.359 + return 0; 12.360 + } 12.361 + uint to_remove = MIN2(num_regions_to_remove - removed, num_last_found); 12.362 + 12.363 + uncommit_regions(idx_last_found + num_last_found - to_remove, to_remove); 12.364 + 12.365 + cur -= num_last_found; 12.366 + removed += to_remove; 12.367 + } 12.368 + 12.369 + verify_optional(); 12.370 + 12.371 + return removed; 12.372 +} 12.373 + 12.374 +uint HeapRegionManager::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const { 12.375 + guarantee(start_idx < _allocated_heapregions_length, "checking"); 12.376 + guarantee(res_idx != NULL, "checking"); 12.377 + 12.378 + uint num_regions_found = 0; 12.379 + 12.380 + jlong cur = start_idx; 12.381 + while (cur != -1 && !(is_available(cur) && at(cur)->is_empty())) { 12.382 + cur--; 12.383 + } 12.384 + if (cur == -1) { 12.385 + return num_regions_found; 12.386 + } 12.387 + jlong old_cur = cur; 12.388 + // cur indexes the first empty region 12.389 + while (cur != -1 && is_available(cur) && at(cur)->is_empty()) { 12.390 + cur--; 12.391 + } 12.392 + *res_idx = cur + 1; 12.393 + num_regions_found = old_cur - cur; 12.394 + 12.395 +#ifdef ASSERT 12.396 + for (uint i = *res_idx; i < (*res_idx + num_regions_found); i++) { 12.397 + assert(at(i)->is_empty(), "just checking"); 12.398 + } 12.399 +#endif 12.400 + return num_regions_found; 12.401 +} 12.402 + 12.403 +void HeapRegionManager::verify() { 12.404 + guarantee(length() <= _allocated_heapregions_length, 12.405 + err_msg("invariant: _length: %u _allocated_length: %u", 12.406 + length(), _allocated_heapregions_length)); 12.407 + guarantee(_allocated_heapregions_length <= max_length(), 12.408 + err_msg("invariant: _allocated_length: %u _max_length: %u", 12.409 + _allocated_heapregions_length, max_length())); 12.410 + 12.411 + bool prev_committed = true; 12.412 + uint num_committed = 0; 12.413 + HeapWord* prev_end = heap_bottom(); 12.414 + for (uint i = 0; i 
< _allocated_heapregions_length; i++) { 12.415 + if (!is_available(i)) { 12.416 + prev_committed = false; 12.417 + continue; 12.418 + } 12.419 + num_committed++; 12.420 + HeapRegion* hr = _regions.get_by_index(i); 12.421 + guarantee(hr != NULL, err_msg("invariant: i: %u", i)); 12.422 + guarantee(!prev_committed || hr->bottom() == prev_end, 12.423 + err_msg("invariant i: %u "HR_FORMAT" prev_end: "PTR_FORMAT, 12.424 + i, HR_FORMAT_PARAMS(hr), p2i(prev_end))); 12.425 + guarantee(hr->hrm_index() == i, 12.426 + err_msg("invariant: i: %u hrm_index(): %u", i, hr->hrm_index())); 12.427 + // Asserts will fire if i is >= _length 12.428 + HeapWord* addr = hr->bottom(); 12.429 + guarantee(addr_to_region(addr) == hr, "sanity"); 12.430 + // We cannot check whether the region is part of a particular set: at the time 12.431 + // this method may be called, we have only completed allocation of the regions, 12.432 + // but not put into a region set. 12.433 + prev_committed = true; 12.434 + if (hr->startsHumongous()) { 12.435 + prev_end = hr->orig_end(); 12.436 + } else { 12.437 + prev_end = hr->end(); 12.438 + } 12.439 + } 12.440 + for (uint i = _allocated_heapregions_length; i < max_length(); i++) { 12.441 + guarantee(_regions.get_by_index(i) == NULL, err_msg("invariant i: %u", i)); 12.442 + } 12.443 + 12.444 + guarantee(num_committed == _num_committed, err_msg("Found %u committed regions, but should be %u", num_committed, _num_committed)); 12.445 + _free_list.verify(); 12.446 +} 12.447 + 12.448 +#ifndef PRODUCT 12.449 +void HeapRegionManager::verify_optional() { 12.450 + verify(); 12.451 +} 12.452 +#endif // PRODUCT 12.453 +
13.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 13.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionManager.hpp Tue Aug 26 09:36:53 2014 +0200 13.3 @@ -0,0 +1,238 @@ 13.4 +/* 13.5 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. 13.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 13.7 + * 13.8 + * This code is free software; you can redistribute it and/or modify it 13.9 + * under the terms of the GNU General Public License version 2 only, as 13.10 + * published by the Free Software Foundation. 13.11 + * 13.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 13.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13.15 + * version 2 for more details (a copy is included in the LICENSE file that 13.16 + * accompanied this code). 13.17 + * 13.18 + * You should have received a copy of the GNU General Public License version 13.19 + * 2 along with this work; if not, write to the Free Software Foundation, 13.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 13.21 + * 13.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 13.23 + * or visit www.oracle.com if you need additional information or have any 13.24 + * questions. 
13.25 + * 13.26 + */ 13.27 + 13.28 +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONMANAGER_HPP 13.29 +#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONMANAGER_HPP 13.30 + 13.31 +#include "gc_implementation/g1/g1BiasedArray.hpp" 13.32 +#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp" 13.33 +#include "gc_implementation/g1/heapRegionSet.hpp" 13.34 + 13.35 +class HeapRegion; 13.36 +class HeapRegionClosure; 13.37 +class FreeRegionList; 13.38 + 13.39 +class G1HeapRegionTable : public G1BiasedMappedArray<HeapRegion*> { 13.40 + protected: 13.41 + virtual HeapRegion* default_value() const { return NULL; } 13.42 +}; 13.43 + 13.44 +// This class keeps track of the actual heap memory, auxiliary data 13.45 +// and its metadata (i.e., HeapRegion instances) and the list of free regions. 13.46 +// 13.47 +// This allows maximum flexibility for deciding what to commit or uncommit given 13.48 +// a request from outside. 13.49 +// 13.50 +// HeapRegions are kept in the _regions array in address order. A region's 13.51 +// index in the array corresponds to its index in the heap (i.e., 0 is the 13.52 +// region at the bottom of the heap, 1 is the one after it, etc.). Two 13.53 +// regions that are consecutive in the array should also be adjacent in the 13.54 +// address space (i.e., region(i).end() == region(i+1).bottom(). 13.55 +// 13.56 +// We create a HeapRegion when we commit the region's address space 13.57 +// for the first time. When we uncommit the address space of a 13.58 +// region we retain the HeapRegion to be able to re-use it in the 13.59 +// future (in case we recommit it). 13.60 +// 13.61 +// We keep track of three lengths: 13.62 +// 13.63 +// * _num_committed (returned by length()) is the number of currently 13.64 +// committed regions. These may not be contiguous. 13.65 +// * _allocated_heapregions_length (not exposed outside this class) is the 13.66 +// number of regions+1 for which we have HeapRegions. 
13.67 +// * max_length() returns the maximum number of regions the heap can have. 13.68 +// 13.69 + 13.70 +class HeapRegionManager: public CHeapObj<mtGC> { 13.71 + friend class VMStructs; 13.72 + 13.73 + G1HeapRegionTable _regions; 13.74 + 13.75 + G1RegionToSpaceMapper* _heap_mapper; 13.76 + G1RegionToSpaceMapper* _prev_bitmap_mapper; 13.77 + G1RegionToSpaceMapper* _next_bitmap_mapper; 13.78 + G1RegionToSpaceMapper* _bot_mapper; 13.79 + G1RegionToSpaceMapper* _cardtable_mapper; 13.80 + G1RegionToSpaceMapper* _card_counts_mapper; 13.81 + 13.82 + FreeRegionList _free_list; 13.83 + 13.84 + // Each bit in this bitmap indicates that the corresponding region is available 13.85 + // for allocation. 13.86 + BitMap _available_map; 13.87 + 13.88 + // The number of regions committed in the heap. 13.89 + uint _num_committed; 13.90 + 13.91 + // Internal only. The highest heap region +1 we allocated a HeapRegion instance for. 13.92 + uint _allocated_heapregions_length; 13.93 + 13.94 + HeapWord* heap_bottom() const { return _regions.bottom_address_mapped(); } 13.95 + HeapWord* heap_end() const {return _regions.end_address_mapped(); } 13.96 + 13.97 + void make_regions_available(uint index, uint num_regions = 1); 13.98 + 13.99 + // Pass down commit calls to the VirtualSpace. 13.100 + void commit_regions(uint index, size_t num_regions = 1); 13.101 + void uncommit_regions(uint index, size_t num_regions = 1); 13.102 + 13.103 + // Notify other data structures about change in the heap layout. 13.104 + void update_committed_space(HeapWord* old_end, HeapWord* new_end); 13.105 + // Calculate the starting region for each worker during parallel iteration so 13.106 + // that they do not all start from the same region. 
13.107 + uint start_region_for_worker(uint worker_i, uint num_workers, uint num_regions) const; 13.108 + 13.109 + // Find a contiguous set of empty or uncommitted regions of length num and return 13.110 + // the index of the first region or G1_NO_HRM_INDEX if the search was unsuccessful. 13.111 + // If only_empty is true, only empty regions are considered. 13.112 + // Searches from bottom to top of the heap, doing a first-fit. 13.113 + uint find_contiguous(size_t num, bool only_empty); 13.114 + // Finds the next sequence of unavailable regions starting from start_idx. Returns the 13.115 + // length of the sequence found. If this result is zero, no such sequence could be found, 13.116 + // otherwise res_idx indicates the start index of these regions. 13.117 + uint find_unavailable_from_idx(uint start_idx, uint* res_idx) const; 13.118 + // Finds the next sequence of empty regions starting from start_idx, going backwards in 13.119 + // the heap. Returns the length of the sequence found. If this value is zero, no 13.120 + // sequence could be found, otherwise res_idx contains the start index of this range. 13.121 + uint find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const; 13.122 + // Allocate a new HeapRegion for the given index. 13.123 + HeapRegion* new_heap_region(uint hrm_index); 13.124 +#ifdef ASSERT 13.125 +public: 13.126 + bool is_free(HeapRegion* hr) const; 13.127 +#endif 13.128 + // Returns whether the given region is available for allocation. 13.129 + bool is_available(uint region) const; 13.130 + 13.131 + public: 13.132 + // Empty constructor, we'll initialize it with the initialize() method. 
13.133 + HeapRegionManager() : _regions(), _heap_mapper(NULL), _num_committed(0), 13.134 + _next_bitmap_mapper(NULL), _prev_bitmap_mapper(NULL), _bot_mapper(NULL), 13.135 + _allocated_heapregions_length(0), _available_map(), 13.136 + _free_list("Free list", new MasterFreeRegionListMtSafeChecker()) 13.137 + { } 13.138 + 13.139 + void initialize(G1RegionToSpaceMapper* heap_storage, 13.140 + G1RegionToSpaceMapper* prev_bitmap, 13.141 + G1RegionToSpaceMapper* next_bitmap, 13.142 + G1RegionToSpaceMapper* bot, 13.143 + G1RegionToSpaceMapper* cardtable, 13.144 + G1RegionToSpaceMapper* card_counts); 13.145 + 13.146 + // Return the "dummy" region used for G1AllocRegion. This is currently a hardwired 13.147 + // new HeapRegion that owns HeapRegion at index 0. Since at the moment we commit 13.148 + // the heap from the lowest address, this region (and its associated data 13.149 + // structures) are available and we do not need to check further. 13.150 + HeapRegion* get_dummy_region() { return new_heap_region(0); } 13.151 + 13.152 + // Return the HeapRegion at the given index. Assume that the index 13.153 + // is valid. 13.154 + inline HeapRegion* at(uint index) const; 13.155 + 13.156 + // If addr is within the committed space return its corresponding 13.157 + // HeapRegion, otherwise return NULL. 13.158 + inline HeapRegion* addr_to_region(HeapWord* addr) const; 13.159 + 13.160 + // Insert the given region into the free region list. 13.161 + inline void insert_into_free_list(HeapRegion* hr); 13.162 + 13.163 + // Insert the given region list into the global free region list. 
13.164 + void insert_list_into_free_list(FreeRegionList* list) { 13.165 + _free_list.add_ordered(list); 13.166 + } 13.167 + 13.168 + HeapRegion* allocate_free_region(bool is_old) { 13.169 + HeapRegion* hr = _free_list.remove_region(is_old); 13.170 + 13.171 + if (hr != NULL) { 13.172 + assert(hr->next() == NULL, "Single region should not have next"); 13.173 + assert(is_available(hr->hrm_index()), "Must be committed"); 13.174 + } 13.175 + return hr; 13.176 + } 13.177 + 13.178 + inline void allocate_free_regions_starting_at(uint first, uint num_regions); 13.179 + 13.180 + // Remove all regions from the free list. 13.181 + void remove_all_free_regions() { 13.182 + _free_list.remove_all(); 13.183 + } 13.184 + 13.185 + // Return the number of committed free regions in the heap. 13.186 + uint num_free_regions() const { 13.187 + return _free_list.length(); 13.188 + } 13.189 + 13.190 + size_t total_capacity_bytes() const { 13.191 + return num_free_regions() * HeapRegion::GrainBytes; 13.192 + } 13.193 + 13.194 + // Return the number of available (uncommitted) regions. 13.195 + uint available() const { return max_length() - length(); } 13.196 + 13.197 + // Return the number of regions that have been committed in the heap. 13.198 + uint length() const { return _num_committed; } 13.199 + 13.200 + // Return the maximum number of regions in the heap. 13.201 + uint max_length() const { return (uint)_regions.length(); } 13.202 + 13.203 + MemRegion reserved() const { return MemRegion(heap_bottom(), heap_end()); } 13.204 + 13.205 + // Expand the sequence to reflect that the heap has grown. Either create new 13.206 + // HeapRegions, or re-use existing ones. Returns the number of regions the 13.207 + // sequence was expanded by. If a HeapRegion allocation fails, the resulting 13.208 + // number of regions might be smaller than what's desired. 
13.209 + uint expand_by(uint num_regions); 13.210 + 13.211 + // Makes sure that the regions from start to start+num_regions-1 are available 13.212 + // for allocation. Returns the number of regions that were committed to achieve 13.213 + // this. 13.214 + uint expand_at(uint start, uint num_regions); 13.215 + 13.216 + // Find a contiguous set of empty regions of length num. Returns the start index of 13.217 + // that set, or G1_NO_HRM_INDEX. 13.218 + uint find_contiguous_only_empty(size_t num) { return find_contiguous(num, true); } 13.219 + // Find a contiguous set of empty or unavailable regions of length num. Returns the 13.220 + // start index of that set, or G1_NO_HRM_INDEX. 13.221 + uint find_contiguous_empty_or_unavailable(size_t num) { return find_contiguous(num, false); } 13.222 + 13.223 + HeapRegion* next_region_in_heap(const HeapRegion* r) const; 13.224 + 13.225 + // Apply blk->doHeapRegion() on all committed regions in address order, 13.226 + // terminating the iteration early if doHeapRegion() returns true. 13.227 + void iterate(HeapRegionClosure* blk) const; 13.228 + 13.229 + void par_iterate(HeapRegionClosure* blk, uint worker_id, uint no_of_par_workers, jint claim_value) const; 13.230 + 13.231 + // Uncommit up to num_regions_to_remove regions that are completely free. 13.232 + // Return the actual number of uncommitted regions. 13.233 + uint shrink_by(uint num_regions_to_remove); 13.234 + 13.235 + void verify(); 13.236 + 13.237 + // Do some sanity checking. 13.238 + void verify_optional() PRODUCT_RETURN; 13.239 +}; 13.240 + 13.241 +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONMANAGER_HPP
14.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 14.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionManager.inline.hpp Tue Aug 26 09:36:53 2014 +0200 14.3 @@ -0,0 +1,58 @@ 14.4 +/* 14.5 + * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. 14.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 14.7 + * 14.8 + * This code is free software; you can redistribute it and/or modify it 14.9 + * under the terms of the GNU General Public License version 2 only, as 14.10 + * published by the Free Software Foundation. 14.11 + * 14.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 14.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 14.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 14.15 + * version 2 for more details (a copy is included in the LICENSE file that 14.16 + * accompanied this code). 14.17 + * 14.18 + * You should have received a copy of the GNU General Public License version 14.19 + * 2 along with this work; if not, write to the Free Software Foundation, 14.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 14.21 + * 14.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 14.23 + * or visit www.oracle.com if you need additional information or have any 14.24 + * questions. 
14.25 + * 14.26 + */ 14.27 + 14.28 +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONMANAGER_INLINE_HPP 14.29 +#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONMANAGER_INLINE_HPP 14.30 + 14.31 +#include "gc_implementation/g1/heapRegion.hpp" 14.32 +#include "gc_implementation/g1/heapRegionManager.hpp" 14.33 +#include "gc_implementation/g1/heapRegionSet.inline.hpp" 14.34 + 14.35 +inline HeapRegion* HeapRegionManager::addr_to_region(HeapWord* addr) const { 14.36 + assert(addr < heap_end(), 14.37 + err_msg("addr: "PTR_FORMAT" end: "PTR_FORMAT, p2i(addr), p2i(heap_end()))); 14.38 + assert(addr >= heap_bottom(), 14.39 + err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, p2i(addr), p2i(heap_bottom()))); 14.40 + 14.41 + HeapRegion* hr = _regions.get_by_address(addr); 14.42 + return hr; 14.43 +} 14.44 + 14.45 +inline HeapRegion* HeapRegionManager::at(uint index) const { 14.46 + assert(is_available(index), "pre-condition"); 14.47 + HeapRegion* hr = _regions.get_by_index(index); 14.48 + assert(hr != NULL, "sanity"); 14.49 + assert(hr->hrm_index() == index, "sanity"); 14.50 + return hr; 14.51 +} 14.52 + 14.53 +inline void HeapRegionManager::insert_into_free_list(HeapRegion* hr) { 14.54 + _free_list.add_ordered(hr); 14.55 +} 14.56 + 14.57 +inline void HeapRegionManager::allocate_free_regions_starting_at(uint first, uint num_regions) { 14.58 + _free_list.remove_starting_at(at(first), num_regions); 14.59 +} 14.60 + 14.61 +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONMANAGER_INLINE_HPP
15.1 --- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp Wed Aug 27 09:36:55 2014 +0200 15.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp Tue Aug 26 09:36:53 2014 +0200 15.3 @@ -27,7 +27,7 @@ 15.4 #include "gc_implementation/g1/g1BlockOffsetTable.inline.hpp" 15.5 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" 15.6 #include "gc_implementation/g1/heapRegionRemSet.hpp" 15.7 -#include "gc_implementation/g1/heapRegionSeq.inline.hpp" 15.8 +#include "gc_implementation/g1/heapRegionManager.inline.hpp" 15.9 #include "memory/allocation.hpp" 15.10 #include "memory/padded.inline.hpp" 15.11 #include "memory/space.inline.hpp" 15.12 @@ -419,7 +419,7 @@ 15.13 } 15.14 15.15 void OtherRegionsTable::add_reference(OopOrNarrowOopStar from, int tid) { 15.16 - uint cur_hrs_ind = hr()->hrs_index(); 15.17 + uint cur_hrm_ind = hr()->hrm_index(); 15.18 15.19 if (G1TraceHeapRegionRememberedSet) { 15.20 gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").", 15.21 @@ -434,10 +434,10 @@ 15.22 if (G1TraceHeapRegionRememberedSet) { 15.23 gclog_or_tty->print_cr("Table for [" PTR_FORMAT "...): card %d (cache = "INT32_FORMAT")", 15.24 hr()->bottom(), from_card, 15.25 - FromCardCache::at((uint)tid, cur_hrs_ind)); 15.26 + FromCardCache::at((uint)tid, cur_hrm_ind)); 15.27 } 15.28 15.29 - if (FromCardCache::contains_or_replace((uint)tid, cur_hrs_ind, from_card)) { 15.30 + if (FromCardCache::contains_or_replace((uint)tid, cur_hrm_ind, from_card)) { 15.31 if (G1TraceHeapRegionRememberedSet) { 15.32 gclog_or_tty->print_cr(" from-card cache hit."); 15.33 } 15.34 @@ -447,7 +447,7 @@ 15.35 15.36 // Note that this may be a continued H region. 15.37 HeapRegion* from_hr = _g1h->heap_region_containing_raw(from); 15.38 - RegionIdx_t from_hrs_ind = (RegionIdx_t) from_hr->hrs_index(); 15.39 + RegionIdx_t from_hrs_ind = (RegionIdx_t) from_hr->hrm_index(); 15.40 15.41 // If the region is already coarsened, return. 
15.42 if (_coarse_map.at(from_hrs_ind)) { 15.43 @@ -493,8 +493,8 @@ 15.44 } else { 15.45 if (G1TraceHeapRegionRememberedSet) { 15.46 gclog_or_tty->print_cr(" [tid %d] sparse table entry " 15.47 - "overflow(f: %d, t: %d)", 15.48 - tid, from_hrs_ind, cur_hrs_ind); 15.49 + "overflow(f: %d, t: %u)", 15.50 + tid, from_hrs_ind, cur_hrm_ind); 15.51 } 15.52 } 15.53 15.54 @@ -606,9 +606,9 @@ 15.55 guarantee(max != NULL, "Since _n_fine_entries > 0"); 15.56 15.57 // Set the corresponding coarse bit. 15.58 - size_t max_hrs_index = (size_t) max->hr()->hrs_index(); 15.59 - if (!_coarse_map.at(max_hrs_index)) { 15.60 - _coarse_map.at_put(max_hrs_index, true); 15.61 + size_t max_hrm_index = (size_t) max->hr()->hrm_index(); 15.62 + if (!_coarse_map.at(max_hrm_index)) { 15.63 + _coarse_map.at_put(max_hrm_index, true); 15.64 _n_coarse_entries++; 15.65 if (G1TraceHeapRegionRememberedSet) { 15.66 gclog_or_tty->print("Coarsened entry in region [" PTR_FORMAT "...] " 15.67 @@ -632,7 +632,7 @@ 15.68 BitMap* region_bm, BitMap* card_bm) { 15.69 // First eliminated garbage regions from the coarse map. 15.70 if (G1RSScrubVerbose) { 15.71 - gclog_or_tty->print_cr("Scrubbing region %u:", hr()->hrs_index()); 15.72 + gclog_or_tty->print_cr("Scrubbing region %u:", hr()->hrm_index()); 15.73 } 15.74 15.75 assert(_coarse_map.size() == region_bm->size(), "Precondition"); 15.76 @@ -655,9 +655,9 @@ 15.77 // If the entire region is dead, eliminate. 
15.78 if (G1RSScrubVerbose) { 15.79 gclog_or_tty->print_cr(" For other region %u:", 15.80 - cur->hr()->hrs_index()); 15.81 + cur->hr()->hrm_index()); 15.82 } 15.83 - if (!region_bm->at((size_t) cur->hr()->hrs_index())) { 15.84 + if (!region_bm->at((size_t) cur->hr()->hrm_index())) { 15.85 *prev = nxt; 15.86 cur->set_collision_list_next(NULL); 15.87 _n_fine_entries--; 15.88 @@ -751,7 +751,7 @@ 15.89 } 15.90 15.91 void OtherRegionsTable::clear_fcc() { 15.92 - FromCardCache::clear(hr()->hrs_index()); 15.93 + FromCardCache::clear(hr()->hrm_index()); 15.94 } 15.95 15.96 void OtherRegionsTable::clear() { 15.97 @@ -802,7 +802,7 @@ 15.98 15.99 bool OtherRegionsTable::contains_reference_locked(OopOrNarrowOopStar from) const { 15.100 HeapRegion* hr = _g1h->heap_region_containing_raw(from); 15.101 - RegionIdx_t hr_ind = (RegionIdx_t) hr->hrs_index(); 15.102 + RegionIdx_t hr_ind = (RegionIdx_t) hr->hrm_index(); 15.103 // Is this region in the coarse map? 15.104 if (_coarse_map.at(hr_ind)) return true; 15.105 15.106 @@ -839,7 +839,7 @@ 15.107 HeapRegionRemSet::HeapRegionRemSet(G1BlockOffsetSharedArray* bosa, 15.108 HeapRegion* hr) 15.109 : _bosa(bosa), 15.110 - _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrs_index()), true), 15.111 + _m(Mutex::leaf, FormatBuffer<128>("HeapRegionRemSet lock #%u", hr->hrm_index()), true), 15.112 _code_roots(), _other_regions(hr, &_m), _iter_state(Unclaimed), _iter_claimed(0) { 15.113 reset_for_par_iteration(); 15.114 }
16.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp Wed Aug 27 09:36:55 2014 +0200 16.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 16.3 @@ -1,450 +0,0 @@ 16.4 -/* 16.5 - * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. 16.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 16.7 - * 16.8 - * This code is free software; you can redistribute it and/or modify it 16.9 - * under the terms of the GNU General Public License version 2 only, as 16.10 - * published by the Free Software Foundation. 16.11 - * 16.12 - * This code is distributed in the hope that it will be useful, but WITHOUT 16.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 16.14 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 16.15 - * version 2 for more details (a copy is included in the LICENSE file that 16.16 - * accompanied this code). 16.17 - * 16.18 - * You should have received a copy of the GNU General Public License version 16.19 - * 2 along with this work; if not, write to the Free Software Foundation, 16.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 16.21 - * 16.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 16.23 - * or visit www.oracle.com if you need additional information or have any 16.24 - * questions. 
16.25 - * 16.26 - */ 16.27 - 16.28 -#include "precompiled.hpp" 16.29 -#include "gc_implementation/g1/heapRegion.hpp" 16.30 -#include "gc_implementation/g1/heapRegionSeq.inline.hpp" 16.31 -#include "gc_implementation/g1/heapRegionSet.inline.hpp" 16.32 -#include "gc_implementation/g1/g1CollectedHeap.inline.hpp" 16.33 -#include "gc_implementation/g1/concurrentG1Refine.hpp" 16.34 -#include "memory/allocation.hpp" 16.35 - 16.36 -void HeapRegionSeq::initialize(G1RegionToSpaceMapper* heap_storage, 16.37 - G1RegionToSpaceMapper* prev_bitmap, 16.38 - G1RegionToSpaceMapper* next_bitmap, 16.39 - G1RegionToSpaceMapper* bot, 16.40 - G1RegionToSpaceMapper* cardtable, 16.41 - G1RegionToSpaceMapper* card_counts) { 16.42 - _allocated_heapregions_length = 0; 16.43 - 16.44 - _heap_mapper = heap_storage; 16.45 - 16.46 - _prev_bitmap_mapper = prev_bitmap; 16.47 - _next_bitmap_mapper = next_bitmap; 16.48 - 16.49 - _bot_mapper = bot; 16.50 - _cardtable_mapper = cardtable; 16.51 - 16.52 - _card_counts_mapper = card_counts; 16.53 - 16.54 - MemRegion reserved = heap_storage->reserved(); 16.55 - _regions.initialize(reserved.start(), reserved.end(), HeapRegion::GrainBytes); 16.56 - 16.57 - _available_map.resize(_regions.length(), false); 16.58 - _available_map.clear(); 16.59 -} 16.60 - 16.61 -bool HeapRegionSeq::is_available(uint region) const { 16.62 - return _available_map.at(region); 16.63 -} 16.64 - 16.65 -#ifdef ASSERT 16.66 -bool HeapRegionSeq::is_free(HeapRegion* hr) const { 16.67 - return _free_list.contains(hr); 16.68 -} 16.69 -#endif 16.70 - 16.71 -HeapRegion* HeapRegionSeq::new_heap_region(uint hrs_index) { 16.72 - HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(hrs_index); 16.73 - MemRegion mr(bottom, bottom + HeapRegion::GrainWords); 16.74 - assert(reserved().contains(mr), "invariant"); 16.75 - return new HeapRegion(hrs_index, G1CollectedHeap::heap()->bot_shared(), mr); 16.76 -} 16.77 - 16.78 -void HeapRegionSeq::commit_regions(uint index, size_t num_regions) { 
16.79 - guarantee(num_regions > 0, "Must commit more than zero regions"); 16.80 - guarantee(_num_committed + num_regions <= max_length(), "Cannot commit more than the maximum amount of regions"); 16.81 - 16.82 - _num_committed += (uint)num_regions; 16.83 - 16.84 - _heap_mapper->commit_regions(index, num_regions); 16.85 - 16.86 - // Also commit auxiliary data 16.87 - _prev_bitmap_mapper->commit_regions(index, num_regions); 16.88 - _next_bitmap_mapper->commit_regions(index, num_regions); 16.89 - 16.90 - _bot_mapper->commit_regions(index, num_regions); 16.91 - _cardtable_mapper->commit_regions(index, num_regions); 16.92 - 16.93 - _card_counts_mapper->commit_regions(index, num_regions); 16.94 -} 16.95 - 16.96 -void HeapRegionSeq::uncommit_regions(uint start, size_t num_regions) { 16.97 - guarantee(num_regions >= 1, err_msg("Need to specify at least one region to uncommit, tried to uncommit zero regions at %u", start)); 16.98 - guarantee(_num_committed >= num_regions, "pre-condition"); 16.99 - 16.100 - // Print before uncommitting. 
16.101 - if (G1CollectedHeap::heap()->hr_printer()->is_active()) { 16.102 - for (uint i = start; i < start + num_regions; i++) { 16.103 - HeapRegion* hr = at(i); 16.104 - G1CollectedHeap::heap()->hr_printer()->uncommit(hr->bottom(), hr->end()); 16.105 - } 16.106 - } 16.107 - 16.108 - _num_committed -= (uint)num_regions; 16.109 - 16.110 - _available_map.par_clear_range(start, start + num_regions, BitMap::unknown_range); 16.111 - _heap_mapper->uncommit_regions(start, num_regions); 16.112 - 16.113 - // Also uncommit auxiliary data 16.114 - _prev_bitmap_mapper->uncommit_regions(start, num_regions); 16.115 - _next_bitmap_mapper->uncommit_regions(start, num_regions); 16.116 - 16.117 - _bot_mapper->uncommit_regions(start, num_regions); 16.118 - _cardtable_mapper->uncommit_regions(start, num_regions); 16.119 - 16.120 - _card_counts_mapper->uncommit_regions(start, num_regions); 16.121 -} 16.122 - 16.123 -void HeapRegionSeq::make_regions_available(uint start, uint num_regions) { 16.124 - guarantee(num_regions > 0, "No point in calling this for zero regions"); 16.125 - commit_regions(start, num_regions); 16.126 - for (uint i = start; i < start + num_regions; i++) { 16.127 - if (_regions.get_by_index(i) == NULL) { 16.128 - HeapRegion* new_hr = new_heap_region(i); 16.129 - _regions.set_by_index(i, new_hr); 16.130 - _allocated_heapregions_length = MAX2(_allocated_heapregions_length, i + 1); 16.131 - } 16.132 - } 16.133 - 16.134 - _available_map.par_set_range(start, start + num_regions, BitMap::unknown_range); 16.135 - 16.136 - for (uint i = start; i < start + num_regions; i++) { 16.137 - assert(is_available(i), err_msg("Just made region %u available but is apparently not.", i)); 16.138 - HeapRegion* hr = at(i); 16.139 - if (G1CollectedHeap::heap()->hr_printer()->is_active()) { 16.140 - G1CollectedHeap::heap()->hr_printer()->commit(hr->bottom(), hr->end()); 16.141 - } 16.142 - HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(i); 16.143 - MemRegion mr(bottom, 
bottom + HeapRegion::GrainWords); 16.144 - 16.145 - hr->initialize(mr); 16.146 - insert_into_free_list(at(i)); 16.147 - } 16.148 -} 16.149 - 16.150 -uint HeapRegionSeq::expand_by(uint num_regions) { 16.151 - return expand_at(0, num_regions); 16.152 -} 16.153 - 16.154 -uint HeapRegionSeq::expand_at(uint start, uint num_regions) { 16.155 - if (num_regions == 0) { 16.156 - return 0; 16.157 - } 16.158 - 16.159 - uint cur = start; 16.160 - uint idx_last_found = 0; 16.161 - uint num_last_found = 0; 16.162 - 16.163 - uint expanded = 0; 16.164 - 16.165 - while (expanded < num_regions && 16.166 - (num_last_found = find_unavailable_from_idx(cur, &idx_last_found)) > 0) { 16.167 - uint to_expand = MIN2(num_regions - expanded, num_last_found); 16.168 - make_regions_available(idx_last_found, to_expand); 16.169 - expanded += to_expand; 16.170 - cur = idx_last_found + num_last_found + 1; 16.171 - } 16.172 - 16.173 - verify_optional(); 16.174 - return expanded; 16.175 -} 16.176 - 16.177 -uint HeapRegionSeq::find_contiguous(size_t num, bool empty_only) { 16.178 - uint found = 0; 16.179 - size_t length_found = 0; 16.180 - uint cur = 0; 16.181 - 16.182 - while (length_found < num && cur < max_length()) { 16.183 - HeapRegion* hr = _regions.get_by_index(cur); 16.184 - if ((!empty_only && !is_available(cur)) || (is_available(cur) && hr != NULL && hr->is_empty())) { 16.185 - // This region is a potential candidate for allocation into. 16.186 - length_found++; 16.187 - } else { 16.188 - // This region is not a candidate. The next region is the next possible one. 
16.189 - found = cur + 1; 16.190 - length_found = 0; 16.191 - } 16.192 - cur++; 16.193 - } 16.194 - 16.195 - if (length_found == num) { 16.196 - for (uint i = found; i < (found + num); i++) { 16.197 - HeapRegion* hr = _regions.get_by_index(i); 16.198 - // sanity check 16.199 - guarantee((!empty_only && !is_available(i)) || (is_available(i) && hr != NULL && hr->is_empty()), 16.200 - err_msg("Found region sequence starting at " UINT32_FORMAT ", length " SIZE_FORMAT 16.201 - " that is not empty at " UINT32_FORMAT ". Hr is " PTR_FORMAT, found, num, i, p2i(hr))); 16.202 - } 16.203 - return found; 16.204 - } else { 16.205 - return G1_NO_HRS_INDEX; 16.206 - } 16.207 -} 16.208 - 16.209 -HeapRegion* HeapRegionSeq::next_region_in_heap(const HeapRegion* r) const { 16.210 - guarantee(r != NULL, "Start region must be a valid region"); 16.211 - guarantee(is_available(r->hrs_index()), err_msg("Trying to iterate starting from region %u which is not in the heap", r->hrs_index())); 16.212 - for (uint i = r->hrs_index() + 1; i < _allocated_heapregions_length; i++) { 16.213 - HeapRegion* hr = _regions.get_by_index(i); 16.214 - if (is_available(i)) { 16.215 - return hr; 16.216 - } 16.217 - } 16.218 - return NULL; 16.219 -} 16.220 - 16.221 -void HeapRegionSeq::iterate(HeapRegionClosure* blk) const { 16.222 - uint len = max_length(); 16.223 - 16.224 - for (uint i = 0; i < len; i++) { 16.225 - if (!is_available(i)) { 16.226 - continue; 16.227 - } 16.228 - guarantee(at(i) != NULL, err_msg("Tried to access region %u that has a NULL HeapRegion*", i)); 16.229 - bool res = blk->doHeapRegion(at(i)); 16.230 - if (res) { 16.231 - blk->incomplete(); 16.232 - return; 16.233 - } 16.234 - } 16.235 -} 16.236 - 16.237 -uint HeapRegionSeq::find_unavailable_from_idx(uint start_idx, uint* res_idx) const { 16.238 - guarantee(res_idx != NULL, "checking"); 16.239 - guarantee(start_idx <= (max_length() + 1), "checking"); 16.240 - 16.241 - uint num_regions = 0; 16.242 - 16.243 - uint cur = start_idx; 16.244 - 
while (cur < max_length() && is_available(cur)) { 16.245 - cur++; 16.246 - } 16.247 - if (cur == max_length()) { 16.248 - return num_regions; 16.249 - } 16.250 - *res_idx = cur; 16.251 - while (cur < max_length() && !is_available(cur)) { 16.252 - cur++; 16.253 - } 16.254 - num_regions = cur - *res_idx; 16.255 -#ifdef ASSERT 16.256 - for (uint i = *res_idx; i < (*res_idx + num_regions); i++) { 16.257 - assert(!is_available(i), "just checking"); 16.258 - } 16.259 - assert(cur == max_length() || num_regions == 0 || is_available(cur), 16.260 - err_msg("The region at the current position %u must be available or at the end of the heap.", cur)); 16.261 -#endif 16.262 - return num_regions; 16.263 -} 16.264 - 16.265 -uint HeapRegionSeq::start_region_for_worker(uint worker_i, uint num_workers, uint num_regions) const { 16.266 - return num_regions * worker_i / num_workers; 16.267 -} 16.268 - 16.269 -void HeapRegionSeq::par_iterate(HeapRegionClosure* blk, uint worker_id, uint num_workers, jint claim_value) const { 16.270 - const uint start_index = start_region_for_worker(worker_id, num_workers, _allocated_heapregions_length); 16.271 - 16.272 - // Every worker will actually look at all regions, skipping over regions that 16.273 - // are currently not committed. 16.274 - // This also (potentially) iterates over regions newly allocated during GC. This 16.275 - // is no problem except for some extra work. 
16.276 - for (uint count = 0; count < _allocated_heapregions_length; count++) { 16.277 - const uint index = (start_index + count) % _allocated_heapregions_length; 16.278 - assert(0 <= index && index < _allocated_heapregions_length, "sanity"); 16.279 - // Skip over unavailable regions 16.280 - if (!is_available(index)) { 16.281 - continue; 16.282 - } 16.283 - HeapRegion* r = _regions.get_by_index(index); 16.284 - // We'll ignore "continues humongous" regions (we'll process them 16.285 - // when we come across their corresponding "start humongous" 16.286 - // region) and regions already claimed. 16.287 - if (r->claim_value() == claim_value || r->continuesHumongous()) { 16.288 - continue; 16.289 - } 16.290 - // OK, try to claim it 16.291 - if (!r->claimHeapRegion(claim_value)) { 16.292 - continue; 16.293 - } 16.294 - // Success! 16.295 - if (r->startsHumongous()) { 16.296 - // If the region is "starts humongous" we'll iterate over its 16.297 - // "continues humongous" first; in fact we'll do them 16.298 - // first. The order is important. In one case, calling the 16.299 - // closure on the "starts humongous" region might de-allocate 16.300 - // and clear all its "continues humongous" regions and, as a 16.301 - // result, we might end up processing them twice. So, we'll do 16.302 - // them first (note: most closures will ignore them anyway) and 16.303 - // then we'll do the "starts humongous" region. 
16.304 - for (uint ch_index = index + 1; ch_index < index + r->region_num(); ch_index++) { 16.305 - HeapRegion* chr = _regions.get_by_index(ch_index); 16.306 - 16.307 - assert(chr->continuesHumongous(), "Must be humongous region"); 16.308 - assert(chr->humongous_start_region() == r, 16.309 - err_msg("Must work on humongous continuation of the original start region " 16.310 - PTR_FORMAT ", but is " PTR_FORMAT, p2i(r), p2i(chr))); 16.311 - assert(chr->claim_value() != claim_value, 16.312 - "Must not have been claimed yet because claiming of humongous continuation first claims the start region"); 16.313 - 16.314 - bool claim_result = chr->claimHeapRegion(claim_value); 16.315 - // We should always be able to claim it; no one else should 16.316 - // be trying to claim this region. 16.317 - guarantee(claim_result, "We should always be able to claim the continuesHumongous part of the humongous object"); 16.318 - 16.319 - bool res2 = blk->doHeapRegion(chr); 16.320 - if (res2) { 16.321 - return; 16.322 - } 16.323 - 16.324 - // Right now, this holds (i.e., no closure that actually 16.325 - // does something with "continues humongous" regions 16.326 - // clears them). We might have to weaken it in the future, 16.327 - // but let's leave these two asserts here for extra safety. 
16.328 - assert(chr->continuesHumongous(), "should still be the case"); 16.329 - assert(chr->humongous_start_region() == r, "sanity"); 16.330 - } 16.331 - } 16.332 - 16.333 - bool res = blk->doHeapRegion(r); 16.334 - if (res) { 16.335 - return; 16.336 - } 16.337 - } 16.338 -} 16.339 - 16.340 -uint HeapRegionSeq::shrink_by(uint num_regions_to_remove) { 16.341 - assert(length() > 0, "the region sequence should not be empty"); 16.342 - assert(length() <= _allocated_heapregions_length, "invariant"); 16.343 - assert(_allocated_heapregions_length > 0, "we should have at least one region committed"); 16.344 - assert(num_regions_to_remove < length(), "We should never remove all regions"); 16.345 - 16.346 - if (num_regions_to_remove == 0) { 16.347 - return 0; 16.348 - } 16.349 - 16.350 - uint removed = 0; 16.351 - uint cur = _allocated_heapregions_length - 1; 16.352 - uint idx_last_found = 0; 16.353 - uint num_last_found = 0; 16.354 - 16.355 - while ((removed < num_regions_to_remove) && 16.356 - (num_last_found = find_empty_from_idx_reverse(cur, &idx_last_found)) > 0) { 16.357 - // Only allow uncommit from the end of the heap. 
16.358 - if ((idx_last_found + num_last_found) != _allocated_heapregions_length) { 16.359 - return 0; 16.360 - } 16.361 - uint to_remove = MIN2(num_regions_to_remove - removed, num_last_found); 16.362 - 16.363 - uncommit_regions(idx_last_found + num_last_found - to_remove, to_remove); 16.364 - 16.365 - cur -= num_last_found; 16.366 - removed += to_remove; 16.367 - } 16.368 - 16.369 - verify_optional(); 16.370 - 16.371 - return removed; 16.372 -} 16.373 - 16.374 -uint HeapRegionSeq::find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const { 16.375 - guarantee(start_idx < _allocated_heapregions_length, "checking"); 16.376 - guarantee(res_idx != NULL, "checking"); 16.377 - 16.378 - uint num_regions_found = 0; 16.379 - 16.380 - jlong cur = start_idx; 16.381 - while (cur != -1 && !(is_available(cur) && at(cur)->is_empty())) { 16.382 - cur--; 16.383 - } 16.384 - if (cur == -1) { 16.385 - return num_regions_found; 16.386 - } 16.387 - jlong old_cur = cur; 16.388 - // cur indexes the first empty region 16.389 - while (cur != -1 && is_available(cur) && at(cur)->is_empty()) { 16.390 - cur--; 16.391 - } 16.392 - *res_idx = cur + 1; 16.393 - num_regions_found = old_cur - cur; 16.394 - 16.395 -#ifdef ASSERT 16.396 - for (uint i = *res_idx; i < (*res_idx + num_regions_found); i++) { 16.397 - assert(at(i)->is_empty(), "just checking"); 16.398 - } 16.399 -#endif 16.400 - return num_regions_found; 16.401 -} 16.402 - 16.403 -void HeapRegionSeq::verify() { 16.404 - guarantee(length() <= _allocated_heapregions_length, 16.405 - err_msg("invariant: _length: %u _allocated_length: %u", 16.406 - length(), _allocated_heapregions_length)); 16.407 - guarantee(_allocated_heapregions_length <= max_length(), 16.408 - err_msg("invariant: _allocated_length: %u _max_length: %u", 16.409 - _allocated_heapregions_length, max_length())); 16.410 - 16.411 - bool prev_committed = true; 16.412 - uint num_committed = 0; 16.413 - HeapWord* prev_end = heap_bottom(); 16.414 - for (uint i = 0; i < 
_allocated_heapregions_length; i++) { 16.415 - if (!is_available(i)) { 16.416 - prev_committed = false; 16.417 - continue; 16.418 - } 16.419 - num_committed++; 16.420 - HeapRegion* hr = _regions.get_by_index(i); 16.421 - guarantee(hr != NULL, err_msg("invariant: i: %u", i)); 16.422 - guarantee(!prev_committed || hr->bottom() == prev_end, 16.423 - err_msg("invariant i: %u "HR_FORMAT" prev_end: "PTR_FORMAT, 16.424 - i, HR_FORMAT_PARAMS(hr), p2i(prev_end))); 16.425 - guarantee(hr->hrs_index() == i, 16.426 - err_msg("invariant: i: %u hrs_index(): %u", i, hr->hrs_index())); 16.427 - // Asserts will fire if i is >= _length 16.428 - HeapWord* addr = hr->bottom(); 16.429 - guarantee(addr_to_region(addr) == hr, "sanity"); 16.430 - // We cannot check whether the region is part of a particular set: at the time 16.431 - // this method may be called, we have only completed allocation of the regions, 16.432 - // but not put into a region set. 16.433 - prev_committed = true; 16.434 - if (hr->startsHumongous()) { 16.435 - prev_end = hr->orig_end(); 16.436 - } else { 16.437 - prev_end = hr->end(); 16.438 - } 16.439 - } 16.440 - for (uint i = _allocated_heapregions_length; i < max_length(); i++) { 16.441 - guarantee(_regions.get_by_index(i) == NULL, err_msg("invariant i: %u", i)); 16.442 - } 16.443 - 16.444 - guarantee(num_committed == _num_committed, err_msg("Found %u committed regions, but should be %u", num_committed, _num_committed)); 16.445 - _free_list.verify(); 16.446 -} 16.447 - 16.448 -#ifndef PRODUCT 16.449 -void HeapRegionSeq::verify_optional() { 16.450 - verify(); 16.451 -} 16.452 -#endif // PRODUCT 16.453 -
17.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp Wed Aug 27 09:36:55 2014 +0200 17.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 17.3 @@ -1,238 +0,0 @@ 17.4 -/* 17.5 - * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. 17.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 17.7 - * 17.8 - * This code is free software; you can redistribute it and/or modify it 17.9 - * under the terms of the GNU General Public License version 2 only, as 17.10 - * published by the Free Software Foundation. 17.11 - * 17.12 - * This code is distributed in the hope that it will be useful, but WITHOUT 17.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 17.14 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 17.15 - * version 2 for more details (a copy is included in the LICENSE file that 17.16 - * accompanied this code). 17.17 - * 17.18 - * You should have received a copy of the GNU General Public License version 17.19 - * 2 along with this work; if not, write to the Free Software Foundation, 17.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 17.21 - * 17.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 17.23 - * or visit www.oracle.com if you need additional information or have any 17.24 - * questions. 
17.25 - * 17.26 - */ 17.27 - 17.28 -#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP 17.29 -#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP 17.30 - 17.31 -#include "gc_implementation/g1/g1BiasedArray.hpp" 17.32 -#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp" 17.33 -#include "gc_implementation/g1/heapRegionSet.hpp" 17.34 - 17.35 -class HeapRegion; 17.36 -class HeapRegionClosure; 17.37 -class FreeRegionList; 17.38 - 17.39 -class G1HeapRegionTable : public G1BiasedMappedArray<HeapRegion*> { 17.40 - protected: 17.41 - virtual HeapRegion* default_value() const { return NULL; } 17.42 -}; 17.43 - 17.44 -// This class keeps track of the actual heap memory, auxiliary data 17.45 -// and its metadata (i.e., HeapRegion instances) and the list of free regions. 17.46 -// 17.47 -// This allows maximum flexibility for deciding what to commit or uncommit given 17.48 -// a request from outside. 17.49 -// 17.50 -// HeapRegions are kept in the _regions array in address order. A region's 17.51 -// index in the array corresponds to its index in the heap (i.e., 0 is the 17.52 -// region at the bottom of the heap, 1 is the one after it, etc.). Two 17.53 -// regions that are consecutive in the array should also be adjacent in the 17.54 -// address space (i.e., region(i).end() == region(i+1).bottom(). 17.55 -// 17.56 -// We create a HeapRegion when we commit the region's address space 17.57 -// for the first time. When we uncommit the address space of a 17.58 -// region we retain the HeapRegion to be able to re-use it in the 17.59 -// future (in case we recommit it). 17.60 -// 17.61 -// We keep track of three lengths: 17.62 -// 17.63 -// * _num_committed (returned by length()) is the number of currently 17.64 -// committed regions. These may not be contiguous. 17.65 -// * _allocated_heapregions_length (not exposed outside this class) is the 17.66 -// number of regions+1 for which we have HeapRegions. 
17.67 -// * max_length() returns the maximum number of regions the heap can have. 17.68 -// 17.69 - 17.70 -class HeapRegionSeq: public CHeapObj<mtGC> { 17.71 - friend class VMStructs; 17.72 - 17.73 - G1HeapRegionTable _regions; 17.74 - 17.75 - G1RegionToSpaceMapper* _heap_mapper; 17.76 - G1RegionToSpaceMapper* _prev_bitmap_mapper; 17.77 - G1RegionToSpaceMapper* _next_bitmap_mapper; 17.78 - G1RegionToSpaceMapper* _bot_mapper; 17.79 - G1RegionToSpaceMapper* _cardtable_mapper; 17.80 - G1RegionToSpaceMapper* _card_counts_mapper; 17.81 - 17.82 - FreeRegionList _free_list; 17.83 - 17.84 - // Each bit in this bitmap indicates that the corresponding region is available 17.85 - // for allocation. 17.86 - BitMap _available_map; 17.87 - 17.88 - // The number of regions committed in the heap. 17.89 - uint _num_committed; 17.90 - 17.91 - // Internal only. The highest heap region +1 we allocated a HeapRegion instance for. 17.92 - uint _allocated_heapregions_length; 17.93 - 17.94 - HeapWord* heap_bottom() const { return _regions.bottom_address_mapped(); } 17.95 - HeapWord* heap_end() const {return _regions.end_address_mapped(); } 17.96 - 17.97 - void make_regions_available(uint index, uint num_regions = 1); 17.98 - 17.99 - // Pass down commit calls to the VirtualSpace. 17.100 - void commit_regions(uint index, size_t num_regions = 1); 17.101 - void uncommit_regions(uint index, size_t num_regions = 1); 17.102 - 17.103 - // Notify other data structures about change in the heap layout. 17.104 - void update_committed_space(HeapWord* old_end, HeapWord* new_end); 17.105 - // Calculate the starting region for each worker during parallel iteration so 17.106 - // that they do not all start from the same region. 
17.107 - uint start_region_for_worker(uint worker_i, uint num_workers, uint num_regions) const; 17.108 - 17.109 - // Find a contiguous set of empty or uncommitted regions of length num and return 17.110 - // the index of the first region or G1_NO_HRS_INDEX if the search was unsuccessful. 17.111 - // If only_empty is true, only empty regions are considered. 17.112 - // Searches from bottom to top of the heap, doing a first-fit. 17.113 - uint find_contiguous(size_t num, bool only_empty); 17.114 - // Finds the next sequence of unavailable regions starting from start_idx. Returns the 17.115 - // length of the sequence found. If this result is zero, no such sequence could be found, 17.116 - // otherwise res_idx indicates the start index of these regions. 17.117 - uint find_unavailable_from_idx(uint start_idx, uint* res_idx) const; 17.118 - // Finds the next sequence of empty regions starting from start_idx, going backwards in 17.119 - // the heap. Returns the length of the sequence found. If this value is zero, no 17.120 - // sequence could be found, otherwise res_idx contains the start index of this range. 17.121 - uint find_empty_from_idx_reverse(uint start_idx, uint* res_idx) const; 17.122 - // Allocate a new HeapRegion for the given index. 17.123 - HeapRegion* new_heap_region(uint hrs_index); 17.124 -#ifdef ASSERT 17.125 -public: 17.126 - bool is_free(HeapRegion* hr) const; 17.127 -#endif 17.128 - // Returns whether the given region is available for allocation. 17.129 - bool is_available(uint region) const; 17.130 - 17.131 - public: 17.132 - // Empty constructor, we'll initialize it with the initialize() method. 
17.133 - HeapRegionSeq() : _regions(), _heap_mapper(NULL), _num_committed(0), 17.134 - _next_bitmap_mapper(NULL), _prev_bitmap_mapper(NULL), _bot_mapper(NULL), 17.135 - _allocated_heapregions_length(0), _available_map(), 17.136 - _free_list("Free list", new MasterFreeRegionListMtSafeChecker()) 17.137 - { } 17.138 - 17.139 - void initialize(G1RegionToSpaceMapper* heap_storage, 17.140 - G1RegionToSpaceMapper* prev_bitmap, 17.141 - G1RegionToSpaceMapper* next_bitmap, 17.142 - G1RegionToSpaceMapper* bot, 17.143 - G1RegionToSpaceMapper* cardtable, 17.144 - G1RegionToSpaceMapper* card_counts); 17.145 - 17.146 - // Return the "dummy" region used for G1AllocRegion. This is currently a hardwired 17.147 - // new HeapRegion that owns HeapRegion at index 0. Since at the moment we commit 17.148 - // the heap from the lowest address, this region (and its associated data 17.149 - // structures) are available and we do not need to check further. 17.150 - HeapRegion* get_dummy_region() { return new_heap_region(0); } 17.151 - 17.152 - // Return the HeapRegion at the given index. Assume that the index 17.153 - // is valid. 17.154 - inline HeapRegion* at(uint index) const; 17.155 - 17.156 - // If addr is within the committed space return its corresponding 17.157 - // HeapRegion, otherwise return NULL. 17.158 - inline HeapRegion* addr_to_region(HeapWord* addr) const; 17.159 - 17.160 - // Insert the given region into the free region list. 17.161 - inline void insert_into_free_list(HeapRegion* hr); 17.162 - 17.163 - // Insert the given region list into the global free region list. 
17.164 - void insert_list_into_free_list(FreeRegionList* list) { 17.165 - _free_list.add_ordered(list); 17.166 - } 17.167 - 17.168 - HeapRegion* allocate_free_region(bool is_old) { 17.169 - HeapRegion* hr = _free_list.remove_region(is_old); 17.170 - 17.171 - if (hr != NULL) { 17.172 - assert(hr->next() == NULL, "Single region should not have next"); 17.173 - assert(is_available(hr->hrs_index()), "Must be committed"); 17.174 - } 17.175 - return hr; 17.176 - } 17.177 - 17.178 - inline void allocate_free_regions_starting_at(uint first, uint num_regions); 17.179 - 17.180 - // Remove all regions from the free list. 17.181 - void remove_all_free_regions() { 17.182 - _free_list.remove_all(); 17.183 - } 17.184 - 17.185 - // Return the number of committed free regions in the heap. 17.186 - uint num_free_regions() const { 17.187 - return _free_list.length(); 17.188 - } 17.189 - 17.190 - size_t total_capacity_bytes() const { 17.191 - return num_free_regions() * HeapRegion::GrainBytes; 17.192 - } 17.193 - 17.194 - // Return the number of available (uncommitted) regions. 17.195 - uint available() const { return max_length() - length(); } 17.196 - 17.197 - // Return the number of regions that have been committed in the heap. 17.198 - uint length() const { return _num_committed; } 17.199 - 17.200 - // Return the maximum number of regions in the heap. 17.201 - uint max_length() const { return (uint)_regions.length(); } 17.202 - 17.203 - MemRegion reserved() const { return MemRegion(heap_bottom(), heap_end()); } 17.204 - 17.205 - // Expand the sequence to reflect that the heap has grown. Either create new 17.206 - // HeapRegions, or re-use existing ones. Returns the number of regions the 17.207 - // sequence was expanded by. If a HeapRegion allocation fails, the resulting 17.208 - // number of regions might be smaller than what's desired. 
17.209 - uint expand_by(uint num_regions); 17.210 - 17.211 - // Makes sure that the regions from start to start+num_regions-1 are available 17.212 - // for allocation. Returns the number of regions that were committed to achieve 17.213 - // this. 17.214 - uint expand_at(uint start, uint num_regions); 17.215 - 17.216 - // Find a contiguous set of empty regions of length num. Returns the start index of 17.217 - // that set, or G1_NO_HRS_INDEX. 17.218 - uint find_contiguous_only_empty(size_t num) { return find_contiguous(num, true); } 17.219 - // Find a contiguous set of empty or unavailable regions of length num. Returns the 17.220 - // start index of that set, or G1_NO_HRS_INDEX. 17.221 - uint find_contiguous_empty_or_unavailable(size_t num) { return find_contiguous(num, false); } 17.222 - 17.223 - HeapRegion* next_region_in_heap(const HeapRegion* r) const; 17.224 - 17.225 - // Apply blk->doHeapRegion() on all committed regions in address order, 17.226 - // terminating the iteration early if doHeapRegion() returns true. 17.227 - void iterate(HeapRegionClosure* blk) const; 17.228 - 17.229 - void par_iterate(HeapRegionClosure* blk, uint worker_id, uint no_of_par_workers, jint claim_value) const; 17.230 - 17.231 - // Uncommit up to num_regions_to_remove regions that are completely free. 17.232 - // Return the actual number of uncommitted regions. 17.233 - uint shrink_by(uint num_regions_to_remove); 17.234 - 17.235 - void verify(); 17.236 - 17.237 - // Do some sanity checking. 17.238 - void verify_optional() PRODUCT_RETURN; 17.239 -}; 17.240 - 17.241 -#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
18.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp Wed Aug 27 09:36:55 2014 +0200 18.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 18.3 @@ -1,58 +0,0 @@ 18.4 -/* 18.5 - * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved. 18.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 18.7 - * 18.8 - * This code is free software; you can redistribute it and/or modify it 18.9 - * under the terms of the GNU General Public License version 2 only, as 18.10 - * published by the Free Software Foundation. 18.11 - * 18.12 - * This code is distributed in the hope that it will be useful, but WITHOUT 18.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 18.14 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 18.15 - * version 2 for more details (a copy is included in the LICENSE file that 18.16 - * accompanied this code). 18.17 - * 18.18 - * You should have received a copy of the GNU General Public License version 18.19 - * 2 along with this work; if not, write to the Free Software Foundation, 18.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18.21 - * 18.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 18.23 - * or visit www.oracle.com if you need additional information or have any 18.24 - * questions. 
18.25 - * 18.26 - */ 18.27 - 18.28 -#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP 18.29 -#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP 18.30 - 18.31 -#include "gc_implementation/g1/heapRegion.hpp" 18.32 -#include "gc_implementation/g1/heapRegionSeq.hpp" 18.33 -#include "gc_implementation/g1/heapRegionSet.inline.hpp" 18.34 - 18.35 -inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const { 18.36 - assert(addr < heap_end(), 18.37 - err_msg("addr: "PTR_FORMAT" end: "PTR_FORMAT, p2i(addr), p2i(heap_end()))); 18.38 - assert(addr >= heap_bottom(), 18.39 - err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, p2i(addr), p2i(heap_bottom()))); 18.40 - 18.41 - HeapRegion* hr = _regions.get_by_address(addr); 18.42 - return hr; 18.43 -} 18.44 - 18.45 -inline HeapRegion* HeapRegionSeq::at(uint index) const { 18.46 - assert(is_available(index), "pre-condition"); 18.47 - HeapRegion* hr = _regions.get_by_index(index); 18.48 - assert(hr != NULL, "sanity"); 18.49 - assert(hr->hrs_index() == index, "sanity"); 18.50 - return hr; 18.51 -} 18.52 - 18.53 -inline void HeapRegionSeq::insert_into_free_list(HeapRegion* hr) { 18.54 - _free_list.add_ordered(hr); 18.55 -} 18.56 - 18.57 -inline void HeapRegionSeq::allocate_free_regions_starting_at(uint first, uint num_regions) { 18.58 - _free_list.remove_starting_at(at(first), num_regions); 18.59 -} 18.60 - 18.61 -#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_INLINE_HPP
19.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSet.cpp Wed Aug 27 09:36:55 2014 +0200 19.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSet.cpp Tue Aug 26 09:36:53 2014 +0200 19.3 @@ -39,11 +39,11 @@ 19.4 19.5 #ifndef PRODUCT 19.6 void HeapRegionSetBase::verify_region(HeapRegion* hr) { 19.7 - assert(hr->containing_set() == this, err_msg("Inconsistent containing set for %u", hr->hrs_index())); 19.8 - assert(!hr->is_young(), err_msg("Adding young region %u", hr->hrs_index())); // currently we don't use these sets for young regions 19.9 - assert(hr->isHumongous() == regions_humongous(), err_msg("Wrong humongous state for region %u and set %s", hr->hrs_index(), name())); 19.10 - assert(hr->is_empty() == regions_empty(), err_msg("Wrong empty state for region %u and set %s", hr->hrs_index(), name())); 19.11 - assert(hr->rem_set()->verify_ready_for_par_iteration(), err_msg("Wrong iteration state %u", hr->hrs_index())); 19.12 + assert(hr->containing_set() == this, err_msg("Inconsistent containing set for %u", hr->hrm_index())); 19.13 + assert(!hr->is_young(), err_msg("Adding young region %u", hr->hrm_index())); // currently we don't use these sets for young regions 19.14 + assert(hr->isHumongous() == regions_humongous(), err_msg("Wrong humongous state for region %u and set %s", hr->hrm_index(), name())); 19.15 + assert(hr->is_empty() == regions_empty(), err_msg("Wrong empty state for region %u and set %s", hr->hrm_index(), name())); 19.16 + assert(hr->rem_set()->verify_ready_for_par_iteration(), err_msg("Wrong iteration state %u", hr->hrm_index())); 19.17 } 19.18 #endif 19.19 19.20 @@ -158,7 +158,7 @@ 19.21 HeapRegion* curr_from = from_list->_head; 19.22 19.23 while (curr_from != NULL) { 19.24 - while (curr_to != NULL && curr_to->hrs_index() < curr_from->hrs_index()) { 19.25 + while (curr_to != NULL && curr_to->hrm_index() < curr_from->hrm_index()) { 19.26 curr_to = curr_to->next(); 19.27 } 19.28 19.29 @@ -183,7 +183,7 @@ 19.30 } 19.31 } 19.32 19.33 - if 
(_tail->hrs_index() < from_list->_tail->hrs_index()) { 19.34 + if (_tail->hrm_index() < from_list->_tail->hrm_index()) { 19.35 _tail = from_list->_tail; 19.36 } 19.37 } 19.38 @@ -309,8 +309,8 @@ 19.39 if (curr->next() != NULL) { 19.40 guarantee(curr->next()->prev() == curr, "Next or prev pointers messed up"); 19.41 } 19.42 - guarantee(curr->hrs_index() == 0 || curr->hrs_index() > last_index, "List should be sorted"); 19.43 - last_index = curr->hrs_index(); 19.44 + guarantee(curr->hrm_index() == 0 || curr->hrm_index() > last_index, "List should be sorted"); 19.45 + last_index = curr->hrm_index(); 19.46 19.47 capacity += curr->capacity(); 19.48 19.49 @@ -319,7 +319,7 @@ 19.50 curr = curr->next(); 19.51 } 19.52 19.53 - guarantee(_tail == prev0, err_msg("Expected %s to end with %u but it ended with %u.", name(), _tail->hrs_index(), prev0->hrs_index())); 19.54 + guarantee(_tail == prev0, err_msg("Expected %s to end with %u but it ended with %u.", name(), _tail->hrm_index(), prev0->hrm_index())); 19.55 guarantee(_tail == NULL || _tail->next() == NULL, "_tail should not have a next"); 19.56 guarantee(length() == count, err_msg("%s count mismatch. Expected %u, actual %u.", name(), length(), count)); 19.57 guarantee(total_capacity_bytes() == capacity, err_msg("%s capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
20.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSet.hpp Wed Aug 27 09:36:55 2014 +0200 20.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSet.hpp Tue Aug 26 09:36:53 2014 +0200 20.3 @@ -238,14 +238,14 @@ 20.4 20.5 // Add hr to the list. The region should not be a member of another set. 20.6 // Assumes that the list is ordered and will preserve that order. The order 20.7 - // is determined by hrs_index. 20.8 + // is determined by hrm_index. 20.9 inline void add_ordered(HeapRegion* hr); 20.10 20.11 // Removes from head or tail based on the given argument. 20.12 HeapRegion* remove_region(bool from_head); 20.13 20.14 // Merge two ordered lists. The result is also ordered. The order is 20.15 - // determined by hrs_index. 20.16 + // determined by hrm_index. 20.17 void add_ordered(FreeRegionList* from_list); 20.18 20.19 // It empties the list by removing all regions from it.
21.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp Wed Aug 27 09:36:55 2014 +0200 21.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp Tue Aug 26 09:36:53 2014 +0200 21.3 @@ -60,14 +60,14 @@ 21.4 if (_head != NULL) { 21.5 HeapRegion* curr; 21.6 21.7 - if (_last != NULL && _last->hrs_index() < hr->hrs_index()) { 21.8 + if (_last != NULL && _last->hrm_index() < hr->hrm_index()) { 21.9 curr = _last; 21.10 } else { 21.11 curr = _head; 21.12 } 21.13 21.14 // Find first entry with a Region Index larger than entry to insert. 21.15 - while (curr != NULL && curr->hrs_index() < hr->hrs_index()) { 21.16 + while (curr != NULL && curr->hrm_index() < hr->hrm_index()) { 21.17 curr = curr->next(); 21.18 } 21.19
22.1 --- a/src/share/vm/gc_implementation/g1/sparsePRT.cpp Wed Aug 27 09:36:55 2014 +0200 22.2 +++ b/src/share/vm/gc_implementation/g1/sparsePRT.cpp Tue Aug 26 09:36:53 2014 +0200 22.3 @@ -478,7 +478,7 @@ 22.4 bool SparsePRT::add_card(RegionIdx_t region_id, CardIdx_t card_index) { 22.5 #if SPARSE_PRT_VERBOSE 22.6 gclog_or_tty->print_cr(" Adding card %d from region %d to region %u sparse.", 22.7 - card_index, region_id, _hr->hrs_index()); 22.8 + card_index, region_id, _hr->hrm_index()); 22.9 #endif 22.10 if (_next->occupied_entries() * 2 > _next->capacity()) { 22.11 expand(); 22.12 @@ -530,7 +530,7 @@ 22.13 22.14 #if SPARSE_PRT_VERBOSE 22.15 gclog_or_tty->print_cr(" Expanded sparse table for %u to %d.", 22.16 - _hr->hrs_index(), _next->capacity()); 22.17 + _hr->hrm_index(), _next->capacity()); 22.18 #endif 22.19 for (size_t i = 0; i < last->capacity(); i++) { 22.20 SparsePRTEntry* e = last->entry((int)i);
23.1 --- a/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp Wed Aug 27 09:36:55 2014 +0200 23.2 +++ b/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp Tue Aug 26 09:36:53 2014 +0200 23.3 @@ -26,7 +26,7 @@ 23.4 #define SHARE_VM_GC_IMPLEMENTATION_G1_VMSTRUCTS_G1_HPP 23.5 23.6 #include "gc_implementation/g1/heapRegion.hpp" 23.7 -#include "gc_implementation/g1/heapRegionSeq.inline.hpp" 23.8 +#include "gc_implementation/g1/heapRegionManager.inline.hpp" 23.9 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" 23.10 23.11 #define VM_STRUCTS_G1(nonstatic_field, static_field) \ 23.12 @@ -42,10 +42,10 @@ 23.13 nonstatic_field(G1HeapRegionTable, _bias, size_t) \ 23.14 nonstatic_field(G1HeapRegionTable, _shift_by, uint) \ 23.15 \ 23.16 - nonstatic_field(HeapRegionSeq, _regions, G1HeapRegionTable) \ 23.17 - nonstatic_field(HeapRegionSeq, _num_committed, uint) \ 23.18 + nonstatic_field(HeapRegionManager, _regions, G1HeapRegionTable) \ 23.19 + nonstatic_field(HeapRegionManager, _num_committed, uint) \ 23.20 \ 23.21 - nonstatic_field(G1CollectedHeap, _hrs, HeapRegionSeq) \ 23.22 + nonstatic_field(G1CollectedHeap, _hrm, HeapRegionManager) \ 23.23 nonstatic_field(G1CollectedHeap, _summary_bytes_used, size_t) \ 23.24 nonstatic_field(G1CollectedHeap, _g1mm, G1MonitoringSupport*) \ 23.25 nonstatic_field(G1CollectedHeap, _old_set, HeapRegionSetBase) \ 23.26 @@ -72,7 +72,7 @@ 23.27 \ 23.28 declare_type(G1OffsetTableContigSpace, CompactibleSpace) \ 23.29 declare_type(HeapRegion, G1OffsetTableContigSpace) \ 23.30 - declare_toplevel_type(HeapRegionSeq) \ 23.31 + declare_toplevel_type(HeapRegionManager) \ 23.32 declare_toplevel_type(HeapRegionSetBase) \ 23.33 declare_toplevel_type(HeapRegionSetCount) \ 23.34 declare_toplevel_type(G1MonitoringSupport) \
24.1 --- a/src/share/vm/oops/instanceKlass.cpp Wed Aug 27 09:36:55 2014 +0200 24.2 +++ b/src/share/vm/oops/instanceKlass.cpp Tue Aug 26 09:36:53 2014 +0200 24.3 @@ -66,7 +66,7 @@ 24.4 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" 24.5 #include "gc_implementation/g1/g1OopClosures.inline.hpp" 24.6 #include "gc_implementation/g1/g1RemSet.inline.hpp" 24.7 -#include "gc_implementation/g1/heapRegionSeq.inline.hpp" 24.8 +#include "gc_implementation/g1/heapRegionManager.inline.hpp" 24.9 #include "gc_implementation/parNew/parOopClosures.inline.hpp" 24.10 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp" 24.11 #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
25.1 --- a/src/share/vm/oops/instanceMirrorKlass.cpp Wed Aug 27 09:36:55 2014 +0200 25.2 +++ b/src/share/vm/oops/instanceMirrorKlass.cpp Tue Aug 26 09:36:53 2014 +0200 25.3 @@ -42,7 +42,7 @@ 25.4 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" 25.5 #include "gc_implementation/g1/g1OopClosures.inline.hpp" 25.6 #include "gc_implementation/g1/g1RemSet.inline.hpp" 25.7 -#include "gc_implementation/g1/heapRegionSeq.inline.hpp" 25.8 +#include "gc_implementation/g1/heapRegionManager.inline.hpp" 25.9 #include "gc_implementation/parNew/parOopClosures.inline.hpp" 25.10 #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp" 25.11 #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
26.1 --- a/src/share/vm/oops/instanceRefKlass.cpp Wed Aug 27 09:36:55 2014 +0200 26.2 +++ b/src/share/vm/oops/instanceRefKlass.cpp Tue Aug 26 09:36:53 2014 +0200 26.3 @@ -38,7 +38,7 @@ 26.4 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" 26.5 #include "gc_implementation/g1/g1OopClosures.inline.hpp" 26.6 #include "gc_implementation/g1/g1RemSet.inline.hpp" 26.7 -#include "gc_implementation/g1/heapRegionSeq.inline.hpp" 26.8 +#include "gc_implementation/g1/heapRegionManager.inline.hpp" 26.9 #include "gc_implementation/parNew/parOopClosures.inline.hpp" 26.10 #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp" 26.11 #include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
27.1 --- a/src/share/vm/oops/objArrayKlass.cpp Wed Aug 27 09:36:55 2014 +0200 27.2 +++ b/src/share/vm/oops/objArrayKlass.cpp Tue Aug 26 09:36:53 2014 +0200 27.3 @@ -51,7 +51,7 @@ 27.4 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp" 27.5 #include "gc_implementation/g1/g1OopClosures.inline.hpp" 27.6 #include "gc_implementation/g1/g1RemSet.inline.hpp" 27.7 -#include "gc_implementation/g1/heapRegionSeq.inline.hpp" 27.8 +#include "gc_implementation/g1/heapRegionManager.inline.hpp" 27.9 #include "gc_implementation/parNew/parOopClosures.inline.hpp" 27.10 #include "gc_implementation/parallelScavenge/psCompactionManager.hpp" 27.11 #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"