   83  HeapRegion* _survivor_head;
   84  HeapRegion* _survivor_tail;
   85
   86  HeapRegion* _curr;
   87
-  88  size_t _length;
-  89  size_t _survivor_length;
+  88  uint _length;
+  89  uint _survivor_length;
   90
   91  size_t _last_sampled_rs_lengths;
   92  size_t _sampled_rs_lengths;
   93
   94  void empty_list(HeapRegion* list);
   99  void push_region(HeapRegion* hr);
  100  void add_survivor_region(HeapRegion* hr);
  101
  102  void empty_list();
  103  bool is_empty() { return _length == 0; }
- 104  size_t length() { return _length; }
- 105  size_t survivor_length() { return _survivor_length; }
+ 104  uint length() { return _length; }
+ 105  uint survivor_length() { return _survivor_length; }
  106
  107  // Currently we do not keep track of the used byte sum for the
  108  // young list and the survivors, and it'd be quite a lot of work to
  109  // do so. When we eventually replace the young list with
  110  // instances of HeapRegionLinkedList we'll get that for free. So,
  111  // we'll report the more accurate information then.
  112  size_t eden_used_bytes() {
  113    assert(length() >= survivor_length(), "invariant");
- 114    return (length() - survivor_length()) * HeapRegion::GrainBytes;
+ 114    return (size_t) (length() - survivor_length()) * HeapRegion::GrainBytes;
  115  }
  116  size_t survivor_used_bytes() {
- 117    return survivor_length() * HeapRegion::GrainBytes;
+ 117    return (size_t) survivor_length() * HeapRegion::GrainBytes;
  118  }
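Worth noting for review: with length() and survivor_length() now returning uint, the new (size_t) casts keep the multiplication by HeapRegion::GrainBytes from being evaluated in 32-bit arithmetic. A minimal standalone sketch of the hazard the casts guard against (assuming a 64-bit size_t; the region count and grain size are illustrative, and the grain is deliberately held in a 32-bit type here to show the wrap):

    #include <cstdio>
    #include <cstddef>

    int main() {
      unsigned int regions     = 6000;      // illustrative region count
      unsigned int grain_bytes = 1u << 20;  // 1 MB grain, held in 32 bits
      // Both operands are 32-bit, so the product wraps before widening:
      size_t wrong = regions * grain_bytes;
      // Widening one operand first performs the multiply in size_t:
      size_t right = (size_t) regions * grain_bytes;
      printf("wrong = %zu, right = %zu\n", wrong, right);
      return 0;
    }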
  119
  120  void rs_length_sampling_init();
  121  bool rs_length_sampling_more();
  122  void rs_length_sampling_next();
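The three rs_length_sampling_* methods form an init/more/next cursor over the young regions, driven by the _curr field above and accumulating into _sampled_rs_lengths. A standalone sketch of the calling idiom the names suggest (the stub type and loop shape are assumptions, not taken from this file):

    #include <cstdio>
    #include <cstddef>

    // StubYoungList is an assumed stand-in for the real young list.
    struct StubYoungList {
      int    _curr, _len;             // stand-ins for _curr / list length
      size_t _sampled_rs_lengths;
      void rs_length_sampling_init() { _curr = 0; _sampled_rs_lengths = 0; }
      bool rs_length_sampling_more() { return _curr < _len; }
      void rs_length_sampling_next() { _sampled_rs_lengths += 1; _curr++; }
    };

    int main() {
      StubYoungList yl = {0, 5, 0};
      yl.rs_length_sampling_init();           // reset the cursor
      while (yl.rs_length_sampling_more()) {  // regions left to sample?
        yl.rs_length_sampling_next();         // sample and advance
      }
      printf("sampled %zu\n", yl._sampled_rs_lengths);
      return 0;
    }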
  245
  246  // It keeps track of the humongous regions.
  247  MasterHumongousRegionSet _humongous_set;
  248
  249  // The number of regions we could create by expansion.
- 250  size_t _expansion_regions;
+ 250  uint _expansion_regions;
  251
  252  // The block offset table for the G1 heap.
  253  G1BlockOffsetSharedArray* _bot_shared;
  254
  255  // Tears down the region sets / lists so that they are empty and the
  337  // points into the collection set or not. This field is also used to
  338  // free the array.
  339  bool* _in_cset_fast_test_base;
  340
  341  // The length of the _in_cset_fast_test_base array.
- 342  size_t _in_cset_fast_test_length;
+ 342  uint _in_cset_fast_test_length;
  343
  344  volatile unsigned _gc_time_stamp;
  345
  346  size_t* _surviving_young_words;
  347
  456  // Attempt to satisfy a humongous allocation request of the given
  457  // size by finding a contiguous set of free regions of num_regions
  458  // length and removing them from the master free list. Return the
  459  // index of the first region or G1_NULL_HRS_INDEX if the search
  460  // was unsuccessful.
- 461  size_t humongous_obj_allocate_find_first(size_t num_regions,
+ 461  uint humongous_obj_allocate_find_first(uint num_regions,
  462                                           size_t word_size);
  463
  464  // Initialize a contiguous set of free regions of length num_regions
  465  // and starting at index first so that they appear as a single
  466  // humongous region.
- 467  HeapWord* humongous_obj_allocate_initialize_regions(size_t first,
- 468                                                      size_t num_regions,
+ 467  HeapWord* humongous_obj_allocate_initialize_regions(uint first,
+ 468                                                      uint num_regions,
  469                                                      size_t word_size);
  470
  471  // Attempt to allocate a humongous object of the given size. Return
  472  // NULL if unsuccessful.
  473  HeapWord* humongous_obj_allocate(size_t word_size);
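The search humongous_obj_allocate_find_first performs is, in essence, a first-fit scan for num_regions adjacent free regions. A simplified standalone sketch of that scan (the real method consults G1's heap-region sequence and master free list, not a plain bitmap; all names here are illustrative):

    #include <cstdio>

    typedef unsigned int uint;
    const uint NULL_HRS_INDEX = (uint) -1;  // assumed "not found" sentinel

    uint find_first_contiguous(const bool* is_free, uint n_regions,
                               uint num_regions) {
      uint run_start = 0, run_len = 0;
      for (uint i = 0; i < n_regions; i++) {
        if (is_free[i]) {
          if (run_len == 0) run_start = i;  // a new run of free regions begins
          if (++run_len == num_regions) return run_start;
        } else {
          run_len = 0;                      // run broken by a used region
        }
      }
      return NULL_HRS_INDEX;                // no run was long enough
    }

    int main() {
      bool free_map[] = {true, false, true, true, true, false};
      printf("%u\n", find_first_contiguous(free_map, 6, 3));  // prints 2
      return 0;
    }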
  572  HeapRegion* new_mutator_alloc_region(size_t word_size, bool force);
  573  void retire_mutator_alloc_region(HeapRegion* alloc_region,
  574                                   size_t allocated_bytes);
  575
  576  // For GC alloc regions.
- 577  HeapRegion* new_gc_alloc_region(size_t word_size, size_t count,
+ 577  HeapRegion* new_gc_alloc_region(size_t word_size, uint count,
  578                                  GCAllocPurpose ap);
  579  void retire_gc_alloc_region(HeapRegion* alloc_region,
  580                              size_t allocated_bytes, GCAllocPurpose ap);
  581
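The new_*/retire_* pairs bracket the lifetime of an active allocation region: a region is handed out, filled by allocation, then retired with the byte count that was actually handed out. A standalone sketch of that pairing with a stub HeapRegion (shapes are assumed; the real protocol is driven by G1's allocation-region machinery):

    #include <cstdio>
    #include <cstddef>

    struct HeapRegion { size_t used_bytes; };

    HeapRegion* new_mutator_alloc_region(size_t word_size, bool force) {
      (void) word_size; (void) force;  // the real code may fail and return NULL
      return new HeapRegion();
    }

    void retire_mutator_alloc_region(HeapRegion* hr, size_t allocated_bytes) {
      printf("retired region after %zu bytes\n", allocated_bytes);
      delete hr;                       // the real code re-files the region
    }

    int main() {
      HeapRegion* hr = new_mutator_alloc_region(1024 /* words */, false);
      if (hr != NULL) {
        hr->used_bytes = 512 * sizeof(void*);  // pretend bump allocation
        retire_mutator_alloc_region(hr, hr->used_bytes);
      }
      return 0;
    }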
  582  // - if explicit_gc is true, the GC is for a System.gc() or a heap
  639  // We register a region with the fast "in collection set" test. We
  640  // simply set the array slot corresponding to this region to true.
  641  void register_region_with_in_cset_fast_test(HeapRegion* r) {
  642    assert(_in_cset_fast_test_base != NULL, "sanity");
  643    assert(r->in_collection_set(), "invariant");
- 644    size_t index = r->hrs_index();
+ 644    uint index = r->hrs_index();
  645    assert(index < _in_cset_fast_test_length, "invariant");
  646    assert(!_in_cset_fast_test_base[index], "invariant");
  647    _in_cset_fast_test_base[index] = true;
  648  }
  649
  653  bool in_cset_fast_test(oop obj) {
  654    assert(_in_cset_fast_test != NULL, "sanity");
  655    if (_g1_committed.contains((HeapWord*) obj)) {
  656      // no need to subtract the bottom of the heap from obj,
  657      // _in_cset_fast_test is biased
- 658      size_t index = ((size_t) obj) >> HeapRegion::LogOfHRGrainBytes;
+ 658      uintx index = (uintx) obj >> HeapRegion::LogOfHRGrainBytes;
  659      bool ret = _in_cset_fast_test[index];
  660      // let's make sure the result is consistent with what the slower
  661      // test returns
  662      assert( ret || !obj_in_cs(obj), "sanity");
  663      assert(!ret || obj_in_cs(obj), "sanity");
  668  }
  669
  670  void clear_cset_fast_test() {
  671    assert(_in_cset_fast_test_base != NULL, "sanity");
  672    memset(_in_cset_fast_test_base, false,
- 673           _in_cset_fast_test_length * sizeof(bool));
+ 673           (size_t) _in_cset_fast_test_length * sizeof(bool));
  674  }
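The "biased" comment above is the key to the shift-only lookup: the _in_cset_fast_test pointer is pre-offset by (heap bottom >> LogOfHRGrainBytes), so shifting an object's raw address yields a valid index directly, with no per-query subtraction. A standalone sketch of the trick (addresses, grain size, and names are all illustrative):

    #include <cstring>
    #include <cstdio>
    #include <cstddef>

    int main() {
      const unsigned LOG_GRAIN   = 20;         // 1 MB regions (illustrative)
      const size_t   NUM_REGIONS = 8;
      const size_t   HEAP_BOTTOM = 0x40000000; // pretend heap base address

      bool base[NUM_REGIONS];
      memset(base, false, NUM_REGIONS * sizeof(bool));

      // Bias the pointer so (addr >> LOG_GRAIN) indexes it directly.
      bool* biased = base - (HEAP_BOTTOM >> LOG_GRAIN);

      size_t obj_addr = HEAP_BOTTOM + 3 * (1u << LOG_GRAIN) + 123; // region 3
      biased[obj_addr >> LOG_GRAIN] = true;    // register region 3

      printf("region 3 in cset: %d\n", base[3]);  // prints 1
      return 0;
    }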
  675
  676  // This is called at the end of either a concurrent cycle or a Full
  677  // GC to update the number of full collections completed. Those two
  678  // can happen in a nested fashion, i.e., we start a concurrent
 1099  virtual bool is_maximal_no_gc() const {
 1100    return _g1_storage.uncommitted_size() == 0;
 1101  }
 1102
 1103  // The total number of regions in the heap.
-1104  size_t n_regions() { return _hrs.length(); }
+1104  uint n_regions() { return _hrs.length(); }
 1105
 1106  // The max number of regions in the heap.
-1107  size_t max_regions() { return _hrs.max_length(); }
+1107  uint max_regions() { return _hrs.max_length(); }
 1108
 1109  // The number of regions that are completely free.
-1110  size_t free_regions() { return _free_list.length(); }
+1110  uint free_regions() { return _free_list.length(); }
 1111
 1112  // The number of regions that are not completely free.
-1113  size_t used_regions() { return n_regions() - free_regions(); }
+1113  uint used_regions() { return n_regions() - free_regions(); }
 1114
 1115  // The number of regions available for "regular" expansion.
-1116  size_t expansion_regions() { return _expansion_regions; }
+1116  uint expansion_regions() { return _expansion_regions; }
 1117
 1118  // Factory method for HeapRegion instances. It will return NULL if
 1119  // the allocation fails.
-1120  HeapRegion* new_heap_region(size_t hrs_index, HeapWord* bottom);
+1120  HeapRegion* new_heap_region(uint hrs_index, HeapWord* bottom);
 1121
 1122  void verify_not_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
 1123  void verify_dirty_region(HeapRegion* hr) PRODUCT_RETURN;
 1124  void verify_dirty_young_list(HeapRegion* head) PRODUCT_RETURN;
 1125  void verify_dirty_young_regions() PRODUCT_RETURN;
 1299  // is NULL), in address order, terminating early if the "doHeapRegion"
 1300  // method returns "true".
 1301  void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk) const;
 1302
 1303  // Return the region with the given index. It assumes the index is valid.
-1304  HeapRegion* region_at(size_t index) const { return _hrs.at(index); }
+1304  HeapRegion* region_at(uint index) const { return _hrs.at(index); }
 1305
 1306  // Divide the heap region sequence into "chunks" of some size (the number
 1307  // of regions divided by the number of parallel threads times some
 1308  // overpartition factor, currently 4). Assumes that this will be called
 1309  // in parallel by ParallelGCThreads worker threads with distinct worker