src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp

changeset 2315:631f79e71e90
parent    2314:f95d63e2154a
child     2333:016a3628c885
@@ -25,10 +25,11 @@
 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
 
 #include "gc_implementation/g1/concurrentMark.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.hpp"
+#include "gc_implementation/g1/g1CollectorPolicy.hpp"
 #include "gc_implementation/g1/heapRegionSeq.hpp"
 #include "utilities/taskqueue.hpp"
 
 // Inline functions for G1CollectedHeap
 
@@ -56,41 +57,118 @@
 inline bool G1CollectedHeap::obj_in_cs(oop obj) {
   HeapRegion* r = _hrs->addr_to_region(obj);
   return r != NULL && r->in_collection_set();
 }
 
-inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
-                                                     bool permit_collection_pause) {
-  HeapWord* res = NULL;
-
-  assert( SafepointSynchronize::is_at_safepoint() ||
-          Heap_lock->owned_by_self(), "pre-condition of the call" );
-
-  // All humongous allocation requests should go through the slow path in
-  // attempt_allocation_slow().
-  if (!isHumongous(word_size) && _cur_alloc_region != NULL) {
-    // If this allocation causes a region to become non empty,
-    // then we need to update our free_regions count.
-
-    if (_cur_alloc_region->is_empty()) {
-      res = _cur_alloc_region->allocate(word_size);
-      if (res != NULL)
-        _free_regions--;
-    } else {
-      res = _cur_alloc_region->allocate(word_size);
-    }
-
-    if (res != NULL) {
-      if (!SafepointSynchronize::is_at_safepoint()) {
-        assert( Heap_lock->owned_by_self(), "invariant" );
-        Heap_lock->unlock();
-      }
-      return res;
-    }
-  }
-  // attempt_allocation_slow will also unlock the heap lock when appropriate.
-  return attempt_allocation_slow(word_size, permit_collection_pause);
-}
+// See the comment in the .hpp file about the locking protocol and
+// assumptions of this method (and other related ones).
+inline HeapWord*
+G1CollectedHeap::allocate_from_cur_alloc_region(HeapRegion* cur_alloc_region,
+                                                size_t word_size) {
+  assert_heap_locked_and_not_at_safepoint();
+  assert(cur_alloc_region != NULL, "pre-condition of the method");
+  assert(cur_alloc_region == _cur_alloc_region, "pre-condition of the method");
+  assert(cur_alloc_region->is_young(),
+         "we only support young current alloc regions");
+  assert(!isHumongous(word_size), "allocate_from_cur_alloc_region() "
+         "should not be used for humongous allocations");
+  assert(!cur_alloc_region->isHumongous(), "Catch a regression of this bug.");
+
+  assert(!cur_alloc_region->is_empty(),
+         err_msg("region ["PTR_FORMAT","PTR_FORMAT"] should not be empty",
+                 cur_alloc_region->bottom(), cur_alloc_region->end()));
+  // This allocate method does BOT updates and we don't need them in
+  // the young generation. This will be fixed in the near future by
+  // CR 6994297.
+  HeapWord* result = cur_alloc_region->allocate(word_size);
+  if (result != NULL) {
+    assert(is_in(result), "result should be in the heap");
+    Heap_lock->unlock();
+
+    // Do the dirtying after we release the Heap_lock.
+    dirty_young_block(result, word_size);
+    return result;
+  }
+
+  assert_heap_locked();
+  return NULL;
+}
+
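The new fast path has an asymmetric locking contract: Heap_lock must be held
on entry, is released on the success path before the card dirtying, and stays
held on the failure path so the caller can retire or replace the region. Below
is a minimal, self-contained sketch of that contract, with std::mutex and a
flat byte arena standing in for Heap_lock and the current young alloc region;
all names here are illustrative stand-ins, not HotSpot APIs.

    #include <cstddef>
    #include <mutex>

    std::mutex heap_mutex;       // stand-in for HotSpot's Heap_lock
    char   region[4096];         // stand-in for the current (young) alloc region
    size_t region_top = 0;

    // Mirrors allocate_from_cur_alloc_region's contract: heap_mutex must be
    // held on entry; on success the block is returned and the mutex has been
    // RELEASED (so post-processing can run lock-free); on failure nullptr is
    // returned and the caller STILL owns the mutex.
    void* try_fast_alloc(size_t bytes) {
      if (region_top + bytes <= sizeof(region)) {
        void* p = region + region_top;
        region_top += bytes;
        heap_mutex.unlock();     // success path drops the lock first...
        // ...then post-processing (dirty_young_block in G1) happens unlocked.
        return p;
      }
      return nullptr;            // failure: lock intentionally kept held
    }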
+// See the comment in the .hpp file about the locking protocol and
+// assumptions of this method (and other related ones).
+inline HeapWord*
+G1CollectedHeap::attempt_allocation(size_t word_size) {
+  assert_heap_locked_and_not_at_safepoint();
+  assert(!isHumongous(word_size), "attempt_allocation() should not be called "
+         "for humongous allocation requests");
+
+  HeapRegion* cur_alloc_region = _cur_alloc_region;
+  if (cur_alloc_region != NULL) {
+    HeapWord* result = allocate_from_cur_alloc_region(cur_alloc_region,
+                                                      word_size);
+    if (result != NULL) {
+      assert_heap_not_locked();
+      return result;
+    }
+
+    assert_heap_locked();
+
+    // Since we couldn't successfully allocate into it, retire the
+    // current alloc region.
+    retire_cur_alloc_region(cur_alloc_region);
+  }
+
+  // Try to get a new region and allocate out of it
+  HeapWord* result = replace_cur_alloc_region_and_allocate(word_size,
+                                                           false, /* at safepoint */
+                                                           true /* do_dirtying */);
+  if (result != NULL) {
+    assert_heap_not_locked();
+    return result;
+  }
+
+  assert_heap_locked();
+  return NULL;
+}
+
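Continuing the sketch above, attempt_allocation's new shape is: try the
current region, and if that fails (still holding the lock) retire it, install
a replacement, and retry exactly once before handing the still-locked failure
back to the caller's slow path. retire_region() and install_fresh_region()
are hypothetical stand-ins for retire_cur_alloc_region() and
replace_cur_alloc_region_and_allocate().

    void retire_region()        { /* fold the region's stats into the heap's */ }
    void install_fresh_region() { region_top = 0; /* pretend it's a new region */ }

    void* attempt_allocation_sketch(size_t bytes) {
      // Pre-condition: heap_mutex is held and we are not at a safepoint.
      if (void* p = try_fast_alloc(bytes)) {
        return p;                // fast path succeeded; mutex already released
      }
      // Still holding the mutex: retire the exhausted region, install a
      // fresh one, and retry exactly once before reporting failure.
      retire_region();
      install_fresh_region();
      if (void* p = try_fast_alloc(bytes)) {
        return p;
      }
      return nullptr;            // mutex still held; caller takes the slow path
    }

    // Caller usage:
    //   heap_mutex.lock();
    //   void* p = attempt_allocation_sketch(64);
    //   if (p == nullptr) { /* slow path, then */ heap_mutex.unlock(); }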
+inline void
+G1CollectedHeap::retire_cur_alloc_region_common(HeapRegion* cur_alloc_region) {
+  assert_heap_locked_or_at_safepoint();
+  assert(cur_alloc_region != NULL && cur_alloc_region == _cur_alloc_region,
+         "pre-condition of the call");
+  assert(cur_alloc_region->is_young(),
+         "we only support young current alloc regions");
+
+  // The region is guaranteed to be young
+  g1_policy()->add_region_to_incremental_cset_lhs(cur_alloc_region);
+  _summary_bytes_used += cur_alloc_region->used();
+  _cur_alloc_region = NULL;
+}
+
+// It dirties the cards that cover the block so that the post
+// write barrier never queues anything when updating objects on this
+// block. It is assumed (and in fact we assert) that the block
+// belongs to a young region.
+inline void
+G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
+  assert_heap_not_locked();
+
+  // Assign the containing region to containing_hr so that we don't
+  // have to keep calling heap_region_containing_raw() in the
+  // asserts below.
+  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
+  assert(containing_hr != NULL && start != NULL && word_size > 0,
+         "pre-condition");
+  assert(containing_hr->is_in(start), "it should contain start");
+  assert(containing_hr->is_young(), "it should be young");
+  assert(!containing_hr->isHumongous(), "it should not be humongous");
+
+  HeapWord* end = start + word_size;
+  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");
+
+  MemRegion mr(start, end);
+  ((CardTableModRefBS*)_g1h->barrier_set())->dirty(mr);
+}
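dirty_young_block() pre-dirties every card spanned by the new block so that
the post-write barrier's already-dirty filter short-circuits for stores into
it, as the comment above states. A self-contained sketch of the card
arithmetic, assuming the classic one-byte-per-512-byte-card layout
(CardTableModRefBS uses a card_shift of 9); the dirty encoding below is an
assumption for illustration rather than HotSpot's actual dirty_card_val().

    #include <cstdint>
    #include <cstring>

    const int     kCardShift = 9;   // 2^9 = 512 heap bytes per card
    const uint8_t kDirtyCard = 0;   // assumed dirty encoding

    // Marks every card overlapping [start, start + bytes) as dirty: the
    // moral equivalent of CardTableModRefBS::dirty(mr) for a newly
    // allocated young block.
    void dirty_block(uint8_t* card_table, uintptr_t heap_base,
                     uintptr_t start, size_t bytes) {
      size_t first_card = (start - heap_base) >> kCardShift;
      size_t last_card  = (start + bytes - 1 - heap_base) >> kCardShift;
      memset(card_table + first_card, kDirtyCard, last_card - first_card + 1);
    }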
 
 inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
   return _task_queues->queue(i);
 }
