Mon, 21 Jul 2014 09:41:06 +0200
8035401: Fix visibility of G1ParScanThreadState members
Summary: After JDK-8035400 there were several opportunities to fix the visibility of several members of the G1ParScanThreadState class.
Reviewed-by: brutisso, mgerdin
1 /*
2 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
27 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
28 #include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
29 #include "oops/oop.inline.hpp"
30 #include "oops/oop.pcgc.inline.hpp"
31 #include "runtime/prefetch.inline.hpp"
33 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
34 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
35 #endif // _MSC_VER
// Per-worker-thread state for a G1 evacuation pause. "queue_num" is this
// worker's index: it selects the worker's task queue and identifies the
// thread (e.g. for string-dedup enqueueing). The two PLABs are sized with
// the heap's currently desired PLAB size for their respective purpose.
G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp)
  : _g1h(g1h),
    _refs(g1h->task_queue(queue_num)),
    _dcq(&g1h->dirty_card_queue_set()),
    _ct_bs(g1h->g1_barrier_set()),
    _g1_rem(g1h->g1_rem_set()),
    _hash_seed(17), _queue_num(queue_num),
    _term_attempts(0),
    _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
    _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
    _age_table(false), _scanner(g1h, this, rp),
    _strong_roots_time(0), _term_time(0),
    _alloc_buffer_waste(0), _undo_waste(0) {
  // we allocate G1YoungSurvRateNumRegions plus one entries, since
  // we "sacrifice" entry 0 to keep track of surviving bytes for
  // non-young regions (where the age is -1)
  // We also add a few elements at the beginning and at the end in
  // an attempt to eliminate cache contention
  uint real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
  uint array_length = PADDING_ELEM_NUM +
                      real_length +
                      PADDING_ELEM_NUM;
  _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
  // NOTE(review): NEW_C_HEAP_ARRAY is generally expected to exit the VM
  // itself on allocation failure, which would make this NULL check dead
  // code -- confirm against allocation.hpp before relying on it.
  if (_surviving_young_words_base == NULL)
    vm_exit_out_of_memory(array_length * sizeof(size_t), OOM_MALLOC_ERROR,
                          "Not enough space for young surv histo.");
  // Callers index from _surviving_young_words, i.e. past the leading
  // padding elements; only the real_length entries are zeroed.
  _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
  memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));

  _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
  _alloc_buffers[GCAllocForTenured] = &_tenured_alloc_buffer;

  _start = os::elapsedTime();
}
// Retire the outstanding PLABs (recording their unused space as waste and
// flushing their stats) before releasing the surviving-young-words array
// allocated in the constructor. Note: the base pointer (including the
// padding elements) is freed, not the offset _surviving_young_words.
G1ParScanThreadState::~G1ParScanThreadState() {
  retire_alloc_buffers();
  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
}
77 void
78 G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st)
79 {
80 st->print_raw_cr("GC Termination Stats");
81 st->print_raw_cr(" elapsed --strong roots-- -------termination-------"
82 " ------waste (KiB)------");
83 st->print_raw_cr("thr ms ms % ms % attempts"
84 " total alloc undo");
85 st->print_raw_cr("--- --------- --------- ------ --------- ------ --------"
86 " ------- ------- -------");
87 }
89 void
90 G1ParScanThreadState::print_termination_stats(int i,
91 outputStream* const st) const
92 {
93 const double elapsed_ms = elapsed_time() * 1000.0;
94 const double s_roots_ms = strong_roots_time() * 1000.0;
95 const double term_ms = term_time() * 1000.0;
96 st->print_cr("%3d %9.2f %9.2f %6.2f "
97 "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
98 SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
99 i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
100 term_ms, term_ms * 100 / elapsed_ms, term_attempts(),
101 (alloc_buffer_waste() + undo_waste()) * HeapWordSize / K,
102 alloc_buffer_waste() * HeapWordSize / K,
103 undo_waste() * HeapWordSize / K);
104 }
106 #ifdef ASSERT
// Debug-only sanity check for a compressed (narrow) reference: narrow
// refs never carry the partial-array mask, and the oop they encode must
// lie within the G1 reserved heap. Always returns true so the call can
// be wrapped in an assert().
bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
  assert(ref != NULL, "invariant");
  assert(UseCompressedOops, "sanity");
  assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, p2i(ref)));
  oop p = oopDesc::load_decode_heap_oop(ref);
  assert(_g1h->is_in_g1_reserved(p),
         err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p)));
  return true;
}
117 bool G1ParScanThreadState::verify_ref(oop* ref) const {
118 assert(ref != NULL, "invariant");
119 if (has_partial_array_mask(ref)) {
120 // Must be in the collection set--it's already been copied.
121 oop p = clear_partial_array_mask(ref);
122 assert(_g1h->obj_in_cs(p),
123 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p)));
124 } else {
125 oop p = oopDesc::load_decode_heap_oop(ref);
126 assert(_g1h->is_in_g1_reserved(p),
127 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p)));
128 }
129 return true;
130 }
132 bool G1ParScanThreadState::verify_task(StarTask ref) const {
133 if (ref.is_narrow()) {
134 return verify_ref((narrowOop*) ref);
135 } else {
136 return verify_ref((oop*) ref);
137 }
138 }
139 #endif // ASSERT
// Fully drain this worker's task queue, dispatching each reference for
// processing. Processing a reference can push new work onto the queue
// (e.g. copy_to_survivor_space() pushes partial-array tasks and the
// scanner pushes field references), hence the outer loop that re-checks
// emptiness.
void G1ParScanThreadState::trim_queue() {
  // dispatch_reference() may take the evacuation-failure path, which
  // requires the failure closure to have been installed.
  assert(_evac_failure_cl != NULL, "not set");

  StarTask ref;
  do {
    // Drain the overflow stack first, so other threads can steal.
    while (_refs->pop_overflow(ref)) {
      dispatch_reference(ref);
    }

    while (_refs->pop_local(ref)) {
      dispatch_reference(ref);
    }
  } while (!_refs->is_empty());
}
// Copy "old" into a survivor or tenured region (as chosen by the
// collector policy from the object's age and size) and return its new
// location. Exactly one thread wins the forward_to_atomic() race and
// performs the copy; a thread that loses the race undoes its allocation
// and returns the winner's copy. If no space can be allocated (or an
// evacuation failure is injected in non-product builds), the
// evacuation-failure handler determines the result.
oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
  size_t word_sz = old->size();
  HeapRegion* from_region = _g1h->heap_region_containing_raw(old);
  // +1 to make the -1 indexes valid...
  int young_index = from_region->young_index_in_cset()+1;
  assert( (from_region->is_young() && young_index > 0) ||
          (!from_region->is_young() && young_index == 0), "invariant" );
  G1CollectorPolicy* g1p = _g1h->g1_policy();
  // Read the mark word once; it is needed both for the age decision here
  // and for installing into the new copy below.
  markOop m = old->mark();
  int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
                                           : m->age();
  GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
                                                             word_sz);
  HeapWord* obj_ptr = allocate(alloc_purpose, word_sz);
#ifndef PRODUCT
  // Should this evacuation fail?
  if (_g1h->evacuation_should_fail()) {
    if (obj_ptr != NULL) {
      undo_allocation(alloc_purpose, obj_ptr, word_sz);
      obj_ptr = NULL;
    }
  }
#endif // !PRODUCT

  if (obj_ptr == NULL) {
    // This will either forward-to-self, or detect that someone else has
    // installed a forwarding pointer.
    return _g1h->handle_evacuation_failure_par(this, old);
  }

  oop obj = oop(obj_ptr);

  // We're going to allocate linearly, so might as well prefetch ahead.
  Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);

  oop forward_ptr = old->forward_to_atomic(obj);
  if (forward_ptr == NULL) {
    // We won the race to forward the object; do the actual copy.
    Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);

    // alloc_purpose is just a hint to allocate() above, recheck the type of region
    // we actually allocated from and update alloc_purpose accordingly
    HeapRegion* to_region = _g1h->heap_region_containing_raw(obj_ptr);
    alloc_purpose = to_region->is_young() ? GCAllocForSurvived : GCAllocForTenured;

    if (g1p->track_object_age(alloc_purpose)) {
      // We could simply do obj->incr_age(). However, this causes a
      // performance issue. obj->incr_age() will first check whether
      // the object has a displaced mark by checking its mark word;
      // getting the mark word from the new location of the object
      // stalls. So, given that we already have the mark word and we
      // are about to install it anyway, it's better to increase the
      // age on the mark word, when the object does not have a
      // displaced mark word. We're not expecting many objects to have
      // a displaced marked word, so that case is not optimized
      // further (it could be...) and we simply call obj->incr_age().

      if (m->has_displaced_mark_helper()) {
        // in this case, we have to install the mark word first,
        // otherwise obj looks to be forwarded (the old mark word,
        // which contains the forward pointer, was copied)
        obj->set_mark(m);
        obj->incr_age();
      } else {
        m = m->incr_age();
        obj->set_mark(m);
      }
      age_table()->add(obj, word_sz);
    } else {
      obj->set_mark(m);
    }

    if (G1StringDedup::is_enabled()) {
      G1StringDedup::enqueue_from_evacuation(from_region->is_young(),
                                             to_region->is_young(),
                                             queue_num(),
                                             obj);
    }

    // Account the surviving bytes against the object's cset index
    // (entry 0 is the non-young bucket; see the constructor).
    size_t* surv_young_words = surviving_young_words();
    surv_young_words[young_index] += word_sz;

    if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
      // We keep track of the next start index in the length field of
      // the to-space object. The actual length can be found in the
      // length field of the from-space object.
      arrayOop(obj)->set_length(0);
      oop* old_p = set_partial_array_mask(old);
      push_on_queue(old_p);
    } else {
      // No point in using the slower heap_region_containing() method,
      // given that we know obj is in the heap.
      _scanner.set_region(_g1h->heap_region_containing_raw(obj));
      obj->oop_iterate_backwards(&_scanner);
    }
  } else {
    // Lost the race: give back our allocation and use the winner's copy.
    undo_allocation(alloc_purpose, obj_ptr, word_sz);
    obj = forward_ptr;
  }
  return obj;
}
258 HeapWord* G1ParScanThreadState::allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
259 HeapWord* obj = NULL;
260 size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
261 if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
262 G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
263 add_to_alloc_buffer_waste(alloc_buf->words_remaining());
264 alloc_buf->retire(false /* end_of_gc */, false /* retain */);
266 HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
267 if (buf == NULL) {
268 return NULL; // Let caller handle allocation failure.
269 }
270 // Otherwise.
271 alloc_buf->set_word_size(gclab_word_size);
272 alloc_buf->set_buf(buf);
274 obj = alloc_buf->allocate(word_sz);
275 assert(obj != NULL, "buffer was definitely big enough...");
276 } else {
277 obj = _g1h->par_allocate_during_gc(purpose, word_sz);
278 }
279 return obj;
280 }
282 void G1ParScanThreadState::undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
283 if (alloc_buffer(purpose)->contains(obj)) {
284 assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
285 "should contain whole object");
286 alloc_buffer(purpose)->undo_allocation(obj, word_sz);
287 } else {
288 CollectedHeap::fill_with_object(obj, word_sz);
289 add_to_undo_waste(word_sz);
290 }
291 }
293 HeapWord* G1ParScanThreadState::allocate(GCAllocPurpose purpose, size_t word_sz) {
294 HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
295 if (obj != NULL) {
296 return obj;
297 }
298 return allocate_slow(purpose, word_sz);
299 }
301 void G1ParScanThreadState::retire_alloc_buffers() {
302 for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
303 size_t waste = _alloc_buffers[ap]->words_remaining();
304 add_to_alloc_buffer_waste(waste);
305 _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
306 true /* end_of_gc */,
307 false /* retain */);
308 }
309 }