src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp

changeset:   6680:78bbf4d43a14
author:      drchase
date:        Thu, 22 May 2014 15:52:41 -0400
parent:      6541:bfdf528be8e8
children:    6876:710a3c8b516e, 6911:ce8f6bb717c9
permissions: -rw-r--r--

8037816: Fix for 8036122 breaks build with Xcode5/clang
8043029: Change 8037816 breaks HS build with older GCC versions which don't support diagnostic pragmas
8043164: Format warning in traceStream.hpp
Summary: Backport of main fix + two corrections, enables clang compilation, turns on format attributes, corrects/mutes warnings
Reviewed-by: kvn, coleenp, iveresov, twisti

/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP

#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "utilities/taskqueue.hpp"

// Inline functions for G1CollectedHeap

// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrs.at(index); }

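// Return the region containing the given address, or NULL if the
// address is outside the heap. For an address inside a humongous
// object this returns the starts-humongous region.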
template <class T>
inline HeapRegion*
G1CollectedHeap::heap_region_containing(const T addr) const {
  HeapRegion* hr = _hrs.addr_to_region((HeapWord*) addr);
  // hr can be null if addr is not within the heap
  if (hr != NULL && hr->continuesHumongous()) {
    hr = hr->humongous_start_region();
  }
  return hr;
}

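// A "raw" version of the above: it asserts that the given address is
// within the reserved heap and does not redirect a continues-humongous
// region to its starts-humongous region.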
template <class T>
inline HeapRegion*
G1CollectedHeap::heap_region_containing_raw(const T addr) const {
  assert(_g1_reserved.contains((const void*) addr), "invariant");
  HeapRegion* res = _hrs.addr_to_region_unsafe((HeapWord*) addr);
  return res;
}

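// Remove the given region from the set of old regions.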
inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
  _old_set.remove(hr);
}

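// Return whether the given object lives in a region that is part of
// the current collection set. This is the slow test; see
// in_cset_fast_test() below for the table-based fast path.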
inline bool G1CollectedHeap::obj_in_cs(oop obj) {
  HeapRegion* r = _hrs.addr_to_region((HeapWord*) obj);
  return r != NULL && r->in_collection_set();
}

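// Mutator allocation of a non-humongous block: first try the current
// mutator alloc region without taking the Heap_lock, then fall back
// to the slow path on failure.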
inline HeapWord*
G1CollectedHeap::attempt_allocation(size_t word_size,
                                    unsigned int* gc_count_before_ret,
                                    int* gclocker_retry_count_ret) {
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!isHumongous(word_size), "attempt_allocation() should not "
         "be called for humongous allocation requests");

  HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size,
                                                              false /* bot_updates */);
  if (result == NULL) {
    result = attempt_allocation_slow(word_size,
                                     gc_count_before_ret,
                                     gclocker_retry_count_ret);
  }
  assert_heap_not_locked();
  if (result != NULL) {
    dirty_young_block(result, word_size);
  }
  return result;
}

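// Allocate a block in the survivor GC alloc region during evacuation.
// Tries a lock-free allocation first and then retries under the
// FreeList_lock.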
inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t word_size) {
  assert(!isHumongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = _survivor_gc_alloc_region.attempt_allocation(word_size,
                                                                  false /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = _survivor_gc_alloc_region.attempt_allocation_locked(word_size,
                                                                 false /* bot_updates */);
  }
  if (result != NULL) {
    dirty_young_block(result, word_size);
  }
  return result;
}

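// As above, but for the old GC alloc region. Old regions maintain a
// block offset table, hence bot_updates is true, and the block is not
// dirtied as young.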
inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size) {
  assert(!isHumongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = _old_gc_alloc_region.attempt_allocation(word_size,
                                                             true /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = _old_gc_alloc_region.attempt_allocation_locked(word_size,
                                                            true /* bot_updates */);
  }
  return result;
}

// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
// belongs to a young region.
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  assert_heap_not_locked();

  // Assign the containing region to containing_hr so that we don't
  // have to keep calling heap_region_containing_raw() in the
  // asserts below.
  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
  assert(containing_hr != NULL && start != NULL && word_size > 0,
         "pre-condition");
  assert(containing_hr->is_in(start), "it should contain start");
  assert(containing_hr->is_young(), "it should be young");
  assert(!containing_hr->isHumongous(), "it should not be humongous");

  HeapWord* end = start + word_size;
  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

  MemRegion mr(start, end);
  g1_barrier_set()->g1_mark_as_young(mr);
}

inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
  return _task_queues->queue(i);
}

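// Query the concurrent marking bitmaps: "prev" refers to the last
// completed marking cycle, "next" to the cycle in progress (if any).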
inline bool G1CollectedHeap::isMarkedPrev(oop obj) const {
  return _cm->prevMarkBitMap()->isMarked((HeapWord*) obj);
}

inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
  return _cm->nextMarkBitMap()->isMarked((HeapWord*) obj);
}

// This is a fast test on whether a reference points into the
// collection set or not. It assumes that the reference points
// into the heap.
inline bool G1CollectedHeap::in_cset_fast_test(oop obj) {
  assert(_in_cset_fast_test != NULL, "sanity");
  assert(_g1_committed.contains((HeapWord*) obj),
         err_msg("Given reference outside of heap, is "PTR_FORMAT, p2i((HeapWord*) obj)));
  // no need to subtract the bottom of the heap from obj,
  // _in_cset_fast_test is biased
  uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes;
  bool ret = _in_cset_fast_test[index];
  // let's make sure the result is consistent with what the slower
  // test returns
  assert(ret || !obj_in_cs(obj), "sanity");
  assert(!ret || obj_in_cs(obj), "sanity");
  return ret;
}

#ifndef PRODUCT
// Support for G1EvacuationFailureALot

inline bool
G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool gcs_are_young,
                                                     bool during_initial_mark,
                                                     bool during_marking) {
  bool res = false;
  if (during_marking) {
    res |= G1EvacuationFailureALotDuringConcMark;
  }
  if (during_initial_mark) {
    res |= G1EvacuationFailureALotDuringInitialMark;
  }
  if (gcs_are_young) {
    res |= G1EvacuationFailureALotDuringYoungGC;
  } else {
    // GCs are mixed
    res |= G1EvacuationFailureALotDuringMixedGC;
  }
  return res;
}

inline void
G1CollectedHeap::set_evacuation_failure_alot_for_current_gc() {
  if (G1EvacuationFailureALot) {
    // Note we can't assert that _evacuation_failure_alot_for_current_gc
    // is clear here. It may have been set during a previous GC but that GC
    // did not copy enough objects (i.e. G1EvacuationFailureALotCount) to
    // trigger an evacuation failure and clear the flags and counts.

    // Check if we have gone over the interval.
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _evacuation_failure_alot_gc_number;

    _evacuation_failure_alot_for_current_gc = (elapsed_gcs >= G1EvacuationFailureALotInterval);

    // Now check if G1EvacuationFailureALot is enabled for the current GC type.
    const bool gcs_are_young = g1_policy()->gcs_are_young();
    const bool during_im = g1_policy()->during_initial_mark_pause();
    const bool during_marking = mark_in_progress();

    _evacuation_failure_alot_for_current_gc &=
      evacuation_failure_alot_for_gc_type(gcs_are_young,
                                          during_im,
                                          during_marking);
  }
}

inline bool
G1CollectedHeap::evacuation_should_fail() {
  if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
    return false;
  }
  // G1EvacuationFailureALot is in effect for current GC
  // Access to _evacuation_failure_alot_count is not atomic;
  // the value does not have to be exact.
  if (++_evacuation_failure_alot_count < G1EvacuationFailureALotCount) {
    return false;
  }
  _evacuation_failure_alot_count = 0;
  return true;
}

inline void G1CollectedHeap::reset_evacuation_should_fail() {
  if (G1EvacuationFailureALot) {
    _evacuation_failure_alot_gc_number = total_collections();
    _evacuation_failure_alot_count = 0;
    _evacuation_failure_alot_for_current_gc = false;
  }
}
#endif  // #ifndef PRODUCT

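// Return whether obj lives in a young (eden or survivor) region.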
inline bool G1CollectedHeap::is_in_young(const oop obj) {
  HeapRegion* hr = heap_region_containing(obj);
  return hr != NULL && hr->is_young();
}

// We don't need barriers for initializing stores to objects
// in the young gen: for the SATB pre-barrier, there is no
// pre-value that needs to be remembered; for the remembered-set
// update logging post-barrier, we don't maintain remembered set
// information for young gen objects.
inline bool G1CollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
  return is_in_young(new_obj);
}

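// is_obj_dead() and is_obj_ill() treat a non-NULL reference outside
// the heap as dead/ill; for objects inside the heap they defer to the
// per-region overloads.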
inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
  const HeapRegion* hr = heap_region_containing(obj);
  if (hr == NULL) {
    return obj != NULL;
  }
  return is_obj_dead(obj, hr);
}

inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
  const HeapRegion* hr = heap_region_containing(obj);
  if (hr == NULL) {
    return obj != NULL;
  }
  return is_obj_ill(obj, hr);
}

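// Remembered set updates for references found during evacuation.
// References from survivor regions need no entries: survivors are
// young and will be rescanned in full during the next evacuation.
// With G1DeferredRSUpdate the update is queued and applied at the end
// of the GC; otherwise it is performed immediately.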
template <class T> inline void G1ParScanThreadState::immediate_rs_update(HeapRegion* from, T* p, int tid) {
  if (!from->is_survivor()) {
    _g1_rem->par_write_ref(from, p, tid);
  }
}

template <class T> void G1ParScanThreadState::update_rs(HeapRegion* from, T* p, int tid) {
  if (G1DeferredRSUpdate) {
    deferred_rs_update(from, p, tid);
  } else {
    immediate_rs_update(from, p, tid);
  }
}

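// Process one chunk of a partially-scanned object array. The next
// chunk's start index is kept in the length field of the to-space
// copy; if the remaining range is larger than 2 * ParGCArrayScanChunk,
// the tail is pushed back on the queue so other workers can steal it.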
inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
  assert(has_partial_array_mask(p), "invariant");
  oop from_obj = clear_partial_array_mask(p);

  assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
  assert(from_obj->is_objArray(), "must be obj array");
  objArrayOop from_obj_array = objArrayOop(from_obj);
  // The from-space object contains the real length.
  int length = from_obj_array->length();

  assert(from_obj->is_forwarded(), "must be forwarded");
  oop to_obj = from_obj->forwardee();
  assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
  objArrayOop to_obj_array = objArrayOop(to_obj);
  // We keep track of the next start index in the length field of the
  // to-space object.
  int next_index = to_obj_array->length();
  assert(0 <= next_index && next_index < length,
         err_msg("invariant, next index: %d, length: %d", next_index, length));

  int start = next_index;
  int end = length;
  int remainder = end - start;
  // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
  if (remainder > 2 * ParGCArrayScanChunk) {
    end = start + ParGCArrayScanChunk;
    to_obj_array->set_length(end);
    // Push the remainder before we process the range in case another
    // worker has run out of things to do and can steal it.
    oop* from_obj_p = set_partial_array_mask(from_obj);
    push_on_queue(from_obj_p);
  } else {
    assert(length == end, "sanity");
    // We'll process the final range for this object. Restore the length
    // so that the heap remains parsable in case of evacuation failure.
    to_obj_array->set_length(end);
  }
  _scanner.set_region(_g1h->heap_region_containing_raw(to_obj));
  // Process indexes [start,end). It will also process the header
  // along with the first chunk (i.e., the chunk with start == 0).
  // Note that at this point the length field of to_obj_array is not
  // correct given that we are using it to keep track of the next
  // start index. oop_iterate_range() (thankfully!) ignores the length
  // field and only relies on the start / end parameters. It does,
  // however, return the size of the object, which will be incorrect.
  // So we have to ignore it even if we wanted to use it.
  to_obj_array->oop_iterate_range(&_scanner, start, end);
}

template <class T> inline void G1ParScanThreadState::deal_with_reference(T* ref_to_scan) {
  if (!has_partial_array_mask(ref_to_scan)) {
    // Note: we can use the "raw" version of "region_containing" because
    // "ref_to_scan" is definitely in the heap, and is not in a
    // humongous region.
    HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
    do_oop_evac(ref_to_scan, r);
  } else {
    do_oop_partial_array((oop*)ref_to_scan);
  }
}

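// Dispatch a StarTask to the matching deal_with_reference() overload,
// depending on whether it holds a narrow or a full-width oop.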
inline void G1ParScanThreadState::deal_with_reference(StarTask ref) {
  assert(verify_task(ref), "sanity");
  if (ref.is_narrow()) {
    deal_with_reference((narrowOop*)ref);
  } else {
    deal_with_reference((oop*)ref);
  }
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
