src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp

changeset:   6552:8847586c9037
author:      vkempik
date:        Thu, 03 Apr 2014 17:49:31 +0400
parent:      6541:bfdf528be8e8
child:       6680:78bbf4d43a14
permissions: -rw-r--r--

8016302: Change type of the number of GC workers to unsigned int (2)
Reviewed-by: tschatzl, jwilhelm

/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP

#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "utilities/taskqueue.hpp"

// Inline functions for G1CollectedHeap

// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrs.at(index); }

template <class T>
inline HeapRegion*
G1CollectedHeap::heap_region_containing(const T addr) const {
  HeapRegion* hr = _hrs.addr_to_region((HeapWord*) addr);
  // hr can be null if addr in perm_gen
  if (hr != NULL && hr->continuesHumongous()) {
    hr = hr->humongous_start_region();
  }
  return hr;
}

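// Unlike heap_region_containing() above, the "raw" variant below does
// not remap a continues-humongous region to its humongous start
// region; it returns the region that directly covers addr. The caller
// must guarantee that addr lies within the reserved heap (see the
// assert).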
template <class T>
inline HeapRegion*
G1CollectedHeap::heap_region_containing_raw(const T addr) const {
  assert(_g1_reserved.contains((const void*) addr), "invariant");
  HeapRegion* res = _hrs.addr_to_region_unsafe((HeapWord*) addr);
  return res;
}

inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
  _old_set.remove(hr);
}

inline bool G1CollectedHeap::obj_in_cs(oop obj) {
  HeapRegion* r = _hrs.addr_to_region((HeapWord*) obj);
  return r != NULL && r->in_collection_set();
}

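// Mutator allocation fast path: first try a lock-free bump in the
// current mutator alloc region; only on failure fall back to
// attempt_allocation_slow(), which may take the Heap_lock and/or
// trigger a collection.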
inline HeapWord*
G1CollectedHeap::attempt_allocation(size_t word_size,
                                    unsigned int* gc_count_before_ret,
                                    int* gclocker_retry_count_ret) {
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!isHumongous(word_size), "attempt_allocation() should not "
         "be called for humongous allocation requests");

  HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size,
                                                      false /* bot_updates */);
  if (result == NULL) {
    result = attempt_allocation_slow(word_size,
                                     gc_count_before_ret,
                                     gclocker_retry_count_ret);
  }
  assert_heap_not_locked();
  if (result != NULL) {
    dirty_young_block(result, word_size);
  }
  return result;
}

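// GC-time allocation into survivor space: try the active survivor
// alloc region lock-free first, then retry under the FreeList_lock so
// that a fresh survivor region can be obtained if the current one is
// full. Survivor regions are young, so no block-offset-table (BOT)
// updates are performed (bot_updates is false).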
inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t
                                                              word_size) {
  assert(!isHumongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = _survivor_gc_alloc_region.attempt_allocation(word_size,
                                                      false /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = _survivor_gc_alloc_region.attempt_allocation_locked(word_size,
                                                      false /* bot_updates */);
  }
  if (result != NULL) {
    dirty_young_block(result, word_size);
  }
  return result;
}

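// GC-time allocation into old space. In contrast with the survivor
// path above, BOT updates are enabled (old regions maintain their
// block offset table), and the result is not dirtied because old
// regions are not young.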
inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size) {
  assert(!isHumongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = _old_gc_alloc_region.attempt_allocation(word_size,
                                                       true /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = _old_gc_alloc_region.attempt_allocation_locked(word_size,
                                                       true /* bot_updates */);
  }
  return result;
}

// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
// belongs to a young region.
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  assert_heap_not_locked();

  // Assign the containing region to containing_hr so that we don't
  // have to keep calling heap_region_containing_raw() in the
  // asserts below.
  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
  assert(containing_hr != NULL && start != NULL && word_size > 0,
         "pre-condition");
  assert(containing_hr->is_in(start), "it should contain start");
  assert(containing_hr->is_young(), "it should be young");
  assert(!containing_hr->isHumongous(), "it should not be humongous");

  HeapWord* end = start + word_size;
  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

  MemRegion mr(start, end);
  g1_barrier_set()->g1_mark_as_young(mr);
}

inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
  return _task_queues->queue(i);
}

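// Queries against the two concurrent-marking bitmaps: "prev" reflects
// the most recently completed marking cycle, while "next" reflects the
// cycle currently in progress, if any.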
inline bool G1CollectedHeap::isMarkedPrev(oop obj) const {
  return _cm->prevMarkBitMap()->isMarked((HeapWord *)obj);
}

inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
  return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
}

// This is a fast test on whether a reference points into the
// collection set or not. Assume that the reference
// points into the heap.
inline bool G1CollectedHeap::in_cset_fast_test(oop obj) {
  assert(_in_cset_fast_test != NULL, "sanity");
  assert(_g1_committed.contains((HeapWord*) obj), err_msg("Given reference outside of heap, is "PTR_FORMAT, (HeapWord*)obj));
  // no need to subtract the bottom of the heap from obj,
  // _in_cset_fast_test is biased
  uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes;
  bool ret = _in_cset_fast_test[index];
  // let's make sure the result is consistent with what the slower
  // test returns
  assert( ret || !obj_in_cs(obj), "sanity");
  assert(!ret ||  obj_in_cs(obj), "sanity");
  return ret;
}

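// Note on the bias above: conceptually the index is
//   ((uintx)obj - (uintx)heap_bottom) >> HeapRegion::LogOfHRGrainBytes.
// The _in_cset_fast_test pointer has already been offset backwards by
// (heap_bottom >> LogOfHRGrainBytes), so indexing with the raw shifted
// address reaches the same element without a subtraction per query.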
#ifndef PRODUCT
// Support for G1EvacuationFailureALot

inline bool
G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool gcs_are_young,
                                                     bool during_initial_mark,
                                                     bool during_marking) {
  bool res = false;
  if (during_marking) {
    res |= G1EvacuationFailureALotDuringConcMark;
  }
  if (during_initial_mark) {
    res |= G1EvacuationFailureALotDuringInitialMark;
  }
  if (gcs_are_young) {
    res |= G1EvacuationFailureALotDuringYoungGC;
  } else {
    // GCs are mixed
    res |= G1EvacuationFailureALotDuringMixedGC;
  }
  return res;
}

inline void
G1CollectedHeap::set_evacuation_failure_alot_for_current_gc() {
  if (G1EvacuationFailureALot) {
    // Note we can't assert that _evacuation_failure_alot_for_current_gc
    // is clear here. It may have been set during a previous GC but that GC
    // did not copy enough objects (i.e. G1EvacuationFailureALotCount) to
    // trigger an evacuation failure and clear the flags and counts.

    // Check if we have gone over the interval.
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _evacuation_failure_alot_gc_number;

    _evacuation_failure_alot_for_current_gc = (elapsed_gcs >= G1EvacuationFailureALotInterval);

    // Now check if G1EvacuationFailureALot is enabled for the current GC type.
    const bool gcs_are_young = g1_policy()->gcs_are_young();
    const bool during_im = g1_policy()->during_initial_mark_pause();
    const bool during_marking = mark_in_progress();

    _evacuation_failure_alot_for_current_gc &=
      evacuation_failure_alot_for_gc_type(gcs_are_young,
                                          during_im,
                                          during_marking);
  }
}

inline bool
G1CollectedHeap::evacuation_should_fail() {
  if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
    return false;
  }
  // G1EvacuationFailureALot is in effect for current GC
  // Access to _evacuation_failure_alot_count is not atomic;
  // the value does not have to be exact.
  if (++_evacuation_failure_alot_count < G1EvacuationFailureALotCount) {
    return false;
  }
  _evacuation_failure_alot_count = 0;
  return true;
}

inline void G1CollectedHeap::reset_evacuation_should_fail() {
  if (G1EvacuationFailureALot) {
    _evacuation_failure_alot_gc_number = total_collections();
    _evacuation_failure_alot_count = 0;
    _evacuation_failure_alot_for_current_gc = false;
  }
}
#endif  // #ifndef PRODUCT

inline bool G1CollectedHeap::is_in_young(const oop obj) {
  HeapRegion* hr = heap_region_containing(obj);
  return hr != NULL && hr->is_young();
}

// We don't need barriers for initializing stores to objects
// in the young gen: for the SATB pre-barrier, there is no
// pre-value that needs to be remembered; for the remembered-set
// update logging post-barrier, we don't maintain remembered set
// information for young gen objects.
inline bool G1CollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
  return is_in_young(new_obj);
}

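// For both predicates below, an object whose address is not covered by
// any heap region (hr == NULL) is reported as dead, unless the
// reference itself is NULL, in which case the answer is false.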
inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
  const HeapRegion* hr = heap_region_containing(obj);
  if (hr == NULL) {
    if (obj == NULL) return false;
    else return true;
  }
  else return is_obj_dead(obj, hr);
}

inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
  const HeapRegion* hr = heap_region_containing(obj);
  if (hr == NULL) {
    if (obj == NULL) return false;
    else return true;
  }
  else return is_obj_ill(obj, hr);
}

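// Immediate remembered-set update for a reference located at p in
// region "from". References originating in survivor regions are
// skipped: survivor regions are young and always part of the next
// collection set, so no remembered-set entries are needed for them.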
template <class T> inline void G1ParScanThreadState::immediate_rs_update(HeapRegion* from, T* p, int tid) {
  if (!from->is_survivor()) {
    _g1_rem->par_write_ref(from, p, tid);
  }
}

template <class T> void G1ParScanThreadState::update_rs(HeapRegion* from, T* p, int tid) {
  if (G1DeferredRSUpdate) {
    deferred_rs_update(from, p, tid);
  } else {
    immediate_rs_update(from, p, tid);
  }
}

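// Object-array chunking: large objArrays are scanned in bounded chunks
// so that the remaining work can be stolen by other workers. A queue
// entry tagged with the partial-array mask means "continue scanning
// this array"; the to-space copy's length field temporarily holds the
// next start index until the final chunk restores the real length.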
inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
  assert(has_partial_array_mask(p), "invariant");
  oop from_obj = clear_partial_array_mask(p);

  assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
  assert(from_obj->is_objArray(), "must be obj array");
  objArrayOop from_obj_array = objArrayOop(from_obj);
  // The from-space object contains the real length.
  int length                 = from_obj_array->length();

  assert(from_obj->is_forwarded(), "must be forwarded");
  oop to_obj                 = from_obj->forwardee();
  assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
  objArrayOop to_obj_array   = objArrayOop(to_obj);
  // We keep track of the next start index in the length field of the
  // to-space object.
  int next_index             = to_obj_array->length();
  assert(0 <= next_index && next_index < length,
         err_msg("invariant, next index: %d, length: %d", next_index, length));

  int start                  = next_index;
  int end                    = length;
  int remainder              = end - start;
  // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
  if (remainder > 2 * ParGCArrayScanChunk) {
    end = start + ParGCArrayScanChunk;
    to_obj_array->set_length(end);
    // Push the remainder before we process the range in case another
    // worker has run out of things to do and can steal it.
    oop* from_obj_p = set_partial_array_mask(from_obj);
    push_on_queue(from_obj_p);
  } else {
    assert(length == end, "sanity");
    // We'll process the final range for this object. Restore the length
    // so that the heap remains parsable in case of evacuation failure.
    to_obj_array->set_length(end);
  }
  _scanner.set_region(_g1h->heap_region_containing_raw(to_obj));
  // Process indexes [start,end). It will also process the header
  // along with the first chunk (i.e., the chunk with start == 0).
  // Note that at this point the length field of to_obj_array is not
  // correct given that we are using it to keep track of the next
  // start index. oop_iterate_range() (thankfully!) ignores the length
  // field and only relies on the start / end parameters.  It does
  // however return the size of the object which will be incorrect. So
  // we have to ignore it even if we wanted to use it.
  to_obj_array->oop_iterate_range(&_scanner, start, end);
}

template <class T> inline void G1ParScanThreadState::deal_with_reference(T* ref_to_scan) {
  if (!has_partial_array_mask(ref_to_scan)) {
    // Note: we can use "raw" versions of "region_containing" because
    // "obj_to_scan" is definitely in the heap, and is not in a
    // humongous region.
    HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
    do_oop_evac(ref_to_scan, r);
  } else {
    do_oop_partial_array((oop*)ref_to_scan);
  }
}

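// A StarTask holds either an oop* or a narrowOop*, with a tag
// recording which; dispatch to the correctly typed overload above.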
inline void G1ParScanThreadState::deal_with_reference(StarTask ref) {
  assert(verify_task(ref), "sanity");
  if (ref.is_narrow()) {
    deal_with_reference((narrowOop*)ref);
  } else {
    deal_with_reference((oop*)ref);
  }
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
