src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp

Tue, 12 Aug 2014 15:17:46 +0000

author
tschatzl
date
Tue, 12 Aug 2014 15:17:46 +0000
changeset 7024
bfba6779654b
parent 7010
a3953c777565
child 7049
eec72fa4b108
permissions
-rw-r--r--

Merge

/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP

#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "utilities/taskqueue.hpp"

// Inline functions for G1CollectedHeap

// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrs.at(index); }

inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
  assert(is_in_reserved(addr),
         err_msg("Cannot calculate region index for address "PTR_FORMAT" that is outside of the heap ["PTR_FORMAT", "PTR_FORMAT")",
                 p2i(addr), p2i(_reserved.start()), p2i(_reserved.end())));
  return (uint)(pointer_delta(addr, _reserved.start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
}
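
// Worked example (assuming a 1 MB region size, i.e.
// HeapRegion::LogOfHRGrainBytes == 20): an address 5 MB past
// _reserved.start() has a byte offset of 5 * 1024 * 1024, and shifting
// that right by 20 yields region index 5. Passing sizeof(uint8_t) to
// pointer_delta() is what makes the delta a byte offset rather than a
// HeapWord offset.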

template <class T>
inline HeapRegion*
G1CollectedHeap::heap_region_containing(const T addr) const {
  HeapRegion* hr = _hrs.addr_to_region((HeapWord*) addr);
  // hr can be null if addr in perm_gen
  if (hr != NULL && hr->continuesHumongous()) {
    hr = hr->humongous_start_region();
  }
  return hr;
}

template <class T>
inline HeapRegion*
G1CollectedHeap::heap_region_containing_raw(const T addr) const {
  assert(_g1_reserved.contains((const void*) addr), "invariant");
  HeapRegion* res = _hrs.addr_to_region_unsafe((HeapWord*) addr);
  return res;
}
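
// Note the difference between the two lookups above: the "raw" variant
// asserts that the address lies within the reserved heap and returns the
// region as-is, while heap_region_containing() additionally forwards
// addresses that fall into a "continues humongous" region to the
// corresponding humongous start region.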

inline void G1CollectedHeap::reset_gc_time_stamp() {
  _gc_time_stamp = 0;
  OrderAccess::fence();
  // Clear the cached CSet starting regions and time stamps.
  // Their validity is dependent on the GC timestamp.
  clear_cset_start_regions();
}

inline void G1CollectedHeap::increment_gc_time_stamp() {
  ++_gc_time_stamp;
  OrderAccess::fence();
}
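
// Note: the OrderAccess::fence() calls above make the time stamp update
// visible to other threads before any subsequent memory accesses; region
// code compares its per-region time stamp against this global value, so
// the ordering matters.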

inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
  _old_set.remove(hr);
}

inline bool G1CollectedHeap::obj_in_cs(oop obj) {
  HeapRegion* r = _hrs.addr_to_region((HeapWord*) obj);
  return r != NULL && r->in_collection_set();
}

inline HeapWord*
G1CollectedHeap::attempt_allocation(size_t word_size,
                                    unsigned int* gc_count_before_ret,
                                    int* gclocker_retry_count_ret) {
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!isHumongous(word_size), "attempt_allocation() should not "
         "be called for humongous allocation requests");

  HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size,
                                                      false /* bot_updates */);
  if (result == NULL) {
    result = attempt_allocation_slow(word_size,
                                     gc_count_before_ret,
                                     gclocker_retry_count_ret);
  }
  assert_heap_not_locked();
  if (result != NULL) {
    dirty_young_block(result, word_size);
  }
  return result;
}
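
// Note: the path above is the inlined fast path, allocating from the
// current mutator alloc region without taking the Heap_lock;
// attempt_allocation_slow() (defined out-of-line) takes the Heap_lock,
// retries the allocation, and may initiate a collection.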

inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t
                                                              word_size) {
  assert(!isHumongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = _survivor_gc_alloc_region.attempt_allocation(word_size,
                                                      false /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = _survivor_gc_alloc_region.attempt_allocation_locked(word_size,
                                                      false /* bot_updates */);
  }
  if (result != NULL) {
    dirty_young_block(result, word_size);
  }
  return result;
}

inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size) {
  assert(!isHumongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = _old_gc_alloc_region.attempt_allocation(word_size,
                                                       true /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = _old_gc_alloc_region.attempt_allocation_locked(word_size,
                                                       true /* bot_updates */);
  }
  return result;
}
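
// Note the bot_updates arguments: old regions pass true so that their
// block offset table is kept up to date, while survivor regions, being
// young, pass false; G1 does not maintain remembered-set information for
// young regions (see can_elide_initializing_store_barrier() below), so
// that bookkeeping can be skipped there.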

// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
// belongs to a young region.
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  assert_heap_not_locked();

  // Assign the containing region to containing_hr so that we don't
  // have to keep calling heap_region_containing_raw() in the
  // asserts below.
  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
  assert(containing_hr != NULL && start != NULL && word_size > 0,
         "pre-condition");
  assert(containing_hr->is_in(start), "it should contain start");
  assert(containing_hr->is_young(), "it should be young");
  assert(!containing_hr->isHumongous(), "it should not be humongous");

  HeapWord* end = start + word_size;
  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

  MemRegion mr(start, end);
  g1_barrier_set()->g1_mark_as_young(mr);
}

inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
  return _task_queues->queue(i);
}

inline bool G1CollectedHeap::isMarkedPrev(oop obj) const {
  return _cm->prevMarkBitMap()->isMarked((HeapWord *)obj);
}

inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
  return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
}
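
// Note: "prev" and "next" refer to the two concurrent marking bitmaps:
// prevMarkBitMap() holds the result of the last completed marking cycle,
// while nextMarkBitMap() is the bitmap being built by the marking cycle
// currently in progress, if any.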

// This is a fast test on whether a reference points into the
// collection set or not. It assumes that the reference
// points into the heap.
inline bool G1CollectedHeap::is_in_cset(oop obj) {
  bool ret = _in_cset_fast_test.is_in_cset((HeapWord*)obj);
  // let's make sure the result is consistent with what the slower
  // test returns
  assert( ret || !obj_in_cs(obj), "sanity");
  assert(!ret ||  obj_in_cs(obj), "sanity");
  return ret;
}

bool G1CollectedHeap::is_in_cset_or_humongous(const oop obj) {
  return _in_cset_fast_test.is_in_cset_or_humongous((HeapWord*)obj);
}

G1CollectedHeap::in_cset_state_t G1CollectedHeap::in_cset_state(const oop obj) {
  return _in_cset_fast_test.at((HeapWord*)obj);
}

void G1CollectedHeap::register_humongous_region_with_in_cset_fast_test(uint index) {
  _in_cset_fast_test.set_humongous(index);
}
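
// Note: _in_cset_fast_test is a per-region lookup table, so the checks
// above amount to a single table access; in_cset_state_t distinguishes
// not-in-cset, in-cset, and humongous entries, and the asserts in
// is_in_cset() cross-check the table against the slower obj_in_cs()
// region lookup.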

#ifndef PRODUCT
// Support for G1EvacuationFailureALot

inline bool
G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool gcs_are_young,
                                                     bool during_initial_mark,
                                                     bool during_marking) {
  bool res = false;
  if (during_marking) {
    res |= G1EvacuationFailureALotDuringConcMark;
  }
  if (during_initial_mark) {
    res |= G1EvacuationFailureALotDuringInitialMark;
  }
  if (gcs_are_young) {
    res |= G1EvacuationFailureALotDuringYoungGC;
  } else {
    // GCs are mixed
    res |= G1EvacuationFailureALotDuringMixedGC;
  }
  return res;
}

inline void
G1CollectedHeap::set_evacuation_failure_alot_for_current_gc() {
  if (G1EvacuationFailureALot) {
    // Note we can't assert that _evacuation_failure_alot_for_current_gc
    // is clear here. It may have been set during a previous GC but that GC
    // did not copy enough objects (i.e. G1EvacuationFailureALotCount) to
    // trigger an evacuation failure and clear the flags and counts.

    // Check if we have gone over the interval.
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _evacuation_failure_alot_gc_number;

    _evacuation_failure_alot_for_current_gc = (elapsed_gcs >= G1EvacuationFailureALotInterval);

    // Now check if G1EvacuationFailureALot is enabled for the current GC type.
    const bool gcs_are_young = g1_policy()->gcs_are_young();
    const bool during_im = g1_policy()->during_initial_mark_pause();
    const bool during_marking = mark_in_progress();

    _evacuation_failure_alot_for_current_gc &=
      evacuation_failure_alot_for_gc_type(gcs_are_young,
                                          during_im,
                                          during_marking);
  }
}

inline bool
G1CollectedHeap::evacuation_should_fail() {
  if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
    return false;
  }
  // G1EvacuationFailureALot is in effect for the current GC.
  // Access to _evacuation_failure_alot_count is not atomic;
  // the value does not have to be exact.
  if (++_evacuation_failure_alot_count < G1EvacuationFailureALotCount) {
    return false;
  }
  _evacuation_failure_alot_count = 0;
  return true;
}

inline void G1CollectedHeap::reset_evacuation_should_fail() {
  if (G1EvacuationFailureALot) {
    _evacuation_failure_alot_gc_number = total_collections();
    _evacuation_failure_alot_count = 0;
    _evacuation_failure_alot_for_current_gc = false;
  }
}
#endif  // #ifndef PRODUCT

inline bool G1CollectedHeap::is_in_young(const oop obj) {
  HeapRegion* hr = heap_region_containing(obj);
  return hr != NULL && hr->is_young();
}

// We don't need barriers for initializing stores to objects
// in the young gen: for the SATB pre-barrier, there is no
// pre-value that needs to be remembered; for the remembered-set
// update logging post-barrier, we don't maintain remembered set
// information for young gen objects.
inline bool G1CollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
  return is_in_young(new_obj);
}

inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
  const HeapRegion* hr = heap_region_containing(obj);
  if (hr == NULL) {
    if (obj == NULL) return false;
    else return true;
  }
  else return is_obj_dead(obj, hr);
}

inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
  const HeapRegion* hr = heap_region_containing(obj);
  if (hr == NULL) {
    if (obj == NULL) return false;
    else return true;
  }
  else return is_obj_ill(obj, hr);
}

inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
  uint region = addr_to_region((HeapWord*)obj);
  // We not only set the "live" flag in the humongous_is_live table, but also
  // reset the entry in the _in_cset_fast_test table so that subsequent references
  // to the same humongous object do not go into the slow path again.
  // This is racy, as multiple threads may enter here at the same time, but it
  // is benign.
  // During collection we only ever set the "live" flag, and only ever clear the
  // entry in the _in_cset_fast_test table.
  // We only ever evaluate the contents of these tables (in the VM thread) after
  // having synchronized the worker threads with the VM thread, or in the same
  // thread (i.e. within the VM thread).
  if (!_humongous_is_live.is_live(region)) {
    _humongous_is_live.set_live(region);
    _in_cset_fast_test.clear_humongous(region);
  }
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
