src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp

author       jprovino
date         Wed, 23 Jan 2013 13:02:39 -0500
changeset    4542:db9981fd3124
parent       4173:8a5ea0a9ccc4
child        4733:9def4075da6d
permissions  -rw-r--r--

8005915: Unify SERIALGC and INCLUDE_ALTERNATE_GCS
Summary: Rename INCLUDE_ALTERNATE_GCS to INCLUDE_ALL_GCS and replace SERIALGC with INCLUDE_ALL_GCS.
Reviewed-by: coleenp, stefank

/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_INLINE_HPP

#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
// Utility routine to set an exclusive range of cards on the given
// card liveness bitmap.
inline void ConcurrentMark::set_card_bitmap_range(BitMap* card_bm,
                                                  BitMap::idx_t start_idx,
                                                  BitMap::idx_t end_idx,
                                                  bool is_par) {
  // Set the exclusive bit range [start_idx, end_idx).
  assert((end_idx - start_idx) > 0, "at least one card");
  assert(end_idx <= card_bm->size(), "sanity");

  // Silently clip the end index
  end_idx = MIN2(end_idx, card_bm->size());

  // For small ranges use a simple loop; otherwise use set_range or
  // par_at_put_range (if parallel). The range is made up of the cards
  // that are spanned by an object/mem region, so a threshold of 8 cards
  // allows objects of up to 4K to be handled using the loop.
  if ((end_idx - start_idx) <= 8) {
    for (BitMap::idx_t i = start_idx; i < end_idx; i += 1) {
      if (is_par) {
        card_bm->par_set_bit(i);
      } else {
        card_bm->set_bit(i);
      }
    }
  } else {
    // Note BitMap::par_at_put_range() and BitMap::set_range() are exclusive.
    if (is_par) {
      card_bm->par_at_put_range(start_idx, end_idx, true);
    } else {
      card_bm->set_range(start_idx, end_idx);
    }
  }
}
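
// Note (added for clarity, assuming the usual 512-byte card size): 8 cards
// cover 8 * 512 = 4K of heap, which is where the 4K bound mentioned above
// comes from; anything larger falls through to the bulk BitMap range calls.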
// Returns the index in the liveness accounting card bitmap
// for the given address.
inline BitMap::idx_t ConcurrentMark::card_bitmap_index_for(HeapWord* addr) {
  // Below, the term "card num" means the result of shifting an address
  // by the card shift -- address 0 corresponds to card number 0.  One
  // must subtract the card num of the bottom of the heap to obtain a
  // card table index.
  intptr_t card_num = intptr_t(uintptr_t(addr) >> CardTableModRefBS::card_shift);
  return card_num - heap_bottom_card_num();
}
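
// Illustration (added for clarity, not part of the original file; assumes
// the usual 512-byte cards, i.e. card_shift == 9): if the heap bottom is at
// address B, an object header at B + 0x600 (1536 bytes into the heap) lies
// three cards past the bottom card, so card_bitmap_index_for() returns 3.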
// Counts the given memory region in the given task/worker
// counting data structures.
inline void ConcurrentMark::count_region(MemRegion mr, HeapRegion* hr,
                                         size_t* marked_bytes_array,
                                         BitMap* task_card_bm) {
  G1CollectedHeap* g1h = _g1h;
  CardTableModRefBS* ct_bs = (CardTableModRefBS*) (g1h->barrier_set());

  HeapWord* start = mr.start();
  HeapWord* end = mr.end();
  size_t region_size_bytes = mr.byte_size();
  uint index = hr->hrs_index();

  assert(!hr->continuesHumongous(), "should not be HC region");
  assert(hr == g1h->heap_region_containing(start), "sanity");
  assert(hr == g1h->heap_region_containing(mr.last()), "sanity");
  assert(marked_bytes_array != NULL, "pre-condition");
  assert(task_card_bm != NULL, "pre-condition");

  // Add to the task local marked bytes for this region.
  marked_bytes_array[index] += region_size_bytes;

  BitMap::idx_t start_idx = card_bitmap_index_for(start);
  BitMap::idx_t end_idx = card_bitmap_index_for(end);

  // Note: if we're looking at the last region in the heap, end could
  // actually be just beyond the end of the heap; end_idx will then
  // correspond to a (non-existent) card that is also just beyond the heap.
  if (g1h->is_in_g1_reserved(end) && !ct_bs->is_card_aligned(end)) {
    // The end of the region is not card aligned - increment end_idx to
    // cover all the cards spanned by the region.
    end_idx += 1;
  }
  // The card bitmap is task/worker specific => no need to use
  // the 'par' BitMap routines.
  // Set bits in the exclusive bit range [start_idx, end_idx).
  set_card_bitmap_range(task_card_bm, start_idx, end_idx, false /* is_par */);
}
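
// Illustration (added for clarity, not part of the original file): with
// 512-byte cards, an object that starts near the end of card C and ends
// partway into card C+1 yields start_idx == C and end_idx == C+1 above;
// since the end is not card aligned, end_idx is bumped to C+2 so that the
// exclusive range [C, C+2) marks both cards the object actually touches.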
// Counts the given memory region in the task/worker counting
// data structures for the given worker id.
inline void ConcurrentMark::count_region(MemRegion mr,
                                         HeapRegion* hr,
                                         uint worker_id) {
  size_t* marked_bytes_array = count_marked_bytes_array_for(worker_id);
  BitMap* task_card_bm = count_card_bitmap_for(worker_id);
  count_region(mr, hr, marked_bytes_array, task_card_bm);
}

// Counts the given memory region, which may be a single object, in the
// task/worker counting data structures for the given worker id.
inline void ConcurrentMark::count_region(MemRegion mr, uint worker_id) {
  HeapWord* addr = mr.start();
  HeapRegion* hr = _g1h->heap_region_containing_raw(addr);
  count_region(mr, hr, worker_id);
}
// Counts the given object in the given task/worker counting data structures.
inline void ConcurrentMark::count_object(oop obj,
                                         HeapRegion* hr,
                                         size_t* marked_bytes_array,
                                         BitMap* task_card_bm) {
  MemRegion mr((HeapWord*)obj, obj->size());
  count_region(mr, hr, marked_bytes_array, task_card_bm);
}
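
// Note (added for clarity): obj->size() is in HeapWords and the MemRegion
// constructor used above takes a word count, so count_region() ends up
// adding mr.byte_size() -- the object's size in bytes -- to the per-task
// marked_bytes_array slot for the object's region.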
// Counts the given object in the task/worker counting data
// structures for the given worker id.
inline void ConcurrentMark::count_object(oop obj,
                                         HeapRegion* hr,
                                         uint worker_id) {
  size_t* marked_bytes_array = count_marked_bytes_array_for(worker_id);
  BitMap* task_card_bm = count_card_bitmap_for(worker_id);
  HeapWord* addr = (HeapWord*) obj;
  count_object(obj, hr, marked_bytes_array, task_card_bm);
}
// Attempts to mark the given object and, if successful, counts
// the object in the given task/worker counting structures.
inline bool ConcurrentMark::par_mark_and_count(oop obj,
                                               HeapRegion* hr,
                                               size_t* marked_bytes_array,
                                               BitMap* task_card_bm) {
  HeapWord* addr = (HeapWord*)obj;
  if (_nextMarkBitMap->parMark(addr)) {
    // Update the task specific count data for the object.
    count_object(obj, hr, marked_bytes_array, task_card_bm);
    return true;
  }
  return false;
}
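
// Note (added for clarity): CMBitMap::parMark() sets the mark bit with a
// CAS (see the comment in CMTask::deal_with_reference() below), so only
// the one worker that actually flips the bit reaches count_object() here.
// That keeps each live object's bytes from being counted more than once.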
// Attempts to mark the given object and, if successful, counts
// the object in the task/worker counting structures for the
// given worker id.
inline bool ConcurrentMark::par_mark_and_count(oop obj,
                                               size_t word_size,
                                               HeapRegion* hr,
                                               uint worker_id) {
  HeapWord* addr = (HeapWord*)obj;
  if (_nextMarkBitMap->parMark(addr)) {
    MemRegion mr(addr, word_size);
    count_region(mr, hr, worker_id);
    return true;
  }
  return false;
}

// Attempts to mark the given object and, if successful, counts
// the object in the task/worker counting structures for the
// given worker id.
inline bool ConcurrentMark::par_mark_and_count(oop obj,
                                               HeapRegion* hr,
                                               uint worker_id) {
  HeapWord* addr = (HeapWord*)obj;
  if (_nextMarkBitMap->parMark(addr)) {
    // Update the task specific count data for the object.
    count_object(obj, hr, worker_id);
    return true;
  }
  return false;
}
// As above, but the caller does not know the heap region containing
// the object, so we look it up here.
inline bool ConcurrentMark::par_mark_and_count(oop obj, uint worker_id) {
  HeapWord* addr = (HeapWord*)obj;
  HeapRegion* hr = _g1h->heap_region_containing_raw(addr);
  return par_mark_and_count(obj, hr, worker_id);
}
// Similar to the above routine but we already know the size, in words, of
// the object that we wish to mark/count.
inline bool ConcurrentMark::par_mark_and_count(oop obj,
                                               size_t word_size,
                                               uint worker_id) {
  HeapWord* addr = (HeapWord*)obj;
  if (_nextMarkBitMap->parMark(addr)) {
    // Update the task specific count data for the object.
    MemRegion mr(addr, word_size);
    count_region(mr, worker_id);
    return true;
  }
  return false;
}
// Unconditionally mark the given object, and unconditionally count
// the object in the counting structures for worker id 0.
// Should *not* be called from parallel code.
inline bool ConcurrentMark::mark_and_count(oop obj, HeapRegion* hr) {
  HeapWord* addr = (HeapWord*)obj;
  _nextMarkBitMap->mark(addr);
  // Update the task specific count data for the object.
  count_object(obj, hr, 0 /* worker_id */);
  return true;
}
// As above, but the caller does not have the heap region containing
// the object, so we look it up here.
inline bool ConcurrentMark::mark_and_count(oop obj) {
  HeapWord* addr = (HeapWord*)obj;
  HeapRegion* hr = _g1h->heap_region_containing_raw(addr);
  return mark_and_count(obj, hr);
}
inline bool CMBitMapRO::iterate(BitMapClosure* cl, MemRegion mr) {
  HeapWord* start_addr = MAX2(startWord(), mr.start());
  HeapWord* end_addr = MIN2(endWord(), mr.end());

  if (end_addr > start_addr) {
    // Right-open interval [start-offset, end-offset).
    BitMap::idx_t start_offset = heapWordToOffset(start_addr);
    BitMap::idx_t end_offset = heapWordToOffset(end_addr);

    start_offset = _bm.get_next_one_offset(start_offset, end_offset);
    while (start_offset < end_offset) {
      HeapWord* obj_addr = offsetToHeapWord(start_offset);
      oop obj = (oop) obj_addr;
      if (!cl->do_bit(start_offset)) {
        return false;
      }
      HeapWord* next_addr = MIN2(obj_addr + obj->size(), end_addr);
      BitMap::idx_t next_offset = heapWordToOffset(next_addr);
      start_offset = _bm.get_next_one_offset(next_offset, end_offset);
    }
  }
  return true;
}
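
// Illustrative sketch (added for clarity, not part of the original file):
// the iteration above hands each set bit -- i.e. each marked object start --
// to BitMapClosure::do_bit(), skips ahead by the object's size, and aborts
// the walk if do_bit() returns false. A hypothetical closure that simply
// counts marked objects in the range could look like:
//
//   class CountMarkedObjsClosure : public BitMapClosure {  // hypothetical
//     size_t _count;
//    public:
//     CountMarkedObjsClosure() : _count(0) { }
//     bool do_bit(BitMap::idx_t offset) { _count += 1; return true; }
//     size_t count() const { return _count; }
//   };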
inline bool CMBitMapRO::iterate(BitMapClosure* cl) {
  MemRegion mr(startWord(), sizeInWords());
  return iterate(cl, mr);
}
inline void CMTask::push(oop obj) {
  HeapWord* objAddr = (HeapWord*) obj;
  assert(_g1h->is_in_g1_reserved(objAddr), "invariant");
  assert(!_g1h->is_on_master_free_list(
              _g1h->heap_region_containing((HeapWord*) objAddr)), "invariant");
  assert(!_g1h->is_obj_ill(obj), "invariant");
  assert(_nextMarkBitMap->isMarked(objAddr), "invariant");

  if (_cm->verbose_high()) {
    gclog_or_tty->print_cr("[%u] pushing "PTR_FORMAT, _worker_id, (void*) obj);
  }

  if (!_task_queue->push(obj)) {
    // The local task queue looks full. We need to push some entries
    // to the global stack.
    if (_cm->verbose_medium()) {
      gclog_or_tty->print_cr("[%u] task queue overflow, "
                             "moving entries to the global stack",
                             _worker_id);
    }
    move_entries_to_global_stack();

    // this should succeed since, even if we overflow the global
    // stack, we should have definitely removed some entries from the
    // local queue. So, there must be space on it.
    bool success = _task_queue->push(obj);
    assert(success, "invariant");
  }

  statsOnly( int tmp_size = _task_queue->size();
             if (tmp_size > _local_max_size) {
               _local_max_size = tmp_size;
             }
             ++_local_pushes );
}
// This determines whether deal_with_reference() below checks both the
// local and the global finger when deciding whether to push a gray object
// on the stack (value 1), or only the global one (value 0). The tradeoff
// is that the former is a bit more accurate and may push less on the
// stack, but it might also be a little bit slower.

#define _CHECK_BOTH_FINGERS_      1
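
// Overview (added for clarity): deal_with_reference() below filters each
// discovered reference through progressively stronger checks -- is it
// inside the G1 reserved heap, is it not yet marked on the next bitmap,
// and was it allocated before the current marking cycle started (an object
// allocated since next-TAMS is treated as implicitly live and needs no
// explicit mark). Only an object that passes all of these is marked,
// counted, and -- if it lies below the relevant finger -- pushed.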
inline void CMTask::deal_with_reference(oop obj) {
  if (_cm->verbose_high()) {
    gclog_or_tty->print_cr("[%u] we're dealing with reference = "PTR_FORMAT,
                           _worker_id, (void*) obj);
  }

  ++_refs_reached;

  HeapWord* objAddr = (HeapWord*) obj;
  assert(obj->is_oop_or_null(true /* ignore mark word */), "Error");
  if (_g1h->is_in_g1_reserved(objAddr)) {
    assert(obj != NULL, "null check is implicit");
    if (!_nextMarkBitMap->isMarked(objAddr)) {
      // Only get the containing region if the object is not marked on the
      // bitmap (otherwise, it's a waste of time since we won't do
      // anything with it).
      HeapRegion* hr = _g1h->heap_region_containing_raw(obj);
      if (!hr->obj_allocated_since_next_marking(obj)) {
        if (_cm->verbose_high()) {
          gclog_or_tty->print_cr("[%u] "PTR_FORMAT" is not considered marked",
                                 _worker_id, (void*) obj);
        }

        // we need to mark it first
        if (_cm->par_mark_and_count(obj, hr, _marked_bytes_array, _card_bm)) {
          // No OrderAccess::store_load() is needed. It is implicit in the
          // CAS done in the CMBitMap::parMark() call in the routine above.
          HeapWord* global_finger = _cm->finger();

#if _CHECK_BOTH_FINGERS_
          // we will check both the local and global fingers

          if (_finger != NULL && objAddr < _finger) {
            if (_cm->verbose_high()) {
              gclog_or_tty->print_cr("[%u] below the local finger ("PTR_FORMAT"), "
                                     "pushing it", _worker_id, _finger);
            }
            push(obj);
          } else if (_curr_region != NULL && objAddr < _region_limit) {
            // do nothing
          } else if (objAddr < global_finger) {
            // Notice that the global finger might be moving forward
            // concurrently. This is not a problem. In the worst case, we
            // mark the object while it is above the global finger and, by
            // the time we read the global finger, it has moved forward
            // past this object. In this case, the object will probably
            // be visited when a task is scanning the region and will also
            // be pushed on the stack. So, some duplicate work, but no
            // correctness problems.

            if (_cm->verbose_high()) {
              gclog_or_tty->print_cr("[%u] below the global finger "
                                     "("PTR_FORMAT"), pushing it",
                                     _worker_id, global_finger);
            }
            push(obj);
          } else {
            // do nothing
          }
#else // _CHECK_BOTH_FINGERS_
          // we will only check the global finger

          if (objAddr < global_finger) {
            // see long comment above

            if (_cm->verbose_high()) {
              gclog_or_tty->print_cr("[%u] below the global finger "
                                     "("PTR_FORMAT"), pushing it",
                                     _worker_id, global_finger);
            }
            push(obj);
          }
#endif // _CHECK_BOTH_FINGERS_
        }
      }
    }
  }
}
inline void ConcurrentMark::markPrev(oop p) {
  assert(!_prevMarkBitMap->isMarked((HeapWord*) p), "sanity");
  // Note we are overriding the read-only view of the prev map here, via
  // the cast.
  ((CMBitMap*)_prevMarkBitMap)->mark((HeapWord*) p);
}
inline void ConcurrentMark::grayRoot(oop obj, size_t word_size,
                                     uint worker_id, HeapRegion* hr) {
  assert(obj != NULL, "pre-condition");
  HeapWord* addr = (HeapWord*) obj;
  if (hr == NULL) {
    hr = _g1h->heap_region_containing_raw(addr);
  } else {
    assert(hr->is_in(addr), "pre-condition");
  }
  assert(hr != NULL, "sanity");
  // Given that we're looking for a region that contains an object
  // header it's impossible to get back a HC region.
  assert(!hr->continuesHumongous(), "sanity");

  // We cannot assert that word_size == obj->size() given that obj
  // might not be in a consistent state (another thread might be in
  // the process of copying it). So the best thing we can do is to
  // assert that word_size is under an upper bound which is its
  // containing region's capacity.
  assert(word_size * HeapWordSize <= hr->capacity(),
         err_msg("size: "SIZE_FORMAT" capacity: "SIZE_FORMAT" "HR_FORMAT,
                 word_size * HeapWordSize, hr->capacity(),
                 HR_FORMAT_PARAMS(hr)));

  if (addr < hr->next_top_at_mark_start()) {
    if (!_nextMarkBitMap->isMarked(addr)) {
      par_mark_and_count(obj, word_size, hr, worker_id);
    }
  }
}
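
// Note (added for clarity): the next_top_at_mark_start() check above
// reflects the TAMS invariant used throughout this file -- objects at or
// above next-TAMS were allocated after the current marking cycle started
// and are treated as implicitly live, so they are neither marked on the
// next bitmap nor added to the liveness counting data.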
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_INLINE_HPP
