src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp

author      johnc
date        Thu, 12 Jan 2012 00:06:47 -0800
changeset   3463:d30fa85f9994
parent      3454:2e966d967c5c
child       3464:eff609af17d7
permissions -rw-r--r--

6484965: G1: piggy-back liveness accounting phase on marking
Summary: Remove the separate counting phase of concurrent marking by tracking the amount of marked bytes and the cards spanned by marked objects in marking task/worker thread local data structures, which are updated as individual objects are marked.
Reviewed-by: brutisso, tonyp
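
The change replaces G1's separate counting phase with per-task/worker liveness accounting that is updated as objects are marked. Below is a minimal sketch of that idea, not the HotSpot implementation itself: the names WorkerCountData and record_marked, the explicit heap_bottom/num_cards parameters, and the fixed 512-byte card size are illustrative assumptions; the real structures are the per-task marked_bytes_array and card BitMap manipulated by the inline functions in the file that follows.

// Minimal sketch (hypothetical types/names) of piggy-backed liveness accounting:
// each worker owns a marked-bytes-per-region array and a card bitmap, updated
// as soon as it wins the race to mark an object, so no separate counting pass
// over the mark bitmap is needed; per-worker data is aggregated after marking.
#include <cstddef>
#include <cstdint>
#include <vector>

class WorkerCountData {
 public:
  WorkerCountData(size_t num_regions, size_t num_cards, uintptr_t heap_bottom)
    : _marked_bytes(num_regions, 0),
      _card_bm(num_cards, false),
      _heap_bottom_card(heap_bottom >> kCardShift) { }

  // Called right after this worker successfully marks an object.
  void record_marked(size_t region_index, uintptr_t obj_start, size_t obj_bytes) {
    _marked_bytes[region_index] += obj_bytes;
    // Set one bit for every card spanned by the marked object.
    size_t first = (obj_start >> kCardShift) - _heap_bottom_card;
    size_t last  = ((obj_start + obj_bytes - 1) >> kCardShift) - _heap_bottom_card;
    for (size_t c = first; c <= last; ++c) {
      _card_bm[c] = true;
    }
  }

  size_t marked_bytes(size_t region_index) const { return _marked_bytes[region_index]; }

 private:
  static const size_t kCardShift = 9;  // assume 512-byte cards (HotSpot's default)
  std::vector<size_t> _marked_bytes;   // live bytes per heap region, this worker only
  std::vector<bool>   _card_bm;        // cards spanned by objects this worker marked
  size_t              _heap_bottom_card;
};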

/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_INLINE_HPP

#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"

// Returns the index in the liveness accounting card bitmap
// for the given address.
inline BitMap::idx_t ConcurrentMark::card_bitmap_index_for(HeapWord* addr) {
  // Below, the term "card num" means the result of shifting an address
  // by the card shift -- address 0 corresponds to card number 0.  One
  // must subtract the card num of the bottom of the heap to obtain a
  // card bitmap index.
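  // For example, assuming HotSpot's default 512-byte cards (card_shift == 9)
  // and a heap bottom of 0x80000000: heap_bottom_card_num() is
  // 0x80000000 >> 9 == 0x400000, and addr == 0x80001200 gives
  // card_num == 0x400009, so its index in the card bitmap is 9.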

  intptr_t card_num = intptr_t(uintptr_t(addr) >> CardTableModRefBS::card_shift);
  return card_num - heap_bottom_card_num();
}

// Counts the given memory region in the given task/worker
// counting data structures.
inline void ConcurrentMark::count_region(MemRegion mr, HeapRegion* hr,
                                         size_t* marked_bytes_array,
                                         BitMap* task_card_bm) {
  G1CollectedHeap* g1h = _g1h;
  HeapWord* start = mr.start();
  HeapWord* last = mr.last();
  size_t region_size_bytes = mr.byte_size();
  size_t index = hr->hrs_index();

  assert(!hr->continuesHumongous(), "should not be HC region");
  assert(hr == g1h->heap_region_containing(start), "sanity");
  assert(hr == g1h->heap_region_containing(mr.last()), "sanity");
  assert(marked_bytes_array != NULL, "pre-condition");
  assert(task_card_bm != NULL, "pre-condition");

  // Add to the task local marked bytes for this region.
  marked_bytes_array[index] += region_size_bytes;

  BitMap::idx_t start_idx = card_bitmap_index_for(start);
  BitMap::idx_t last_idx = card_bitmap_index_for(last);

  // The card bitmap is task/worker specific => no need to use 'par' routines.
  // Set bits in the inclusive bit range [start_idx, last_idx].
  //
  // For small ranges use a simple loop; otherwise use set_range().
  // The range covers the cards spanned by the object/region, so a
  // limit of 8 cards allows objects/regions of up to 4K to be
  // handled using the loop.
  if ((last_idx - start_idx) <= 8) {
    for (BitMap::idx_t i = start_idx; i <= last_idx; i += 1) {
      task_card_bm->set_bit(i);
    }
  } else {
    assert(last_idx < task_card_bm->size(), "sanity");
    // Note: BitMap::set_range() is exclusive.
    task_card_bm->set_range(start_idx, last_idx+1);
  }
}

// Counts the given memory region, which may be a single object, in the
// task/worker counting data structures for the given worker id.
inline void ConcurrentMark::count_region(MemRegion mr, uint worker_id) {
  size_t* marked_bytes_array = count_marked_bytes_array_for(worker_id);
  BitMap* task_card_bm = count_card_bitmap_for(worker_id);
  HeapWord* addr = mr.start();
  HeapRegion* hr = _g1h->heap_region_containing_raw(addr);
  count_region(mr, hr, marked_bytes_array, task_card_bm);
}

// Counts the given object in the given task/worker counting data structures.
inline void ConcurrentMark::count_object(oop obj,
                                         HeapRegion* hr,
                                         size_t* marked_bytes_array,
                                         BitMap* task_card_bm) {
  MemRegion mr((HeapWord*)obj, obj->size());
  count_region(mr, hr, marked_bytes_array, task_card_bm);
}

// Counts the given object in the task/worker counting data
// structures for the given worker id.
inline void ConcurrentMark::count_object(oop obj, HeapRegion* hr, uint worker_id) {
  size_t* marked_bytes_array = count_marked_bytes_array_for(worker_id);
  BitMap* task_card_bm = count_card_bitmap_for(worker_id);
  HeapWord* addr = (HeapWord*) obj;
  count_object(obj, hr, marked_bytes_array, task_card_bm);
}

// Attempts to mark the given object and, if successful, counts
// the object in the given task/worker counting structures.
inline bool ConcurrentMark::par_mark_and_count(oop obj,
                                               HeapRegion* hr,
                                               size_t* marked_bytes_array,
                                               BitMap* task_card_bm) {
  HeapWord* addr = (HeapWord*)obj;
  if (_nextMarkBitMap->parMark(addr)) {
    // Update the task specific count data for the object.
    count_object(obj, hr, marked_bytes_array, task_card_bm);
    return true;
  }
  return false;
}

// Attempts to mark the given object and, if successful, counts
// the object in the task/worker counting structures for the
// given worker id.
inline bool ConcurrentMark::par_mark_and_count(oop obj,
                                               HeapRegion* hr,
                                               uint worker_id) {
  HeapWord* addr = (HeapWord*)obj;
  if (_nextMarkBitMap->parMark(addr)) {
    // Update the task specific count data for the object.
    count_object(obj, hr, worker_id);
    return true;
  }
  return false;
}

// As above, but the heap region containing the object is not known,
// so it is looked up here.
inline bool ConcurrentMark::par_mark_and_count(oop obj, uint worker_id) {
  HeapWord* addr = (HeapWord*)obj;
  HeapRegion* hr = _g1h->heap_region_containing_raw(addr);
  return par_mark_and_count(obj, hr, worker_id);
}

// Similar to the above routine but we already know the size, in words, of
// the object that we wish to mark/count
inline bool ConcurrentMark::par_mark_and_count(oop obj,
                                               size_t word_size,
                                               uint worker_id) {
  HeapWord* addr = (HeapWord*)obj;
  if (_nextMarkBitMap->parMark(addr)) {
    // Update the task specific count data for the object.
    MemRegion mr(addr, word_size);
    count_region(mr, worker_id);
    return true;
  }
  return false;
}

// Unconditionally mark the given object, and unconditionally count
// the object in the counting structures for worker id 0.
// Should *not* be called from parallel code.
inline bool ConcurrentMark::mark_and_count(oop obj, HeapRegion* hr) {
  HeapWord* addr = (HeapWord*)obj;
  _nextMarkBitMap->mark(addr);
  // Update the task specific count data for the object.
  count_object(obj, hr, 0 /* worker_id */);
  return true;
}

// As above, but we don't have the heap region containing the
// object, so it has to be looked up here.
inline bool ConcurrentMark::mark_and_count(oop obj) {
  HeapWord* addr = (HeapWord*)obj;
  HeapRegion* hr = _g1h->heap_region_containing_raw(addr);
  return mark_and_count(obj, hr);
}

inline bool CMBitMapRO::iterate(BitMapClosure* cl, MemRegion mr) {
  HeapWord* start_addr = MAX2(startWord(), mr.start());
  HeapWord* end_addr = MIN2(endWord(), mr.end());

  if (end_addr > start_addr) {
    // Right-open interval [start-offset, end-offset).
    BitMap::idx_t start_offset = heapWordToOffset(start_addr);
    BitMap::idx_t end_offset = heapWordToOffset(end_addr);

    start_offset = _bm.get_next_one_offset(start_offset, end_offset);
    while (start_offset < end_offset) {
      HeapWord* obj_addr = offsetToHeapWord(start_offset);
      oop obj = (oop) obj_addr;
      if (!cl->do_bit(start_offset)) {
        return false;
      }
      HeapWord* next_addr = MIN2(obj_addr + obj->size(), end_addr);
      BitMap::idx_t next_offset = heapWordToOffset(next_addr);
      start_offset = _bm.get_next_one_offset(next_offset, end_offset);
    }
  }
  return true;
}

inline bool CMBitMapRO::iterate(BitMapClosure* cl) {
  MemRegion mr(startWord(), sizeInWords());
  return iterate(cl, mr);
}

inline void CMTask::push(oop obj) {
  HeapWord* objAddr = (HeapWord*) obj;
  assert(_g1h->is_in_g1_reserved(objAddr), "invariant");
  assert(!_g1h->is_on_master_free_list(
              _g1h->heap_region_containing((HeapWord*) objAddr)), "invariant");
  assert(!_g1h->is_obj_ill(obj), "invariant");
  assert(_nextMarkBitMap->isMarked(objAddr), "invariant");

  if (_cm->verbose_high()) {
    gclog_or_tty->print_cr("[%d] pushing "PTR_FORMAT, _task_id, (void*) obj);
  }

  if (!_task_queue->push(obj)) {
    // The local task queue looks full. We need to push some entries
    // to the global stack.

    if (_cm->verbose_medium()) {
      gclog_or_tty->print_cr("[%d] task queue overflow, "
                             "moving entries to the global stack",
                             _task_id);
    }
    move_entries_to_global_stack();

    // this should succeed since, even if we overflow the global
    // stack, we should have definitely removed some entries from the
    // local queue. So, there must be space on it.
    bool success = _task_queue->push(obj);
    assert(success, "invariant");
  }

  statsOnly( int tmp_size = _task_queue->size();
             if (tmp_size > _local_max_size) {
               _local_max_size = tmp_size;
             }
             ++_local_pushes );
}

// This determines whether the method below checks both the local and
// global fingers when deciding whether to push a gray object on the
// stack (value 1), or only the global finger (value 0). The tradeoff
// is that the former is a bit more accurate and may push fewer entries
// on the stack, but it might also be a little slower.

#define _CHECK_BOTH_FINGERS_      1

inline void CMTask::deal_with_reference(oop obj) {
  if (_cm->verbose_high()) {
    gclog_or_tty->print_cr("[%d] we're dealing with reference = "PTR_FORMAT,
                           _task_id, (void*) obj);
  }

  ++_refs_reached;

  HeapWord* objAddr = (HeapWord*) obj;
  assert(obj->is_oop_or_null(true /* ignore mark word */), "Error");
  if (_g1h->is_in_g1_reserved(objAddr)) {
    assert(obj != NULL, "null check is implicit");
    if (!_nextMarkBitMap->isMarked(objAddr)) {
      // Only get the containing region if the object is not marked on the
      // bitmap (otherwise, it's a waste of time since we won't do
      // anything with it).
      HeapRegion* hr = _g1h->heap_region_containing_raw(obj);
      if (!hr->obj_allocated_since_next_marking(obj)) {
        if (_cm->verbose_high()) {
          gclog_or_tty->print_cr("[%d] "PTR_FORMAT" is not considered marked",
                                 _task_id, (void*) obj);
        }

        // we need to mark it first
        if (_cm->par_mark_and_count(obj, hr, _marked_bytes_array, _card_bm)) {
          // No OrderAccess::storeload() is needed. It is implicit in the
          // CAS done in the CMBitMap::parMark() call in the routine above.
          HeapWord* global_finger = _cm->finger();

#if _CHECK_BOTH_FINGERS_
          // we will check both the local and global fingers

          if (_finger != NULL && objAddr < _finger) {
            if (_cm->verbose_high()) {
              gclog_or_tty->print_cr("[%d] below the local finger ("PTR_FORMAT"), "
                                     "pushing it", _task_id, _finger);
            }
            push(obj);
          } else if (_curr_region != NULL && objAddr < _region_limit) {
            // do nothing
          } else if (objAddr < global_finger) {
            // Notice that the global finger might be moving forward
            // concurrently. This is not a problem. In the worst case, we
            // mark the object while it is above the global finger and, by
            // the time we read the global finger, it has moved forward
            // past this object. In this case, the object will probably
            // be visited when a task is scanning the region and will also
            // be pushed on the stack. So, some duplicate work, but no
            // correctness problems.
            if (_cm->verbose_high()) {
              gclog_or_tty->print_cr("[%d] below the global finger "
                                     "("PTR_FORMAT"), pushing it",
                                     _task_id, global_finger);
            }
            push(obj);
          } else {
            // do nothing
          }
#else // _CHECK_BOTH_FINGERS_
          // we will only check the global finger

          if (objAddr < global_finger) {
            // see long comment above

            if (_cm->verbose_high()) {
              gclog_or_tty->print_cr("[%d] below the global finger "
                                     "("PTR_FORMAT"), pushing it",
                                     _task_id, global_finger);
            }
            push(obj);
          }
#endif // _CHECK_BOTH_FINGERS_
        }
      }
    }
  }
}

inline void ConcurrentMark::markPrev(oop p) {
  assert(!_prevMarkBitMap->isMarked((HeapWord*) p), "sanity");
  // Note we are overriding the read-only view of the prev map here, via
  // the cast.
  ((CMBitMap*)_prevMarkBitMap)->mark((HeapWord*) p);
}

inline void ConcurrentMark::grayRoot(oop obj, size_t word_size, uint worker_id) {
  HeapWord* addr = (HeapWord*) obj;

  // word_size is used below, once the object is marked, to update the
  // liveness accounting data via par_mark_and_count().

#ifdef ASSERT
  HeapRegion* hr = _g1h->heap_region_containing(addr);
  assert(hr != NULL, "sanity");
  assert(!hr->is_survivor(), "should not allocate survivors during IM");
  assert(addr < hr->next_top_at_mark_start(),
         err_msg("addr: "PTR_FORMAT" hr: "HR_FORMAT" NTAMS: "PTR_FORMAT,
                 addr, HR_FORMAT_PARAMS(hr), hr->next_top_at_mark_start()));
  // We cannot assert that word_size == obj->size() given that obj
  // might not be in a consistent state (another thread might be in
  // the process of copying it). So the best thing we can do is to
  // assert that word_size is under an upper bound which is its
  // containing region's capacity.
  assert(word_size * HeapWordSize <= hr->capacity(),
         err_msg("size: "SIZE_FORMAT" capacity: "SIZE_FORMAT" "HR_FORMAT,
                 word_size * HeapWordSize, hr->capacity(),
                 HR_FORMAT_PARAMS(hr)));
#endif // ASSERT

  if (!_nextMarkBitMap->isMarked(addr)) {
    par_mark_and_count(obj, word_size, worker_id);
  }
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_CONCURRENTMARK_INLINE_HPP
