src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp

author      tschatzl
date        Mon, 21 Jul 2014 09:59:46 +0200
changeset   7007 : 7df07d855c8e
parent      6939 : cd43876f692e
child       7031 : ee019285a52c
permissions -rw-r--r--

8048085: Aborting marking just before remark results in useless additional clearing of the next mark bitmap
Summary: Skip clearing the next bitmap if we just recently aborted since the full GC already clears this bitmap.
Reviewed-by: brutisso
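
The summary above describes the fix only at a high level, and the change itself lives in the concurrent marking code rather than in the file shown below. The following is a minimal, self-contained C++ sketch of the idea only; every name in it (MarkBitmapSketch, ConcurrentMarkSketch, abort_marking, cleanup_for_next_mark) is hypothetical and illustrative, not HotSpot's actual API:

#include <cstdio>
#include <vector>

// Stand-in for the "next" mark bitmap (illustrative only, not HotSpot code).
struct MarkBitmapSketch {
  std::vector<bool> bits = std::vector<bool>(1024, false);
  void clear_all() { bits.assign(bits.size(), false); }
};

struct ConcurrentMarkSketch {
  MarkBitmapSketch next_bitmap;
  bool recently_aborted = false;

  // A full GC that interrupts concurrent marking aborts it and, as part of
  // the same operation, clears the next mark bitmap.
  void abort_marking() {
    recently_aborted = true;
    next_bitmap.clear_all();
  }

  // Preparation for the next marking cycle: with the fix, the clearing work
  // is skipped when a recent abort means the bitmap is already clear.
  void cleanup_for_next_mark() {
    if (recently_aborted) {
      std::puts("bitmap already cleared by the aborting full GC; skipping");
      recently_aborted = false;
      return;
    }
    next_bitmap.clear_all();
  }
};

int main() {
  ConcurrentMarkSketch cm;
  cm.abort_marking();          // simulated full GC during marking
  cm.cleanup_for_next_mark();  // no redundant clear
  cm.cleanup_for_next_mark();  // normal path: clears the bitmap
  return 0;
}

The point is only the guard in cleanup_for_next_mark(): re-clearing a bitmap that the aborting full GC has just cleared is pure overhead, so a flag set by the abort lets the next cycle skip that work.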

/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.pcgc.inline.hpp"
#include "runtime/prefetch.inline.hpp"

G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp)
  : _g1h(g1h),
    _refs(g1h->task_queue(queue_num)),
    _dcq(&g1h->dirty_card_queue_set()),
    _ct_bs(g1h->g1_barrier_set()),
    _g1_rem(g1h->g1_rem_set()),
    _hash_seed(17), _queue_num(queue_num),
    _term_attempts(0),
    _surviving_alloc_buffer(g1h->desired_plab_sz(GCAllocForSurvived)),
    _tenured_alloc_buffer(g1h->desired_plab_sz(GCAllocForTenured)),
    _age_table(false), _scanner(g1h, rp),
    _strong_roots_time(0), _term_time(0),
    _alloc_buffer_waste(0), _undo_waste(0) {
  _scanner.set_par_scan_thread_state(this);
  // We allocate one entry more than the number of young regions in the
  // collection set, since we "sacrifice" entry 0 to keep track of surviving
  // bytes for non-young regions (where the age is -1).
  // We also add a few elements at the beginning and at the end in
  // an attempt to eliminate cache contention.
  uint real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
  uint array_length = PADDING_ELEM_NUM +
                      real_length +
                      PADDING_ELEM_NUM;
  _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
  if (_surviving_young_words_base == NULL)
    vm_exit_out_of_memory(array_length * sizeof(size_t), OOM_MALLOC_ERROR,
                          "Not enough space for young surv histo.");
  _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
  memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));

  _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
  _alloc_buffers[GCAllocForTenured]  = &_tenured_alloc_buffer;

  _start = os::elapsedTime();
}

G1ParScanThreadState::~G1ParScanThreadState() {
  retire_alloc_buffers();
  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
}
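
// Print the header for, and one row of, the per-worker termination statistics
// table: elapsed time, time spent on strong roots and on termination (in ms
// and as a percentage of elapsed time), termination attempts, and PLAB waste
// in KiB (total, allocation, undo).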
void
G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st)
{
  st->print_raw_cr("GC Termination Stats");
  st->print_raw_cr("     elapsed  --strong roots-- -------termination-------"
                   " ------waste (KiB)------");
  st->print_raw_cr("thr     ms        ms      %        ms      %    attempts"
                   "  total   alloc    undo");
  st->print_raw_cr("--- --------- --------- ------ --------- ------ --------"
                   " ------- ------- -------");
}

void
G1ParScanThreadState::print_termination_stats(int i,
                                              outputStream* const st) const
{
  const double elapsed_ms = elapsed_time() * 1000.0;
  const double s_roots_ms = strong_roots_time() * 1000.0;
  const double term_ms    = term_time() * 1000.0;
  st->print_cr("%3d %9.2f %9.2f %6.2f "
               "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
               SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
               i, elapsed_ms, s_roots_ms, s_roots_ms * 100 / elapsed_ms,
               term_ms, term_ms * 100 / elapsed_ms, term_attempts(),
               (alloc_buffer_waste() + undo_waste()) * HeapWordSize / K,
               alloc_buffer_waste() * HeapWordSize / K,
               undo_waste() * HeapWordSize / K);
}

#ifdef ASSERT
bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
  assert(ref != NULL, "invariant");
  assert(UseCompressedOops, "sanity");
  assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, p2i(ref)));
  oop p = oopDesc::load_decode_heap_oop(ref);
  assert(_g1h->is_in_g1_reserved(p),
         err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p)));
  return true;
}

bool G1ParScanThreadState::verify_ref(oop* ref) const {
  assert(ref != NULL, "invariant");
  if (has_partial_array_mask(ref)) {
    // Must be in the collection set--it's already been copied.
    oop p = clear_partial_array_mask(ref);
    assert(_g1h->obj_in_cs(p),
           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p)));
  } else {
    oop p = oopDesc::load_decode_heap_oop(ref);
    assert(_g1h->is_in_g1_reserved(p),
           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p)));
  }
  return true;
}

bool G1ParScanThreadState::verify_task(StarTask ref) const {
  if (ref.is_narrow()) {
    return verify_ref((narrowOop*) ref);
  } else {
    return verify_ref((oop*) ref);
  }
}
#endif // ASSERT
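
// Fully drain this thread's reference queue: take from the overflow stack
// first so that other workers can steal from the bounded local queue, then
// drain the local queue, repeating until both are empty.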
void G1ParScanThreadState::trim_queue() {
  assert(_evac_failure_cl != NULL, "not set");

  StarTask ref;
  do {
    // Drain the overflow stack first, so other threads can steal.
    while (_refs->pop_overflow(ref)) {
      dispatch_reference(ref);
    }

    while (_refs->pop_local(ref)) {
      dispatch_reference(ref);
    }
  } while (!_refs->is_empty());
}
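
// Copy a live object into a survivor or old region during evacuation:
// allocate space (PLAB fast path, shared slow path otherwise), install the
// forwarding pointer atomically, update the object's age and the
// surviving-young-words statistics, and either push a large objArray back
// onto the queue in chunks or scan the copied object's fields right away.
// If another worker wins the forwarding race, the allocation is undone and
// the winner's copy is returned; if no space could be allocated, evacuation
// failure handling takes over.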
oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
  size_t word_sz = old->size();
  HeapRegion* from_region = _g1h->heap_region_containing_raw(old);
  // +1 to make the -1 indexes valid...
  int       young_index = from_region->young_index_in_cset()+1;
  assert( (from_region->is_young() && young_index >  0) ||
         (!from_region->is_young() && young_index == 0), "invariant" );
  G1CollectorPolicy* g1p = _g1h->g1_policy();
  markOop m = old->mark();
  int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
                                           : m->age();
  GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
                                                             word_sz);
  HeapWord* obj_ptr = allocate(alloc_purpose, word_sz);
#ifndef PRODUCT
  // Should this evacuation fail?
  if (_g1h->evacuation_should_fail()) {
    if (obj_ptr != NULL) {
      undo_allocation(alloc_purpose, obj_ptr, word_sz);
      obj_ptr = NULL;
    }
  }
#endif // !PRODUCT

  if (obj_ptr == NULL) {
    // This will either forward-to-self, or detect that someone else has
    // installed a forwarding pointer.
    return _g1h->handle_evacuation_failure_par(this, old);
  }

  oop obj = oop(obj_ptr);

  // We're going to allocate linearly, so might as well prefetch ahead.
  Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);

  oop forward_ptr = old->forward_to_atomic(obj);
  if (forward_ptr == NULL) {
    Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);

    // alloc_purpose is just a hint to allocate() above; recheck the type of
    // region we actually allocated from and update alloc_purpose accordingly.
    HeapRegion* to_region = _g1h->heap_region_containing_raw(obj_ptr);
    alloc_purpose = to_region->is_young() ? GCAllocForSurvived : GCAllocForTenured;

    if (g1p->track_object_age(alloc_purpose)) {
      // We could simply do obj->incr_age(). However, this causes a
      // performance issue. obj->incr_age() will first check whether
      // the object has a displaced mark by checking its mark word;
      // getting the mark word from the new location of the object
      // stalls. So, given that we already have the mark word and we
      // are about to install it anyway, it's better to increase the
      // age on the mark word, when the object does not have a
      // displaced mark word. We're not expecting many objects to have
      // a displaced mark word, so that case is not optimized
      // further (it could be...) and we simply call obj->incr_age().

      if (m->has_displaced_mark_helper()) {
        // In this case, we have to install the mark word first,
        // otherwise obj looks to be forwarded (the old mark word,
        // which contains the forward pointer, was copied).
        obj->set_mark(m);
        obj->incr_age();
      } else {
        m = m->incr_age();
        obj->set_mark(m);
      }
      age_table()->add(obj, word_sz);
    } else {
      obj->set_mark(m);
    }

    if (G1StringDedup::is_enabled()) {
      G1StringDedup::enqueue_from_evacuation(from_region->is_young(),
                                             to_region->is_young(),
                                             queue_num(),
                                             obj);
    }

    size_t* surv_young_words = surviving_young_words();
    surv_young_words[young_index] += word_sz;

    if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
      // We keep track of the next start index in the length field of
      // the to-space object. The actual length can be found in the
      // length field of the from-space object.
      arrayOop(obj)->set_length(0);
      oop* old_p = set_partial_array_mask(old);
      push_on_queue(old_p);
    } else {
      // No point in using the slower heap_region_containing() method,
      // given that we know obj is in the heap.
      _scanner.set_region(_g1h->heap_region_containing_raw(obj));
      obj->oop_iterate_backwards(&_scanner);
    }
  } else {
    undo_allocation(alloc_purpose, obj_ptr, word_sz);
    obj = forward_ptr;
  }
  return obj;
}
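
// Out-of-line allocation path: when the requested size is small relative to
// the PLAB size (less than ParallelGCBufferWastePct percent of it), retire
// the current PLAB (counting its remaining space as waste) and try to start
// a fresh one; otherwise allocate the object directly from the shared
// GC allocation area.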
HeapWord* G1ParScanThreadState::allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
  HeapWord* obj = NULL;
  size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
  if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
    G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
    add_to_alloc_buffer_waste(alloc_buf->words_remaining());
    alloc_buf->retire(false /* end_of_gc */, false /* retain */);

    HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
    if (buf == NULL) {
      return NULL; // Let caller handle allocation failure.
    }
    // Otherwise.
    alloc_buf->set_word_size(gclab_word_size);
    alloc_buf->set_buf(buf);

    obj = alloc_buf->allocate(word_sz);
    assert(obj != NULL, "buffer was definitely big enough...");
  } else {
    obj = _g1h->par_allocate_during_gc(purpose, word_sz);
  }
  return obj;
}
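
// Undo the most recent allocation: hand the space back to the PLAB if it was
// allocated there, otherwise fill it with a dummy object and record it as
// undo waste.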
void G1ParScanThreadState::undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
  if (alloc_buffer(purpose)->contains(obj)) {
    assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
           "should contain whole object");
    alloc_buffer(purpose)->undo_allocation(obj, word_sz);
  } else {
    CollectedHeap::fill_with_object(obj, word_sz);
    add_to_undo_waste(word_sz);
  }
}
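
// Fast-path allocation from the current PLAB for the given purpose, falling
// back to allocate_slow() when the PLAB does not have enough room.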
HeapWord* G1ParScanThreadState::allocate(GCAllocPurpose purpose, size_t word_sz) {
  HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
  if (obj != NULL) {
    return obj;
  }
  return allocate_slow(purpose, word_sz);
}
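
// Retire both PLABs at the end of the GC, recording any space still left in
// them as allocation buffer waste and flushing the per-purpose PLAB statistics.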
void G1ParScanThreadState::retire_alloc_buffers() {
  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
    size_t waste = _alloc_buffers[ap]->words_remaining();
    add_to_alloc_buffer_waste(waste);
    _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
                                               true /* end_of_gc */,
                                               false /* retain */);
  }
}
