src/share/vm/gc_implementation/g1/satbQueue.cpp

changeset 2469:7e37af9d69ef
parent    2314:f95d63e2154a
child     3175:4dfb2df418f2
--- a/src/share/vm/gc_implementation/g1/satbQueue.cpp	Thu Jan 13 17:19:21 2011 -0800
+++ b/src/share/vm/gc_implementation/g1/satbQueue.cpp	Wed Jan 19 09:35:17 2011 -0500
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,12 +23,98 @@
  */
 
 #include "precompiled.hpp"
+#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/satbQueue.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/sharedHeap.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/thread.hpp"
 
+// This method removes entries from an SATB buffer that will not be
+// useful to the concurrent marking threads. An entry is removed if it
+// satisfies one of the following conditions:
+//
+// * it points to an object outside the G1 heap (G1's concurrent
+//     marking only visits objects inside the G1 heap),
+// * it points to an object that has been allocated since marking
+//     started (according to SATB those objects do not need to be
+//     visited during marking), or
+// * it points to an object that has already been marked (no need to
+//     process it again).
+//
+// The remaining entries are retained and compacted towards the top of
+// the buffer. If this filtering clears a large enough chunk of the
+// buffer, we can re-use it (instead of enqueueing it) and just allow
+// the mutator to carry on executing.
+
+bool ObjPtrQueue::should_enqueue_buffer() {
+  assert(_lock == NULL || _lock->owned_by_self(),
+         "we should have taken the lock before calling this");
+
+  // A value of 0 means "don't filter SATB buffers".
+  if (G1SATBBufferEnqueueingThresholdPercent == 0) {
+    return true;
+  }
+
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+
+  // This method should only be called if there is a non-NULL buffer
+  // that is full.
+  assert(_index == 0, "pre-condition");
+  assert(_buf != NULL, "pre-condition");
+
+  void** buf = _buf;
+  size_t sz = _sz;
+
+  // Used for sanity checking at the end of the loop.
+  debug_only(size_t entries = 0; size_t retained = 0;)
+
+  size_t i = sz;
+  size_t new_index = sz;
+
+  // Given that we are expecting _index == 0, we could have changed
+  // the loop condition to (i > 0). But we are using _index for
+  // generality.
+  while (i > _index) {
+    assert(i > 0, "we should have at least one more entry to process");
+    i -= oopSize;
+    debug_only(entries += 1;)
+    oop* p = (oop*) &buf[byte_index_to_index((int) i)];
+    oop obj = *p;
+    // NULL the entry so that unused parts of the buffer contain NULLs
+    // at the end. If we are going to retain it we will copy it to its
+    // final place. If we have retained all entries we have visited so
+    // far, we'll just end up copying it to the same place.
+    *p = NULL;
+
+    bool retain = g1h->is_obj_ill(obj);
+    if (retain) {
+      assert(new_index > 0, "we should not have already filled up the buffer");
+      new_index -= oopSize;
+      assert(new_index >= i,
+             "new_index should never be below i, as we always compact 'up'");
+      oop* new_p = (oop*) &buf[byte_index_to_index((int) new_index)];
+      assert(new_p >= p, "the destination location should never be below "
+             "the source as we always compact 'up'");
+      assert(*new_p == NULL,
+             "we should have already cleared the destination location");
+      *new_p = obj;
+      debug_only(retained += 1;)
+    }
+  }
+  size_t entries_calc = (sz - _index) / oopSize;
+  assert(entries == entries_calc, "the number of entries we counted "
+         "should match the number of entries we calculated");
+  size_t retained_calc = (sz - new_index) / oopSize;
+  assert(retained == retained_calc, "the number of retained entries we counted "
+         "should match the number of retained entries we calculated");
+  size_t perc = retained_calc * 100 / entries_calc;
+  bool should_enqueue = perc > (size_t) G1SATBBufferEnqueueingThresholdPercent;
+  _index = new_index;
+
+  return should_enqueue;
+}
+
 void ObjPtrQueue::apply_closure(ObjectClosure* cl) {
   if (_buf != NULL) {
     apply_closure_to_buffer(cl, _buf, _index, _sz);

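For readers who want to follow the filtering logic outside of HotSpot, the sketch below models the same "filter and compact towards the top" idea on a plain array of pointers. It is not the G1 code: filter_and_compact, keep_entry/keep_odd and the demo values are hypothetical stand-ins for _buf, G1CollectedHeap::is_obj_ill() and G1SATBBufferEnqueueingThresholdPercent, and the sketch indexes the buffer in elements, whereas the real code keeps _index in bytes and converts with byte_index_to_index().

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Filter a full buffer in place, compacting retained entries towards the
// top (highest indices). 'keep' plays the role of g1h->is_obj_ill(obj);
// the return value mirrors should_enqueue_buffer(): true means filtering
// freed too little space, so the buffer should still be enqueued.
static bool filter_and_compact(void** buf, size_t capacity, size_t* index,
                               bool (*keep)(void*), size_t threshold_percent) {
  size_t new_index = capacity;   // first retained slot, counted from the top
  size_t entries = 0;
  size_t retained = 0;

  // Walk from the top of the buffer down to the current index (0 for a
  // full buffer), clearing every slot and re-writing only the kept ones.
  for (size_t i = capacity; i > *index; ) {
    i -= 1;
    entries += 1;
    void* entry = buf[i];
    buf[i] = NULL;               // unused part of the buffer ends up NULL
    if (keep(entry)) {
      new_index -= 1;            // compact 'up': destination is never below source
      buf[new_index] = entry;
      retained += 1;
    }
  }

  *index = new_index;            // slots below new_index are free again
  size_t perc = (entries == 0) ? 0 : retained * 100 / entries;
  return perc > threshold_percent;
}

// Demo predicate: pretend only odd values are still interesting to marking.
static bool keep_odd(void* entry) {
  return (reinterpret_cast<uintptr_t>(entry) & 1) != 0;
}

int main() {
  void* buf[8];
  for (size_t i = 0; i < 8; i++) {
    buf[i] = reinterpret_cast<void*>(i + 1);   // entries 1..8, four of them odd
  }
  size_t index = 0;                            // 0 means the buffer is full
  bool enqueue = filter_and_compact(buf, 8, &index, keep_odd, 60);
  // 4 of 8 entries retained -> 50% <= 60% threshold -> re-use the buffer.
  printf("index=%zu enqueue=%d\n", index, (int) enqueue);  // index=4 enqueue=0
  return 0;
}

As in the patch, a true return value means filtering did not free enough of the buffer (the retained percentage exceeds the threshold), so the buffer is still handed to the concurrent marking threads; otherwise the index is simply moved up past the retained entries and the mutator keeps writing into the same buffer.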