Mon, 12 Mar 2012 14:59:00 -0700
7147724: G1: hang in SurrogateLockerThread::manipulatePLL
Summary: Attempting to initiate a marking cycle when allocating a humongous object can, if a marking cycle is successfully initiated by another thread, result in the allocating thread spinning until the marking cycle is complete. Eliminate a deadlock between the main ConcurrentMarkThread, the SurrogateLocker thread, the VM thread, and a mutator thread waiting on the SecondaryFreeList_lock (while free regions are going to become available) by not manipulating the pending list lock during the prologue and epilogue of the cleanup pause.
Reviewed-by: brutisso, jcoomes, tonyp
tonyp@2715 | 1 | /* |
tonyp@2715 | 2 | * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. |
tonyp@2715 | 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
tonyp@2715 | 4 | * |
tonyp@2715 | 5 | * This code is free software; you can redistribute it and/or modify it |
tonyp@2715 | 6 | * under the terms of the GNU General Public License version 2 only, as |
tonyp@2715 | 7 | * published by the Free Software Foundation. |
tonyp@2715 | 8 | * |
tonyp@2715 | 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
tonyp@2715 | 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
tonyp@2715 | 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
tonyp@2715 | 12 | * version 2 for more details (a copy is included in the LICENSE file that |
tonyp@2715 | 13 | * accompanied this code). |
tonyp@2715 | 14 | * |
tonyp@2715 | 15 | * You should have received a copy of the GNU General Public License version |
tonyp@2715 | 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
tonyp@2715 | 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
tonyp@2715 | 18 | * |
tonyp@2715 | 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
tonyp@2715 | 20 | * or visit www.oracle.com if you need additional information or have any |
tonyp@2715 | 21 | * questions. |
tonyp@2715 | 22 | * |
tonyp@2715 | 23 | */ |
tonyp@2715 | 24 | |
tonyp@2715 | 25 | #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_HPP |
tonyp@2715 | 26 | #define SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_HPP |
tonyp@2715 | 27 | |
tonyp@2715 | 28 | #include "gc_implementation/g1/heapRegion.hpp" |
tonyp@2715 | 29 | |
tonyp@2715 | 30 | class G1CollectedHeap; |
tonyp@2715 | 31 | |
tonyp@2715 | 32 | // 0 -> no tracing, 1 -> basic tracing, 2 -> basic + allocation tracing |
tonyp@2715 | 33 | #define G1_ALLOC_REGION_TRACING 0 |
tonyp@2715 | 34 | |
tonyp@2715 | 35 | class ar_ext_msg; |
tonyp@2715 | 36 | |
tonyp@2715 | 37 | // A class that holds a region that is active in satisfying allocation |
tonyp@2715 | 38 | // requests, potentially issued in parallel. When the active region is |
tonyp@3028 | 39 | // full it will be retired and replaced with a new one. The |
tonyp@2715 | 40 | // implementation assumes that fast-path allocations will be lock-free |
tonyp@2715 | 41 | // and a lock will need to be taken when the active region needs to be |
tonyp@2715 | 42 | // replaced. |
tonyp@2715 | 43 | |
tonyp@2715 | 44 | class G1AllocRegion VALUE_OBJ_CLASS_SPEC { |
tonyp@2715 | 45 | friend class ar_ext_msg; |
tonyp@2715 | 46 | |
tonyp@2715 | 47 | private: |
tonyp@2715 | 48 | // The active allocating region we are currently allocating out |
tonyp@2715 | 49 | // of. The invariant is that if this object is initialized (i.e., |
tonyp@2715 | 50 | // init() has been called and release() has not) then _alloc_region |
tonyp@2715 | 51 | // is either an active allocating region or the dummy region (i.e., |
tonyp@2715 | 52 | // it can never be NULL) and this object can be used to satisfy |
tonyp@2715 | 53 | // allocation requests. If this object is not initialized |
tonyp@2715 | 54 | // (i.e. init() has not been called or release() has been called) |
tonyp@2715 | 55 | // then _alloc_region is NULL and this object should not be used to |
tonyp@2715 | 56 | // satisfy allocation requests (it was done this way to force the |
tonyp@2715 | 57 | // correct use of init() and release()). |
tonyp@2715 | 58 | HeapRegion* _alloc_region; |
tonyp@2715 | 59 | |
tonyp@3028 | 60 | // It keeps track of the number of distinct regions that are used |
tonyp@3028 | 61 | // for allocation in the active interval of this object, i.e., |
tonyp@3028 | 62 | // between a call to init() and a call to release(). The count |
tonyp@3028 | 63 | // mostly includes regions that are freshly allocated, as well as |
tonyp@3028 | 64 | // the region that is re-used using the set() method. This count can |
tonyp@3028 | 65 | // be used in any heuristics that might want to bound how many |
tonyp@3028 | 66 | // distinct regions this object can use during an active interval. |
tonyp@3028 | 67 | size_t _count; |
tonyp@3028 | 68 | |
tonyp@2715 | 69 | // When we set up a new active region we save its used bytes in this |
tonyp@2715 | 70 | // field so that, when we retire it, we can calculate how much space |
tonyp@2715 | 71 | // we allocated in it. |
tonyp@2715 | 72 | size_t _used_bytes_before; |
tonyp@2715 | 73 | |
tonyp@3028 | 74 | // When true, indicates that allocate calls should do BOT updates. |
tonyp@3028 | 75 | const bool _bot_updates; |
tonyp@2715 | 76 | |
tonyp@2715 | 77 | // Useful for debugging and tracing. |
tonyp@2715 | 78 | const char* _name; |
tonyp@2715 | 79 | |
tonyp@2715 | 80 | // A dummy region (i.e., it's been allocated specially for this |
tonyp@2715 | 81 | // purpose and it is not part of the heap) that is full (i.e., top() |
tonyp@2715 | 82 | // == end()). When we don't have a valid active region we make |
tonyp@2715 | 83 | // _alloc_region point to this. This allows us to skip checking |
tonyp@2715 | 84 | // whether the _alloc_region is NULL or not. |
tonyp@2715 | 85 | static HeapRegion* _dummy_region; |
tonyp@2715 | 86 | |
tonyp@2715 | 87 | // Some of the methods below take a bot_updates parameter. Its value |
tonyp@2715 | 88 | // should be the same as the _bot_updates field. The idea is that |
tonyp@2715 | 89 | // the parameter will be a constant for a particular alloc region |
tonyp@2715 | 90 | // and, given that these methods will hopefully be inlined, the |
tonyp@2715 | 91 | // compiler should compile out the test. |
tonyp@2715 | 92 | |
tonyp@2715 | 93 | // Perform a non-MT-safe allocation out of the given region. |
tonyp@2715 | 94 | static inline HeapWord* allocate(HeapRegion* alloc_region, |
tonyp@2715 | 95 | size_t word_size, |
tonyp@2715 | 96 | bool bot_updates); |
tonyp@2715 | 97 | |
tonyp@2715 | 98 | // Perform a MT-safe allocation out of the given region. |
tonyp@2715 | 99 | static inline HeapWord* par_allocate(HeapRegion* alloc_region, |
tonyp@2715 | 100 | size_t word_size, |
tonyp@2715 | 101 | bool bot_updates); |
tonyp@2715 | 102 | |
tonyp@2715 | 103 | // Ensure that the region passed as a parameter has been filled up |
tonyp@2715 | 104 | // so that no one else can allocate out of it any more. |
tonyp@2715 | 105 | static void fill_up_remaining_space(HeapRegion* alloc_region, |
tonyp@2715 | 106 | bool bot_updates); |
tonyp@2715 | 107 | |
tonyp@2715 | 108 | // Retire the active allocating region. If fill_up is true then make |
tonyp@2715 | 109 | // sure that the region is full before we retire it so that no one |
tonyp@2715 | 110 | // else can allocate out of it. |
tonyp@2715 | 111 | void retire(bool fill_up); |
tonyp@2715 | 112 | |
tonyp@2715 | 113 | // Allocate a new active region and use it to perform a word_size |
tonyp@2715 | 114 | // allocation. The force parameter will be passed on to |
tonyp@2715 | 115 | // G1CollectedHeap::allocate_new_alloc_region() and tells it to try |
tonyp@2715 | 116 | // to allocate a new region even if the max has been reached. |
tonyp@2715 | 117 | HeapWord* new_alloc_region_and_allocate(size_t word_size, bool force); |
tonyp@2715 | 118 | |
tonyp@2715 | 119 | void fill_in_ext_msg(ar_ext_msg* msg, const char* message); |
tonyp@2715 | 120 | |
tonyp@2715 | 121 | protected: |
tonyp@2715 | 122 | // For convenience as subclasses use it. |
tonyp@2715 | 123 | static G1CollectedHeap* _g1h; |
tonyp@2715 | 124 | |
tonyp@2715 | 125 | virtual HeapRegion* allocate_new_region(size_t word_size, bool force) = 0; |
tonyp@2715 | 126 | virtual void retire_region(HeapRegion* alloc_region, |
tonyp@2715 | 127 | size_t allocated_bytes) = 0; |
tonyp@2715 | 128 | |
tonyp@2715 | 129 | G1AllocRegion(const char* name, bool bot_updates); |
tonyp@2715 | 130 | |
tonyp@2715 | 131 | public: |
tonyp@2715 | 132 | static void setup(G1CollectedHeap* g1h, HeapRegion* dummy_region); |
tonyp@2715 | 133 | |
tonyp@2715 | 134 | HeapRegion* get() const { |
tonyp@2715 | 135 | // Make sure that the dummy region does not escape this class. |
tonyp@2715 | 136 | return (_alloc_region == _dummy_region) ? NULL : _alloc_region; |
tonyp@2715 | 137 | } |
tonyp@2715 | 138 | |
tonyp@3028 | 139 | size_t count() { return _count; } |
tonyp@3028 | 140 | |
tonyp@2715 | 141 | // The following two are the building blocks for the allocation method. |
tonyp@2715 | 142 | |
tonyp@2715 | 143 | // First-level allocation: Should be called without holding a |
tonyp@2715 | 144 | // lock. It will try to allocate lock-free out of the active region, |
tonyp@2715 | 145 | // or return NULL if it was unable to. |
tonyp@2715 | 146 | inline HeapWord* attempt_allocation(size_t word_size, bool bot_updates); |
tonyp@2715 | 147 | |
tonyp@2715 | 148 | // Second-level allocation: Should be called while holding a |
tonyp@2715 | 149 | // lock. It will try to first allocate lock-free out of the active |
tonyp@2715 | 150 | // region or, if it's unable to, it will try to replace the active |
tonyp@2715 | 151 | // alloc region with a new one. We require that the caller takes the |
tonyp@2715 | 152 | // appropriate lock before calling this so that it is easier to make |
tonyp@2715 | 153 | // it conform to its locking protocol. |
tonyp@2715 | 154 | inline HeapWord* attempt_allocation_locked(size_t word_size, |
tonyp@2715 | 155 | bool bot_updates); |
tonyp@2715 | 156 | |
tonyp@2715 | 157 | // Should be called to allocate a new region even if the max of this |
tonyp@2715 | 158 | // type of regions has been reached. Should only be called if other |
tonyp@2715 | 159 | // allocation attempts have failed and we are not holding a valid |
tonyp@2715 | 160 | // active region. |
tonyp@2715 | 161 | inline HeapWord* attempt_allocation_force(size_t word_size, |
tonyp@2715 | 162 | bool bot_updates); |
tonyp@2715 | 163 | |
tonyp@2715 | 164 | // Should be called before we start using this object. |
tonyp@2715 | 165 | void init(); |
tonyp@2715 | 166 | |
tonyp@3028 | 167 | // This can be used to set the active region to a specific |
tonyp@3028 | 168 | // region. (Use Example: we try to retain the last old GC alloc |
tonyp@3028 | 169 | // region that we've used during a GC and we can use set() to |
tonyp@3028 | 170 | // re-instate it at the beginning of the next GC.) |
tonyp@3028 | 171 | void set(HeapRegion* alloc_region); |
tonyp@3028 | 172 | |
tonyp@2715 | 173 | // Should be called when we want to release the active region which |
tonyp@2715 | 174 | // is returned after it's been retired. |
tonyp@2715 | 175 | HeapRegion* release(); |
tonyp@2715 | 176 | |
tonyp@2715 | 177 | #if G1_ALLOC_REGION_TRACING |
tonyp@2715 | 178 | void trace(const char* str, size_t word_size = 0, HeapWord* result = NULL); |
tonyp@2715 | 179 | #else // G1_ALLOC_REGION_TRACING |
tonyp@2715 | 180 | void trace(const char* str, size_t word_size = 0, HeapWord* result = NULL) { } |
tonyp@2715 | 181 | #endif // G1_ALLOC_REGION_TRACING |
tonyp@2715 | 182 | }; |
tonyp@2715 | 183 | |
tonyp@2715 | 184 | class ar_ext_msg : public err_msg { // err_msg whose text is filled in by G1AllocRegion::fill_in_ext_msg() |
tonyp@2715 | 185 | public: |
tonyp@2715 | 186 | ar_ext_msg(G1AllocRegion* alloc_region, const char *message) : err_msg("") { |
tonyp@2715 | 187 | alloc_region->fill_in_ext_msg(this, message); |
tonyp@2715 | 188 | } |
tonyp@2715 | 189 | }; |
tonyp@2715 | 190 | |
tonyp@2715 | 191 | #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1ALLOCREGION_HPP |