Mon, 12 Mar 2012 14:59:00 -0700
7147724: G1: hang in SurrogateLockerThread::manipulatePLL
Summary: Attempting to initiate a marking cycle when allocating a humongous object can, if a marking cycle is successfully initiated by another thread, result in the allocating thread spinning until the marking cycle is complete. Eliminate a deadlock between the main ConcurrentMarkThread, the SurrogateLocker thread, the VM thread, and a mutator thread waiting on the SecondaryFreeList_lock (while free regions are going to become available) by not manipulating the pending list lock during the prologue and epilogue of the cleanup pause.
Reviewed-by: brutisso, jcoomes, tonyp
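
The cycle described in the summary is a classic lock-ordering deadlock. The stand-alone sketch below is illustrative only: two threads and two std::mutex objects stand in for the four HotSpot threads and the pending list / SecondaryFreeList_lock pair described above, and all names are hypothetical. It shows how inconsistent lock ordering forms a cycle, and how the shape of this fix (one side simply stops taking one of the locks) breaks it.

// deadlock_sketch.cpp: illustrative only, not HotSpot code.
#include <mutex>
#include <thread>
#include <chrono>
#include <cstdio>

std::mutex pending_list_lock;    // stand-in for the pending list lock
std::mutex secondary_free_list;  // stand-in for SecondaryFreeList_lock

void thread_a() {
  // Takes the locks in the order pending_list_lock -> secondary_free_list.
  std::lock_guard<std::mutex> g1(pending_list_lock);
  std::this_thread::sleep_for(std::chrono::milliseconds(10));
  std::lock_guard<std::mutex> g2(secondary_free_list);
  std::puts("thread_a made progress");
}

void thread_b_deadlocks() {
  // Opposite order: secondary_free_list -> pending_list_lock. If
  // thread_a already holds pending_list_lock while we hold
  // secondary_free_list, neither thread can proceed: a cycle, hence a
  // deadlock.
  std::lock_guard<std::mutex> g1(secondary_free_list);
  std::this_thread::sleep_for(std::chrono::milliseconds(10));
  std::lock_guard<std::mutex> g2(pending_list_lock);
  std::puts("thread_b made progress");
}

void thread_b_fixed() {
  // The fix in this changeset is analogous: the pending list lock is no
  // longer manipulated in the cleanup pause prologue/epilogue, so the
  // cycle can no longer form. Modeled here by not taking it at all.
  std::lock_guard<std::mutex> g1(secondary_free_list);
  std::puts("thread_b made progress");
}

int main() {
  std::thread a(thread_a);
  std::thread b(thread_b_fixed);  // swap in thread_b_deadlocks to see the hang
  a.join();
  b.join();
  return 0;
}
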
/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP

inline HeapWord* G1OffsetTableContigSpace::allocate(size_t size) {
  HeapWord* res = ContiguousSpace::allocate(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}

// Because of the requirement of keeping "_offsets" up to date with the
// allocations, we sequentialize these with a lock. Therefore, this is
// best used only for larger LAB allocations.
inline HeapWord* G1OffsetTableContigSpace::par_allocate(size_t size) {
  MutexLocker x(&_par_alloc_lock);
  // Given that we hold the lock, there is no need to use par_allocate()
  // here.
  HeapWord* res = ContiguousSpace::allocate(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}
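
// For contrast with the locked path above: a lock-free bump-pointer
// allocation of the kind ContiguousSpace::par_allocate() performs
// retries a CAS on the space's top pointer. The sketch below is
// illustrative only (simplified types, with std::atomic standing in
// for HotSpot's Atomic wrappers) and is not the HotSpot implementation.
// It also shows why par_allocate() alone would not suffice for this
// class: the CAS makes only the pointer bump atomic, while the separate
// _offsets.alloc_block() update must stay consistent with it, hence the
// lock above.

#include <atomic>
#include <cstddef>

typedef char* IllustrativeAddr;  // stand-in for HeapWord*

inline IllustrativeAddr cas_bump_allocate(std::atomic<IllustrativeAddr>& top,
                                          IllustrativeAddr end,
                                          size_t size_in_bytes) {
  IllustrativeAddr old_top = top.load();
  do {
    if (size_in_bytes > (size_t)(end - old_top)) {
      return NULL;  // not enough room left in this space
    }
    // On failure, compare_exchange_weak reloads old_top; just retry.
  } while (!top.compare_exchange_weak(old_top, old_top + size_in_bytes));
  return old_top;  // we now own [old_top, old_top + size_in_bytes)
}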

inline HeapWord* G1OffsetTableContigSpace::block_start(const void* p) {
  return _offsets.block_start(p);
}

inline HeapWord*
G1OffsetTableContigSpace::block_start_const(const void* p) const {
  return _offsets.block_start_const(p);
}

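// Marking bookkeeping: called when a marking cycle starts. Resets the
// next-marking data for this region: NTAMS (_next_top_at_mark_start) is
// set to the current top, and bytes marked during the new cycle will be
// accumulated in _next_marked_bytes.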
inline void HeapRegion::note_start_of_marking() {
  init_top_at_conc_mark_count();
  _next_marked_bytes = 0;
  _next_top_at_mark_start = top();
}

inline void HeapRegion::note_end_of_marking() {
  _prev_top_at_mark_start = _next_top_at_mark_start;
  _prev_marked_bytes = _next_marked_bytes;
  _next_marked_bytes = 0;

  assert(_prev_marked_bytes <=
         (size_t) pointer_delta(prev_top_at_mark_start(), bottom()) *
         HeapWordSize, "invariant");
}

inline void HeapRegion::note_start_of_copying(bool during_initial_mark) {
  if (is_survivor()) {
    // This is how we always allocate survivors.
    assert(_next_top_at_mark_start == bottom(), "invariant");
  } else {
    if (during_initial_mark) {
      // During initial-mark we'll explicitly mark any objects on old
      // regions that are pointed to by roots. Explicit marks only make
      // sense below NTAMS, but since we don't know where the top of
      // this region will end up, we simply set NTAMS to the end of the
      // region so that all marks fall below NTAMS. We'll set it to the
      // actual top when we retire this region.
      _next_top_at_mark_start = end();
    } else {
      // We could have re-used this old region as to-space over a
      // couple of GCs since the start of the concurrent marking
      // cycle. This means that [bottom, NTAMS) will contain objects
      // copied up to and including initial-mark, and [NTAMS, top)
      // will contain objects copied during the concurrent marking
      // cycle.
      assert(top() >= _next_top_at_mark_start, "invariant");
    }
  }
}
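
// The reasoning above hinges on a region's watermarks: once a region is
// retired, bottom() <= NTAMS <= top() <= end() holds, while during the
// pause itself an initial-mark region temporarily has NTAMS == end().
// Setting NTAMS to end() trivially keeps every possible mark below
// NTAMS no matter where top() lands; note_end_of_copying() then clamps
// NTAMS back to the real top. The self-contained sketch below (plain
// char* pointers standing in for HeapWord*) walks through that
// sequence; it is illustrative only, not HotSpot code.

#include <cassert>

inline void ntams_watermark_illustration() {
  char region[1024];
  char* bottom = region;
  char* end    = region + sizeof(region);
  char* top    = bottom;  // nothing copied into the region yet

  // during_initial_mark case: top is unknown, so pessimistically push
  // NTAMS to end; every mark placed in the region is then below NTAMS.
  char* ntams = end;

  top += 256;             // copying allocates some objects
  assert(top <= ntams);   // marks (all below top) stay below NTAMS

  // note_end_of_copying() equivalent: retire the region and clamp
  // NTAMS down to the actual top.
  ntams = top;
  assert(bottom <= ntams && ntams <= top && top <= end);
}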

inline void HeapRegion::note_end_of_copying(bool during_initial_mark) {
  if (is_survivor()) {
    // This is how we always allocate survivors.
    assert(_next_top_at_mark_start == bottom(), "invariant");
  } else {
    if (during_initial_mark) {
      // See the comment for note_start_of_copying() for the details
      // on this.
      assert(_next_top_at_mark_start == end(), "pre-condition");
      _next_top_at_mark_start = top();
    } else {
      // See the comment for note_start_of_copying() for the details
      // on this.
      assert(top() >= _next_top_at_mark_start, "invariant");
    }
  }
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGION_INLINE_HPP