Mon, 16 Aug 2010 15:58:42 -0700
6948538: CMS: BOT walkers can fall into object allocation and initialization cracks
Summary: GC workers now recognize an intermediate transient state of blocks which are allocated but have not yet completed initialization. blk_start() calls do not attempt to determine the size of a block in the transient state, rather waiting for the block to become initialized so that it is safe to query its size. Audited and ensured the order of initialization of object fields (klass, free bit and size) to respect block state transition protocol. Also included some new assertion checking code enabled in debug mode.
Reviewed-by: chrisphi, johnc, poonam
1 /*
2 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 //////////////////////////////////////////////////////////////////////////
26 // BlockOffsetTable inlines
27 //////////////////////////////////////////////////////////////////////////
28 inline HeapWord* BlockOffsetTable::block_start(const void* addr) const {
29 if (addr >= _bottom && addr < _end) {
30 return block_start_unsafe(addr);
31 } else {
32 return NULL;
33 }
34 }
36 //////////////////////////////////////////////////////////////////////////
37 // BlockOffsetSharedArray inlines
38 //////////////////////////////////////////////////////////////////////////
39 inline size_t BlockOffsetSharedArray::index_for(const void* p) const {
40 char* pc = (char*)p;
41 assert(pc >= (char*)_reserved.start() &&
42 pc < (char*)_reserved.end(),
43 "p not in range.");
44 size_t delta = pointer_delta(pc, _reserved.start(), sizeof(char));
45 size_t result = delta >> LogN;
46 assert(result < _vs.committed_size(), "bad index from address");
47 return result;
48 }
50 inline HeapWord* BlockOffsetSharedArray::address_for_index(size_t index) const {
51 assert(index < _vs.committed_size(), "bad index");
52 HeapWord* result = _reserved.start() + (index << LogN_words);
53 assert(result >= _reserved.start() && result < _reserved.end(),
54 "bad address from index");
55 return result;
56 }
// Debug check on who is allowed to perform a non-"reducing" (i.e.
// potentially non-monotonic) update of the offset array. A non-reducing
// update is considered safe only when at least one of these holds:
//   - we are not at a safepoint (concurrent-phase updates are covered
//     by the block-state protocol), or
//   - the table is initialized to zero (no transient states to race on), or
//   - the updater is the VM thread or a concurrent GC thread, or
//   - the updater is a non-ConcurrentGC thread holding ParGCRareEvent_lock
//     (the rare-event mutual-exclusion path).
// Anything else could observe/produce an inconsistent BOT entry — "Crack".
inline void BlockOffsetSharedArray::check_reducing_assertion(bool reducing) {
  assert(reducing || !SafepointSynchronize::is_at_safepoint() || init_to_zero() ||
         Thread::current()->is_VM_thread() ||
         Thread::current()->is_ConcurrentGC_thread() ||
         ((!Thread::current()->is_ConcurrentGC_thread()) &&
          ParGCRareEvent_lock->owned_by_self()), "Crack");
}
66 //////////////////////////////////////////////////////////////////////////
67 // BlockOffsetArrayNonContigSpace inlines
68 //////////////////////////////////////////////////////////////////////////
69 inline void BlockOffsetArrayNonContigSpace::freed(HeapWord* blk,
70 size_t size) {
71 freed(blk, blk + size);
72 }
// Record that the block [blk_start, blk_end) has been freed, keeping the
// _unallocated_block watermark consistent. The watermark logic below is
// CMS-specific and order-sensitive, so only the BOT bookkeeping is done
// here; the free-list side is handled by the caller.
inline void BlockOffsetArrayNonContigSpace::freed(HeapWord* blk_start,
                                                  HeapWord* blk_end) {
  // Verify that the BOT shows [blk_start, blk_end) to be one block.
  verify_single_block(blk_start, blk_end);
  // adjust _unallocated_block upward or downward
  // as appropriate
  if (BlockOffsetArrayUseUnallocatedBlock) {
    assert(_unallocated_block <= _end,
           "Inconsistent value for _unallocated_block");
    // Only blocks that touch the watermark can move it; a freed block
    // strictly below or strictly above _unallocated_block leaves it alone.
    if (blk_end >= _unallocated_block && blk_start <= _unallocated_block) {
      // CMS-specific note: a block abutting _unallocated_block to
      // its left is being freed, a new block is being added or
      // we are resetting following a compaction
      _unallocated_block = blk_start;
    }
  }
}