Fri, 15 Apr 2011 09:36:28 -0400
7032407: Crash in LinkResolver::runtime_resolve_virtual_method()
Summary: Make CDS reorder vtables so that dump-time vtables match the run-time order; then, when RedefineClasses reinitializes them, they are not left in the wrong order.
Reviewed-by: dcubed, acorn
1 /*
2 * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #ifndef SHARE_VM_MEMORY_BLOCKOFFSETTABLE_INLINE_HPP
26 #define SHARE_VM_MEMORY_BLOCKOFFSETTABLE_INLINE_HPP
28 #include "memory/blockOffsetTable.hpp"
29 #include "memory/space.hpp"
30 #include "runtime/safepoint.hpp"
31 #ifndef SERIALGC
32 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
33 #endif
35 //////////////////////////////////////////////////////////////////////////
36 // BlockOffsetTable inlines
37 //////////////////////////////////////////////////////////////////////////
38 inline HeapWord* BlockOffsetTable::block_start(const void* addr) const {
39 if (addr >= _bottom && addr < _end) {
40 return block_start_unsafe(addr);
41 } else {
42 return NULL;
43 }
44 }
46 //////////////////////////////////////////////////////////////////////////
47 // BlockOffsetSharedArray inlines
48 //////////////////////////////////////////////////////////////////////////
49 inline size_t BlockOffsetSharedArray::index_for(const void* p) const {
50 char* pc = (char*)p;
51 assert(pc >= (char*)_reserved.start() &&
52 pc < (char*)_reserved.end(),
53 "p not in range.");
54 size_t delta = pointer_delta(pc, _reserved.start(), sizeof(char));
55 size_t result = delta >> LogN;
56 assert(result < _vs.committed_size(), "bad index from address");
57 return result;
58 }
60 inline HeapWord* BlockOffsetSharedArray::address_for_index(size_t index) const {
61 assert(index < _vs.committed_size(), "bad index");
62 HeapWord* result = _reserved.start() + (index << LogN_words);
63 assert(result >= _reserved.start() && result < _reserved.end(),
64 "bad address from index");
65 return result;
66 }
68 inline void BlockOffsetSharedArray::check_reducing_assertion(bool reducing) {
69 assert(reducing || !SafepointSynchronize::is_at_safepoint() || init_to_zero() ||
70 Thread::current()->is_VM_thread() ||
71 Thread::current()->is_ConcurrentGC_thread() ||
72 ((!Thread::current()->is_ConcurrentGC_thread()) &&
73 ParGCRareEvent_lock->owned_by_self()), "Crack");
74 }
76 //////////////////////////////////////////////////////////////////////////
77 // BlockOffsetArrayNonContigSpace inlines
78 //////////////////////////////////////////////////////////////////////////
79 inline void BlockOffsetArrayNonContigSpace::freed(HeapWord* blk,
80 size_t size) {
81 freed(blk, blk + size);
82 }
84 inline void BlockOffsetArrayNonContigSpace::freed(HeapWord* blk_start,
85 HeapWord* blk_end) {
86 // Verify that the BOT shows [blk_start, blk_end) to be one block.
87 verify_single_block(blk_start, blk_end);
88 // adjust _unallocated_block upward or downward
89 // as appropriate
90 if (BlockOffsetArrayUseUnallocatedBlock) {
91 assert(_unallocated_block <= _end,
92 "Inconsistent value for _unallocated_block");
93 if (blk_end >= _unallocated_block && blk_start <= _unallocated_block) {
94 // CMS-specific note: a block abutting _unallocated_block to
95 // its left is being freed, a new block is being added or
96 // we are resetting following a compaction
97 _unallocated_block = blk_start;
98 }
99 }
100 }
102 #endif // SHARE_VM_MEMORY_BLOCKOFFSETTABLE_INLINE_HPP