src/share/vm/oops/oop.inline.hpp

author:      johnc
date:        Thu, 07 Apr 2011 09:53:20 -0700
changeset:   2781:e1162778c1c8
parent:      2658:c7f3d0b4570f
child:       3131:b0efc7ee3b31
permissions: -rw-r--r--

7009266: G1: assert(obj->is_oop_or_null(true )) failed: Error
Summary: A referent object that is only weakly reachable at the start of concurrent marking but is re-attached to the strongly reachable object graph during marking may not be marked as live. This can cause the reference object to be processed prematurely and leave dangling pointers to the referent object. Implement a read barrier for the java.lang.ref.Reference::referent field by intrinsifying the Reference.get() method, and intercepting accesses through JNI, reflection, and Unsafe, so that when a non-null referent object is read it is also logged in an SATB buffer.
Reviewed-by: kvn, iveresov, never, tonyp, dholmes
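
The shape of that barrier is easier to see outside the VM. Here is a minimal standalone sketch of the idea -- hypothetical names throughout (Object, SATBQueue, concurrent_marking_active), not HotSpot code: any non-null referent handed to the application while concurrent marking is running is also pushed onto an SATB log, so the marker treats it as live even if it was only weakly reachable when marking began.

    // Standalone model of the fix described above, not the HotSpot code.
    #include <vector>

    struct Object {};                        // stand-in for a heap object

    struct SATBQueue {                       // assumed per-thread SATB log
      std::vector<Object*> buf;
      void enqueue(Object* obj) { buf.push_back(obj); }
    };

    bool concurrent_marking_active = false;  // flipped by the GC at mark start

    struct Reference {
      Object* referent;
      Object* get(SATBQueue& satb) {         // Reference.get() with the barrier
        Object* r = referent;
        if (concurrent_marking_active && r != nullptr) {
          satb.enqueue(r);                   // keep the marker from losing r
        }
        return r;
      }
    };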

duke@435 1 /*
never@2658 2 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #ifndef SHARE_VM_OOPS_OOP_INLINE_HPP
stefank@2314 26 #define SHARE_VM_OOPS_OOP_INLINE_HPP
stefank@2314 27
stefank@2314 28 #include "gc_implementation/shared/ageTable.hpp"
stefank@2314 29 #include "gc_implementation/shared/markSweep.inline.hpp"
stefank@2314 30 #include "gc_interface/collectedHeap.inline.hpp"
stefank@2314 31 #include "memory/barrierSet.inline.hpp"
stefank@2314 32 #include "memory/cardTableModRefBS.hpp"
stefank@2314 33 #include "memory/compactingPermGenGen.hpp"
stefank@2314 34 #include "memory/genCollectedHeap.hpp"
stefank@2314 35 #include "memory/generation.hpp"
stefank@2314 36 #include "memory/permGen.hpp"
stefank@2314 37 #include "memory/specialized_oop_closures.hpp"
stefank@2314 38 #include "oops/arrayKlass.hpp"
stefank@2314 39 #include "oops/arrayOop.hpp"
stefank@2314 40 #include "oops/klass.hpp"
stefank@2314 41 #include "oops/klassOop.hpp"
stefank@2314 42 #include "oops/markOop.inline.hpp"
stefank@2314 43 #include "oops/oop.hpp"
stefank@2314 44 #include "runtime/atomic.hpp"
stefank@2314 45 #include "runtime/os.hpp"
stefank@2314 46 #ifdef TARGET_ARCH_x86
stefank@2314 47 # include "bytes_x86.hpp"
stefank@2314 48 #endif
stefank@2314 49 #ifdef TARGET_ARCH_sparc
stefank@2314 50 # include "bytes_sparc.hpp"
stefank@2314 51 #endif
stefank@2314 52 #ifdef TARGET_ARCH_zero
stefank@2314 53 # include "bytes_zero.hpp"
stefank@2314 54 #endif
bobv@2508 55 #ifdef TARGET_ARCH_arm
bobv@2508 56 # include "bytes_arm.hpp"
bobv@2508 57 #endif
bobv@2508 58 #ifdef TARGET_ARCH_ppc
bobv@2508 59 # include "bytes_ppc.hpp"
bobv@2508 60 #endif
stefank@2314 61
duke@435 62 // Implementation of all inlined member functions defined in oop.hpp
duke@435 63 // We need a separate file to avoid circular references
duke@435 64
duke@435 65 inline void oopDesc::release_set_mark(markOop m) {
duke@435 66 OrderAccess::release_store_ptr(&_mark, m);
duke@435 67 }
duke@435 68
duke@435 69 inline markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
duke@435 70 return (markOop) Atomic::cmpxchg_ptr(new_mark, &_mark, old_mark);
duke@435 71 }
duke@435 72
coleenp@548 73 inline klassOop oopDesc::klass() const {
coleenp@548 74 if (UseCompressedOops) {
coleenp@548 75 return (klassOop)decode_heap_oop_not_null(_metadata._compressed_klass);
coleenp@602 76 } else {
coleenp@602 77 return _metadata._klass;
coleenp@602 78 }
coleenp@602 79 }
coleenp@602 80
coleenp@602 81 inline klassOop oopDesc::klass_or_null() const volatile {
coleenp@602 82 // can be NULL in CMS
coleenp@602 83 if (UseCompressedOops) {
coleenp@602 84 return (klassOop)decode_heap_oop(_metadata._compressed_klass);
coleenp@548 85 } else {
coleenp@548 86 return _metadata._klass;
coleenp@548 87 }
coleenp@548 88 }
coleenp@548 89
coleenp@548 90 inline int oopDesc::klass_gap_offset_in_bytes() {
coleenp@548 91 assert(UseCompressedOops, "only applicable to compressed headers");
coleenp@548 92 return oopDesc::klass_offset_in_bytes() + sizeof(narrowOop);
coleenp@548 93 }
coleenp@548 94
coleenp@548 95 inline oop* oopDesc::klass_addr() {
coleenp@548 96 // Only used internally and with CMS; it will not work with
coleenp@548 97 // UseCompressedOops.
coleenp@548 98 assert(!UseCompressedOops, "only supported with uncompressed oops");
coleenp@548 99 return (oop*) &_metadata._klass;
coleenp@548 100 }
coleenp@548 101
coleenp@548 102 inline narrowOop* oopDesc::compressed_klass_addr() {
coleenp@548 103 assert(UseCompressedOops, "only called by compressed oops");
coleenp@548 104 return (narrowOop*) &_metadata._compressed_klass;
coleenp@548 105 }
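
All of these accessors read the same header slot through one of two overlapping views, selected at runtime by UseCompressedOops. A hypothetical model of that layout (the real one is the _metadata union in oopDesc):

    #include <cstdint>

    // With compressed oops only the low 32 bits hold the narrow klass;
    // the remaining 4 bytes are the "klass gap" that set_klass_gap()
    // below writes into.
    union MetadataSlot {
      void*    _klass;              // full-width pointer view
      uint32_t _compressed_klass;   // narrow (heap-offset) view
    };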
coleenp@548 106
duke@435 107 inline void oopDesc::set_klass(klassOop k) {
duke@435 108 // since klasses are promoted no store check is needed
duke@435 109 assert(Universe::is_bootstrapping() || k != NULL, "must be a real klassOop");
duke@435 110 assert(Universe::is_bootstrapping() || k->is_klass(), "not a klassOop");
coleenp@548 111 if (UseCompressedOops) {
coleenp@548 112 oop_store_without_check(compressed_klass_addr(), (oop)k);
coleenp@548 113 } else {
coleenp@548 114 oop_store_without_check(klass_addr(), (oop) k);
coleenp@548 115 }
duke@435 116 }
duke@435 117
coleenp@602 118 inline int oopDesc::klass_gap() const {
coleenp@602 119 return *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes());
coleenp@602 120 }
coleenp@602 121
coleenp@602 122 inline void oopDesc::set_klass_gap(int v) {
coleenp@602 123 if (UseCompressedOops) {
coleenp@602 124 *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes()) = v;
coleenp@602 125 }
coleenp@602 126 }
coleenp@602 127
duke@435 128 inline void oopDesc::set_klass_to_list_ptr(oop k) {
duke@435 129 // This is only to be used during GC, for from-space objects, so no
duke@435 130 // barrier is needed.
coleenp@548 131 if (UseCompressedOops) {
ysr@889 132 _metadata._compressed_klass = encode_heap_oop(k); // may be null (parnew overflow handling)
coleenp@548 133 } else {
coleenp@548 134 _metadata._klass = (klassOop)k;
coleenp@548 135 }
duke@435 136 }
duke@435 137
duke@435 138 inline void oopDesc::init_mark() { set_mark(markOopDesc::prototype_for_object(this)); }
duke@435 139 inline Klass* oopDesc::blueprint() const { return klass()->klass_part(); }
duke@435 140
duke@435 141 inline bool oopDesc::is_a(klassOop k) const { return blueprint()->is_subtype_of(k); }
duke@435 142
duke@435 143 inline bool oopDesc::is_instance() const { return blueprint()->oop_is_instance(); }
never@2658 144 inline bool oopDesc::is_instanceMirror() const { return blueprint()->oop_is_instanceMirror(); }
duke@435 145 inline bool oopDesc::is_instanceRef() const { return blueprint()->oop_is_instanceRef(); }
duke@435 146 inline bool oopDesc::is_array() const { return blueprint()->oop_is_array(); }
duke@435 147 inline bool oopDesc::is_objArray() const { return blueprint()->oop_is_objArray(); }
duke@435 148 inline bool oopDesc::is_typeArray() const { return blueprint()->oop_is_typeArray(); }
duke@435 149 inline bool oopDesc::is_javaArray() const { return blueprint()->oop_is_javaArray(); }
duke@435 150 inline bool oopDesc::is_klass() const { return blueprint()->oop_is_klass(); }
duke@435 151 inline bool oopDesc::is_thread() const { return blueprint()->oop_is_thread(); }
duke@435 152 inline bool oopDesc::is_method() const { return blueprint()->oop_is_method(); }
duke@435 153 inline bool oopDesc::is_constMethod() const { return blueprint()->oop_is_constMethod(); }
duke@435 154 inline bool oopDesc::is_methodData() const { return blueprint()->oop_is_methodData(); }
duke@435 155 inline bool oopDesc::is_constantPool() const { return blueprint()->oop_is_constantPool(); }
duke@435 156 inline bool oopDesc::is_constantPoolCache() const { return blueprint()->oop_is_constantPoolCache(); }
duke@435 157 inline bool oopDesc::is_compiledICHolder() const { return blueprint()->oop_is_compiledICHolder(); }
duke@435 158
duke@435 159 inline void* oopDesc::field_base(int offset) const { return (void*)&((char*)this)[offset]; }
duke@435 160
coleenp@548 161 template <class T> inline T* oopDesc::obj_field_addr(int offset) const { return (T*)field_base(offset); }
duke@435 162 inline jbyte* oopDesc::byte_field_addr(int offset) const { return (jbyte*) field_base(offset); }
duke@435 163 inline jchar* oopDesc::char_field_addr(int offset) const { return (jchar*) field_base(offset); }
duke@435 164 inline jboolean* oopDesc::bool_field_addr(int offset) const { return (jboolean*)field_base(offset); }
duke@435 165 inline jint* oopDesc::int_field_addr(int offset) const { return (jint*) field_base(offset); }
duke@435 166 inline jshort* oopDesc::short_field_addr(int offset) const { return (jshort*) field_base(offset); }
duke@435 167 inline jlong* oopDesc::long_field_addr(int offset) const { return (jlong*) field_base(offset); }
duke@435 168 inline jfloat* oopDesc::float_field_addr(int offset) const { return (jfloat*) field_base(offset); }
duke@435 169 inline jdouble* oopDesc::double_field_addr(int offset) const { return (jdouble*) field_base(offset); }
coleenp@548 170 inline address* oopDesc::address_field_addr(int offset) const { return (address*) field_base(offset); }
duke@435 171
coleenp@548 172
coleenp@548 173 // Functions for getting and setting oops within instance objects.
coleenp@548 174 // If the oops are compressed, the type passed to these overloaded functions
coleenp@548 175 // is narrowOop. All functions are overloaded so they can be called by
coleenp@548 176 // template functions without conditionals (the compiler instantiates via
coleenp@548 177 // the right type and inlines the appropriate code).
coleenp@548 178
coleenp@548 179 inline bool oopDesc::is_null(oop obj) { return obj == NULL; }
coleenp@548 180 inline bool oopDesc::is_null(narrowOop obj) { return obj == 0; }
coleenp@548 181
coleenp@548 182 // Algorithm for encoding and decoding oops from 64-bit pointers to 32-bit
coleenp@548 183 // offsets from the heap base. Omitting the null check saves instructions
coleenp@548 184 // in inner GC loops, so the checked and unchecked variants are separate.
coleenp@548 185
kvn@1926 186 inline bool check_obj_alignment(oop obj) {
kvn@1926 187 return (intptr_t)obj % MinObjAlignmentInBytes == 0;
kvn@1926 188 }
kvn@1926 189
coleenp@548 190 inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
coleenp@548 191 assert(!is_null(v), "oop value can never be zero");
kvn@1926 192 assert(check_obj_alignment(v), "Address not aligned");
ysr@1280 193 assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
kvn@1077 194 address base = Universe::narrow_oop_base();
kvn@1077 195 int shift = Universe::narrow_oop_shift();
kvn@1077 196 uint64_t pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
coleenp@570 197 assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
kvn@1077 198 uint64_t result = pd >> shift;
coleenp@570 199 assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
ysr@1280 200 assert(decode_heap_oop(result) == v, "reversibility");
coleenp@548 201 return (narrowOop)result;
coleenp@548 202 }
coleenp@548 203
coleenp@548 204 inline narrowOop oopDesc::encode_heap_oop(oop v) {
coleenp@548 205 return (is_null(v)) ? (narrowOop)0 : encode_heap_oop_not_null(v);
coleenp@548 206 }
coleenp@548 207
coleenp@548 208 inline oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
coleenp@548 209 assert(!is_null(v), "narrow oop value can never be zero");
kvn@1077 210 address base = Universe::narrow_oop_base();
kvn@1077 211 int shift = Universe::narrow_oop_shift();
kvn@1926 212 oop result = (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
twisti@2201 213 assert(check_obj_alignment(result), err_msg("address not aligned: " PTR_FORMAT, (void*) result));
kvn@1926 214 return result;
coleenp@548 215 }
coleenp@548 216
coleenp@548 217 inline oop oopDesc::decode_heap_oop(narrowOop v) {
coleenp@548 218 return is_null(v) ? (oop)NULL : decode_heap_oop_not_null(v);
coleenp@548 219 }
coleenp@548 220
coleenp@548 221 inline oop oopDesc::decode_heap_oop_not_null(oop v) { return v; }
coleenp@548 222 inline oop oopDesc::decode_heap_oop(oop v) { return v; }
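
Concretely, encoding is a subtract-and-shift and decoding is its inverse. A self-contained sketch with assumed base and shift values (the real ones come from Universe::narrow_oop_base() and Universe::narrow_oop_shift()):

    #include <cassert>
    #include <cstdint>

    static const uintptr_t kHeapBase = 0x0000000700000000ULL;  // assumption
    static const int       kShift    = 3;   // log2 of the object alignment

    // 64-bit address -> 32-bit offset from the heap base, scaled down by
    // the alignment so 32 bits can cover a larger heap.
    uint32_t encode(uintptr_t addr) {
      return (uint32_t)((addr - kHeapBase) >> kShift);
    }

    uintptr_t decode(uint32_t narrow) {
      return kHeapBase + ((uintptr_t)narrow << kShift);
    }

    int main() {
      uintptr_t p = kHeapBase + 0x10000;   // some aligned heap address
      assert(decode(encode(p)) == p);      // the "reversibility" assert above
      return 0;
    }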
coleenp@548 223
coleenp@548 224 // Load an oop out of the Java heap as is without decoding.
coleenp@548 225 // Called by GC to check for null before decoding.
coleenp@548 226 inline oop oopDesc::load_heap_oop(oop* p) { return *p; }
coleenp@548 227 inline narrowOop oopDesc::load_heap_oop(narrowOop* p) { return *p; }
coleenp@548 228
coleenp@548 229 // Load and decode an oop out of the Java heap into a wide oop.
coleenp@548 230 inline oop oopDesc::load_decode_heap_oop_not_null(oop* p) { return *p; }
coleenp@548 231 inline oop oopDesc::load_decode_heap_oop_not_null(narrowOop* p) {
coleenp@548 232 return decode_heap_oop_not_null(*p);
coleenp@548 233 }
coleenp@548 234
coleenp@548 235 // Load and decode an oop out of the heap accepting null
coleenp@548 236 inline oop oopDesc::load_decode_heap_oop(oop* p) { return *p; }
coleenp@548 237 inline oop oopDesc::load_decode_heap_oop(narrowOop* p) {
coleenp@548 238 return decode_heap_oop(*p);
coleenp@548 239 }
coleenp@548 240
coleenp@548 241 // Store already encoded heap oop into the heap.
coleenp@548 242 inline void oopDesc::store_heap_oop(oop* p, oop v) { *p = v; }
coleenp@548 243 inline void oopDesc::store_heap_oop(narrowOop* p, narrowOop v) { *p = v; }
coleenp@548 244
coleenp@548 245 // Encode and store a heap oop.
coleenp@548 246 inline void oopDesc::encode_store_heap_oop_not_null(narrowOop* p, oop v) {
coleenp@548 247 *p = encode_heap_oop_not_null(v);
coleenp@548 248 }
coleenp@548 249 inline void oopDesc::encode_store_heap_oop_not_null(oop* p, oop v) { *p = v; }
coleenp@548 250
coleenp@548 251 // Encode and store a heap oop allowing for null.
coleenp@548 252 inline void oopDesc::encode_store_heap_oop(narrowOop* p, oop v) {
coleenp@548 253 *p = encode_heap_oop(v);
coleenp@548 254 }
coleenp@548 255 inline void oopDesc::encode_store_heap_oop(oop* p, oop v) { *p = v; }
coleenp@548 256
coleenp@548 257 // Store heap oop as is for volatile fields.
coleenp@548 258 inline void oopDesc::release_store_heap_oop(volatile oop* p, oop v) {
coleenp@548 259 OrderAccess::release_store_ptr(p, v);
coleenp@548 260 }
coleenp@548 261 inline void oopDesc::release_store_heap_oop(volatile narrowOop* p,
coleenp@548 262 narrowOop v) {
coleenp@548 263 OrderAccess::release_store(p, v);
coleenp@548 264 }
coleenp@548 265
coleenp@548 266 inline void oopDesc::release_encode_store_heap_oop_not_null(
coleenp@548 267 volatile narrowOop* p, oop v) {
coleenp@548 268 // heap oop is not pointer sized.
coleenp@548 269 OrderAccess::release_store(p, encode_heap_oop_not_null(v));
coleenp@548 270 }
coleenp@548 271
coleenp@548 272 inline void oopDesc::release_encode_store_heap_oop_not_null(
coleenp@548 273 volatile oop* p, oop v) {
coleenp@548 274 OrderAccess::release_store_ptr(p, v);
coleenp@548 275 }
coleenp@548 276
coleenp@548 277 inline void oopDesc::release_encode_store_heap_oop(volatile oop* p,
coleenp@548 278 oop v) {
coleenp@548 279 OrderAccess::release_store_ptr(p, v);
coleenp@548 280 }
coleenp@548 281 inline void oopDesc::release_encode_store_heap_oop(
coleenp@548 282 volatile narrowOop* p, oop v) {
coleenp@548 283 OrderAccess::release_store(p, encode_heap_oop(v));
coleenp@548 284 }
coleenp@548 285
coleenp@548 286
coleenp@548 287 // These functions are only used to exchange oop fields in instances,
coleenp@548 288 // not headers.
coleenp@548 289 inline oop oopDesc::atomic_exchange_oop(oop exchange_value, volatile HeapWord *dest) {
coleenp@548 290 if (UseCompressedOops) {
coleenp@548 291 // encode exchange value from oop to T
coleenp@548 292 narrowOop val = encode_heap_oop(exchange_value);
coleenp@548 293 narrowOop old = (narrowOop)Atomic::xchg(val, (narrowOop*)dest);
coleenp@548 294 // decode old from T to oop
coleenp@548 295 return decode_heap_oop(old);
coleenp@548 296 } else {
coleenp@548 297 return (oop)Atomic::xchg_ptr(exchange_value, (oop*)dest);
coleenp@548 298 }
coleenp@548 299 }
coleenp@548 300
coleenp@548 301 inline oop oopDesc::atomic_compare_exchange_oop(oop exchange_value,
coleenp@548 302 volatile HeapWord *dest,
coleenp@548 303 oop compare_value) {
coleenp@548 304 if (UseCompressedOops) {
coleenp@548 305 // encode exchange and compare value from oop to T
coleenp@548 306 narrowOop val = encode_heap_oop(exchange_value);
coleenp@548 307 narrowOop cmp = encode_heap_oop(compare_value);
coleenp@548 308
coleenp@548 309 narrowOop old = (narrowOop) Atomic::cmpxchg(val, (narrowOop*)dest, cmp);
coleenp@548 310 // decode old from T to oop
coleenp@548 311 return decode_heap_oop(old);
coleenp@548 312 } else {
coleenp@548 313 return (oop)Atomic::cmpxchg_ptr(exchange_value, (oop*)dest, compare_value);
coleenp@548 314 }
coleenp@548 315 }
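
A sketch of the compressed-oops branch above, using std::atomic as a stand-in for Atomic::cmpxchg (encode/decode are the subtract-and-shift pair from the earlier sketch; null handling is elided, since the real encode_heap_oop special-cases it):

    #include <atomic>
    #include <cstdint>

    static const uintptr_t kHeapBase = 0x0000000700000000ULL;  // assumption
    static const int       kShift    = 3;

    static uint32_t  encode(uintptr_t a) { return (uint32_t)((a - kHeapBase) >> kShift); }
    static uintptr_t decode(uint32_t n)  { return kHeapBase + ((uintptr_t)n << kShift); }

    // Both operands must be narrowed first: the field is only 32 bits
    // wide, so the hardware CAS compares narrow against narrow.
    uintptr_t cas_oop_field(std::atomic<uint32_t>* field,
                            uintptr_t exchange_value, uintptr_t compare_value) {
      uint32_t val = encode(exchange_value);
      uint32_t cmp = encode(compare_value);
      field->compare_exchange_strong(cmp, val);  // cmp receives the old value
      return decode(cmp);                        // widen it again, as above
    }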
coleenp@548 316
coleenp@548 317 // To get or put an oop field of an instance we must first check
coleenp@548 318 // whether the field is compressed and, if so, uncompress it.
coleenp@548 319 inline oop oopDesc::obj_field(int offset) const {
coleenp@548 320 return UseCompressedOops ?
coleenp@548 321 load_decode_heap_oop(obj_field_addr<narrowOop>(offset)) :
coleenp@548 322 load_decode_heap_oop(obj_field_addr<oop>(offset));
coleenp@548 323 }
coleenp@548 324 inline void oopDesc::obj_field_put(int offset, oop value) {
coleenp@548 325 UseCompressedOops ? oop_store(obj_field_addr<narrowOop>(offset), value) :
coleenp@548 326 oop_store(obj_field_addr<oop>(offset), value);
coleenp@548 327 }
coleenp@548 328 inline void oopDesc::obj_field_raw_put(int offset, oop value) {
coleenp@548 329 UseCompressedOops ?
coleenp@548 330 encode_store_heap_oop(obj_field_addr<narrowOop>(offset), value) :
coleenp@548 331 encode_store_heap_oop(obj_field_addr<oop>(offset), value);
coleenp@548 332 }
duke@435 333
duke@435 334 inline jbyte oopDesc::byte_field(int offset) const { return (jbyte) *byte_field_addr(offset); }
duke@435 335 inline void oopDesc::byte_field_put(int offset, jbyte contents) { *byte_field_addr(offset) = (jint) contents; }
duke@435 336
duke@435 337 inline jboolean oopDesc::bool_field(int offset) const { return (jboolean) *bool_field_addr(offset); }
duke@435 338 inline void oopDesc::bool_field_put(int offset, jboolean contents) { *bool_field_addr(offset) = (jint) contents; }
duke@435 339
duke@435 340 inline jchar oopDesc::char_field(int offset) const { return (jchar) *char_field_addr(offset); }
duke@435 341 inline void oopDesc::char_field_put(int offset, jchar contents) { *char_field_addr(offset) = (jint) contents; }
duke@435 342
duke@435 343 inline jint oopDesc::int_field(int offset) const { return *int_field_addr(offset); }
duke@435 344 inline void oopDesc::int_field_put(int offset, jint contents) { *int_field_addr(offset) = contents; }
duke@435 345
duke@435 346 inline jshort oopDesc::short_field(int offset) const { return (jshort) *short_field_addr(offset); }
duke@435 347 inline void oopDesc::short_field_put(int offset, jshort contents) { *short_field_addr(offset) = (jint) contents;}
duke@435 348
duke@435 349 inline jlong oopDesc::long_field(int offset) const { return *long_field_addr(offset); }
duke@435 350 inline void oopDesc::long_field_put(int offset, jlong contents) { *long_field_addr(offset) = contents; }
duke@435 351
duke@435 352 inline jfloat oopDesc::float_field(int offset) const { return *float_field_addr(offset); }
duke@435 353 inline void oopDesc::float_field_put(int offset, jfloat contents) { *float_field_addr(offset) = contents; }
duke@435 354
duke@435 355 inline jdouble oopDesc::double_field(int offset) const { return *double_field_addr(offset); }
duke@435 356 inline void oopDesc::double_field_put(int offset, jdouble contents) { *double_field_addr(offset) = contents; }
duke@435 357
coleenp@548 358 inline address oopDesc::address_field(int offset) const { return *address_field_addr(offset); }
coleenp@548 359 inline void oopDesc::address_field_put(int offset, address contents) { *address_field_addr(offset) = contents; }
coleenp@548 360
coleenp@548 361 inline oop oopDesc::obj_field_acquire(int offset) const {
coleenp@548 362 return UseCompressedOops ?
coleenp@548 363 decode_heap_oop((narrowOop)
coleenp@548 364 OrderAccess::load_acquire(obj_field_addr<narrowOop>(offset)))
coleenp@548 365 : decode_heap_oop((oop)
coleenp@548 366 OrderAccess::load_ptr_acquire(obj_field_addr<oop>(offset)));
coleenp@548 367 }
coleenp@548 368 inline void oopDesc::release_obj_field_put(int offset, oop value) {
coleenp@548 369 UseCompressedOops ?
coleenp@548 370 oop_store((volatile narrowOop*)obj_field_addr<narrowOop>(offset), value) :
coleenp@548 371 oop_store((volatile oop*) obj_field_addr<oop>(offset), value);
coleenp@548 372 }
duke@435 373
duke@435 374 inline jbyte oopDesc::byte_field_acquire(int offset) const { return OrderAccess::load_acquire(byte_field_addr(offset)); }
duke@435 375 inline void oopDesc::release_byte_field_put(int offset, jbyte contents) { OrderAccess::release_store(byte_field_addr(offset), contents); }
duke@435 376
duke@435 377 inline jboolean oopDesc::bool_field_acquire(int offset) const { return OrderAccess::load_acquire(bool_field_addr(offset)); }
duke@435 378 inline void oopDesc::release_bool_field_put(int offset, jboolean contents) { OrderAccess::release_store(bool_field_addr(offset), contents); }
duke@435 379
duke@435 380 inline jchar oopDesc::char_field_acquire(int offset) const { return OrderAccess::load_acquire(char_field_addr(offset)); }
duke@435 381 inline void oopDesc::release_char_field_put(int offset, jchar contents) { OrderAccess::release_store(char_field_addr(offset), contents); }
duke@435 382
duke@435 383 inline jint oopDesc::int_field_acquire(int offset) const { return OrderAccess::load_acquire(int_field_addr(offset)); }
duke@435 384 inline void oopDesc::release_int_field_put(int offset, jint contents) { OrderAccess::release_store(int_field_addr(offset), contents); }
duke@435 385
duke@435 386 inline jshort oopDesc::short_field_acquire(int offset) const { return (jshort)OrderAccess::load_acquire(short_field_addr(offset)); }
duke@435 387 inline void oopDesc::release_short_field_put(int offset, jshort contents) { OrderAccess::release_store(short_field_addr(offset), contents); }
duke@435 388
duke@435 389 inline jlong oopDesc::long_field_acquire(int offset) const { return OrderAccess::load_acquire(long_field_addr(offset)); }
duke@435 390 inline void oopDesc::release_long_field_put(int offset, jlong contents) { OrderAccess::release_store(long_field_addr(offset), contents); }
duke@435 391
duke@435 392 inline jfloat oopDesc::float_field_acquire(int offset) const { return OrderAccess::load_acquire(float_field_addr(offset)); }
duke@435 393 inline void oopDesc::release_float_field_put(int offset, jfloat contents) { OrderAccess::release_store(float_field_addr(offset), contents); }
duke@435 394
duke@435 395 inline jdouble oopDesc::double_field_acquire(int offset) const { return OrderAccess::load_acquire(double_field_addr(offset)); }
duke@435 396 inline void oopDesc::release_double_field_put(int offset, jdouble contents) { OrderAccess::release_store(double_field_addr(offset), contents); }
duke@435 397
jrose@1145 398 inline address oopDesc::address_field_acquire(int offset) const { return (address) OrderAccess::load_ptr_acquire(address_field_addr(offset)); }
jrose@1145 399 inline void oopDesc::release_address_field_put(int offset, address contents) { OrderAccess::release_store_ptr(address_field_addr(offset), contents); }
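
The acquire/release pairs above establish the usual publication ordering. OrderAccess predates <atomic>, but C++11 atomics express the same contract; a sketch by analogy, not the implementation:

    #include <atomic>

    std::atomic<int> guard(0);   // plays the role of the volatile field
    int payload = 0;

    void writer() {
      payload = 42;                               // plain store
      guard.store(1, std::memory_order_release);  // like release_store
    }

    int reader() {
      if (guard.load(std::memory_order_acquire))  // like load_acquire
        return payload;                           // guaranteed to see 42
      return -1;                                  // not yet published
    }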
jrose@1145 400
duke@435 401 inline int oopDesc::size_given_klass(Klass* klass) {
duke@435 402 int lh = klass->layout_helper();
never@2658 403 int s;
duke@435 404
duke@435 405 // lh is now a value computed at class initialization that may hint
duke@435 406 // at the size. For instances, this is positive and equal to the
duke@435 407 // size. For arrays, this is negative and provides log2 of the
duke@435 408 // array element size. For other oops, it is zero and thus requires
duke@435 409 // a virtual call.
duke@435 410 //
duke@435 411 // We go to all this trouble because the size computation is at the
duke@435 412 // heart of phase 2 of mark-compaction, and called for every object,
duke@435 413 // alive or dead. So the speed here is equal in importance to the
duke@435 414 // speed of allocation.
duke@435 415
never@2658 416 if (lh > Klass::_lh_neutral_value) {
never@2658 417 if (!Klass::layout_helper_needs_slow_path(lh)) {
never@2658 418 s = lh >> LogHeapWordSize; // deliver size scaled by wordSize
never@2658 419 } else {
never@2658 420 s = klass->oop_size(this);
never@2658 421 }
never@2658 422 } else if (lh <= Klass::_lh_neutral_value) {
duke@435 423 // The most common case is instances; fall through if so.
duke@435 424 if (lh < Klass::_lh_neutral_value) {
duke@435 425 // Second most common case is arrays. We have to fetch the
duke@435 426 // length of the array, shift (multiply) it appropriately,
duke@435 427 // up to wordSize, add the header, and align to object size.
duke@435 428 size_t size_in_bytes;
duke@435 429 #ifdef _M_IA64
duke@435 430 // The Windows Itanium Aug 2002 SDK hoists this load above
duke@435 431 // the check for s < 0. An oop at the end of the heap will
duke@435 432 // cause an access violation if this load is performed on a non
duke@435 433 // array oop. Making the reference volatile prohibits this.
duke@435 434 // (%%% please explain by what magic the length is actually fetched!)
duke@435 435 volatile int *array_length;
duke@435 436 array_length = (volatile int *)( (intptr_t)this +
duke@435 437 arrayOopDesc::length_offset_in_bytes() );
duke@435 438 assert(array_length > 0, "Integer arithmetic problem somewhere");
duke@435 439 // Put into size_t to avoid overflow.
duke@435 440 size_in_bytes = (size_t) array_length;
duke@435 441 size_in_bytes = size_in_bytes << Klass::layout_helper_log2_element_size(lh);
duke@435 442 #else
duke@435 443 size_t array_length = (size_t) ((arrayOop)this)->length();
duke@435 444 size_in_bytes = array_length << Klass::layout_helper_log2_element_size(lh);
duke@435 445 #endif
duke@435 446 size_in_bytes += Klass::layout_helper_header_size(lh);
duke@435 447
duke@435 448 // This code could be simplified, but by keeping array_header_in_bytes
duke@435 449 // in units of bytes and doing it this way we can round up just once,
duke@435 450 // skipping the intermediate round to HeapWordSize. Cast the result
duke@435 451 // of round_to to size_t to guarantee unsigned division == right shift.
duke@435 452 s = (int)((size_t)round_to(size_in_bytes, MinObjAlignmentInBytes) /
duke@435 453 HeapWordSize);
duke@435 454
ysr@777 455 // UseParNewGC, UseParallelGC and UseG1GC can change the length field
ysr@777 456 // of an "old copy" of an object array in the young gen so it indicates
ysr@777 457 // the grey portion of an already copied array. This will cause the first
ysr@777 458 // disjunct below to fail if the two comparands are computed across such
ysr@777 459 // a concurrent change.
duke@435 460 // UseParNewGC also runs with promotion labs (which look like int
duke@435 461 // filler arrays) which are subject to changing their declared size
duke@435 462 // when finally retiring a PLAB; this also can cause the first disjunct
duke@435 463 // to fail for another worker thread that is concurrently walking the block
duke@435 464 // offset table. Both these invariant failures are benign for their
duke@435 465 // current uses; we relax the assertion checking to cover these two cases below:
duke@435 466 // is_objArray() && is_forwarded() // covers first scenario above
duke@435 467 // || is_typeArray() // covers second scenario above
duke@435 468 // If and when UseParallelGC uses the same obj array oop stealing/chunking
ysr@777 469 // technique, we will need to suitably modify the assertion.
duke@435 470 assert((s == klass->oop_size(this)) ||
ysr@777 471 (Universe::heap()->is_gc_active() &&
ysr@777 472 ((is_typeArray() && UseParNewGC) ||
ysr@777 473 (is_objArray() && is_forwarded() && (UseParNewGC || UseParallelGC || UseG1GC)))),
duke@435 474 "wrong array object size");
duke@435 475 } else {
duke@435 476 // Must be zero, so bite the bullet and take the virtual call.
duke@435 477 s = klass->oop_size(this);
duke@435 478 }
duke@435 479 }
duke@435 480
duke@435 481 assert(s % MinObjAlignment == 0, "alignment check");
duke@435 482 assert(s > 0, "Bad size calculated");
duke@435 483 return s;
duke@435 484 }
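
A worked example of the layout-helper arithmetic, with assumed constants (the real encodings live in Klass):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    static const int kLogHeapWordSize = 3;  // 64-bit heap words (assumption)

    // Instance: the helper is the positive size in bytes.
    int instance_size_words(int lh) {
      assert(lh > 0);
      return lh >> kLogHeapWordSize;        // bytes -> heap words
    }

    // Array: shift the length by log2 of the element size, add the
    // header, and round up to the object alignment exactly once.
    size_t array_size_bytes(size_t length, int header_bytes, int log2_elem) {
      size_t bytes = (length << log2_elem) + header_bytes;
      size_t align = 8;                     // MinObjAlignmentInBytes assumed
      return (bytes + align - 1) & ~(align - 1);
    }

    int main() {
      // A hypothetical int[10] with a 16-byte header:
      // 10*4 + 16 = 56 bytes, already 8-byte aligned.
      assert(array_size_bytes(10, 16, 2) == 56);
      return 0;
    }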
duke@435 485
duke@435 486
duke@435 487 inline int oopDesc::size() {
duke@435 488 return size_given_klass(blueprint());
duke@435 489 }
duke@435 490
duke@435 491 inline bool oopDesc::is_parsable() {
duke@435 492 return blueprint()->oop_is_parsable(this);
duke@435 493 }
duke@435 494
jmasa@953 495 inline bool oopDesc::is_conc_safe() {
jmasa@953 496 return blueprint()->oop_is_conc_safe(this);
jmasa@953 497 }
jmasa@953 498
coleenp@548 499 inline void update_barrier_set(void* p, oop v) {
duke@435 500 assert(oopDesc::bs() != NULL, "Uninitialized bs in oop!");
duke@435 501 oopDesc::bs()->write_ref_field(p, v);
duke@435 502 }
duke@435 503
ysr@1280 504 template <class T> inline void update_barrier_set_pre(T* p, oop v) {
ysr@777 505 oopDesc::bs()->write_ref_field_pre(p, v);
ysr@777 506 }
ysr@777 507
coleenp@548 508 template <class T> inline void oop_store(T* p, oop v) {
duke@435 509 if (always_do_update_barrier) {
coleenp@548 510 oop_store((volatile T*)p, v);
duke@435 511 } else {
ysr@777 512 update_barrier_set_pre(p, v);
coleenp@548 513 oopDesc::encode_store_heap_oop(p, v);
ysr@1280 514 update_barrier_set((void*)p, v); // cast away type
duke@435 515 }
duke@435 516 }
duke@435 517
coleenp@548 518 template <class T> inline void oop_store(volatile T* p, oop v) {
ysr@1280 519 update_barrier_set_pre((T*)p, v); // cast away volatile
duke@435 520 // Used by release_obj_field_put, so use release_store_ptr.
coleenp@548 521 oopDesc::release_encode_store_heap_oop(p, v);
ysr@1280 522 update_barrier_set((void*)p, v); // cast away type
duke@435 523 }
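
The three-step sequence in oop_store is the heart of G1's write barrier: log the value being overwritten (SATB pre-barrier), perform the store, then dirty the card covering the field (post-barrier). A hypothetical standalone model, with an assumed 512-byte card size and 0 meaning "dirty":

    #include <cstdint>
    #include <vector>

    static std::vector<void*> satb_log;          // assumed SATB buffer
    static unsigned char card_table[1 << 20];    // assumed card table

    void pre_barrier(void** field) {
      void* old = *field;                 // value about to be overwritten
      if (old != nullptr) satb_log.push_back(old);
    }

    void post_barrier(void** field) {
      // Dirty the card covering this field so the next GC rescans it.
      card_table[((uintptr_t)field >> 9) % (1 << 20)] = 0;
    }

    void oop_store_model(void** field, void* value) {
      pre_barrier(field);                 // update_barrier_set_pre above
      *field = value;                     // encode_store_heap_oop above
      post_barrier(field);                // update_barrier_set above
    }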
duke@435 524
coleenp@548 525 template <class T> inline void oop_store_without_check(T* p, oop v) {
duke@435 526 // XXX YSR FIX ME!!!
duke@435 527 if (always_do_update_barrier) {
coleenp@548 528 oop_store(p, v);
duke@435 529 } else {
duke@435 530 assert(!Universe::heap()->barrier_set()->write_ref_needs_barrier(p, v),
duke@435 531 "oop store without store check failed");
coleenp@548 532 oopDesc::encode_store_heap_oop(p, v);
duke@435 533 }
duke@435 534 }
duke@435 535
duke@435 536 // When it absolutely has to get there.
coleenp@548 537 template <class T> inline void oop_store_without_check(volatile T* p, oop v) {
duke@435 538 // XXX YSR FIX ME!!!
duke@435 539 if (always_do_update_barrier) {
duke@435 540 oop_store(p, v);
duke@435 541 } else {
coleenp@548 542 assert(!Universe::heap()->barrier_set()->write_ref_needs_barrier((T*)p, v),
duke@435 543 "oop store without store check failed");
coleenp@548 544 oopDesc::release_encode_store_heap_oop(p, v);
duke@435 545 }
duke@435 546 }
duke@435 547
coleenp@548 548 // Should replace *addr = oop assignments where addr type depends on UseCompressedOops
coleenp@548 549 // (without having to remember the function name this calls).
coleenp@548 550 inline void oop_store_raw(HeapWord* addr, oop value) {
coleenp@548 551 if (UseCompressedOops) {
coleenp@548 552 oopDesc::encode_store_heap_oop((narrowOop*)addr, value);
coleenp@548 553 } else {
coleenp@548 554 oopDesc::encode_store_heap_oop((oop*)addr, value);
coleenp@548 555 }
coleenp@548 556 }
duke@435 557
duke@435 558 // Used only for markSweep, scavenging
duke@435 559 inline bool oopDesc::is_gc_marked() const {
duke@435 560 return mark()->is_marked();
duke@435 561 }
duke@435 562
duke@435 563 inline bool oopDesc::is_locked() const {
duke@435 564 return mark()->is_locked();
duke@435 565 }
duke@435 566
duke@435 567 inline bool oopDesc::is_unlocked() const {
duke@435 568 return mark()->is_unlocked();
duke@435 569 }
duke@435 570
duke@435 571 inline bool oopDesc::has_bias_pattern() const {
duke@435 572 return mark()->has_bias_pattern();
duke@435 573 }
duke@435 574
duke@435 575
duke@435 576 // used only for asserts
duke@435 577 inline bool oopDesc::is_oop(bool ignore_mark_word) const {
duke@435 578 oop obj = (oop) this;
duke@435 579 if (!check_obj_alignment(obj)) return false;
duke@435 580 if (!Universe::heap()->is_in_reserved(obj)) return false;
duke@435 581 // obj is aligned and accessible in heap
duke@435 582 // try to find metaclass cycle safely without seg faulting on bad input
duke@435 583 // we should reach klassKlassObj by following klass link at most 3 times
duke@435 584 for (int i = 0; i < 3; i++) {
coleenp@602 585 obj = obj->klass_or_null();
duke@435 586 // klass should be aligned and in permspace
duke@435 587 if (!check_obj_alignment(obj)) return false;
duke@435 588 if (!Universe::heap()->is_in_permanent(obj)) return false;
duke@435 589 }
duke@435 590 if (obj != Universe::klassKlassObj()) {
duke@435 591 // During a dump, the _klassKlassObj moved to a shared space.
duke@435 592 if (DumpSharedSpaces && Universe::klassKlassObj()->is_shared()) {
duke@435 593 return true;
duke@435 594 }
duke@435 595 return false;
duke@435 596 }
duke@435 597
duke@435 598 // Header verification: the mark is typically non-NULL. If we're
duke@435 599 // at a safepoint, it must not be null.
duke@435 600 // Outside of a safepoint, the header could be changing (for example,
duke@435 601 // another thread could be inflating a lock on this object).
duke@435 602 if (ignore_mark_word) {
duke@435 603 return true;
duke@435 604 }
duke@435 605 if (mark() != NULL) {
duke@435 606 return true;
duke@435 607 }
duke@435 608 return !SafepointSynchronize::is_at_safepoint();
duke@435 609 }
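
A standalone model of the bounded klass-link walk above (hypothetical types; the real walk uses klass_or_null() plus alignment and permspace checks at every hop):

    struct Obj { Obj* klass; };

    // A well-formed oop reaches the self-describing klassKlass object in
    // at most three hops, e.g. instance -> instanceKlass ->
    // instanceKlassKlass -> klassKlassObj.
    bool reaches_klass_klass(Obj* obj, Obj* klass_klass_obj) {
      for (int i = 0; i < 3; i++) {
        obj = obj->klass;                 // hop up the metaclass chain
        if (obj == nullptr) return false; // bad input: bail out safely
      }
      return obj == klass_klass_obj;
    }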
duke@435 610
duke@435 611
duke@435 612 // used only for asserts
duke@435 613 inline bool oopDesc::is_oop_or_null(bool ignore_mark_word) const {
duke@435 614 return this == NULL ? true : is_oop(ignore_mark_word);
duke@435 615 }
duke@435 616
duke@435 617 #ifndef PRODUCT
duke@435 618 // used only for asserts
duke@435 619 inline bool oopDesc::is_unlocked_oop() const {
duke@435 620 if (!Universe::heap()->is_in_reserved(this)) return false;
duke@435 621 return mark()->is_unlocked();
duke@435 622 }
duke@435 623 #endif // PRODUCT
duke@435 624
duke@435 625 inline void oopDesc::follow_header() {
coleenp@548 626 if (UseCompressedOops) {
coleenp@548 627 MarkSweep::mark_and_push(compressed_klass_addr());
coleenp@548 628 } else {
coleenp@548 629 MarkSweep::mark_and_push(klass_addr());
coleenp@548 630 }
duke@435 631 }
duke@435 632
coleenp@548 633 inline void oopDesc::follow_contents(void) {
duke@435 634 assert (is_gc_marked(), "should be marked");
duke@435 635 blueprint()->oop_follow_contents(this);
duke@435 636 }
duke@435 637
duke@435 638
duke@435 639 // Used by scavengers
duke@435 640
duke@435 641 inline bool oopDesc::is_forwarded() const {
duke@435 642 // The extra heap check is needed since the obj might be locked, in which case the
duke@435 643 // mark would point to a stack location and have the sentinel bit cleared
duke@435 644 return mark()->is_marked();
duke@435 645 }
duke@435 646
duke@435 647 // Used by scavengers
duke@435 648 inline void oopDesc::forward_to(oop p) {
kvn@1926 649 assert(check_obj_alignment(p),
kvn@1926 650 "forwarding to something not aligned");
duke@435 651 assert(Universe::heap()->is_in_reserved(p),
duke@435 652 "forwarding to something not in heap");
duke@435 653 markOop m = markOopDesc::encode_pointer_as_mark(p);
duke@435 654 assert(m->decode_pointer() == p, "encoding must be reversible");
duke@435 655 set_mark(m);
duke@435 656 }
duke@435 657
duke@435 658 // Used by parallel scavengers
duke@435 659 inline bool oopDesc::cas_forward_to(oop p, markOop compare) {
kvn@1926 660 assert(check_obj_alignment(p),
kvn@1926 661 "forwarding to something not aligned");
duke@435 662 assert(Universe::heap()->is_in_reserved(p),
duke@435 663 "forwarding to something not in heap");
duke@435 664 markOop m = markOopDesc::encode_pointer_as_mark(p);
duke@435 665 assert(m->decode_pointer() == p, "encoding must be reversible");
duke@435 666 return cas_set_mark(m, compare) == compare;
duke@435 667 }
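
Both forwarding paths encode the new copy's address directly in the mark word, tagged so is_forwarded() can recognize it. A standalone model with an assumed two-bit tag (the real encoding is in markOopDesc):

    #include <atomic>
    #include <cassert>
    #include <cstdint>

    static const uintptr_t kMarkedTag = 0x3;   // assumed "forwarded" pattern

    uintptr_t encode_forwarding(void* new_copy) {
      uintptr_t p = (uintptr_t)new_copy;
      assert((p & kMarkedTag) == 0);    // alignment leaves the tag bits free
      return p | kMarkedTag;
    }

    void* decode_forwarding(uintptr_t mark) {
      return (void*)(mark & ~kMarkedTag);      // forwardee(), in effect
    }

    // Parallel scavengers race to claim an object; exactly one CAS wins
    // and the losers use the winner's copy.
    bool try_forward(std::atomic<uintptr_t>* mark, uintptr_t old_mark,
                     void* new_copy) {
      return mark->compare_exchange_strong(old_mark, encode_forwarding(new_copy));
    }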
duke@435 668
duke@435 669 // Note that the forwardee is not the same thing as the displaced_mark.
duke@435 670 // The forwardee is used when copying during scavenge and mark-sweep.
duke@435 671 // It does need to clear the low two locking- and GC-related bits.
coleenp@548 672 inline oop oopDesc::forwardee() const {
coleenp@548 673 return (oop) mark()->decode_pointer();
coleenp@548 674 }
duke@435 675
duke@435 676 inline bool oopDesc::has_displaced_mark() const {
duke@435 677 return mark()->has_displaced_mark_helper();
duke@435 678 }
duke@435 679
duke@435 680 inline markOop oopDesc::displaced_mark() const {
duke@435 681 return mark()->displaced_mark_helper();
duke@435 682 }
duke@435 683
duke@435 684 inline void oopDesc::set_displaced_mark(markOop m) {
duke@435 685 mark()->set_displaced_mark_helper(m);
duke@435 686 }
duke@435 687
duke@435 688 // The following method needs to be MT safe.
duke@435 689 inline int oopDesc::age() const {
duke@435 690 assert(!is_forwarded(), "Attempt to read age from forwarded mark");
duke@435 691 if (has_displaced_mark()) {
duke@435 692 return displaced_mark()->age();
duke@435 693 } else {
duke@435 694 return mark()->age();
duke@435 695 }
duke@435 696 }
duke@435 697
duke@435 698 inline void oopDesc::incr_age() {
duke@435 699 assert(!is_forwarded(), "Attempt to increment age of forwarded mark");
duke@435 700 if (has_displaced_mark()) {
duke@435 701 set_displaced_mark(displaced_mark()->incr_age());
duke@435 702 } else {
duke@435 703 set_mark(mark()->incr_age());
duke@435 704 }
duke@435 705 }
duke@435 706
duke@435 707
duke@435 708 inline intptr_t oopDesc::identity_hash() {
duke@435 709 // Fast case; if the object is unlocked and the hash value is set, no locking is needed
duke@435 710 // Note: The mark must be read into local variable to avoid concurrent updates.
duke@435 711 markOop mrk = mark();
duke@435 712 if (mrk->is_unlocked() && !mrk->has_no_hash()) {
duke@435 713 return mrk->hash();
duke@435 714 } else if (mrk->is_marked()) {
duke@435 715 return mrk->hash();
duke@435 716 } else {
duke@435 717 return slow_identity_hash();
duke@435 718 }
duke@435 719 }
duke@435 720
duke@435 721 inline void oopDesc::oop_iterate_header(OopClosure* blk) {
coleenp@548 722 if (UseCompressedOops) {
coleenp@548 723 blk->do_oop(compressed_klass_addr());
coleenp@548 724 } else {
coleenp@548 725 blk->do_oop(klass_addr());
coleenp@548 726 }
duke@435 727 }
duke@435 728
duke@435 729 inline void oopDesc::oop_iterate_header(OopClosure* blk, MemRegion mr) {
coleenp@548 730 if (UseCompressedOops) {
coleenp@548 731 if (mr.contains(compressed_klass_addr())) {
coleenp@548 732 blk->do_oop(compressed_klass_addr());
coleenp@548 733 }
coleenp@548 734 } else {
coleenp@548 735 if (mr.contains(klass_addr())) blk->do_oop(klass_addr());
coleenp@548 736 }
duke@435 737 }
duke@435 738
duke@435 739 inline int oopDesc::adjust_pointers() {
duke@435 740 debug_only(int check_size = size());
duke@435 741 int s = blueprint()->oop_adjust_pointers(this);
duke@435 742 assert(s == check_size, "should be the same");
duke@435 743 return s;
duke@435 744 }
duke@435 745
duke@435 746 inline void oopDesc::adjust_header() {
coleenp@548 747 if (UseCompressedOops) {
coleenp@548 748 MarkSweep::adjust_pointer(compressed_klass_addr());
coleenp@548 749 } else {
coleenp@548 750 MarkSweep::adjust_pointer(klass_addr());
coleenp@548 751 }
duke@435 752 }
duke@435 753
duke@435 754 #define OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
duke@435 755 \
duke@435 756 inline int oopDesc::oop_iterate(OopClosureType* blk) { \
duke@435 757 SpecializationStats::record_call(); \
duke@435 758 return blueprint()->oop_oop_iterate##nv_suffix(this, blk); \
duke@435 759 } \
duke@435 760 \
duke@435 761 inline int oopDesc::oop_iterate(OopClosureType* blk, MemRegion mr) { \
duke@435 762 SpecializationStats::record_call(); \
duke@435 763 return blueprint()->oop_oop_iterate##nv_suffix##_m(this, blk, mr); \
duke@435 764 }
duke@435 765
duke@435 766 ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_DEFN)
ysr@777 767 ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_DEFN)
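
To make the token pasting concrete, here is the approximate expansion of OOP_ITERATE_DEFN for one specialized closure with the non-virtual suffix _nv (closure name chosen for illustration; the actual list comes from the ALL_OOP_OOP_ITERATE_CLOSURES macros):

    inline int oopDesc::oop_iterate(ScanClosure* blk) {
      SpecializationStats::record_call();
      return blueprint()->oop_oop_iterate_nv(this, blk);
    }

    inline int oopDesc::oop_iterate(ScanClosure* blk, MemRegion mr) {
      SpecializationStats::record_call();
      return blueprint()->oop_oop_iterate_nv_m(this, blk, mr);
    }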
duke@435 768
ysr@777 769 #ifndef SERIALGC
ysr@777 770 #define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
ysr@777 771 \
ysr@777 772 inline int oopDesc::oop_iterate_backwards(OopClosureType* blk) { \
ysr@777 773 SpecializationStats::record_call(); \
ysr@777 774 return blueprint()->oop_oop_iterate_backwards##nv_suffix(this, blk); \
ysr@777 775 }
ysr@777 776
ysr@777 777 ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_BACKWARDS_DEFN)
ysr@777 778 ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_BACKWARDS_DEFN)
ysr@777 779 #endif // !SERIALGC
duke@435 780
duke@435 781 inline bool oopDesc::is_shared() const {
duke@435 782 return CompactingPermGenGen::is_shared(this);
duke@435 783 }
duke@435 784
duke@435 785 inline bool oopDesc::is_shared_readonly() const {
duke@435 786 return CompactingPermGenGen::is_shared_readonly(this);
duke@435 787 }
duke@435 788
duke@435 789 inline bool oopDesc::is_shared_readwrite() const {
duke@435 790 return CompactingPermGenGen::is_shared_readwrite(this);
duke@435 791 }
stefank@2314 792
stefank@2314 793 #endif // SHARE_VM_OOPS_OOP_INLINE_HPP
