src/share/vm/oops/markOop.hpp

author       johnc
date         Thu, 07 Apr 2011 09:53:20 -0700
changeset    2781:e1162778c1c8
parent       2497:3582bf76420e
child        2708:1d1603768966
permissions  -rw-r--r--

7009266: G1: assert(obj->is_oop_or_null(true )) failed: Error
Summary: A referent object that is only weakly reachable at the start of concurrent marking but is re-attached to the strongly reachable object graph during marking may not be marked as live. This can cause the reference object to be processed prematurely and leave dangling pointers to the referent object. Implement a read barrier for the java.lang.ref.Reference::referent field by intrinsifying the Reference.get() method, and intercepting accesses through JNI, reflection, and Unsafe, so that when a non-null referent object is read it is also logged in an SATB buffer.
Reviewed-by: kvn, iveresov, never, tonyp, dholmes
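For readers unfamiliar with SATB (snapshot-at-the-beginning) barriers, the sketch below illustrates the idea behind this fix in plain C++. It is a conceptual model only, not the HotSpot code: marking_is_active and satb_enqueue() are hypothetical stand-ins for G1's per-thread SATB queues, and the real barrier is generated by the interpreter and JIT compilers when Reference.get() is intrinsified.

#include <cstddef>

typedef void* oop;   // simplified stand-in for a Java object pointer

// Hypothetical hooks standing in for G1's marking state and SATB queues.
static bool marking_is_active = false;
static void satb_enqueue(oop obj) { (void)obj; /* record obj so concurrent marking treats it as live */ }

// Conceptual read barrier for java.lang.ref.Reference.referent: whenever a
// non-null referent is loaded (via Reference.get(), JNI, reflection, or
// Unsafe) while concurrent marking is running, it is also logged in an SATB
// buffer so the marker keeps it reachable.
static oop read_referent_with_barrier(oop* referent_addr) {
  oop referent = *referent_addr;
  if (marking_is_active && referent != NULL) {
    satb_enqueue(referent);
  }
  return referent;
}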

/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OOPS_MARKOOP_HPP
#define SHARE_VM_OOPS_MARKOOP_HPP

#include "oops/oop.hpp"

// The markOop describes the header of an object.
//
// Note that the mark is not a real oop but just a word.
// It is placed in the oop hierarchy for historical reasons.
//
// Bit-format of an object header (most significant first, big endian layout below):
//
//  32 bits:
//  --------
//             hash:25 ------------>| age:4    biased_lock:1 lock:2 (normal object)
//             JavaThread*:23 epoch:2 age:4    biased_lock:1 lock:2 (biased object)
//             size:32 ------------------------------------------>| (CMS free block)
//             PromotedObject*:29 ---------->| promo_bits:3 ----->| (CMS promoted object)
//
//  64 bits:
//  --------
//  unused:25 hash:31 -->| unused:1   age:4    biased_lock:1 lock:2 (normal object)
//  JavaThread*:54 epoch:2 unused:1   age:4    biased_lock:1 lock:2 (biased object)
//  PromotedObject*:61 --------------------->| promo_bits:3 ----->| (CMS promoted object)
//  size:64 ----------------------------------------------------->| (CMS free block)
//
//  unused:25 hash:31 -->| cms_free:1 age:4    biased_lock:1 lock:2 (COOPs && normal object)
//  JavaThread*:54 epoch:2 cms_free:1 age:4    biased_lock:1 lock:2 (COOPs && biased object)
//  narrowOop:32 unused:24 cms_free:1 unused:4 promo_bits:3 ----->| (COOPs && CMS promoted object)
//  unused:21 size:35 -->| cms_free:1 unused:7 ------------------>| (COOPs && CMS free block)
//
//  - hash contains the identity hash value: largest value is
//    31 bits, see os::random().  Also, 64-bit vm's require
//    a hash value no bigger than 32 bits because they will not
//    properly generate a mask larger than that: see library_call.cpp
//    and c1_CodePatterns_sparc.cpp.
//
//  - the biased lock pattern is used to bias a lock toward a given
//    thread. When this pattern is set in the low three bits, the lock
//    is either biased toward a given thread or "anonymously" biased,
//    indicating that it is possible for it to be biased. When the
//    lock is biased toward a given thread, locking and unlocking can
//    be performed by that thread without using atomic operations.
//    When a lock's bias is revoked, it reverts back to the normal
//    locking scheme described below.
//
//    Note that we are overloading the meaning of the "unlocked" state
//    of the header. Because we steal a bit from the age we can
//    guarantee that the bias pattern will never be seen for a truly
//    unlocked object.
//
//    Note also that the biased state contains the age bits normally
//    contained in the object header. Large increases in scavenge
//    times were seen when these bits were absent and an arbitrary age
//    assigned to all biased objects, because they tended to consume a
//    significant fraction of the eden semispaces and were not
//    promoted promptly, causing an increase in the amount of copying
//    performed. The runtime system aligns all JavaThread* pointers to
//    a very large value (currently 128 bytes (32bVM) or 256 bytes (64bVM))
//    to make room for the age bits & the epoch bits (used in support of
//    biased locking), and for the CMS "freeness" bit in the 64bVM (+COOPs).
//
//    [JavaThread* | epoch | age | 1 | 01]       lock is biased toward given thread
//    [0           | epoch | age | 1 | 01]       lock is anonymously biased
//
//  - the two lock bits are used to describe three states: locked/unlocked and monitor.
//
//    [ptr             | 00]  locked             ptr points to real header on stack
//    [header          | 0 | 01]  unlocked       regular object header
//    [ptr             | 10]  monitor            inflated lock (header is swapped out)
//    [ptr             | 11]  marked             used by markSweep to mark an object
//                                                not valid at any other time
//
//    We assume that stack/thread pointers have the lowest two bits cleared.

class BasicLock;
class ObjectMonitor;
class JavaThread;

class markOopDesc: public oopDesc {
 private:
  // Conversion
  uintptr_t value() const { return (uintptr_t) this; }

 public:
  // Constants
  enum { age_bits                 = 4,
         lock_bits                = 2,
         biased_lock_bits         = 1,
         max_hash_bits            = BitsPerWord - age_bits - lock_bits - biased_lock_bits,
         hash_bits                = max_hash_bits > 31 ? 31 : max_hash_bits,
         cms_bits                 = LP64_ONLY(1) NOT_LP64(0),
         epoch_bits               = 2
  };

  // The biased locking code currently requires that the age bits be
  // contiguous to the lock bits.
  enum { lock_shift               = 0,
         biased_lock_shift        = lock_bits,
         age_shift                = lock_bits + biased_lock_bits,
         cms_shift                = age_shift + age_bits,
         hash_shift               = cms_shift + cms_bits,
         epoch_shift              = hash_shift
  };

  enum { lock_mask                = right_n_bits(lock_bits),
         lock_mask_in_place       = lock_mask << lock_shift,
         biased_lock_mask         = right_n_bits(lock_bits + biased_lock_bits),
         biased_lock_mask_in_place= biased_lock_mask << lock_shift,
         biased_lock_bit_in_place = 1 << biased_lock_shift,
         age_mask                 = right_n_bits(age_bits),
         age_mask_in_place        = age_mask << age_shift,
         epoch_mask               = right_n_bits(epoch_bits),
         epoch_mask_in_place      = epoch_mask << epoch_shift,
         cms_mask                 = right_n_bits(cms_bits),
         cms_mask_in_place        = cms_mask << cms_shift
#ifndef _WIN64
         ,hash_mask               = right_n_bits(hash_bits),
         hash_mask_in_place       = (address_word)hash_mask << hash_shift
#endif
  };

  // Alignment of JavaThread pointers encoded in object header required by biased locking
  enum { biased_lock_alignment    = 2 << (epoch_shift + epoch_bits)
  };

#ifdef _WIN64
  // These values are too big for Win64
  const static uintptr_t hash_mask = right_n_bits(hash_bits);
  const static uintptr_t hash_mask_in_place =
                            (address_word)hash_mask << hash_shift;
#endif

  enum { locked_value             = 0,
         unlocked_value           = 1,
         monitor_value            = 2,
         marked_value             = 3,
         biased_lock_pattern      = 5
  };

  enum { no_hash                  = 0 };  // no hash value assigned

  enum { no_hash_in_place         = (address_word)no_hash << hash_shift,
         no_lock_in_place         = unlocked_value
  };

  enum { max_age                  = age_mask };

  enum { max_bias_epoch           = epoch_mask };

  // Biased Locking accessors.
  // These must be checked by all code which calls into the
  // ObjectSynchronizer and other code. The biasing is not understood
  // by the lower-level CAS-based locking code, although the runtime
  // fixes up biased locks to be compatible with it when a bias is
  // revoked.
  bool has_bias_pattern() const {
    return (mask_bits(value(), biased_lock_mask_in_place) == biased_lock_pattern);
  }
  JavaThread* biased_locker() const {
    assert(has_bias_pattern(), "should not call this otherwise");
    return (JavaThread*) ((intptr_t) (mask_bits(value(), ~(biased_lock_mask_in_place | age_mask_in_place | epoch_mask_in_place))));
  }
  // Indicates that the mark has the bias bit set but that it has not
  // yet been biased toward a particular thread
  bool is_biased_anonymously() const {
    return (has_bias_pattern() && (biased_locker() == NULL));
  }
  // Indicates epoch in which this bias was acquired. If the epoch
  // changes due to too many bias revocations occurring, the biases
  // from the previous epochs are all considered invalid.
  int bias_epoch() const {
    assert(has_bias_pattern(), "should not call this otherwise");
    return (mask_bits(value(), epoch_mask_in_place) >> epoch_shift);
  }
  markOop set_bias_epoch(int epoch) {
    assert(has_bias_pattern(), "should not call this otherwise");
    assert((epoch & (~epoch_mask)) == 0, "epoch overflow");
    return markOop(mask_bits(value(), ~epoch_mask_in_place) | (epoch << epoch_shift));
  }
  markOop incr_bias_epoch() {
    return set_bias_epoch((1 + bias_epoch()) & epoch_mask);
  }
  // Prototype mark for initialization
  static markOop biased_locking_prototype() {
    return markOop( biased_lock_pattern );
  }

  // lock accessors (note that these assume lock_shift == 0)
  bool is_locked()   const {
    return (mask_bits(value(), lock_mask_in_place) != unlocked_value);
  }
  bool is_unlocked() const {
    return (mask_bits(value(), biased_lock_mask_in_place) == unlocked_value);
  }
  bool is_marked()   const {
    return (mask_bits(value(), lock_mask_in_place) == marked_value);
  }
  bool is_neutral()  const { return (mask_bits(value(), biased_lock_mask_in_place) == unlocked_value); }

  // Special temporary state of the markOop while being inflated.
  // Code that looks at mark outside a lock needs to take this into account.
  bool is_being_inflated() const { return (value() == 0); }

  // Distinguished markword value - used when inflating over
  // an existing stacklock.  0 indicates the markword is "BUSY".
  // Lockword mutators that use a LD...CAS idiom should always
  // check for and avoid overwriting a 0 value installed by some
  // other thread.  (They should spin or block instead.  The 0 value
  // is transient and *should* be short-lived).
  static markOop INFLATING() { return (markOop) 0; }    // inflate-in-progress

  // Should this header be preserved during GC?
  inline bool must_be_preserved(oop obj_containing_mark) const;
  inline bool must_be_preserved_with_bias(oop obj_containing_mark) const;

  // Should this header (including its age bits) be preserved in the
  // case of a promotion failure during scavenge?
  // Note that we special case this situation. We want to avoid
  // calling BiasedLocking::preserve_marks()/restore_marks() (which
  // decrease the number of mark words that need to be preserved
  // during GC) during each scavenge. During scavenges in which there
  // is no promotion failure, we actually don't need to call the above
  // routines at all, since we don't mutate and re-initialize the
  // marks of promoted objects using init_mark(). However, during
  // scavenges which result in promotion failure, we do re-initialize
  // the mark words of objects, meaning that we should have called
  // these mark word preservation routines. Currently there's no good
  // place in which to call them in any of the scavengers (although
  // guarded by appropriate locks we could make one), but the
  // observation is that promotion failures are quite rare and
  // reducing the number of mark words preserved during them isn't a
  // high priority.
  inline bool must_be_preserved_for_promotion_failure(oop obj_containing_mark) const;
  inline bool must_be_preserved_with_bias_for_promotion_failure(oop obj_containing_mark) const;

  // Should this header be preserved during a scavenge where CMS is
  // the old generation?
  // (This is basically the same body as must_be_preserved_for_promotion_failure(),
  // but takes the klassOop as argument instead)
  inline bool must_be_preserved_for_cms_scavenge(klassOop klass_of_obj_containing_mark) const;
  inline bool must_be_preserved_with_bias_for_cms_scavenge(klassOop klass_of_obj_containing_mark) const;

  // WARNING: The following routines are used EXCLUSIVELY by
  // synchronization functions. They are not really gc safe.
  // They must get updated if the markOop layout gets changed.
  markOop set_unlocked() const {
    return markOop(value() | unlocked_value);
  }
  bool has_locker() const {
    return ((value() & lock_mask_in_place) == locked_value);
  }
  BasicLock* locker() const {
    assert(has_locker(), "check");
    return (BasicLock*) value();
  }
  bool has_monitor() const {
    return ((value() & monitor_value) != 0);
  }
  ObjectMonitor* monitor() const {
    assert(has_monitor(), "check");
    // Use xor instead of &~ to provide one extra tag-bit check.
    return (ObjectMonitor*) (value() ^ monitor_value);
  }
  bool has_displaced_mark_helper() const {
    return ((value() & unlocked_value) == 0);
  }
  markOop displaced_mark_helper() const {
    assert(has_displaced_mark_helper(), "check");
    intptr_t ptr = (value() & ~monitor_value);
    return *(markOop*)ptr;
  }
  void set_displaced_mark_helper(markOop m) const {
    assert(has_displaced_mark_helper(), "check");
    intptr_t ptr = (value() & ~monitor_value);
    *(markOop*)ptr = m;
  }
  markOop copy_set_hash(intptr_t hash) const {
    intptr_t tmp = value() & (~hash_mask_in_place);
    tmp |= ((hash & hash_mask) << hash_shift);
    return (markOop)tmp;
  }
  // It is only used to be stored into a BasicLock as the
  // indicator that the lock is using a heavyweight monitor.
  static markOop unused_mark() {
    return (markOop) marked_value;
  }
  // The following two functions create the markOop to be
  // stored into the object header; they encode monitor info.
  static markOop encode(BasicLock* lock) {
    return (markOop) lock;
  }
  static markOop encode(ObjectMonitor* monitor) {
    intptr_t tmp = (intptr_t) monitor;
    return (markOop) (tmp | monitor_value);
  }
  static markOop encode(JavaThread* thread, int age, int bias_epoch) {
    intptr_t tmp = (intptr_t) thread;
    assert(UseBiasedLocking && ((tmp & (epoch_mask_in_place | age_mask_in_place | biased_lock_mask_in_place)) == 0), "misaligned JavaThread pointer");
    assert(age <= max_age, "age too large");
    assert(bias_epoch <= max_bias_epoch, "bias epoch too large");
    return (markOop) (tmp | (bias_epoch << epoch_shift) | (age << age_shift) | biased_lock_pattern);
  }

  // used to encode pointers during GC
  markOop clear_lock_bits() { return markOop(value() & ~lock_mask_in_place); }

  // age operations
  markOop set_marked()   { return markOop((value() & ~lock_mask_in_place) | marked_value); }

  int     age()          const { return mask_bits(value() >> age_shift, age_mask); }
  markOop set_age(int v) const {
    assert((v & ~age_mask) == 0, "shouldn't overflow age field");
    return markOop((value() & ~age_mask_in_place) | (((intptr_t)v & age_mask) << age_shift));
  }
  markOop incr_age()     const { return age() == max_age ? markOop(this) : set_age(age() + 1); }

  // hash operations
  intptr_t hash() const {
    return mask_bits(value() >> hash_shift, hash_mask);
  }

  bool has_no_hash() const {
    return hash() == no_hash;
  }

  // Prototype mark for initialization
  static markOop prototype() {
    return markOop( no_hash_in_place | no_lock_in_place );
  }

  // Helper function for restoration of unmarked mark oops during GC
  static inline markOop prototype_for_object(oop obj);

  // Debugging
  void print_on(outputStream* st) const;

  // Prepare address of oop for placement into mark
  inline static markOop encode_pointer_as_mark(void* p) { return markOop(p)->set_marked(); }

  // Recover address of oop from encoded form used in mark
  inline void* decode_pointer() { if (UseBiasedLocking && has_bias_pattern()) return NULL; return clear_lock_bits(); }

  // see the definition in markOop.cpp for the gory details
  bool should_not_be_cached() const;

  // These markOops indicate cms free chunk blocks and not objects.
  // In 64 bit, the markOop is set to distinguish them from oops.
  // These are defined in 32 bit mode for vmStructs.
  const static uintptr_t cms_free_chunk_pattern  = 0x1;

  // Constants for the size field.
  enum { size_shift                = cms_shift + cms_bits,
         size_bits                 = 35    // needed for compressed oops 32G
  };
  // These values are too big for Win64
  const static uintptr_t size_mask = LP64_ONLY(right_n_bits(size_bits))
                                     NOT_LP64(0);
  const static uintptr_t size_mask_in_place =
                                     (address_word)size_mask << size_shift;

#ifdef _LP64
  static markOop cms_free_prototype() {
    return markOop(((intptr_t)prototype() & ~cms_mask_in_place) |
                   ((cms_free_chunk_pattern & cms_mask) << cms_shift));
  }
  uintptr_t cms_encoding() const {
    return mask_bits(value() >> cms_shift, cms_mask);
  }
  bool is_cms_free_chunk() const {
    return is_neutral() &&
           (cms_encoding() & cms_free_chunk_pattern) == cms_free_chunk_pattern;
  }

  size_t get_size() const       { return (size_t)(value() >> size_shift); }
  static markOop set_size_and_free(size_t size) {
    assert((size & ~size_mask) == 0, "shouldn't overflow size field");
    return markOop(((intptr_t)cms_free_prototype() & ~size_mask_in_place) |
                   (((intptr_t)size & size_mask) << size_shift));
  }
#endif // _LP64
};

#endif // SHARE_VM_OOPS_MARKOOP_HPP
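The low-order bit layout documented in the header comment can be exercised with a small standalone sketch like the following. It is illustrative only and not part of the HotSpot sources: it re-declares the age/lock shift and mask constants defined above and decodes the lock bits and age of a neutral 64-bit mark word value.

#include <cassert>
#include <cstdint>
#include <cstdio>

// Low-order mark word bits as described above: [... | age:4 | biased_lock:1 | lock:2]
enum {
  lock_bits           = 2,
  biased_lock_bits    = 1,
  age_bits            = 4,
  age_shift           = lock_bits + biased_lock_bits,               // 3
  lock_mask           = (1 << lock_bits) - 1,                       // 0x3
  biased_lock_mask    = (1 << (lock_bits + biased_lock_bits)) - 1,  // 0x7
  age_mask            = (1 << age_bits) - 1,                        // 0xF
  unlocked_value      = 1,
  biased_lock_pattern = 5
};

int main() {
  // A neutral (unlocked, unbiased) mark word with age 3 and no hash.
  uint64_t mark = ((uint64_t)3 << age_shift) | unlocked_value;

  int  lock   = (int)(mark & lock_mask);
  int  age    = (int)((mark >> age_shift) & age_mask);
  bool biased = (mark & biased_lock_mask) == biased_lock_pattern;

  printf("lock bits = %d, age = %d, biased = %s\n",
         lock, age, biased ? "yes" : "no");
  assert(lock == unlocked_value && age == 3 && !biased);
  return 0;
}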
