src/share/vm/oops/markOop.hpp

author       coleenp
date         Mon, 09 Jun 2008 11:51:19 -0400
changeset    622:790e66e5fbac
parent       548:ba764ed4b6f2
child        631:d1605aabd0a1
child        779:6aae2f9d0294
permissions  -rw-r--r--

6687581: Make CMS work with compressed oops
Summary: Make FreeChunk read markword instead of LSB in _klass pointer to indicate that it's a FreeChunk for compressed oops.
Reviewed-by: ysr, jmasa
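
The change makes the mark word, rather than the low bit of the _klass pointer, identify a CMS free chunk when compressed oops are in use; the 64-bit accessors added below (cms_free_prototype(), is_cms_free_chunk(), get_size(), set_size_and_free()) carry the encoding. A minimal sketch of the idea, assuming an LP64 build with UseCompressedOops; the helper names are illustrative, not the actual FreeChunk code:

    // Hypothetical helpers: the mark word, not the low bit of the _klass
    // pointer, identifies a CMS free chunk and carries its size.
    void mark_block_as_free(oop block, size_t size) {
      block->set_mark(markOopDesc::set_size_and_free(size));
    }
    bool block_is_free(oop block) {
      return block->mark()->is_cms_free_chunk();  // no _klass inspection needed
    }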

/*
 * Copyright 1997-2006 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

// The markOop describes the header of an object.
//
// Note that the mark is not a real oop but just a word.
// It is placed in the oop hierarchy for historical reasons.
//
// Bit-format of an object header (most significant first):
//
//  32 bits: unused:0  hash:25 age:4 biased_lock:1 lock:2
//  64 bits: unused:24 hash:31 cms:2 age:4 biased_lock:1 lock:2
//           unused:20 size:35 cms:2 age:4 biased_lock:1 lock:2 (if cms
//                                                               free chunk)
//
//  - hash contains the identity hash value: largest value is
//    31 bits, see os::random().  Also, 64-bit vm's require
//    a hash value no bigger than 32 bits because they will not
//    properly generate a mask larger than that: see library_call.cpp
//    and c1_CodePatterns_sparc.cpp.
//
//  - the biased lock pattern is used to bias a lock toward a given
//    thread. When this pattern is set in the low three bits, the lock
//    is either biased toward a given thread or "anonymously" biased,
//    indicating that it is possible for it to be biased. When the
//    lock is biased toward a given thread, locking and unlocking can
//    be performed by that thread without using atomic operations.
//    When a lock's bias is revoked, it reverts back to the normal
//    locking scheme described below.
//
//    Note that we are overloading the meaning of the "unlocked" state
//    of the header. Because we steal a bit from the age we can
//    guarantee that the bias pattern will never be seen for a truly
//    unlocked object.
//
//    Note also that the biased state contains the age bits normally
//    contained in the object header. Large increases in scavenge
//    times were seen when these bits were absent and an arbitrary age
//    assigned to all biased objects, because they tended to consume a
//    significant fraction of the eden semispaces and were not
//    promoted promptly, causing an increase in the amount of copying
//    performed. The runtime system aligns all JavaThread* pointers to
//    a very large value (currently 128 bytes) to make room for the
//    age bits when biased locking is enabled.
//
//    [JavaThread* | epoch | age | 1 | 01]       lock is biased toward given thread
//    [0           | epoch | age | 1 | 01]       lock is anonymously biased
//
//  - the two lock bits are used to describe three states: locked/unlocked and monitor.
//
//    [ptr             | 00]  locked             ptr points to real header on stack
//    [header      | 0 | 01]  unlocked           regular object header
//    [ptr             | 10]  monitor            inflated lock (header is swapped out)
//    [ptr             | 11]  marked             used by markSweep to mark an object
//                                               not valid at any other time
//
//    We assume that stack/thread pointers have the lowest two bits cleared.
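//
//    For example, on a 32-bit VM a neutral object with identity hash 0x12345
//    and age 3 carries the header value (0x12345 << 7) | (3 << 3) | 0x1:
//    the hash sits above the age bits, and the low three bits 001 mean
//    "not biased, unlocked".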

class BasicLock;
class ObjectMonitor;
class JavaThread;

class markOopDesc: public oopDesc {
 private:
  // Conversion
  uintptr_t value() const { return (uintptr_t) this; }

 public:
  // Constants
  enum { age_bits                 = 4,
         lock_bits                = 2,
         biased_lock_bits         = 1,
         max_hash_bits            = BitsPerWord - age_bits - lock_bits - biased_lock_bits,
         hash_bits                = max_hash_bits > 31 ? 31 : max_hash_bits,
         cms_bits                 = LP64_ONLY(1) NOT_LP64(0),
         epoch_bits               = 2
  };

  // The biased locking code currently requires that the age bits be
  // contiguous to the lock bits. Class data sharing would prefer the
  // hash bits to be lower down to provide more random hash codes for
  // shared read-only symbolOop objects, because these objects' mark
  // words are set to their own address with marked_value in the lock
  // bit, and using lower bits would make their identity hash values
  // more random. However, the performance decision was made in favor
  // of the biased locking code.

  enum { lock_shift               = 0,
         biased_lock_shift        = lock_bits,
         age_shift                = lock_bits + biased_lock_bits,
         cms_shift                = age_shift + age_bits,
         hash_shift               = cms_shift + cms_bits,
         epoch_shift              = hash_shift
  };
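  // On a 32-bit VM (where cms_bits is 0) these evaluate to lock_shift = 0,
  // biased_lock_shift = 2, age_shift = 3, cms_shift = 7, hash_shift = 7 and
  // epoch_shift = 7; the epoch field of a biased header occupies the low end
  // of the bits that hold the identity hash in a neutral header.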

  enum { lock_mask                = right_n_bits(lock_bits),
         lock_mask_in_place       = lock_mask << lock_shift,
         biased_lock_mask         = right_n_bits(lock_bits + biased_lock_bits),
         biased_lock_mask_in_place= biased_lock_mask << lock_shift,
         biased_lock_bit_in_place = 1 << biased_lock_shift,
         age_mask                 = right_n_bits(age_bits),
         age_mask_in_place        = age_mask << age_shift,
         epoch_mask               = right_n_bits(epoch_bits),
         epoch_mask_in_place      = epoch_mask << epoch_shift,
         cms_mask                 = right_n_bits(cms_bits),
         cms_mask_in_place        = cms_mask << cms_shift
#ifndef _WIN64
         ,hash_mask               = right_n_bits(hash_bits),
         hash_mask_in_place       = (address_word)hash_mask << hash_shift
#endif
  };

  // Alignment of JavaThread pointers encoded in object header required by biased locking
  enum { biased_lock_alignment    = 2 << (epoch_shift + epoch_bits)
  };

#ifdef _WIN64
    // These values are too big for Win64
    const static uintptr_t hash_mask = right_n_bits(hash_bits);
    const static uintptr_t hash_mask_in_place  =
                            (address_word)hash_mask << hash_shift;
#endif

  enum { locked_value             = 0,
         unlocked_value           = 1,
         monitor_value            = 2,
         marked_value             = 3,
         biased_lock_pattern      = 5
  };
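  // In terms of the layouts above: locked_value, monitor_value and marked_value
  // are the two-bit patterns 00, 10 and 11, unlocked_value is 01, and
  // biased_lock_pattern (binary 101) is the unlocked pattern with the
  // biased_lock bit also set.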

  enum { no_hash                  = 0 };  // no hash value assigned

  enum { no_hash_in_place         = (address_word)no_hash << hash_shift,
         no_lock_in_place         = unlocked_value
  };

  enum { max_age                  = age_mask };

  enum { max_bias_epoch           = epoch_mask };

  // Biased Locking accessors.
  // These must be checked by all code which calls into the
  // ObjectSynchronizer and other code. The biasing is not understood
  // by the lower-level CAS-based locking code, although the runtime
  // fixes up biased locks to be compatible with it when a bias is
  // revoked.
  bool has_bias_pattern() const {
    return (mask_bits(value(), biased_lock_mask_in_place) == biased_lock_pattern);
  }
  JavaThread* biased_locker() const {
    assert(has_bias_pattern(), "should not call this otherwise");
    return (JavaThread*) ((intptr_t) (mask_bits(value(), ~(biased_lock_mask_in_place | age_mask_in_place | epoch_mask_in_place))));
  }
  // Indicates that the mark has the bias bit set but that it has not
  // yet been biased toward a particular thread
  bool is_biased_anonymously() const {
    return (has_bias_pattern() && (biased_locker() == NULL));
  }
  // Indicates epoch in which this bias was acquired. If the epoch
  // changes due to too many bias revocations occurring, the biases
  // from the previous epochs are all considered invalid.
  int bias_epoch() const {
    assert(has_bias_pattern(), "should not call this otherwise");
    return (mask_bits(value(), epoch_mask_in_place) >> epoch_shift);
  }
  markOop set_bias_epoch(int epoch) {
    assert(has_bias_pattern(), "should not call this otherwise");
    assert((epoch & (~epoch_mask)) == 0, "epoch overflow");
    return markOop(mask_bits(value(), ~epoch_mask_in_place) | (epoch << epoch_shift));
  }
  markOop incr_bias_epoch() {
    return set_bias_epoch((1 + bias_epoch()) & epoch_mask);
  }
  // Prototype mark for initialization
  static markOop biased_locking_prototype() {
    return markOop( biased_lock_pattern );
  }

  // lock accessors (note that these assume lock_shift == 0)
  bool is_locked()   const {
    return (mask_bits(value(), lock_mask_in_place) != unlocked_value);
  }
  bool is_unlocked() const {
    return (mask_bits(value(), biased_lock_mask_in_place) == unlocked_value);
  }
  bool is_marked()   const {
    return (mask_bits(value(), lock_mask_in_place) == marked_value);
  }
  bool is_neutral()  const { return (mask_bits(value(), biased_lock_mask_in_place) == unlocked_value); }

  // Special temporary state of the markOop while being inflated.
  // Code that looks at the mark outside a lock needs to take this into account.
  bool is_being_inflated() const { return (value() == 0); }

  // Distinguished markword value - used when inflating over
  // an existing stacklock.  0 indicates the markword is "BUSY".
  // Lockword mutators that use a LD...CAS idiom should always
  // check for and avoid overwriting a 0 value installed by some
  // other thread.  (They should spin or block instead.  The 0 value
  // is transient and *should* be short-lived).
  static markOop INFLATING() { return (markOop) 0; }    // inflate-in-progress
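  // Code that races with inflation (e.g. in ObjectSynchronizer) therefore
  // re-reads the mark and retries while it equals INFLATING() rather than
  // interpreting the transient 0 value.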

  // Should this header be preserved during GC?
  bool must_be_preserved(oop obj_containing_mark) const {
    if (!UseBiasedLocking)
      return (!is_unlocked() || !has_no_hash());
    return must_be_preserved_with_bias(obj_containing_mark);
  }
  inline bool must_be_preserved_with_bias(oop obj_containing_mark) const;

  // Should this header (including its age bits) be preserved in the
  // case of a promotion failure during scavenge?
  // Note that we special case this situation. We want to avoid
  // calling BiasedLocking::preserve_marks()/restore_marks() (which
  // decrease the number of mark words that need to be preserved
  // during GC) during each scavenge. During scavenges in which there
  // is no promotion failure, we actually don't need to call the above
  // routines at all, since we don't mutate and re-initialize the
  // marks of promoted objects using init_mark(). However, during
  // scavenges which result in promotion failure, we do re-initialize
  // the mark words of objects, meaning that we should have called
  // these mark word preservation routines. Currently there's no good
  // place in which to call them in any of the scavengers (although
  // guarded by appropriate locks we could make one), but the
  // observation is that promotion failures are quite rare and
  // reducing the number of mark words preserved during them isn't a
  // high priority.
  bool must_be_preserved_for_promotion_failure(oop obj_containing_mark) const {
    if (!UseBiasedLocking)
      return (this != prototype());
    return must_be_preserved_with_bias_for_promotion_failure(obj_containing_mark);
  }
  inline bool must_be_preserved_with_bias_for_promotion_failure(oop obj_containing_mark) const;

  // Should this header be preserved during a scavenge where CMS is
  // the old generation?
  // (This is basically the same body as must_be_preserved_for_promotion_failure(),
  // but takes the klassOop as argument instead)
  bool must_be_preserved_for_cms_scavenge(klassOop klass_of_obj_containing_mark) const {
    if (!UseBiasedLocking)
      return (this != prototype());
    return must_be_preserved_with_bias_for_cms_scavenge(klass_of_obj_containing_mark);
  }
  inline bool must_be_preserved_with_bias_for_cms_scavenge(klassOop klass_of_obj_containing_mark) const;

  // WARNING: The following routines are used EXCLUSIVELY by
  // synchronization functions. They are not really gc safe.
  // They must get updated if the markOop layout gets changed.
  markOop set_unlocked() const {
    return markOop(value() | unlocked_value);
  }
  bool has_locker() const {
    return ((value() & lock_mask_in_place) == locked_value);
  }
  BasicLock* locker() const {
    assert(has_locker(), "check");
    return (BasicLock*) value();
  }
  bool has_monitor() const {
    return ((value() & monitor_value) != 0);
  }
  ObjectMonitor* monitor() const {
    assert(has_monitor(), "check");
    // Use xor instead of &~ to provide one extra tag-bit check.
    return (ObjectMonitor*) (value() ^ monitor_value);
  }
  bool has_displaced_mark_helper() const {
    return ((value() & unlocked_value) == 0);
  }
  markOop displaced_mark_helper() const {
    assert(has_displaced_mark_helper(), "check");
    intptr_t ptr = (value() & ~monitor_value);
    return *(markOop*)ptr;
  }
  void set_displaced_mark_helper(markOop m) const {
    assert(has_displaced_mark_helper(), "check");
    intptr_t ptr = (value() & ~monitor_value);
    *(markOop*)ptr = m;
  }
  markOop copy_set_hash(intptr_t hash) const {
    intptr_t tmp = value() & (~hash_mask_in_place);
    tmp |= ((hash & hash_mask) << hash_shift);
    return (markOop)tmp;
  }
  // only used as the value stored into a BasicLock to indicate
  // that the lock is using a heavyweight monitor
  static markOop unused_mark() {
    return (markOop) marked_value;
  }
  // the following two functions create the markOop to be
  // stored into the object header; they encode monitor info
  static markOop encode(BasicLock* lock) {
    return (markOop) lock;
  }
  static markOop encode(ObjectMonitor* monitor) {
    intptr_t tmp = (intptr_t) monitor;
    return (markOop) (tmp | monitor_value);
  }
  static markOop encode(JavaThread* thread, int age, int bias_epoch) {
    intptr_t tmp = (intptr_t) thread;
    assert(UseBiasedLocking && ((tmp & (epoch_mask_in_place | age_mask_in_place | biased_lock_mask_in_place)) == 0), "misaligned JavaThread pointer");
    assert(age <= max_age, "age too large");
    assert(bias_epoch <= max_bias_epoch, "bias epoch too large");
    return (markOop) (tmp | (bias_epoch << epoch_shift) | (age << age_shift) | biased_lock_pattern);
  }

  // used to encode pointers during GC
  markOop clear_lock_bits() { return markOop(value() & ~lock_mask_in_place); }

  // age operations
  markOop set_marked()   { return markOop((value() & ~lock_mask_in_place) | marked_value); }

  int     age()               const { return mask_bits(value() >> age_shift, age_mask); }
  markOop set_age(int v) const {
    assert((v & ~age_mask) == 0, "shouldn't overflow age field");
    return markOop((value() & ~age_mask_in_place) | (((intptr_t)v & age_mask) << age_shift));
  }
  markOop incr_age()          const { return age() == max_age ? markOop(this) : set_age(age() + 1); }

  // hash operations
  intptr_t hash() const {
    return mask_bits(value() >> hash_shift, hash_mask);
  }

  bool has_no_hash() const {
    return hash() == no_hash;
  }

  // Prototype mark for initialization
  static markOop prototype() {
    return markOop( no_hash_in_place | no_lock_in_place );
  }

  // Helper function for restoration of unmarked mark oops during GC
  static inline markOop prototype_for_object(oop obj);

  // Debugging
  void print_on(outputStream* st) const;

  // Prepare address of oop for placement into mark
  inline static markOop encode_pointer_as_mark(void* p) { return markOop(p)->set_marked(); }

  // Recover address of oop from encoded form used in mark
  inline void* decode_pointer() { if (UseBiasedLocking && has_bias_pattern()) return NULL; return clear_lock_bits(); }

  // see the definition in markOop.cpp for the gory details
  bool should_not_be_cached() const;

  // These markOops indicate cms free chunk blocks and not objects.
  // In 64 bit, the markOop is set to distinguish them from oops.
  // These are defined in 32 bit mode for vmStructs.
  const static uintptr_t cms_free_chunk_pattern  = 0x1;

  // Constants for the size field.
  enum { size_shift                = cms_shift + cms_bits,
         size_bits                 = 35    // needed for compressed oops (32G)
       };
  // These values are too big for Win64
  const static uintptr_t size_mask = LP64_ONLY(right_n_bits(size_bits))
                                     NOT_LP64(0);
  const static uintptr_t size_mask_in_place =
                                     (address_word)size_mask << size_shift;

#ifdef _LP64
  static markOop cms_free_prototype() {
    return markOop(((intptr_t)prototype() & ~cms_mask_in_place) |
                   ((cms_free_chunk_pattern & cms_mask) << cms_shift));
  }
  uintptr_t cms_encoding() const {
    return mask_bits(value() >> cms_shift, cms_mask);
  }
  bool is_cms_free_chunk() const {
    return is_neutral() &&
           (cms_encoding() & cms_free_chunk_pattern) == cms_free_chunk_pattern;
  }

  size_t get_size() const       { return (size_t)(value() >> size_shift); }
  static markOop set_size_and_free(size_t size) {
    assert((size & ~size_mask) == 0, "shouldn't overflow size field");
    return markOop(((intptr_t)cms_free_prototype() & ~size_mask_in_place) |
                   (((intptr_t)size & size_mask) << size_shift));
  }
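  // For example, set_size_and_free(64) yields a neutral mark for which
  // is_cms_free_chunk() returns true and get_size() returns 64.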
#endif // _LP64
};
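
To show how the accessors compose, here is an illustrative sketch (not part of markOop.hpp; the function is made up for this note) that classifies a header the way runtime code typically does:

    // Illustrative only: dispatch on a mark word using the markOopDesc
    // accessors declared above.
    static void describe_lock_state(oop obj, outputStream* st) {
      markOop m = obj->mark();
      if (m->has_bias_pattern()) {
        // owner (NULL if anonymously biased) is m->biased_locker()
        st->print_cr("biased, epoch %d, age %d", m->bias_epoch(), m->age());
      } else if (m->has_locker()) {
        st->print_cr("stack-locked, displaced header is on the owner's stack");
      } else if (m->has_monitor()) {
        st->print_cr("inflated, ObjectMonitor holds the displaced header");
      } else if (m->is_marked()) {
        st->print_cr("marked by GC");
      } else {
        st->print_cr("neutral, age %d, %s", m->age(),
                     m->has_no_hash() ? "no identity hash yet" : "identity hash assigned");
      }
    }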
