src/share/vm/oops/oop.inline.hpp

author      johnc
date        Thu, 07 Apr 2011 09:53:20 -0700
changeset   2781:e1162778c1c8
parent      2658:c7f3d0b4570f
child       3131:b0efc7ee3b31
permissions -rw-r--r--

7009266: G1: assert(obj->is_oop_or_null(true )) failed: Error
Summary: A referent object that is only weakly reachable at the start of concurrent marking, but is re-attached to the strongly reachable object graph during marking, may not be marked as live. This can cause the Reference object to be processed prematurely and leave dangling pointers to the referent. Implement a read barrier for the java.lang.ref.Reference::referent field by intrinsifying the Reference.get() method, and by intercepting accesses through JNI, reflection, and Unsafe, so that whenever a non-null referent is read it is also logged in an SATB buffer.
Reviewed-by: kvn, iveresov, never, tonyp, dholmes
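
In essence (an editorial sketch, not code from this changeset; load_referent and satb_enqueue are illustrative names, not HotSpot APIs), every read of the referent field now behaves as if it were:

    // Conceptual shape of the referent read barrier; in the real change this
    // is emitted by the Reference.get() intrinsic and by the JNI, reflection,
    // and Unsafe interception paths.
    oop referent = load_referent(reference);
    if (UseG1GC && referent != NULL) {
      satb_enqueue(referent);  // record the referent in the thread's SATB buffer
    }
    return referent;

Logging the referent keeps an object that becomes strongly reachable during marking alive, so its Reference is not processed while real pointers to the referent remain.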

/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OOPS_OOP_INLINE_HPP
#define SHARE_VM_OOPS_OOP_INLINE_HPP

#include "gc_implementation/shared/ageTable.hpp"
#include "gc_implementation/shared/markSweep.inline.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/barrierSet.inline.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/compactingPermGenGen.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/generation.hpp"
#include "memory/permGen.hpp"
#include "memory/specialized_oop_closures.hpp"
#include "oops/arrayKlass.hpp"
#include "oops/arrayOop.hpp"
#include "oops/klass.hpp"
#include "oops/klassOop.hpp"
#include "oops/markOop.inline.hpp"
#include "oops/oop.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#ifdef TARGET_ARCH_x86
# include "bytes_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "bytes_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "bytes_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "bytes_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "bytes_ppc.hpp"
#endif

// Implementation of all inlined member functions defined in oop.hpp
// We need a separate file to avoid circular references

inline void oopDesc::release_set_mark(markOop m) {
  OrderAccess::release_store_ptr(&_mark, m);
}

inline markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
  return (markOop) Atomic::cmpxchg_ptr(new_mark, &_mark, old_mark);
}

inline klassOop oopDesc::klass() const {
  if (UseCompressedOops) {
    return (klassOop)decode_heap_oop_not_null(_metadata._compressed_klass);
  } else {
    return _metadata._klass;
  }
}

inline klassOop oopDesc::klass_or_null() const volatile {
  // can be NULL in CMS
  if (UseCompressedOops) {
    return (klassOop)decode_heap_oop(_metadata._compressed_klass);
  } else {
    return _metadata._klass;
  }
}

inline int oopDesc::klass_gap_offset_in_bytes() {
  assert(UseCompressedOops, "only applicable to compressed headers");
  return oopDesc::klass_offset_in_bytes() + sizeof(narrowOop);
}

inline oop* oopDesc::klass_addr() {
  // Only used internally and with CMS and will not work with
  // UseCompressedOops
  assert(!UseCompressedOops, "only supported with uncompressed oops");
  return (oop*) &_metadata._klass;
}

inline narrowOop* oopDesc::compressed_klass_addr() {
  assert(UseCompressedOops, "only called by compressed oops");
  return (narrowOop*) &_metadata._compressed_klass;
}

inline void oopDesc::set_klass(klassOop k) {
  // since klasses are promoted no store check is needed
  assert(Universe::is_bootstrapping() || k != NULL, "must be a real klassOop");
  assert(Universe::is_bootstrapping() || k->is_klass(), "not a klassOop");
  if (UseCompressedOops) {
    oop_store_without_check(compressed_klass_addr(), (oop)k);
  } else {
    oop_store_without_check(klass_addr(), (oop) k);
  }
}

inline int oopDesc::klass_gap() const {
  return *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes());
}

inline void oopDesc::set_klass_gap(int v) {
  if (UseCompressedOops) {
    *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes()) = v;
  }
}

inline void oopDesc::set_klass_to_list_ptr(oop k) {
  // This is only to be used during GC, for from-space objects, so no
  // barrier is needed.
  if (UseCompressedOops) {
    _metadata._compressed_klass = encode_heap_oop(k);  // may be null (parnew overflow handling)
  } else {
    _metadata._klass = (klassOop)k;
  }
}

inline void   oopDesc::init_mark()                 { set_mark(markOopDesc::prototype_for_object(this)); }
inline Klass* oopDesc::blueprint()           const { return klass()->klass_part(); }

inline bool oopDesc::is_a(klassOop k)        const { return blueprint()->is_subtype_of(k); }

inline bool oopDesc::is_instance()           const { return blueprint()->oop_is_instance(); }
inline bool oopDesc::is_instanceMirror()     const { return blueprint()->oop_is_instanceMirror(); }
inline bool oopDesc::is_instanceRef()        const { return blueprint()->oop_is_instanceRef(); }
inline bool oopDesc::is_array()              const { return blueprint()->oop_is_array(); }
inline bool oopDesc::is_objArray()           const { return blueprint()->oop_is_objArray(); }
inline bool oopDesc::is_typeArray()          const { return blueprint()->oop_is_typeArray(); }
inline bool oopDesc::is_javaArray()          const { return blueprint()->oop_is_javaArray(); }
inline bool oopDesc::is_klass()              const { return blueprint()->oop_is_klass(); }
inline bool oopDesc::is_thread()             const { return blueprint()->oop_is_thread(); }
inline bool oopDesc::is_method()             const { return blueprint()->oop_is_method(); }
inline bool oopDesc::is_constMethod()        const { return blueprint()->oop_is_constMethod(); }
inline bool oopDesc::is_methodData()         const { return blueprint()->oop_is_methodData(); }
inline bool oopDesc::is_constantPool()       const { return blueprint()->oop_is_constantPool(); }
inline bool oopDesc::is_constantPoolCache()  const { return blueprint()->oop_is_constantPoolCache(); }
inline bool oopDesc::is_compiledICHolder()   const { return blueprint()->oop_is_compiledICHolder(); }

inline void*     oopDesc::field_base(int offset)        const { return (void*)&((char*)this)[offset]; }

template <class T> inline T* oopDesc::obj_field_addr(int offset) const { return (T*)field_base(offset); }
inline jbyte*    oopDesc::byte_field_addr(int offset)   const { return (jbyte*)   field_base(offset); }
inline jchar*    oopDesc::char_field_addr(int offset)   const { return (jchar*)   field_base(offset); }
inline jboolean* oopDesc::bool_field_addr(int offset)   const { return (jboolean*)field_base(offset); }
inline jint*     oopDesc::int_field_addr(int offset)    const { return (jint*)    field_base(offset); }
inline jshort*   oopDesc::short_field_addr(int offset)  const { return (jshort*)  field_base(offset); }
inline jlong*    oopDesc::long_field_addr(int offset)   const { return (jlong*)   field_base(offset); }
inline jfloat*   oopDesc::float_field_addr(int offset)  const { return (jfloat*)  field_base(offset); }
inline jdouble*  oopDesc::double_field_addr(int offset) const { return (jdouble*) field_base(offset); }
inline address*  oopDesc::address_field_addr(int offset) const { return (address*) field_base(offset); }

// Functions for getting and setting oops within instance objects.
// If the oops are compressed, the type passed to these overloaded functions
// is narrowOop.  All functions are overloaded so they can be called by
// template functions without conditionals (the compiler instantiates via
// the right type and inlines the appropriate code).

inline bool oopDesc::is_null(oop obj)       { return obj == NULL; }
inline bool oopDesc::is_null(narrowOop obj) { return obj == 0; }

// Algorithm for encoding and decoding oops from 64 bit pointers to 32 bit
// offset from the heap base.  Saving the check for null can save instructions
// in inner GC loops so these are separated.

inline bool check_obj_alignment(oop obj) {
  return (intptr_t)obj % MinObjAlignmentInBytes == 0;
}

inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
  assert(!is_null(v), "oop value can never be zero");
  assert(check_obj_alignment(v), "Address not aligned");
  assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
  address base = Universe::narrow_oop_base();
  int    shift = Universe::narrow_oop_shift();
  uint64_t  pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
  assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
  uint64_t result = pd >> shift;
  assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
  assert(decode_heap_oop(result) == v, "reversibility");
  return (narrowOop)result;
}

inline narrowOop oopDesc::encode_heap_oop(oop v) {
  return (is_null(v)) ? (narrowOop)0 : encode_heap_oop_not_null(v);
}

inline oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
  assert(!is_null(v), "narrow oop value can never be zero");
  address base = Universe::narrow_oop_base();
  int    shift = Universe::narrow_oop_shift();
  oop result = (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
  assert(check_obj_alignment(result), err_msg("address not aligned: " PTR_FORMAT, (void*) result));
  return result;
}

inline oop oopDesc::decode_heap_oop(narrowOop v) {
  return is_null(v) ? (oop)NULL : decode_heap_oop_not_null(v);
}

inline oop oopDesc::decode_heap_oop_not_null(oop v) { return v; }
inline oop oopDesc::decode_heap_oop(oop v)  { return v; }
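
// Worked example (editorial note, not part of the original source): with a
// zero-based compressed heap, narrow_oop_base() == NULL and
// narrow_oop_shift() == LogMinObjAlignmentInBytes (3 for 8-byte alignment).
// An oop at address 0x0000000100000048 then encodes as
//   (0x100000048 - 0) >> 3 == 0x20000009
// and decodes back as
//   0 + ((uintptr_t)0x20000009 << 3) == 0x100000048.
// When the heap cannot be placed low enough for zero-based encoding, base is
// the heap start and the same arithmetic applies relative to that base.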

// Load an oop out of the Java heap as is without decoding.
// Called by GC to check for null before decoding.
inline oop       oopDesc::load_heap_oop(oop* p)          { return *p; }
inline narrowOop oopDesc::load_heap_oop(narrowOop* p)    { return *p; }

// Load and decode an oop out of the Java heap into a wide oop.
inline oop oopDesc::load_decode_heap_oop_not_null(oop* p)       { return *p; }
inline oop oopDesc::load_decode_heap_oop_not_null(narrowOop* p) {
  return decode_heap_oop_not_null(*p);
}

// Load and decode an oop out of the heap accepting null
inline oop oopDesc::load_decode_heap_oop(oop* p) { return *p; }
inline oop oopDesc::load_decode_heap_oop(narrowOop* p) {
  return decode_heap_oop(*p);
}

// Store already encoded heap oop into the heap.
inline void oopDesc::store_heap_oop(oop* p, oop v)                 { *p = v; }
inline void oopDesc::store_heap_oop(narrowOop* p, narrowOop v)     { *p = v; }

// Encode and store a heap oop.
inline void oopDesc::encode_store_heap_oop_not_null(narrowOop* p, oop v) {
  *p = encode_heap_oop_not_null(v);
}
inline void oopDesc::encode_store_heap_oop_not_null(oop* p, oop v) { *p = v; }

// Encode and store a heap oop allowing for null.
inline void oopDesc::encode_store_heap_oop(narrowOop* p, oop v) {
  *p = encode_heap_oop(v);
}
inline void oopDesc::encode_store_heap_oop(oop* p, oop v) { *p = v; }

// Store heap oop as is for volatile fields.
inline void oopDesc::release_store_heap_oop(volatile oop* p, oop v) {
  OrderAccess::release_store_ptr(p, v);
}
inline void oopDesc::release_store_heap_oop(volatile narrowOop* p,
                                            narrowOop v) {
  OrderAccess::release_store(p, v);
}

inline void oopDesc::release_encode_store_heap_oop_not_null(
                                                volatile narrowOop* p, oop v) {
  // heap oop is not pointer sized.
  OrderAccess::release_store(p, encode_heap_oop_not_null(v));
}
inline void oopDesc::release_encode_store_heap_oop_not_null(
                                                      volatile oop* p, oop v) {
  OrderAccess::release_store_ptr(p, v);
}
inline void oopDesc::release_encode_store_heap_oop(volatile oop* p,
                                                           oop v) {
  OrderAccess::release_store_ptr(p, v);
}
inline void oopDesc::release_encode_store_heap_oop(
                                                volatile narrowOop* p, oop v) {
  OrderAccess::release_store(p, encode_heap_oop(v));
}

// These functions are only used to exchange oop fields in instances,
// not headers.
inline oop oopDesc::atomic_exchange_oop(oop exchange_value, volatile HeapWord *dest) {
  if (UseCompressedOops) {
    // encode exchange value from oop to T
    narrowOop val = encode_heap_oop(exchange_value);
    narrowOop old = (narrowOop)Atomic::xchg(val, (narrowOop*)dest);
    // decode old from T to oop
    return decode_heap_oop(old);
  } else {
    return (oop)Atomic::xchg_ptr(exchange_value, (oop*)dest);
  }
}

inline oop oopDesc::atomic_compare_exchange_oop(oop exchange_value,
                                                volatile HeapWord *dest,
                                                oop compare_value) {
  if (UseCompressedOops) {
    // encode exchange and compare value from oop to T
    narrowOop val = encode_heap_oop(exchange_value);
    narrowOop cmp = encode_heap_oop(compare_value);

    narrowOop old = (narrowOop) Atomic::cmpxchg(val, (narrowOop*)dest, cmp);
    // decode old from T to oop
    return decode_heap_oop(old);
  } else {
    return (oop)Atomic::cmpxchg_ptr(exchange_value, (oop*)dest, compare_value);
  }
}
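
// Usage sketch (editorial note): these are the kind of primitives behind
// lock-free oop updates such as Unsafe.compareAndSwapObject.  A caller that
// wants to claim a field exactly once could write something like
//   oop witness = oopDesc::atomic_compare_exchange_oop(new_val, field_addr, NULL);
//   bool claimed = (witness == NULL);  // CAS succeeded iff the old value matched
// where 'field_addr' and 'new_val' are illustrative names, not HotSpot APIs.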

// In order to put or get a field out of an instance, one must first check
// whether the field is compressed and, if so, encode or decode it.
inline oop oopDesc::obj_field(int offset) const {
  return UseCompressedOops ?
    load_decode_heap_oop(obj_field_addr<narrowOop>(offset)) :
    load_decode_heap_oop(obj_field_addr<oop>(offset));
}
inline void oopDesc::obj_field_put(int offset, oop value) {
  UseCompressedOops ? oop_store(obj_field_addr<narrowOop>(offset), value) :
                      oop_store(obj_field_addr<oop>(offset),       value);
}
inline void oopDesc::obj_field_raw_put(int offset, oop value) {
  UseCompressedOops ?
    encode_store_heap_oop(obj_field_addr<narrowOop>(offset), value) :
    encode_store_heap_oop(obj_field_addr<oop>(offset),       value);
}
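
// Illustrative usage (editorial note; 'value_offset' is a hypothetical field
// offset, not something defined in this file).  Callers never pick an overload
// themselves; the UseCompressedOops check above does it for them:
//   oop v = str->obj_field(value_offset);        // decodes a narrowOop if needed
//   str->obj_field_put(value_offset, new_v);     // encodes and applies write barriers
//   str->obj_field_raw_put(value_offset, new_v); // encodes but skips barriers (GC-internal)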

inline jbyte oopDesc::byte_field(int offset) const                  { return (jbyte) *byte_field_addr(offset);    }
inline void oopDesc::byte_field_put(int offset, jbyte contents)     { *byte_field_addr(offset) = (jint) contents; }

inline jboolean oopDesc::bool_field(int offset) const               { return (jboolean) *bool_field_addr(offset); }
inline void oopDesc::bool_field_put(int offset, jboolean contents)  { *bool_field_addr(offset) = (jint) contents; }

inline jchar oopDesc::char_field(int offset) const                  { return (jchar) *char_field_addr(offset);    }
inline void oopDesc::char_field_put(int offset, jchar contents)     { *char_field_addr(offset) = (jint) contents; }

inline jint oopDesc::int_field(int offset) const                    { return *int_field_addr(offset);        }
inline void oopDesc::int_field_put(int offset, jint contents)       { *int_field_addr(offset) = contents;    }

inline jshort oopDesc::short_field(int offset) const                { return (jshort) *short_field_addr(offset);  }
inline void oopDesc::short_field_put(int offset, jshort contents)   { *short_field_addr(offset) = (jint) contents;}

inline jlong oopDesc::long_field(int offset) const                  { return *long_field_addr(offset);       }
inline void oopDesc::long_field_put(int offset, jlong contents)     { *long_field_addr(offset) = contents;   }

inline jfloat oopDesc::float_field(int offset) const                { return *float_field_addr(offset);      }
inline void oopDesc::float_field_put(int offset, jfloat contents)   { *float_field_addr(offset) = contents;  }

inline jdouble oopDesc::double_field(int offset) const              { return *double_field_addr(offset);     }
inline void oopDesc::double_field_put(int offset, jdouble contents) { *double_field_addr(offset) = contents; }

inline address oopDesc::address_field(int offset) const              { return *address_field_addr(offset);     }
inline void oopDesc::address_field_put(int offset, address contents) { *address_field_addr(offset) = contents; }

inline oop oopDesc::obj_field_acquire(int offset) const {
  return UseCompressedOops ?
             decode_heap_oop((narrowOop)
               OrderAccess::load_acquire(obj_field_addr<narrowOop>(offset)))
           : decode_heap_oop((oop)
               OrderAccess::load_ptr_acquire(obj_field_addr<oop>(offset)));
}
inline void oopDesc::release_obj_field_put(int offset, oop value) {
  UseCompressedOops ?
    oop_store((volatile narrowOop*)obj_field_addr<narrowOop>(offset), value) :
    oop_store((volatile oop*)      obj_field_addr<oop>(offset),       value);
}

inline jbyte oopDesc::byte_field_acquire(int offset) const                  { return OrderAccess::load_acquire(byte_field_addr(offset));     }
inline void oopDesc::release_byte_field_put(int offset, jbyte contents)     { OrderAccess::release_store(byte_field_addr(offset), contents); }

inline jboolean oopDesc::bool_field_acquire(int offset) const               { return OrderAccess::load_acquire(bool_field_addr(offset));     }
inline void oopDesc::release_bool_field_put(int offset, jboolean contents)  { OrderAccess::release_store(bool_field_addr(offset), contents); }

inline jchar oopDesc::char_field_acquire(int offset) const                  { return OrderAccess::load_acquire(char_field_addr(offset));     }
inline void oopDesc::release_char_field_put(int offset, jchar contents)     { OrderAccess::release_store(char_field_addr(offset), contents); }

inline jint oopDesc::int_field_acquire(int offset) const                    { return OrderAccess::load_acquire(int_field_addr(offset));      }
inline void oopDesc::release_int_field_put(int offset, jint contents)       { OrderAccess::release_store(int_field_addr(offset), contents);  }

inline jshort oopDesc::short_field_acquire(int offset) const                { return (jshort)OrderAccess::load_acquire(short_field_addr(offset)); }
inline void oopDesc::release_short_field_put(int offset, jshort contents)   { OrderAccess::release_store(short_field_addr(offset), contents);     }

inline jlong oopDesc::long_field_acquire(int offset) const                  { return OrderAccess::load_acquire(long_field_addr(offset));       }
inline void oopDesc::release_long_field_put(int offset, jlong contents)     { OrderAccess::release_store(long_field_addr(offset), contents);   }

inline jfloat oopDesc::float_field_acquire(int offset) const                { return OrderAccess::load_acquire(float_field_addr(offset));      }
inline void oopDesc::release_float_field_put(int offset, jfloat contents)   { OrderAccess::release_store(float_field_addr(offset), contents);  }

inline jdouble oopDesc::double_field_acquire(int offset) const              { return OrderAccess::load_acquire(double_field_addr(offset));     }
inline void oopDesc::release_double_field_put(int offset, jdouble contents) { OrderAccess::release_store(double_field_addr(offset), contents); }

inline address oopDesc::address_field_acquire(int offset) const             { return (address) OrderAccess::load_ptr_acquire(address_field_addr(offset)); }
inline void oopDesc::release_address_field_put(int offset, address contents) { OrderAccess::release_store_ptr(address_field_addr(offset), contents); }

inline int oopDesc::size_given_klass(Klass* klass)  {
  int lh = klass->layout_helper();
  int s;

  // lh is now a value computed at class initialization that may hint
  // at the size.  For instances, this is positive and equal to the
  // size.  For arrays, this is negative and provides log2 of the
  // array element size.  For other oops, it is zero and thus requires
  // a virtual call.
  //
  // We go to all this trouble because the size computation is at the
  // heart of phase 2 of mark-compaction, and called for every object,
  // alive or dead.  So the speed here is equal in importance to the
  // speed of allocation.

  if (lh > Klass::_lh_neutral_value) {
    if (!Klass::layout_helper_needs_slow_path(lh)) {
      s = lh >> LogHeapWordSize;  // deliver size scaled by wordSize
    } else {
      s = klass->oop_size(this);
    }
  } else if (lh <= Klass::_lh_neutral_value) {
    // The most common case is instances; fall through if so.
    if (lh < Klass::_lh_neutral_value) {
      // Second most common case is arrays.  We have to fetch the
      // length of the array, shift (multiply) it appropriately,
      // up to wordSize, add the header, and align to object size.
      size_t size_in_bytes;
#ifdef _M_IA64
      // The Windows Itanium Aug 2002 SDK hoists this load above
      // the check for s < 0.  An oop at the end of the heap will
      // cause an access violation if this load is performed on a non
      // array oop.  Making the reference volatile prohibits this.
      volatile int *array_length;
      array_length = (volatile int *)( (intptr_t)this +
                          arrayOopDesc::length_offset_in_bytes() );
      assert(*array_length > 0, "Integer arithmetic problem somewhere");
      // Put into size_t to avoid overflow.
      size_in_bytes = (size_t) *array_length;
      size_in_bytes = size_in_bytes << Klass::layout_helper_log2_element_size(lh);
#else
      size_t array_length = (size_t) ((arrayOop)this)->length();
      size_in_bytes = array_length << Klass::layout_helper_log2_element_size(lh);
#endif
      size_in_bytes += Klass::layout_helper_header_size(lh);

      // This code could be simplified, but by keeping array_header_in_bytes
      // in units of bytes and doing it this way we can round up just once,
      // skipping the intermediate round to HeapWordSize.  Cast the result
      // of round_to to size_t to guarantee unsigned division == right shift.
      s = (int)((size_t)round_to(size_in_bytes, MinObjAlignmentInBytes) /
        HeapWordSize);

      // UseParNewGC, UseParallelGC and UseG1GC can change the length field
      // of an "old copy" of an object array in the young gen so it indicates
      // the grey portion of an already copied array. This will cause the first
      // disjunct below to fail if the two comparands are computed across such
      // a concurrent change.
      // UseParNewGC also runs with promotion labs (which look like int
      // filler arrays) which are subject to changing their declared size
      // when finally retiring a PLAB; this also can cause the first disjunct
      // to fail for another worker thread that is concurrently walking the block
      // offset table. Both these invariant failures are benign for their
      // current uses; we relax the assertion checking to cover these two cases below:
      //     is_objArray() && is_forwarded()   // covers first scenario above
      //  || is_typeArray()                    // covers second scenario above
      // If and when UseParallelGC uses the same obj array oop stealing/chunking
      // technique, we will need to suitably modify the assertion.
      assert((s == klass->oop_size(this)) ||
             (Universe::heap()->is_gc_active() &&
              ((is_typeArray() && UseParNewGC) ||
               (is_objArray()  && is_forwarded() && (UseParNewGC || UseParallelGC || UseG1GC)))),
             "wrong array object size");
    } else {
      // Must be zero, so bite the bullet and take the virtual call.
      s = klass->oop_size(this);
    }
  }

  assert(s % MinObjAlignment == 0, "alignment check");
  assert(s > 0, "Bad size calculated");
  return s;
}
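
// Worked example (editorial note): for a Java int[] of length 10 on LP64 with
// compressed oops, the array layout helper gives log2_element_size == 2 and a
// 16-byte header, so
//   size_in_bytes = (10 << 2) + 16 = 56
// which, rounded to an 8-byte MinObjAlignmentInBytes, is 56 bytes = 7 heap
// words.  The exact header size depends on the build configuration, so treat
// the numbers as illustrative rather than normative.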

inline int oopDesc::size()  {
  return size_given_klass(blueprint());
}

inline bool oopDesc::is_parsable() {
  return blueprint()->oop_is_parsable(this);
}

inline bool oopDesc::is_conc_safe() {
  return blueprint()->oop_is_conc_safe(this);
}

inline void update_barrier_set(void* p, oop v) {
  assert(oopDesc::bs() != NULL, "Uninitialized bs in oop!");
  oopDesc::bs()->write_ref_field(p, v);
}

template <class T> inline void update_barrier_set_pre(T* p, oop v) {
  oopDesc::bs()->write_ref_field_pre(p, v);
}

template <class T> inline void oop_store(T* p, oop v) {
  if (always_do_update_barrier) {
    oop_store((volatile T*)p, v);
  } else {
    update_barrier_set_pre(p, v);
    oopDesc::encode_store_heap_oop(p, v);
    update_barrier_set((void*)p, v);  // cast away type
  }
}

template <class T> inline void oop_store(volatile T* p, oop v) {
  update_barrier_set_pre((T*)p, v);   // cast away volatile
  // Used by release_obj_field_put, so use release_store_ptr.
  oopDesc::release_encode_store_heap_oop(p, v);
  update_barrier_set((void*)p, v);    // cast away type
}
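
// Barrier sequence note (editorial): every barriered oop store above follows
// the same three-step shape, which is what the G1 SATB machinery relies on:
//   update_barrier_set_pre(p, v);         // pre-barrier: G1 logs the *old*
//                                         // value for snapshot-at-the-beginning
//   oopDesc::encode_store_heap_oop(p, v); // the store itself (narrow or wide)
//   update_barrier_set(p, v);             // post-barrier: card mark / remembered
//                                         // set update for generational GCs
// Collectors that need no pre-barrier implement write_ref_field_pre as a no-op.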

template <class T> inline void oop_store_without_check(T* p, oop v) {
  // XXX YSR FIX ME!!!
  if (always_do_update_barrier) {
    oop_store(p, v);
  } else {
    assert(!Universe::heap()->barrier_set()->write_ref_needs_barrier(p, v),
           "oop store without store check failed");
    oopDesc::encode_store_heap_oop(p, v);
  }
}

// When it absolutely has to get there.
template <class T> inline void oop_store_without_check(volatile T* p, oop v) {
  // XXX YSR FIX ME!!!
  if (always_do_update_barrier) {
    oop_store(p, v);
  } else {
    assert(!Universe::heap()->barrier_set()->write_ref_needs_barrier((T*)p, v),
           "oop store without store check failed");
    oopDesc::release_encode_store_heap_oop(p, v);
  }
}

// Should replace *addr = oop assignments where addr type depends on UseCompressedOops
// (without having to remember the function name this calls).
inline void oop_store_raw(HeapWord* addr, oop value) {
  if (UseCompressedOops) {
    oopDesc::encode_store_heap_oop((narrowOop*)addr, value);
  } else {
    oopDesc::encode_store_heap_oop((oop*)addr, value);
  }
}

// Used only for markSweep, scavenging
inline bool oopDesc::is_gc_marked() const {
  return mark()->is_marked();
}

inline bool oopDesc::is_locked() const {
  return mark()->is_locked();
}

inline bool oopDesc::is_unlocked() const {
  return mark()->is_unlocked();
}

inline bool oopDesc::has_bias_pattern() const {
  return mark()->has_bias_pattern();
}


// used only for asserts
inline bool oopDesc::is_oop(bool ignore_mark_word) const {
  oop obj = (oop) this;
  if (!check_obj_alignment(obj)) return false;
  if (!Universe::heap()->is_in_reserved(obj)) return false;
  // obj is aligned and accessible in heap
  // try to find metaclass cycle safely without seg faulting on bad input
  // we should reach klassKlassObj by following klass link at most 3 times
  for (int i = 0; i < 3; i++) {
    obj = obj->klass_or_null();
    // klass should be aligned and in permspace
    if (!check_obj_alignment(obj)) return false;
    if (!Universe::heap()->is_in_permanent(obj)) return false;
  }
  if (obj != Universe::klassKlassObj()) {
    // During a dump, the _klassKlassObj moved to a shared space.
    if (DumpSharedSpaces && Universe::klassKlassObj()->is_shared()) {
      return true;
    }
    return false;
  }

  // Header verification: the mark is typically non-NULL. If we're
  // at a safepoint, it must not be null.
  // Outside of a safepoint, the header could be changing (for example,
  // another thread could be inflating a lock on this object).
  if (ignore_mark_word) {
    return true;
  }
  if (mark() != NULL) {
    return true;
  }
  return !SafepointSynchronize::is_at_safepoint();
}


// used only for asserts
inline bool oopDesc::is_oop_or_null(bool ignore_mark_word) const {
  return this == NULL ? true : is_oop(ignore_mark_word);
}

#ifndef PRODUCT
// used only for asserts
inline bool oopDesc::is_unlocked_oop() const {
  if (!Universe::heap()->is_in_reserved(this)) return false;
  return mark()->is_unlocked();
}
#endif // PRODUCT

inline void oopDesc::follow_header() {
  if (UseCompressedOops) {
    MarkSweep::mark_and_push(compressed_klass_addr());
  } else {
    MarkSweep::mark_and_push(klass_addr());
  }
}

inline void oopDesc::follow_contents(void) {
  assert (is_gc_marked(), "should be marked");
  blueprint()->oop_follow_contents(this);
}


// Used by scavengers

inline bool oopDesc::is_forwarded() const {
  // The extra heap check is needed since the obj might be locked, in which case the
  // mark would point to a stack location and have the sentinel bit cleared
  return mark()->is_marked();
}

// Used by scavengers
inline void oopDesc::forward_to(oop p) {
  assert(check_obj_alignment(p),
         "forwarding to something not aligned");
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversible");
  set_mark(m);
}

// Used by parallel scavengers
inline bool oopDesc::cas_forward_to(oop p, markOop compare) {
  assert(check_obj_alignment(p),
         "forwarding to something not aligned");
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversible");
  return cas_set_mark(m, compare) == compare;
}
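
// Usage sketch (editorial note, simplified from what real scavengers do): a
// parallel copying collector claims an object by CAS-ing a forwarding pointer
// into the mark word, falling back to the winner's copy when it loses the race.
//   markOop m = obj->mark();
//   if (!m->is_marked()) {                 // not yet forwarded
//     oop copy = copy_to_survivor(obj);    // 'copy_to_survivor' is illustrative
//     if (!obj->cas_forward_to(copy, m)) {
//       copy = obj->forwardee();           // another thread won; use its copy
//     }
//   }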

// Note that the forwardee is not the same thing as the displaced_mark.
// The forwardee is used when copying during scavenge and mark-sweep.
// It does need to clear the low two locking- and GC-related bits.
inline oop oopDesc::forwardee() const {
  return (oop) mark()->decode_pointer();
}

inline bool oopDesc::has_displaced_mark() const {
  return mark()->has_displaced_mark_helper();
}

inline markOop oopDesc::displaced_mark() const {
  return mark()->displaced_mark_helper();
}

inline void oopDesc::set_displaced_mark(markOop m) {
  mark()->set_displaced_mark_helper(m);
}

// The following method needs to be MT safe.
inline int oopDesc::age() const {
  assert(!is_forwarded(), "Attempt to read age from forwarded mark");
  if (has_displaced_mark()) {
    return displaced_mark()->age();
  } else {
    return mark()->age();
  }
}

inline void oopDesc::incr_age() {
  assert(!is_forwarded(), "Attempt to increment age of forwarded mark");
  if (has_displaced_mark()) {
    set_displaced_mark(displaced_mark()->incr_age());
  } else {
    set_mark(mark()->incr_age());
  }
}


inline intptr_t oopDesc::identity_hash() {
  // Fast case; if the object is unlocked and the hash value is set, no locking is needed
  // Note: The mark must be read into local variable to avoid concurrent updates.
  markOop mrk = mark();
  if (mrk->is_unlocked() && !mrk->has_no_hash()) {
    return mrk->hash();
  } else if (mrk->is_marked()) {
    return mrk->hash();
  } else {
    return slow_identity_hash();
  }
}

inline void oopDesc::oop_iterate_header(OopClosure* blk) {
  if (UseCompressedOops) {
    blk->do_oop(compressed_klass_addr());
  } else {
    blk->do_oop(klass_addr());
  }
}

inline void oopDesc::oop_iterate_header(OopClosure* blk, MemRegion mr) {
  if (UseCompressedOops) {
    if (mr.contains(compressed_klass_addr())) {
      blk->do_oop(compressed_klass_addr());
    }
  } else {
    if (mr.contains(klass_addr())) blk->do_oop(klass_addr());
  }
}

inline int oopDesc::adjust_pointers() {
  debug_only(int check_size = size());
  int s = blueprint()->oop_adjust_pointers(this);
  assert(s == check_size, "should be the same");
  return s;
}

inline void oopDesc::adjust_header() {
  if (UseCompressedOops) {
    MarkSweep::adjust_pointer(compressed_klass_addr());
  } else {
    MarkSweep::adjust_pointer(klass_addr());
  }
}

#define OOP_ITERATE_DEFN(OopClosureType, nv_suffix)                        \
                                                                           \
inline int oopDesc::oop_iterate(OopClosureType* blk) {                     \
  SpecializationStats::record_call();                                      \
  return blueprint()->oop_oop_iterate##nv_suffix(this, blk);               \
}                                                                          \
                                                                           \
inline int oopDesc::oop_iterate(OopClosureType* blk, MemRegion mr) {       \
  SpecializationStats::record_call();                                      \
  return blueprint()->oop_oop_iterate##nv_suffix##_m(this, blk, mr);       \
}

ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_DEFN)
ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_DEFN)

#ifndef SERIALGC
#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)              \
                                                                           \
inline int oopDesc::oop_iterate_backwards(OopClosureType* blk) {           \
  SpecializationStats::record_call();                                      \
  return blueprint()->oop_oop_iterate_backwards##nv_suffix(this, blk);     \
}

ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_BACKWARDS_DEFN)
ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_BACKWARDS_DEFN)
#endif // !SERIALGC

inline bool oopDesc::is_shared() const {
  return CompactingPermGenGen::is_shared(this);
}

inline bool oopDesc::is_shared_readonly() const {
  return CompactingPermGenGen::is_shared_readonly(this);
}

inline bool oopDesc::is_shared_readwrite() const {
  return CompactingPermGenGen::is_shared_readwrite(this);
}

#endif // SHARE_VM_OOPS_OOP_INLINE_HPP
