src/share/vm/oops/oop.inline.hpp

author:      never
date:        Sat, 10 Sep 2011 17:29:02 -0700
changeset:   3137:e6b1331a51d2
parent:      3131:b0efc7ee3b31
child:       4037:da91efe96a93
permissions: -rw-r--r--

7086585: make Java field injection more flexible
Reviewed-by: jrose, twisti, kvn, coleenp

/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OOPS_OOP_INLINE_HPP
#define SHARE_VM_OOPS_OOP_INLINE_HPP

#include "gc_implementation/shared/ageTable.hpp"
#include "gc_implementation/shared/markSweep.inline.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/barrierSet.inline.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/compactingPermGenGen.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/generation.hpp"
#include "memory/permGen.hpp"
#include "memory/specialized_oop_closures.hpp"
#include "oops/arrayKlass.hpp"
#include "oops/arrayOop.hpp"
#include "oops/klass.hpp"
#include "oops/klassOop.hpp"
#include "oops/markOop.inline.hpp"
#include "oops/oop.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#ifdef TARGET_ARCH_x86
# include "bytes_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "bytes_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "bytes_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "bytes_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "bytes_ppc.hpp"
#endif

// Implementation of all inlined member functions defined in oop.hpp
// We need a separate file to avoid circular references

inline void oopDesc::release_set_mark(markOop m) {
  OrderAccess::release_store_ptr(&_mark, m);
}

inline markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
  return (markOop) Atomic::cmpxchg_ptr(new_mark, &_mark, old_mark);
}

inline klassOop oopDesc::klass() const {
  if (UseCompressedOops) {
    return (klassOop)decode_heap_oop_not_null(_metadata._compressed_klass);
  } else {
    return _metadata._klass;
  }
}

inline klassOop oopDesc::klass_or_null() const volatile {
  // can be NULL in CMS
  if (UseCompressedOops) {
    return (klassOop)decode_heap_oop(_metadata._compressed_klass);
  } else {
    return _metadata._klass;
  }
}

inline int oopDesc::klass_gap_offset_in_bytes() {
  assert(UseCompressedOops, "only applicable to compressed headers");
  return oopDesc::klass_offset_in_bytes() + sizeof(narrowOop);
}
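
// Illustrative 64-bit header layout with UseCompressedOops (a sketch of
// the fields touched by the accessors here, not a normative description):
//   offset  0: _mark             (markOop, 8 bytes)
//   offset  8: _compressed_klass (narrowOop, 4 bytes)
//   offset 12: klass gap         (4 bytes, reused for the array length or
//                                 a first field where possible)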

inline oop* oopDesc::klass_addr() {
  // Only used internally and by CMS; does not work with
  // UseCompressedOops.
  assert(!UseCompressedOops, "only supported with uncompressed oops");
  return (oop*) &_metadata._klass;
}

inline narrowOop* oopDesc::compressed_klass_addr() {
  assert(UseCompressedOops, "only called by compressed oops");
  return (narrowOop*) &_metadata._compressed_klass;
}

inline void oopDesc::set_klass(klassOop k) {
  // since klasses are promoted no store check is needed
  assert(Universe::is_bootstrapping() || k != NULL, "must be a real klassOop");
  assert(Universe::is_bootstrapping() || k->is_klass(), "not a klassOop");
  if (UseCompressedOops) {
    oop_store_without_check(compressed_klass_addr(), (oop)k);
  } else {
    oop_store_without_check(klass_addr(), (oop) k);
  }
}

inline int oopDesc::klass_gap() const {
  return *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes());
}

inline void oopDesc::set_klass_gap(int v) {
  if (UseCompressedOops) {
    *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes()) = v;
  }
}

inline void oopDesc::set_klass_to_list_ptr(oop k) {
  // This is only to be used during GC, for from-space objects, so no
  // barrier is needed.
  if (UseCompressedOops) {
    _metadata._compressed_klass = encode_heap_oop(k);  // may be null (parnew overflow handling)
  } else {
    _metadata._klass = (klassOop)k;
  }
}

inline void   oopDesc::init_mark()                 { set_mark(markOopDesc::prototype_for_object(this)); }
inline Klass* oopDesc::blueprint()           const { return klass()->klass_part(); }

inline bool oopDesc::is_a(klassOop k)        const { return blueprint()->is_subtype_of(k); }

inline bool oopDesc::is_instance()           const { return blueprint()->oop_is_instance(); }
inline bool oopDesc::is_instanceMirror()     const { return blueprint()->oop_is_instanceMirror(); }
inline bool oopDesc::is_instanceRef()        const { return blueprint()->oop_is_instanceRef(); }
inline bool oopDesc::is_array()              const { return blueprint()->oop_is_array(); }
inline bool oopDesc::is_objArray()           const { return blueprint()->oop_is_objArray(); }
inline bool oopDesc::is_typeArray()          const { return blueprint()->oop_is_typeArray(); }
inline bool oopDesc::is_javaArray()          const { return blueprint()->oop_is_javaArray(); }
inline bool oopDesc::is_klass()              const { return blueprint()->oop_is_klass(); }
inline bool oopDesc::is_thread()             const { return blueprint()->oop_is_thread(); }
inline bool oopDesc::is_method()             const { return blueprint()->oop_is_method(); }
inline bool oopDesc::is_constMethod()        const { return blueprint()->oop_is_constMethod(); }
inline bool oopDesc::is_methodData()         const { return blueprint()->oop_is_methodData(); }
inline bool oopDesc::is_constantPool()       const { return blueprint()->oop_is_constantPool(); }
inline bool oopDesc::is_constantPoolCache()  const { return blueprint()->oop_is_constantPoolCache(); }
inline bool oopDesc::is_compiledICHolder()   const { return blueprint()->oop_is_compiledICHolder(); }

inline void*     oopDesc::field_base(int offset)        const { return (void*)&((char*)this)[offset]; }

template <class T> inline T* oopDesc::obj_field_addr(int offset) const { return (T*)field_base(offset); }
inline jbyte*    oopDesc::byte_field_addr(int offset)   const { return (jbyte*)   field_base(offset); }
inline jchar*    oopDesc::char_field_addr(int offset)   const { return (jchar*)   field_base(offset); }
inline jboolean* oopDesc::bool_field_addr(int offset)   const { return (jboolean*)field_base(offset); }
inline jint*     oopDesc::int_field_addr(int offset)    const { return (jint*)    field_base(offset); }
inline jshort*   oopDesc::short_field_addr(int offset)  const { return (jshort*)  field_base(offset); }
inline jlong*    oopDesc::long_field_addr(int offset)   const { return (jlong*)   field_base(offset); }
inline jfloat*   oopDesc::float_field_addr(int offset)  const { return (jfloat*)  field_base(offset); }
inline jdouble*  oopDesc::double_field_addr(int offset) const { return (jdouble*) field_base(offset); }
inline address*  oopDesc::address_field_addr(int offset) const { return (address*) field_base(offset); }

// Functions for getting and setting oops within instance objects.
// If the oops are compressed, the type passed to these overloaded functions
// is narrowOop.  All functions are overloaded so they can be called by
// template functions without conditionals (the compiler instantiates via
// the right type and inlines the appropriate code).

inline bool oopDesc::is_null(oop obj)       { return obj == NULL; }
inline bool oopDesc::is_null(narrowOop obj) { return obj == 0; }
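
// A minimal sketch of the pattern these overloads enable (the closure body
// and its "scan" function are hypothetical, not part of this file):
//   template <class T> void scan(T* p) {
//     T heap_oop = oopDesc::load_heap_oop(p);
//     if (!oopDesc::is_null(heap_oop)) {
//       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
//       // ... one body serves both T == oop and T == narrowOop
//     }
//   }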

// Algorithm for encoding and decoding oops from 64 bit pointers to 32 bit
// offset from the heap base.  Saving the check for null can save instructions
// in inner GC loops so these are separated.
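//
// A worked example with illustrative numbers, assuming narrow_oop_base == 0
// and narrow_oop_shift == 3 (8-byte object alignment):
//   encode: narrow = (0xd0000008 - 0) >> 3 = 0x1a000001
//   decode: wide   = 0 + ((uintptr_t)0x1a000001 << 3) = 0xd0000008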

inline bool check_obj_alignment(oop obj) {
  return (intptr_t)obj % MinObjAlignmentInBytes == 0;
}

inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
  assert(!is_null(v), "oop value can never be zero");
  assert(check_obj_alignment(v), "Address not aligned");
  assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
  address base = Universe::narrow_oop_base();
  int    shift = Universe::narrow_oop_shift();
  uint64_t  pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
  assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
  uint64_t result = pd >> shift;
  assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
  assert(decode_heap_oop(result) == v, "reversibility");
  return (narrowOop)result;
}

inline narrowOop oopDesc::encode_heap_oop(oop v) {
  return (is_null(v)) ? (narrowOop)0 : encode_heap_oop_not_null(v);
}

inline oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
  assert(!is_null(v), "narrow oop value can never be zero");
  address base = Universe::narrow_oop_base();
  int    shift = Universe::narrow_oop_shift();
  oop result = (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
  assert(check_obj_alignment(result), err_msg("address not aligned: " PTR_FORMAT, (void*) result));
  return result;
}

inline oop oopDesc::decode_heap_oop(narrowOop v) {
  return is_null(v) ? (oop)NULL : decode_heap_oop_not_null(v);
}

inline oop oopDesc::decode_heap_oop_not_null(oop v) { return v; }
inline oop oopDesc::decode_heap_oop(oop v)  { return v; }

// Load an oop out of the Java heap as is without decoding.
// Called by GC to check for null before decoding.
inline oop       oopDesc::load_heap_oop(oop* p)          { return *p; }
inline narrowOop oopDesc::load_heap_oop(narrowOop* p)    { return *p; }

// Load and decode an oop out of the Java heap into a wide oop.
inline oop oopDesc::load_decode_heap_oop_not_null(oop* p)       { return *p; }
inline oop oopDesc::load_decode_heap_oop_not_null(narrowOop* p) {
  return decode_heap_oop_not_null(*p);
}

// Load and decode an oop out of the heap accepting null
inline oop oopDesc::load_decode_heap_oop(oop* p) { return *p; }
inline oop oopDesc::load_decode_heap_oop(narrowOop* p) {
  return decode_heap_oop(*p);
}

// Store already encoded heap oop into the heap.
inline void oopDesc::store_heap_oop(oop* p, oop v)                 { *p = v; }
inline void oopDesc::store_heap_oop(narrowOop* p, narrowOop v)     { *p = v; }

// Encode and store a heap oop.
inline void oopDesc::encode_store_heap_oop_not_null(narrowOop* p, oop v) {
  *p = encode_heap_oop_not_null(v);
}
inline void oopDesc::encode_store_heap_oop_not_null(oop* p, oop v) { *p = v; }

// Encode and store a heap oop allowing for null.
inline void oopDesc::encode_store_heap_oop(narrowOop* p, oop v) {
  *p = encode_heap_oop(v);
}
inline void oopDesc::encode_store_heap_oop(oop* p, oop v) { *p = v; }

// Store heap oop as is for volatile fields.
inline void oopDesc::release_store_heap_oop(volatile oop* p, oop v) {
  OrderAccess::release_store_ptr(p, v);
}
inline void oopDesc::release_store_heap_oop(volatile narrowOop* p,
                                            narrowOop v) {
  OrderAccess::release_store(p, v);
}

inline void oopDesc::release_encode_store_heap_oop_not_null(
                                                volatile narrowOop* p, oop v) {
  // heap oop is not pointer sized.
  OrderAccess::release_store(p, encode_heap_oop_not_null(v));
}

inline void oopDesc::release_encode_store_heap_oop_not_null(
                                                      volatile oop* p, oop v) {
  OrderAccess::release_store_ptr(p, v);
}

inline void oopDesc::release_encode_store_heap_oop(volatile oop* p,
                                                           oop v) {
  OrderAccess::release_store_ptr(p, v);
}
inline void oopDesc::release_encode_store_heap_oop(
                                                volatile narrowOop* p, oop v) {
  OrderAccess::release_store(p, encode_heap_oop(v));
}

// These functions are only used to exchange oop fields in instances,
// not headers.
inline oop oopDesc::atomic_exchange_oop(oop exchange_value, volatile HeapWord *dest) {
  if (UseCompressedOops) {
    // encode exchange value from oop to T
    narrowOop val = encode_heap_oop(exchange_value);
    narrowOop old = (narrowOop)Atomic::xchg(val, (narrowOop*)dest);
    // decode old from T to oop
    return decode_heap_oop(old);
  } else {
    return (oop)Atomic::xchg_ptr(exchange_value, (oop*)dest);
  }
}

inline oop oopDesc::atomic_compare_exchange_oop(oop exchange_value,
                                                volatile HeapWord *dest,
                                                oop compare_value) {
  if (UseCompressedOops) {
    // encode exchange and compare value from oop to T
    narrowOop val = encode_heap_oop(exchange_value);
    narrowOop cmp = encode_heap_oop(compare_value);

    narrowOop old = (narrowOop) Atomic::cmpxchg(val, (narrowOop*)dest, cmp);
    // decode old from T to oop
    return decode_heap_oop(old);
  } else {
    return (oop)Atomic::cmpxchg_ptr(exchange_value, (oop*)dest, compare_value);
  }
}
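
// (In this era of HotSpot these routines back, for example, the
// sun.misc.Unsafe compare-and-swap entry points for object fields: both
// oops are encoded when compressed oops are in use, the 32-bit slot is
// CASed, and the returned witness is decoded.)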

// To get or put a field of an instance, first check whether the field is
// compressed, and decompress or compress it as needed.
inline oop oopDesc::obj_field(int offset) const {
  return UseCompressedOops ?
    load_decode_heap_oop(obj_field_addr<narrowOop>(offset)) :
    load_decode_heap_oop(obj_field_addr<oop>(offset));
}
inline volatile oop oopDesc::obj_field_volatile(int offset) const {
  volatile oop value = obj_field(offset);
  OrderAccess::acquire();
  return value;
}
inline void oopDesc::obj_field_put(int offset, oop value) {
  UseCompressedOops ? oop_store(obj_field_addr<narrowOop>(offset), value) :
                      oop_store(obj_field_addr<oop>(offset),       value);
}
inline void oopDesc::obj_field_put_raw(int offset, oop value) {
  UseCompressedOops ?
    encode_store_heap_oop(obj_field_addr<narrowOop>(offset), value) :
    encode_store_heap_oop(obj_field_addr<oop>(offset),       value);
}
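// Volatile oop stores: the release() keeps earlier accesses before the
// store, and the trailing fence() supplies the store-load barrier that
// Java volatile semantics require.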
inline void oopDesc::obj_field_put_volatile(int offset, oop value) {
  OrderAccess::release();
  obj_field_put(offset, value);
  OrderAccess::fence();
}

inline jbyte oopDesc::byte_field(int offset) const                  { return (jbyte) *byte_field_addr(offset);    }
inline void oopDesc::byte_field_put(int offset, jbyte contents)     { *byte_field_addr(offset) = (jint) contents; }

inline jboolean oopDesc::bool_field(int offset) const               { return (jboolean) *bool_field_addr(offset); }
inline void oopDesc::bool_field_put(int offset, jboolean contents)  { *bool_field_addr(offset) = (jint) contents; }

inline jchar oopDesc::char_field(int offset) const                  { return (jchar) *char_field_addr(offset);    }
inline void oopDesc::char_field_put(int offset, jchar contents)     { *char_field_addr(offset) = (jint) contents; }

inline jint oopDesc::int_field(int offset) const                    { return *int_field_addr(offset);        }
inline void oopDesc::int_field_put(int offset, jint contents)       { *int_field_addr(offset) = contents;    }

inline jshort oopDesc::short_field(int offset) const                { return (jshort) *short_field_addr(offset);  }
inline void oopDesc::short_field_put(int offset, jshort contents)   { *short_field_addr(offset) = (jint) contents;}

inline jlong oopDesc::long_field(int offset) const                  { return *long_field_addr(offset);       }
inline void oopDesc::long_field_put(int offset, jlong contents)     { *long_field_addr(offset) = contents;   }

inline jfloat oopDesc::float_field(int offset) const                { return *float_field_addr(offset);      }
inline void oopDesc::float_field_put(int offset, jfloat contents)   { *float_field_addr(offset) = contents;  }

inline jdouble oopDesc::double_field(int offset) const              { return *double_field_addr(offset);     }
inline void oopDesc::double_field_put(int offset, jdouble contents) { *double_field_addr(offset) = contents; }

inline address oopDesc::address_field(int offset) const              { return *address_field_addr(offset);     }
inline void oopDesc::address_field_put(int offset, address contents) { *address_field_addr(offset) = contents; }

inline oop oopDesc::obj_field_acquire(int offset) const {
  return UseCompressedOops ?
             decode_heap_oop((narrowOop)
               OrderAccess::load_acquire(obj_field_addr<narrowOop>(offset)))
           : decode_heap_oop((oop)
               OrderAccess::load_ptr_acquire(obj_field_addr<oop>(offset)));
}
inline void oopDesc::release_obj_field_put(int offset, oop value) {
  UseCompressedOops ?
    oop_store((volatile narrowOop*)obj_field_addr<narrowOop>(offset), value) :
    oop_store((volatile oop*)      obj_field_addr<oop>(offset),       value);
}

inline jbyte oopDesc::byte_field_acquire(int offset) const                  { return OrderAccess::load_acquire(byte_field_addr(offset));     }
inline void oopDesc::release_byte_field_put(int offset, jbyte contents)     { OrderAccess::release_store(byte_field_addr(offset), contents); }

inline jboolean oopDesc::bool_field_acquire(int offset) const               { return OrderAccess::load_acquire(bool_field_addr(offset));     }
inline void oopDesc::release_bool_field_put(int offset, jboolean contents)  { OrderAccess::release_store(bool_field_addr(offset), contents); }

inline jchar oopDesc::char_field_acquire(int offset) const                  { return OrderAccess::load_acquire(char_field_addr(offset));     }
inline void oopDesc::release_char_field_put(int offset, jchar contents)     { OrderAccess::release_store(char_field_addr(offset), contents); }

inline jint oopDesc::int_field_acquire(int offset) const                    { return OrderAccess::load_acquire(int_field_addr(offset));      }
inline void oopDesc::release_int_field_put(int offset, jint contents)       { OrderAccess::release_store(int_field_addr(offset), contents);  }

inline jshort oopDesc::short_field_acquire(int offset) const                { return (jshort)OrderAccess::load_acquire(short_field_addr(offset)); }
inline void oopDesc::release_short_field_put(int offset, jshort contents)   { OrderAccess::release_store(short_field_addr(offset), contents);     }

inline jlong oopDesc::long_field_acquire(int offset) const                  { return OrderAccess::load_acquire(long_field_addr(offset));       }
inline void oopDesc::release_long_field_put(int offset, jlong contents)     { OrderAccess::release_store(long_field_addr(offset), contents);   }

inline jfloat oopDesc::float_field_acquire(int offset) const                { return OrderAccess::load_acquire(float_field_addr(offset));      }
inline void oopDesc::release_float_field_put(int offset, jfloat contents)   { OrderAccess::release_store(float_field_addr(offset), contents);  }

inline jdouble oopDesc::double_field_acquire(int offset) const              { return OrderAccess::load_acquire(double_field_addr(offset));     }
inline void oopDesc::release_double_field_put(int offset, jdouble contents) { OrderAccess::release_store(double_field_addr(offset), contents); }

inline address oopDesc::address_field_acquire(int offset) const             { return (address) OrderAccess::load_ptr_acquire(address_field_addr(offset)); }
inline void oopDesc::release_address_field_put(int offset, address contents) { OrderAccess::release_store_ptr(address_field_addr(offset), contents); }

inline int oopDesc::size_given_klass(Klass* klass)  {
  int lh = klass->layout_helper();
  int s;

  // lh is now a value computed at class initialization that may hint
  // at the size.  For instances, this is positive and equal to the
  // size.  For arrays, this is negative and provides log2 of the
  // array element size.  For other oops, it is zero and thus requires
  // a virtual call.
  //
  // We go to all this trouble because the size computation is at the
  // heart of phase 2 of mark-compaction, and called for every object,
  // alive or dead.  So the speed here is equal in importance to the
  // speed of allocation.
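  //
  // A worked array example under illustrative assumptions (64-bit VM,
  // HeapWordSize == 8, MinObjAlignmentInBytes == 8, 16-byte array header):
  // for jint[10], log2_element_size == 2, so
  //   size_in_bytes = 16 + (10 << 2) = 56; round_to(56, 8) = 56;
  //   s = 56 / 8 = 7 heap words.
  // For a plain instance lh is simply the byte size, so s = lh >> 3.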

  if (lh > Klass::_lh_neutral_value) {
    if (!Klass::layout_helper_needs_slow_path(lh)) {
      s = lh >> LogHeapWordSize;  // deliver size scaled by wordSize
    } else {
      s = klass->oop_size(this);
    }
  } else if (lh <= Klass::_lh_neutral_value) {
    // The most common case is instances; fall through if so.
    if (lh < Klass::_lh_neutral_value) {
      // Second most common case is arrays.  We have to fetch the
      // length of the array, shift (multiply) it appropriately,
      // up to wordSize, add the header, and align to object size.
      size_t size_in_bytes;
#ifdef _M_IA64
      // The Windows Itanium Aug 2002 SDK hoists this load above
      // the check for s < 0.  An oop at the end of the heap will
      // cause an access violation if this load is performed on a non
      // array oop.  Making the reference volatile prohibits this.
      // (%%% please explain by what magic the length is actually fetched!)
      volatile int *array_length;
      array_length = (volatile int *)( (intptr_t)this +
                          arrayOopDesc::length_offset_in_bytes() );
      assert(array_length > 0, "Integer arithmetic problem somewhere");
      // Put into size_t to avoid overflow.
      size_in_bytes = (size_t) array_length;
      size_in_bytes = size_in_bytes << Klass::layout_helper_log2_element_size(lh);
#else
      size_t array_length = (size_t) ((arrayOop)this)->length();
      size_in_bytes = array_length << Klass::layout_helper_log2_element_size(lh);
#endif
      size_in_bytes += Klass::layout_helper_header_size(lh);

      // This code could be simplified, but by keeping array_header_in_bytes
      // in units of bytes and doing it this way we can round up just once,
      // skipping the intermediate round to HeapWordSize.  Cast the result
      // of round_to to size_t to guarantee unsigned division == right shift.
      s = (int)((size_t)round_to(size_in_bytes, MinObjAlignmentInBytes) /
        HeapWordSize);

      // UseParNewGC, UseParallelGC and UseG1GC can change the length field
      // of an "old copy" of an object array in the young gen so it indicates
      // the grey portion of an already copied array. This will cause the first
      // disjunct below to fail if the two comparands are computed across such
      // a concurrent change.
      // UseParNewGC also runs with promotion labs (which look like int
      // filler arrays) which are subject to changing their declared size
      // when finally retiring a PLAB; this also can cause the first disjunct
      // to fail for another worker thread that is concurrently walking the block
      // offset table. Both these invariant failures are benign for their
      // current uses; we relax the assertion checking to cover these two cases below:
      //     is_objArray() && is_forwarded()   // covers first scenario above
      //  || is_typeArray()                    // covers second scenario above
      // If and when UseParallelGC uses the same obj array oop stealing/chunking
      // technique, we will need to suitably modify the assertion.
      assert((s == klass->oop_size(this)) ||
             (Universe::heap()->is_gc_active() &&
              ((is_typeArray() && UseParNewGC) ||
               (is_objArray()  && is_forwarded() && (UseParNewGC || UseParallelGC || UseG1GC)))),
             "wrong array object size");
    } else {
      // Must be zero, so bite the bullet and take the virtual call.
      s = klass->oop_size(this);
    }
  }

  assert(s % MinObjAlignment == 0, "alignment check");
  assert(s > 0, "Bad size calculated");
  return s;
}

inline int oopDesc::size()  {
  return size_given_klass(blueprint());
}

inline bool oopDesc::is_parsable() {
  return blueprint()->oop_is_parsable(this);
}

inline bool oopDesc::is_conc_safe() {
  return blueprint()->oop_is_conc_safe(this);
}

inline void update_barrier_set(void* p, oop v) {
  assert(oopDesc::bs() != NULL, "Uninitialized bs in oop!");
  oopDesc::bs()->write_ref_field(p, v);
}

template <class T> inline void update_barrier_set_pre(T* p, oop v) {
  oopDesc::bs()->write_ref_field_pre(p, v);
}

template <class T> inline void oop_store(T* p, oop v) {
  if (always_do_update_barrier) {
    oop_store((volatile T*)p, v);
  } else {
    update_barrier_set_pre(p, v);
    oopDesc::encode_store_heap_oop(p, v);
    update_barrier_set((void*)p, v);  // cast away type
  }
}

template <class T> inline void oop_store(volatile T* p, oop v) {
  update_barrier_set_pre((T*)p, v);   // cast away volatile
  // Used by release_obj_field_put, so use release_store_ptr.
  oopDesc::release_encode_store_heap_oop(p, v);
  update_barrier_set((void*)p, v);    // cast away type
}
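
// The ordering above matters: the pre-barrier (for instance G1's
// snapshot-at-the-beginning marking, which logs the value being
// overwritten) must run while the old value is still in the field, and the
// post-barrier (for instance a card-table dirty) only after the new value
// is in place.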

template <class T> inline void oop_store_without_check(T* p, oop v) {
  // XXX YSR FIX ME!!!
  if (always_do_update_barrier) {
    oop_store(p, v);
  } else {
    assert(!Universe::heap()->barrier_set()->write_ref_needs_barrier(p, v),
           "oop store without store check failed");
    oopDesc::encode_store_heap_oop(p, v);
  }
}

// When it absolutely has to get there.
template <class T> inline void oop_store_without_check(volatile T* p, oop v) {
  // XXX YSR FIX ME!!!
  if (always_do_update_barrier) {
    oop_store(p, v);
  } else {
    assert(!Universe::heap()->barrier_set()->write_ref_needs_barrier((T*)p, v),
           "oop store without store check failed");
    oopDesc::release_encode_store_heap_oop(p, v);
  }
}

// Should replace *addr = oop assignments where addr type depends on UseCompressedOops
// (without having to remember the function name this calls).
inline void oop_store_raw(HeapWord* addr, oop value) {
  if (UseCompressedOops) {
    oopDesc::encode_store_heap_oop((narrowOop*)addr, value);
  } else {
    oopDesc::encode_store_heap_oop((oop*)addr, value);
  }
}

// Used only for markSweep, scavenging
inline bool oopDesc::is_gc_marked() const {
  return mark()->is_marked();
}

inline bool oopDesc::is_locked() const {
  return mark()->is_locked();
}

inline bool oopDesc::is_unlocked() const {
  return mark()->is_unlocked();
}

inline bool oopDesc::has_bias_pattern() const {
  return mark()->has_bias_pattern();
}

// used only for asserts
inline bool oopDesc::is_oop(bool ignore_mark_word) const {
  oop obj = (oop) this;
  if (!check_obj_alignment(obj)) return false;
  if (!Universe::heap()->is_in_reserved(obj)) return false;
  // obj is aligned and accessible in heap
  // try to find metaclass cycle safely without seg faulting on bad input
  // we should reach klassKlassObj by following klass link at most 3 times
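  // (illustrative chain: instance -> instanceKlass -> instanceKlassKlass
  //  -> klassKlass, which is its own klass thereafter)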
  for (int i = 0; i < 3; i++) {
    obj = obj->klass_or_null();
    // klass should be aligned and in permspace
    if (!check_obj_alignment(obj)) return false;
    if (!Universe::heap()->is_in_permanent(obj)) return false;
  }
  if (obj != Universe::klassKlassObj()) {
    // During a dump, the _klassKlassObj moved to a shared space.
    if (DumpSharedSpaces && Universe::klassKlassObj()->is_shared()) {
      return true;
    }
    return false;
  }

  // Header verification: the mark is typically non-NULL. If we're
  // at a safepoint, it must not be null.
  // Outside of a safepoint, the header could be changing (for example,
  // another thread could be inflating a lock on this object).
  if (ignore_mark_word) {
    return true;
  }
  if (mark() != NULL) {
    return true;
  }
  return !SafepointSynchronize::is_at_safepoint();
}

// used only for asserts
inline bool oopDesc::is_oop_or_null(bool ignore_mark_word) const {
  return this == NULL ? true : is_oop(ignore_mark_word);
}

#ifndef PRODUCT
// used only for asserts
inline bool oopDesc::is_unlocked_oop() const {
  if (!Universe::heap()->is_in_reserved(this)) return false;
  return mark()->is_unlocked();
}
#endif // PRODUCT

inline void oopDesc::follow_header() {
  if (UseCompressedOops) {
    MarkSweep::mark_and_push(compressed_klass_addr());
  } else {
    MarkSweep::mark_and_push(klass_addr());
  }
}

inline void oopDesc::follow_contents(void) {
  assert (is_gc_marked(), "should be marked");
  blueprint()->oop_follow_contents(this);
}

// Used by scavengers

inline bool oopDesc::is_forwarded() const {
  // The extra heap check is needed since the obj might be locked, in which case the
  // mark would point to a stack location and have the sentinel bit cleared
  return mark()->is_marked();
}
   658 inline void oopDesc::forward_to(oop p) {
   659   assert(check_obj_alignment(p),
   660          "forwarding to something not aligned");
   661   assert(Universe::heap()->is_in_reserved(p),
   662          "forwarding to something not in heap");
   663   markOop m = markOopDesc::encode_pointer_as_mark(p);
   664   assert(m->decode_pointer() == p, "encoding must be reversable");
   665   set_mark(m);
   666 }
   668 // Used by parallel scavengers
   669 inline bool oopDesc::cas_forward_to(oop p, markOop compare) {
   670   assert(check_obj_alignment(p),
   671          "forwarding to something not aligned");
   672   assert(Universe::heap()->is_in_reserved(p),
   673          "forwarding to something not in heap");
   674   markOop m = markOopDesc::encode_pointer_as_mark(p);
   675   assert(m->decode_pointer() == p, "encoding must be reversable");
   676   return cas_set_mark(m, compare) == compare;
   677 }
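
// In a parallel scavenge several workers may race to copy the same object;
// the worker whose cas_forward_to succeeds owns the copy, and the losers
// pick up the winning copy via forwardee() below.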

// Note that the forwardee is not the same thing as the displaced_mark.
// The forwardee is used when copying during scavenge and mark-sweep.
// It does need to clear the low two locking- and GC-related bits.
inline oop oopDesc::forwardee() const {
  return (oop) mark()->decode_pointer();
}

inline bool oopDesc::has_displaced_mark() const {
  return mark()->has_displaced_mark_helper();
}

inline markOop oopDesc::displaced_mark() const {
  return mark()->displaced_mark_helper();
}

inline void oopDesc::set_displaced_mark(markOop m) {
  mark()->set_displaced_mark_helper(m);
}

// The following method needs to be MT safe.
inline int oopDesc::age() const {
  assert(!is_forwarded(), "Attempt to read age from forwarded mark");
  if (has_displaced_mark()) {
    return displaced_mark()->age();
  } else {
    return mark()->age();
  }
}

inline void oopDesc::incr_age() {
  assert(!is_forwarded(), "Attempt to increment age of forwarded mark");
  if (has_displaced_mark()) {
    set_displaced_mark(displaced_mark()->incr_age());
  } else {
    set_mark(mark()->incr_age());
  }
}

inline intptr_t oopDesc::identity_hash() {
  // Fast case; if the object is unlocked and the hash value is set, no locking is needed.
  // Note: the mark must be read into a local variable to guard against concurrent updates.
  markOop mrk = mark();
  if (mrk->is_unlocked() && !mrk->has_no_hash()) {
    return mrk->hash();
  } else if (mrk->is_marked()) {
    return mrk->hash();
  } else {
    return slow_identity_hash();
  }
}

inline void oopDesc::oop_iterate_header(OopClosure* blk) {
  if (UseCompressedOops) {
    blk->do_oop(compressed_klass_addr());
  } else {
    blk->do_oop(klass_addr());
  }
}

inline void oopDesc::oop_iterate_header(OopClosure* blk, MemRegion mr) {
  if (UseCompressedOops) {
    if (mr.contains(compressed_klass_addr())) {
      blk->do_oop(compressed_klass_addr());
    }
  } else {
    if (mr.contains(klass_addr())) blk->do_oop(klass_addr());
  }
}

inline int oopDesc::adjust_pointers() {
  debug_only(int check_size = size());
  int s = blueprint()->oop_adjust_pointers(this);
  assert(s == check_size, "should be the same");
  return s;
}

inline void oopDesc::adjust_header() {
  if (UseCompressedOops) {
    MarkSweep::adjust_pointer(compressed_klass_addr());
  } else {
    MarkSweep::adjust_pointer(klass_addr());
  }
}

#define OOP_ITERATE_DEFN(OopClosureType, nv_suffix)                        \
                                                                           \
inline int oopDesc::oop_iterate(OopClosureType* blk) {                     \
  SpecializationStats::record_call();                                      \
  return blueprint()->oop_oop_iterate##nv_suffix(this, blk);               \
}                                                                          \
                                                                           \
inline int oopDesc::oop_iterate(OopClosureType* blk, MemRegion mr) {       \
  SpecializationStats::record_call();                                      \
  return blueprint()->oop_oop_iterate##nv_suffix##_m(this, blk, mr);       \
}

ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_DEFN)
ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_DEFN)

#ifndef SERIALGC
#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)              \
                                                                           \
inline int oopDesc::oop_iterate_backwards(OopClosureType* blk) {           \
  SpecializationStats::record_call();                                      \
  return blueprint()->oop_oop_iterate_backwards##nv_suffix(this, blk);     \
}

ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_BACKWARDS_DEFN)
ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_BACKWARDS_DEFN)
#endif // !SERIALGC

inline bool oopDesc::is_shared() const {
  return CompactingPermGenGen::is_shared(this);
}

inline bool oopDesc::is_shared_readonly() const {
  return CompactingPermGenGen::is_shared_readonly(this);
}

inline bool oopDesc::is_shared_readwrite() const {
  return CompactingPermGenGen::is_shared_readwrite(this);
}

#endif // SHARE_VM_OOPS_OOP_INLINE_HPP
