src/share/vm/oops/oop.inline.hpp

author:      ccheung
date:        Tue, 30 Apr 2013 11:56:52 -0700
changeset:   4993:746b070f5022
parent:      4544:3c9bc17b9403
child:       5528:740e263c80c6
permissions: -rw-r--r--

8011661: Insufficient memory message says "malloc" when sometimes it should say "mmap"
Reviewed-by: coleenp, zgu, hseigel

/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OOPS_OOP_INLINE_HPP
#define SHARE_VM_OOPS_OOP_INLINE_HPP

#include "gc_implementation/shared/ageTable.hpp"
#include "gc_implementation/shared/markSweep.inline.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/barrierSet.inline.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/generation.hpp"
#include "memory/specialized_oop_closures.hpp"
#include "oops/arrayKlass.hpp"
#include "oops/arrayOop.hpp"
#include "oops/klass.hpp"
#include "oops/markOop.inline.hpp"
#include "oops/oop.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "utilities/macros.hpp"
#ifdef TARGET_ARCH_x86
# include "bytes_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "bytes_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "bytes_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "bytes_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "bytes_ppc.hpp"
#endif

// Implementation of all inlined member functions defined in oop.hpp
// We need a separate file to avoid circular references

inline void oopDesc::release_set_mark(markOop m) {
  OrderAccess::release_store_ptr(&_mark, m);
}

inline markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
  return (markOop) Atomic::cmpxchg_ptr(new_mark, &_mark, old_mark);
}

inline Klass* oopDesc::klass() const {
  if (UseCompressedKlassPointers) {
    return decode_klass_not_null(_metadata._compressed_klass);
  } else {
    return _metadata._klass;
  }
}

inline Klass* oopDesc::klass_or_null() const volatile {
  // can be NULL in CMS
  if (UseCompressedKlassPointers) {
    return decode_klass(_metadata._compressed_klass);
  } else {
    return _metadata._klass;
  }
}

inline int oopDesc::klass_gap_offset_in_bytes() {
  assert(UseCompressedKlassPointers, "only applicable to compressed klass pointers");
  return oopDesc::klass_offset_in_bytes() + sizeof(narrowOop);
}

inline Klass** oopDesc::klass_addr() {
  // Only used internally and with CMS; will not work with
  // UseCompressedOops.
  assert(!UseCompressedKlassPointers, "only supported with uncompressed klass pointers");
  return (Klass**) &_metadata._klass;
}

inline narrowOop* oopDesc::compressed_klass_addr() {
  assert(UseCompressedKlassPointers, "only called by compressed klass pointers");
  return (narrowOop*) &_metadata._compressed_klass;
}

inline void oopDesc::set_klass(Klass* k) {
  // since klasses are promoted no store check is needed
  assert(Universe::is_bootstrapping() || k != NULL, "must be a real Klass*");
  assert(Universe::is_bootstrapping() || k->is_klass(), "not a Klass*");
  if (UseCompressedKlassPointers) {
    *compressed_klass_addr() = encode_klass_not_null(k);
  } else {
    *klass_addr() = k;
  }
}

inline int oopDesc::klass_gap() const {
  return *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes());
}

inline void oopDesc::set_klass_gap(int v) {
  if (UseCompressedKlassPointers) {
    *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes()) = v;
  }
}

inline void oopDesc::set_klass_to_list_ptr(oop k) {
  // This is only to be used during GC, for from-space objects, so no
  // barrier is needed.
  if (UseCompressedKlassPointers) {
    _metadata._compressed_klass = encode_heap_oop(k);  // may be null (parnew overflow handling)
  } else {
    _metadata._klass = (Klass*)(address)k;
  }
}

inline oop oopDesc::list_ptr_from_klass() {
  // This is only to be used during GC, for from-space objects.
  if (UseCompressedKlassPointers) {
    return decode_heap_oop(_metadata._compressed_klass);
  } else {
    // Special case for GC
    return (oop)(address)_metadata._klass;
  }
}

inline void   oopDesc::init_mark()                 { set_mark(markOopDesc::prototype_for_object(this)); }

inline bool oopDesc::is_a(Klass* k)        const { return klass()->is_subtype_of(k); }

inline bool oopDesc::is_instance()           const { return klass()->oop_is_instance(); }
inline bool oopDesc::is_instanceMirror()     const { return klass()->oop_is_instanceMirror(); }
inline bool oopDesc::is_instanceRef()        const { return klass()->oop_is_instanceRef(); }
inline bool oopDesc::is_array()              const { return klass()->oop_is_array(); }
inline bool oopDesc::is_objArray()           const { return klass()->oop_is_objArray(); }
inline bool oopDesc::is_typeArray()          const { return klass()->oop_is_typeArray(); }

inline void*     oopDesc::field_base(int offset)        const { return (void*)&((char*)this)[offset]; }

template <class T> inline T* oopDesc::obj_field_addr(int offset) const { return (T*)field_base(offset); }
inline Metadata** oopDesc::metadata_field_addr(int offset) const { return (Metadata**)field_base(offset); }
inline jbyte*    oopDesc::byte_field_addr(int offset)   const { return (jbyte*)   field_base(offset); }
inline jchar*    oopDesc::char_field_addr(int offset)   const { return (jchar*)   field_base(offset); }
inline jboolean* oopDesc::bool_field_addr(int offset)   const { return (jboolean*)field_base(offset); }
inline jint*     oopDesc::int_field_addr(int offset)    const { return (jint*)    field_base(offset); }
inline jshort*   oopDesc::short_field_addr(int offset)  const { return (jshort*)  field_base(offset); }
inline jlong*    oopDesc::long_field_addr(int offset)   const { return (jlong*)   field_base(offset); }
inline jfloat*   oopDesc::float_field_addr(int offset)  const { return (jfloat*)  field_base(offset); }
inline jdouble*  oopDesc::double_field_addr(int offset) const { return (jdouble*) field_base(offset); }
inline address*  oopDesc::address_field_addr(int offset) const { return (address*) field_base(offset); }

// Functions for getting and setting oops within instance objects.
// If the oops are compressed, the type passed to these overloaded functions
// is narrowOop.  All functions are overloaded so they can be called by
// template functions without conditionals (the compiler instantiates via
// the right type and inlines the appropriate code).
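//
// Illustrative sketch (not part of this file; do_oop_work and SomeClosure
// are hypothetical names): a template helper is written once and works for
// either pointer width:
//
//   template <class T> void do_oop_work(T* p, SomeClosure* cl) {
//     T heap_oop = oopDesc::load_heap_oop(p);      // T is oop or narrowOop
//     if (!oopDesc::is_null(heap_oop)) {
//       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
//       cl->do_object(obj);
//     }
//   }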

inline bool oopDesc::is_null(oop obj)       { return obj == NULL; }
inline bool oopDesc::is_null(Klass* obj)  { return obj == NULL; }
inline bool oopDesc::is_null(narrowOop obj) { return obj == 0; }

// Algorithm for encoding and decoding oops from 64 bit pointers to 32 bit
// offset from the heap base.  Saving the check for null can save instructions
// in inner GC loops so these are separated.
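//
// For illustration only (the concrete values are configuration-dependent
// assumptions): with heap base B and shift S, the mapping both ways is
//   encode: narrow = (narrowOop)((ptr - B) >> S)
//   decode: ptr    = B + ((uintptr_t)narrow << S)
// e.g. with S == 3 (8-byte object alignment) a 32-bit narrow oop can
// address 2^32 * 8 bytes = 32 GB of heap.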

inline bool check_obj_alignment(oop obj) {
  return (intptr_t)obj % MinObjAlignmentInBytes == 0;
}
inline bool check_klass_alignment(Klass* obj) {
  return (intptr_t)obj % KlassAlignmentInBytes == 0;
}

inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
  assert(!is_null(v), "oop value can never be zero");
  assert(check_obj_alignment(v), "Address not aligned");
  assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
  address base = Universe::narrow_oop_base();
  int    shift = Universe::narrow_oop_shift();
  uint64_t  pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
  assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
  uint64_t result = pd >> shift;
  assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
  assert(decode_heap_oop(result) == v, "reversibility");
  return (narrowOop)result;
}

inline narrowOop oopDesc::encode_heap_oop(oop v) {
  return (is_null(v)) ? (narrowOop)0 : encode_heap_oop_not_null(v);
}

inline oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
  assert(!is_null(v), "narrow oop value can never be zero");
  address base = Universe::narrow_oop_base();
  int    shift = Universe::narrow_oop_shift();
  oop result = (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
  assert(check_obj_alignment(result), err_msg("address not aligned: " PTR_FORMAT, (void*) result));
  return result;
}

inline oop oopDesc::decode_heap_oop(narrowOop v) {
  return is_null(v) ? (oop)NULL : decode_heap_oop_not_null(v);
}

inline oop oopDesc::decode_heap_oop_not_null(oop v) { return v; }
inline oop oopDesc::decode_heap_oop(oop v)  { return v; }

// Encoding and decoding for the klass field.  The code is currently copied
// from the oop encoding above, but someday the two might diverge.

inline narrowOop oopDesc::encode_klass_not_null(Klass* v) {
  assert(!is_null(v), "klass value can never be zero");
  assert(check_klass_alignment(v), "Address not aligned");
  address base = Universe::narrow_klass_base();
  int    shift = Universe::narrow_klass_shift();
  uint64_t  pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
  assert(KlassEncodingMetaspaceMax > pd, "change encoding max if new encoding");
  uint64_t result = pd >> shift;
  assert((result & CONST64(0xffffffff00000000)) == 0, "narrow klass pointer overflow");
  assert(decode_klass(result) == v, "reversibility");
  return (narrowOop)result;
}

inline narrowOop oopDesc::encode_klass(Klass* v) {
  return (is_null(v)) ? (narrowOop)0 : encode_klass_not_null(v);
}

inline Klass* oopDesc::decode_klass_not_null(narrowOop v) {
  assert(!is_null(v), "narrow klass value can never be zero");
  address base = Universe::narrow_klass_base();
  int    shift = Universe::narrow_klass_shift();
  Klass* result = (Klass*)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
  assert(check_klass_alignment(result), err_msg("address not aligned: " PTR_FORMAT, (void*) result));
  return result;
}

inline Klass* oopDesc::decode_klass(narrowOop v) {
  return is_null(v) ? (Klass*)NULL : decode_klass_not_null(v);
}

// Load an oop out of the Java heap as is without decoding.
// Called by GC to check for null before decoding.
inline oop       oopDesc::load_heap_oop(oop* p)          { return *p; }
inline narrowOop oopDesc::load_heap_oop(narrowOop* p)    { return *p; }

// Load and decode an oop out of the Java heap into a wide oop.
inline oop oopDesc::load_decode_heap_oop_not_null(oop* p)       { return *p; }
inline oop oopDesc::load_decode_heap_oop_not_null(narrowOop* p) {
  return decode_heap_oop_not_null(*p);
}

// Load and decode an oop out of the heap accepting null
inline oop oopDesc::load_decode_heap_oop(oop* p) { return *p; }
inline oop oopDesc::load_decode_heap_oop(narrowOop* p) {
  return decode_heap_oop(*p);
}

// Store already encoded heap oop into the heap.
inline void oopDesc::store_heap_oop(oop* p, oop v)                 { *p = v; }
inline void oopDesc::store_heap_oop(narrowOop* p, narrowOop v)     { *p = v; }

// Encode and store a heap oop.
inline void oopDesc::encode_store_heap_oop_not_null(narrowOop* p, oop v) {
  *p = encode_heap_oop_not_null(v);
}
inline void oopDesc::encode_store_heap_oop_not_null(oop* p, oop v) { *p = v; }

// Encode and store a heap oop allowing for null.
inline void oopDesc::encode_store_heap_oop(narrowOop* p, oop v) {
  *p = encode_heap_oop(v);
}
inline void oopDesc::encode_store_heap_oop(oop* p, oop v) { *p = v; }

// Store heap oop as is for volatile fields.
inline void oopDesc::release_store_heap_oop(volatile oop* p, oop v) {
  OrderAccess::release_store_ptr(p, v);
}
inline void oopDesc::release_store_heap_oop(volatile narrowOop* p,
                                            narrowOop v) {
  OrderAccess::release_store(p, v);
}

inline void oopDesc::release_encode_store_heap_oop_not_null(
                                                volatile narrowOop* p, oop v) {
  // heap oop is not pointer sized.
  OrderAccess::release_store(p, encode_heap_oop_not_null(v));
}
inline void oopDesc::release_encode_store_heap_oop_not_null(
                                                      volatile oop* p, oop v) {
  OrderAccess::release_store_ptr(p, v);
}

inline void oopDesc::release_encode_store_heap_oop(volatile oop* p,
                                                           oop v) {
  OrderAccess::release_store_ptr(p, v);
}
inline void oopDesc::release_encode_store_heap_oop(
                                                volatile narrowOop* p, oop v) {
  OrderAccess::release_store(p, encode_heap_oop(v));
}

// These functions are only used to exchange oop fields in instances,
// not headers.
inline oop oopDesc::atomic_exchange_oop(oop exchange_value, volatile HeapWord *dest) {
  if (UseCompressedOops) {
    // encode exchange value from oop to T
    narrowOop val = encode_heap_oop(exchange_value);
    narrowOop old = (narrowOop)Atomic::xchg(val, (narrowOop*)dest);
    // decode old from T to oop
    return decode_heap_oop(old);
  } else {
    return (oop)Atomic::xchg_ptr(exchange_value, (oop*)dest);
  }
}

// In order to put or get a field of an instance, we must first check
// whether the field is compressed and, if so, uncompress it.
inline oop oopDesc::obj_field(int offset) const {
  return UseCompressedOops ?
    load_decode_heap_oop(obj_field_addr<narrowOop>(offset)) :
    load_decode_heap_oop(obj_field_addr<oop>(offset));
}
inline volatile oop oopDesc::obj_field_volatile(int offset) const {
  volatile oop value = obj_field(offset);
  OrderAccess::acquire();
  return value;
}
inline void oopDesc::obj_field_put(int offset, oop value) {
  UseCompressedOops ? oop_store(obj_field_addr<narrowOop>(offset), value) :
                      oop_store(obj_field_addr<oop>(offset),       value);
}

inline Metadata* oopDesc::metadata_field(int offset) const {
  return *metadata_field_addr(offset);
}

inline void oopDesc::metadata_field_put(int offset, Metadata* value) {
  *metadata_field_addr(offset) = value;
}

inline void oopDesc::obj_field_put_raw(int offset, oop value) {
  UseCompressedOops ?
    encode_store_heap_oop(obj_field_addr<narrowOop>(offset), value) :
    encode_store_heap_oop(obj_field_addr<oop>(offset),       value);
}
inline void oopDesc::obj_field_put_volatile(int offset, oop value) {
  OrderAccess::release();
  obj_field_put(offset, value);
  OrderAccess::fence();
}

inline jbyte oopDesc::byte_field(int offset) const                  { return (jbyte) *byte_field_addr(offset);    }
inline void oopDesc::byte_field_put(int offset, jbyte contents)     { *byte_field_addr(offset) = (jint) contents; }

inline jboolean oopDesc::bool_field(int offset) const               { return (jboolean) *bool_field_addr(offset); }
inline void oopDesc::bool_field_put(int offset, jboolean contents)  { *bool_field_addr(offset) = (jint) contents; }

inline jchar oopDesc::char_field(int offset) const                  { return (jchar) *char_field_addr(offset);    }
inline void oopDesc::char_field_put(int offset, jchar contents)     { *char_field_addr(offset) = (jint) contents; }

inline jint oopDesc::int_field(int offset) const                    { return *int_field_addr(offset);        }
inline void oopDesc::int_field_put(int offset, jint contents)       { *int_field_addr(offset) = contents;    }

inline jshort oopDesc::short_field(int offset) const                { return (jshort) *short_field_addr(offset);  }
inline void oopDesc::short_field_put(int offset, jshort contents)   { *short_field_addr(offset) = (jint) contents;}

inline jlong oopDesc::long_field(int offset) const                  { return *long_field_addr(offset);       }
inline void oopDesc::long_field_put(int offset, jlong contents)     { *long_field_addr(offset) = contents;   }

inline jfloat oopDesc::float_field(int offset) const                { return *float_field_addr(offset);      }
inline void oopDesc::float_field_put(int offset, jfloat contents)   { *float_field_addr(offset) = contents;  }

inline jdouble oopDesc::double_field(int offset) const              { return *double_field_addr(offset);     }
inline void oopDesc::double_field_put(int offset, jdouble contents) { *double_field_addr(offset) = contents; }

inline address oopDesc::address_field(int offset) const              { return *address_field_addr(offset);     }
inline void oopDesc::address_field_put(int offset, address contents) { *address_field_addr(offset) = contents; }

inline oop oopDesc::obj_field_acquire(int offset) const {
  return UseCompressedOops ?
             decode_heap_oop((narrowOop)
               OrderAccess::load_acquire(obj_field_addr<narrowOop>(offset)))
           : decode_heap_oop((oop)
               OrderAccess::load_ptr_acquire(obj_field_addr<oop>(offset)));
}
inline void oopDesc::release_obj_field_put(int offset, oop value) {
  UseCompressedOops ?
    oop_store((volatile narrowOop*)obj_field_addr<narrowOop>(offset), value) :
    oop_store((volatile oop*)      obj_field_addr<oop>(offset),       value);
}

inline jbyte oopDesc::byte_field_acquire(int offset) const                  { return OrderAccess::load_acquire(byte_field_addr(offset));     }
inline void oopDesc::release_byte_field_put(int offset, jbyte contents)     { OrderAccess::release_store(byte_field_addr(offset), contents); }

inline jboolean oopDesc::bool_field_acquire(int offset) const               { return OrderAccess::load_acquire(bool_field_addr(offset));     }
inline void oopDesc::release_bool_field_put(int offset, jboolean contents)  { OrderAccess::release_store(bool_field_addr(offset), contents); }

inline jchar oopDesc::char_field_acquire(int offset) const                  { return OrderAccess::load_acquire(char_field_addr(offset));     }
inline void oopDesc::release_char_field_put(int offset, jchar contents)     { OrderAccess::release_store(char_field_addr(offset), contents); }

inline jint oopDesc::int_field_acquire(int offset) const                    { return OrderAccess::load_acquire(int_field_addr(offset));      }
inline void oopDesc::release_int_field_put(int offset, jint contents)       { OrderAccess::release_store(int_field_addr(offset), contents);  }

inline jshort oopDesc::short_field_acquire(int offset) const                { return (jshort)OrderAccess::load_acquire(short_field_addr(offset)); }
inline void oopDesc::release_short_field_put(int offset, jshort contents)   { OrderAccess::release_store(short_field_addr(offset), contents);     }

inline jlong oopDesc::long_field_acquire(int offset) const                  { return OrderAccess::load_acquire(long_field_addr(offset));       }
inline void oopDesc::release_long_field_put(int offset, jlong contents)     { OrderAccess::release_store(long_field_addr(offset), contents);   }

inline jfloat oopDesc::float_field_acquire(int offset) const                { return OrderAccess::load_acquire(float_field_addr(offset));      }
inline void oopDesc::release_float_field_put(int offset, jfloat contents)   { OrderAccess::release_store(float_field_addr(offset), contents);  }

inline jdouble oopDesc::double_field_acquire(int offset) const              { return OrderAccess::load_acquire(double_field_addr(offset));     }
inline void oopDesc::release_double_field_put(int offset, jdouble contents) { OrderAccess::release_store(double_field_addr(offset), contents); }

inline address oopDesc::address_field_acquire(int offset) const             { return (address) OrderAccess::load_ptr_acquire(address_field_addr(offset)); }
inline void oopDesc::release_address_field_put(int offset, address contents) { OrderAccess::release_store_ptr(address_field_addr(offset), contents); }

inline int oopDesc::size_given_klass(Klass* klass)  {
  int lh = klass->layout_helper();
  int s;

  // lh is now a value computed at class initialization that may hint
  // at the size.  For instances, this is positive and equal to the
  // size.  For arrays, this is negative and provides log2 of the
  // array element size.  For other oops, it is zero and thus requires
  // a virtual call.
  //
  // We go to all this trouble because the size computation is at the
  // heart of phase 2 of mark-compaction, and called for every object,
  // alive or dead.  So the speed here is equal in importance to the
  // speed of allocation.
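  //
  // Illustrative readings of lh (the sizes below are examples, not fixed
  // values):
  //   an instance class whose objects are 24 bytes -> lh == 24
  //   an int[] array -> lh < 0, layout_helper_log2_element_size(lh) == 2,
  //                     so size_in_bytes = header + (length << 2)
  //   lh == _lh_neutral_value -> no hint; take the virtual oop_size() call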

  if (lh > Klass::_lh_neutral_value) {
    if (!Klass::layout_helper_needs_slow_path(lh)) {
      s = lh >> LogHeapWordSize;  // deliver size scaled by wordSize
    } else {
      s = klass->oop_size(this);
    }
  } else if (lh <= Klass::_lh_neutral_value) {
    // The most common case is instances; fall through if so.
    if (lh < Klass::_lh_neutral_value) {
      // Second most common case is arrays.  We have to fetch the
      // length of the array, shift (multiply) it appropriately,
      // up to wordSize, add the header, and align to object size.
      size_t size_in_bytes;
#ifdef _M_IA64
      // The Windows Itanium Aug 2002 SDK hoists this load above
      // the check for s < 0.  An oop at the end of the heap will
      // cause an access violation if this load is performed on a non
      // array oop.  Making the reference volatile prohibits this.
      volatile int *array_length;
      array_length = (volatile int *)( (intptr_t)this +
                          arrayOopDesc::length_offset_in_bytes() );
      assert(*array_length > 0, "Integer arithmetic problem somewhere");
      // Put into size_t to avoid overflow.
      size_in_bytes = (size_t) *array_length;
      size_in_bytes = size_in_bytes << Klass::layout_helper_log2_element_size(lh);
#else
      size_t array_length = (size_t) ((arrayOop)this)->length();
      size_in_bytes = array_length << Klass::layout_helper_log2_element_size(lh);
#endif
      size_in_bytes += Klass::layout_helper_header_size(lh);

      // This code could be simplified, but by keeping array_header_in_bytes
      // in units of bytes and doing it this way we can round up just once,
      // skipping the intermediate round to HeapWordSize.  Cast the result
      // of round_to to size_t to guarantee unsigned division == right shift.
      s = (int)((size_t)round_to(size_in_bytes, MinObjAlignmentInBytes) /
        HeapWordSize);

      // UseParNewGC, UseParallelGC and UseG1GC can change the length field
      // of an "old copy" of an object array in the young gen so it indicates
      // the grey portion of an already copied array. This will cause the first
      // disjunct below to fail if the two comparands are computed across such
      // a concurrent change.
      // UseParNewGC also runs with promotion labs (which look like int
      // filler arrays) which are subject to changing their declared size
      // when finally retiring a PLAB; this also can cause the first disjunct
      // to fail for another worker thread that is concurrently walking the block
      // offset table. Both these invariant failures are benign for their
      // current uses; we relax the assertion checking to cover these two cases below:
      //     is_objArray() && is_forwarded()   // covers first scenario above
      //  || is_typeArray()                    // covers second scenario above
      // If and when UseParallelGC uses the same obj array oop stealing/chunking
      // technique, we will need to suitably modify the assertion.
      assert((s == klass->oop_size(this)) ||
             (Universe::heap()->is_gc_active() &&
              ((is_typeArray() && UseParNewGC) ||
               (is_objArray()  && is_forwarded() && (UseParNewGC || UseParallelGC || UseG1GC)))),
             "wrong array object size");
    } else {
      // Must be zero, so bite the bullet and take the virtual call.
      s = klass->oop_size(this);
    }
  }

  assert(s % MinObjAlignment == 0, "alignment check");
  assert(s > 0, "Bad size calculated");
  return s;
}

inline int oopDesc::size()  {
  return size_given_klass(klass());
}

inline void update_barrier_set(void* p, oop v) {
  assert(oopDesc::bs() != NULL, "Uninitialized bs in oop!");
  oopDesc::bs()->write_ref_field(p, v);
}

template <class T> inline void update_barrier_set_pre(T* p, oop v) {
  oopDesc::bs()->write_ref_field_pre(p, v);
}

template <class T> inline void oop_store(T* p, oop v) {
  if (always_do_update_barrier) {
    oop_store((volatile T*)p, v);
  } else {
    update_barrier_set_pre(p, v);
    oopDesc::encode_store_heap_oop(p, v);
    update_barrier_set((void*)p, v);  // cast away type
  }
}
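
// Usage sketch (illustrative; mirrors what obj_field_put above expands to):
// an oop store into an instance field goes through oop_store so that the
// barrier set sees the write, e.g. with compressed oops enabled:
//   oop_store(obj->obj_field_addr<narrowOop>(offset), value);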

template <class T> inline void oop_store(volatile T* p, oop v) {
  update_barrier_set_pre((T*)p, v);   // cast away volatile
  // Used by release_obj_field_put, so use release_store_ptr.
  oopDesc::release_encode_store_heap_oop(p, v);
  update_barrier_set((void*)p, v);    // cast away type
}

// Should replace *addr = oop assignments where addr type depends on UseCompressedOops
// (without having to remember the function name this calls).
inline void oop_store_raw(HeapWord* addr, oop value) {
  if (UseCompressedOops) {
    oopDesc::encode_store_heap_oop((narrowOop*)addr, value);
  } else {
    oopDesc::encode_store_heap_oop((oop*)addr, value);
  }
}

inline oop oopDesc::atomic_compare_exchange_oop(oop exchange_value,
                                                volatile HeapWord *dest,
                                                oop compare_value,
                                                bool prebarrier) {
  if (UseCompressedOops) {
    if (prebarrier) {
      update_barrier_set_pre((narrowOop*)dest, exchange_value);
    }
    // encode exchange and compare value from oop to T
    narrowOop val = encode_heap_oop(exchange_value);
    narrowOop cmp = encode_heap_oop(compare_value);

    narrowOop old = (narrowOop) Atomic::cmpxchg(val, (narrowOop*)dest, cmp);
    // decode old from T to oop
    return decode_heap_oop(old);
  } else {
    if (prebarrier) {
      update_barrier_set_pre((oop*)dest, exchange_value);
    }
    return (oop)Atomic::cmpxchg_ptr(exchange_value, (oop*)dest, compare_value);
  }
}
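
// Informal note: the prebarrier flag exists for collectors with a pre-write
// barrier (G1's SATB marking). Callers that can race with concurrent marking
// are expected to pass prebarrier == true; uses where no concurrent marking
// can be active may pass false.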

// Used only for markSweep, scavenging
inline bool oopDesc::is_gc_marked() const {
  return mark()->is_marked();
}

inline bool oopDesc::is_locked() const {
  return mark()->is_locked();
}

inline bool oopDesc::is_unlocked() const {
  return mark()->is_unlocked();
}

inline bool oopDesc::has_bias_pattern() const {
  return mark()->has_bias_pattern();
}

// used only for asserts
inline bool oopDesc::is_oop(bool ignore_mark_word) const {
  oop obj = (oop) this;
  if (!check_obj_alignment(obj)) return false;
  if (!Universe::heap()->is_in_reserved(obj)) return false;
  // obj is aligned and accessible in heap
  if (Universe::heap()->is_in_reserved(obj->klass_or_null())) return false;

  // Header verification: the mark is typically non-NULL.  If we're
  // at a safepoint, it must not be null.
  // Outside of a safepoint, the header could be changing (for example,
  // another thread could be inflating a lock on this object).
  if (ignore_mark_word) {
    return true;
  }
  if (mark() != NULL) {
    return true;
  }
  return !SafepointSynchronize::is_at_safepoint();
}

// used only for asserts
inline bool oopDesc::is_oop_or_null(bool ignore_mark_word) const {
  return this == NULL ? true : is_oop(ignore_mark_word);
}

#ifndef PRODUCT
// used only for asserts
inline bool oopDesc::is_unlocked_oop() const {
  if (!Universe::heap()->is_in_reserved(this)) return false;
  return mark()->is_unlocked();
}
#endif // PRODUCT

inline void oopDesc::follow_contents(void) {
  assert (is_gc_marked(), "should be marked");
  klass()->oop_follow_contents(this);
}

// Used by scavengers

inline bool oopDesc::is_forwarded() const {
  // The extra heap check is needed since the obj might be locked, in which case the
  // mark would point to a stack location and have the sentinel bit cleared
  return mark()->is_marked();
}

// Used by scavengers
inline void oopDesc::forward_to(oop p) {
  assert(check_obj_alignment(p),
         "forwarding to something not aligned");
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversible");
  set_mark(m);
}

// Used by parallel scavengers
inline bool oopDesc::cas_forward_to(oop p, markOop compare) {
  assert(check_obj_alignment(p),
         "forwarding to something not aligned");
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversible");
  return cas_set_mark(m, compare) == compare;
}
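
// Usage sketch (illustrative only): a parallel scavenger claims an object by
// CASing a forwarding pointer into the mark word; the loser of the race
// discards its copy and uses the winner's via forwardee():
//   markOop m = obj->mark();
//   if (obj->cas_forward_to(copy, m)) {
//     // we installed copy
//   } else {
//     copy = obj->forwardee();   // another thread won the race
//   }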

// Note that the forwardee is not the same thing as the displaced_mark.
// The forwardee is used when copying during scavenge and mark-sweep.
// It does need to clear the low two locking- and GC-related bits.
inline oop oopDesc::forwardee() const {
  return (oop) mark()->decode_pointer();
}

inline bool oopDesc::has_displaced_mark() const {
  return mark()->has_displaced_mark_helper();
}

inline markOop oopDesc::displaced_mark() const {
  return mark()->displaced_mark_helper();
}

inline void oopDesc::set_displaced_mark(markOop m) {
  mark()->set_displaced_mark_helper(m);
}

// The following method needs to be MT safe.
inline uint oopDesc::age() const {
  assert(!is_forwarded(), "Attempt to read age from forwarded mark");
  if (has_displaced_mark()) {
    return displaced_mark()->age();
  } else {
    return mark()->age();
  }
}

inline void oopDesc::incr_age() {
  assert(!is_forwarded(), "Attempt to increment age of forwarded mark");
  if (has_displaced_mark()) {
    set_displaced_mark(displaced_mark()->incr_age());
  } else {
    set_mark(mark()->incr_age());
  }
}

inline intptr_t oopDesc::identity_hash() {
  // Fast case; if the object is unlocked and the hash value is set, no locking is needed
  // Note: The mark must be read into local variable to avoid concurrent updates.
  markOop mrk = mark();
  if (mrk->is_unlocked() && !mrk->has_no_hash()) {
    return mrk->hash();
  } else if (mrk->is_marked()) {
    return mrk->hash();
  } else {
    return slow_identity_hash();
  }
}

inline int oopDesc::adjust_pointers() {
  debug_only(int check_size = size());
  int s = klass()->oop_adjust_pointers(this);
  assert(s == check_size, "should be the same");
  return s;
}

#define OOP_ITERATE_DEFN(OopClosureType, nv_suffix)                        \
                                                                           \
inline int oopDesc::oop_iterate(OopClosureType* blk) {                     \
  SpecializationStats::record_call();                                      \
  return klass()->oop_oop_iterate##nv_suffix(this, blk);                   \
}                                                                          \
                                                                           \
inline int oopDesc::oop_iterate(OopClosureType* blk, MemRegion mr) {       \
  SpecializationStats::record_call();                                      \
  return klass()->oop_oop_iterate##nv_suffix##_m(this, blk, mr);           \
}

inline int oopDesc::oop_iterate_no_header(OopClosure* blk) {
  // The NoHeaderExtendedOopClosure wraps the OopClosure and proxies all
  // the do_oop calls, but turns off all other features in ExtendedOopClosure.
  NoHeaderExtendedOopClosure cl(blk);
  return oop_iterate(&cl);
}

inline int oopDesc::oop_iterate_no_header(OopClosure* blk, MemRegion mr) {
  NoHeaderExtendedOopClosure cl(blk);
  return oop_iterate(&cl, mr);
}

ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_DEFN)
ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_DEFN)

#if INCLUDE_ALL_GCS
#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)              \
                                                                           \
inline int oopDesc::oop_iterate_backwards(OopClosureType* blk) {           \
  SpecializationStats::record_call();                                      \
  return klass()->oop_oop_iterate_backwards##nv_suffix(this, blk);         \
}

ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_BACKWARDS_DEFN)
ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_BACKWARDS_DEFN)
#endif // INCLUDE_ALL_GCS

#endif // SHARE_VM_OOPS_OOP_INLINE_HPP
