src/share/vm/oops/oop.inline.hpp

changeset 548:ba764ed4b6f2
parent    435:a61af66fc99e
child     568:435e64505015
     1.1 --- a/src/share/vm/oops/oop.inline.hpp	Fri Apr 11 09:56:35 2008 -0400
     1.2 +++ b/src/share/vm/oops/oop.inline.hpp	Sun Apr 13 17:43:42 2008 -0400
     1.3 @@ -25,7 +25,6 @@
     1.4  // Implementation of all inlined member functions defined in oop.hpp
     1.5  // We need a separate file to avoid circular references
     1.6  
     1.7 -
     1.8  inline void oopDesc::release_set_mark(markOop m) {
     1.9    OrderAccess::release_store_ptr(&_mark, m);
    1.10  }
    1.11 @@ -34,17 +33,54 @@
    1.12    return (markOop) Atomic::cmpxchg_ptr(new_mark, &_mark, old_mark);
    1.13  }
    1.14  
    1.15 +inline klassOop oopDesc::klass() const {
    1.16 +  if (UseCompressedOops) {
    1.17 +    return (klassOop)decode_heap_oop_not_null(_metadata._compressed_klass);
     1.18 +      // The klass can be NULL during CMS, but compressed oops are not yet supported with CMS.
    1.19 +  } else {
    1.20 +    return _metadata._klass;
    1.21 +  }
    1.22 +}
    1.23 +
    1.24 +inline int oopDesc::klass_gap_offset_in_bytes() {
    1.25 +  assert(UseCompressedOops, "only applicable to compressed headers");
    1.26 +  return oopDesc::klass_offset_in_bytes() + sizeof(narrowOop);
    1.27 +}
    1.28 +
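
For reference, a sketch of the header layout these accessors assume on a 64-bit VM with UseCompressedOops (the actual field declarations live in oop.hpp, not in this diff; offsets are inferred, not quoted from the changeset):

    // offset  0: markOop   _mark                         (8 bytes)
    // offset  8: narrowOop _metadata._compressed_klass   (4 bytes)
    // offset 12: klass gap                               (4 bytes) -- what klass_gap_offset_in_bytes() returns
    // Without compressed oops, _metadata._klass is a full pointer-sized klassOop and there is no gap.
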
    1.29 +inline oop* oopDesc::klass_addr() {
     1.30 +  // Only used internally and by CMS; it does not work with
     1.31 +  // UseCompressedOops.
    1.32 +  assert(!UseCompressedOops, "only supported with uncompressed oops");
    1.33 +  return (oop*) &_metadata._klass;
    1.34 +}
    1.35 +
    1.36 +inline narrowOop* oopDesc::compressed_klass_addr() {
    1.37 +  assert(UseCompressedOops, "only called by compressed oops");
    1.38 +  return (narrowOop*) &_metadata._compressed_klass;
    1.39 +}
    1.40 +
    1.41  inline void oopDesc::set_klass(klassOop k) {
    1.42    // since klasses are promoted no store check is needed
    1.43    assert(Universe::is_bootstrapping() || k != NULL, "must be a real klassOop");
    1.44    assert(Universe::is_bootstrapping() || k->is_klass(), "not a klassOop");
    1.45 -  oop_store_without_check((oop*) &_klass, (oop) k);
    1.46 +  if (UseCompressedOops) {
     1.47 +    // Zero the klass gap when the klass is set, by first zeroing the
     1.48 +    // pointer-sized part of the union.
    1.49 +    _metadata._klass = NULL;
    1.50 +    oop_store_without_check(compressed_klass_addr(), (oop)k);
    1.51 +  } else {
    1.52 +    oop_store_without_check(klass_addr(), (oop) k);
    1.53 +  }
    1.54  }
    1.55  
    1.56  inline void oopDesc::set_klass_to_list_ptr(oop k) {
    1.57    // This is only to be used during GC, for from-space objects, so no
    1.58    // barrier is needed.
    1.59 -  _klass = (klassOop)k;
    1.60 +  if (UseCompressedOops) {
    1.61 +    _metadata._compressed_klass = encode_heap_oop_not_null(k);
    1.62 +  } else {
    1.63 +    _metadata._klass = (klassOop)k;
    1.64 +  }
    1.65  }
    1.66  
    1.67  inline void   oopDesc::init_mark()                 { set_mark(markOopDesc::prototype_for_object(this)); }
    1.68 @@ -70,7 +106,7 @@
    1.69  
    1.70  inline void*     oopDesc::field_base(int offset)        const { return (void*)&((char*)this)[offset]; }
    1.71  
    1.72 -inline oop*      oopDesc::obj_field_addr(int offset)    const { return (oop*)     field_base(offset); }
    1.73 +template <class T> inline T* oopDesc::obj_field_addr(int offset) const { return (T*)field_base(offset); }
    1.74  inline jbyte*    oopDesc::byte_field_addr(int offset)   const { return (jbyte*)   field_base(offset); }
    1.75  inline jchar*    oopDesc::char_field_addr(int offset)   const { return (jchar*)   field_base(offset); }
    1.76  inline jboolean* oopDesc::bool_field_addr(int offset)   const { return (jboolean*)field_base(offset); }
    1.77 @@ -79,9 +115,156 @@
    1.78  inline jlong*    oopDesc::long_field_addr(int offset)   const { return (jlong*)   field_base(offset); }
    1.79  inline jfloat*   oopDesc::float_field_addr(int offset)  const { return (jfloat*)  field_base(offset); }
    1.80  inline jdouble*  oopDesc::double_field_addr(int offset) const { return (jdouble*) field_base(offset); }
    1.81 +inline address*  oopDesc::address_field_addr(int offset) const { return (address*) field_base(offset); }
    1.82  
    1.83 -inline oop oopDesc::obj_field(int offset) const                     { return *obj_field_addr(offset);             }
    1.84 -inline void oopDesc::obj_field_put(int offset, oop value)           { oop_store(obj_field_addr(offset), value);   }
    1.85 +
    1.86 +// Functions for getting and setting oops within instance objects.
    1.87 +// If the oops are compressed, the type passed to these overloaded functions
    1.88 +// is narrowOop.  All functions are overloaded so they can be called by
    1.89 +// template functions without conditionals (the compiler instantiates via
     1.90 +// the right type and inlines the appropriate code).
    1.91 +
    1.92 +inline bool oopDesc::is_null(oop obj)       { return obj == NULL; }
    1.93 +inline bool oopDesc::is_null(narrowOop obj) { return obj == 0; }
    1.94 +
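
A minimal sketch, not part of this changeset, of the pattern the comment above describes: a template helper instantiated with either oop or narrowOop, so no UseCompressedOops check appears inside the loop body and the compiler picks the matching overloads (the sketch assumes these helpers are static members, since they take the field address explicitly).

    template <class T>
    inline void follow_field_sketch(T* p) {        // T is oop or narrowOop
      T heap_oop = oopDesc::load_heap_oop(p);      // raw load, no decode yet
      if (!oopDesc::is_null(heap_oop)) {
        oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
        // ... visit obj, e.g. mark it or push it on a work stack ...
      }
    }
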
     1.95 +// Algorithm for encoding and decoding oops between 64-bit pointers and 32-bit
     1.96 +// offsets from the heap base.  Skipping the null check saves instructions in
     1.97 +// inner GC loops, so the checked and unchecked variants are kept separate.
    1.98 +
    1.99 +inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
   1.100 +  assert(!is_null(v), "oop value can never be zero");
   1.101 +  address heap_base = Universe::heap_base();
   1.102 +  uint64_t result = (uint64_t)(pointer_delta((void*)v, (void*)heap_base, 1) >> LogMinObjAlignmentInBytes);
   1.103 +  assert((result & 0xffffffff00000000L) == 0, "narrow oop overflow");
   1.104 +  return (narrowOop)result;
   1.105 +}
   1.106 +
   1.107 +inline narrowOop oopDesc::encode_heap_oop(oop v) {
   1.108 +  return (is_null(v)) ? (narrowOop)0 : encode_heap_oop_not_null(v);
   1.109 +}
   1.110 +
   1.111 +inline oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
   1.112 +  assert(!is_null(v), "narrow oop value can never be zero");
   1.113 +  address heap_base = Universe::heap_base();
   1.114 +  return (oop)(void*)((uintptr_t)heap_base + ((uintptr_t)v << LogMinObjAlignmentInBytes));
   1.115 +}
   1.116 +
   1.117 +inline oop oopDesc::decode_heap_oop(narrowOop v) {
   1.118 +  return is_null(v) ? (oop)NULL : decode_heap_oop_not_null(v);
   1.119 +}
   1.120 +
   1.121 +inline oop oopDesc::decode_heap_oop_not_null(oop v) { return v; }
   1.122 +inline oop oopDesc::decode_heap_oop(oop v)  { return v; }
   1.123 +
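
A worked example of the encode/decode arithmetic above; the heap base and addresses are made up for illustration, and LogMinObjAlignmentInBytes is taken as 3 (8-byte alignment):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t heap_base = 0x0000000700000000ULL;            // assumed Universe::heap_base()
      const int      log_align = 3;                                // 8-byte minimum object alignment
      uint64_t wide    = 0x0000000700000040ULL;                    // a 64-bit oop address in the heap
      uint32_t narrow  = (uint32_t)((wide - heap_base) >> log_align);   // encode: 0x8
      uint64_t decoded = heap_base + ((uint64_t)narrow << log_align);   // decode
      assert(decoded == wide);
      return 0;
    }

With a 32-bit offset and 8-byte alignment, this scheme can address up to 32 GB of heap above the base.
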
   1.124 +// Load an oop out of the Java heap as is without decoding.
   1.125 +// Called by GC to check for null before decoding.
   1.126 +inline oop       oopDesc::load_heap_oop(oop* p)          { return *p; }
   1.127 +inline narrowOop oopDesc::load_heap_oop(narrowOop* p)    { return *p; }
   1.128 +
   1.129 +// Load and decode an oop out of the Java heap into a wide oop.
   1.130 +inline oop oopDesc::load_decode_heap_oop_not_null(oop* p)       { return *p; }
   1.131 +inline oop oopDesc::load_decode_heap_oop_not_null(narrowOop* p) {
   1.132 +  return decode_heap_oop_not_null(*p);
   1.133 +}
   1.134 +
   1.135 +// Load and decode an oop out of the heap accepting null
   1.136 +inline oop oopDesc::load_decode_heap_oop(oop* p) { return *p; }
   1.137 +inline oop oopDesc::load_decode_heap_oop(narrowOop* p) {
   1.138 +  return decode_heap_oop(*p);
   1.139 +}
   1.140 +
   1.141 +// Store already encoded heap oop into the heap.
   1.142 +inline void oopDesc::store_heap_oop(oop* p, oop v)                 { *p = v; }
   1.143 +inline void oopDesc::store_heap_oop(narrowOop* p, narrowOop v)     { *p = v; }
   1.144 +
   1.145 +// Encode and store a heap oop.
   1.146 +inline void oopDesc::encode_store_heap_oop_not_null(narrowOop* p, oop v) {
   1.147 +  *p = encode_heap_oop_not_null(v);
   1.148 +}
   1.149 +inline void oopDesc::encode_store_heap_oop_not_null(oop* p, oop v) { *p = v; }
   1.150 +
   1.151 +// Encode and store a heap oop allowing for null.
   1.152 +inline void oopDesc::encode_store_heap_oop(narrowOop* p, oop v) {
   1.153 +  *p = encode_heap_oop(v);
   1.154 +}
   1.155 +inline void oopDesc::encode_store_heap_oop(oop* p, oop v) { *p = v; }
   1.156 +
   1.157 +// Store heap oop as is for volatile fields.
   1.158 +inline void oopDesc::release_store_heap_oop(volatile oop* p, oop v) {
   1.159 +  OrderAccess::release_store_ptr(p, v);
   1.160 +}
   1.161 +inline void oopDesc::release_store_heap_oop(volatile narrowOop* p,
   1.162 +                                            narrowOop v) {
   1.163 +  OrderAccess::release_store(p, v);
   1.164 +}
   1.165 +
   1.166 +inline void oopDesc::release_encode_store_heap_oop_not_null(
   1.167 +                                                volatile narrowOop* p, oop v) {
    1.168 +  // A narrow heap oop is not pointer sized, so use release_store, not release_store_ptr.
   1.169 +  OrderAccess::release_store(p, encode_heap_oop_not_null(v));
   1.170 +}
   1.171 +
   1.172 +inline void oopDesc::release_encode_store_heap_oop_not_null(
   1.173 +                                                      volatile oop* p, oop v) {
   1.174 +  OrderAccess::release_store_ptr(p, v);
   1.175 +}
   1.176 +
   1.177 +inline void oopDesc::release_encode_store_heap_oop(volatile oop* p,
   1.178 +                                                           oop v) {
   1.179 +  OrderAccess::release_store_ptr(p, v);
   1.180 +}
   1.181 +inline void oopDesc::release_encode_store_heap_oop(
   1.182 +                                                volatile narrowOop* p, oop v) {
   1.183 +  OrderAccess::release_store(p, encode_heap_oop(v));
   1.184 +}
   1.185 +
   1.186 +
   1.187 +// These functions are only used to exchange oop fields in instances,
   1.188 +// not headers.
   1.189 +inline oop oopDesc::atomic_exchange_oop(oop exchange_value, volatile HeapWord *dest) {
   1.190 +  if (UseCompressedOops) {
   1.191 +    // encode exchange value from oop to T
   1.192 +    narrowOop val = encode_heap_oop(exchange_value);
   1.193 +    narrowOop old = (narrowOop)Atomic::xchg(val, (narrowOop*)dest);
   1.194 +    // decode old from T to oop
   1.195 +    return decode_heap_oop(old);
   1.196 +  } else {
   1.197 +    return (oop)Atomic::xchg_ptr(exchange_value, (oop*)dest);
   1.198 +  }
   1.199 +}
   1.200 +
   1.201 +inline oop oopDesc::atomic_compare_exchange_oop(oop exchange_value,
   1.202 +                                                volatile HeapWord *dest,
   1.203 +                                                oop compare_value) {
   1.204 +  if (UseCompressedOops) {
   1.205 +    // encode exchange and compare value from oop to T
   1.206 +    narrowOop val = encode_heap_oop(exchange_value);
   1.207 +    narrowOop cmp = encode_heap_oop(compare_value);
   1.208 +
   1.209 +    narrowOop old = (narrowOop) Atomic::cmpxchg(val, (narrowOop*)dest, cmp);
   1.210 +    // decode old from T to oop
   1.211 +    return decode_heap_oop(old);
   1.212 +  } else {
   1.213 +    return (oop)Atomic::cmpxchg_ptr(exchange_value, (oop*)dest, compare_value);
   1.214 +  }
   1.215 +}
   1.216 +
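
An illustrative caller sketch (the name and the assumption that dest already points at an oop-typed field are mine, not from the changeset); note that both paths return the decoded, wide old value, so callers compare it against ordinary oops:

    inline oop cas_oop_field_sketch(volatile HeapWord* dest, oop compare_value, oop new_value) {
      oop old = oopDesc::atomic_compare_exchange_oop(new_value, dest, compare_value);
      return old;   // equals compare_value iff the exchange succeeded
    }
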
    1.217 +// To get or put an oop field of an instance, first check whether the field
    1.218 +// is compressed and, if so, decode or encode it.
   1.219 +inline oop oopDesc::obj_field(int offset) const {
   1.220 +  return UseCompressedOops ?
   1.221 +    load_decode_heap_oop(obj_field_addr<narrowOop>(offset)) :
   1.222 +    load_decode_heap_oop(obj_field_addr<oop>(offset));
   1.223 +}
   1.224 +inline void oopDesc::obj_field_put(int offset, oop value) {
   1.225 +  UseCompressedOops ? oop_store(obj_field_addr<narrowOop>(offset), value) :
   1.226 +                      oop_store(obj_field_addr<oop>(offset),       value);
   1.227 +}
   1.228 +inline void oopDesc::obj_field_raw_put(int offset, oop value) {
   1.229 +  UseCompressedOops ?
   1.230 +    encode_store_heap_oop(obj_field_addr<narrowOop>(offset), value) :
   1.231 +    encode_store_heap_oop(obj_field_addr<oop>(offset),       value);
   1.232 +}
   1.233  
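
A hypothetical caller sketch of the accessors above (copy_oop_field_sketch and field_offset are illustrative names, not part of this changeset):

    inline void copy_oop_field_sketch(oop src, oop dst, int field_offset) {
      oop value = src->obj_field(field_offset);   // loads and, if compressed, decodes
      dst->obj_field_put(field_offset, value);    // encodes if compressed; applies the store barrier via oop_store
    }
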
   1.234  inline jbyte oopDesc::byte_field(int offset) const                  { return (jbyte) *byte_field_addr(offset);    }
   1.235  inline void oopDesc::byte_field_put(int offset, jbyte contents)     { *byte_field_addr(offset) = (jint) contents; }
   1.236 @@ -107,8 +290,21 @@
   1.237  inline jdouble oopDesc::double_field(int offset) const              { return *double_field_addr(offset);     }
   1.238  inline void oopDesc::double_field_put(int offset, jdouble contents) { *double_field_addr(offset) = contents; }
   1.239  
   1.240 -inline oop oopDesc::obj_field_acquire(int offset) const                     { return (oop)OrderAccess::load_ptr_acquire(obj_field_addr(offset)); }
   1.241 -inline void oopDesc::release_obj_field_put(int offset, oop value)           { oop_store((volatile oop*)obj_field_addr(offset), value);           }
   1.242 +inline address oopDesc::address_field(int offset) const              { return *address_field_addr(offset);     }
   1.243 +inline void oopDesc::address_field_put(int offset, address contents) { *address_field_addr(offset) = contents; }
   1.244 +
   1.245 +inline oop oopDesc::obj_field_acquire(int offset) const {
   1.246 +  return UseCompressedOops ?
   1.247 +             decode_heap_oop((narrowOop)
   1.248 +               OrderAccess::load_acquire(obj_field_addr<narrowOop>(offset)))
   1.249 +           : decode_heap_oop((oop)
   1.250 +               OrderAccess::load_ptr_acquire(obj_field_addr<oop>(offset)));
   1.251 +}
   1.252 +inline void oopDesc::release_obj_field_put(int offset, oop value) {
   1.253 +  UseCompressedOops ?
   1.254 +    oop_store((volatile narrowOop*)obj_field_addr<narrowOop>(offset), value) :
   1.255 +    oop_store((volatile oop*)      obj_field_addr<oop>(offset),       value);
   1.256 +}
   1.257  
   1.258  inline jbyte oopDesc::byte_field_acquire(int offset) const                  { return OrderAccess::load_acquire(byte_field_addr(offset));     }
   1.259  inline void oopDesc::release_byte_field_put(int offset, jbyte contents)     { OrderAccess::release_store(byte_field_addr(offset), contents); }
   1.260 @@ -134,7 +330,6 @@
   1.261  inline jdouble oopDesc::double_field_acquire(int offset) const              { return OrderAccess::load_acquire(double_field_addr(offset));     }
   1.262  inline void oopDesc::release_double_field_put(int offset, jdouble contents) { OrderAccess::release_store(double_field_addr(offset), contents); }
   1.263  
   1.264 -
   1.265  inline int oopDesc::size_given_klass(Klass* klass)  {
   1.266    int lh = klass->layout_helper();
   1.267    int s  = lh >> LogHeapWordSize;  // deliver size scaled by wordSize
   1.268 @@ -200,7 +395,7 @@
   1.269        // technique) we will need to suitably modify the assertion.
   1.270        assert((s == klass->oop_size(this)) ||
   1.271               (((UseParNewGC || UseParallelGC) &&
   1.272 -                                           Universe::heap()->is_gc_active()) &&
   1.273 +              Universe::heap()->is_gc_active()) &&
   1.274                (is_typeArray() ||
   1.275                 (is_objArray() && is_forwarded()))),
   1.276               "wrong array object size");
   1.277 @@ -224,52 +419,58 @@
   1.278    return blueprint()->oop_is_parsable(this);
   1.279  }
   1.280  
   1.281 -
   1.282 -inline void update_barrier_set(oop *p, oop v) {
   1.283 +inline void update_barrier_set(void* p, oop v) {
   1.284    assert(oopDesc::bs() != NULL, "Uninitialized bs in oop!");
   1.285    oopDesc::bs()->write_ref_field(p, v);
   1.286  }
   1.287  
   1.288 -
   1.289 -inline void oop_store(oop* p, oop v) {
   1.290 +template <class T> inline void oop_store(T* p, oop v) {
   1.291    if (always_do_update_barrier) {
   1.292 -    oop_store((volatile oop*)p, v);
   1.293 +    oop_store((volatile T*)p, v);
   1.294    } else {
   1.295 -    *p = v;
   1.296 +    oopDesc::encode_store_heap_oop(p, v);
   1.297      update_barrier_set(p, v);
   1.298    }
   1.299  }
   1.300  
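
For a compressed oop field, the template above effectively reduces to the following sketch (common non-volatile path, assuming always_do_update_barrier is false; the name is illustrative):

    inline void oop_store_narrow_sketch(narrowOop* p, oop v) {
      oopDesc::encode_store_heap_oop(p, v);   // *p = encode_heap_oop(v)
      update_barrier_set(p, v);               // card-mark the updated location
    }
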
   1.301 -inline void oop_store(volatile oop* p, oop v) {
   1.302 +template <class T> inline void oop_store(volatile T* p, oop v) {
   1.303    // Used by release_obj_field_put, so use release_store_ptr.
   1.304 -  OrderAccess::release_store_ptr(p, v);
   1.305 -  update_barrier_set((oop *)p, v);
   1.306 +  oopDesc::release_encode_store_heap_oop(p, v);
   1.307 +  update_barrier_set((void*)p, v);
   1.308  }
   1.309  
   1.310 -inline void oop_store_without_check(oop* p, oop v) {
   1.311 +template <class T> inline void oop_store_without_check(T* p, oop v) {
   1.312    // XXX YSR FIX ME!!!
   1.313    if (always_do_update_barrier) {
   1.314 -   oop_store(p, v);
   1.315 +    oop_store(p, v);
   1.316    } else {
   1.317      assert(!Universe::heap()->barrier_set()->write_ref_needs_barrier(p, v),
   1.318             "oop store without store check failed");
   1.319 -    *p = v;
   1.320 +    oopDesc::encode_store_heap_oop(p, v);
   1.321    }
   1.322  }
   1.323  
   1.324  // When it absolutely has to get there.
   1.325 -inline void oop_store_without_check(volatile oop* p, oop v) {
   1.326 +template <class T> inline void oop_store_without_check(volatile T* p, oop v) {
   1.327    // XXX YSR FIX ME!!!
   1.328    if (always_do_update_barrier) {
   1.329      oop_store(p, v);
   1.330    } else {
   1.331 -    assert(!Universe::heap()->barrier_set()->
   1.332 -                      write_ref_needs_barrier((oop *)p, v),
   1.333 +    assert(!Universe::heap()->barrier_set()->write_ref_needs_barrier((T*)p, v),
   1.334             "oop store without store check failed");
   1.335 -    OrderAccess::release_store_ptr(p, v);
   1.336 +    oopDesc::release_encode_store_heap_oop(p, v);
   1.337    }
   1.338  }
   1.339  
    1.340 +// Use this in place of '*addr = oop' assignments where the type of addr depends on
    1.341 +// UseCompressedOops, so callers need not pick the encode_store_heap_oop overload themselves.
   1.342 +inline void oop_store_raw(HeapWord* addr, oop value) {
   1.343 +  if (UseCompressedOops) {
   1.344 +    oopDesc::encode_store_heap_oop((narrowOop*)addr, value);
   1.345 +  } else {
   1.346 +    oopDesc::encode_store_heap_oop((oop*)addr, value);
   1.347 +  }
   1.348 +}
   1.349  
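
A hedged before/after sketch of the replacement that comment describes (install_oop_raw_sketch is an illustrative name; like the encode_store_heap_oop calls it wraps, this raw store applies no barrier):

    // Before (wrong field width under compressed oops):
    //   *(oop*)field_addr = new_obj;
    // After:
    inline void install_oop_raw_sketch(HeapWord* field_addr, oop new_obj) {
      oop_store_raw(field_addr, new_obj);   // picks the narrowOop* or oop* store
    }
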
   1.350  // Used only for markSweep, scavenging
   1.351  inline bool oopDesc::is_gc_marked() const {
   1.352 @@ -340,15 +541,17 @@
   1.353    if (!Universe::heap()->is_in_reserved(this)) return false;
   1.354    return mark()->is_unlocked();
   1.355  }
   1.356 -
   1.357 -
   1.358  #endif // PRODUCT
   1.359  
   1.360  inline void oopDesc::follow_header() {
   1.361 -  MarkSweep::mark_and_push((oop*)&_klass);
   1.362 +  if (UseCompressedOops) {
   1.363 +    MarkSweep::mark_and_push(compressed_klass_addr());
   1.364 +  } else {
   1.365 +    MarkSweep::mark_and_push(klass_addr());
   1.366 +  }
   1.367  }
   1.368  
   1.369 -inline void oopDesc::follow_contents() {
   1.370 +inline void oopDesc::follow_contents(void) {
   1.371    assert (is_gc_marked(), "should be marked");
   1.372    blueprint()->oop_follow_contents(this);
   1.373  }
   1.374 @@ -362,7 +565,6 @@
   1.375    return mark()->is_marked();
   1.376  }
   1.377  
   1.378 -
   1.379  // Used by scavengers
   1.380  inline void oopDesc::forward_to(oop p) {
   1.381    assert(Universe::heap()->is_in_reserved(p),
   1.382 @@ -384,8 +586,9 @@
   1.383  // Note that the forwardee is not the same thing as the displaced_mark.
   1.384  // The forwardee is used when copying during scavenge and mark-sweep.
   1.385  // It does need to clear the low two locking- and GC-related bits.
   1.386 -inline oop oopDesc::forwardee() const           { return (oop) mark()->decode_pointer(); }
   1.387 -
   1.388 +inline oop oopDesc::forwardee() const {
   1.389 +  return (oop) mark()->decode_pointer();
   1.390 +}
   1.391  
   1.392  inline bool oopDesc::has_displaced_mark() const {
   1.393    return mark()->has_displaced_mark_helper();
   1.394 @@ -432,17 +635,24 @@
   1.395    }
   1.396  }
   1.397  
   1.398 -
   1.399  inline void oopDesc::oop_iterate_header(OopClosure* blk) {
   1.400 -  blk->do_oop((oop*)&_klass);
   1.401 +  if (UseCompressedOops) {
   1.402 +    blk->do_oop(compressed_klass_addr());
   1.403 +  } else {
   1.404 +    blk->do_oop(klass_addr());
   1.405 +  }
   1.406  }
   1.407  
   1.408 -
   1.409  inline void oopDesc::oop_iterate_header(OopClosure* blk, MemRegion mr) {
   1.410 -  if (mr.contains(&_klass)) blk->do_oop((oop*)&_klass);
   1.411 +  if (UseCompressedOops) {
   1.412 +    if (mr.contains(compressed_klass_addr())) {
   1.413 +      blk->do_oop(compressed_klass_addr());
   1.414 +    }
   1.415 +  } else {
   1.416 +    if (mr.contains(klass_addr())) blk->do_oop(klass_addr());
   1.417 +  }
   1.418  }
   1.419  
   1.420 -
   1.421  inline int oopDesc::adjust_pointers() {
   1.422    debug_only(int check_size = size());
   1.423    int s = blueprint()->oop_adjust_pointers(this);
   1.424 @@ -451,7 +661,11 @@
   1.425  }
   1.426  
   1.427  inline void oopDesc::adjust_header() {
   1.428 -  MarkSweep::adjust_pointer((oop*)&_klass);
   1.429 +  if (UseCompressedOops) {
   1.430 +    MarkSweep::adjust_pointer(compressed_klass_addr());
   1.431 +  } else {
   1.432 +    MarkSweep::adjust_pointer(klass_addr());
   1.433 +  }
   1.434  }
   1.435  
   1.436  #define OOP_ITERATE_DEFN(OopClosureType, nv_suffix)                        \
