Tue, 21 Apr 2009 23:21:04 -0700
6655646: dynamic languages need dynamically linked call sites
Summary: invokedynamic instruction (JSR 292 RI)
Reviewed-by: twisti, never
/*
 * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */
// Implementation of all inlined member functions defined in oop.hpp
// We need a separate file to avoid circular references.

inline void oopDesc::release_set_mark(markOop m) {
  OrderAccess::release_store_ptr(&_mark, m);
}

inline markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
  return (markOop) Atomic::cmpxchg_ptr(new_mark, &_mark, old_mark);
}

inline klassOop oopDesc::klass() const {
  if (UseCompressedOops) {
    return (klassOop)decode_heap_oop_not_null(_metadata._compressed_klass);
  } else {
    return _metadata._klass;
  }
}

inline klassOop oopDesc::klass_or_null() const volatile {
  // can be NULL in CMS
  if (UseCompressedOops) {
    return (klassOop)decode_heap_oop(_metadata._compressed_klass);
  } else {
    return _metadata._klass;
  }
}

inline int oopDesc::klass_gap_offset_in_bytes() {
  assert(UseCompressedOops, "only applicable to compressed headers");
  return oopDesc::klass_offset_in_bytes() + sizeof(narrowOop);
}

inline oop* oopDesc::klass_addr() {
  // Only used internally and by CMS; will not work with UseCompressedOops.
  assert(!UseCompressedOops, "only supported with uncompressed oops");
  return (oop*) &_metadata._klass;
}

inline narrowOop* oopDesc::compressed_klass_addr() {
  assert(UseCompressedOops, "only called by compressed oops");
  return (narrowOop*) &_metadata._compressed_klass;
}
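
// For orientation (a sketch, not the authoritative declaration -- see
// oop.hpp): the _metadata field used above is a union in oopDesc that
// overlays the wide and compressed klass pointers, roughly
//
//   union _metadata {
//     klassOop  _klass;             // wide klass pointer (compressed oops off)
//     narrowOop _compressed_klass;  // 32-bit encoded klass (compressed oops on)
//   } _metadata;
//
// which is why the accessors above assert on UseCompressedOops before
// handing out one view or the other.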
inline void oopDesc::set_klass(klassOop k) {
  // since klasses are promoted no store check is needed
  assert(Universe::is_bootstrapping() || k != NULL, "must be a real klassOop");
  assert(Universe::is_bootstrapping() || k->is_klass(), "not a klassOop");
  if (UseCompressedOops) {
    oop_store_without_check(compressed_klass_addr(), (oop)k);
  } else {
    oop_store_without_check(klass_addr(), (oop) k);
  }
}

inline int oopDesc::klass_gap() const {
  return *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes());
}

inline void oopDesc::set_klass_gap(int v) {
  if (UseCompressedOops) {
    *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes()) = v;
  }
}

inline void oopDesc::set_klass_to_list_ptr(oop k) {
  // This is only to be used during GC, for from-space objects, so no
  // barrier is needed.
  if (UseCompressedOops) {
    _metadata._compressed_klass = encode_heap_oop(k);  // may be null (parnew overflow handling)
  } else {
    _metadata._klass = (klassOop)k;
  }
}

inline void oopDesc::init_mark()         { set_mark(markOopDesc::prototype_for_object(this)); }
inline Klass* oopDesc::blueprint() const { return klass()->klass_part(); }

inline bool oopDesc::is_a(klassOop k) const { return blueprint()->is_subtype_of(k); }

inline bool oopDesc::is_instance()          const { return blueprint()->oop_is_instance(); }
inline bool oopDesc::is_instanceRef()       const { return blueprint()->oop_is_instanceRef(); }
inline bool oopDesc::is_array()             const { return blueprint()->oop_is_array(); }
inline bool oopDesc::is_objArray()          const { return blueprint()->oop_is_objArray(); }
inline bool oopDesc::is_typeArray()         const { return blueprint()->oop_is_typeArray(); }
inline bool oopDesc::is_javaArray()         const { return blueprint()->oop_is_javaArray(); }
inline bool oopDesc::is_symbol()            const { return blueprint()->oop_is_symbol(); }
inline bool oopDesc::is_klass()             const { return blueprint()->oop_is_klass(); }
inline bool oopDesc::is_thread()            const { return blueprint()->oop_is_thread(); }
inline bool oopDesc::is_method()            const { return blueprint()->oop_is_method(); }
inline bool oopDesc::is_constMethod()       const { return blueprint()->oop_is_constMethod(); }
inline bool oopDesc::is_methodData()        const { return blueprint()->oop_is_methodData(); }
inline bool oopDesc::is_constantPool()      const { return blueprint()->oop_is_constantPool(); }
inline bool oopDesc::is_constantPoolCache() const { return blueprint()->oop_is_constantPoolCache(); }
inline bool oopDesc::is_compiledICHolder()  const { return blueprint()->oop_is_compiledICHolder(); }

inline void* oopDesc::field_base(int offset) const { return (void*)&((char*)this)[offset]; }

template <class T> inline T* oopDesc::obj_field_addr(int offset) const { return (T*)field_base(offset); }
inline jbyte*    oopDesc::byte_field_addr(int offset)    const { return (jbyte*)    field_base(offset); }
inline jchar*    oopDesc::char_field_addr(int offset)    const { return (jchar*)    field_base(offset); }
inline jboolean* oopDesc::bool_field_addr(int offset)    const { return (jboolean*) field_base(offset); }
inline jint*     oopDesc::int_field_addr(int offset)     const { return (jint*)     field_base(offset); }
inline jshort*   oopDesc::short_field_addr(int offset)   const { return (jshort*)   field_base(offset); }
inline jlong*    oopDesc::long_field_addr(int offset)    const { return (jlong*)    field_base(offset); }
inline jfloat*   oopDesc::float_field_addr(int offset)   const { return (jfloat*)   field_base(offset); }
inline jdouble*  oopDesc::double_field_addr(int offset)  const { return (jdouble*)  field_base(offset); }
inline address*  oopDesc::address_field_addr(int offset) const { return (address*)  field_base(offset); }

// Functions for getting and setting oops within instance objects.
// If the oops are compressed, the type passed to these overloaded functions
// is narrowOop. All functions are overloaded so they can be called by
// template functions without conditionals (the compiler instantiates via
// the right type and inlines the appropriate code).

inline bool oopDesc::is_null(oop obj)       { return obj == NULL; }
inline bool oopDesc::is_null(narrowOop obj) { return obj == 0; }
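
// A minimal sketch of how these overloads are meant to be used (the
// generic_loop name below is hypothetical): a template GC loop written
// against T works unchanged for both heap layouts, because overload
// resolution picks the oop or narrowOop variant from T itself.
//
//   template <class T> void generic_loop(T* p) {
//     T heap_oop = oopDesc::load_heap_oop(p);    // T is oop or narrowOop
//     if (!oopDesc::is_null(heap_oop)) {         // matching is_null overload
//       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
//       // ... visit obj ...
//     }
//   }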
// Algorithm for encoding and decoding oops from 64 bit pointers to 32 bit
// offset from the heap base. Saving the check for null can save instructions
// in inner GC loops so these are separated.

inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
  assert(!is_null(v), "oop value can never be zero");
  address base = Universe::narrow_oop_base();
  int    shift = Universe::narrow_oop_shift();
  uint64_t  pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
  assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
  uint64_t result = pd >> shift;
  assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
  return (narrowOop)result;
}

inline narrowOop oopDesc::encode_heap_oop(oop v) {
  return (is_null(v)) ? (narrowOop)0 : encode_heap_oop_not_null(v);
}

inline oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
  assert(!is_null(v), "narrow oop value can never be zero");
  address base = Universe::narrow_oop_base();
  int    shift = Universe::narrow_oop_shift();
  return (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
}

inline oop oopDesc::decode_heap_oop(narrowOop v) {
  return is_null(v) ? (oop)NULL : decode_heap_oop_not_null(v);
}

inline oop oopDesc::decode_heap_oop_not_null(oop v) { return v; }
inline oop oopDesc::decode_heap_oop(oop v)          { return v; }
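
// Worked example (illustrative numbers only): with narrow_oop_base == 0 and
// narrow_oop_shift == 3, an oop at address 0x0000000700000040 encodes as
//   (0x700000040 - 0) >> 3 == 0xE0000008,
// which fits in 32 bits; decoding reverses it:
//   0 + ((uintptr_t)0xE0000008 << 3) == 0x700000040.
// With a non-zero base (heap not mapped in the low address range), the same
// arithmetic applies relative to that base.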
// Load an oop out of the Java heap as is without decoding.
// Called by GC to check for null before decoding.
inline oop       oopDesc::load_heap_oop(oop* p)       { return *p; }
inline narrowOop oopDesc::load_heap_oop(narrowOop* p) { return *p; }

// Load and decode an oop out of the Java heap into a wide oop.
inline oop oopDesc::load_decode_heap_oop_not_null(oop* p) { return *p; }
inline oop oopDesc::load_decode_heap_oop_not_null(narrowOop* p) {
  return decode_heap_oop_not_null(*p);
}

// Load and decode an oop out of the heap accepting null.
inline oop oopDesc::load_decode_heap_oop(oop* p) { return *p; }
inline oop oopDesc::load_decode_heap_oop(narrowOop* p) {
  return decode_heap_oop(*p);
}

// Store an already encoded heap oop into the heap.
inline void oopDesc::store_heap_oop(oop* p, oop v)             { *p = v; }
inline void oopDesc::store_heap_oop(narrowOop* p, narrowOop v) { *p = v; }

// Encode and store a heap oop.
inline void oopDesc::encode_store_heap_oop_not_null(narrowOop* p, oop v) {
  *p = encode_heap_oop_not_null(v);
}
inline void oopDesc::encode_store_heap_oop_not_null(oop* p, oop v) { *p = v; }

// Encode and store a heap oop allowing for null.
inline void oopDesc::encode_store_heap_oop(narrowOop* p, oop v) {
  *p = encode_heap_oop(v);
}
inline void oopDesc::encode_store_heap_oop(oop* p, oop v) { *p = v; }

// Store heap oop as is for volatile fields.
inline void oopDesc::release_store_heap_oop(volatile oop* p, oop v) {
  OrderAccess::release_store_ptr(p, v);
}
inline void oopDesc::release_store_heap_oop(volatile narrowOop* p,
                                            narrowOop v) {
  OrderAccess::release_store(p, v);
}

inline void oopDesc::release_encode_store_heap_oop_not_null(
                                              volatile narrowOop* p, oop v) {
  // A narrow heap oop is not pointer sized, so use release_store rather
  // than release_store_ptr.
  OrderAccess::release_store(p, encode_heap_oop_not_null(v));
}
inline void oopDesc::release_encode_store_heap_oop_not_null(
                                              volatile oop* p, oop v) {
  OrderAccess::release_store_ptr(p, v);
}

inline void oopDesc::release_encode_store_heap_oop(volatile oop* p,
                                                   oop v) {
  OrderAccess::release_store_ptr(p, v);
}
inline void oopDesc::release_encode_store_heap_oop(
                                              volatile narrowOop* p, oop v) {
  OrderAccess::release_store(p, encode_heap_oop(v));
}
// These functions are only used to exchange oop fields in instances,
// not headers.
inline oop oopDesc::atomic_exchange_oop(oop exchange_value, volatile HeapWord *dest) {
  if (UseCompressedOops) {
    // encode exchange value from oop to T
    narrowOop val = encode_heap_oop(exchange_value);
    narrowOop old = (narrowOop)Atomic::xchg(val, (narrowOop*)dest);
    // decode old from T to oop
    return decode_heap_oop(old);
  } else {
    return (oop)Atomic::xchg_ptr(exchange_value, (oop*)dest);
  }
}

inline oop oopDesc::atomic_compare_exchange_oop(oop exchange_value,
                                                volatile HeapWord *dest,
                                                oop compare_value) {
  if (UseCompressedOops) {
    // encode exchange and compare value from oop to T
    narrowOop val = encode_heap_oop(exchange_value);
    narrowOop cmp = encode_heap_oop(compare_value);

    narrowOop old = (narrowOop) Atomic::cmpxchg(val, (narrowOop*)dest, cmp);
    // decode old from T to oop
    return decode_heap_oop(old);
  } else {
    return (oop)Atomic::cmpxchg_ptr(exchange_value, (oop*)dest, compare_value);
  }
}

// To get or put an oop field of an instance, we must first check whether
// oops are compressed and, if so, decode or encode the field value.
inline oop oopDesc::obj_field(int offset) const {
  return UseCompressedOops ?
    load_decode_heap_oop(obj_field_addr<narrowOop>(offset)) :
    load_decode_heap_oop(obj_field_addr<oop>(offset));
}
inline void oopDesc::obj_field_put(int offset, oop value) {
  UseCompressedOops ? oop_store(obj_field_addr<narrowOop>(offset), value) :
                      oop_store(obj_field_addr<oop>(offset),       value);
}
inline void oopDesc::obj_field_raw_put(int offset, oop value) {
  UseCompressedOops ?
    encode_store_heap_oop(obj_field_addr<narrowOop>(offset), value) :
    encode_store_heap_oop(obj_field_addr<oop>(offset),       value);
}
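
// Usage sketch (the field offset 12 is illustrative): a Java reference field
// at byte offset 12 in its holder would be read and written as
//
//   oop value = receiver->obj_field(12);   // decodes if oops are compressed
//   receiver->obj_field_put(12, value);    // oop_store applies GC barriers
//
// obj_field_raw_put skips the barriers and is only safe where the caller
// guarantees them some other way (e.g. during certain GC phases).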
inline jbyte oopDesc::byte_field(int offset) const                  { return (jbyte) *byte_field_addr(offset);    }
inline void oopDesc::byte_field_put(int offset, jbyte contents)     { *byte_field_addr(offset) = (jint) contents; }

inline jboolean oopDesc::bool_field(int offset) const               { return (jboolean) *bool_field_addr(offset); }
inline void oopDesc::bool_field_put(int offset, jboolean contents)  { *bool_field_addr(offset) = (jint) contents; }

inline jchar oopDesc::char_field(int offset) const                  { return (jchar) *char_field_addr(offset);    }
inline void oopDesc::char_field_put(int offset, jchar contents)     { *char_field_addr(offset) = (jint) contents; }

inline jint oopDesc::int_field(int offset) const                    { return *int_field_addr(offset);             }
inline void oopDesc::int_field_put(int offset, jint contents)       { *int_field_addr(offset) = contents;         }

inline jshort oopDesc::short_field(int offset) const                { return (jshort) *short_field_addr(offset);  }
inline void oopDesc::short_field_put(int offset, jshort contents)   { *short_field_addr(offset) = (jint) contents; }

inline jlong oopDesc::long_field(int offset) const                  { return *long_field_addr(offset);            }
inline void oopDesc::long_field_put(int offset, jlong contents)     { *long_field_addr(offset) = contents;        }

inline jfloat oopDesc::float_field(int offset) const                { return *float_field_addr(offset);           }
inline void oopDesc::float_field_put(int offset, jfloat contents)   { *float_field_addr(offset) = contents;       }

inline jdouble oopDesc::double_field(int offset) const              { return *double_field_addr(offset);          }
inline void oopDesc::double_field_put(int offset, jdouble contents) { *double_field_addr(offset) = contents;      }

inline address oopDesc::address_field(int offset) const             { return *address_field_addr(offset);         }
inline void oopDesc::address_field_put(int offset, address contents) { *address_field_addr(offset) = contents;    }

inline oop oopDesc::obj_field_acquire(int offset) const {
  return UseCompressedOops ?
             decode_heap_oop((narrowOop)
               OrderAccess::load_acquire(obj_field_addr<narrowOop>(offset)))
           : decode_heap_oop((oop)
               OrderAccess::load_ptr_acquire(obj_field_addr<oop>(offset)));
}
inline void oopDesc::release_obj_field_put(int offset, oop value) {
  UseCompressedOops ?
    oop_store((volatile narrowOop*)obj_field_addr<narrowOop>(offset), value) :
    oop_store((volatile oop*)      obj_field_addr<oop>(offset),       value);
}

inline jbyte oopDesc::byte_field_acquire(int offset) const                  { return OrderAccess::load_acquire(byte_field_addr(offset));     }
inline void oopDesc::release_byte_field_put(int offset, jbyte contents)     { OrderAccess::release_store(byte_field_addr(offset), contents); }

inline jboolean oopDesc::bool_field_acquire(int offset) const               { return OrderAccess::load_acquire(bool_field_addr(offset));     }
inline void oopDesc::release_bool_field_put(int offset, jboolean contents)  { OrderAccess::release_store(bool_field_addr(offset), contents); }

inline jchar oopDesc::char_field_acquire(int offset) const                  { return OrderAccess::load_acquire(char_field_addr(offset));     }
inline void oopDesc::release_char_field_put(int offset, jchar contents)     { OrderAccess::release_store(char_field_addr(offset), contents); }

inline jint oopDesc::int_field_acquire(int offset) const                    { return OrderAccess::load_acquire(int_field_addr(offset));      }
inline void oopDesc::release_int_field_put(int offset, jint contents)       { OrderAccess::release_store(int_field_addr(offset), contents);  }

inline jshort oopDesc::short_field_acquire(int offset) const                { return (jshort)OrderAccess::load_acquire(short_field_addr(offset)); }
inline void oopDesc::release_short_field_put(int offset, jshort contents)   { OrderAccess::release_store(short_field_addr(offset), contents);     }

inline jlong oopDesc::long_field_acquire(int offset) const                  { return OrderAccess::load_acquire(long_field_addr(offset));       }
inline void oopDesc::release_long_field_put(int offset, jlong contents)     { OrderAccess::release_store(long_field_addr(offset), contents);   }

inline jfloat oopDesc::float_field_acquire(int offset) const                { return OrderAccess::load_acquire(float_field_addr(offset));      }
inline void oopDesc::release_float_field_put(int offset, jfloat contents)   { OrderAccess::release_store(float_field_addr(offset), contents);  }

inline jdouble oopDesc::double_field_acquire(int offset) const              { return OrderAccess::load_acquire(double_field_addr(offset));     }
inline void oopDesc::release_double_field_put(int offset, jdouble contents) { OrderAccess::release_store(double_field_addr(offset), contents); }

inline address oopDesc::address_field_acquire(int offset) const             { return (address) OrderAccess::load_ptr_acquire(address_field_addr(offset)); }
inline void oopDesc::release_address_field_put(int offset, address contents) { OrderAccess::release_store_ptr(address_field_addr(offset), contents); }
inline int oopDesc::size_given_klass(Klass* klass) {
  int lh = klass->layout_helper();
  int s  = lh >> LogHeapWordSize;  // deliver size scaled by wordSize

  // lh is now a value computed at class initialization that may hint
  // at the size. For instances, this is positive and equal to the
  // size. For arrays, this is negative and provides log2 of the
  // array element size. For other oops, it is zero and thus requires
  // a virtual call.
  //
  // We go to all this trouble because the size computation is at the
  // heart of phase 2 of mark-compaction, and called for every object,
  // alive or dead. So the speed here is equal in importance to the
  // speed of allocation.

  if (lh <= Klass::_lh_neutral_value) {
    // The most common case is instances; fall through if so.
    if (lh < Klass::_lh_neutral_value) {
      // Second most common case is arrays. We have to fetch the
      // length of the array, shift (multiply) it appropriately,
      // up to wordSize, add the header, and align to object size.
      size_t size_in_bytes;
#ifdef _M_IA64
      // The Windows Itanium Aug 2002 SDK hoists this load above
      // the check for s < 0. An oop at the end of the heap will
      // cause an access violation if this load is performed on a non
      // array oop. Making the reference volatile prohibits this.
      volatile int *array_length;
      array_length = (volatile int *)( (intptr_t)this +
                          arrayOopDesc::length_offset_in_bytes() );
      assert(*array_length > 0, "Integer arithmetic problem somewhere");
      // Put into size_t to avoid overflow.
      size_in_bytes = (size_t) *array_length;
      size_in_bytes = size_in_bytes << Klass::layout_helper_log2_element_size(lh);
#else
      size_t array_length = (size_t) ((arrayOop)this)->length();
      size_in_bytes = array_length << Klass::layout_helper_log2_element_size(lh);
#endif
      size_in_bytes += Klass::layout_helper_header_size(lh);

      // This code could be simplified, but by keeping array_header_in_bytes
      // in units of bytes and doing it this way we can round up just once,
      // skipping the intermediate round to HeapWordSize. Cast the result
      // of round_to to size_t to guarantee unsigned division == right shift.
      s = (int)((size_t)round_to(size_in_bytes, MinObjAlignmentInBytes) /
        HeapWordSize);

      // UseParNewGC, UseParallelGC and UseG1GC can change the length field
      // of an "old copy" of an object array in the young gen so it indicates
      // the grey portion of an already copied array. This will cause the first
      // disjunct below to fail if the two comparands are computed across such
      // a concurrent change.
      // UseParNewGC also runs with promotion labs (which look like int
      // filler arrays) which are subject to changing their declared size
      // when finally retiring a PLAB; this also can cause the first disjunct
      // to fail for another worker thread that is concurrently walking the block
      // offset table. Both these invariant failures are benign for their
      // current uses; we relax the assertion checking to cover these two cases below:
      //   is_objArray() && is_forwarded()   // covers first scenario above
      //   || is_typeArray()                 // covers second scenario above
      // If and when UseParallelGC uses the same obj array oop stealing/chunking
      // technique, we will need to suitably modify the assertion.
      assert((s == klass->oop_size(this)) ||
             (Universe::heap()->is_gc_active() &&
              ((is_typeArray() && UseParNewGC) ||
               (is_objArray()  && is_forwarded() && (UseParNewGC || UseParallelGC || UseG1GC)))),
             "wrong array object size");
    } else {
      // Must be zero, so bite the bullet and take the virtual call.
      s = klass->oop_size(this);
    }
  }

  assert(s % MinObjAlignment == 0, "alignment check");
  assert(s > 0, "Bad size calculated");
  return s;
}
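
// Worked example of the layout-helper fast path (numbers are illustrative
// and depend on build flags such as UseCompressedOops): for a Java int[10]
// on a 64-bit VM with a 16-byte array header, lh encodes header size 16 and
// log2 element size 2, so
//   size_in_bytes = (10 << 2) + 16 = 56,
// and with MinObjAlignmentInBytes == 8 this rounds to 56 bytes == 7 words.
// For an instance, lh is simply the positive instance size in bytes, and the
// shift at the top of the function delivers the word count directly.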
inline int oopDesc::size() {
  return size_given_klass(blueprint());
}

inline bool oopDesc::is_parsable() {
  return blueprint()->oop_is_parsable(this);
}

inline bool oopDesc::is_conc_safe() {
  return blueprint()->oop_is_conc_safe(this);
}

inline void update_barrier_set(void* p, oop v) {
  assert(oopDesc::bs() != NULL, "Uninitialized bs in oop!");
  oopDesc::bs()->write_ref_field(p, v);
}

inline void update_barrier_set_pre(void* p, oop v) {
  oopDesc::bs()->write_ref_field_pre(p, v);
}

template <class T> inline void oop_store(T* p, oop v) {
  if (always_do_update_barrier) {
    oop_store((volatile T*)p, v);
  } else {
    update_barrier_set_pre(p, v);
    oopDesc::encode_store_heap_oop(p, v);
    update_barrier_set(p, v);
  }
}

template <class T> inline void oop_store(volatile T* p, oop v) {
  update_barrier_set_pre((void*)p, v);
  // Used by release_obj_field_put, so use release_store_ptr.
  oopDesc::release_encode_store_heap_oop(p, v);
  update_barrier_set((void*)p, v);
}
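
// For illustration, with UseCompressedOops the non-volatile oop_store above
// amounts to the sequence (a sketch of the order, not the generated code):
//   update_barrier_set_pre(p, v);        // pre-barrier (e.g. G1 SATB)
//   *p = oopDesc::encode_heap_oop(v);    // compress and store the value
//   update_barrier_set(p, v);            // post-barrier (e.g. card mark)
// The pre/post split is what lets collectors like G1 observe both the old
// and the new value of the field.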
template <class T> inline void oop_store_without_check(T* p, oop v) {
  // XXX YSR FIX ME!!!
  if (always_do_update_barrier) {
    oop_store(p, v);
  } else {
    assert(!Universe::heap()->barrier_set()->write_ref_needs_barrier(p, v),
           "oop store without store check failed");
    oopDesc::encode_store_heap_oop(p, v);
  }
}

// When it absolutely has to get there.
template <class T> inline void oop_store_without_check(volatile T* p, oop v) {
  // XXX YSR FIX ME!!!
  if (always_do_update_barrier) {
    oop_store(p, v);
  } else {
    assert(!Universe::heap()->barrier_set()->write_ref_needs_barrier((T*)p, v),
           "oop store without store check failed");
    oopDesc::release_encode_store_heap_oop(p, v);
  }
}

// Should replace *addr = oop assignments where addr type depends on UseCompressedOops
// (without having to remember the function name this calls).
inline void oop_store_raw(HeapWord* addr, oop value) {
  if (UseCompressedOops) {
    oopDesc::encode_store_heap_oop((narrowOop*)addr, value);
  } else {
    oopDesc::encode_store_heap_oop((oop*)addr, value);
  }
}
// Used only for markSweep, scavenging
inline bool oopDesc::is_gc_marked() const {
  return mark()->is_marked();
}

inline bool oopDesc::is_locked() const {
  return mark()->is_locked();
}

inline bool oopDesc::is_unlocked() const {
  return mark()->is_unlocked();
}

inline bool oopDesc::has_bias_pattern() const {
  return mark()->has_bias_pattern();
}

inline bool check_obj_alignment(oop obj) {
  return (intptr_t)obj % MinObjAlignmentInBytes == 0;
}

// used only for asserts
inline bool oopDesc::is_oop(bool ignore_mark_word) const {
  oop obj = (oop) this;
  if (!check_obj_alignment(obj)) return false;
  if (!Universe::heap()->is_in_reserved(obj)) return false;
  // obj is aligned and accessible in heap
  // try to find metaclass cycle safely without seg faulting on bad input
  // we should reach klassKlassObj by following klass link at most 3 times
  for (int i = 0; i < 3; i++) {
    obj = obj->klass_or_null();
    // klass should be aligned and in permspace
    if (!check_obj_alignment(obj)) return false;
    if (!Universe::heap()->is_in_permanent(obj)) return false;
  }
  if (obj != Universe::klassKlassObj()) {
    // During a dump, the _klassKlassObj moved to a shared space.
    if (DumpSharedSpaces && Universe::klassKlassObj()->is_shared()) {
      return true;
    }
    return false;
  }

  // Header verification: the mark is typically non-NULL. If we're
  // at a safepoint, it must not be null.
  // Outside of a safepoint, the header could be changing (for example,
  // another thread could be inflating a lock on this object).
  if (ignore_mark_word) {
    return true;
  }
  if (mark() != NULL) {
    return true;
  }
  return !SafepointSynchronize::is_at_safepoint();
}
// used only for asserts
inline bool oopDesc::is_oop_or_null(bool ignore_mark_word) const {
  return this == NULL ? true : is_oop(ignore_mark_word);
}

#ifndef PRODUCT
// used only for asserts
inline bool oopDesc::is_unlocked_oop() const {
  if (!Universe::heap()->is_in_reserved(this)) return false;
  return mark()->is_unlocked();
}
#endif // PRODUCT

inline void oopDesc::follow_header() {
  if (UseCompressedOops) {
    MarkSweep::mark_and_push(compressed_klass_addr());
  } else {
    MarkSweep::mark_and_push(klass_addr());
  }
}

inline void oopDesc::follow_contents(void) {
  assert(is_gc_marked(), "should be marked");
  blueprint()->oop_follow_contents(this);
}

// Used by scavengers

inline bool oopDesc::is_forwarded() const {
  // The extra heap check is needed since the obj might be locked, in which
  // case the mark would point to a stack location and have the sentinel bit
  // cleared
  return mark()->is_marked();
}

// Used by scavengers
inline void oopDesc::forward_to(oop p) {
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversible");
  set_mark(m);
}

// Used by parallel scavengers
inline bool oopDesc::cas_forward_to(oop p, markOop compare) {
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversible");
  return cas_set_mark(m, compare) == compare;
}

// Note that the forwardee is not the same thing as the displaced_mark.
// The forwardee is used when copying during scavenge and mark-sweep.
// It does need to clear the low two locking- and GC-related bits.
inline oop oopDesc::forwardee() const {
  return (oop) mark()->decode_pointer();
}
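
// Sketch of the encoding (see markOop.hpp for the authoritative layout):
// encode_pointer_as_mark(p) stores p in the mark word with the low two bits
// set to the "marked" pattern, roughly
//   (markOop)((uintptr_t)p | marked_value)
// so decode_pointer() only has to mask those two bits back off. This is
// also why is_forwarded() can simply test mark()->is_marked().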
inline bool oopDesc::has_displaced_mark() const {
  return mark()->has_displaced_mark_helper();
}

inline markOop oopDesc::displaced_mark() const {
  return mark()->displaced_mark_helper();
}

inline void oopDesc::set_displaced_mark(markOop m) {
  mark()->set_displaced_mark_helper(m);
}

// The following method needs to be MT safe.
inline int oopDesc::age() const {
  assert(!is_forwarded(), "Attempt to read age from forwarded mark");
  if (has_displaced_mark()) {
    return displaced_mark()->age();
  } else {
    return mark()->age();
  }
}

inline void oopDesc::incr_age() {
  assert(!is_forwarded(), "Attempt to increment age of forwarded mark");
  if (has_displaced_mark()) {
    set_displaced_mark(displaced_mark()->incr_age());
  } else {
    set_mark(mark()->incr_age());
  }
}

inline intptr_t oopDesc::identity_hash() {
  // Fast case; if the object is unlocked and the hash value is set, no locking is needed.
  // Note: The mark must be read into a local variable to avoid concurrent updates.
  markOop mrk = mark();
  if (mrk->is_unlocked() && !mrk->has_no_hash()) {
    return mrk->hash();
  } else if (mrk->is_marked()) {
    return mrk->hash();
  } else {
    return slow_identity_hash();
  }
}

inline void oopDesc::oop_iterate_header(OopClosure* blk) {
  if (UseCompressedOops) {
    blk->do_oop(compressed_klass_addr());
  } else {
    blk->do_oop(klass_addr());
  }
}

inline void oopDesc::oop_iterate_header(OopClosure* blk, MemRegion mr) {
  if (UseCompressedOops) {
    if (mr.contains(compressed_klass_addr())) {
      blk->do_oop(compressed_klass_addr());
    }
  } else {
    if (mr.contains(klass_addr())) blk->do_oop(klass_addr());
  }
}

inline int oopDesc::adjust_pointers() {
  debug_only(int check_size = size());
  int s = blueprint()->oop_adjust_pointers(this);
  assert(s == check_size, "should be the same");
  return s;
}

inline void oopDesc::adjust_header() {
  if (UseCompressedOops) {
    MarkSweep::adjust_pointer(compressed_klass_addr());
  } else {
    MarkSweep::adjust_pointer(klass_addr());
  }
}

#define OOP_ITERATE_DEFN(OopClosureType, nv_suffix)                     \
                                                                        \
inline int oopDesc::oop_iterate(OopClosureType* blk) {                  \
  SpecializationStats::record_call();                                   \
  return blueprint()->oop_oop_iterate##nv_suffix(this, blk);            \
}                                                                       \
                                                                        \
inline int oopDesc::oop_iterate(OopClosureType* blk, MemRegion mr) {    \
  SpecializationStats::record_call();                                   \
  return blueprint()->oop_oop_iterate##nv_suffix##_m(this, blk, mr);    \
}

ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_DEFN)
ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_DEFN)
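
// Expansion sketch (MyClosure and _nv below are placeholders for the closure
// types and suffixes supplied by the ALL_OOP_OOP_ITERATE_CLOSURES_* macros):
//
//   inline int oopDesc::oop_iterate(MyClosure* blk) {
//     SpecializationStats::record_call();
//     return blueprint()->oop_oop_iterate_nv(this, blk);
//   }
//
// The non-virtual (_nv) variants let specialized closures avoid virtual
// dispatch inside the per-object iteration hot path.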
#ifndef SERIALGC
#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)           \
                                                                        \
inline int oopDesc::oop_iterate_backwards(OopClosureType* blk) {        \
  SpecializationStats::record_call();                                   \
  return blueprint()->oop_oop_iterate_backwards##nv_suffix(this, blk);  \
}

ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_BACKWARDS_DEFN)
ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_BACKWARDS_DEFN)
#endif // !SERIALGC

inline bool oopDesc::is_shared() const {
  return CompactingPermGenGen::is_shared(this);
}

inline bool oopDesc::is_shared_readonly() const {
  return CompactingPermGenGen::is_shared_readonly(this);
}

inline bool oopDesc::is_shared_readwrite() const {
  return CompactingPermGenGen::is_shared_readwrite(this);
}