/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

/*
 * This file has been modified by Loongson Technology in 2015. These
 * modifications are Copyright (c) 2015 Loongson Technology, and are made
 * available on the same license terms set forth above.
 */
#ifndef SHARE_VM_OOPS_OOP_INLINE_HPP
#define SHARE_VM_OOPS_OOP_INLINE_HPP

#include "gc_implementation/shared/ageTable.hpp"
#include "gc_implementation/shared/markSweep.inline.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/barrierSet.inline.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/generation.hpp"
#include "memory/specialized_oop_closures.hpp"
#include "oops/arrayKlass.hpp"
#include "oops/arrayOop.hpp"
#include "oops/klass.inline.hpp"
#include "oops/markOop.inline.hpp"
#include "oops/oop.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "utilities/macros.hpp"
#ifdef TARGET_ARCH_x86
# include "bytes_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "bytes_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "bytes_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "bytes_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "bytes_ppc.hpp"
#endif
#ifdef TARGET_ARCH_mips
# include "bytes_mips.hpp"
#endif

// Implementation of all inlined member functions defined in oop.hpp.
// We need a separate file to avoid circular references.
inline void oopDesc::release_set_mark(markOop m) {
  OrderAccess::release_store_ptr(&_mark, m);
}

inline markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
  return (markOop) Atomic::cmpxchg_ptr(new_mark, &_mark, old_mark);
}

inline Klass* oopDesc::klass() const {
  if (UseCompressedClassPointers) {
    return Klass::decode_klass_not_null(_metadata._compressed_klass);
  } else {
    return _metadata._klass;
  }
}

inline Klass* oopDesc::klass_or_null() const volatile {
  // can be NULL in CMS
  if (UseCompressedClassPointers) {
    return Klass::decode_klass(_metadata._compressed_klass);
  } else {
    return _metadata._klass;
  }
}

inline int oopDesc::klass_gap_offset_in_bytes() {
  assert(UseCompressedClassPointers, "only applicable to compressed klass pointers");
  return oopDesc::klass_offset_in_bytes() + sizeof(narrowKlass);
}
inline Klass** oopDesc::klass_addr() {
  // Only used internally and with CMS; does not work with
  // UseCompressedClassPointers.
  assert(!UseCompressedClassPointers, "only supported with uncompressed klass pointers");
  return (Klass**) &_metadata._klass;
}

inline narrowKlass* oopDesc::compressed_klass_addr() {
  assert(UseCompressedClassPointers, "only called by compressed klass pointers");
  return &_metadata._compressed_klass;
}

inline void oopDesc::set_klass(Klass* k) {
  // since klasses are promoted no store check is needed
  assert(Universe::is_bootstrapping() || k != NULL, "must be a real Klass*");
  assert(Universe::is_bootstrapping() || k->is_klass(), "not a Klass*");
  if (UseCompressedClassPointers) {
    *compressed_klass_addr() = Klass::encode_klass_not_null(k);
  } else {
    *klass_addr() = k;
  }
}

inline int oopDesc::klass_gap() const {
  return *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes());
}

inline void oopDesc::set_klass_gap(int v) {
  if (UseCompressedClassPointers) {
    *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes()) = v;
  }
}

inline void oopDesc::set_klass_to_list_ptr(oop k) {
  // This is only to be used during GC, for from-space objects, so no
  // barrier is needed.
  if (UseCompressedClassPointers) {
    _metadata._compressed_klass = (narrowKlass)encode_heap_oop(k);  // may be null (parnew overflow handling)
  } else {
    _metadata._klass = (Klass*)(address)k;
  }
}

inline oop oopDesc::list_ptr_from_klass() {
  // This is only to be used during GC, for from-space objects.
  if (UseCompressedClassPointers) {
    return decode_heap_oop((narrowOop)_metadata._compressed_klass);
  } else {
    // Special case for GC
    return (oop)(address)_metadata._klass;
  }
}
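
// Round-trip sketch (an editor's illustration, not part of the original
// file): during ParNew overflow handling the collector links from-space
// objects into a list through the klass slot, so the two helpers above
// must be inverses:
//
//   // obj and next are hypothetical from-space objects
//   obj->set_klass_to_list_ptr(next);                       // klass slot holds a list link
//   assert(obj->list_ptr_from_klass() == next, "round trip");
//   // the real Klass* is reinstated (via set_klass) before normal use resumes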
inline void oopDesc::init_mark() { set_mark(markOopDesc::prototype_for_object(this)); }

inline bool oopDesc::is_a(Klass* k) const { return klass()->is_subtype_of(k); }

inline bool oopDesc::is_instance()        const { return klass()->oop_is_instance(); }
inline bool oopDesc::is_instanceMirror()  const { return klass()->oop_is_instanceMirror(); }
inline bool oopDesc::is_instanceRef()     const { return klass()->oop_is_instanceRef(); }
inline bool oopDesc::is_array()           const { return klass()->oop_is_array(); }
inline bool oopDesc::is_objArray()        const { return klass()->oop_is_objArray(); }
inline bool oopDesc::is_typeArray()       const { return klass()->oop_is_typeArray(); }

inline void* oopDesc::field_base(int offset) const { return (void*)&((char*)this)[offset]; }

template <class T> inline T* oopDesc::obj_field_addr(int offset) const { return (T*)field_base(offset); }
inline Metadata** oopDesc::metadata_field_addr(int offset) const { return (Metadata**)field_base(offset); }
inline jbyte*    oopDesc::byte_field_addr(int offset)    const { return (jbyte*)   field_base(offset); }
inline jchar*    oopDesc::char_field_addr(int offset)    const { return (jchar*)   field_base(offset); }
inline jboolean* oopDesc::bool_field_addr(int offset)    const { return (jboolean*)field_base(offset); }
inline jint*     oopDesc::int_field_addr(int offset)     const { return (jint*)    field_base(offset); }
inline jshort*   oopDesc::short_field_addr(int offset)   const { return (jshort*)  field_base(offset); }
inline jlong*    oopDesc::long_field_addr(int offset)    const { return (jlong*)   field_base(offset); }
inline jfloat*   oopDesc::float_field_addr(int offset)   const { return (jfloat*)  field_base(offset); }
inline jdouble*  oopDesc::double_field_addr(int offset)  const { return (jdouble*) field_base(offset); }
inline address*  oopDesc::address_field_addr(int offset) const { return (address*) field_base(offset); }

// Functions for getting and setting oops within instance objects.
// If the oops are compressed, the type passed to these overloaded functions
// is narrowOop. All functions are overloaded so they can be called by
// template functions without conditionals (the compiler instantiates via
// the right type and inlines the appropriate code).

inline bool oopDesc::is_null(oop obj)       { return obj == NULL; }
inline bool oopDesc::is_null(narrowOop obj) { return obj == 0; }

// Algorithm for encoding and decoding oops from 64 bit pointers to 32 bit
// offset from the heap base. Saving the check for null can save instructions
// in inner GC loops so these are separated.

inline bool check_obj_alignment(oop obj) {
  return cast_from_oop<intptr_t>(obj) % MinObjAlignmentInBytes == 0;
}

inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
  assert(!is_null(v), "oop value can never be zero");
  assert(check_obj_alignment(v), "Address not aligned");
  assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
  address base = Universe::narrow_oop_base();
  int    shift = Universe::narrow_oop_shift();
  uint64_t  pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
  assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
  uint64_t result = pd >> shift;
  assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
  assert(decode_heap_oop(result) == v, "reversibility");
  return (narrowOop)result;
}

inline narrowOop oopDesc::encode_heap_oop(oop v) {
  return (is_null(v)) ? (narrowOop)0 : encode_heap_oop_not_null(v);
}

inline oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
  assert(!is_null(v), "narrow oop value can never be zero");
  address base = Universe::narrow_oop_base();
  int    shift = Universe::narrow_oop_shift();
  oop result = (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
  assert(check_obj_alignment(result), err_msg("address not aligned: " INTPTR_FORMAT, p2i((void*) result)));
  return result;
}

inline oop oopDesc::decode_heap_oop(narrowOop v) {
  return is_null(v) ? (oop)NULL : decode_heap_oop_not_null(v);
}

inline oop oopDesc::decode_heap_oop_not_null(oop v) { return v; }
inline oop oopDesc::decode_heap_oop(oop v)          { return v; }
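
// Worked example (hypothetical numbers, for illustration only): with
// narrow_oop_base B = 0x0000000700000000 and narrow_oop_shift S = 3
// (8-byte object alignment), the heap address A = B + 0x7fff8 encodes as
// (A - B) >> S = 0xffff and decodes back as B + (0xffff << 3) == A.
// With a zero base and zero shift (heap reserved below 4GB), a narrow oop
// is simply the low 32 bits of the address.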
// Load an oop out of the Java heap as is without decoding.
// Called by GC to check for null before decoding.
inline oop       oopDesc::load_heap_oop(oop* p)       { return *p; }
inline narrowOop oopDesc::load_heap_oop(narrowOop* p) { return *p; }

// Load and decode an oop out of the Java heap into a wide oop.
inline oop oopDesc::load_decode_heap_oop_not_null(oop* p) { return *p; }
inline oop oopDesc::load_decode_heap_oop_not_null(narrowOop* p) {
  return decode_heap_oop_not_null(*p);
}

// Load and decode an oop out of the heap accepting null.
inline oop oopDesc::load_decode_heap_oop(oop* p) { return *p; }
inline oop oopDesc::load_decode_heap_oop(narrowOop* p) {
  return decode_heap_oop(*p);
}

// Store an already encoded heap oop into the heap.
inline void oopDesc::store_heap_oop(oop* p, oop v)             { *p = v; }
inline void oopDesc::store_heap_oop(narrowOop* p, narrowOop v) { *p = v; }

// Encode and store a heap oop.
inline void oopDesc::encode_store_heap_oop_not_null(narrowOop* p, oop v) {
  *p = encode_heap_oop_not_null(v);
}
inline void oopDesc::encode_store_heap_oop_not_null(oop* p, oop v) { *p = v; }

// Encode and store a heap oop allowing for null.
inline void oopDesc::encode_store_heap_oop(narrowOop* p, oop v) {
  *p = encode_heap_oop(v);
}
inline void oopDesc::encode_store_heap_oop(oop* p, oop v) { *p = v; }

// Store heap oop as is for volatile fields.
inline void oopDesc::release_store_heap_oop(volatile oop* p, oop v) {
  OrderAccess::release_store_ptr(p, v);
}
inline void oopDesc::release_store_heap_oop(volatile narrowOop* p, narrowOop v) {
  OrderAccess::release_store(p, v);
}

inline void oopDesc::release_encode_store_heap_oop_not_null(volatile narrowOop* p, oop v) {
  // heap oop is not pointer sized.
  OrderAccess::release_store(p, encode_heap_oop_not_null(v));
}
inline void oopDesc::release_encode_store_heap_oop_not_null(volatile oop* p, oop v) {
  OrderAccess::release_store_ptr(p, v);
}

inline void oopDesc::release_encode_store_heap_oop(volatile oop* p, oop v) {
  OrderAccess::release_store_ptr(p, v);
}
inline void oopDesc::release_encode_store_heap_oop(volatile narrowOop* p, oop v) {
  OrderAccess::release_store(p, encode_heap_oop(v));
}

// These functions are only used to exchange oop fields in instances,
// not headers.
inline oop oopDesc::atomic_exchange_oop(oop exchange_value, volatile HeapWord* dest) {
  if (UseCompressedOops) {
    // encode exchange value from oop to narrowOop
    narrowOop val = encode_heap_oop(exchange_value);
    narrowOop old = (narrowOop)Atomic::xchg(val, (narrowOop*)dest);
    // decode old from narrowOop to oop
    return decode_heap_oop(old);
  } else {
    return (oop)Atomic::xchg_ptr(exchange_value, (oop*)dest);
  }
}
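
// Usage sketch (an editor's illustration; 'obj' and 'off' are hypothetical):
// this is the primitive behind unconditional reference swaps. Note that it
// performs no GC barriers itself, so callers must handle those separately:
//
//   HeapWord* addr = (HeapWord*)((intptr_t)obj + off);
//   oop old_val = oopDesc::atomic_exchange_oop(new_val, addr);
//   // caller applies the post-write barrier for addr here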
// To get or put an oop field of an instance we must first check
// whether oops are compressed and decode or encode accordingly.
inline oop oopDesc::obj_field(int offset) const {
  return UseCompressedOops ?
    load_decode_heap_oop(obj_field_addr<narrowOop>(offset)) :
    load_decode_heap_oop(obj_field_addr<oop>(offset));
}

inline volatile oop oopDesc::obj_field_volatile(int offset) const {
  volatile oop value = obj_field(offset);
  OrderAccess::acquire();
  return value;
}

inline void oopDesc::obj_field_put(int offset, oop value) {
  UseCompressedOops ? oop_store(obj_field_addr<narrowOop>(offset), value) :
                      oop_store(obj_field_addr<oop>(offset),       value);
}

inline Metadata* oopDesc::metadata_field(int offset) const {
  return *metadata_field_addr(offset);
}

inline void oopDesc::metadata_field_put(int offset, Metadata* value) {
  *metadata_field_addr(offset) = value;
}

inline void oopDesc::obj_field_put_raw(int offset, oop value) {
  UseCompressedOops ?
    encode_store_heap_oop(obj_field_addr<narrowOop>(offset), value) :
    encode_store_heap_oop(obj_field_addr<oop>(offset),       value);
}

inline void oopDesc::obj_field_put_volatile(int offset, oop value) {
  OrderAccess::release();
  obj_field_put(offset, value);
  OrderAccess::fence();
}
inline jbyte oopDesc::byte_field(int offset) const                  { return (jbyte) *byte_field_addr(offset); }
inline void oopDesc::byte_field_put(int offset, jbyte contents)     { *byte_field_addr(offset) = (jint) contents; }

inline jboolean oopDesc::bool_field(int offset) const               { return (jboolean) *bool_field_addr(offset); }
inline void oopDesc::bool_field_put(int offset, jboolean contents)  { *bool_field_addr(offset) = (jint) contents; }

inline jchar oopDesc::char_field(int offset) const                  { return (jchar) *char_field_addr(offset); }
inline void oopDesc::char_field_put(int offset, jchar contents)     { *char_field_addr(offset) = (jint) contents; }

inline jint oopDesc::int_field(int offset) const                    { return *int_field_addr(offset); }
inline void oopDesc::int_field_put(int offset, jint contents)       { *int_field_addr(offset) = contents; }

inline jshort oopDesc::short_field(int offset) const                { return (jshort) *short_field_addr(offset); }
inline void oopDesc::short_field_put(int offset, jshort contents)   { *short_field_addr(offset) = (jint) contents; }

inline jlong oopDesc::long_field(int offset) const                  { return *long_field_addr(offset); }
inline void oopDesc::long_field_put(int offset, jlong contents)     { *long_field_addr(offset) = contents; }

inline jfloat oopDesc::float_field(int offset) const                { return *float_field_addr(offset); }
inline void oopDesc::float_field_put(int offset, jfloat contents)   { *float_field_addr(offset) = contents; }

inline jdouble oopDesc::double_field(int offset) const              { return *double_field_addr(offset); }
inline void oopDesc::double_field_put(int offset, jdouble contents) { *double_field_addr(offset) = contents; }

inline address oopDesc::address_field(int offset) const              { return *address_field_addr(offset); }
inline void oopDesc::address_field_put(int offset, address contents) { *address_field_addr(offset) = contents; }

inline oop oopDesc::obj_field_acquire(int offset) const {
  return UseCompressedOops ?
             decode_heap_oop((narrowOop)
               OrderAccess::load_acquire(obj_field_addr<narrowOop>(offset)))
           : decode_heap_oop((oop)
               OrderAccess::load_ptr_acquire(obj_field_addr<oop>(offset)));
}

inline void oopDesc::release_obj_field_put(int offset, oop value) {
  UseCompressedOops ?
    oop_store((volatile narrowOop*)obj_field_addr<narrowOop>(offset), value) :
    oop_store((volatile oop*)      obj_field_addr<oop>(offset),       value);
}

inline jbyte oopDesc::byte_field_acquire(int offset) const                  { return OrderAccess::load_acquire(byte_field_addr(offset)); }
inline void oopDesc::release_byte_field_put(int offset, jbyte contents)     { OrderAccess::release_store(byte_field_addr(offset), contents); }

inline jboolean oopDesc::bool_field_acquire(int offset) const               { return OrderAccess::load_acquire(bool_field_addr(offset)); }
inline void oopDesc::release_bool_field_put(int offset, jboolean contents)  { OrderAccess::release_store(bool_field_addr(offset), contents); }

inline jchar oopDesc::char_field_acquire(int offset) const                  { return OrderAccess::load_acquire(char_field_addr(offset)); }
inline void oopDesc::release_char_field_put(int offset, jchar contents)     { OrderAccess::release_store(char_field_addr(offset), contents); }

inline jint oopDesc::int_field_acquire(int offset) const                    { return OrderAccess::load_acquire(int_field_addr(offset)); }
inline void oopDesc::release_int_field_put(int offset, jint contents)       { OrderAccess::release_store(int_field_addr(offset), contents); }

inline jshort oopDesc::short_field_acquire(int offset) const                { return (jshort)OrderAccess::load_acquire(short_field_addr(offset)); }
inline void oopDesc::release_short_field_put(int offset, jshort contents)   { OrderAccess::release_store(short_field_addr(offset), contents); }

inline jlong oopDesc::long_field_acquire(int offset) const                  { return OrderAccess::load_acquire(long_field_addr(offset)); }
inline void oopDesc::release_long_field_put(int offset, jlong contents)     { OrderAccess::release_store(long_field_addr(offset), contents); }

inline jfloat oopDesc::float_field_acquire(int offset) const                { return OrderAccess::load_acquire(float_field_addr(offset)); }
inline void oopDesc::release_float_field_put(int offset, jfloat contents)   { OrderAccess::release_store(float_field_addr(offset), contents); }

inline jdouble oopDesc::double_field_acquire(int offset) const              { return OrderAccess::load_acquire(double_field_addr(offset)); }
inline void oopDesc::release_double_field_put(int offset, jdouble contents) { OrderAccess::release_store(double_field_addr(offset), contents); }

inline address oopDesc::address_field_acquire(int offset) const              { return (address) OrderAccess::load_ptr_acquire(address_field_addr(offset)); }
inline void oopDesc::release_address_field_put(int offset, address contents) { OrderAccess::release_store_ptr(address_field_addr(offset), contents); }
inline int oopDesc::size_given_klass(Klass* klass) {
  int lh = klass->layout_helper();
  int s;

  // lh is now a value computed at class initialization that may hint
  // at the size. For instances, this is positive and equal to the
  // size. For arrays, this is negative and provides log2 of the
  // array element size. For other oops, it is zero and thus requires
  // a virtual call.
  //
  // We go to all this trouble because the size computation is at the
  // heart of phase 2 of mark-compaction, and called for every object,
  // alive or dead. So the speed here is equal in importance to the
  // speed of allocation.

  if (lh > Klass::_lh_neutral_value) {
    if (!Klass::layout_helper_needs_slow_path(lh)) {
      s = lh >> LogHeapWordSize;  // deliver size scaled by wordSize
    } else {
      s = klass->oop_size(this);
    }
  } else if (lh <= Klass::_lh_neutral_value) {
    // The most common case is instances; fall through if so.
    if (lh < Klass::_lh_neutral_value) {
      // Second most common case is arrays. We have to fetch the
      // length of the array, shift (multiply) it appropriately,
      // up to wordSize, add the header, and align to object size.
      size_t size_in_bytes;
#ifdef _M_IA64
      // The Windows Itanium Aug 2002 SDK hoists this load above
      // the check for s < 0. An oop at the end of the heap will
      // cause an access violation if this load is performed on a non
      // array oop. Making the reference volatile prohibits this.
      volatile int *array_length;
      array_length = (volatile int *)( (intptr_t)this +
                          arrayOopDesc::length_offset_in_bytes() );
      assert(*array_length > 0, "Integer arithmetic problem somewhere");
      // Put into size_t to avoid overflow.
      size_in_bytes = (size_t) *array_length;
      size_in_bytes = size_in_bytes << Klass::layout_helper_log2_element_size(lh);
#else
      size_t array_length = (size_t) ((arrayOop)this)->length();
      size_in_bytes = array_length << Klass::layout_helper_log2_element_size(lh);
#endif
      size_in_bytes += Klass::layout_helper_header_size(lh);

      // This code could be simplified, but by keeping array_header_in_bytes
      // in units of bytes and doing it this way we can round up just once,
      // skipping the intermediate round to HeapWordSize. Cast the result
      // of round_to to size_t to guarantee unsigned division == right shift.
      s = (int)((size_t)round_to(size_in_bytes, MinObjAlignmentInBytes) /
        HeapWordSize);

      // UseParNewGC, UseParallelGC and UseG1GC can change the length field
      // of an "old copy" of an object array in the young gen so it indicates
      // the grey portion of an already copied array. This will cause the first
      // disjunct below to fail if the two comparands are computed across such
      // a concurrent change.
      // UseParNewGC also runs with promotion labs (which look like int
      // filler arrays) which are subject to changing their declared size
      // when finally retiring a PLAB; this also can cause the first disjunct
      // to fail for another worker thread that is concurrently walking the block
      // offset table. Both these invariant failures are benign for their
      // current uses; we relax the assertion checking to cover these two cases below:
      //   is_objArray() && is_forwarded()   // covers first scenario above
      //   || is_typeArray()                 // covers second scenario above
      // If and when UseParallelGC uses the same obj array oop stealing/chunking
      // technique, we will need to suitably modify the assertion.
      assert((s == klass->oop_size(this)) ||
             (Universe::heap()->is_gc_active() &&
              ((is_typeArray() && UseParNewGC) ||
               (is_objArray()  && is_forwarded() && (UseParNewGC || UseParallelGC || UseG1GC)))),
             "wrong array object size");
    } else {
      // Must be zero, so bite the bullet and take the virtual call.
      s = klass->oop_size(this);
    }
  }

  assert(s % MinObjAlignment == 0, "alignment check");
  assert(s > 0, "Bad size calculated");
  return s;
}
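
// Worked example of the layout helper encoding (an editor's illustration,
// assuming a 64-bit VM with a 16-byte array header and 8-byte alignment):
// an instance whose layout helper is 24 occupies 24 >> LogHeapWordSize = 3
// words. For an int[10], the (negative) layout helper yields a log2 element
// size of 2 and a header size of 16, so size_in_bytes = (10 << 2) + 16 = 56,
// which is already aligned, giving 56 / 8 = 7 words.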
inline int oopDesc::size() {
  return size_given_klass(klass());
}

inline void update_barrier_set(void* p, oop v, bool release = false) {
  assert(oopDesc::bs() != NULL, "Uninitialized bs in oop!");
  oopDesc::bs()->write_ref_field(p, v, release);
}

template <class T> inline void update_barrier_set_pre(T* p, oop v) {
  oopDesc::bs()->write_ref_field_pre(p, v);
}

template <class T> inline void oop_store(T* p, oop v) {
  if (always_do_update_barrier) {
    oop_store((volatile T*)p, v);
  } else {
    update_barrier_set_pre(p, v);
    oopDesc::encode_store_heap_oop(p, v);
    // always_do_update_barrier == false =>
    // Either we are at a safepoint (in GC) or CMS is not used. In both
    // cases it's unnecessary to mark the card as dirty with release semantics.
    update_barrier_set((void*)p, v, false /* release */);  // cast away type
  }
}

template <class T> inline void oop_store(volatile T* p, oop v) {
  update_barrier_set_pre((T*)p, v);   // cast away volatile
  // Used by release_obj_field_put, so use release_store_ptr.
  oopDesc::release_encode_store_heap_oop(p, v);
  // When using CMS we must mark the card corresponding to p as dirty
  // with release semantics to prevent CMS from seeing the dirty card but
  // not the new value v at p due to reordering of the two
  // stores. Note that CMS has a concurrent precleaning phase, where
  // it reads the card table while the Java threads are running.
  update_barrier_set((void*)p, v, true /* release */);  // cast away type
}
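
// Ordering sketch (an editor's illustration of the comment above): the two
// stores issued by the volatile oop_store are
//
//   (1) *p      = v       // install the new reference
//   (2) card(p) = dirty   // mark the card
//
// If (2) became visible before (1), the CMS precleaning thread could see the
// dirty card, rescan the field, find only the old value, and clean the card;
// the later arrival of (1) would then go unnoticed. Release semantics on (2)
// keep (1) visible first.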
// Should replace *addr = oop assignments where addr type depends on UseCompressedOops
// (without having to remember the function name this calls).
inline void oop_store_raw(HeapWord* addr, oop value) {
  if (UseCompressedOops) {
    oopDesc::encode_store_heap_oop((narrowOop*)addr, value);
  } else {
    oopDesc::encode_store_heap_oop((oop*)addr, value);
  }
}

inline oop oopDesc::atomic_compare_exchange_oop(oop exchange_value,
                                                volatile HeapWord* dest,
                                                oop compare_value,
                                                bool prebarrier) {
  if (UseCompressedOops) {
    if (prebarrier) {
      update_barrier_set_pre((narrowOop*)dest, exchange_value);
    }
    // encode exchange and compare value from oop to narrowOop
    narrowOop val = encode_heap_oop(exchange_value);
    narrowOop cmp = encode_heap_oop(compare_value);

    narrowOop old = (narrowOop) Atomic::cmpxchg(val, (narrowOop*)dest, cmp);
    // decode old from narrowOop to oop
    return decode_heap_oop(old);
  } else {
    if (prebarrier) {
      update_barrier_set_pre((oop*)dest, exchange_value);
    }
    return (oop)Atomic::cmpxchg_ptr(exchange_value, (oop*)dest, compare_value);
  }
}
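
// Usage sketch (an editor's illustration; 'obj' and 'off' are hypothetical):
// the primitive behind conditional reference updates. Callers typically pass
// prebarrier = true; write_ref_field_pre is a no-op for barrier sets that do
// not need a pre-write (SATB) barrier:
//
//   HeapWord* addr = (HeapWord*)((intptr_t)obj + off);
//   oop witness = oopDesc::atomic_compare_exchange_oop(val, addr, cmp, true);
//   bool success = (witness == cmp);   // post-write barrier is the caller's job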
// Used only for markSweep, scavenging.
inline bool oopDesc::is_gc_marked() const {
  return mark()->is_marked();
}

inline bool oopDesc::is_locked() const {
  return mark()->is_locked();
}

inline bool oopDesc::is_unlocked() const {
  return mark()->is_unlocked();
}

inline bool oopDesc::has_bias_pattern() const {
  return mark()->has_bias_pattern();
}

// used only for asserts
inline bool oopDesc::is_oop(bool ignore_mark_word) const {
  oop obj = (oop) this;
  if (!check_obj_alignment(obj)) return false;
  if (!Universe::heap()->is_in_reserved(obj)) return false;
  // obj is aligned and accessible in heap
  if (Universe::heap()->is_in_reserved(obj->klass_or_null())) return false;

  // Header verification: the mark is typically non-NULL. If we're
  // at a safepoint, it must not be null.
  // Outside of a safepoint, the header could be changing (for example,
  // another thread could be inflating a lock on this object).
  if (ignore_mark_word) {
    return true;
  }
  if (mark() != NULL) {
    return true;
  }
  return !SafepointSynchronize::is_at_safepoint();
}

// used only for asserts
inline bool oopDesc::is_oop_or_null(bool ignore_mark_word) const {
  return this == NULL ? true : is_oop(ignore_mark_word);
}

#ifndef PRODUCT
// used only for asserts
inline bool oopDesc::is_unlocked_oop() const {
  if (!Universe::heap()->is_in_reserved(this)) return false;
  return mark()->is_unlocked();
}
#endif // PRODUCT
inline void oopDesc::follow_contents(void) {
  assert (is_gc_marked(), "should be marked");
  klass()->oop_follow_contents(this);
}

// Used by scavengers.

inline bool oopDesc::is_forwarded() const {
  // The extra heap check is needed since the obj might be locked, in which case the
  // mark would point to a stack location and have the sentinel bit cleared
  return mark()->is_marked();
}

// Used by scavengers.
inline void oopDesc::forward_to(oop p) {
  assert(check_obj_alignment(p),
         "forwarding to something not aligned");
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversible");
  set_mark(m);
}

// Used by parallel scavengers.
inline bool oopDesc::cas_forward_to(oop p, markOop compare) {
  assert(check_obj_alignment(p),
         "forwarding to something not aligned");
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversible");
  return cas_set_mark(m, compare) == compare;
}
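
// Usage sketch (a simplified parallel-scavenge step, for illustration only):
// after copying an object to to-space, a worker claims it by CAS-ing the
// forwarding pointer into the mark word; a losing worker discards its copy
// and adopts the winner's:
//
//   markOop old_mark = obj->mark();      // hypothetical pre-copy mark
//   oop copy = ...;                      // this worker's to-space copy
//   if (obj->cas_forward_to(copy, old_mark)) {
//     // we won: copy is the official new location
//   } else {
//     copy = obj->forwardee();           // another worker forwarded it first
//   }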
// Note that the forwardee is not the same thing as the displaced_mark.
// The forwardee is used when copying during scavenge and mark-sweep.
// It does need to clear the low two locking- and GC-related bits.
inline oop oopDesc::forwardee() const {
  return (oop) mark()->decode_pointer();
}

inline bool oopDesc::has_displaced_mark() const {
  return mark()->has_displaced_mark_helper();
}

inline markOop oopDesc::displaced_mark() const {
  return mark()->displaced_mark_helper();
}

inline void oopDesc::set_displaced_mark(markOop m) {
  mark()->set_displaced_mark_helper(m);
}

// The following method needs to be MT safe.
inline uint oopDesc::age() const {
  assert(!is_forwarded(), "Attempt to read age from forwarded mark");
  if (has_displaced_mark()) {
    return displaced_mark()->age();
  } else {
    return mark()->age();
  }
}

inline void oopDesc::incr_age() {
  assert(!is_forwarded(), "Attempt to increment age of forwarded mark");
  if (has_displaced_mark()) {
    set_displaced_mark(displaced_mark()->incr_age());
  } else {
    set_mark(mark()->incr_age());
  }
}

inline intptr_t oopDesc::identity_hash() {
  // Fast case; if the object is unlocked and the hash value is set, no locking is needed.
  // Note: The mark must be read into a local variable to avoid concurrent updates.
  markOop mrk = mark();
  if (mrk->is_unlocked() && !mrk->has_no_hash()) {
    return mrk->hash();
  } else if (mrk->is_marked()) {
    return mrk->hash();
  } else {
    return slow_identity_hash();
  }
}
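
// Note on the branches above (editor's gloss): an unlocked mark word caches
// the hash bits directly, so the common case is a single load and mask; a
// GC-marked mark word is likewise read directly. Anything else -- a locked
// or biased object, or a hash that has never been assigned -- takes
// slow_identity_hash(), which may compute and install the hash.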
inline int oopDesc::adjust_pointers() {
  debug_only(int check_size = size());
  int s = klass()->oop_adjust_pointers(this);
  assert(s == check_size, "should be the same");
  return s;
}

#define OOP_ITERATE_DEFN(OopClosureType, nv_suffix)                    \
                                                                       \
inline int oopDesc::oop_iterate(OopClosureType* blk) {                 \
  SpecializationStats::record_call();                                  \
  return klass()->oop_oop_iterate##nv_suffix(this, blk);               \
}                                                                      \
                                                                       \
inline int oopDesc::oop_iterate(OopClosureType* blk, MemRegion mr) {   \
  SpecializationStats::record_call();                                  \
  return klass()->oop_oop_iterate##nv_suffix##_m(this, blk, mr);       \
}
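
// Expansion sketch (an editor's illustration): for a closure such as
// ExtendedOopClosure with the virtual-call suffix _v, the macro above
// generates, in effect:
//
//   inline int oopDesc::oop_iterate(ExtendedOopClosure* blk) {
//     SpecializationStats::record_call();
//     return klass()->oop_oop_iterate_v(this, blk);
//   }
//
// The ALL_OOP_OOP_ITERATE_CLOSURES_1/2 invocations below stamp these
// definitions out for every specialized closure type.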
inline int oopDesc::oop_iterate_no_header(OopClosure* blk) {
  // The NoHeaderExtendedOopClosure wraps the OopClosure and proxies all
  // the do_oop calls, but turns off all other features in ExtendedOopClosure.
  NoHeaderExtendedOopClosure cl(blk);
  return oop_iterate(&cl);
}

inline int oopDesc::oop_iterate_no_header(OopClosure* blk, MemRegion mr) {
  NoHeaderExtendedOopClosure cl(blk);
  return oop_iterate(&cl, mr);
}

ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_DEFN)
ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_DEFN)

#if INCLUDE_ALL_GCS
#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)          \
                                                                       \
inline int oopDesc::oop_iterate_backwards(OopClosureType* blk) {       \
  SpecializationStats::record_call();                                  \
  return klass()->oop_oop_iterate_backwards##nv_suffix(this, blk);     \
}

ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_BACKWARDS_DEFN)
ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_BACKWARDS_DEFN)
#endif // INCLUDE_ALL_GCS

#endif // SHARE_VM_OOPS_OOP_INLINE_HPP