Tue, 23 Nov 2010 13:22:55 -0800
6989984: Use standard include model for HotSpot
Summary: Replaced MakeDeps and the includeDB files with more standardized solutions.
Reviewed-by: coleenp, kvn, kamg
1 /*
2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #ifndef SHARE_VM_OOPS_MARKOOP_HPP
26 #define SHARE_VM_OOPS_MARKOOP_HPP
28 #include "oops/oop.hpp"
30 // The markOop describes the header of an object.
31 //
32 // Note that the mark is not a real oop but just a word.
33 // It is placed in the oop hierarchy for historical reasons.
34 //
35 // Bit-format of an object header (most significant first, big endian layout below):
36 //
37 // 32 bits:
38 // --------
39 // hash:25 ------------>| age:4 biased_lock:1 lock:2 (normal object)
40 // JavaThread*:23 epoch:2 age:4 biased_lock:1 lock:2 (biased object)
41 // size:32 ------------------------------------------>| (CMS free block)
42 // PromotedObject*:29 ---------->| promo_bits:3 ----->| (CMS promoted object)
43 //
44 // 64 bits:
45 // --------
46 // unused:25 hash:31 -->| unused:1 age:4 biased_lock:1 lock:2 (normal object)
47 // JavaThread*:54 epoch:2 unused:1 age:4 biased_lock:1 lock:2 (biased object)
48 // PromotedObject*:61 --------------------->| promo_bits:3 ----->| (CMS promoted object)
49 // size:64 ----------------------------------------------------->| (CMS free block)
50 //
51 // unused:25 hash:31 -->| cms_free:1 age:4 biased_lock:1 lock:2 (COOPs && normal object)
52 // JavaThread*:54 epoch:2 cms_free:1 age:4 biased_lock:1 lock:2 (COOPs && biased object)
53 // narrowOop:32 unused:24 cms_free:1 unused:4 promo_bits:3 ----->| (COOPs && CMS promoted object)
54 // unused:21 size:35 -->| cms_free:1 unused:7 ------------------>| (COOPs && CMS free block)
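//
//  An illustrative example (64-bit VM, no compressed oops): a neutral object
//  with identity hash 0x12345678 and age 3 would carry the mark word
//    (0x12345678 << 8) | (3 << 3) | 1  ==  0x1234567819
//  i.e. the hash in bits 38..8, the age in bits 6..3, the biased_lock bit
//  clear, and the two low lock bits set to 01 (unlocked).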
55 //
56 // - hash contains the identity hash value: largest value is
 57 //  31 bits, see os::random().  Also, 64-bit VMs require
58 // a hash value no bigger than 32 bits because they will not
59 // properly generate a mask larger than that: see library_call.cpp
60 // and c1_CodePatterns_sparc.cpp.
61 //
62 // - the biased lock pattern is used to bias a lock toward a given
63 // thread. When this pattern is set in the low three bits, the lock
64 // is either biased toward a given thread or "anonymously" biased,
65 // indicating that it is possible for it to be biased. When the
66 // lock is biased toward a given thread, locking and unlocking can
67 // be performed by that thread without using atomic operations.
68 // When a lock's bias is revoked, it reverts back to the normal
69 // locking scheme described below.
70 //
71 // Note that we are overloading the meaning of the "unlocked" state
72 // of the header. Because we steal a bit from the age we can
73 // guarantee that the bias pattern will never be seen for a truly
74 // unlocked object.
75 //
76 // Note also that the biased state contains the age bits normally
77 // contained in the object header. Large increases in scavenge
78 // times were seen when these bits were absent and an arbitrary age
79 // assigned to all biased objects, because they tended to consume a
80 // significant fraction of the eden semispaces and were not
81 // promoted promptly, causing an increase in the amount of copying
82 // performed. The runtime system aligns all JavaThread* pointers to
83 // a very large value (currently 128 bytes (32bVM) or 256 bytes (64bVM))
84 // to make room for the age bits & the epoch bits (used in support of
85 // biased locking), and for the CMS "freeness" bit in the 64bVM (+COOPs).
86 //
87 // [JavaThread* | epoch | age | 1 | 01] lock is biased toward given thread
88 // [0 | epoch | age | 1 | 01] lock is anonymously biased
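//
//  Illustrative consequence of the encoding above: masking a mark with
//  biased_lock_mask_in_place keeps the low three bits, which are 101 only
//  for a biased header; a neutral header yields 001, and the stack-locked,
//  monitor and marked states end in 00, 10 and 11, so the bias pattern can
//  never be confused with any other state.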
89 //
90 // - the two lock bits are used to describe three states: locked/unlocked and monitor.
91 //
92 // [ptr | 00] locked ptr points to real header on stack
93 // [header | 0 | 01] unlocked regular object header
 94 //    [ptr             | 10]  monitor            inflated lock (header is swapped out)
95 // [ptr | 11] marked used by markSweep to mark an object
96 // not valid at any other time
97 //
98 // We assume that stack/thread pointers have the lowest two bits cleared.
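//
//  For example (illustrative only): when a thread stack-locks an object, the
//  mark word becomes the address of a BasicLock on that thread's stack, whose
//  alignment guarantees the two low bits are 00 (the locked pattern); the
//  displaced "real" header is saved in the BasicLock and restored on unlock.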
100 class BasicLock;
101 class ObjectMonitor;
102 class JavaThread;
104 class markOopDesc: public oopDesc {
105 private:
106 // Conversion
107 uintptr_t value() const { return (uintptr_t) this; }
109 public:
110 // Constants
111 enum { age_bits = 4,
112 lock_bits = 2,
113 biased_lock_bits = 1,
114 max_hash_bits = BitsPerWord - age_bits - lock_bits - biased_lock_bits,
115 hash_bits = max_hash_bits > 31 ? 31 : max_hash_bits,
116 cms_bits = LP64_ONLY(1) NOT_LP64(0),
117 epoch_bits = 2
118 };
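
  // Derived values (illustration): on a 64-bit VM max_hash_bits is
  // 64 - 4 - 2 - 1 = 57, so hash_bits is clamped to 31; on a 32-bit VM
  // max_hash_bits is 32 - 7 = 25, which is why the layout diagrams above
  // show hash:31 and hash:25 respectively. cms_bits is 1 only on LP64 builds.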
120 // The biased locking code currently requires that the age bits be
121 // contiguous to the lock bits. Class data sharing would prefer the
122 // hash bits to be lower down to provide more random hash codes for
123 // shared read-only symbolOop objects, because these objects' mark
124 // words are set to their own address with marked_value in the lock
125 // bit, and using lower bits would make their identity hash values
126 // more random. However, the performance decision was made in favor
127 // of the biased locking code.
129 enum { lock_shift = 0,
130 biased_lock_shift = lock_bits,
131 age_shift = lock_bits + biased_lock_bits,
132 cms_shift = age_shift + age_bits,
133 hash_shift = cms_shift + cms_bits,
134 epoch_shift = hash_shift
135 };
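
  // Illustration of the resulting shift values: lock_shift = 0,
  // biased_lock_shift = 2, age_shift = 3 and cms_shift = 7 on every build;
  // hash_shift (and therefore epoch_shift) is 8 on LP64 and 7 otherwise.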
137 enum { lock_mask = right_n_bits(lock_bits),
138 lock_mask_in_place = lock_mask << lock_shift,
139 biased_lock_mask = right_n_bits(lock_bits + biased_lock_bits),
140 biased_lock_mask_in_place= biased_lock_mask << lock_shift,
141 biased_lock_bit_in_place = 1 << biased_lock_shift,
142 age_mask = right_n_bits(age_bits),
143 age_mask_in_place = age_mask << age_shift,
144 epoch_mask = right_n_bits(epoch_bits),
145 epoch_mask_in_place = epoch_mask << epoch_shift,
146 cms_mask = right_n_bits(cms_bits),
147 cms_mask_in_place = cms_mask << cms_shift
148 #ifndef _WIN64
149 ,hash_mask = right_n_bits(hash_bits),
150 hash_mask_in_place = (address_word)hash_mask << hash_shift
151 #endif
152 };
154 // Alignment of JavaThread pointers encoded in object header required by biased locking
155 enum { biased_lock_alignment = 2 << (epoch_shift + epoch_bits)
156 };
158 #ifdef _WIN64
159 // These values are too big for Win64
160 const static uintptr_t hash_mask = right_n_bits(hash_bits);
161 const static uintptr_t hash_mask_in_place =
162 (address_word)hash_mask << hash_shift;
163 #endif
165 enum { locked_value = 0,
166 unlocked_value = 1,
167 monitor_value = 2,
168 marked_value = 3,
169 biased_lock_pattern = 5
170 };
172 enum { no_hash = 0 }; // no hash value assigned
174 enum { no_hash_in_place = (address_word)no_hash << hash_shift,
175 no_lock_in_place = unlocked_value
176 };
178 enum { max_age = age_mask };
180 enum { max_bias_epoch = epoch_mask };
182 // Biased Locking accessors.
183 // These must be checked by all code which calls into the
184 // ObjectSynchronizer and other code. The biasing is not understood
185 // by the lower-level CAS-based locking code, although the runtime
186 // fixes up biased locks to be compatible with it when a bias is
187 // revoked.
188 bool has_bias_pattern() const {
189 return (mask_bits(value(), biased_lock_mask_in_place) == biased_lock_pattern);
190 }
191 JavaThread* biased_locker() const {
192 assert(has_bias_pattern(), "should not call this otherwise");
193 return (JavaThread*) ((intptr_t) (mask_bits(value(), ~(biased_lock_mask_in_place | age_mask_in_place | epoch_mask_in_place))));
194 }
195 // Indicates that the mark has the bias bit set but that it has not
196 // yet been biased toward a particular thread
197 bool is_biased_anonymously() const {
198 return (has_bias_pattern() && (biased_locker() == NULL));
199 }
200 // Indicates epoch in which this bias was acquired. If the epoch
201 // changes due to too many bias revocations occurring, the biases
202 // from the previous epochs are all considered invalid.
203 int bias_epoch() const {
204 assert(has_bias_pattern(), "should not call this otherwise");
205 return (mask_bits(value(), epoch_mask_in_place) >> epoch_shift);
206 }
207 markOop set_bias_epoch(int epoch) {
208 assert(has_bias_pattern(), "should not call this otherwise");
209 assert((epoch & (~epoch_mask)) == 0, "epoch overflow");
210 return markOop(mask_bits(value(), ~epoch_mask_in_place) | (epoch << epoch_shift));
211 }
212 markOop incr_bias_epoch() {
213 return set_bias_epoch((1 + bias_epoch()) & epoch_mask);
214 }
215 // Prototype mark for initialization
216 static markOop biased_locking_prototype() {
217 return markOop( biased_lock_pattern );
218 }
220 // lock accessors (note that these assume lock_shift == 0)
221 bool is_locked() const {
222 return (mask_bits(value(), lock_mask_in_place) != unlocked_value);
223 }
224 bool is_unlocked() const {
225 return (mask_bits(value(), biased_lock_mask_in_place) == unlocked_value);
226 }
227 bool is_marked() const {
228 return (mask_bits(value(), lock_mask_in_place) == marked_value);
229 }
230 bool is_neutral() const { return (mask_bits(value(), biased_lock_mask_in_place) == unlocked_value); }
232 // Special temporary state of the markOop while being inflated.
 233   // Code that looks at mark outside a lock needs to take this into account.
234 bool is_being_inflated() const { return (value() == 0); }
236 // Distinguished markword value - used when inflating over
237 // an existing stacklock. 0 indicates the markword is "BUSY".
238 // Lockword mutators that use a LD...CAS idiom should always
239 // check for and avoid overwriting a 0 value installed by some
240 // other thread. (They should spin or block instead. The 0 value
241 // is transient and *should* be short-lived).
242 static markOop INFLATING() { return (markOop) 0; } // inflate-in-progress
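
  // A minimal sketch of the discipline described above (illustrative only,
  // not part of this header): a reader that observes the INFLATING() value
  // must not act on it, e.g.
  //
  //   markOop mark = obj->mark();
  //   while (mark == markOopDesc::INFLATING()) {
  //     // 0 is transient; spin (or yield) until the inflating thread
  //     // installs the real monitor mark.
  //     mark = obj->mark();
  //   }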
244 // Should this header be preserved during GC?
245 inline bool must_be_preserved(oop obj_containing_mark) const;
246 inline bool must_be_preserved_with_bias(oop obj_containing_mark) const;
248 // Should this header (including its age bits) be preserved in the
249 // case of a promotion failure during scavenge?
250 // Note that we special case this situation. We want to avoid
251 // calling BiasedLocking::preserve_marks()/restore_marks() (which
252 // decrease the number of mark words that need to be preserved
253 // during GC) during each scavenge. During scavenges in which there
254 // is no promotion failure, we actually don't need to call the above
255 // routines at all, since we don't mutate and re-initialize the
256 // marks of promoted objects using init_mark(). However, during
257 // scavenges which result in promotion failure, we do re-initialize
258 // the mark words of objects, meaning that we should have called
259 // these mark word preservation routines. Currently there's no good
260 // place in which to call them in any of the scavengers (although
261 // guarded by appropriate locks we could make one), but the
262 // observation is that promotion failures are quite rare and
263 // reducing the number of mark words preserved during them isn't a
264 // high priority.
265 inline bool must_be_preserved_for_promotion_failure(oop obj_containing_mark) const;
266 inline bool must_be_preserved_with_bias_for_promotion_failure(oop obj_containing_mark) const;
268 // Should this header be preserved during a scavenge where CMS is
269 // the old generation?
270 // (This is basically the same body as must_be_preserved_for_promotion_failure(),
271 // but takes the klassOop as argument instead)
272 inline bool must_be_preserved_for_cms_scavenge(klassOop klass_of_obj_containing_mark) const;
273 inline bool must_be_preserved_with_bias_for_cms_scavenge(klassOop klass_of_obj_containing_mark) const;
275 // WARNING: The following routines are used EXCLUSIVELY by
276 // synchronization functions. They are not really gc safe.
 277   // They must get updated if the markOop layout gets changed.
278 markOop set_unlocked() const {
279 return markOop(value() | unlocked_value);
280 }
281 bool has_locker() const {
282 return ((value() & lock_mask_in_place) == locked_value);
283 }
284 BasicLock* locker() const {
285 assert(has_locker(), "check");
286 return (BasicLock*) value();
287 }
288 bool has_monitor() const {
289 return ((value() & monitor_value) != 0);
290 }
291 ObjectMonitor* monitor() const {
292 assert(has_monitor(), "check");
293 // Use xor instead of &~ to provide one extra tag-bit check.
294 return (ObjectMonitor*) (value() ^ monitor_value);
295 }
296 bool has_displaced_mark_helper() const {
297 return ((value() & unlocked_value) == 0);
298 }
299 markOop displaced_mark_helper() const {
300 assert(has_displaced_mark_helper(), "check");
301 intptr_t ptr = (value() & ~monitor_value);
302 return *(markOop*)ptr;
303 }
304 void set_displaced_mark_helper(markOop m) const {
305 assert(has_displaced_mark_helper(), "check");
306 intptr_t ptr = (value() & ~monitor_value);
307 *(markOop*)ptr = m;
308 }
309 markOop copy_set_hash(intptr_t hash) const {
310 intptr_t tmp = value() & (~hash_mask_in_place);
311 tmp |= ((hash & hash_mask) << hash_shift);
312 return (markOop)tmp;
313 }
 314   // it is only used to be stored into a BasicLock as the
 315   // indicator that the lock is using a heavyweight monitor
316 static markOop unused_mark() {
317 return (markOop) marked_value;
318 }
 319   // the following functions create the markOop to be
 320   // stored into the object header; they encode monitor/lock/bias info
321 static markOop encode(BasicLock* lock) {
322 return (markOop) lock;
323 }
324 static markOop encode(ObjectMonitor* monitor) {
325 intptr_t tmp = (intptr_t) monitor;
326 return (markOop) (tmp | monitor_value);
327 }
328 static markOop encode(JavaThread* thread, int age, int bias_epoch) {
329 intptr_t tmp = (intptr_t) thread;
330 assert(UseBiasedLocking && ((tmp & (epoch_mask_in_place | age_mask_in_place | biased_lock_mask_in_place)) == 0), "misaligned JavaThread pointer");
331 assert(age <= max_age, "age too large");
332 assert(bias_epoch <= max_bias_epoch, "bias epoch too large");
333 return (markOop) (tmp | (bias_epoch << epoch_shift) | (age << age_shift) | biased_lock_pattern);
334 }
336 // used to encode pointers during GC
337 markOop clear_lock_bits() { return markOop(value() & ~lock_mask_in_place); }
339 // age operations
340 markOop set_marked() { return markOop((value() & ~lock_mask_in_place) | marked_value); }
342 int age() const { return mask_bits(value() >> age_shift, age_mask); }
343 markOop set_age(int v) const {
344 assert((v & ~age_mask) == 0, "shouldn't overflow age field");
345 return markOop((value() & ~age_mask_in_place) | (((intptr_t)v & age_mask) << age_shift));
346 }
347 markOop incr_age() const { return age() == max_age ? markOop(this) : set_age(age() + 1); }
349 // hash operations
350 intptr_t hash() const {
351 return mask_bits(value() >> hash_shift, hash_mask);
352 }
354 bool has_no_hash() const {
355 return hash() == no_hash;
356 }
358 // Prototype mark for initialization
359 static markOop prototype() {
360 return markOop( no_hash_in_place | no_lock_in_place );
361 }
363 // Helper function for restoration of unmarked mark oops during GC
364 static inline markOop prototype_for_object(oop obj);
366 // Debugging
367 void print_on(outputStream* st) const;
369 // Prepare address of oop for placement into mark
370 inline static markOop encode_pointer_as_mark(void* p) { return markOop(p)->set_marked(); }
372 // Recover address of oop from encoded form used in mark
373 inline void* decode_pointer() { if (UseBiasedLocking && has_bias_pattern()) return NULL; return clear_lock_bits(); }
375 // see the definition in markOop.cpp for the gory details
376 bool should_not_be_cached() const;
 378   // These markOops indicate CMS free chunk blocks and not objects.
 379   // In 64-bit mode, the markOop is set to distinguish them from oops.
380 // These are defined in 32 bit mode for vmStructs.
381 const static uintptr_t cms_free_chunk_pattern = 0x1;
383 // Constants for the size field.
384 enum { size_shift = cms_shift + cms_bits,
 385          size_bits  = 35    // needed for compressed oops (32G heap)
386 };
387 // These values are too big for Win64
388 const static uintptr_t size_mask = LP64_ONLY(right_n_bits(size_bits))
389 NOT_LP64(0);
390 const static uintptr_t size_mask_in_place =
391 (address_word)size_mask << size_shift;
393 #ifdef _LP64
394 static markOop cms_free_prototype() {
395 return markOop(((intptr_t)prototype() & ~cms_mask_in_place) |
396 ((cms_free_chunk_pattern & cms_mask) << cms_shift));
397 }
398 uintptr_t cms_encoding() const {
399 return mask_bits(value() >> cms_shift, cms_mask);
400 }
401 bool is_cms_free_chunk() const {
402 return is_neutral() &&
403 (cms_encoding() & cms_free_chunk_pattern) == cms_free_chunk_pattern;
404 }
406 size_t get_size() const { return (size_t)(value() >> size_shift); }
407 static markOop set_size_and_free(size_t size) {
408 assert((size & ~size_mask) == 0, "shouldn't overflow size field");
409 return markOop(((intptr_t)cms_free_prototype() & ~size_mask_in_place) |
410 (((intptr_t)size & size_mask) << size_shift));
411 }
412 #endif // _LP64
413 };
415 #endif // SHARE_VM_OOPS_MARKOOP_HPP