Tue, 27 Nov 2012 07:57:57 -0800
8003879: Duplicate definitions in vmStructs
Summary: Removed duplicate entries
Reviewed-by: dholmes, sspitsyn
1 /*
2 * Copyright (c) 2010, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_PROMOTIONINFO_HPP
26 #define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_PROMOTIONINFO_HPP
28 #include "gc_implementation/concurrentMarkSweep/freeChunk.hpp"
29 #include "memory/allocation.hpp"
31 // Forward declarations
32 class CompactibleFreeListSpace;
// A PromotedObject is overlaid on the start of an object that has been
// promoted into the CMS generation while promotion is being tracked.
// Promoted objects are threaded into a singly-linked list through their
// first word; status flags are packed into the low-order bits of that
// link word (or, with compressed oops, the link is kept in a narrowOop
// half of the word so the flags and the cms_free_bit are undisturbed).
class PromotedObject VALUE_OBJ_CLASS_SPEC {
 private:
  // Flag bits stored in the low-order bits of _next.  Object alignment
  // guarantees the real link value has zeros in these positions.
  enum {
    promoted_mask = right_n_bits(2), // i.e. 0x3
    displaced_mark = nth_bit(2), // i.e. 0x4
    next_mask = ~(right_n_bits(3)) // i.e. ~(0x7)
  };
  // Below, we want _narrow_next in the "higher" 32 bit slot,
  // whose position will depend on endian-ness of the platform.
  // This is so that there is no interference with the
  // cms_free_bit occupying bit position 7 (lsb == 0)
  // when we are using compressed oops; see FreeChunk::is_free().
  // We cannot move the cms_free_bit down because currently
  // biased locking code assumes that age bits are contiguous
  // with the lock bits. Even if that assumption were relaxed,
  // the least position we could move this bit to would be
  // to bit position 3, which would require 16 byte alignment.
  typedef struct {
#ifdef VM_LITTLE_ENDIAN
    LP64_ONLY(narrowOop _pad;)
    narrowOop _narrow_next;
#else
    narrowOop _narrow_next;
    LP64_ONLY(narrowOop _pad;)
#endif
  } Data;

  // The link word: viewed either as a tagged intptr_t (uncompressed
  // oops) or as the Data struct above (compressed oops).
  union {
    intptr_t _next;
    Data _data;
  };
 public:
  // Return the next object on the promoted list, with flag bits
  // stripped.  The receiver must not look like a free chunk.
  inline PromotedObject* next() const {
    assert(!((FreeChunk*)this)->is_free(), "Error");
    PromotedObject* res;
    if (UseCompressedOops) {
      // The next pointer is a compressed oop stored in the top 32 bits
      res = (PromotedObject*)oopDesc::decode_heap_oop(_data._narrow_next);
    } else {
      res = (PromotedObject*)(_next & next_mask);
    }
    assert(oop(res)->is_oop_or_null(true /* ignore mark word */), "Not an oop?");
    return res;
  }
  // Install the next-pointer.  x must be suitably aligned so it does
  // not collide with the flag bits; OR-ing (rather than assigning)
  // preserves any flag bits already set in _next.
  inline void setNext(PromotedObject* x) {
    assert(((intptr_t)x & ~next_mask) == 0, "Conflict in bit usage, "
           "or insufficient alignment of objects");
    if (UseCompressedOops) {
      assert(_data._narrow_next == 0, "Overwrite?");
      _data._narrow_next = oopDesc::encode_heap_oop(oop(x));
    } else {
      _next |= (intptr_t)x;
    }
    assert(!((FreeChunk*)this)->is_free(), "Error");
  }
  // Mark this object as promoted (sets both low-order flag bits).
  inline void setPromotedMark() {
    _next |= promoted_mask;
    assert(!((FreeChunk*)this)->is_free(), "Error");
  }
  // True iff both promoted_mask bits are set.
  inline bool hasPromotedMark() const {
    assert(!((FreeChunk*)this)->is_free(), "Error");
    return (_next & promoted_mask) == promoted_mask;
  }
  // Record that this object's mark word has been displaced (spooled).
  inline void setDisplacedMark() {
    _next |= displaced_mark;
    assert(!((FreeChunk*)this)->is_free(), "Error");
  }
  // True iff the displaced_mark flag bit is set.
  inline bool hasDisplacedMark() const {
    assert(!((FreeChunk*)this)->is_free(), "Error");
    return (_next & displaced_mark) != 0;
  }
  // Clear the link word (pointer and all flag bits).
  inline void clear_next() {
    _next = 0;
    assert(!((FreeChunk*)this)->is_free(), "Error");
  }
  // Debug-only access to the raw link word's address.
  debug_only(void *next_addr() { return (void *) &_next; })
};
// A SpoolBlock is a buffer in which displaced mark words ("headers") of
// promoted objects are saved ("spooled") until they can be restored.
// It derives from FreeChunk so that a chunk taken from the free lists
// can be reused directly as a spool buffer.
class SpoolBlock: public FreeChunk {
  friend class PromotionInfo;
 protected:
  SpoolBlock* nextSpoolBlock;  // link to the next block in the spool list
  size_t bufferSize;           // number of usable words in this block
  markOop* displacedHdr;       // the displaced headers start here

  // Note about bufferSize: it denotes the number of entries available plus 1;
  // legal indices range from 1 through bufferSize - 1. See the verification
  // code verify() that counts the number of displaced headers spooled.
  size_t computeBufferSize() {
    return (size() * sizeof(HeapWord) - sizeof(*this)) / sizeof(markOop);
  }

 public:
  void init() {
    bufferSize = computeBufferSize();
    // NOTE(review): displacedHdr initially points at its own slot;
    // spooled headers presumably fill the words that follow the block
    // fields — confirm against nextDisplacedHeader()/saveDisplacedHeader().
    displacedHdr = (markOop*)&displacedHdr;
    nextSpoolBlock = NULL;
  }

  void print_on(outputStream* st) const;
  void print() const { print_on(gclog_or_tty); }
};
138 class PromotionInfo VALUE_OBJ_CLASS_SPEC {
139 bool _tracking; // set if tracking
140 CompactibleFreeListSpace* _space; // the space to which this belongs
141 PromotedObject* _promoHead; // head of list of promoted objects
142 PromotedObject* _promoTail; // tail of list of promoted objects
143 SpoolBlock* _spoolHead; // first spooling block
144 SpoolBlock* _spoolTail; // last non-full spooling block or null
145 SpoolBlock* _splice_point; // when _spoolTail is null, holds list tail
146 SpoolBlock* _spareSpool; // free spool buffer
147 size_t _firstIndex; // first active index in
148 // first spooling block (_spoolHead)
149 size_t _nextIndex; // last active index + 1 in last
150 // spooling block (_spoolTail)
151 private:
152 // ensure that spooling space exists; return true if there is spooling space
153 bool ensure_spooling_space_work();
155 public:
156 PromotionInfo() :
157 _tracking(0), _space(NULL),
158 _promoHead(NULL), _promoTail(NULL),
159 _spoolHead(NULL), _spoolTail(NULL),
160 _spareSpool(NULL), _firstIndex(1),
161 _nextIndex(1) {}
163 bool noPromotions() const {
164 assert(_promoHead != NULL || _promoTail == NULL, "list inconsistency");
165 return _promoHead == NULL;
166 }
167 void startTrackingPromotions();
168 void stopTrackingPromotions(uint worker_id = 0);
169 bool tracking() const { return _tracking; }
170 void track(PromotedObject* trackOop); // keep track of a promoted oop
171 // The following variant must be used when trackOop is not fully
172 // initialized and has a NULL klass:
173 void track(PromotedObject* trackOop, Klass* klassOfOop); // keep track of a promoted oop
174 void setSpace(CompactibleFreeListSpace* sp) { _space = sp; }
175 CompactibleFreeListSpace* space() const { return _space; }
176 markOop nextDisplacedHeader(); // get next header & forward spool pointer
177 void saveDisplacedHeader(markOop hdr);
178 // save header and forward spool
180 inline size_t refillSize() const;
182 SpoolBlock* getSpoolBlock(); // return a free spooling block
183 inline bool has_spooling_space() {
184 return _spoolTail != NULL && _spoolTail->bufferSize > _nextIndex;
185 }
186 // ensure that spooling space exists
187 bool ensure_spooling_space() {
188 return has_spooling_space() || ensure_spooling_space_work();
189 }
190 #define PROMOTED_OOPS_ITERATE_DECL(OopClosureType, nv_suffix) \
191 void promoted_oops_iterate##nv_suffix(OopClosureType* cl);
192 ALL_SINCE_SAVE_MARKS_CLOSURES(PROMOTED_OOPS_ITERATE_DECL)
193 #undef PROMOTED_OOPS_ITERATE_DECL
194 void promoted_oops_iterate(OopsInGenClosure* cl) {
195 promoted_oops_iterate_v(cl);
196 }
197 void verify() const;
198 void reset() {
199 _promoHead = NULL;
200 _promoTail = NULL;
201 _spoolHead = NULL;
202 _spoolTail = NULL;
203 _spareSpool = NULL;
204 _firstIndex = 0;
205 _nextIndex = 0;
207 }
209 void print_on(outputStream* st) const;
210 void print_statistics(uint worker_id) const;
211 };
214 #endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_PROMOTIONINFO_HPP