Tue, 27 Nov 2012 07:57:57 -0800
8003879: Duplicate definitions in vmStructs
Summary: Removed duplicate entries
Reviewed-by: dholmes, sspitsyn

/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP

#include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
#include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/shared/gcUtil.hpp"
#include "memory/defNewGeneration.hpp"

inline void CMSBitMap::clear_all() {
  assert_locked();
  // CMS bitmaps usually cover large memory regions.
  _bm.clear_large();
  return;
}

inline size_t CMSBitMap::heapWordToOffset(HeapWord* addr) const {
  return (pointer_delta(addr, _bmStartWord)) >> _shifter;
}

inline HeapWord* CMSBitMap::offsetToHeapWord(size_t offset) const {
  return _bmStartWord + (offset << _shifter);
}

inline size_t CMSBitMap::heapWordDiffToOffsetDiff(size_t diff) const {
  assert((diff & ((1 << _shifter) - 1)) == 0, "argument check");
  return diff >> _shifter;
}
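
// Illustrative example (not part of the original source): with a
// one-bit-per-word map (_shifter == 0), the word at _bmStartWord + 17 maps
// to bit offset 17 and back again. With _shifter == 3, eight consecutive
// words share one bit, so heapWordToOffset(_bmStartWord + 17) == 2 and
// offsetToHeapWord(2) == _bmStartWord + 16, the start of that 8-word chunk.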

inline void CMSBitMap::mark(HeapWord* addr) {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  _bm.set_bit(heapWordToOffset(addr));
}

inline bool CMSBitMap::par_mark(HeapWord* addr) {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return _bm.par_at_put(heapWordToOffset(addr), true);
}

inline void CMSBitMap::par_clear(HeapWord* addr) {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  _bm.par_at_put(heapWordToOffset(addr), false);
}

inline void CMSBitMap::mark_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                BitMap::small_range);
}

inline void CMSBitMap::clear_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                  BitMap::small_range);
}

inline void CMSBitMap::par_mark_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                    BitMap::small_range);
}

inline void CMSBitMap::par_clear_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size is usually just 1 bit.
  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                      BitMap::small_range);
}

inline void CMSBitMap::mark_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                BitMap::large_range);
}

inline void CMSBitMap::clear_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                  BitMap::large_range);
}

inline void CMSBitMap::par_mark_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.par_set_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                    BitMap::large_range);
}

inline void CMSBitMap::par_clear_large_range(MemRegion mr) {
  NOT_PRODUCT(region_invariant(mr));
  // Range size must be greater than 32 bytes.
  _bm.par_clear_range(heapWordToOffset(mr.start()), heapWordToOffset(mr.end()),
                      BitMap::large_range);
}

// Starting at "addr" (inclusive) return a memory region
// corresponding to the first maximally contiguous marked ("1") region.
inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* addr) {
  return getAndClearMarkedRegion(addr, endWord());
}

// Starting at "start_addr" (inclusive) return a memory region
// corresponding to the first maximally contiguous marked ("1") region
// strictly less than end_addr.
inline MemRegion CMSBitMap::getAndClearMarkedRegion(HeapWord* start_addr,
                                                    HeapWord* end_addr) {
  HeapWord *start, *end;
  assert_locked();
  start = getNextMarkedWordAddress  (start_addr, end_addr);
  end   = getNextUnmarkedWordAddress(start,      end_addr);
  assert(start <= end, "Consistency check");
  MemRegion mr(start, end);
  if (!mr.is_empty()) {
    clear_range(mr);
  }
  return mr;
}
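
// Hypothetical usage sketch (illustrative only): consume all marked regions
// in [startWord(), endWord()) one maximal run at a time. "process" is a
// placeholder, not a real API.
//
//   for (MemRegion mr = getAndClearMarkedRegion(startWord());
//        !mr.is_empty();
//        mr = getAndClearMarkedRegion(mr.end())) {
//     process(mr);
//   }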

inline bool CMSBitMap::isMarked(HeapWord* addr) const {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return _bm.at(heapWordToOffset(addr));
}

// The same as isMarked() but without a lock check.
inline bool CMSBitMap::par_isMarked(HeapWord* addr) const {
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return _bm.at(heapWordToOffset(addr));
}

inline bool CMSBitMap::isUnmarked(HeapWord* addr) const {
  assert_locked();
  assert(_bmStartWord <= addr && addr < (_bmStartWord + _bmWordSize),
         "outside underlying space?");
  return !_bm.at(heapWordToOffset(addr));
}

// Return the HeapWord address corresponding to the next "1" bit
// (inclusive).
inline HeapWord* CMSBitMap::getNextMarkedWordAddress(HeapWord* addr) const {
  return getNextMarkedWordAddress(addr, endWord());
}

// Return the least HeapWord address corresponding to the next "1" bit
// starting at start_addr (inclusive) but strictly less than end_addr.
inline HeapWord* CMSBitMap::getNextMarkedWordAddress(
  HeapWord* start_addr, HeapWord* end_addr) const {
  assert_locked();
  size_t nextOffset = _bm.get_next_one_offset(
                        heapWordToOffset(start_addr),
                        heapWordToOffset(end_addr));
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= start_addr &&
         nextAddr <= end_addr, "get_next_one postcondition");
  assert((nextAddr == end_addr) ||
         isMarked(nextAddr), "get_next_one postcondition");
  return nextAddr;
}

// Return the HeapWord address corresponding to the next "0" bit
// (inclusive).
inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(HeapWord* addr) const {
  return getNextUnmarkedWordAddress(addr, endWord());
}

// Return the least HeapWord address corresponding to the next "0" bit
// starting at start_addr (inclusive) but strictly less than end_addr.
inline HeapWord* CMSBitMap::getNextUnmarkedWordAddress(
  HeapWord* start_addr, HeapWord* end_addr) const {
  assert_locked();
  size_t nextOffset = _bm.get_next_zero_offset(
                        heapWordToOffset(start_addr),
                        heapWordToOffset(end_addr));
  HeapWord* nextAddr = offsetToHeapWord(nextOffset);
  assert(nextAddr >= start_addr &&
         nextAddr <= end_addr, "get_next_zero postcondition");
  assert((nextAddr == end_addr) ||
         isUnmarked(nextAddr), "get_next_zero postcondition");
  return nextAddr;
}
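
// Hypothetical sketch (illustrative only): walk every marked word in
// [startWord(), endWord()) without clearing the bits. The end sentinel works
// because getNextMarkedWordAddress() returns end_addr when no "1" bit
// remains. "visit" is a placeholder, not a real API.
//
//   for (HeapWord* cur = getNextMarkedWordAddress(startWord());
//        cur < endWord();
//        cur = getNextMarkedWordAddress(cur + 1)) {
//     visit(cur);
//   }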

inline bool CMSBitMap::isAllClear() const {
  assert_locked();
  return getNextMarkedWordAddress(startWord()) >= endWord();
}

inline void CMSBitMap::iterate(BitMapClosure* cl, HeapWord* left,
                               HeapWord* right) {
  assert_locked();
  left  = MAX2(_bmStartWord, left);
  right = MIN2(_bmStartWord + _bmWordSize, right);
  if (right > left) {
    _bm.iterate(cl, heapWordToOffset(left), heapWordToOffset(right));
  }
}
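
// Hypothetical closure sketch (illustrative only, assuming BitMapClosure's
// do_bit(size_t) hook from bitMap.hpp, where returning false aborts the
// iteration early): count the set bits in a range.
//
//   class CountMarkedClosure : public BitMapClosure {
//     size_t _count;
//    public:
//     CountMarkedClosure() : _count(0) {}
//     bool do_bit(size_t offset) { _count++; return true; }  // keep going
//     size_t count() const { return _count; }
//   };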

inline void CMSCollector::start_icms() {
  if (CMSIncrementalMode) {
    ConcurrentMarkSweepThread::start_icms();
  }
}

inline void CMSCollector::stop_icms() {
  if (CMSIncrementalMode) {
    ConcurrentMarkSweepThread::stop_icms();
  }
}

inline void CMSCollector::disable_icms() {
  if (CMSIncrementalMode) {
    ConcurrentMarkSweepThread::disable_icms();
  }
}

inline void CMSCollector::enable_icms() {
  if (CMSIncrementalMode) {
    ConcurrentMarkSweepThread::enable_icms();
  }
}

inline void CMSCollector::icms_wait() {
  if (CMSIncrementalMode) {
    cmsThread()->icms_wait();
  }
}

inline void CMSCollector::save_sweep_limits() {
  _cmsGen->save_sweep_limit();
}

inline bool CMSCollector::is_dead_obj(oop obj) const {
  HeapWord* addr = (HeapWord*)obj;
  assert((_cmsGen->cmsSpace()->is_in_reserved(addr)
          && _cmsGen->cmsSpace()->block_is_obj(addr)),
         "must be object");
  return  should_unload_classes() &&
          _collectorState == Sweeping &&
          !_markBitMap.isMarked(addr);
}

inline bool CMSCollector::should_abort_preclean() const {
  // We are in the midst of an "abortable preclean" and either
  // scavenge is done or foreground GC wants to take over collection.
  return _collectorState == AbortablePreclean &&
         (_abort_preclean || _foregroundGCIsActive ||
          GenCollectedHeap::heap()->incremental_collection_will_fail(true /* consult_young */));
}

inline size_t CMSCollector::get_eden_used() const {
  return _young_gen->as_DefNewGeneration()->eden()->used();
}

inline size_t CMSCollector::get_eden_capacity() const {
  return _young_gen->as_DefNewGeneration()->eden()->capacity();
}

inline bool CMSStats::valid() const {
  return _valid_bits == _ALL_VALID;
}

inline void CMSStats::record_gc0_begin() {
  if (_gc0_begin_time.is_updated()) {
    float last_gc0_period = _gc0_begin_time.seconds();
    _gc0_period = AdaptiveWeightedAverage::exp_avg(_gc0_period,
                                                   last_gc0_period, _gc0_alpha);
    _gc0_alpha = _saved_alpha;
    _valid_bits |= _GC0_VALID;
  }
  _cms_used_at_gc0_begin = _cms_gen->cmsSpace()->used();

  _gc0_begin_time.update();
}

inline void CMSStats::record_gc0_end(size_t cms_gen_bytes_used) {
  float last_gc0_duration = _gc0_begin_time.seconds();
  _gc0_duration = AdaptiveWeightedAverage::exp_avg(_gc0_duration,
                                                   last_gc0_duration, _gc0_alpha);

  // Amount promoted.
  _cms_used_at_gc0_end = cms_gen_bytes_used;

  size_t promoted_bytes = 0;
  if (_cms_used_at_gc0_end >= _cms_used_at_gc0_begin) {
    promoted_bytes = _cms_used_at_gc0_end - _cms_used_at_gc0_begin;
  }

  // If the younger gen collections were skipped, then the
  // number of promoted bytes will be 0 and adding it to the
  // average will incorrectly lessen the average. It is, however,
  // also possible that no promotion was needed.
  //
  // _gc0_promoted used to be calculated as
  // _gc0_promoted = AdaptiveWeightedAverage::exp_avg(_gc0_promoted,
  //                                                  promoted_bytes, _gc0_alpha);
  _cms_gen->gc_stats()->avg_promoted()->sample(promoted_bytes);
  _gc0_promoted = (size_t) _cms_gen->gc_stats()->avg_promoted()->average();

  // Amount directly allocated.
  size_t allocated_bytes = _cms_gen->direct_allocated_words() * HeapWordSize;
  _cms_gen->reset_direct_allocated_words();
  _cms_allocated = AdaptiveWeightedAverage::exp_avg(_cms_allocated,
                                                    allocated_bytes, _gc0_alpha);
}
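
// Note on the decay math (added here for clarity; see gcUtil.hpp for the
// authoritative definition): exp_avg(avg, sample, alpha) computes an
// exponentially weighted moving average. With alpha expressed as a
// percentage, the update is roughly
//
//   new_avg = ((100 - alpha) * avg + alpha * sample) / 100
//
// so larger alpha values make the statistic track recent samples more
// closely, and smaller values make it smoother.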

inline void CMSStats::record_cms_begin() {
  _cms_timer.stop();

  // This is just an approximate value, but is good enough.
  _cms_used_at_cms_begin = _cms_used_at_gc0_end;

  _cms_period = AdaptiveWeightedAverage::exp_avg((float)_cms_period,
                                                 (float)_cms_timer.seconds(), _cms_alpha);
  _cms_begin_time.update();

  _cms_timer.reset();
  _cms_timer.start();
}

inline void CMSStats::record_cms_end() {
  _cms_timer.stop();

  float cur_duration = _cms_timer.seconds();
  _cms_duration = AdaptiveWeightedAverage::exp_avg(_cms_duration,
                                                   cur_duration, _cms_alpha);

  // Avoid division by 0.
  const size_t cms_used_mb = MAX2(_cms_used_at_cms_begin / M, (size_t)1);
  _cms_duration_per_mb = AdaptiveWeightedAverage::exp_avg(_cms_duration_per_mb,
                                                          cur_duration / cms_used_mb,
                                                          _cms_alpha);

  _cms_end_time.update();
  _cms_alpha = _saved_alpha;
  _allow_duty_cycle_reduction = true;
  _valid_bits |= _CMS_VALID;

  _cms_timer.start();
}
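
// Worked example of the MAX2 clamp above (illustrative numbers): if the CMS
// generation held only 300 KB at cycle begin, _cms_used_at_cms_begin / M
// would truncate to 0 and the division would be undefined; clamping to 1 MB
// charges the whole cycle duration to a single megabyte, which overestimates
// _cms_duration_per_mb for tiny occupancies but stays safe.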

inline double CMSStats::cms_time_since_begin() const {
  return _cms_begin_time.seconds();
}

inline double CMSStats::cms_time_since_end() const {
  return _cms_end_time.seconds();
}

inline double CMSStats::promotion_rate() const {
  assert(valid(), "statistics not valid yet");
  return gc0_promoted() / gc0_period();
}

inline double CMSStats::cms_allocation_rate() const {
  assert(valid(), "statistics not valid yet");
  return cms_allocated() / gc0_period();
}

inline double CMSStats::cms_consumption_rate() const {
  assert(valid(), "statistics not valid yet");
  return (gc0_promoted() + cms_allocated()) / gc0_period();
}
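
// Illustrative numbers (not from the original source): with an averaged
// promotion of 8 MB and direct allocation of 2 MB per young-gen period of
// 4 seconds, promotion_rate() is 2 MB/s, cms_allocation_rate() is 0.5 MB/s,
// and cms_consumption_rate() is their sum, 2.5 MB/s: a combined
// bytes-per-second figure for how fast the CMS generation fills up.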

inline unsigned int CMSStats::icms_update_duty_cycle() {
  // Update the duty cycle only if pacing is enabled and the stats are valid
  // (after at least one young gen GC and one CMS cycle have completed).
  if (CMSIncrementalPacing && valid()) {
    return icms_update_duty_cycle_impl();
  }
  return _icms_duty_cycle;
}

inline void ConcurrentMarkSweepGeneration::save_sweep_limit() {
  cmsSpace()->save_sweep_limit();
}

inline size_t ConcurrentMarkSweepGeneration::capacity() const {
  return _cmsSpace->capacity();
}

inline size_t ConcurrentMarkSweepGeneration::used() const {
  return _cmsSpace->used();
}

inline size_t ConcurrentMarkSweepGeneration::free() const {
  return _cmsSpace->free();
}

inline MemRegion ConcurrentMarkSweepGeneration::used_region() const {
  return _cmsSpace->used_region();
}

inline MemRegion ConcurrentMarkSweepGeneration::used_region_at_save_marks() const {
  return _cmsSpace->used_region_at_save_marks();
}

inline void MarkFromRootsClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    do_yield_work();
  }
}

inline void Par_MarkFromRootsClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    do_yield_work();
  }
}

inline void PushOrMarkClosure::do_yield_check() {
  _parent->do_yield_check();
}

inline void Par_PushOrMarkClosure::do_yield_check() {
  _parent->do_yield_check();
}

// Return value of "true" indicates that the on-going preclean
// should be aborted.
inline bool ScanMarkedObjectsAgainCarefullyClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    // Sample young gen size before and after yield.
    _collector->sample_eden();
    do_yield_work();
    _collector->sample_eden();
    return _collector->should_abort_preclean();
  }
  return false;
}

inline void SurvivorSpacePrecleanClosure::do_yield_check() {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    // Sample young gen size before and after yield.
    _collector->sample_eden();
    do_yield_work();
    _collector->sample_eden();
  }
}

inline void SweepClosure::do_yield_check(HeapWord* addr) {
  if (ConcurrentMarkSweepThread::should_yield() &&
      !_collector->foregroundGCIsActive() &&
      _yield) {
    do_yield_work(addr);
  }
}

inline void MarkRefsIntoAndScanClosure::do_yield_check() {
  // The conditions are ordered for the remarking phase
  // when _yield is false.
  if (_yield &&
      !_collector->foregroundGCIsActive() &&
      ConcurrentMarkSweepThread::should_yield()) {
    do_yield_work();
  }
}

inline void ModUnionClosure::do_MemRegion(MemRegion mr) {
  // Align the end of mr so it's at a card boundary.
  // This is superfluous except at the end of the space;
  // we should do better than this XXX
  MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
                CardTableModRefBS::card_size /* bytes */));
  _t->mark_range(mr2);
}

inline void ModUnionClosurePar::do_MemRegion(MemRegion mr) {
  // Align the end of mr so it's at a card boundary.
  // This is superfluous except at the end of the space;
  // we should do better than this XXX
  MemRegion mr2(mr.start(), (HeapWord*)round_to((intptr_t)mr.end(),
                CardTableModRefBS::card_size /* bytes */));
  _t->par_mark_range(mr2);
}
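
// Illustrative alignment example (not part of the original source), assuming
// the common 512-byte card size: a region ending at byte address 0x1234 is
// extended by round_to up to 0x1400, the next card boundary, so the mark
// always covers whole cards in the mod union table.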

#endif // SHARE_VM_GC_IMPLEMENTATION_CONCURRENTMARKSWEEP_CONCURRENTMARKSWEEPGENERATION_INLINE_HPP