Tue, 14 Jan 2014 16:40:33 +0100
8032379: Remove the is_scavenging flag to process_strong_roots
Summary: Refactor the strong root processing to avoid using a boolean in addition to the ScanOption enum.
Reviewed-by: stefank, tschatzl, ehelin, jmasa
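The refactoring folds the extra boolean into the option set that callers already pass, so a root-processing call takes a single, self-describing argument. A minimal sketch of the pattern, using simplified, hypothetical names rather than the actual HotSpot signatures:

    // Before: a flag travels alongside the scan options.
    //   void process_strong_roots(bool is_scavenging, ScanOption so, OopClosure* roots);
    //
    // After: the same information is one more enum value, combinable with the rest.
    enum ScanOption {
      SO_None              = 0,
      SO_AllClasses        = 1,
      SO_Strings           = 2,
      SO_ScavengeCodeCache = 4   // hypothetical stand-in for the old boolean
    };
    void process_strong_roots(ScanOption so, OopClosure* roots);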
1 /*
2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "classfile/systemDictionary.hpp"
27 #include "classfile/vmSymbols.hpp"
28 #include "gc_implementation/shared/liveRange.hpp"
29 #include "gc_implementation/shared/markSweep.hpp"
30 #include "gc_implementation/shared/spaceDecorator.hpp"
31 #include "memory/blockOffsetTable.inline.hpp"
32 #include "memory/defNewGeneration.hpp"
33 #include "memory/genCollectedHeap.hpp"
34 #include "memory/space.hpp"
35 #include "memory/space.inline.hpp"
36 #include "memory/universe.inline.hpp"
37 #include "oops/oop.inline.hpp"
38 #include "oops/oop.inline2.hpp"
39 #include "runtime/java.hpp"
40 #include "runtime/prefetch.inline.hpp"
41 #include "runtime/orderAccess.inline.hpp"
42 #include "runtime/safepoint.hpp"
43 #include "utilities/copy.hpp"
44 #include "utilities/globalDefinitions.hpp"
45 #include "utilities/macros.hpp"
47 void SpaceMemRegionOopsIterClosure::do_oop(oop* p) { SpaceMemRegionOopsIterClosure::do_oop_work(p); }
48 void SpaceMemRegionOopsIterClosure::do_oop(narrowOop* p) { SpaceMemRegionOopsIterClosure::do_oop_work(p); }
50 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
52 HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
53 HeapWord* top_obj) {
54 if (top_obj != NULL) {
55 if (_sp->block_is_obj(top_obj)) {
56 if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
57 if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
58 // An arrayOop is starting on the dirty card - since we do exact
59 // store checks for objArrays we are done.
60 } else {
61 // Otherwise, it is possible that the object starting on the dirty
62 // card spans the entire card, and that the store happened on a
63 // later card. Figure out where the object ends.
64 // Use the block_size() method of the space over which
65 // the iteration is being done. That space (e.g. CMS) may have
66 // specific requirements on object sizes which will
67 // be reflected in the block_size() method.
68 top = top_obj + oop(top_obj)->size();
69 }
70 }
71 } else {
72 top = top_obj;
73 }
74 } else {
75 assert(top == _sp->end(), "only case where top_obj == NULL");
76 }
77 return top;
78 }
80 void DirtyCardToOopClosure::walk_mem_region(MemRegion mr,
81 HeapWord* bottom,
82 HeapWord* top) {
83 // 1. Blocks may or may not be objects.
84 // 2. Even when a block_is_obj(), it may not entirely
85 // occupy the block if the block quantum is larger than
86 // the object size.
87 // We can and should try to optimize by calling the non-MemRegion
88 // version of oop_iterate() for all but the extremal objects
89 // (for which we need to call the MemRegion version of
90 // oop_iterate()). To be done post-beta. XXX
91 for (; bottom < top; bottom += _sp->block_size(bottom)) {
92 // As in the case of contiguous space above, we'd like to
93 // just use the value returned by oop_iterate to increment the
94 // current pointer; unfortunately, that won't work in CMS because
95 // we'd need an interface change (it seems) to have the space
96 // "adjust the object size" (for instance pad it up to its
97 // block alignment or minimum block size restrictions). XXX
98 if (_sp->block_is_obj(bottom) &&
99 !_sp->obj_allocated_since_save_marks(oop(bottom))) {
100 oop(bottom)->oop_iterate(_cl, mr);
101 }
102 }
103 }
105 // We get called with "mr" representing the dirty region
106 // that we want to process. Because of imprecise marking,
107 // we may need to extend the incoming "mr" to the right,
108 // and scan more. However, because we may already have
109 // scanned some of that extended region, we may need to
110 // trim its right-end back some so we do not scan what
111 // we (or another worker thread) may already have scanned
112 // or may be planning to scan.
113 void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {
115 // Some collectors need to do special things whenever their dirty
116 // cards are processed. For instance, CMS must remember mutator updates
117 // (i.e. dirty cards) so as to re-scan mutated objects.
118 // Such work can be piggy-backed here on dirty card scanning, so as to make
119 // it slightly more efficient than doing a complete non-destructive pre-scan
120 // of the card table.
121 MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure();
122 if (pCl != NULL) {
123 pCl->do_MemRegion(mr);
124 }
126 HeapWord* bottom = mr.start();
127 HeapWord* last = mr.last();
128 HeapWord* top = mr.end();
129 HeapWord* bottom_obj;
130 HeapWord* top_obj;
132 assert(_precision == CardTableModRefBS::ObjHeadPreciseArray ||
133 _precision == CardTableModRefBS::Precise,
134 "Only ones we deal with for now.");
136 assert(_precision != CardTableModRefBS::ObjHeadPreciseArray ||
137 _cl->idempotent() || _last_bottom == NULL ||
138 top <= _last_bottom,
139 "Not decreasing");
140 NOT_PRODUCT(_last_bottom = mr.start());
142 bottom_obj = _sp->block_start(bottom);
143 top_obj = _sp->block_start(last);
145 assert(bottom_obj <= bottom, "just checking");
146 assert(top_obj <= top, "just checking");
148 // Given what we think is the top of the memory region and
149 // the start of the object at the top, get the actual
150 // value of the top.
151 top = get_actual_top(top, top_obj);
153 // If the previous call did some part of this region, don't redo.
154 if (_precision == CardTableModRefBS::ObjHeadPreciseArray &&
155 _min_done != NULL &&
156 _min_done < top) {
157 top = _min_done;
158 }
160 // Top may have been reset, and in fact may be below bottom,
161 // e.g. the dirty card region is entirely in a now free object
162 // -- something that could happen with a concurrent sweeper.
163 bottom = MIN2(bottom, top);
164 MemRegion extended_mr = MemRegion(bottom, top);
165 assert(bottom <= top &&
166 (_precision != CardTableModRefBS::ObjHeadPreciseArray ||
167 _min_done == NULL ||
168 top <= _min_done),
169 "overlap!");
171 // Walk the region if it is not empty; otherwise there is nothing to do.
172 if (!extended_mr.is_empty()) {
173 walk_mem_region(extended_mr, bottom_obj, top);
174 }
176 // An idempotent closure might be applied in any order, so we don't
177 // record a _min_done for it.
178 if (!_cl->idempotent()) {
179 _min_done = bottom;
180 } else {
181 assert(_min_done == _last_explicit_min_done,
182 "Don't update _min_done for idempotent cl");
183 }
184 }
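// A worked example of the extend-and-trim logic above, with illustrative
// addresses only (assuming ObjHeadPreciseArray precision and a non-array object):
// say the dirty region mr is [0x1000, 0x1200), the object under mr.last() starts
// at 0x11f8 and extends to 0x1400, and a previous call already recorded
// _min_done == 0x1300. get_actual_top() extends top to 0x1400, the _min_done
// check trims it back to 0x1300, so this call walks [0x1000, 0x1300) and then
// records _min_done = 0x1000 for the next (lower) dirty region.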
186 DirtyCardToOopClosure* Space::new_dcto_cl(ExtendedOopClosure* cl,
187 CardTableModRefBS::PrecisionStyle precision,
188 HeapWord* boundary) {
189 return new DirtyCardToOopClosure(this, cl, precision, boundary);
190 }
192 HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top,
193 HeapWord* top_obj) {
194 if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) {
195 if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
196 if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
197 // An arrayOop is starting on the dirty card - since we do exact
198 // store checks for objArrays we are done.
199 } else {
200 // Otherwise, it is possible that the object starting on the dirty
201 // card spans the entire card, and that the store happened on a
202 // later card. Figure out where the object ends.
203 assert(_sp->block_size(top_obj) == (size_t) oop(top_obj)->size(),
204 "Block size and object size mismatch");
205 top = top_obj + oop(top_obj)->size();
206 }
207 }
208 } else {
209 top = (_sp->toContiguousSpace())->top();
210 }
211 return top;
212 }
214 void Filtering_DCTOC::walk_mem_region(MemRegion mr,
215 HeapWord* bottom,
216 HeapWord* top) {
217 // Note that this assumption won't hold if we have a concurrent
218 // collector in this space, which may have freed up objects after
219 // they were dirtied and before the stop-the-world GC that is
220 // examining cards here.
221 assert(bottom < top, "ought to be at least one obj on a dirty card.");
223 if (_boundary != NULL) {
224 // We have a boundary outside of which we don't want to look
225 // at objects, so create a filtering closure around the
226 // oop closure before walking the region.
227 FilteringClosure filter(_boundary, _cl);
228 walk_mem_region_with_cl(mr, bottom, top, &filter);
229 } else {
230 // No boundary, simply walk the heap with the oop closure.
231 walk_mem_region_with_cl(mr, bottom, top, _cl);
232 }
234 }
236 // We must replicate this so that the static type of "FilteringClosure"
237 // (see above) is apparent at the oop_iterate calls.
238 #define ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
239 void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr, \
240 HeapWord* bottom, \
241 HeapWord* top, \
242 ClosureType* cl) { \
243 bottom += oop(bottom)->oop_iterate(cl, mr); \
244 if (bottom < top) { \
245 HeapWord* next_obj = bottom + oop(bottom)->size(); \
246 while (next_obj < top) { \
247 /* Bottom lies entirely below top, so we can call the */ \
248 /* non-memRegion version of oop_iterate below. */ \
249 oop(bottom)->oop_iterate(cl); \
250 bottom = next_obj; \
251 next_obj = bottom + oop(bottom)->size(); \
252 } \
253 /* Last object. */ \
254 oop(bottom)->oop_iterate(cl, mr); \
255 } \
256 }
258 // (There are only two of these, rather than N, because the split is due
259 // only to the introduction of the FilteringClosure, a local part of the
260 // impl of this abstraction.)
261 ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
262 ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
264 DirtyCardToOopClosure*
265 ContiguousSpace::new_dcto_cl(ExtendedOopClosure* cl,
266 CardTableModRefBS::PrecisionStyle precision,
267 HeapWord* boundary) {
268 return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
269 }
271 void Space::initialize(MemRegion mr,
272 bool clear_space,
273 bool mangle_space) {
274 HeapWord* bottom = mr.start();
275 HeapWord* end = mr.end();
276 assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),
277 "invalid space boundaries");
278 set_bottom(bottom);
279 set_end(end);
280 if (clear_space) clear(mangle_space);
281 }
283 void Space::clear(bool mangle_space) {
284 if (ZapUnusedHeapArea && mangle_space) {
285 mangle_unused_area();
286 }
287 }
289 ContiguousSpace::ContiguousSpace(): CompactibleSpace(), _top(NULL),
290 _concurrent_iteration_safe_limit(NULL) {
291 _mangler = new GenSpaceMangler(this);
292 }
294 ContiguousSpace::~ContiguousSpace() {
295 delete _mangler;
296 }
298 void ContiguousSpace::initialize(MemRegion mr,
299 bool clear_space,
300 bool mangle_space)
301 {
302 CompactibleSpace::initialize(mr, clear_space, mangle_space);
303 set_concurrent_iteration_safe_limit(top());
304 }
306 void ContiguousSpace::clear(bool mangle_space) {
307 set_top(bottom());
308 set_saved_mark();
309 CompactibleSpace::clear(mangle_space);
310 }
312 bool ContiguousSpace::is_in(const void* p) const {
313 return _bottom <= p && p < _top;
314 }
316 bool ContiguousSpace::is_free_block(const HeapWord* p) const {
317 return p >= _top;
318 }
320 void OffsetTableContigSpace::clear(bool mangle_space) {
321 ContiguousSpace::clear(mangle_space);
322 _offsets.initialize_threshold();
323 }
325 void OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
326 Space::set_bottom(new_bottom);
327 _offsets.set_bottom(new_bottom);
328 }
330 void OffsetTableContigSpace::set_end(HeapWord* new_end) {
331 // Space should not advertise an increase in size
332 // until after the underlying offset table has been enlarged.
333 _offsets.resize(pointer_delta(new_end, bottom()));
334 Space::set_end(new_end);
335 }
337 #ifndef PRODUCT
339 void ContiguousSpace::set_top_for_allocations(HeapWord* v) {
340 mangler()->set_top_for_allocations(v);
341 }
342 void ContiguousSpace::set_top_for_allocations() {
343 mangler()->set_top_for_allocations(top());
344 }
345 void ContiguousSpace::check_mangled_unused_area(HeapWord* limit) {
346 mangler()->check_mangled_unused_area(limit);
347 }
349 void ContiguousSpace::check_mangled_unused_area_complete() {
350 mangler()->check_mangled_unused_area_complete();
351 }
353 // Mangle only the unused space that has not previously
354 // been mangled and that has not been allocated since being
355 // mangled.
356 void ContiguousSpace::mangle_unused_area() {
357 mangler()->mangle_unused_area();
358 }
359 void ContiguousSpace::mangle_unused_area_complete() {
360 mangler()->mangle_unused_area_complete();
361 }
362 void ContiguousSpace::mangle_region(MemRegion mr) {
363 // Although this method uses SpaceMangler::mangle_region(), which
364 // is not specific to a space, when the ContiguousSpace version
365 // is called it is always with regard to a space, so this
366 // bounds check is appropriate.
367 MemRegion space_mr(bottom(), end());
368 assert(space_mr.contains(mr), "Mangling outside space");
369 SpaceMangler::mangle_region(mr);
370 }
371 #endif // NOT_PRODUCT
373 void CompactibleSpace::initialize(MemRegion mr,
374 bool clear_space,
375 bool mangle_space) {
376 Space::initialize(mr, clear_space, mangle_space);
377 set_compaction_top(bottom());
378 _next_compaction_space = NULL;
379 }
381 void CompactibleSpace::clear(bool mangle_space) {
382 Space::clear(mangle_space);
383 _compaction_top = bottom();
384 }
386 HeapWord* CompactibleSpace::forward(oop q, size_t size,
387 CompactPoint* cp, HeapWord* compact_top) {
388 // q is alive
389 // First check if we should switch compaction space
390 assert(this == cp->space, "'this' should be current compaction space.");
391 size_t compaction_max_size = pointer_delta(end(), compact_top);
392 while (size > compaction_max_size) {
393 // switch to next compaction space
394 cp->space->set_compaction_top(compact_top);
395 cp->space = cp->space->next_compaction_space();
396 if (cp->space == NULL) {
397 cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
398 assert(cp->gen != NULL, "compaction must succeed");
399 cp->space = cp->gen->first_compaction_space();
400 assert(cp->space != NULL, "generation must have a first compaction space");
401 }
402 compact_top = cp->space->bottom();
403 cp->space->set_compaction_top(compact_top);
404 cp->threshold = cp->space->initialize_threshold();
405 compaction_max_size = pointer_delta(cp->space->end(), compact_top);
406 }
408 // store the forwarding pointer into the mark word
409 if ((HeapWord*)q != compact_top) {
410 q->forward_to(oop(compact_top));
411 assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
412 } else {
413 // if the object isn't moving we can just set the mark to the default
414 // mark and handle it specially later on.
415 q->init_mark();
416 assert(q->forwardee() == NULL, "should be forwarded to NULL");
417 }
419 compact_top += size;
421 // we need to update the offset table so that the beginnings of objects can be
422 // found during scavenge. Note that we are updating the offset table based on
423 // where the object will be once the compaction phase finishes.
424 if (compact_top > cp->threshold)
425 cp->threshold =
426 cp->space->cross_threshold(compact_top - size, compact_top);
427 return compact_top;
428 }
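// Illustrative example (made-up values): with compact_top at 0x2000 and a live
// object q of 8 HeapWords sitting elsewhere, forward() records the destination
// by q->forward_to(oop(0x2000)) and returns compact_top advanced by those
// 8 words. If q already sits exactly at compact_top it will not move, so its
// mark is reset to the default value and the object is handled specially
// during the later passes.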
431 bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words,
432 HeapWord* q, size_t deadlength) {
433 if (allowed_deadspace_words >= deadlength) {
434 allowed_deadspace_words -= deadlength;
435 CollectedHeap::fill_with_object(q, deadlength);
436 oop(q)->set_mark(oop(q)->mark()->set_marked());
437 assert((int) deadlength == oop(q)->size(), "bad filler object size");
438 // Recall that we required "q == compaction_top".
439 return true;
440 } else {
441 allowed_deadspace_words = 0;
442 return false;
443 }
444 }
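// Example of the dead-space budget (illustrative numbers): if the caller's
// allowed_deadspace_words is 100 and a dead run of 40 words is encountered,
// the run is overwritten with a filler object, the filler is marked so the
// later phases treat it as live data that stays in place, and the remaining
// budget drops to 60 words. If the budget cannot cover a run it is zeroed,
// and subsequent dead space is compacted over as usual.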
446 #define block_is_always_obj(q) true
447 #define obj_size(q) oop(q)->size()
448 #define adjust_obj_size(s) s
450 void CompactibleSpace::prepare_for_compaction(CompactPoint* cp) {
451 SCAN_AND_FORWARD(cp, end, block_is_obj, block_size);
452 }
454 // Faster object search.
455 void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
456 SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size);
457 }
459 void Space::adjust_pointers() {
460 // adjust all the interior pointers to point at the new locations of objects
461 // Used by MarkSweep::mark_sweep_phase3()
463 // First check to see if there is any work to be done.
464 if (used() == 0) {
465 return; // Nothing to do.
466 }
468 // Otherwise...
469 HeapWord* q = bottom();
470 HeapWord* t = end();
472 debug_only(HeapWord* prev_q = NULL);
473 while (q < t) {
474 if (oop(q)->is_gc_marked()) {
475 // q is alive
477 // point all the oops to the new location
478 size_t size = oop(q)->adjust_pointers();
480 debug_only(prev_q = q);
482 q += size;
483 } else {
484 // q is not a live object, but we're not in a compactible space,
485 // so we don't have live ranges.
486 debug_only(prev_q = q);
487 q += block_size(q);
488 assert(q > prev_q, "we should be moving forward through memory");
489 }
490 }
491 assert(q == t, "just checking");
492 }
494 void CompactibleSpace::adjust_pointers() {
495 // Check first if there is any work to do.
496 if (used() == 0) {
497 return; // Nothing to do.
498 }
500 SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
501 }
503 void CompactibleSpace::compact() {
504 SCAN_AND_COMPACT(obj_size);
505 }
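// Taken together, prepare_for_compaction() (SCAN_AND_FORWARD),
// adjust_pointers() (SCAN_AND_ADJUST_POINTERS) and compact() (SCAN_AND_COMPACT)
// implement the three sliding-compaction passes over a space: compute each live
// object's destination and install forwarding pointers, rewrite interior oops
// to the forwarded locations, and finally copy the objects to their destinations.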
507 void Space::print_short() const { print_short_on(tty); }
509 void Space::print_short_on(outputStream* st) const {
510 st->print(" space " SIZE_FORMAT "K, %3d%% used", capacity() / K,
511 (int) ((double) used() * 100 / capacity()));
512 }
514 void Space::print() const { print_on(tty); }
516 void Space::print_on(outputStream* st) const {
517 print_short_on(st);
518 st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ")",
519 bottom(), end());
520 }
522 void ContiguousSpace::print_on(outputStream* st) const {
523 print_short_on(st);
524 st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
525 bottom(), top(), end());
526 }
528 void OffsetTableContigSpace::print_on(outputStream* st) const {
529 print_short_on(st);
530 st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
531 INTPTR_FORMAT ", " INTPTR_FORMAT ")",
532 bottom(), top(), _offsets.threshold(), end());
533 }
535 void ContiguousSpace::verify() const {
536 HeapWord* p = bottom();
537 HeapWord* t = top();
538 HeapWord* prev_p = NULL;
539 while (p < t) {
540 oop(p)->verify();
541 prev_p = p;
542 p += oop(p)->size();
543 }
544 guarantee(p == top(), "end of last object must match end of space");
545 if (top() != end()) {
546 guarantee(top() == block_start_const(end()-1) &&
547 top() == block_start_const(top()),
548 "top should be start of unallocated block, if it exists");
549 }
550 }
552 void Space::oop_iterate(ExtendedOopClosure* blk) {
553 ObjectToOopClosure blk2(blk);
554 object_iterate(&blk2);
555 }
557 HeapWord* Space::object_iterate_careful(ObjectClosureCareful* cl) {
558 guarantee(false, "NYI");
559 return bottom();
560 }
562 HeapWord* Space::object_iterate_careful_m(MemRegion mr,
563 ObjectClosureCareful* cl) {
564 guarantee(false, "NYI");
565 return bottom();
566 }
569 void Space::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
570 assert(!mr.is_empty(), "Should be non-empty");
571 // We use MemRegion(bottom(), end()) rather than used_region() below
572 // because the two are not necessarily equal for some kinds of
573 // spaces, in particular, certain kinds of free list spaces.
574 // We could use the more complicated but more precise:
575 // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
576 // but the slight imprecision seems acceptable in the assertion check.
577 assert(MemRegion(bottom(), end()).contains(mr),
578 "Should be within used space");
579 HeapWord* prev = cl->previous(); // max address from last time
580 if (prev >= mr.end()) { // nothing to do
581 return;
582 }
583 // This assert will not work when we go from cms space to perm
584 // space and use the same closure. Easy fix deferred for later. XXX YSR
585 // assert(prev == NULL || contains(prev), "Should be within space");
587 bool last_was_obj_array = false;
588 HeapWord *blk_start_addr, *region_start_addr;
589 if (prev > mr.start()) {
590 region_start_addr = prev;
591 blk_start_addr = prev;
592 // The previous invocation may have pushed "prev" beyond the
593 // last allocated block, yet there may still be blocks
594 // in this region due to a particular coalescing policy.
595 // Relax the assertion so that the case where the unallocated
596 // block is maintained and "prev" is beyond the unallocated
597 // block does not cause the assertion to fire.
598 assert((BlockOffsetArrayUseUnallocatedBlock &&
599 (!is_in(prev))) ||
600 (blk_start_addr == block_start(region_start_addr)), "invariant");
601 } else {
602 region_start_addr = mr.start();
603 blk_start_addr = block_start(region_start_addr);
604 }
605 HeapWord* region_end_addr = mr.end();
606 MemRegion derived_mr(region_start_addr, region_end_addr);
607 while (blk_start_addr < region_end_addr) {
608 const size_t size = block_size(blk_start_addr);
609 if (block_is_obj(blk_start_addr)) {
610 last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
611 } else {
612 last_was_obj_array = false;
613 }
614 blk_start_addr += size;
615 }
616 if (!last_was_obj_array) {
617 assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
618 "Should be within (closed) used space");
619 assert(blk_start_addr > prev, "Invariant");
620 cl->set_previous(blk_start_addr); // min address for next time
621 }
622 }
624 bool Space::obj_is_alive(const HeapWord* p) const {
625 assert (block_is_obj(p), "The address should point to an object");
626 return true;
627 }
629 void ContiguousSpace::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
630 assert(!mr.is_empty(), "Should be non-empty");
631 assert(used_region().contains(mr), "Should be within used space");
632 HeapWord* prev = cl->previous(); // max address from last time
633 if (prev >= mr.end()) { // nothing to do
634 return;
635 }
636 // See the comment in the more general method above in case you
637 // happen to use this method.
638 assert(prev == NULL || is_in_reserved(prev), "Should be within space");
640 bool last_was_obj_array = false;
641 HeapWord *obj_start_addr, *region_start_addr;
642 if (prev > mr.start()) {
643 region_start_addr = prev;
644 obj_start_addr = prev;
645 assert(obj_start_addr == block_start(region_start_addr), "invariant");
646 } else {
647 region_start_addr = mr.start();
648 obj_start_addr = block_start(region_start_addr);
649 }
650 HeapWord* region_end_addr = mr.end();
651 MemRegion derived_mr(region_start_addr, region_end_addr);
652 while (obj_start_addr < region_end_addr) {
653 oop obj = oop(obj_start_addr);
654 const size_t size = obj->size();
655 last_was_obj_array = cl->do_object_bm(obj, derived_mr);
656 obj_start_addr += size;
657 }
658 if (!last_was_obj_array) {
659 assert((bottom() <= obj_start_addr) && (obj_start_addr <= end()),
660 "Should be within (closed) used space");
661 assert(obj_start_addr > prev, "Invariant");
662 cl->set_previous(obj_start_addr); // min address for next time
663 }
664 }
666 #if INCLUDE_ALL_GCS
667 #define ContigSpace_PAR_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
668 \
669 void ContiguousSpace::par_oop_iterate(MemRegion mr, OopClosureType* blk) {\
670 HeapWord* obj_addr = mr.start(); \
671 HeapWord* t = mr.end(); \
672 while (obj_addr < t) { \
673 assert(oop(obj_addr)->is_oop(), "Should be an oop"); \
674 obj_addr += oop(obj_addr)->oop_iterate(blk); \
675 } \
676 }
678 ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DEFN)
680 #undef ContigSpace_PAR_OOP_ITERATE_DEFN
681 #endif // INCLUDE_ALL_GCS
683 void ContiguousSpace::oop_iterate(ExtendedOopClosure* blk) {
684 if (is_empty()) return;
685 HeapWord* obj_addr = bottom();
686 HeapWord* t = top();
687 // Could call object_iterate(), but this is easier.
688 while (obj_addr < t) {
689 obj_addr += oop(obj_addr)->oop_iterate(blk);
690 }
691 }
693 void ContiguousSpace::oop_iterate(MemRegion mr, ExtendedOopClosure* blk) {
694 if (is_empty()) {
695 return;
696 }
697 MemRegion cur = MemRegion(bottom(), top());
698 mr = mr.intersection(cur);
699 if (mr.is_empty()) {
700 return;
701 }
702 if (mr.equals(cur)) {
703 oop_iterate(blk);
704 return;
705 }
706 assert(mr.end() <= top(), "just took an intersection above");
707 HeapWord* obj_addr = block_start(mr.start());
708 HeapWord* t = mr.end();
710 // Handle first object specially.
711 oop obj = oop(obj_addr);
712 SpaceMemRegionOopsIterClosure smr_blk(blk, mr);
713 obj_addr += obj->oop_iterate(&smr_blk);
714 while (obj_addr < t) {
715 oop obj = oop(obj_addr);
716 assert(obj->is_oop(), "expected an oop");
717 obj_addr += obj->size();
718 // If "obj_addr" is not greater than top, then the
719 // entire object "obj" is within the region.
720 if (obj_addr <= t) {
721 obj->oop_iterate(blk);
722 } else {
723 // "obj" extends beyond end of region
724 obj->oop_iterate(&smr_blk);
725 break;
726 }
727 }
728 }
730 void ContiguousSpace::object_iterate(ObjectClosure* blk) {
731 if (is_empty()) return;
732 WaterMark bm = bottom_mark();
733 object_iterate_from(bm, blk);
734 }
736 // For a contiguous space, object_iterate() and safe_object_iterate()
737 // are the same.
738 void ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
739 object_iterate(blk);
740 }
742 void ContiguousSpace::object_iterate_from(WaterMark mark, ObjectClosure* blk) {
743 assert(mark.space() == this, "Mark does not match space");
744 HeapWord* p = mark.point();
745 while (p < top()) {
746 blk->do_object(oop(p));
747 p += oop(p)->size();
748 }
749 }
751 HeapWord*
752 ContiguousSpace::object_iterate_careful(ObjectClosureCareful* blk) {
753 HeapWord * limit = concurrent_iteration_safe_limit();
754 assert(limit <= top(), "sanity check");
755 for (HeapWord* p = bottom(); p < limit;) {
756 size_t size = blk->do_object_careful(oop(p));
757 if (size == 0) {
758 return p; // failed at p
759 } else {
760 p += size;
761 }
762 }
763 return NULL; // all done
764 }
766 #define ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
767 \
768 void ContiguousSpace:: \
769 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \
770 HeapWord* t; \
771 HeapWord* p = saved_mark_word(); \
772 assert(p != NULL, "expected saved mark"); \
773 \
774 const intx interval = PrefetchScanIntervalInBytes; \
775 do { \
776 t = top(); \
777 while (p < t) { \
778 Prefetch::write(p, interval); \
779 debug_only(HeapWord* prev = p); \
780 oop m = oop(p); \
781 p += m->oop_iterate(blk); \
782 } \
783 } while (t < top()); \
784 \
785 set_saved_mark_word(p); \
786 }
788 ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN)
790 #undef ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN
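// Note on the do/while in the macro above: applying the closure may itself
// promote objects into this space and so raise top(); the loop re-reads top()
// and keeps scanning until no new objects have appeared above the point already
// processed, then records that point as the new saved mark.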
792 // Very general, slow implementation.
793 HeapWord* ContiguousSpace::block_start_const(const void* p) const {
794 assert(MemRegion(bottom(), end()).contains(p),
795 err_msg("p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
796 p, bottom(), end()));
797 if (p >= top()) {
798 return top();
799 } else {
800 HeapWord* last = bottom();
801 HeapWord* cur = last;
802 while (cur <= p) {
803 last = cur;
804 cur += oop(cur)->size();
805 }
806 assert(oop(last)->is_oop(),
807 err_msg(PTR_FORMAT " should be an object start", last));
808 return last;
809 }
810 }
812 size_t ContiguousSpace::block_size(const HeapWord* p) const {
813 assert(MemRegion(bottom(), end()).contains(p),
814 err_msg("p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
815 p, bottom(), end()));
816 HeapWord* current_top = top();
817 assert(p <= current_top,
818 err_msg("p > current top - p: " PTR_FORMAT ", current top: " PTR_FORMAT,
819 p, current_top));
820 assert(p == current_top || oop(p)->is_oop(),
821 err_msg("p (" PTR_FORMAT ") is not a block start - "
822 "current_top: " PTR_FORMAT ", is_oop: %s",
823 p, current_top, BOOL_TO_STR(oop(p)->is_oop())));
824 if (p < current_top) {
825 return oop(p)->size();
826 } else {
827 assert(p == current_top, "just checking");
828 return pointer_delta(end(), (HeapWord*) p);
829 }
830 }
832 // This version requires locking.
833 inline HeapWord* ContiguousSpace::allocate_impl(size_t size,
834 HeapWord* const end_value) {
835 // In G1 there are places where a GC worker can allocate into a
836 // region using this serial allocation code without being prone to a
837 // race with other GC workers (we ensure that no other GC worker can
838 // access the same region at the same time). So the assert below is
839 // too strong in the case of G1.
840 assert(Heap_lock->owned_by_self() ||
841 (SafepointSynchronize::is_at_safepoint() &&
842 (Thread::current()->is_VM_thread() || UseG1GC)),
843 "not locked");
844 HeapWord* obj = top();
845 if (pointer_delta(end_value, obj) >= size) {
846 HeapWord* new_top = obj + size;
847 set_top(new_top);
848 assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
849 return obj;
850 } else {
851 return NULL;
852 }
853 }
855 // This version is lock-free.
856 inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size,
857 HeapWord* const end_value) {
858 do {
859 HeapWord* obj = top();
860 if (pointer_delta(end_value, obj) >= size) {
861 HeapWord* new_top = obj + size;
862 HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
863 // result can be one of two values:
864 //   the old top value: the exchange succeeded and top was advanced
865 //   otherwise: another thread changed top first and its value is returned
866 if (result == obj) {
867 assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
868 return obj;
869 }
870 } else {
871 return NULL;
872 }
873 } while (true);
874 }
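// Example interleaving for the CAS above (illustrative): threads A and B both
// read the same obj == top. A's cmpxchg_ptr finds top unchanged, installs A's
// new_top and returns obj, so A succeeds. B's cmpxchg_ptr then finds top equal
// to A's new_top and returns that value (!= B's obj), so B loops, re-reads the
// advanced top and retries with a fresh new_top, or returns NULL once the
// remaining space is too small.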
876 // Requires locking.
877 HeapWord* ContiguousSpace::allocate(size_t size) {
878 return allocate_impl(size, end());
879 }
881 // Lock-free.
882 HeapWord* ContiguousSpace::par_allocate(size_t size) {
883 return par_allocate_impl(size, end());
884 }
886 void ContiguousSpace::allocate_temporary_filler(int factor) {
887 // Allocate a temporary type array, decreasing the free size by the factor 'factor'.
888 assert(factor >= 0, "just checking");
889 size_t size = pointer_delta(end(), top());
891 // if space is full, return
892 if (size == 0) return;
894 if (factor > 0) {
895 size -= size/factor;
896 }
897 size = align_object_size(size);
899 const size_t array_header_size = typeArrayOopDesc::header_size(T_INT);
900 if (size >= (size_t)align_object_size(array_header_size)) {
901 size_t length = (size - array_header_size) * (HeapWordSize / sizeof(jint));
902 // allocate uninitialized int array
903 typeArrayOop t = (typeArrayOop) allocate(size);
904 assert(t != NULL, "allocation should succeed");
905 t->set_mark(markOopDesc::prototype());
906 t->set_klass(Universe::intArrayKlassObj());
907 t->set_length((int)length);
908 } else {
909 assert(size == CollectedHeap::min_fill_size(),
910 "size for smallest fake object doesn't match");
911 instanceOop obj = (instanceOop) allocate(size);
912 obj->set_mark(markOopDesc::prototype());
913 obj->set_klass_gap(0);
914 obj->set_klass(SystemDictionary::Object_klass());
915 }
916 }
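// Worked example (illustrative numbers): with 1000 free HeapWords and factor 4,
// the filler is sized to 750 words, leaving roughly a quarter of the space free.
// The filler itself is an uninitialized int array whose length is the payload
// size converted from HeapWords to jint elements; if the computed size is too
// small for an array header, a minimal java.lang.Object filler is used instead.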
918 void EdenSpace::clear(bool mangle_space) {
919 ContiguousSpace::clear(mangle_space);
920 set_soft_end(end());
921 }
923 // Requires locking.
924 HeapWord* EdenSpace::allocate(size_t size) {
925 return allocate_impl(size, soft_end());
926 }
928 // Lock-free.
929 HeapWord* EdenSpace::par_allocate(size_t size) {
930 return par_allocate_impl(size, soft_end());
931 }
933 HeapWord* ConcEdenSpace::par_allocate(size_t size)
934 {
935 do {
936 // The invariant is that top() should be read before end(), because
937 // top() can't be greater than end(): if an update of _soft_end
938 // occurs between 'end_val = end();' and 'top_val = top();', top()
939 // can also grow up to the new end() and the condition
940 // 'top_val > end_val' becomes true. To ensure this load ordering,
941 // OrderAccess::loadload() is required after the top() read.
942 HeapWord* obj = top();
943 OrderAccess::loadload();
944 if (pointer_delta(*soft_end_addr(), obj) >= size) {
945 HeapWord* new_top = obj + size;
946 HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
947 // result can be one of two values:
948 //   the old top value: the exchange succeeded and top was advanced
949 //   otherwise: another thread changed top first and its value is returned
950 if (result == obj) {
951 assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
952 return obj;
953 }
954 } else {
955 return NULL;
956 }
957 } while (true);
958 }
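// Why the loadload() matters here (illustrative interleaving): if the two loads
// were reordered, this thread could read the old, smaller _soft_end first; the
// soft end is then advanced and other threads allocate past the value we read;
// our later read of top() now yields top_val > end_val, and the unsigned
// pointer_delta() in the size check would underflow. Reading top() first and
// fencing the two loads guarantees the top we use is never beyond the soft end
// we compare against.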
961 HeapWord* OffsetTableContigSpace::initialize_threshold() {
962 return _offsets.initialize_threshold();
963 }
965 HeapWord* OffsetTableContigSpace::cross_threshold(HeapWord* start, HeapWord* end) {
966 _offsets.alloc_block(start, end);
967 return _offsets.threshold();
968 }
970 OffsetTableContigSpace::OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
971 MemRegion mr) :
972 _offsets(sharedOffsetArray, mr),
973 _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true)
974 {
975 _offsets.set_contig_space(this);
976 initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
977 }
979 #define OBJ_SAMPLE_INTERVAL 0
980 #define BLOCK_SAMPLE_INTERVAL 100
982 void OffsetTableContigSpace::verify() const {
983 HeapWord* p = bottom();
984 HeapWord* prev_p = NULL;
985 int objs = 0;
986 int blocks = 0;
988 if (VerifyObjectStartArray) {
989 _offsets.verify();
990 }
992 while (p < top()) {
993 size_t size = oop(p)->size();
994 // For a sampling of objects in the space, find it using the
995 // block offset table.
996 if (blocks == BLOCK_SAMPLE_INTERVAL) {
997 guarantee(p == block_start_const(p + (size/2)),
998 "check offset computation");
999 blocks = 0;
1000 } else {
1001 blocks++;
1002 }
1004 if (objs == OBJ_SAMPLE_INTERVAL) {
1005 oop(p)->verify();
1006 objs = 0;
1007 } else {
1008 objs++;
1009 }
1010 prev_p = p;
1011 p += size;
1012 }
1013 guarantee(p == top(), "end of last object must match end of space");
1014 }
1017 size_t TenuredSpace::allowed_dead_ratio() const {
1018 return MarkSweepDeadRatio;
1019 }