Wed, 12 Mar 2014 15:22:45 +0100
8038404: Move object_iterate_mem from Space to CMS since it is only ever used by CMS
Reviewed-by: brutisso, tschatzl, stefank
/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc_implementation/shared/liveRange.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/defNewGeneration.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/space.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
                                                HeapWord* top_obj) {
  if (top_obj != NULL) {
    if (_sp->block_is_obj(top_obj)) {
      if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
        if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
          // An arrayOop is starting on the dirty card - since we do exact
          // store checks for objArrays we are done.
        } else {
          // Otherwise, it is possible that the object starting on the dirty
          // card spans the entire card, and that the store happened on a
          // later card. Figure out where the object ends.
          // Use the block_size() method of the space over which
          // the iteration is being done. That space (e.g. CMS) may have
          // specific requirements on object sizes which will
          // be reflected in the block_size() method.
          top = top_obj + oop(top_obj)->size();
        }
      }
    } else {
      top = top_obj;
    }
  } else {
    assert(top == _sp->end(), "only case where top_obj == NULL");
  }
  return top;
}

void DirtyCardToOopClosure::walk_mem_region(MemRegion mr,
                                            HeapWord* bottom,
                                            HeapWord* top) {
  // 1. Blocks may or may not be objects.
  // 2. Even when a block_is_obj(), it may not entirely
  //    occupy the block if the block quantum is larger than
  //    the object size.
  // We can and should try to optimize by calling the non-MemRegion
  // version of oop_iterate() for all but the extremal objects
  // (for which we need to call the MemRegion version of
  // oop_iterate()). To be done post-beta XXX
  for (; bottom < top; bottom += _sp->block_size(bottom)) {
    // As in the case of contiguous space above, we'd like to
    // just use the value returned by oop_iterate to increment the
    // current pointer; unfortunately, that won't work in CMS because
    // we'd need an interface change (it seems) to have the space
    // "adjust the object size" (for instance pad it up to its
    // block alignment or minimum block size restrictions). XXX
    if (_sp->block_is_obj(bottom) &&
        !_sp->obj_allocated_since_save_marks(oop(bottom))) {
      oop(bottom)->oop_iterate(_cl, mr);
    }
  }
}

// We get called with "mr" representing the dirty region
// that we want to process. Because of imprecise marking,
// we may need to extend the incoming "mr" to the right,
// and scan more. However, because we may already have
// scanned some of that extended region, we may need to
// trim its right-end back some so we do not scan what
// we (or another worker thread) may already have scanned
// or are planning to scan.
void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {

  // Some collectors need to do special things whenever their dirty
  // cards are processed. For instance, CMS must remember mutator updates
  // (i.e. dirty cards) so as to re-scan mutated objects.
  // Such work can be piggy-backed here on dirty card scanning, so as to make
  // it slightly more efficient than doing a complete non-destructive pre-scan
  // of the card table.
  MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure();
  if (pCl != NULL) {
    pCl->do_MemRegion(mr);
  }

  HeapWord* bottom = mr.start();
  HeapWord* last = mr.last();
  HeapWord* top = mr.end();
  HeapWord* bottom_obj;
  HeapWord* top_obj;

  assert(_precision == CardTableModRefBS::ObjHeadPreciseArray ||
         _precision == CardTableModRefBS::Precise,
         "Only ones we deal with for now.");

  assert(_precision != CardTableModRefBS::ObjHeadPreciseArray ||
         _cl->idempotent() || _last_bottom == NULL ||
         top <= _last_bottom,
         "Not decreasing");
  NOT_PRODUCT(_last_bottom = mr.start());

  bottom_obj = _sp->block_start(bottom);
  top_obj    = _sp->block_start(last);

  assert(bottom_obj <= bottom, "just checking");
  assert(top_obj    <= top,    "just checking");

  // Given what we think is the top of the memory region and
  // the start of the object at the top, get the actual
  // value of the top.
  top = get_actual_top(top, top_obj);

  // If the previous call did some part of this region, don't redo.
  if (_precision == CardTableModRefBS::ObjHeadPreciseArray &&
      _min_done != NULL &&
      _min_done < top) {
    top = _min_done;
  }

  // Top may have been reset, and in fact may be below bottom,
  // e.g. the dirty card region is entirely in a now free object
  // -- something that could happen with a concurrent sweeper.
  bottom = MIN2(bottom, top);
  MemRegion extended_mr = MemRegion(bottom, top);
  assert(bottom <= top &&
         (_precision != CardTableModRefBS::ObjHeadPreciseArray ||
          _min_done == NULL ||
          top <= _min_done),
         "overlap!");

  // Walk the region if it is not empty; otherwise there is nothing to do.
  if (!extended_mr.is_empty()) {
    walk_mem_region(extended_mr, bottom_obj, top);
  }

  // An idempotent closure might be applied in any order, so we don't
  // record a _min_done for it.
  if (!_cl->idempotent()) {
    _min_done = bottom;
  } else {
    assert(_min_done == _last_explicit_min_done,
           "Don't update _min_done for idempotent cl");
  }
}

DirtyCardToOopClosure* Space::new_dcto_cl(ExtendedOopClosure* cl,
                                          CardTableModRefBS::PrecisionStyle precision,
                                          HeapWord* boundary) {
  return new DirtyCardToOopClosure(this, cl, precision, boundary);
}

HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top,
                                               HeapWord* top_obj) {
  if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) {
    if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
      if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
        // An arrayOop is starting on the dirty card - since we do exact
        // store checks for objArrays we are done.
      } else {
        // Otherwise, it is possible that the object starting on the dirty
        // card spans the entire card, and that the store happened on a
        // later card. Figure out where the object ends.
        assert(_sp->block_size(top_obj) == (size_t) oop(top_obj)->size(),
               "Block size and object size mismatch");
        top = top_obj + oop(top_obj)->size();
      }
    }
  } else {
    top = (_sp->toContiguousSpace())->top();
  }
  return top;
}

void Filtering_DCTOC::walk_mem_region(MemRegion mr,
                                      HeapWord* bottom,
                                      HeapWord* top) {
  // Note that this assumption won't hold if we have a concurrent
  // collector in this space, which may have freed up objects after
  // they were dirtied and before the stop-the-world GC that is
  // examining cards here.
  assert(bottom < top, "ought to be at least one obj on a dirty card.");

  if (_boundary != NULL) {
    // We have a boundary outside of which we don't want to look
    // at objects, so create a filtering closure around the
    // oop closure before walking the region.
    FilteringClosure filter(_boundary, _cl);
    walk_mem_region_with_cl(mr, bottom, top, &filter);
  } else {
    // No boundary, simply walk the heap with the oop closure.
    walk_mem_region_with_cl(mr, bottom, top, _cl);
  }

}

// We must replicate this so that the static type of "FilteringClosure"
// (see above) is apparent at the oop_iterate calls.
#define ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,        \
                                                   HeapWord* bottom,    \
                                                   HeapWord* top,       \
                                                   ClosureType* cl) {   \
  bottom += oop(bottom)->oop_iterate(cl, mr);                           \
  if (bottom < top) {                                                   \
    HeapWord* next_obj = bottom + oop(bottom)->size();                  \
    while (next_obj < top) {                                            \
      /* Bottom lies entirely below top, so we can call the */          \
      /* non-memRegion version of oop_iterate below. */                 \
      oop(bottom)->oop_iterate(cl);                                     \
      bottom = next_obj;                                                \
      next_obj = bottom + oop(bottom)->size();                          \
    }                                                                   \
    /* Last object. */                                                  \
    oop(bottom)->oop_iterate(cl, mr);                                   \
  }                                                                     \
}

// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)

DirtyCardToOopClosure*
ContiguousSpace::new_dcto_cl(ExtendedOopClosure* cl,
                             CardTableModRefBS::PrecisionStyle precision,
                             HeapWord* boundary) {
  return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
}

void Space::initialize(MemRegion mr,
                       bool clear_space,
                       bool mangle_space) {
  HeapWord* bottom = mr.start();
  HeapWord* end    = mr.end();
  assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),
         "invalid space boundaries");
  set_bottom(bottom);
  set_end(end);
  if (clear_space) clear(mangle_space);
}

void Space::clear(bool mangle_space) {
  if (ZapUnusedHeapArea && mangle_space) {
    mangle_unused_area();
  }
}

ContiguousSpace::ContiguousSpace(): CompactibleSpace(), _top(NULL),
    _concurrent_iteration_safe_limit(NULL) {
  _mangler = new GenSpaceMangler(this);
}

ContiguousSpace::~ContiguousSpace() {
  delete _mangler;
}

void ContiguousSpace::initialize(MemRegion mr,
                                 bool clear_space,
                                 bool mangle_space)
{
  CompactibleSpace::initialize(mr, clear_space, mangle_space);
  set_concurrent_iteration_safe_limit(top());
}

void ContiguousSpace::clear(bool mangle_space) {
  set_top(bottom());
  set_saved_mark();
  CompactibleSpace::clear(mangle_space);
}

bool ContiguousSpace::is_in(const void* p) const {
  return _bottom <= p && p < _top;
}

bool ContiguousSpace::is_free_block(const HeapWord* p) const {
  return p >= _top;
}

void OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.initialize_threshold();
}

void OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void OffsetTableContigSpace::set_end(HeapWord* new_end) {
  // Space should not advertise an increase in size
  // until after the underlying offset table has been enlarged.
  _offsets.resize(pointer_delta(new_end, bottom()));
  Space::set_end(new_end);
}

#ifndef PRODUCT

void ContiguousSpace::set_top_for_allocations(HeapWord* v) {
  mangler()->set_top_for_allocations(v);
}
void ContiguousSpace::set_top_for_allocations() {
  mangler()->set_top_for_allocations(top());
}
void ContiguousSpace::check_mangled_unused_area(HeapWord* limit) {
  mangler()->check_mangled_unused_area(limit);
}

void ContiguousSpace::check_mangled_unused_area_complete() {
  mangler()->check_mangled_unused_area_complete();
}

// Mangle only the unused space that has not previously
// been mangled and that has not been allocated since being
// mangled.
void ContiguousSpace::mangle_unused_area() {
  mangler()->mangle_unused_area();
}
void ContiguousSpace::mangle_unused_area_complete() {
  mangler()->mangle_unused_area_complete();
}
void ContiguousSpace::mangle_region(MemRegion mr) {
  // Although this method uses SpaceMangler::mangle_region(), which
  // is not specific to a space, when the ContiguousSpace version
  // is called it is always with regard to a space, so this
  // bounds checking is appropriate.
  MemRegion space_mr(bottom(), end());
  assert(space_mr.contains(mr), "Mangling outside space");
  SpaceMangler::mangle_region(mr);
}
#endif // NOT_PRODUCT

void CompactibleSpace::initialize(MemRegion mr,
                                  bool clear_space,
                                  bool mangle_space) {
  Space::initialize(mr, clear_space, mangle_space);
  set_compaction_top(bottom());
  _next_compaction_space = NULL;
}

void CompactibleSpace::clear(bool mangle_space) {
  Space::clear(mangle_space);
  _compaction_top = bottom();
}

HeapWord* CompactibleSpace::forward(oop q, size_t size,
                                    CompactPoint* cp, HeapWord* compact_top) {
  // q is alive
  // First check if we should switch compaction space
  assert(this == cp->space, "'this' should be current compaction space.");
  size_t compaction_max_size = pointer_delta(end(), compact_top);
  while (size > compaction_max_size) {
    // switch to next compaction space
    cp->space->set_compaction_top(compact_top);
    cp->space = cp->space->next_compaction_space();
    if (cp->space == NULL) {
      cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
      assert(cp->gen != NULL, "compaction must succeed");
      cp->space = cp->gen->first_compaction_space();
      assert(cp->space != NULL, "generation must have a first compaction space");
    }
    compact_top = cp->space->bottom();
    cp->space->set_compaction_top(compact_top);
    cp->threshold = cp->space->initialize_threshold();
    compaction_max_size = pointer_delta(cp->space->end(), compact_top);
  }

  // store the forwarding pointer into the mark word
  if ((HeapWord*)q != compact_top) {
    q->forward_to(oop(compact_top));
    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
  } else {
    // if the object isn't moving we can just set the mark to the default
    // mark and handle it specially later on.
    q->init_mark();
    assert(q->forwardee() == NULL, "should be forwarded to NULL");
  }

  compact_top += size;

  // we need to update the offset table so that the beginnings of objects can be
  // found during scavenge. Note that we are updating the offset table based on
  // where the object will be once the compaction phase finishes.
  if (compact_top > cp->threshold)
    cp->threshold =
      cp->space->cross_threshold(compact_top - size, compact_top);
  return compact_top;
}


bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words,
                                        HeapWord* q, size_t deadlength) {
  if (allowed_deadspace_words >= deadlength) {
    allowed_deadspace_words -= deadlength;
    CollectedHeap::fill_with_object(q, deadlength);
    oop(q)->set_mark(oop(q)->mark()->set_marked());
    assert((int) deadlength == oop(q)->size(), "bad filler object size");
    // Recall that we required "q == compaction_top".
    return true;
  } else {
    allowed_deadspace_words = 0;
    return false;
  }
}

#define block_is_always_obj(q) true
#define obj_size(q) oop(q)->size()
#define adjust_obj_size(s) s

void CompactibleSpace::prepare_for_compaction(CompactPoint* cp) {
  SCAN_AND_FORWARD(cp, end, block_is_obj, block_size);
}

// Faster object search.
void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
  SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size);
}

void Space::adjust_pointers() {
  // adjust all the interior pointers to point at the new locations of objects
  // Used by MarkSweep::mark_sweep_phase3()

  // First check to see if there is any work to be done.
  if (used() == 0) {
    return;  // Nothing to do.
  }

  // Otherwise...
  HeapWord* q = bottom();
  HeapWord* t = end();

  debug_only(HeapWord* prev_q = NULL);
  while (q < t) {
    if (oop(q)->is_gc_marked()) {
      // q is alive

      // point all the oops to the new location
      size_t size = oop(q)->adjust_pointers();

      debug_only(prev_q = q);

      q += size;
    } else {
      // q is not a live object, but we're not in a compactible space,
      // so we don't have live ranges.
      debug_only(prev_q = q);
      q += block_size(q);
      assert(q > prev_q, "we should be moving forward through memory");
    }
  }
  assert(q == t, "just checking");
}

void CompactibleSpace::adjust_pointers() {
  // Check first if there is any work to do.
  if (used() == 0) {
    return;  // Nothing to do.
  }

  SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
}

void CompactibleSpace::compact() {
  SCAN_AND_COMPACT(obj_size);
}

void Space::print_short() const { print_short_on(tty); }

void Space::print_short_on(outputStream* st) const {
  st->print(" space " SIZE_FORMAT "K, %3d%% used", capacity() / K,
            (int) ((double) used() * 100 / capacity()));
}

void Space::print() const { print_on(tty); }

void Space::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               bottom(), end());
}

void ContiguousSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               bottom(), top(), end());
}

void OffsetTableContigSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
               INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               bottom(), top(), _offsets.threshold(), end());
}

void ContiguousSpace::verify() const {
  HeapWord* p = bottom();
  HeapWord* t = top();
  HeapWord* prev_p = NULL;
  while (p < t) {
    oop(p)->verify();
    prev_p = p;
    p += oop(p)->size();
  }
  guarantee(p == top(), "end of last object must match end of space");
  if (top() != end()) {
    guarantee(top() == block_start_const(end()-1) &&
              top() == block_start_const(top()),
              "top should be start of unallocated block, if it exists");
  }
}

void Space::oop_iterate(ExtendedOopClosure* blk) {
  ObjectToOopClosure blk2(blk);
  object_iterate(&blk2);
}

HeapWord* Space::object_iterate_careful(ObjectClosureCareful* cl) {
  guarantee(false, "NYI");
  return bottom();
}

HeapWord* Space::object_iterate_careful_m(MemRegion mr,
                                          ObjectClosureCareful* cl) {
  guarantee(false, "NYI");
  return bottom();
}

bool Space::obj_is_alive(const HeapWord* p) const {
  assert(block_is_obj(p), "The address should point to an object");
  return true;
}

#if INCLUDE_ALL_GCS
#define ContigSpace_PAR_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)         \
                                                                             \
  void ContiguousSpace::par_oop_iterate(MemRegion mr, OopClosureType* blk) {\
    HeapWord* obj_addr = mr.start();                                         \
    HeapWord* t = mr.end();                                                  \
    while (obj_addr < t) {                                                   \
      assert(oop(obj_addr)->is_oop(), "Should be an oop");                   \
      obj_addr += oop(obj_addr)->oop_iterate(blk);                           \
    }                                                                        \
  }

  ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DEFN)

#undef ContigSpace_PAR_OOP_ITERATE_DEFN
#endif // INCLUDE_ALL_GCS

void ContiguousSpace::oop_iterate(ExtendedOopClosure* blk) {
  if (is_empty()) return;
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();
  // Could call object_iterate(), but this is easier.
  while (obj_addr < t) {
    obj_addr += oop(obj_addr)->oop_iterate(blk);
  }
}

void ContiguousSpace::object_iterate(ObjectClosure* blk) {
  if (is_empty()) return;
  WaterMark bm = bottom_mark();
  object_iterate_from(bm, blk);
}

// For a contiguous space object_iterate() and safe_object_iterate()
// are the same.
void ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
  object_iterate(blk);
}

void ContiguousSpace::object_iterate_from(WaterMark mark, ObjectClosure* blk) {
  assert(mark.space() == this, "Mark does not match space");
  HeapWord* p = mark.point();
  while (p < top()) {
    blk->do_object(oop(p));
    p += oop(p)->size();
  }
}

HeapWord*
ContiguousSpace::object_iterate_careful(ObjectClosureCareful* blk) {
  HeapWord* limit = concurrent_iteration_safe_limit();
  assert(limit <= top(), "sanity check");
  for (HeapWord* p = bottom(); p < limit;) {
    size_t size = blk->do_object_careful(oop(p));
    if (size == 0) {
      return p;  // failed at p
    } else {
      p += size;
    }
  }
  return NULL; // all done
}

#define ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)  \
                                                                          \
void ContiguousSpace::                                                    \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {            \
  HeapWord* t;                                                            \
  HeapWord* p = saved_mark_word();                                        \
  assert(p != NULL, "expected saved mark");                               \
                                                                          \
  const intx interval = PrefetchScanIntervalInBytes;                      \
  do {                                                                    \
    t = top();                                                            \
    while (p < t) {                                                       \
      Prefetch::write(p, interval);                                       \
      debug_only(HeapWord* prev = p);                                     \
      oop m = oop(p);                                                     \
      p += m->oop_iterate(blk);                                           \
    }                                                                     \
  } while (t < top());                                                    \
                                                                          \
  set_saved_mark_word(p);                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN)

#undef ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN

// Very general, slow implementation.
HeapWord* ContiguousSpace::block_start_const(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         err_msg("p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
                 p, bottom(), end()));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size();
    }
    assert(oop(last)->is_oop(),
           err_msg(PTR_FORMAT " should be an object start", last));
    return last;
  }
}

size_t ContiguousSpace::block_size(const HeapWord* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         err_msg("p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
                 p, bottom(), end()));
  HeapWord* current_top = top();
  assert(p <= current_top,
         err_msg("p > current top - p: " PTR_FORMAT ", current top: " PTR_FORMAT,
                 p, current_top));
  assert(p == current_top || oop(p)->is_oop(),
         err_msg("p (" PTR_FORMAT ") is not a block start - "
                 "current_top: " PTR_FORMAT ", is_oop: %s",
                 p, current_top, BOOL_TO_STR(oop(p)->is_oop())));
  if (p < current_top) {
    return oop(p)->size();
  } else {
    assert(p == current_top, "just checking");
    return pointer_delta(end(), (HeapWord*) p);
  }
}

// This version requires locking.
inline HeapWord* ContiguousSpace::allocate_impl(size_t size,
                                                HeapWord* const end_value) {
  // In G1 there are places where a GC worker can allocate into a
  // region using this serial allocation code without being prone to a
  // race with other GC workers (we ensure that no other GC worker can
  // access the same region at the same time). So the assert below is
  // too strong in the case of G1.
  assert(Heap_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() &&
          (Thread::current()->is_VM_thread() || UseG1GC)),
         "not locked");
  HeapWord* obj = top();
  if (pointer_delta(end_value, obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
    return obj;
  } else {
    return NULL;
  }
}

// This version is lock-free.
inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size,
                                                    HeapWord* const end_value) {
  do {
    HeapWord* obj = top();
    if (pointer_delta(end_value, obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // result can be one of two values:
      //  the old top value: the exchange succeeded
      //  otherwise: the new value of top, meaning another thread raced us
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}

// Requires locking.
HeapWord* ContiguousSpace::allocate(size_t size) {
  return allocate_impl(size, end());
}

// Lock-free.
HeapWord* ContiguousSpace::par_allocate(size_t size) {
  return par_allocate_impl(size, end());
}

void ContiguousSpace::allocate_temporary_filler(int factor) {
  // Allocate a temporary type array, decreasing the free size by a factor of 'factor'.
  assert(factor >= 0, "just checking");
  size_t size = pointer_delta(end(), top());

  // if space is full, return
  if (size == 0) return;

  if (factor > 0) {
    size -= size/factor;
  }
  size = align_object_size(size);

  const size_t array_header_size = typeArrayOopDesc::header_size(T_INT);
  if (size >= (size_t)align_object_size(array_header_size)) {
    size_t length = (size - array_header_size) * (HeapWordSize / sizeof(jint));
    // allocate uninitialized int array
    typeArrayOop t = (typeArrayOop) allocate(size);
    assert(t != NULL, "allocation should succeed");
    t->set_mark(markOopDesc::prototype());
    t->set_klass(Universe::intArrayKlassObj());
    t->set_length((int)length);
  } else {
    assert(size == CollectedHeap::min_fill_size(),
           "size for smallest fake object doesn't match");
    instanceOop obj = (instanceOop) allocate(size);
    obj->set_mark(markOopDesc::prototype());
    obj->set_klass_gap(0);
    obj->set_klass(SystemDictionary::Object_klass());
  }
}

void EdenSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  set_soft_end(end());
}

// Requires locking.
HeapWord* EdenSpace::allocate(size_t size) {
  return allocate_impl(size, soft_end());
}

// Lock-free.
HeapWord* EdenSpace::par_allocate(size_t size) {
  return par_allocate_impl(size, soft_end());
}

HeapWord* ConcEdenSpace::par_allocate(size_t size)
{
  do {
    // The invariant is that top() is read before end(): top() can't be
    // greater than end(), but if _soft_end is updated between reading
    // end() and reading top(), top() can grow up to the new end() and
    // the stale comparison would see 'top_val > end_val'. To enforce
    // this loading order, OrderAccess::loadload() is required after the
    // top() read.
    HeapWord* obj = top();
    OrderAccess::loadload();
    if (pointer_delta(*soft_end_addr(), obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // result can be one of two values:
      //  the old top value: the exchange succeeded
      //  otherwise: the new value of top, meaning another thread raced us
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}


HeapWord* OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* OffsetTableContigSpace::cross_threshold(HeapWord* start, HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}

OffsetTableContigSpace::OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                                               MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true)
{
  _offsets.set_contig_space(this);
  initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
}

#define OBJ_SAMPLE_INTERVAL 0
#define BLOCK_SAMPLE_INTERVAL 100

void OffsetTableContigSpace::verify() const {
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  int objs = 0;
  int blocks = 0;

  if (VerifyObjectStartArray) {
    _offsets.verify();
  }

  while (p < top()) {
    size_t size = oop(p)->size();
    // For a sampling of objects in the space, find it using the
    // block offset table.
    if (blocks == BLOCK_SAMPLE_INTERVAL) {
      guarantee(p == block_start_const(p + (size/2)),
                "check offset computation");
      blocks = 0;
    } else {
      blocks++;
    }

    if (objs == OBJ_SAMPLE_INTERVAL) {
      oop(p)->verify();
      objs = 0;
    } else {
      objs++;
    }
    prev_p = p;
    p += size;
  }
  guarantee(p == top(), "end of last object must match end of space");
}


size_t TenuredSpace::allowed_dead_ratio() const {
  return MarkSweepDeadRatio;
}