Thu, 17 Jan 2013 19:04:48 -0800
8006537: Assert when dumping archive with default methods
Reviewed-by: coleenp
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc_implementation/shared/liveRange.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/defNewGeneration.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/space.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "runtime/java.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"

void SpaceMemRegionOopsIterClosure::do_oop(oop* p)       { SpaceMemRegionOopsIterClosure::do_oop_work(p); }
void SpaceMemRegionOopsIterClosure::do_oop(narrowOop* p) { SpaceMemRegionOopsIterClosure::do_oop_work(p); }

HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
                                                HeapWord* top_obj) {
  if (top_obj != NULL) {
    if (_sp->block_is_obj(top_obj)) {
      if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
        if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
          // An arrayOop is starting on the dirty card - since we do exact
          // store checks for objArrays we are done.
        } else {
          // Otherwise, it is possible that the object starting on the dirty
          // card spans the entire card, and that the store happened on a
          // later card.  Figure out where the object ends.
          // Use the block_size() method of the space over which
          // the iteration is being done.  That space (e.g. CMS) may have
          // specific requirements on object sizes which will
          // be reflected in the block_size() method.
          top = top_obj + oop(top_obj)->size();
        }
      }
    } else {
      top = top_obj;
    }
  } else {
    assert(top == _sp->end(), "only case where top_obj == NULL");
  }
  return top;
}

void DirtyCardToOopClosure::walk_mem_region(MemRegion mr,
                                            HeapWord* bottom,
                                            HeapWord* top) {
  // 1. Blocks may or may not be objects.
  // 2. Even when a block_is_obj(), it may not entirely
  //    occupy the block if the block quantum is larger than
  //    the object size.
  // We can and should try to optimize by calling the non-MemRegion
  // version of oop_iterate() for all but the extremal objects
  // (for which we need to call the MemRegion version of
  // oop_iterate()).  To be done post-beta XXX
  for (; bottom < top; bottom += _sp->block_size(bottom)) {
    // As in the case of contiguous space above, we'd like to
    // just use the value returned by oop_iterate to increment the
    // current pointer; unfortunately, that won't work in CMS because
    // we'd need an interface change (it seems) to have the space
    // "adjust the object size" (for instance pad it up to its
    // block alignment or minimum block size restrictions). XXX
    if (_sp->block_is_obj(bottom) &&
        !_sp->obj_allocated_since_save_marks(oop(bottom))) {
      oop(bottom)->oop_iterate(_cl, mr);
    }
  }
}

// We get called with "mr" representing the dirty region
// that we want to process. Because of imprecise marking,
// we may need to extend the incoming "mr" to the right,
// and scan more. However, because we may already have
// scanned some of that extended region, we may need to
// trim its right end back some so we do not scan what
// we (or another worker thread) may already have scanned
// or plan to scan.
void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {

  // Some collectors need to do special things whenever their dirty
  // cards are processed. For instance, CMS must remember mutator updates
  // (i.e. dirty cards) so as to re-scan mutated objects.
  // Such work can be piggy-backed here on dirty card scanning, so as to make
  // it slightly more efficient than doing a complete non-destructive pre-scan
  // of the card table.
  MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure();
  if (pCl != NULL) {
    pCl->do_MemRegion(mr);
  }

  HeapWord* bottom = mr.start();
  HeapWord* last   = mr.last();
  HeapWord* top    = mr.end();
  HeapWord* bottom_obj;
  HeapWord* top_obj;

  assert(_precision == CardTableModRefBS::ObjHeadPreciseArray ||
         _precision == CardTableModRefBS::Precise,
         "Only ones we deal with for now.");

  assert(_precision != CardTableModRefBS::ObjHeadPreciseArray ||
         _cl->idempotent() || _last_bottom == NULL ||
         top <= _last_bottom,
         "Not decreasing");
  NOT_PRODUCT(_last_bottom = mr.start());

  bottom_obj = _sp->block_start(bottom);
  top_obj    = _sp->block_start(last);

  assert(bottom_obj <= bottom, "just checking");
  assert(top_obj    <= top,    "just checking");

  // Given what we think is the top of the memory region and
  // the start of the object at the top, get the actual
  // value of the top.
  top = get_actual_top(top, top_obj);

  // If the previous call did some part of this region, don't redo.
  if (_precision == CardTableModRefBS::ObjHeadPreciseArray &&
      _min_done != NULL &&
      _min_done < top) {
    top = _min_done;
  }

  // Top may have been reset, and in fact may be below bottom,
  // e.g. the dirty card region is entirely in a now free object
  // -- something that could happen with a concurrent sweeper.
  bottom = MIN2(bottom, top);
  MemRegion extended_mr = MemRegion(bottom, top);
  assert(bottom <= top &&
         (_precision != CardTableModRefBS::ObjHeadPreciseArray ||
          _min_done == NULL ||
          top <= _min_done),
         "overlap!");

  // Walk the region if it is not empty; otherwise there is nothing to do.
  if (!extended_mr.is_empty()) {
    walk_mem_region(extended_mr, bottom_obj, top);
  }

  // An idempotent closure might be applied in any order, so we don't
  // record a _min_done for it.
  if (!_cl->idempotent()) {
    _min_done = bottom;
  } else {
    assert(_min_done == _last_explicit_min_done,
           "Don't update _min_done for idempotent cl");
  }
}
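
// Worked example of the extend/trim dance above (illustrative figures only,
// assuming 512-byte cards): say "mr" covers one dirty card [C, C+512) and the
// object starting on that card actually runs to C+800. get_actual_top()
// extends top to C+800. But if an earlier call (cards are processed in
// decreasing address order, per the "Not decreasing" assert) already covered
// [C+600, ...), then _min_done is C+600 and top is trimmed back, so this call
// walks only [bottom, C+600) and nothing is scanned twice.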

DirtyCardToOopClosure* Space::new_dcto_cl(ExtendedOopClosure* cl,
                                          CardTableModRefBS::PrecisionStyle precision,
                                          HeapWord* boundary) {
  return new DirtyCardToOopClosure(this, cl, precision, boundary);
}

HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top,
                                               HeapWord* top_obj) {
  if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) {
    if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
      if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
        // An arrayOop is starting on the dirty card - since we do exact
        // store checks for objArrays we are done.
      } else {
        // Otherwise, it is possible that the object starting on the dirty
        // card spans the entire card, and that the store happened on a
        // later card.  Figure out where the object ends.
        assert(_sp->block_size(top_obj) == (size_t) oop(top_obj)->size(),
               "Block size and object size mismatch");
        top = top_obj + oop(top_obj)->size();
      }
    }
  } else {
    top = (_sp->toContiguousSpace())->top();
  }
  return top;
}

void Filtering_DCTOC::walk_mem_region(MemRegion mr,
                                      HeapWord* bottom,
                                      HeapWord* top) {
  // Note that this assumption won't hold if we have a concurrent
  // collector in this space, which may have freed up objects after
  // they were dirtied and before the stop-the-world GC that is
  // examining cards here.
  assert(bottom < top, "ought to be at least one obj on a dirty card.");

  if (_boundary != NULL) {
    // We have a boundary outside of which we don't want to look
    // at objects, so create a filtering closure around the
    // oop closure before walking the region.
    FilteringClosure filter(_boundary, _cl);
    walk_mem_region_with_cl(mr, bottom, top, &filter);
  } else {
    // No boundary, simply walk the heap with the oop closure.
    walk_mem_region_with_cl(mr, bottom, top, _cl);
  }

}

// We must replicate this so that the static type of "FilteringClosure"
// (see above) is apparent at the oop_iterate calls.
#define ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,        \
                                                   HeapWord* bottom,    \
                                                   HeapWord* top,       \
                                                   ClosureType* cl) {   \
  bottom += oop(bottom)->oop_iterate(cl, mr);                           \
  if (bottom < top) {                                                   \
    HeapWord* next_obj = bottom + oop(bottom)->size();                  \
    while (next_obj < top) {                                            \
      /* Bottom lies entirely below top, so we can call the */          \
      /* non-memRegion version of oop_iterate below. */                 \
      oop(bottom)->oop_iterate(cl);                                     \
      bottom = next_obj;                                                \
      next_obj = bottom + oop(bottom)->size();                          \
    }                                                                   \
    /* Last object. */                                                  \
    oop(bottom)->oop_iterate(cl, mr);                                   \
  }                                                                     \
}

// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
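
// Aside (illustrative, not part of this file): the macro above is a
// hand-rolled substitute for a member template -- each expansion is compiled
// against a concrete ClosureType, so the oop_iterate calls can bind
// non-virtually. A minimal sketch of the same idea as a template, using a
// hypothetical free function:
#if 0
template <typename ClosureType>
void walk_mem_region_with_cl_sketch(MemRegion mr, HeapWord* bottom,
                                    HeapWord* top, ClosureType* cl) {
  // First object may straddle mr.start(): use the bounded version.
  bottom += oop(bottom)->oop_iterate(cl, mr);
  while (bottom < top) {
    HeapWord* next_obj = bottom + oop(bottom)->size();
    if (next_obj < top) {
      oop(bottom)->oop_iterate(cl);      // wholly inside mr: cheap version
    } else {
      oop(bottom)->oop_iterate(cl, mr);  // last object may straddle mr.end()
    }
    bottom = next_obj;
  }
}
#endif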

DirtyCardToOopClosure*
ContiguousSpace::new_dcto_cl(ExtendedOopClosure* cl,
                             CardTableModRefBS::PrecisionStyle precision,
                             HeapWord* boundary) {
  return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
}

void Space::initialize(MemRegion mr,
                       bool clear_space,
                       bool mangle_space) {
  HeapWord* bottom = mr.start();
  HeapWord* end    = mr.end();
  assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),
         "invalid space boundaries");
  set_bottom(bottom);
  set_end(end);
  if (clear_space) clear(mangle_space);
}

void Space::clear(bool mangle_space) {
  if (ZapUnusedHeapArea && mangle_space) {
    mangle_unused_area();
  }
}

ContiguousSpace::ContiguousSpace(): CompactibleSpace(), _top(NULL),
    _concurrent_iteration_safe_limit(NULL) {
  _mangler = new GenSpaceMangler(this);
}

ContiguousSpace::~ContiguousSpace() {
  delete _mangler;
}

void ContiguousSpace::initialize(MemRegion mr,
                                 bool clear_space,
                                 bool mangle_space)
{
  CompactibleSpace::initialize(mr, clear_space, mangle_space);
  set_concurrent_iteration_safe_limit(top());
}

void ContiguousSpace::clear(bool mangle_space) {
  set_top(bottom());
  set_saved_mark();
  CompactibleSpace::clear(mangle_space);
}

bool ContiguousSpace::is_in(const void* p) const {
  return _bottom <= p && p < _top;
}

bool ContiguousSpace::is_free_block(const HeapWord* p) const {
  return p >= _top;
}

void OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.initialize_threshold();
}

void OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void OffsetTableContigSpace::set_end(HeapWord* new_end) {
  // The space should not advertise an increase in size
  // until after the underlying offset table has been enlarged.
  _offsets.resize(pointer_delta(new_end, bottom()));
  Space::set_end(new_end);
}

#ifndef PRODUCT

void ContiguousSpace::set_top_for_allocations(HeapWord* v) {
  mangler()->set_top_for_allocations(v);
}
void ContiguousSpace::set_top_for_allocations() {
  mangler()->set_top_for_allocations(top());
}
void ContiguousSpace::check_mangled_unused_area(HeapWord* limit) {
  mangler()->check_mangled_unused_area(limit);
}

void ContiguousSpace::check_mangled_unused_area_complete() {
  mangler()->check_mangled_unused_area_complete();
}

// Mangle only the unused space that has not previously
// been mangled and that has not been allocated since being
// mangled.
void ContiguousSpace::mangle_unused_area() {
  mangler()->mangle_unused_area();
}
void ContiguousSpace::mangle_unused_area_complete() {
  mangler()->mangle_unused_area_complete();
}
void ContiguousSpace::mangle_region(MemRegion mr) {
  // Although this method uses SpaceMangler::mangle_region(), which
  // is not specific to a space, when the ContiguousSpace version
  // is called it is always with regard to a space, so this
  // bounds check is appropriate.
  MemRegion space_mr(bottom(), end());
  assert(space_mr.contains(mr), "Mangling outside space");
  SpaceMangler::mangle_region(mr);
}
#endif  // NOT_PRODUCT

void CompactibleSpace::initialize(MemRegion mr,
                                  bool clear_space,
                                  bool mangle_space) {
  Space::initialize(mr, clear_space, mangle_space);
  set_compaction_top(bottom());
  _next_compaction_space = NULL;
}

void CompactibleSpace::clear(bool mangle_space) {
  Space::clear(mangle_space);
  _compaction_top = bottom();
}

HeapWord* CompactibleSpace::forward(oop q, size_t size,
                                    CompactPoint* cp, HeapWord* compact_top) {
  // q is alive
  // First check if we should switch compaction space
  assert(this == cp->space, "'this' should be current compaction space.");
  size_t compaction_max_size = pointer_delta(end(), compact_top);
  while (size > compaction_max_size) {
    // switch to next compaction space
    cp->space->set_compaction_top(compact_top);
    cp->space = cp->space->next_compaction_space();
    if (cp->space == NULL) {
      cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
      assert(cp->gen != NULL, "compaction must succeed");
      cp->space = cp->gen->first_compaction_space();
      assert(cp->space != NULL, "generation must have a first compaction space");
    }
    compact_top = cp->space->bottom();
    cp->space->set_compaction_top(compact_top);
    cp->threshold = cp->space->initialize_threshold();
    compaction_max_size = pointer_delta(cp->space->end(), compact_top);
  }

  // store the forwarding pointer into the mark word
  if ((HeapWord*)q != compact_top) {
    q->forward_to(oop(compact_top));
    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
  } else {
    // if the object isn't moving we can just set the mark to the default
    // mark and handle it specially later on.
    q->init_mark();
    assert(q->forwardee() == NULL, "should be forwarded to NULL");
  }

  compact_top += size;

  // We need to update the offset table so that the beginnings of objects can be
  // found during scavenge.  Note that we are updating the offset table based on
  // where the object will be once the compaction phase finishes.
  if (compact_top > cp->threshold)
    cp->threshold =
      cp->space->cross_threshold(compact_top - size, compact_top);
  return compact_top;
}
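
// Aside (illustrative, not part of this file): what the mark-word encoding
// above buys the later phases. For an object forward() decided to move,
// is_gc_marked() stays true and forwardee() yields the destination; objects
// that stay put got a default mark via init_mark() instead. A sketch of the
// consuming side, under those assumptions:
#if 0
void relocate_one(oop q) {
  if (q->is_gc_marked()) {                 // forward() installed a destination
    oop new_loc = q->forwardee();
    size_t sz = q->size();                 // read size before clobbering q
    Copy::aligned_conjoint_words((HeapWord*)q, (HeapWord*)new_loc, sz);
    new_loc->init_mark();                  // fresh mark at the new location
  }
  // Non-moving objects already carry a default mark and need no copy.
}
#endif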

bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words,
                                        HeapWord* q, size_t deadlength) {
  if (allowed_deadspace_words >= deadlength) {
    allowed_deadspace_words -= deadlength;
    CollectedHeap::fill_with_object(q, deadlength);
    oop(q)->set_mark(oop(q)->mark()->set_marked());
    assert((int) deadlength == oop(q)->size(), "bad filler object size");
    // Recall that we required "q == compaction_top".
    return true;
  } else {
    allowed_deadspace_words = 0;
    return false;
  }
}

#define block_is_always_obj(q) true
#define obj_size(q) oop(q)->size()
#define adjust_obj_size(s) s

void CompactibleSpace::prepare_for_compaction(CompactPoint* cp) {
  SCAN_AND_FORWARD(cp, end, block_is_obj, block_size);
}

// Faster object search.
void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
  SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size);
}

void Space::adjust_pointers() {
  // Adjust all the interior pointers to point at the new locations of objects.
  // Used by MarkSweep::mark_sweep_phase3().

  // First check to see if there is any work to be done.
  if (used() == 0) {
    return;  // Nothing to do.
  }

  // Otherwise...
  HeapWord* q = bottom();
  HeapWord* t = end();

  debug_only(HeapWord* prev_q = NULL);
  while (q < t) {
    if (oop(q)->is_gc_marked()) {
      // q is alive

      // point all the oops to the new location
      size_t size = oop(q)->adjust_pointers();

      debug_only(prev_q = q);

      q += size;
    } else {
      // q is not a live object, but since we're not in a compactible
      // space we don't have live ranges to skip over; step one block.
      debug_only(prev_q = q);
      q += block_size(q);
      assert(q > prev_q, "we should be moving forward through memory");
    }
  }
  assert(q == t, "just checking");
}

void CompactibleSpace::adjust_pointers() {
  // First check if there is any work to do.
  if (used() == 0) {
    return;   // Nothing to do.
  }

  SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
}

void CompactibleSpace::compact() {
  SCAN_AND_COMPACT(obj_size);
}
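
// Aside (illustrative, not part of this file): the order in which a
// mark-sweep-compact driver (e.g. GenMarkSweep) exercises the pieces defined
// above once marking has completed. Exact driver code differs; the point is
// the phase ordering over each space:
#if 0
CompactPoint cp;                      // hypothetical, suitably initialized
space->prepare_for_compaction(&cp);   // 1. choose destinations, install forwarding pointers
space->adjust_pointers();             // 2. rewrite interior oops via forwardee()
space->compact();                     // 3. slide live objects to their destinations
#endif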

void Space::print_short() const { print_short_on(tty); }

void Space::print_short_on(outputStream* st) const {
  st->print(" space " SIZE_FORMAT "K, %3d%% used", capacity() / K,
            (int) ((double) used() * 100 / capacity()));
}

void Space::print() const { print_on(tty); }

void Space::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               bottom(), end());
}

void ContiguousSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               bottom(), top(), end());
}

void OffsetTableContigSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
               INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               bottom(), top(), _offsets.threshold(), end());
}

void ContiguousSpace::verify() const {
  HeapWord* p = bottom();
  HeapWord* t = top();
  HeapWord* prev_p = NULL;
  while (p < t) {
    oop(p)->verify();
    prev_p = p;
    p += oop(p)->size();
  }
  guarantee(p == top(), "end of last object must match end of space");
  if (top() != end()) {
    guarantee(top() == block_start_const(end()-1) &&
              top() == block_start_const(top()),
              "top should be start of unallocated block, if it exists");
  }
}

void Space::oop_iterate(ExtendedOopClosure* blk) {
  ObjectToOopClosure blk2(blk);
  object_iterate(&blk2);
}

HeapWord* Space::object_iterate_careful(ObjectClosureCareful* cl) {
  guarantee(false, "NYI");
  return bottom();
}

HeapWord* Space::object_iterate_careful_m(MemRegion mr,
                                          ObjectClosureCareful* cl) {
  guarantee(false, "NYI");
  return bottom();
}

void Space::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
  assert(!mr.is_empty(), "Should be non-empty");
  // We use MemRegion(bottom(), end()) rather than used_region() below
  // because the two are not necessarily equal for some kinds of
  // spaces, in particular, certain kinds of free list spaces.
  // We could use the more complicated but more precise:
  // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
  // but the slight imprecision seems acceptable in the assertion check.
  assert(MemRegion(bottom(), end()).contains(mr),
         "Should be within used space");
  HeapWord* prev = cl->previous();   // max address from last time
  if (prev >= mr.end()) { // nothing to do
    return;
  }
  // This assert will not work when we go from cms space to perm
  // space, and use same closure. Easy fix deferred for later. XXX YSR
  // assert(prev == NULL || contains(prev), "Should be within space");

  bool last_was_obj_array = false;
  HeapWord *blk_start_addr, *region_start_addr;
  if (prev > mr.start()) {
    region_start_addr = prev;
    blk_start_addr    = prev;
    // The previous invocation may have pushed "prev" beyond the
    // last allocated block, yet there may still be blocks
    // in this region due to a particular coalescing policy.
    // Relax the assertion so that the case where the unallocated
    // block is maintained and "prev" is beyond the unallocated
    // block does not cause the assertion to fire.
    assert((BlockOffsetArrayUseUnallocatedBlock &&
            (!is_in(prev))) ||
           (blk_start_addr == block_start(region_start_addr)), "invariant");
  } else {
    region_start_addr = mr.start();
    blk_start_addr    = block_start(region_start_addr);
  }
  HeapWord* region_end_addr = mr.end();
  MemRegion derived_mr(region_start_addr, region_end_addr);
  while (blk_start_addr < region_end_addr) {
    const size_t size = block_size(blk_start_addr);
    if (block_is_obj(blk_start_addr)) {
      last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
    } else {
      last_was_obj_array = false;
    }
    blk_start_addr += size;
  }
  if (!last_was_obj_array) {
    assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
           "Should be within (closed) used space");
    assert(blk_start_addr > prev, "Invariant");
    cl->set_previous(blk_start_addr); // min address for next time
  }
}

bool Space::obj_is_alive(const HeapWord* p) const {
  assert(block_is_obj(p), "The address should point to an object");
  return true;
}

void ContiguousSpace::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
  assert(!mr.is_empty(), "Should be non-empty");
  assert(used_region().contains(mr), "Should be within used space");
  HeapWord* prev = cl->previous();   // max address from last time
  if (prev >= mr.end()) { // nothing to do
    return;
  }
  // See the comment in the more general method above in case you
  // happen to use this method.
  assert(prev == NULL || is_in_reserved(prev), "Should be within space");

  bool last_was_obj_array = false;
  HeapWord *obj_start_addr, *region_start_addr;
  if (prev > mr.start()) {
    region_start_addr = prev;
    obj_start_addr    = prev;
    assert(obj_start_addr == block_start(region_start_addr), "invariant");
  } else {
    region_start_addr = mr.start();
    obj_start_addr    = block_start(region_start_addr);
  }
  HeapWord* region_end_addr = mr.end();
  MemRegion derived_mr(region_start_addr, region_end_addr);
  while (obj_start_addr < region_end_addr) {
    oop obj = oop(obj_start_addr);
    const size_t size = obj->size();
    last_was_obj_array = cl->do_object_bm(obj, derived_mr);
    obj_start_addr += size;
  }
  if (!last_was_obj_array) {
    assert((bottom() <= obj_start_addr) && (obj_start_addr <= end()),
           "Should be within (closed) used space");
    assert(obj_start_addr > prev, "Invariant");
    cl->set_previous(obj_start_addr); // min address for next time
  }
}

#ifndef SERIALGC
#define ContigSpace_PAR_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)        \
                                                                           \
void ContiguousSpace::par_oop_iterate(MemRegion mr, OopClosureType* blk) { \
  HeapWord* obj_addr = mr.start();                                         \
  HeapWord* t = mr.end();                                                  \
  while (obj_addr < t) {                                                   \
    assert(oop(obj_addr)->is_oop(), "Should be an oop");                   \
    obj_addr += oop(obj_addr)->oop_iterate(blk);                           \
  }                                                                        \
}

ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DEFN)

#undef ContigSpace_PAR_OOP_ITERATE_DEFN
#endif // SERIALGC

void ContiguousSpace::oop_iterate(ExtendedOopClosure* blk) {
  if (is_empty()) return;
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();
  // Could call object_iterate(), but this is easier.
  while (obj_addr < t) {
    obj_addr += oop(obj_addr)->oop_iterate(blk);
  }
}

void ContiguousSpace::oop_iterate(MemRegion mr, ExtendedOopClosure* blk) {
  if (is_empty()) {
    return;
  }
  MemRegion cur = MemRegion(bottom(), top());
  mr = mr.intersection(cur);
  if (mr.is_empty()) {
    return;
  }
  if (mr.equals(cur)) {
    oop_iterate(blk);
    return;
  }
  assert(mr.end() <= top(), "just took an intersection above");
  HeapWord* obj_addr = block_start(mr.start());
  HeapWord* t = mr.end();

  // Handle first object specially.
  oop obj = oop(obj_addr);
  SpaceMemRegionOopsIterClosure smr_blk(blk, mr);
  obj_addr += obj->oop_iterate(&smr_blk);
  while (obj_addr < t) {
    oop obj = oop(obj_addr);
    assert(obj->is_oop(), "expected an oop");
    obj_addr += obj->size();
    // If "obj_addr" is not greater than top, then the
    // entire object "obj" is within the region.
    if (obj_addr <= t) {
      obj->oop_iterate(blk);
    } else {
      // "obj" extends beyond end of region
      obj->oop_iterate(&smr_blk);
      break;
    }
  }
}

void ContiguousSpace::object_iterate(ObjectClosure* blk) {
  if (is_empty()) return;
  WaterMark bm = bottom_mark();
  object_iterate_from(bm, blk);
}

// For a contiguous space, object_iterate() and safe_object_iterate()
// are the same.
void ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
  object_iterate(blk);
}

void ContiguousSpace::object_iterate_from(WaterMark mark, ObjectClosure* blk) {
  assert(mark.space() == this, "Mark does not match space");
  HeapWord* p = mark.point();
  while (p < top()) {
    blk->do_object(oop(p));
    p += oop(p)->size();
  }
}

HeapWord*
ContiguousSpace::object_iterate_careful(ObjectClosureCareful* blk) {
  HeapWord* limit = concurrent_iteration_safe_limit();
  assert(limit <= top(), "sanity check");
  for (HeapWord* p = bottom(); p < limit;) {
    size_t size = blk->do_object_careful(oop(p));
    if (size == 0) {
      return p;  // failed at p
    } else {
      p += size;
    }
  }
  return NULL; // all done
}

#define ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
                                                                         \
void ContiguousSpace::                                                   \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {           \
  HeapWord* t;                                                           \
  HeapWord* p = saved_mark_word();                                       \
  assert(p != NULL, "expected saved mark");                              \
                                                                         \
  const intx interval = PrefetchScanIntervalInBytes;                     \
  do {                                                                   \
    t = top();                                                           \
    while (p < t) {                                                      \
      Prefetch::write(p, interval);                                      \
      debug_only(HeapWord* prev = p);                                    \
      oop m = oop(p);                                                    \
      p += m->oop_iterate(blk);                                          \
    }                                                                    \
  } while (t < top());                                                   \
                                                                         \
  set_saved_mark_word(p);                                                \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN)

#undef ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN

// Very general, slow implementation.
HeapWord* ContiguousSpace::block_start_const(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         err_msg("p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
                 p, bottom(), end()));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size();
    }
    assert(oop(last)->is_oop(),
           err_msg(PTR_FORMAT " should be an object start", last));
    return last;
  }
}

size_t ContiguousSpace::block_size(const HeapWord* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         err_msg("p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
                 p, bottom(), end()));
  HeapWord* current_top = top();
  assert(p <= current_top,
         err_msg("p > current top - p: " PTR_FORMAT ", current top: " PTR_FORMAT,
                 p, current_top));
  assert(p == current_top || oop(p)->is_oop(),
         err_msg("p (" PTR_FORMAT ") is not a block start - "
                 "current_top: " PTR_FORMAT ", is_oop: %s",
                 p, current_top, BOOL_TO_STR(oop(p)->is_oop())));
  if (p < current_top) {
    return oop(p)->size();
  } else {
    assert(p == current_top, "just checking");
    return pointer_delta(end(), (HeapWord*) p);
  }
}

// This version requires locking.
inline HeapWord* ContiguousSpace::allocate_impl(size_t size,
                                                HeapWord* const end_value) {
  // In G1 there are places where a GC worker can allocate into a
  // region using this serial allocation code without being prone to a
  // race with other GC workers (we ensure that no other GC worker can
  // access the same region at the same time). So the assert below is
  // too strong in the case of G1.
  assert(Heap_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() &&
          (Thread::current()->is_VM_thread() || UseG1GC)),
         "not locked");
  HeapWord* obj = top();
  if (pointer_delta(end_value, obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
    return obj;
  } else {
    return NULL;
  }
}

// This version is lock-free.
inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size,
                                                    HeapWord* const end_value) {
  do {
    HeapWord* obj = top();
    if (pointer_delta(end_value, obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // "result" is one of two values:
      //   the old top value: the exchange succeeded
      //   otherwise: the current top value; the exchange failed and we retry
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}
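
// Aside (illustrative, not part of this file): how the lock-free path above
// behaves under contention, with hypothetical addresses and a two-word
// (16-byte on a 64-bit VM) request. Both threads read the same top; exactly
// one cmpxchg succeeds, and the loser re-reads top() on the next iteration:
//   thread A: obj = 0x1000, cmpxchg(0x1010, top_addr, 0x1000) returns 0x1000 -- wins, gets 0x1000
//   thread B: obj = 0x1000, cmpxchg(0x1010, top_addr, 0x1000) returns 0x1010 -- loses, retries
//   thread B: obj = 0x1010, cmpxchg(0x1020, top_addr, 0x1010) returns 0x1010 -- wins, gets 0x1010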

// Requires locking.
HeapWord* ContiguousSpace::allocate(size_t size) {
  return allocate_impl(size, end());
}

// Lock-free.
HeapWord* ContiguousSpace::par_allocate(size_t size) {
  return par_allocate_impl(size, end());
}

void ContiguousSpace::allocate_temporary_filler(int factor) {
  // Allocate a temporary type array so that the remaining free space
  // shrinks to 1/factor of its current size (factor == 0 fills the
  // space completely).
  assert(factor >= 0, "just checking");
  size_t size = pointer_delta(end(), top());

  // if space is full, return
  if (size == 0) return;

  if (factor > 0) {
    size -= size/factor;
  }
  size = align_object_size(size);

  const size_t array_header_size = typeArrayOopDesc::header_size(T_INT);
  if (size >= (size_t)align_object_size(array_header_size)) {
    size_t length = (size - array_header_size) * (HeapWordSize / sizeof(jint));
    // allocate uninitialized int array
    typeArrayOop t = (typeArrayOop) allocate(size);
    assert(t != NULL, "allocation should succeed");
    t->set_mark(markOopDesc::prototype());
    t->set_klass(Universe::intArrayKlassObj());
    t->set_length((int)length);
  } else {
    assert(size == CollectedHeap::min_fill_size(),
           "size for smallest fake object doesn't match");
    instanceOop obj = (instanceOop) allocate(size);
    obj->set_mark(markOopDesc::prototype());
    obj->set_klass_gap(0);
    obj->set_klass(SystemDictionary::Object_klass());
  }
}
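
// Worked example of the filler sizing above (illustrative figures, assuming
// a 64-bit VM where HeapWordSize == 8 -- two jints per word -- a hypothetical
// 2-word T_INT array header, and no alignment rounding):
//   free space = 100 words, factor = 4  =>  size = 100 - 100/4 = 75 words
//   length     = (75 - 2) * (8 / 4)     =  146 jints
//   filler     = 2-word header + 146 jints (73 words) = 75 words, as intended
//   remaining free space: 100 - 75 = 25 words, i.e. 1/factor of the original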

void EdenSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  set_soft_end(end());
}

// Requires locking.
HeapWord* EdenSpace::allocate(size_t size) {
  return allocate_impl(size, soft_end());
}

// Lock-free.
HeapWord* EdenSpace::par_allocate(size_t size) {
  return par_allocate_impl(size, soft_end());
}

HeapWord* ConcEdenSpace::par_allocate(size_t size)
{
  do {
    // The invariant is that top() is read before the soft end.
    // top() can never be greater than the current soft end, but if the
    // soft end were loaded first and _soft_end then grew, top() could
    // advance past the stale soft-end value we already loaded, and the
    // room check below would compare against out-of-date bounds. The
    // OrderAccess::loadload() after the top() read enforces this
    // loading order.
    HeapWord* obj = top();
    OrderAccess::loadload();
    if (pointer_delta(*soft_end_addr(), obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // "result" is one of two values:
      //   the old top value: the exchange succeeded
      //   otherwise: the current top value; the exchange failed and we retry
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}
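
// Aside (illustrative, not part of this file): the reordering the loadload
// above rules out, shown as one possible interleaving with concurrent
// updaters (addresses are hypothetical):
//   reordered reader                   concurrent updaters
//   -------------------------------    --------------------------------
//   e = *soft_end_addr()  // 0x2000
//                                      _soft_end grows to 0x3000
//                                      another thread bumps top to 0x2800
//   obj = top()           // 0x2800
//   pointer_delta(e, obj) underflows (top > stale soft end); the unsigned
//   delta is huge, the room check passes spuriously, and we would allocate
//   past the soft end. Reading top() first and fencing guarantees the soft
//   end we compare against is at least as recent as the top we loaded.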

HeapWord* OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* OffsetTableContigSpace::cross_threshold(HeapWord* start, HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}

OffsetTableContigSpace::OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                                               MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true)
{
  _offsets.set_contig_space(this);
  initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
}

#define OBJ_SAMPLE_INTERVAL 0
#define BLOCK_SAMPLE_INTERVAL 100

void OffsetTableContigSpace::verify() const {
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  int objs = 0;
  int blocks = 0;

  if (VerifyObjectStartArray) {
    _offsets.verify();
  }

  while (p < top()) {
    size_t size = oop(p)->size();
    // For a sampling of objects in the space, find their starts using
    // the block offset table.
    if (blocks == BLOCK_SAMPLE_INTERVAL) {
      guarantee(p == block_start_const(p + (size/2)),
                "check offset computation");
      blocks = 0;
    } else {
      blocks++;
    }

    if (objs == OBJ_SAMPLE_INTERVAL) {
      oop(p)->verify();
      objs = 0;
    } else {
      objs++;
    }
    prev_p = p;
    p += size;
  }
  guarantee(p == top(), "end of last object must match end of space");
}

size_t TenuredSpace::allowed_dead_ratio() const {
  return MarkSweepDeadRatio;
}