Wed, 29 Oct 2008 06:30:02 -0700
6765804: GC "dead ratios" should be unsigned
Reviewed-by: ysr, tonyp
/*
 * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_space.cpp.incl"
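
// The oop and narrowOop overloads of do_oop below both delegate to the
// shared do_oop_work(), so that compressed and uncompressed oop fields
// are handled by one piece of code.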

void SpaceMemRegionOopsIterClosure::do_oop(oop* p)       { SpaceMemRegionOopsIterClosure::do_oop_work(p); }
void SpaceMemRegionOopsIterClosure::do_oop(narrowOop* p) { SpaceMemRegionOopsIterClosure::do_oop_work(p); }

HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
                                                HeapWord* top_obj) {
  if (top_obj != NULL) {
    if (_sp->block_is_obj(top_obj)) {
      if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
        if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
          // An arrayOop is starting on the dirty card - since we do exact
          // store checks for objArrays we are done.
        } else {
          // Otherwise, it is possible that the object starting on the dirty
          // card spans the entire card, and that the store happened on a
          // later card. Figure out where the object ends.
          // Use the block_size() method of the space over which
          // the iteration is being done. That space (e.g. CMS) may have
          // specific requirements on object sizes which will
          // be reflected in the block_size() method.
          top = top_obj + oop(top_obj)->size();
        }
      }
    } else {
      top = top_obj;
    }
  } else {
    assert(top == _sp->end(), "only case where top_obj == NULL");
  }
  return top;
}

void DirtyCardToOopClosure::walk_mem_region(MemRegion mr,
                                            HeapWord* bottom,
                                            HeapWord* top) {
  // 1. Blocks may or may not be objects.
  // 2. Even when a block_is_obj(), it may not entirely
  //    occupy the block if the block quantum is larger than
  //    the object size.
  // We can and should try to optimize by calling the non-MemRegion
  // version of oop_iterate() for all but the extremal objects
  // (for which we need to call the MemRegion version of
  // oop_iterate()). To be done post-beta. XXX
  for (; bottom < top; bottom += _sp->block_size(bottom)) {
    // As in the case of contiguous space above, we'd like to
    // just use the value returned by oop_iterate to increment the
    // current pointer; unfortunately, that won't work in CMS because
    // we'd need an interface change (it seems) to have the space
    // "adjust the object size" (for instance pad it up to its
    // block alignment or minimum block size restrictions). XXX
    if (_sp->block_is_obj(bottom) &&
        !_sp->obj_allocated_since_save_marks(oop(bottom))) {
      oop(bottom)->oop_iterate(_cl, mr);
    }
  }
}

void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {

  // Some collectors need to do special things whenever their dirty
  // cards are processed. For instance, CMS must remember mutator updates
  // (i.e. dirty cards) so as to re-scan mutated objects.
  // Such work can be piggy-backed here on dirty card scanning, so as to make
  // it slightly more efficient than doing a complete non-destructive pre-scan
  // of the card table.
  MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure();
  if (pCl != NULL) {
    pCl->do_MemRegion(mr);
  }

  HeapWord* bottom = mr.start();
  HeapWord* last = mr.last();
  HeapWord* top = mr.end();
  HeapWord* bottom_obj;
  HeapWord* top_obj;

  assert(_precision == CardTableModRefBS::ObjHeadPreciseArray ||
         _precision == CardTableModRefBS::Precise,
         "Only ones we deal with for now.");

  assert(_precision != CardTableModRefBS::ObjHeadPreciseArray ||
         _cl->idempotent() || _last_bottom == NULL ||
         top <= _last_bottom,
         "Not decreasing");
  NOT_PRODUCT(_last_bottom = mr.start());

  bottom_obj = _sp->block_start(bottom);
  top_obj    = _sp->block_start(last);

  assert(bottom_obj <= bottom, "just checking");
  assert(top_obj    <= top,    "just checking");

  // Given what we think is the top of the memory region and
  // the start of the object at the top, get the actual
  // value of the top.
  top = get_actual_top(top, top_obj);

  // If the previous call did some part of this region, don't redo.
  if (_precision == CardTableModRefBS::ObjHeadPreciseArray &&
      _min_done != NULL &&
      _min_done < top) {
    top = _min_done;
  }

  // Top may have been reset, and in fact may be below bottom,
  // e.g. the dirty card region is entirely in a now free object
  // -- something that could happen with a concurrent sweeper.
  bottom = MIN2(bottom, top);
  mr     = MemRegion(bottom, top);
  assert(bottom <= top &&
         (_precision != CardTableModRefBS::ObjHeadPreciseArray ||
          _min_done == NULL ||
          top <= _min_done),
         "overlap!");

  // Walk the region if it is not empty; otherwise there is nothing to do.
  if (!mr.is_empty()) {
    walk_mem_region(mr, bottom_obj, top);
  }

  // An idempotent closure might be applied in any order, so we don't
  // record a _min_done for it.
  if (!_cl->idempotent()) {
    _min_done = bottom;
  } else {
    assert(_min_done == _last_explicit_min_done,
           "Don't update _min_done for idempotent cl");
  }
}
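
// Illustrative use (a sketch, not code from this changeset): card-table
// scanning typically hands a closure like this one card-sized region at a
// time, along the lines of
//   MemRegion card_mr(card_start,
//                     card_start + CardTableModRefBS::card_size_in_words);
//   dcto_cl->do_MemRegion(card_mr);
// where dcto_cl was obtained from Space::new_dcto_cl() below.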

DirtyCardToOopClosure* Space::new_dcto_cl(OopClosure* cl,
                                          CardTableModRefBS::PrecisionStyle precision,
                                          HeapWord* boundary) {
  return new DirtyCardToOopClosure(this, cl, precision, boundary);
}

HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top,
                                               HeapWord* top_obj) {
  if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) {
    if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
      if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
        // An arrayOop is starting on the dirty card - since we do exact
        // store checks for objArrays we are done.
      } else {
        // Otherwise, it is possible that the object starting on the dirty
        // card spans the entire card, and that the store happened on a
        // later card. Figure out where the object ends.
        assert(_sp->block_size(top_obj) == (size_t) oop(top_obj)->size(),
               "Block size and object size mismatch");
        top = top_obj + oop(top_obj)->size();
      }
    }
  } else {
    top = (_sp->toContiguousSpace())->top();
  }
  return top;
}

void Filtering_DCTOC::walk_mem_region(MemRegion mr,
                                      HeapWord* bottom,
                                      HeapWord* top) {
  // Note that this assumption won't hold if we have a concurrent
  // collector in this space, which may have freed up objects after
  // they were dirtied and before the stop-the-world GC that is
  // examining cards here.
  assert(bottom < top, "ought to be at least one obj on a dirty card.");

  if (_boundary != NULL) {
    // We have a boundary outside of which we don't want to look
    // at objects, so create a filtering closure around the
    // oop closure before walking the region.
    FilteringClosure filter(_boundary, _cl);
    walk_mem_region_with_cl(mr, bottom, top, &filter);
  } else {
    // No boundary, simply walk the heap with the oop closure.
    walk_mem_region_with_cl(mr, bottom, top, _cl);
  }

}

// We must replicate this so that the static type of "FilteringClosure"
// (see above) is apparent at the oop_iterate calls.
#define ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,        \
                                                   HeapWord* bottom,    \
                                                   HeapWord* top,       \
                                                   ClosureType* cl) {   \
  bottom += oop(bottom)->oop_iterate(cl, mr);                           \
  if (bottom < top) {                                                   \
    HeapWord* next_obj = bottom + oop(bottom)->size();                  \
    while (next_obj < top) {                                            \
      /* Bottom lies entirely below top, so we can call the */          \
      /* non-memRegion version of oop_iterate below. */                 \
      oop(bottom)->oop_iterate(cl);                                     \
      bottom = next_obj;                                                \
      next_obj = bottom + oop(bottom)->size();                          \
    }                                                                   \
    /* Last object. */                                                  \
    oop(bottom)->oop_iterate(cl, mr);                                   \
  }                                                                     \
}

// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(OopClosure)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)

DirtyCardToOopClosure*
ContiguousSpace::new_dcto_cl(OopClosure* cl,
                             CardTableModRefBS::PrecisionStyle precision,
                             HeapWord* boundary) {
  return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
}

void Space::initialize(MemRegion mr,
                       bool clear_space,
                       bool mangle_space) {
  HeapWord* bottom = mr.start();
  HeapWord* end    = mr.end();
  assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),
         "invalid space boundaries");
  set_bottom(bottom);
  set_end(end);
  if (clear_space) clear(mangle_space);
}

void Space::clear(bool mangle_space) {
  if (ZapUnusedHeapArea && mangle_space) {
    mangle_unused_area();
  }
}

ContiguousSpace::ContiguousSpace(): CompactibleSpace(), _top(NULL),
  _concurrent_iteration_safe_limit(NULL) {
  _mangler = new GenSpaceMangler(this);
}

ContiguousSpace::~ContiguousSpace() {
  delete _mangler;
}

void ContiguousSpace::initialize(MemRegion mr,
                                 bool clear_space,
                                 bool mangle_space)
{
  CompactibleSpace::initialize(mr, clear_space, mangle_space);
  set_concurrent_iteration_safe_limit(top());
}

void ContiguousSpace::clear(bool mangle_space) {
  set_top(bottom());
  set_saved_mark();
  CompactibleSpace::clear(mangle_space);
}

bool Space::is_in(const void* p) const {
  HeapWord* b = block_start_const(p);
  return b != NULL && block_is_obj(b);
}

bool ContiguousSpace::is_in(const void* p) const {
  return _bottom <= p && p < _top;
}

bool ContiguousSpace::is_free_block(const HeapWord* p) const {
  return p >= _top;
}

void OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.initialize_threshold();
}

void OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void OffsetTableContigSpace::set_end(HeapWord* new_end) {
  // Space should not advertise an increase in size
  // until after the underlying offset table has been enlarged.
  _offsets.resize(pointer_delta(new_end, bottom()));
  Space::set_end(new_end);
}

#ifndef PRODUCT

void ContiguousSpace::set_top_for_allocations(HeapWord* v) {
  mangler()->set_top_for_allocations(v);
}
void ContiguousSpace::set_top_for_allocations() {
  mangler()->set_top_for_allocations(top());
}
void ContiguousSpace::check_mangled_unused_area(HeapWord* limit) {
  mangler()->check_mangled_unused_area(limit);
}

void ContiguousSpace::check_mangled_unused_area_complete() {
  mangler()->check_mangled_unused_area_complete();
}

// Mangle only the unused space that has not previously
// been mangled and that has not been allocated since being
// mangled.
void ContiguousSpace::mangle_unused_area() {
  mangler()->mangle_unused_area();
}
void ContiguousSpace::mangle_unused_area_complete() {
  mangler()->mangle_unused_area_complete();
}
void ContiguousSpace::mangle_region(MemRegion mr) {
  // Although this method uses SpaceMangler::mangle_region(), which
  // is not specific to a space, when the ContiguousSpace version
  // is called it is always with regard to a space, so this
  // bounds checking is appropriate.
  MemRegion space_mr(bottom(), end());
  assert(space_mr.contains(mr), "Mangling outside space");
  SpaceMangler::mangle_region(mr);
}
#endif // NOT_PRODUCT

void CompactibleSpace::initialize(MemRegion mr,
                                  bool clear_space,
                                  bool mangle_space) {
  Space::initialize(mr, clear_space, mangle_space);
  set_compaction_top(bottom());
  _next_compaction_space = NULL;
}

void CompactibleSpace::clear(bool mangle_space) {
  Space::clear(mangle_space);
  _compaction_top = bottom();
}

HeapWord* CompactibleSpace::forward(oop q, size_t size,
                                    CompactPoint* cp, HeapWord* compact_top) {
  // q is alive
  // First check if we should switch compaction space
  assert(this == cp->space, "'this' should be current compaction space.");
  size_t compaction_max_size = pointer_delta(end(), compact_top);
  while (size > compaction_max_size) {
    // switch to next compaction space
    cp->space->set_compaction_top(compact_top);
    cp->space = cp->space->next_compaction_space();
    if (cp->space == NULL) {
      cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
      assert(cp->gen != NULL, "compaction must succeed");
      cp->space = cp->gen->first_compaction_space();
      assert(cp->space != NULL, "generation must have a first compaction space");
    }
    compact_top = cp->space->bottom();
    cp->space->set_compaction_top(compact_top);
    cp->threshold = cp->space->initialize_threshold();
    compaction_max_size = pointer_delta(cp->space->end(), compact_top);
  }

  // store the forwarding pointer into the mark word
  if ((HeapWord*)q != compact_top) {
    q->forward_to(oop(compact_top));
    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
  } else {
    // if the object isn't moving we can just set the mark to the default
    // mark and handle it specially later on.
    q->init_mark();
    assert(q->forwardee() == NULL, "should be forwarded to NULL");
  }

  VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, size));
  compact_top += size;

  // we need to update the offset table so that the beginnings of objects can be
  // found during scavenge. Note that we are updating the offset table based on
  // where the object will be once the compaction phase finishes.
  if (compact_top > cp->threshold)
    cp->threshold =
      cp->space->cross_threshold(compact_top - size, compact_top);
  return compact_top;
}
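
// Contextual note (not from this file): forward() is invoked from the
// SCAN_AND_FORWARD macro in space.hpp during the mark-sweep "compute new
// addresses" pass; the destination address is encoded into the object's
// mark word by forward_to() and recovered later via forwardee().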

bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words,
                                        HeapWord* q, size_t deadlength) {
  if (allowed_deadspace_words >= deadlength) {
    allowed_deadspace_words -= deadlength;
    oop(q)->set_mark(markOopDesc::prototype()->set_marked());
    const size_t min_int_array_size = typeArrayOopDesc::header_size(T_INT);
    if (deadlength >= min_int_array_size) {
      oop(q)->set_klass(Universe::intArrayKlassObj());
      typeArrayOop(q)->set_length((int)((deadlength - min_int_array_size)
                                        * (HeapWordSize/sizeof(jint))));
    } else {
      assert((int) deadlength == instanceOopDesc::header_size(),
             "size for smallest fake dead object doesn't match");
      oop(q)->set_klass(SystemDictionary::object_klass());
    }
    assert((int) deadlength == oop(q)->size(),
           "make sure size for fake dead object matches");
    // Recall that we required "q == compaction_top".
    return true;
  } else {
    allowed_deadspace_words = 0;
    return false;
  }
}
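
// Worked example of the dead-space arithmetic above (illustrative numbers):
// with 8-byte HeapWords, 4-byte jints, and an int-array header of, say,
// two words, a 10-word hole gets set_length((10 - 2) * 2) == 16, and
// 16 jints == 64 bytes == 8 words of payload, so header plus payload
// exactly refill the 10-word hole with a parseable fake object.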

#define block_is_always_obj(q) true
#define obj_size(q) oop(q)->size()
#define adjust_obj_size(s) s

void CompactibleSpace::prepare_for_compaction(CompactPoint* cp) {
  SCAN_AND_FORWARD(cp, end, block_is_obj, block_size);
}

// Faster object search.
void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
  SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size);
}

void Space::adjust_pointers() {
  // adjust all the interior pointers to point at the new locations of objects
  // Used by MarkSweep::mark_sweep_phase3()

  // First check to see if there is any work to be done.
  if (used() == 0) {
    return;  // Nothing to do.
  }

  // Otherwise...
  HeapWord* q = bottom();
  HeapWord* t = end();

  debug_only(HeapWord* prev_q = NULL);
  while (q < t) {
    if (oop(q)->is_gc_marked()) {
      // q is alive

      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));
      // point all the oops to the new location
      size_t size = oop(q)->adjust_pointers();
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());

      debug_only(prev_q = q);
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));

      q += size;
    } else {
      // q is not a live object, but we're not in a compactible space,
      // so we don't have live ranges.
      debug_only(prev_q = q);
      q += block_size(q);
      assert(q > prev_q, "we should be moving forward through memory");
    }
  }
  assert(q == t, "just checking");
}

void CompactibleSpace::adjust_pointers() {
  // Check first if there is any work to do.
  if (used() == 0) {
    return;  // Nothing to do.
  }

  SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
}

void CompactibleSpace::compact() {
  SCAN_AND_COMPACT(obj_size);
}

void Space::print_short() const { print_short_on(tty); }

void Space::print_short_on(outputStream* st) const {
  st->print(" space " SIZE_FORMAT "K, %3d%% used", capacity() / K,
            (int) ((double) used() * 100 / capacity()));
}

void Space::print() const { print_on(tty); }

void Space::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               bottom(), end());
}

void ContiguousSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               bottom(), top(), end());
}

void OffsetTableContigSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
               INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               bottom(), top(), _offsets.threshold(), end());
}

void ContiguousSpace::verify(bool allow_dirty) const {
  HeapWord* p = bottom();
  HeapWord* t = top();
  HeapWord* prev_p = NULL;
  while (p < t) {
    oop(p)->verify();
    prev_p = p;
    p += oop(p)->size();
  }
  guarantee(p == top(), "end of last object must match end of space");
  if (top() != end()) {
    guarantee(top() == block_start_const(end()-1) &&
              top() == block_start_const(top()),
              "top should be start of unallocated block, if it exists");
  }
}

void Space::oop_iterate(OopClosure* blk) {
  ObjectToOopClosure blk2(blk);
  object_iterate(&blk2);
}

HeapWord* Space::object_iterate_careful(ObjectClosureCareful* cl) {
  guarantee(false, "NYI");
  return bottom();
}

HeapWord* Space::object_iterate_careful_m(MemRegion mr,
                                          ObjectClosureCareful* cl) {
  guarantee(false, "NYI");
  return bottom();
}

void Space::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
  assert(!mr.is_empty(), "Should be non-empty");
  // We use MemRegion(bottom(), end()) rather than used_region() below
  // because the two are not necessarily equal for some kinds of
  // spaces, in particular, certain kinds of free list spaces.
  // We could use the more complicated but more precise:
  // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
  // but the slight imprecision seems acceptable in the assertion check.
  assert(MemRegion(bottom(), end()).contains(mr),
         "Should be within used space");
  HeapWord* prev = cl->previous();   // max address from last time
  if (prev >= mr.end()) { // nothing to do
    return;
  }
  // This assert will not work when we go from cms space to perm
  // space, and use same closure. Easy fix deferred for later. XXX YSR
  // assert(prev == NULL || contains(prev), "Should be within space");

  bool last_was_obj_array = false;
  HeapWord *blk_start_addr, *region_start_addr;
  if (prev > mr.start()) {
    region_start_addr = prev;
    blk_start_addr    = prev;
    assert(blk_start_addr == block_start(region_start_addr), "invariant");
  } else {
    region_start_addr = mr.start();
    blk_start_addr    = block_start(region_start_addr);
  }
  HeapWord* region_end_addr = mr.end();
  MemRegion derived_mr(region_start_addr, region_end_addr);
  while (blk_start_addr < region_end_addr) {
    const size_t size = block_size(blk_start_addr);
    if (block_is_obj(blk_start_addr)) {
      last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
    } else {
      last_was_obj_array = false;
    }
    blk_start_addr += size;
  }
  if (!last_was_obj_array) {
    assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
           "Should be within (closed) used space");
    assert(blk_start_addr > prev, "Invariant");
    cl->set_previous(blk_start_addr); // min address for next time
  }
}

bool Space::obj_is_alive(const HeapWord* p) const {
  assert(block_is_obj(p), "The address should point to an object");
  return true;
}

void ContiguousSpace::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
  assert(!mr.is_empty(), "Should be non-empty");
  assert(used_region().contains(mr), "Should be within used space");
  HeapWord* prev = cl->previous();   // max address from last time
  if (prev >= mr.end()) { // nothing to do
    return;
  }
  // See comment above (in more general method above) in case you
  // happen to use this method.
  assert(prev == NULL || is_in_reserved(prev), "Should be within space");

  bool last_was_obj_array = false;
  HeapWord *obj_start_addr, *region_start_addr;
  if (prev > mr.start()) {
    region_start_addr = prev;
    obj_start_addr    = prev;
    assert(obj_start_addr == block_start(region_start_addr), "invariant");
  } else {
    region_start_addr = mr.start();
    obj_start_addr    = block_start(region_start_addr);
  }
  HeapWord* region_end_addr = mr.end();
  MemRegion derived_mr(region_start_addr, region_end_addr);
  while (obj_start_addr < region_end_addr) {
    oop obj = oop(obj_start_addr);
    const size_t size = obj->size();
    last_was_obj_array = cl->do_object_bm(obj, derived_mr);
    obj_start_addr += size;
  }
  if (!last_was_obj_array) {
    assert((bottom() <= obj_start_addr) && (obj_start_addr <= end()),
           "Should be within (closed) used space");
    assert(obj_start_addr > prev, "Invariant");
    cl->set_previous(obj_start_addr); // min address for next time
  }
}

#ifndef SERIALGC
#define ContigSpace_PAR_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)       \
                                                                          \
  void ContiguousSpace::par_oop_iterate(MemRegion mr, OopClosureType* blk) {\
    HeapWord* obj_addr = mr.start();                                      \
    HeapWord* t = mr.end();                                               \
    while (obj_addr < t) {                                                \
      assert(oop(obj_addr)->is_oop(), "Should be an oop");                \
      obj_addr += oop(obj_addr)->oop_iterate(blk);                        \
    }                                                                     \
  }

  ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DEFN)

#undef ContigSpace_PAR_OOP_ITERATE_DEFN
#endif // SERIALGC

void ContiguousSpace::oop_iterate(OopClosure* blk) {
  if (is_empty()) return;
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();
  // Could call object_iterate(), but this is easier.
  while (obj_addr < t) {
    obj_addr += oop(obj_addr)->oop_iterate(blk);
  }
}

void ContiguousSpace::oop_iterate(MemRegion mr, OopClosure* blk) {
  if (is_empty()) {
    return;
  }
  MemRegion cur = MemRegion(bottom(), top());
  mr = mr.intersection(cur);
  if (mr.is_empty()) {
    return;
  }
  if (mr.equals(cur)) {
    oop_iterate(blk);
    return;
  }
  assert(mr.end() <= top(), "just took an intersection above");
  HeapWord* obj_addr = block_start(mr.start());
  HeapWord* t = mr.end();

  // Handle first object specially.
  oop obj = oop(obj_addr);
  SpaceMemRegionOopsIterClosure smr_blk(blk, mr);
  obj_addr += obj->oop_iterate(&smr_blk);
  while (obj_addr < t) {
    oop obj = oop(obj_addr);
    assert(obj->is_oop(), "expected an oop");
    obj_addr += obj->size();
    // If "obj_addr" is not greater than t, the end of the region,
    // then the entire object "obj" is within the region.
    if (obj_addr <= t) {
      obj->oop_iterate(blk);
    } else {
      // "obj" extends beyond end of region
      obj->oop_iterate(&smr_blk);
      break;
    }
  }
}

void ContiguousSpace::object_iterate(ObjectClosure* blk) {
  if (is_empty()) return;
  WaterMark bm = bottom_mark();
  object_iterate_from(bm, blk);
}

void ContiguousSpace::object_iterate_from(WaterMark mark, ObjectClosure* blk) {
  assert(mark.space() == this, "Mark does not match space");
  HeapWord* p = mark.point();
  while (p < top()) {
    blk->do_object(oop(p));
    p += oop(p)->size();
  }
}

HeapWord*
ContiguousSpace::object_iterate_careful(ObjectClosureCareful* blk) {
  HeapWord* limit = concurrent_iteration_safe_limit();
  assert(limit <= top(), "sanity check");
  for (HeapWord* p = bottom(); p < limit;) {
    size_t size = blk->do_object_careful(oop(p));
    if (size == 0) {
      return p;  // failed at p
    } else {
      p += size;
    }
  }
  return NULL; // all done
}
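
// Protocol note: a zero return from do_object_careful() means the closure
// could not safely process the object at p; the iteration stops there and
// returns p to the caller, which can retry once it is safe to do so.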

#define ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)  \
                                                                          \
void ContiguousSpace::                                                    \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {            \
  HeapWord* t;                                                            \
  HeapWord* p = saved_mark_word();                                        \
  assert(p != NULL, "expected saved mark");                               \
                                                                          \
  const intx interval = PrefetchScanIntervalInBytes;                      \
  do {                                                                    \
    t = top();                                                            \
    while (p < t) {                                                       \
      Prefetch::write(p, interval);                                       \
      debug_only(HeapWord* prev = p);                                     \
      oop m = oop(p);                                                     \
      p += m->oop_iterate(blk);                                           \
    }                                                                     \
  } while (t < top());                                                    \
                                                                          \
  set_saved_mark_word(p);                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN)

#undef ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN
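
// The do/while in the macro above deliberately re-reads top() after each
// pass: new objects may be allocated in this space while the iteration is
// in progress, and the loop repeats until no objects remain above the
// point already scanned.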

// Very general, slow implementation.
HeapWord* ContiguousSpace::block_start_const(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom();
    HeapWord* cur  = last;
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size();
    }
    assert(oop(last)->is_oop(), "Should be an object start");
    return last;
  }
}

size_t ContiguousSpace::block_size(const HeapWord* p) const {
  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  HeapWord* current_top = top();
  assert(p <= current_top, "p is not a block start");
  assert(p == current_top || oop(p)->is_oop(), "p is not a block start");
  if (p < current_top)
    return oop(p)->size();
  else {
    assert(p == current_top, "just checking");
    return pointer_delta(end(), (HeapWord*) p);
  }
}

// This version requires locking.
inline HeapWord* ContiguousSpace::allocate_impl(size_t size,
                                                HeapWord* const end_value) {
  assert(Heap_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() &&
          Thread::current()->is_VM_thread()),
         "not locked");
  HeapWord* obj = top();
  if (pointer_delta(end_value, obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
    return obj;
  } else {
    return NULL;
  }
}

// This version is lock-free.
inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size,
                                                    HeapWord* const end_value) {
  do {
    HeapWord* obj = top();
    if (pointer_delta(end_value, obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // result can be one of two values:
      // the old top value, in which case the exchange succeeded, or
      // the updated value of top, in which case another thread won the
      // race and we must retry with the new top.
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}
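
// Race sketch (illustrative): if two threads read the same top() and each
// computes its own new_top, only one cmpxchg_ptr installs its value. The
// loser observes result != obj, loops, re-reads the advanced top(), and
// bumps from there, so no two callers are ever handed overlapping memory.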

// Requires locking.
HeapWord* ContiguousSpace::allocate(size_t size) {
  return allocate_impl(size, end());
}

// Lock-free.
HeapWord* ContiguousSpace::par_allocate(size_t size) {
  return par_allocate_impl(size, end());
}

void ContiguousSpace::allocate_temporary_filler(int factor) {
  // allocate a temporary type array, shrinking the free space by a
  // fraction determined by 'factor'
  assert(factor >= 0, "just checking");
  size_t size = pointer_delta(end(), top());

  // if space is full, return
  if (size == 0) return;

  if (factor > 0) {
    size -= size/factor;
  }
  size = align_object_size(size);

  const size_t min_int_array_size = typeArrayOopDesc::header_size(T_INT);
  if (size >= min_int_array_size) {
    size_t length = (size - min_int_array_size) * (HeapWordSize / sizeof(jint));
    // allocate uninitialized int array
    typeArrayOop t = (typeArrayOop) allocate(size);
    assert(t != NULL, "allocation should succeed");
    t->set_mark(markOopDesc::prototype());
    t->set_klass(Universe::intArrayKlassObj());
    t->set_length((int)length);
  } else {
    assert((int) size == instanceOopDesc::header_size(),
           "size for smallest fake object doesn't match");
    instanceOop obj = (instanceOop) allocate(size);
    obj->set_mark(markOopDesc::prototype());
    obj->set_klass_gap(0);
    obj->set_klass(SystemDictionary::object_klass());
  }
}
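
// Illustrative arithmetic for 'factor': the filler occupies
// size - size/factor words of the free space, so factor == 2 leaves about
// half of it free, factor == 4 leaves only a quarter free, and factor == 0
// (no subtraction) fills essentially all of it.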

void EdenSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  set_soft_end(end());
}

// Requires locking.
HeapWord* EdenSpace::allocate(size_t size) {
  return allocate_impl(size, soft_end());
}

// Lock-free.
HeapWord* EdenSpace::par_allocate(size_t size) {
  return par_allocate_impl(size, soft_end());
}

HeapWord* ConcEdenSpace::par_allocate(size_t size)
{
  do {
    // The invariant here is that top() must be read before the soft end
    // (*soft_end_addr() below): top() can never exceed the end, but if
    // _soft_end were updated between the read of the end value and the read
    // of top(), top() could legitimately have grown up to the new end and
    // 'top_val > end_val' would appear to hold. The OrderAccess::loadload()
    // after the top() read enforces this load ordering.
    HeapWord* obj = top();
    OrderAccess::loadload();
    if (pointer_delta(*soft_end_addr(), obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // result can be one of two values:
      // the old top value, in which case the exchange succeeded, or
      // the updated value of top, in which case another thread won the
      // race and we must retry with the new top.
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}

HeapWord* OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* OffsetTableContigSpace::cross_threshold(HeapWord* start, HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}
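
// Contextual note: _offsets.alloc_block() records the extent of the block
// that crossed the threshold in the block offset table, which is what lets
// later block_start() queries (e.g. from card scanning) locate the start of
// the object covering a given address.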

OffsetTableContigSpace::OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                                               MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true)
{
  _offsets.set_contig_space(this);
  initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
}

class VerifyOldOopClosure : public OopClosure {
 public:
  oop  _the_obj;
  bool _allow_dirty;
  void do_oop(oop* p) {
    _the_obj->verify_old_oop(p, _allow_dirty);
  }
  void do_oop(narrowOop* p) {
    _the_obj->verify_old_oop(p, _allow_dirty);
  }
};

#define OBJ_SAMPLE_INTERVAL 0
#define BLOCK_SAMPLE_INTERVAL 100

void OffsetTableContigSpace::verify(bool allow_dirty) const {
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  VerifyOldOopClosure blk;      // Does this do anything?
  blk._allow_dirty = allow_dirty;
  int objs = 0;
  int blocks = 0;

  if (VerifyObjectStartArray) {
    _offsets.verify();
  }

  while (p < top()) {
    size_t size = oop(p)->size();
    // For a sampling of objects in the space, check that each one can be
    // found using the block offset table.
    if (blocks == BLOCK_SAMPLE_INTERVAL) {
      guarantee(p == block_start_const(p + (size/2)),
                "check offset computation");
      blocks = 0;
    } else {
      blocks++;
    }

    if (objs == OBJ_SAMPLE_INTERVAL) {
      oop(p)->verify();
      blk._the_obj = oop(p);
      oop(p)->oop_iterate(&blk);
      objs = 0;
    } else {
      objs++;
    }
    prev_p = p;
    p += size;
  }
  guarantee(p == top(), "end of last object must match end of space");
}

void OffsetTableContigSpace::serialize_block_offset_array_offsets(
                                                     SerializeOopClosure* soc) {
  _offsets.serialize(soc);
}
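
// The two accessors below are the subject of this changeset (6765804): the
// "dead ratios" are percentages in the range 0-100 and can never be
// negative, so they are returned as the unsigned size_t rather than a
// signed type. MarkSweepDeadRatio bounds how much dead space mark-sweep
// compaction may leave behind as "dead wood" in the tenured generation;
// PermMarkSweepDeadRatio plays the same role for the perm generation.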

size_t TenuredSpace::allowed_dead_ratio() const {
  return MarkSweepDeadRatio;
}

size_t ContigPermSpace::allowed_dead_ratio() const {
  return PermMarkSweepDeadRatio;
}