Mon, 28 Jul 2008 15:30:23 -0700
Merge
/*
 * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

# include "incls/_precompiled.incl"
# include "incls/_space.cpp.incl"

void SpaceMemRegionOopsIterClosure::do_oop(oop* p)       { SpaceMemRegionOopsIterClosure::do_oop_work(p); }
void SpaceMemRegionOopsIterClosure::do_oop(narrowOop* p) { SpaceMemRegionOopsIterClosure::do_oop_work(p); }

HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
                                                HeapWord* top_obj) {
  if (top_obj != NULL) {
    if (_sp->block_is_obj(top_obj)) {
      if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
        if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
          // An arrayOop is starting on the dirty card - since we do exact
          // store checks for objArrays we are done.
        } else {
          // Otherwise, it is possible that the object starting on the dirty
          // card spans the entire card, and that the store happened on a
          // later card. Figure out where the object ends.
          // Use the block_size() method of the space over which
          // the iteration is being done. That space (e.g. CMS) may have
          // specific requirements on object sizes which will
          // be reflected in the block_size() method.
          top = top_obj + oop(top_obj)->size();
        }
      }
    } else {
      top = top_obj;
    }
  } else {
    assert(top == _sp->end(), "only case where top_obj == NULL");
  }
  return top;
}
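
// A minimal illustration of the precision logic above, assuming the
// ObjHeadPreciseArray style: array stores dirty the card of the updated
// element, while stores into plain objects dirty only the card of the
// object header. So for a non-array object whose header starts on the
// dirty card, the store may lie on a later card and the scan must cover
// the whole object:
//
//   cards:   |  card 0  |  card 1  |  card 2  |
//   object:  [header ............ stored field]
//
// The field store dirties card 2, but the scan of the non-array starts
// from its header on card 0, so get_actual_top() extends top to
// header + size().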

void DirtyCardToOopClosure::walk_mem_region(MemRegion mr,
                                            HeapWord* bottom,
                                            HeapWord* top) {
  // 1. Blocks may or may not be objects.
  // 2. Even when a block_is_obj(), it may not entirely
  //    occupy the block if the block quantum is larger than
  //    the object size.
  // We can and should try to optimize by calling the non-MemRegion
  // version of oop_iterate() for all but the extremal objects
  // (for which we need to call the MemRegion version of
  // oop_iterate()). To be done post-beta XXX
  for (; bottom < top; bottom += _sp->block_size(bottom)) {
    // As in the case of contiguous space above, we'd like to
    // just use the value returned by oop_iterate to increment the
    // current pointer; unfortunately, that won't work in CMS because
    // we'd need an interface change (it seems) to have the space
    // "adjust the object size" (for instance pad it up to its
    // block alignment or minimum block size restrictions). XXX
    if (_sp->block_is_obj(bottom) &&
        !_sp->obj_allocated_since_save_marks(oop(bottom))) {
      oop(bottom)->oop_iterate(_cl, mr);
    }
  }
}

void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {

  // Some collectors need to do special things whenever their dirty
  // cards are processed. For instance, CMS must remember mutator updates
  // (i.e. dirty cards) so as to re-scan mutated objects.
  // Such work can be piggy-backed here on dirty card scanning, so as to make
  // it slightly more efficient than doing a complete non-destructive pre-scan
  // of the card table.
  MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure();
  if (pCl != NULL) {
    pCl->do_MemRegion(mr);
  }

  HeapWord* bottom = mr.start();
  HeapWord* last   = mr.last();
  HeapWord* top    = mr.end();
  HeapWord* bottom_obj;
  HeapWord* top_obj;

  assert(_precision == CardTableModRefBS::ObjHeadPreciseArray ||
         _precision == CardTableModRefBS::Precise,
         "Only ones we deal with for now.");

  assert(_precision != CardTableModRefBS::ObjHeadPreciseArray ||
         _last_bottom == NULL ||
         top <= _last_bottom,
         "Not decreasing");
  NOT_PRODUCT(_last_bottom = mr.start());

  bottom_obj = _sp->block_start(bottom);
  top_obj    = _sp->block_start(last);

  assert(bottom_obj <= bottom, "just checking");
  assert(top_obj    <= top,    "just checking");

  // Given what we think is the top of the memory region and
  // the start of the object at the top, get the actual
  // value of the top.
  top = get_actual_top(top, top_obj);

  // If the previous call did some part of this region, don't redo.
  if (_precision == CardTableModRefBS::ObjHeadPreciseArray &&
      _min_done != NULL &&
      _min_done < top) {
    top = _min_done;
  }

  // Top may have been reset, and in fact may be below bottom,
  // e.g. the dirty card region is entirely in a now free object
  // -- something that could happen with a concurrent sweeper.
  bottom = MIN2(bottom, top);
  mr     = MemRegion(bottom, top);
  assert(bottom <= top &&
         (_precision != CardTableModRefBS::ObjHeadPreciseArray ||
          _min_done == NULL ||
          top <= _min_done),
         "overlap!");

  // Walk the region if it is not empty; otherwise there is nothing to do.
  if (!mr.is_empty()) {
    walk_mem_region(mr, bottom_obj, top);
  }

  _min_done = bottom;
}
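
// A minimal sketch of how a DirtyCardToOopClosure is typically driven,
// assuming a card-table scanner that coalesces runs of dirty cards into
// MemRegions (the actual driver lives in the card-table/remembered-set
// code, not in this file):
//
//   OopClosure* cl = ...;  // e.g. a scavenge closure
//   DirtyCardToOopClosure* dcto_cl =
//       sp->new_dcto_cl(cl, CardTableModRefBS::ObjHeadPreciseArray,
//                       /* boundary */ NULL);
//   // for each maximal run of dirty cards in sp, highest run first:
//   //   dcto_cl->do_MemRegion(region_covered_by_that_run);
//
// With ObjHeadPreciseArray the regions must be presented in decreasing
// address order (see the "Not decreasing" assert above), since _min_done
// is used to avoid re-scanning the already-processed tail.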

DirtyCardToOopClosure* Space::new_dcto_cl(OopClosure* cl,
                                          CardTableModRefBS::PrecisionStyle precision,
                                          HeapWord* boundary) {
  return new DirtyCardToOopClosure(this, cl, precision, boundary);
}

HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top,
                                               HeapWord* top_obj) {
  if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) {
    if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
      if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
        // An arrayOop is starting on the dirty card - since we do exact
        // store checks for objArrays we are done.
      } else {
        // Otherwise, it is possible that the object starting on the dirty
        // card spans the entire card, and that the store happened on a
        // later card. Figure out where the object ends.
        assert(_sp->block_size(top_obj) == (size_t) oop(top_obj)->size(),
               "Block size and object size mismatch");
        top = top_obj + oop(top_obj)->size();
      }
    }
  } else {
    top = (_sp->toContiguousSpace())->top();
  }
  return top;
}

void Filtering_DCTOC::walk_mem_region(MemRegion mr,
                                      HeapWord* bottom,
                                      HeapWord* top) {
  // Note that this assumption won't hold if we have a concurrent
  // collector in this space, which may have freed up objects after
  // they were dirtied and before the stop-the-world GC that is
  // examining cards here.
  assert(bottom < top, "ought to be at least one obj on a dirty card.");

  if (_boundary != NULL) {
    // We have a boundary outside of which we don't want to look
    // at objects, so create a filtering closure around the
    // oop closure before walking the region.
    FilteringClosure filter(_boundary, _cl);
    walk_mem_region_with_cl(mr, bottom, top, &filter);
  } else {
    // No boundary, simply walk the heap with the oop closure.
    walk_mem_region_with_cl(mr, bottom, top, _cl);
  }
}

// We must replicate this so that the static type of "FilteringClosure"
// (see above) is apparent at the oop_iterate calls.
#define ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,        \
                                                   HeapWord* bottom,    \
                                                   HeapWord* top,       \
                                                   ClosureType* cl) {   \
  bottom += oop(bottom)->oop_iterate(cl, mr);                           \
  if (bottom < top) {                                                   \
    HeapWord* next_obj = bottom + oop(bottom)->size();                  \
    while (next_obj < top) {                                            \
      /* Bottom lies entirely below top, so we can call the */          \
      /* non-memRegion version of oop_iterate below. */                 \
      oop(bottom)->oop_iterate(cl);                                     \
      bottom = next_obj;                                                \
      next_obj = bottom + oop(bottom)->size();                          \
    }                                                                   \
    /* Last object. */                                                  \
    oop(bottom)->oop_iterate(cl, mr);                                   \
  }                                                                     \
}

// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(OopClosure)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
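
// The macro above is a manual stand-in for a template: by stamping out one
// overload per statically known closure type, the compiler can devirtualize
// and inline the hot oop_iterate calls. Roughly the same effect could be
// sketched with a member template (hypothetical; not what this code uses,
// since the closure dispatch scheme here predates such an approach):
//
//   template <class ClosureType>
//   void walk_mem_region_with_cl(MemRegion mr, HeapWord* bottom,
//                                HeapWord* top, ClosureType* cl);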

DirtyCardToOopClosure*
ContiguousSpace::new_dcto_cl(OopClosure* cl,
                             CardTableModRefBS::PrecisionStyle precision,
                             HeapWord* boundary) {
  return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
}

void Space::initialize(MemRegion mr,
                       bool clear_space,
                       bool mangle_space) {
  HeapWord* bottom = mr.start();
  HeapWord* end    = mr.end();
  assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),
         "invalid space boundaries");
  set_bottom(bottom);
  set_end(end);
  if (clear_space) clear(mangle_space);
}

void Space::clear(bool mangle_space) {
  if (ZapUnusedHeapArea && mangle_space) {
    mangle_unused_area();
  }
}

ContiguousSpace::ContiguousSpace(): CompactibleSpace(), _top(NULL) {
  _mangler = new GenSpaceMangler(this);
}

ContiguousSpace::~ContiguousSpace() {
  delete _mangler;
}

void ContiguousSpace::initialize(MemRegion mr,
                                 bool clear_space,
                                 bool mangle_space)
{
  CompactibleSpace::initialize(mr, clear_space, mangle_space);
  _concurrent_iteration_safe_limit = top();
}

void ContiguousSpace::clear(bool mangle_space) {
  set_top(bottom());
  set_saved_mark();
  Space::clear(mangle_space);
}

bool Space::is_in(const void* p) const {
  HeapWord* b = block_start(p);
  return b != NULL && block_is_obj(b);
}

bool ContiguousSpace::is_in(const void* p) const {
  return _bottom <= p && p < _top;
}

bool ContiguousSpace::is_free_block(const HeapWord* p) const {
  return p >= _top;
}

void OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.initialize_threshold();
}

void OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void OffsetTableContigSpace::set_end(HeapWord* new_end) {
  // Space should not advertise an increase in size
  // until after the underlying offset table has been enlarged.
  _offsets.resize(pointer_delta(new_end, bottom()));
  Space::set_end(new_end);
}

#ifndef PRODUCT

void ContiguousSpace::set_top_for_allocations(HeapWord* v) {
  mangler()->set_top_for_allocations(v);
}
void ContiguousSpace::set_top_for_allocations() {
  mangler()->set_top_for_allocations(top());
}
void ContiguousSpace::check_mangled_unused_area(HeapWord* limit) {
  mangler()->check_mangled_unused_area(limit);
}

void ContiguousSpace::check_mangled_unused_area_complete() {
  mangler()->check_mangled_unused_area_complete();
}

// Mangle only the unused space that has not previously
// been mangled and that has not been allocated since being
// mangled.
void ContiguousSpace::mangle_unused_area() {
  mangler()->mangle_unused_area();
}
void ContiguousSpace::mangle_unused_area_complete() {
  mangler()->mangle_unused_area_complete();
}
void ContiguousSpace::mangle_region(MemRegion mr) {
  // Although this method uses SpaceMangler::mangle_region(), which
  // is not specific to a space, when the ContiguousSpace version
  // is called it is always with regard to a space, so this
  // bounds checking is appropriate.
  MemRegion space_mr(bottom(), end());
  assert(space_mr.contains(mr), "Mangling outside space");
  SpaceMangler::mangle_region(mr);
}
#endif  // NOT_PRODUCT

void CompactibleSpace::initialize(MemRegion mr,
                                  bool clear_space,
                                  bool mangle_space) {
  Space::initialize(mr, clear_space, mangle_space);
  _compaction_top = bottom();
  _next_compaction_space = NULL;
}

HeapWord* CompactibleSpace::forward(oop q, size_t size,
                                    CompactPoint* cp, HeapWord* compact_top) {
  // q is alive
  // First check if we should switch compaction space
  assert(this == cp->space, "'this' should be current compaction space.");
  size_t compaction_max_size = pointer_delta(end(), compact_top);
  while (size > compaction_max_size) {
    // switch to next compaction space
    cp->space->set_compaction_top(compact_top);
    cp->space = cp->space->next_compaction_space();
    if (cp->space == NULL) {
      cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
      assert(cp->gen != NULL, "compaction must succeed");
      cp->space = cp->gen->first_compaction_space();
      assert(cp->space != NULL, "generation must have a first compaction space");
    }
    compact_top = cp->space->bottom();
    cp->space->set_compaction_top(compact_top);
    cp->threshold = cp->space->initialize_threshold();
    compaction_max_size = pointer_delta(cp->space->end(), compact_top);
  }

  // store the forwarding pointer into the mark word
  if ((HeapWord*)q != compact_top) {
    q->forward_to(oop(compact_top));
    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
  } else {
    // if the object isn't moving we can just set the mark to the default
    // mark and handle it specially later on.
    q->init_mark();
    assert(q->forwardee() == NULL, "should be forwarded to NULL");
  }

  VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, size));
  compact_top += size;

  // we need to update the offset table so that the beginnings of objects can be
  // found during scavenge. Note that we are updating the offset table based on
  // where the object will be once the compaction phase finishes.
  if (compact_top > cp->threshold)
    cp->threshold =
      cp->space->cross_threshold(compact_top - size, compact_top);
  return compact_top;
}
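
// A minimal sketch of the mark-compact pass that drives forward(), assuming
// the standard in-address-order walk over live objects (the real driver is
// the SCAN_AND_FORWARD macro used by prepare_for_compaction() below):
//
//   HeapWord* compact_top = cp->space->bottom();
//   // for each live object q of size s, in address order:
//   //   compact_top = cp->space->forward(oop(q), s, cp, compact_top);
//
// After this pass every live object's mark word either encodes its
// destination address (forward_to) or carries the default mark (init_mark)
// when the object does not move; the later adjust-pointers and compact
// phases decode it.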

bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words,
                                        HeapWord* q, size_t deadlength) {
  if (allowed_deadspace_words >= deadlength) {
    allowed_deadspace_words -= deadlength;
    oop(q)->set_mark(markOopDesc::prototype()->set_marked());
    const size_t min_int_array_size = typeArrayOopDesc::header_size(T_INT);
    if (deadlength >= min_int_array_size) {
      oop(q)->set_klass(Universe::intArrayKlassObj());
      typeArrayOop(q)->set_length((int)((deadlength - min_int_array_size)
                                        * (HeapWordSize/sizeof(jint))));
    } else {
      assert((int) deadlength == instanceOopDesc::header_size(),
             "size for smallest fake dead object doesn't match");
      oop(q)->set_klass(SystemDictionary::object_klass());
    }
    assert((int) deadlength == oop(q)->size(),
           "make sure size for fake dead object matches");
    // Recall that we required "q == compaction_top".
    return true;
  } else {
    allowed_deadspace_words = 0;
    return false;
  }
}
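
// Worked example of the fake-array length computation above, assuming
// 64-bit HeapWords (HeapWordSize == 8, sizeof(jint) == 4) and an int-array
// header of, say, 2 heap words: a dead range of deadlength == 10 words
// leaves 10 - 2 = 8 words of payload, i.e. 8 * (8/4) = 16 jint elements.
// The filler then parses as a valid int[16] whose size() is exactly the
// 10 words being covered, which keeps the space walkable via block_size().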

#define block_is_always_obj(q) true
#define obj_size(q) oop(q)->size()
#define adjust_obj_size(s) s

void CompactibleSpace::prepare_for_compaction(CompactPoint* cp) {
  SCAN_AND_FORWARD(cp, end, block_is_obj, block_size);
}

// Faster object search.
void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
  SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size);
}

void Space::adjust_pointers() {
  // adjust all the interior pointers to point at the new locations of objects
  // Used by MarkSweep::mark_sweep_phase3()

  // First check to see if there is any work to be done.
  if (used() == 0) {
    return;  // Nothing to do.
  }

  // Otherwise...
  HeapWord* q = bottom();
  HeapWord* t = end();

  debug_only(HeapWord* prev_q = NULL);
  while (q < t) {
    if (oop(q)->is_gc_marked()) {
      // q is alive

      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));
      // point all the oops to the new location
      size_t size = oop(q)->adjust_pointers();
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());

      debug_only(prev_q = q);
      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));

      q += size;
    } else {
      // q is not a live object, but we're not in a compactible space,
      // so we don't have live ranges.
      debug_only(prev_q = q);
      q += block_size(q);
      assert(q > prev_q, "we should be moving forward through memory");
    }
  }
  assert(q == t, "just checking");
}

void CompactibleSpace::adjust_pointers() {
  // Check first if there is any work to do.
  if (used() == 0) {
    return;  // Nothing to do.
  }

  SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
}

void CompactibleSpace::compact() {
  SCAN_AND_COMPACT(obj_size);
}

void Space::print_short() const { print_short_on(tty); }

void Space::print_short_on(outputStream* st) const {
  st->print(" space " SIZE_FORMAT "K, %3d%% used", capacity() / K,
            (int) ((double) used() * 100 / capacity()));
}

void Space::print() const { print_on(tty); }

void Space::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               bottom(), end());
}

void ContiguousSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               bottom(), top(), end());
}

void OffsetTableContigSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
               INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               bottom(), top(), _offsets.threshold(), end());
}

void ContiguousSpace::verify(bool allow_dirty) const {
  HeapWord* p = bottom();
  HeapWord* t = top();
  HeapWord* prev_p = NULL;
  while (p < t) {
    oop(p)->verify();
    prev_p = p;
    p += oop(p)->size();
  }
  guarantee(p == top(), "end of last object must match end of space");
  if (top() != end()) {
    guarantee(top() == block_start(end()-1) &&
              top() == block_start(top()),
              "top should be start of unallocated block, if it exists");
  }
}

void Space::oop_iterate(OopClosure* blk) {
  ObjectToOopClosure blk2(blk);
  object_iterate(&blk2);
}

HeapWord* Space::object_iterate_careful(ObjectClosureCareful* cl) {
  guarantee(false, "NYI");
  return bottom();
}

HeapWord* Space::object_iterate_careful_m(MemRegion mr,
                                          ObjectClosureCareful* cl) {
  guarantee(false, "NYI");
  return bottom();
}

void Space::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
  assert(!mr.is_empty(), "Should be non-empty");
  // We use MemRegion(bottom(), end()) rather than used_region() below
  // because the two are not necessarily equal for some kinds of
  // spaces, in particular, certain kinds of free list spaces.
  // We could use the more complicated but more precise:
  // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
  // but the slight imprecision seems acceptable in the assertion check.
  assert(MemRegion(bottom(), end()).contains(mr),
         "Should be within used space");
  HeapWord* prev = cl->previous();   // max address from last time
  if (prev >= mr.end()) { // nothing to do
    return;
  }
  // This assert will not work when we go from cms space to perm
  // space, and use same closure. Easy fix deferred for later. XXX YSR
  // assert(prev == NULL || contains(prev), "Should be within space");

  bool last_was_obj_array = false;
  HeapWord *blk_start_addr, *region_start_addr;
  if (prev > mr.start()) {
    region_start_addr = prev;
    blk_start_addr    = prev;
    assert(blk_start_addr == block_start(region_start_addr), "invariant");
  } else {
    region_start_addr = mr.start();
    blk_start_addr    = block_start(region_start_addr);
  }
  HeapWord* region_end_addr = mr.end();
  MemRegion derived_mr(region_start_addr, region_end_addr);
  while (blk_start_addr < region_end_addr) {
    const size_t size = block_size(blk_start_addr);
    if (block_is_obj(blk_start_addr)) {
      last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
    } else {
      last_was_obj_array = false;
    }
    blk_start_addr += size;
  }
  if (!last_was_obj_array) {
    assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
           "Should be within (closed) used space");
    assert(blk_start_addr > prev, "Invariant");
    cl->set_previous(blk_start_addr); // min address for next time
  }
}
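
// A minimal sketch of the cursor protocol assumed above: the closure keeps
// a high-water mark so that successive, increasing-address calls never
// revisit a block (previous()/set_previous() are the hooks used here; the
// concrete UpwardsObjectClosure lives elsewhere):
//
//   object_iterate_mem(mr1, cl);  // walks [mr1.start(), mr1.end())
//   object_iterate_mem(mr2, cl);  // walks only the part of mr2 above
//                                 // cl->previous(), i.e. above mr1's end
//
// The exception is when the last block was an object array whose scan may
// be resumed (do_object_bm returned true); in that case the cursor is
// deliberately left where it was so the array can be revisited.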

bool Space::obj_is_alive(const HeapWord* p) const {
  assert(block_is_obj(p), "The address should point to an object");
  return true;
}

void ContiguousSpace::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
  assert(!mr.is_empty(), "Should be non-empty");
  assert(used_region().contains(mr), "Should be within used space");
  HeapWord* prev = cl->previous();   // max address from last time
  if (prev >= mr.end()) { // nothing to do
    return;
  }
  // See comment above (in more general method above) in case you
  // happen to use this method.
  assert(prev == NULL || is_in_reserved(prev), "Should be within space");

  bool last_was_obj_array = false;
  HeapWord *obj_start_addr, *region_start_addr;
  if (prev > mr.start()) {
    region_start_addr = prev;
    obj_start_addr    = prev;
    assert(obj_start_addr == block_start(region_start_addr), "invariant");
  } else {
    region_start_addr = mr.start();
    obj_start_addr    = block_start(region_start_addr);
  }
  HeapWord* region_end_addr = mr.end();
  MemRegion derived_mr(region_start_addr, region_end_addr);
  while (obj_start_addr < region_end_addr) {
    oop obj = oop(obj_start_addr);
    const size_t size = obj->size();
    last_was_obj_array = cl->do_object_bm(obj, derived_mr);
    obj_start_addr += size;
  }
  if (!last_was_obj_array) {
    assert((bottom() <= obj_start_addr) && (obj_start_addr <= end()),
           "Should be within (closed) used space");
    assert(obj_start_addr > prev, "Invariant");
    cl->set_previous(obj_start_addr); // min address for next time
  }
}

#ifndef SERIALGC
#define ContigSpace_PAR_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)         \
                                                                            \
  void ContiguousSpace::par_oop_iterate(MemRegion mr, OopClosureType* blk) {\
    HeapWord* obj_addr = mr.start();                                        \
    HeapWord* t = mr.end();                                                 \
    while (obj_addr < t) {                                                  \
      assert(oop(obj_addr)->is_oop(), "Should be an oop");                  \
      obj_addr += oop(obj_addr)->oop_iterate(blk);                          \
    }                                                                       \
  }

ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DEFN)

#undef ContigSpace_PAR_OOP_ITERATE_DEFN
#endif // SERIALGC

void ContiguousSpace::oop_iterate(OopClosure* blk) {
  if (is_empty()) return;
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();
  // Could call object_iterate, but this is easier.
  while (obj_addr < t) {
    obj_addr += oop(obj_addr)->oop_iterate(blk);
  }
}

void ContiguousSpace::oop_iterate(MemRegion mr, OopClosure* blk) {
  if (is_empty()) {
    return;
  }
  MemRegion cur = MemRegion(bottom(), top());
  mr = mr.intersection(cur);
  if (mr.is_empty()) {
    return;
  }
  if (mr.equals(cur)) {
    oop_iterate(blk);
    return;
  }
  assert(mr.end() <= top(), "just took an intersection above");
  HeapWord* obj_addr = block_start(mr.start());
  HeapWord* t = mr.end();

  // Handle first object specially.
  oop obj = oop(obj_addr);
  SpaceMemRegionOopsIterClosure smr_blk(blk, mr);
  obj_addr += obj->oop_iterate(&smr_blk);
  while (obj_addr < t) {
    oop obj = oop(obj_addr);
    assert(obj->is_oop(), "expected an oop");
    obj_addr += obj->size();
    // If "obj_addr" is not greater than top, then the
    // entire object "obj" is within the region.
    if (obj_addr <= t) {
      obj->oop_iterate(blk);
    } else {
      // "obj" extends beyond end of region
      obj->oop_iterate(&smr_blk);
      break;
    }
  }
}
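
// A minimal sketch of the boundary handling above: only the first object
// (which may start before mr) and the last object (which may extend past
// mr) go through the MemRegion-restricted closure; everything strictly
// inside mr is iterated with the cheaper unrestricted closure.
//
//   mr:             [------------------)
//   objects:    [A.....)[B...)[C...)[D.....)
//   closure:    smr_blk  blk   blk  smr_blk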

void ContiguousSpace::object_iterate(ObjectClosure* blk) {
  if (is_empty()) return;
  WaterMark bm = bottom_mark();
  object_iterate_from(bm, blk);
}

void ContiguousSpace::object_iterate_from(WaterMark mark, ObjectClosure* blk) {
  assert(mark.space() == this, "Mark does not match space");
  HeapWord* p = mark.point();
  while (p < top()) {
    blk->do_object(oop(p));
    p += oop(p)->size();
  }
}

HeapWord*
ContiguousSpace::object_iterate_careful(ObjectClosureCareful* blk) {
  HeapWord* limit = concurrent_iteration_safe_limit();
  assert(limit <= top(), "sanity check");
  for (HeapWord* p = bottom(); p < limit;) {
    size_t size = blk->do_object_careful(oop(p));
    if (size == 0) {
      return p;  // failed at p
    } else {
      p += size;
    }
  }
  return NULL;  // all done
}

#define ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)  \
                                                                          \
void ContiguousSpace::                                                    \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {            \
  HeapWord* t;                                                            \
  HeapWord* p = saved_mark_word();                                        \
  assert(p != NULL, "expected saved mark");                               \
                                                                          \
  const intx interval = PrefetchScanIntervalInBytes;                      \
  do {                                                                    \
    t = top();                                                            \
    while (p < t) {                                                       \
      Prefetch::write(p, interval);                                       \
      debug_only(HeapWord* prev = p);                                     \
      oop m = oop(p);                                                     \
      p += m->oop_iterate(blk);                                           \
    }                                                                     \
  } while (t < top());                                                    \
                                                                          \
  set_saved_mark_word(p);                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN)

#undef ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN
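
// The outer do/while in the macro above re-reads top() because scanning
// the objects between the saved mark and top() may itself allocate into
// this space (e.g. closures that copy or promote objects here); those
// newly allocated objects must be scanned too. The loop terminates once a
// pass completes without top() having moved, at which point the saved mark
// is bumped to p.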

// Very general, slow implementation.
HeapWord* ContiguousSpace::block_start(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size();
    }
    assert(oop(last)->is_oop(), "Should be an object start");
    return last;
  }
}
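
// Note the cost asymmetry: this linear walk from bottom() is O(n) in the
// number of objects below p, which is tolerable only because callers on
// hot paths use OffsetTableContigSpace, whose block-offset table answers
// block_start() queries in (amortized) constant time (see cross_threshold()
// and the _offsets machinery below).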

size_t ContiguousSpace::block_size(const HeapWord* p) const {
  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  HeapWord* current_top = top();
  assert(p <= current_top, "p is not a block start");
  assert(p == current_top || oop(p)->is_oop(), "p is not a block start");
  if (p < current_top)
    return oop(p)->size();
  else {
    assert(p == current_top, "just checking");
    return pointer_delta(end(), (HeapWord*) p);
  }
}

// This version requires locking.
inline HeapWord* ContiguousSpace::allocate_impl(size_t size,
                                                HeapWord* const end_value) {
  assert(Heap_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() &&
          Thread::current()->is_VM_thread()),
         "not locked");
  HeapWord* obj = top();
  if (pointer_delta(end_value, obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
    return obj;
  } else {
    return NULL;
  }
}

// This version is lock-free.
inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size,
                                                    HeapWord* const end_value) {
  do {
    HeapWord* obj = top();
    if (pointer_delta(end_value, obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // result can be one of two values:
      //   the old top value, in which case the exchange succeeded;
      //   otherwise, the current value of top, and we retry.
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}
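
// The CAS loop above is the classic lock-free bump-pointer allocation.
// A sketch of two threads racing for the same top, assuming both want
// 4 words and top is initially T:
//
//   thread 1: obj = T; cmpxchg(T+4, &_top, T) -> returns T   (wins, gets T)
//   thread 2: obj = T; cmpxchg(T+4, &_top, T) -> returns T+4 (loses)
//   thread 2: retries: obj = T+4; cmpxchg(T+8, &_top, T+4)   (wins, gets T+4)
//
// The only exit besides success is running out of room
// (pointer_delta(end_value, obj) < size), which returns NULL and lets the
// caller fall back to a slower, locked or GC-triggering path.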

// Requires locking.
HeapWord* ContiguousSpace::allocate(size_t size) {
  return allocate_impl(size, end());
}

// Lock-free.
HeapWord* ContiguousSpace::par_allocate(size_t size) {
  return par_allocate_impl(size, end());
}

void ContiguousSpace::allocate_temporary_filler(int factor) {
  // allocate temporary type array decreasing free size with factor 'factor'
  assert(factor >= 0, "just checking");
  size_t size = pointer_delta(end(), top());

  // if space is full, return
  if (size == 0) return;

  if (factor > 0) {
    size -= size/factor;
  }
  size = align_object_size(size);

  const size_t min_int_array_size = typeArrayOopDesc::header_size(T_INT);
  if (size >= min_int_array_size) {
    size_t length = (size - min_int_array_size) * (HeapWordSize / sizeof(jint));
    // allocate uninitialized int array
    typeArrayOop t = (typeArrayOop) allocate(size);
    assert(t != NULL, "allocation should succeed");
    t->set_mark(markOopDesc::prototype());
    t->set_klass(Universe::intArrayKlassObj());
    t->set_length((int)length);
  } else {
    assert((int) size == instanceOopDesc::header_size(),
           "size for smallest fake object doesn't match");
    instanceOop obj = (instanceOop) allocate(size);
    obj->set_mark(markOopDesc::prototype());
    obj->set_klass_gap(0);
    obj->set_klass(SystemDictionary::object_klass());
  }
}
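
// Example of the 'factor' arithmetic above: with 1000 free words and
// factor == 4, size becomes 1000 - 1000/4 = 750, so the filler consumes
// three quarters of the remaining free space and leaves one quarter
// allocatable; factor == 0 fills the space completely.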

void EdenSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  set_soft_end(end());
}

// Requires locking.
HeapWord* EdenSpace::allocate(size_t size) {
  return allocate_impl(size, soft_end());
}

// Lock-free.
HeapWord* EdenSpace::par_allocate(size_t size) {
  return par_allocate_impl(size, soft_end());
}

HeapWord* ConcEdenSpace::par_allocate(size_t size)
{
  do {
    // The invariant is that top() is read before the soft end: top() can
    // never exceed end(), but if _soft_end is updated between the read of
    // the soft end and the read of top(), top() may grow up to the new
    // soft end and the stale comparison would spuriously see top above
    // the limit. The OrderAccess::loadload() after the top() read
    // enforces that load ordering.
    HeapWord* obj = top();
    OrderAccess::loadload();
    if (pointer_delta(*soft_end_addr(), obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // result can be one of two values:
      //   the old top value, in which case the exchange succeeded;
      //   otherwise, the current value of top, and we retry.
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}
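
// A sketch of the interleaving the loadload() above rules out (the reads
// belong to this allocator; the writer is whoever grows _soft_end):
//
//   without the barrier, the soft-end load could be hoisted above top():
//     allocator: e = *soft_end_addr();       // stale, small value
//     writer:    *soft_end_addr() = bigger;  // soft end grows
//     (another thread bumps top past e)
//     allocator: obj = top();                // obj > e
//   now pointer_delta(e, obj) underflows and the size check misbehaves.
//
// Reading top() first, with loadload() before the soft-end read, ensures
// the soft end observed is at least as fresh as the top observed.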

HeapWord* OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* OffsetTableContigSpace::cross_threshold(HeapWord* start, HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}

OffsetTableContigSpace::OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                                               MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true)
{
  _offsets.set_contig_space(this);
  initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
}

class VerifyOldOopClosure : public OopClosure {
 public:
  oop  _the_obj;
  bool _allow_dirty;
  void do_oop(oop* p) {
    _the_obj->verify_old_oop(p, _allow_dirty);
  }
  void do_oop(narrowOop* p) {
    _the_obj->verify_old_oop(p, _allow_dirty);
  }
};

#define OBJ_SAMPLE_INTERVAL 0
#define BLOCK_SAMPLE_INTERVAL 100

void OffsetTableContigSpace::verify(bool allow_dirty) const {
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  VerifyOldOopClosure blk;      // Does this do anything?
  blk._allow_dirty = allow_dirty;
  int objs = 0;
  int blocks = 0;

  if (VerifyObjectStartArray) {
    _offsets.verify();
  }

  while (p < top()) {
    size_t size = oop(p)->size();
    // For a sampling of objects in the space, find it using the
    // block offset table.
    if (blocks == BLOCK_SAMPLE_INTERVAL) {
      guarantee(p == block_start(p + (size/2)), "check offset computation");
      blocks = 0;
    } else {
      blocks++;
    }

    if (objs == OBJ_SAMPLE_INTERVAL) {
      oop(p)->verify();
      blk._the_obj = oop(p);
      oop(p)->oop_iterate(&blk);
      objs = 0;
    } else {
      objs++;
    }
    prev_p = p;
    p += size;
  }
  guarantee(p == top(), "end of last object must match end of space");
}

void OffsetTableContigSpace::serialize_block_offset_array_offsets(
                                                      SerializeOopClosure* soc) {
  _offsets.serialize(soc);
}

int TenuredSpace::allowed_dead_ratio() const {
  return MarkSweepDeadRatio;
}

int ContigPermSpace::allowed_dead_ratio() const {
  return PermMarkSweepDeadRatio;
}