Tue, 23 Nov 2010 13:22:55 -0800
6989984: Use standard include model for HotSpot
Summary: Replaced MakeDeps and the includeDB files with more standardized solutions.
Reviewed-by: coleenp, kvn, kamg
1 /*
2 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "classfile/systemDictionary.hpp"
27 #include "classfile/vmSymbols.hpp"
28 #include "gc_implementation/shared/liveRange.hpp"
29 #include "gc_implementation/shared/markSweep.hpp"
30 #include "gc_implementation/shared/spaceDecorator.hpp"
31 #include "memory/blockOffsetTable.inline.hpp"
32 #include "memory/defNewGeneration.hpp"
33 #include "memory/genCollectedHeap.hpp"
34 #include "memory/space.hpp"
35 #include "memory/space.inline.hpp"
36 #include "memory/universe.inline.hpp"
37 #include "oops/oop.inline.hpp"
38 #include "oops/oop.inline2.hpp"
39 #include "runtime/java.hpp"
40 #include "runtime/safepoint.hpp"
41 #include "utilities/copy.hpp"
42 #include "utilities/globalDefinitions.hpp"
44 void SpaceMemRegionOopsIterClosure::do_oop(oop* p) { SpaceMemRegionOopsIterClosure::do_oop_work(p); }
45 void SpaceMemRegionOopsIterClosure::do_oop(narrowOop* p) { SpaceMemRegionOopsIterClosure::do_oop_work(p); }
47 HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
48 HeapWord* top_obj) {
49 if (top_obj != NULL) {
50 if (_sp->block_is_obj(top_obj)) {
51 if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
52 if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
53 // An arrayOop is starting on the dirty card - since we do exact
54 // store checks for objArrays we are done.
55 } else {
56 // Otherwise, it is possible that the object starting on the dirty
57 // card spans the entire card, and that the store happened on a
58 // later card. Figure out where the object ends.
59 // Use the block_size() method of the space over which
60 // the iteration is being done. That space (e.g. CMS) may have
61 // specific requirements on object sizes which will
62 // be reflected in the block_size() method.
63 top = top_obj + oop(top_obj)->size();
64 }
65 }
66 } else {
67 top = top_obj;
68 }
69 } else {
70 assert(top == _sp->end(), "only case where top_obj == NULL");
71 }
72 return top;
73 }
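// Illustration of the non-array case handled above (a hypothetical layout,
// not part of the original code): an object that starts on a dirty card may
// have been stored into on a later card, so the scan limit is pushed out to
// the object's end.
//
//        |---- dirty card ----|------ next card ------|
//        [ object A .................................. ]
//         ^ top_obj                                     ^ top_obj + oop(top_obj)->size()
//
// objArrays and typeArrays keep the original top, since stores into object
// arrays are card-marked exactly (and type arrays contain no oops).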
75 void DirtyCardToOopClosure::walk_mem_region(MemRegion mr,
76 HeapWord* bottom,
77 HeapWord* top) {
78 // 1. Blocks may or may not be objects.
79 // 2. Even when a block_is_obj(), it may not entirely
80 // occupy the block if the block quantum is larger than
81 // the object size.
82 // We can and should try to optimize by calling the non-MemRegion
83 // version of oop_iterate() for all but the extremal objects
84 // (for which we need to call the MemRegion version of
85 // oop_iterate()). To be done post-beta XXX
86 for (; bottom < top; bottom += _sp->block_size(bottom)) {
87 // As in the case of contiguous space above, we'd like to
88 // just use the value returned by oop_iterate to increment the
89 // current pointer; unfortunately, that won't work in CMS because
90 // we'd need an interface change (it seems) to have the space
91 // "adjust the object size" (for instance pad it up to its
92 // block alignment or minimum block size restrictions). XXX
93 if (_sp->block_is_obj(bottom) &&
94 !_sp->obj_allocated_since_save_marks(oop(bottom))) {
95 oop(bottom)->oop_iterate(_cl, mr);
96 }
97 }
98 }
100 void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {
102 // Some collectors need to do special things whenever their dirty
103 // cards are processed. For instance, CMS must remember mutator updates
104 // (i.e. dirty cards) so as to re-scan mutated objects.
105 // Such work can be piggy-backed here on dirty card scanning, so as to make
106 // it slightly more efficient than doing a complete non-destructive pre-scan
107 // of the card table.
108 MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure();
109 if (pCl != NULL) {
110 pCl->do_MemRegion(mr);
111 }
113 HeapWord* bottom = mr.start();
114 HeapWord* last = mr.last();
115 HeapWord* top = mr.end();
116 HeapWord* bottom_obj;
117 HeapWord* top_obj;
119 assert(_precision == CardTableModRefBS::ObjHeadPreciseArray ||
120 _precision == CardTableModRefBS::Precise,
121 "Only ones we deal with for now.");
123 assert(_precision != CardTableModRefBS::ObjHeadPreciseArray ||
124 _cl->idempotent() || _last_bottom == NULL ||
125 top <= _last_bottom,
126 "Not decreasing");
127 NOT_PRODUCT(_last_bottom = mr.start());
129 bottom_obj = _sp->block_start(bottom);
130 top_obj = _sp->block_start(last);
132 assert(bottom_obj <= bottom, "just checking");
133 assert(top_obj <= top, "just checking");
135 // Given what we think is the top of the memory region and
136 // the start of the object at the top, get the actual
137 // value of the top.
138 top = get_actual_top(top, top_obj);
140 // If the previous call did some part of this region, don't redo.
141 if (_precision == CardTableModRefBS::ObjHeadPreciseArray &&
142 _min_done != NULL &&
143 _min_done < top) {
144 top = _min_done;
145 }
147 // Top may have been reset, and in fact may be below bottom,
148 // e.g. the dirty card region is entirely in a now free object
149 // -- something that could happen with a concurrent sweeper.
150 bottom = MIN2(bottom, top);
151 mr = MemRegion(bottom, top);
152 assert(bottom <= top &&
153 (_precision != CardTableModRefBS::ObjHeadPreciseArray ||
154 _min_done == NULL ||
155 top <= _min_done),
156 "overlap!");
158 // Walk the region if it is not empty; otherwise there is nothing to do.
159 if (!mr.is_empty()) {
160 walk_mem_region(mr, bottom_obj, top);
161 }
163 // An idempotent closure might be applied in any order, so we don't
164 // record a _min_done for it.
165 if (!_cl->idempotent()) {
166 _min_done = bottom;
167 } else {
168 assert(_min_done == _last_explicit_min_done,
169 "Don't update _min_done for idempotent cl");
170 }
171 }
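// A rough usage sketch (hedged: the actual driver lives in the card table /
// remembered set code, not in this file). A collector obtains a closure for
// a space via new_dcto_cl() below and then feeds it one dirty range at a
// time; oop_cl, boundary and dirty_mr are placeholder names:
//
//   DirtyCardToOopClosure* dcto_cl =
//     sp->new_dcto_cl(oop_cl, CardTableModRefBS::ObjHeadPreciseArray, boundary);
//   dcto_cl->do_MemRegion(dirty_mr);    // once per dirty card range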
173 DirtyCardToOopClosure* Space::new_dcto_cl(OopClosure* cl,
174 CardTableModRefBS::PrecisionStyle precision,
175 HeapWord* boundary) {
176 return new DirtyCardToOopClosure(this, cl, precision, boundary);
177 }
179 HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top,
180 HeapWord* top_obj) {
181 if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) {
182 if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
183 if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
184 // An arrayOop is starting on the dirty card - since we do exact
185 // store checks for objArrays we are done.
186 } else {
187 // Otherwise, it is possible that the object starting on the dirty
188 // card spans the entire card, and that the store happened on a
189 // later card. Figure out where the object ends.
190 assert(_sp->block_size(top_obj) == (size_t) oop(top_obj)->size(),
191 "Block size and object size mismatch");
192 top = top_obj + oop(top_obj)->size();
193 }
194 }
195 } else {
196 top = (_sp->toContiguousSpace())->top();
197 }
198 return top;
199 }
201 void Filtering_DCTOC::walk_mem_region(MemRegion mr,
202 HeapWord* bottom,
203 HeapWord* top) {
204 // Note that this assumption won't hold if we have a concurrent
205 // collector in this space, which may have freed up objects after
206 // they were dirtied and before the stop-the-world GC that is
207 // examining cards here.
208 assert(bottom < top, "ought to be at least one obj on a dirty card.");
210 if (_boundary != NULL) {
211 // We have a boundary outside of which we don't want to look
212 // at objects, so create a filtering closure around the
213 // oop closure before walking the region.
214 FilteringClosure filter(_boundary, _cl);
215 walk_mem_region_with_cl(mr, bottom, top, &filter);
216 } else {
217 // No boundary, simply walk the heap with the oop closure.
218 walk_mem_region_with_cl(mr, bottom, top, _cl);
219 }
221 }
223 // We must replicate this so that the static type of "FilteringClosure"
224 // (see above) is apparent at the oop_iterate calls.
225 #define ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
226 void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr, \
227 HeapWord* bottom, \
228 HeapWord* top, \
229 ClosureType* cl) { \
230 bottom += oop(bottom)->oop_iterate(cl, mr); \
231 if (bottom < top) { \
232 HeapWord* next_obj = bottom + oop(bottom)->size(); \
233 while (next_obj < top) { \
234 /* Bottom lies entirely below top, so we can call the */ \
235 /* non-memRegion version of oop_iterate below. */ \
236 oop(bottom)->oop_iterate(cl); \
237 bottom = next_obj; \
238 next_obj = bottom + oop(bottom)->size(); \
239 } \
240 /* Last object. */ \
241 oop(bottom)->oop_iterate(cl, mr); \
242 } \
243 }
245 // (There are only two of these, rather than N, because the split is due
246 // only to the introduction of the FilteringClosure, a local part of the
247 // impl of this abstraction.)
248 ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(OopClosure)
249 ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
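// For reference, the two instantiations above expand (roughly) to this pair
// of definitions; only the static type of the closure argument differs,
// which is what allows the closure-specific oop_iterate overload to be
// chosen for FilteringClosure:
//
//   void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion, HeapWord*,
//                                                      HeapWord*, OopClosure*) { ... }
//   void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion, HeapWord*,
//                                                      HeapWord*, FilteringClosure*) { ... }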
251 DirtyCardToOopClosure*
252 ContiguousSpace::new_dcto_cl(OopClosure* cl,
253 CardTableModRefBS::PrecisionStyle precision,
254 HeapWord* boundary) {
255 return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
256 }
258 void Space::initialize(MemRegion mr,
259 bool clear_space,
260 bool mangle_space) {
261 HeapWord* bottom = mr.start();
262 HeapWord* end = mr.end();
263 assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),
264 "invalid space boundaries");
265 set_bottom(bottom);
266 set_end(end);
267 if (clear_space) clear(mangle_space);
268 }
270 void Space::clear(bool mangle_space) {
271 if (ZapUnusedHeapArea && mangle_space) {
272 mangle_unused_area();
273 }
274 }
276 ContiguousSpace::ContiguousSpace(): CompactibleSpace(), _top(NULL),
277 _concurrent_iteration_safe_limit(NULL) {
278 _mangler = new GenSpaceMangler(this);
279 }
281 ContiguousSpace::~ContiguousSpace() {
282 delete _mangler;
283 }
285 void ContiguousSpace::initialize(MemRegion mr,
286 bool clear_space,
287 bool mangle_space)
288 {
289 CompactibleSpace::initialize(mr, clear_space, mangle_space);
290 set_concurrent_iteration_safe_limit(top());
291 }
293 void ContiguousSpace::clear(bool mangle_space) {
294 set_top(bottom());
295 set_saved_mark();
296 CompactibleSpace::clear(mangle_space);
297 }
299 bool Space::is_in(const void* p) const {
300 HeapWord* b = block_start_const(p);
301 return b != NULL && block_is_obj(b);
302 }
304 bool ContiguousSpace::is_in(const void* p) const {
305 return _bottom <= p && p < _top;
306 }
308 bool ContiguousSpace::is_free_block(const HeapWord* p) const {
309 return p >= _top;
310 }
312 void OffsetTableContigSpace::clear(bool mangle_space) {
313 ContiguousSpace::clear(mangle_space);
314 _offsets.initialize_threshold();
315 }
317 void OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
318 Space::set_bottom(new_bottom);
319 _offsets.set_bottom(new_bottom);
320 }
322 void OffsetTableContigSpace::set_end(HeapWord* new_end) {
323 // Space should not advertise an increase in size
324 // until after the underlying offset table has been enlarged.
325 _offsets.resize(pointer_delta(new_end, bottom()));
326 Space::set_end(new_end);
327 }
329 #ifndef PRODUCT
331 void ContiguousSpace::set_top_for_allocations(HeapWord* v) {
332 mangler()->set_top_for_allocations(v);
333 }
334 void ContiguousSpace::set_top_for_allocations() {
335 mangler()->set_top_for_allocations(top());
336 }
337 void ContiguousSpace::check_mangled_unused_area(HeapWord* limit) {
338 mangler()->check_mangled_unused_area(limit);
339 }
341 void ContiguousSpace::check_mangled_unused_area_complete() {
342 mangler()->check_mangled_unused_area_complete();
343 }
345 // Mangle only the unused space that has not previously
346 // been mangled and that has not been allocated since being
347 // mangled.
348 void ContiguousSpace::mangle_unused_area() {
349 mangler()->mangle_unused_area();
350 }
351 void ContiguousSpace::mangle_unused_area_complete() {
352 mangler()->mangle_unused_area_complete();
353 }
354 void ContiguousSpace::mangle_region(MemRegion mr) {
355 // Although this method uses SpaceMangler::mangle_region(), which
356 // is not specific to a space, when the ContiguousSpace version
357 // is called it is always with regard to a space, so this
358 // bounds checking is appropriate.
359 MemRegion space_mr(bottom(), end());
360 assert(space_mr.contains(mr), "Mangling outside space");
361 SpaceMangler::mangle_region(mr);
362 }
363 #endif // NOT_PRODUCT
365 void CompactibleSpace::initialize(MemRegion mr,
366 bool clear_space,
367 bool mangle_space) {
368 Space::initialize(mr, clear_space, mangle_space);
369 set_compaction_top(bottom());
370 _next_compaction_space = NULL;
371 }
373 void CompactibleSpace::clear(bool mangle_space) {
374 Space::clear(mangle_space);
375 _compaction_top = bottom();
376 }
378 HeapWord* CompactibleSpace::forward(oop q, size_t size,
379 CompactPoint* cp, HeapWord* compact_top) {
380 // q is alive
381 // First check if we should switch compaction space
382 assert(this == cp->space, "'this' should be current compaction space.");
383 size_t compaction_max_size = pointer_delta(end(), compact_top);
384 while (size > compaction_max_size) {
385 // switch to next compaction space
386 cp->space->set_compaction_top(compact_top);
387 cp->space = cp->space->next_compaction_space();
388 if (cp->space == NULL) {
389 cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
390 assert(cp->gen != NULL, "compaction must succeed");
391 cp->space = cp->gen->first_compaction_space();
392 assert(cp->space != NULL, "generation must have a first compaction space");
393 }
394 compact_top = cp->space->bottom();
395 cp->space->set_compaction_top(compact_top);
396 cp->threshold = cp->space->initialize_threshold();
397 compaction_max_size = pointer_delta(cp->space->end(), compact_top);
398 }
400 // store the forwarding pointer into the mark word
401 if ((HeapWord*)q != compact_top) {
402 q->forward_to(oop(compact_top));
403 assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
404 } else {
405 // if the object isn't moving we can just set the mark to the default
406 // mark and handle it specially later on.
407 q->init_mark();
408 assert(q->forwardee() == NULL, "should be forwarded to NULL");
409 }
411 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, size));
412 compact_top += size;
414 // we need to update the offset table so that the beginnings of objects can be
415 // found during scavenge. Note that we are updating the offset table based on
416 // where the object will be once the compaction phase finishes.
417 if (compact_top > cp->threshold)
418 cp->threshold =
419 cp->space->cross_threshold(compact_top - size, compact_top);
420 return compact_top;
421 }
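// Worked example (hypothetical word sizes, for illustration only): assume
// compact_top starts at bottom() of this space and three live objects of
// 8, 4 and 6 words are forwarded in address order. Their destinations are
// simply consecutive:
//
//   obj1 -> bottom() + 0     compact_top becomes bottom() + 8
//   obj2 -> bottom() + 8     compact_top becomes bottom() + 12
//   obj3 -> bottom() + 12    compact_top becomes bottom() + 18
//
// An object whose address already equals compact_top does not move; its mark
// is reset to the default so the compact phase can recognize it later.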
424 bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words,
425 HeapWord* q, size_t deadlength) {
426 if (allowed_deadspace_words >= deadlength) {
427 allowed_deadspace_words -= deadlength;
428 CollectedHeap::fill_with_object(q, deadlength);
429 oop(q)->set_mark(oop(q)->mark()->set_marked());
430 assert((int) deadlength == oop(q)->size(), "bad filler object size");
431 // Recall that we required "q == compaction_top".
432 return true;
433 } else {
434 allowed_deadspace_words = 0;
435 return false;
436 }
437 }
439 #define block_is_always_obj(q) true
440 #define obj_size(q) oop(q)->size()
441 #define adjust_obj_size(s) s
443 void CompactibleSpace::prepare_for_compaction(CompactPoint* cp) {
444 SCAN_AND_FORWARD(cp, end, block_is_obj, block_size);
445 }
447 // Faster object search.
448 void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
449 SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size);
450 }
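// The two prepare_for_compaction() variants above differ only in what they
// hand to SCAN_AND_FORWARD (defined in space.hpp): a general
// CompactibleSpace scans up to end() and must ask block_is_obj() and
// block_size() about every block, while a ContiguousSpace knows that every
// block below top() is an object and can use the trivial
// block_is_always_obj/obj_size macros defined above. (A summary of the call
// sites here, not a full description of the macro.)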
452 void Space::adjust_pointers() {
453 // adjust all the interior pointers to point at the new locations of objects
454 // Used by MarkSweep::mark_sweep_phase3()
456 // First check to see if there is any work to be done.
457 if (used() == 0) {
458 return; // Nothing to do.
459 }
461 // Otherwise...
462 HeapWord* q = bottom();
463 HeapWord* t = end();
465 debug_only(HeapWord* prev_q = NULL);
466 while (q < t) {
467 if (oop(q)->is_gc_marked()) {
468 // q is alive
470 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));
471 // point all the oops to the new location
472 size_t size = oop(q)->adjust_pointers();
473 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());
475 debug_only(prev_q = q);
476 VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));
478 q += size;
479 } else {
480 // q is not a live object, but we're not in a compactible space,
481 // so we don't have live ranges.
482 debug_only(prev_q = q);
483 q += block_size(q);
484 assert(q > prev_q, "we should be moving forward through memory");
485 }
486 }
487 assert(q == t, "just checking");
488 }
490 void CompactibleSpace::adjust_pointers() {
491 // First check if there is any work to do.
492 if (used() == 0) {
493 return; // Nothing to do.
494 }
496 SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
497 }
499 void CompactibleSpace::compact() {
500 SCAN_AND_COMPACT(obj_size);
501 }
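// Taken together, prepare_for_compaction(), adjust_pointers() and compact()
// are the space-local pieces of a sliding mark-compact cycle (a rough
// outline; the driving loop lives in the MarkSweep code, not here):
//
//   1. prepare_for_compaction(cp)  -- forward(): choose destination addresses
//   2. adjust_pointers()           -- rewrite interior oops to the forwardees
//   3. compact()                   -- move the objects to their destinations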
503 void Space::print_short() const { print_short_on(tty); }
505 void Space::print_short_on(outputStream* st) const {
506 st->print(" space " SIZE_FORMAT "K, %3d%% used", capacity() / K,
507 (int) ((double) used() * 100 / capacity()));
508 }
510 void Space::print() const { print_on(tty); }
512 void Space::print_on(outputStream* st) const {
513 print_short_on(st);
514 st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ")",
515 bottom(), end());
516 }
518 void ContiguousSpace::print_on(outputStream* st) const {
519 print_short_on(st);
520 st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
521 bottom(), top(), end());
522 }
524 void OffsetTableContigSpace::print_on(outputStream* st) const {
525 print_short_on(st);
526 st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
527 INTPTR_FORMAT ", " INTPTR_FORMAT ")",
528 bottom(), top(), _offsets.threshold(), end());
529 }
531 void ContiguousSpace::verify(bool allow_dirty) const {
532 HeapWord* p = bottom();
533 HeapWord* t = top();
534 HeapWord* prev_p = NULL;
535 while (p < t) {
536 oop(p)->verify();
537 prev_p = p;
538 p += oop(p)->size();
539 }
540 guarantee(p == top(), "end of last object must match end of space");
541 if (top() != end()) {
542 guarantee(top() == block_start_const(end()-1) &&
543 top() == block_start_const(top()),
544 "top should be start of unallocated block, if it exists");
545 }
546 }
548 void Space::oop_iterate(OopClosure* blk) {
549 ObjectToOopClosure blk2(blk);
550 object_iterate(&blk2);
551 }
553 HeapWord* Space::object_iterate_careful(ObjectClosureCareful* cl) {
554 guarantee(false, "NYI");
555 return bottom();
556 }
558 HeapWord* Space::object_iterate_careful_m(MemRegion mr,
559 ObjectClosureCareful* cl) {
560 guarantee(false, "NYI");
561 return bottom();
562 }
565 void Space::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
566 assert(!mr.is_empty(), "Should be non-empty");
567 // We use MemRegion(bottom(), end()) rather than used_region() below
568 // because the two are not necessarily equal for some kinds of
569 // spaces, in particular, certain kinds of free list spaces.
570 // We could use the more complicated but more precise:
571 // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
572 // but the slight imprecision seems acceptable in the assertion check.
573 assert(MemRegion(bottom(), end()).contains(mr),
574 "Should be within used space");
575 HeapWord* prev = cl->previous(); // max address from last time
576 if (prev >= mr.end()) { // nothing to do
577 return;
578 }
579 // This assert will not work when we go from cms space to perm
580 // space and use the same closure. Easy fix deferred for later. XXX YSR
581 // assert(prev == NULL || contains(prev), "Should be within space");
583 bool last_was_obj_array = false;
584 HeapWord *blk_start_addr, *region_start_addr;
585 if (prev > mr.start()) {
586 region_start_addr = prev;
587 blk_start_addr = prev;
588 // The previous invocation may have pushed "prev" beyond the
589 // last allocated block, yet there may still be blocks
590 // in this region due to a particular coalescing policy.
591 // Relax the assertion so that the case where the unallocated
592 // block is maintained and "prev" is beyond the unallocated
593 // block does not cause the assertion to fire.
594 assert((BlockOffsetArrayUseUnallocatedBlock &&
595 (!is_in(prev))) ||
596 (blk_start_addr == block_start(region_start_addr)), "invariant");
597 } else {
598 region_start_addr = mr.start();
599 blk_start_addr = block_start(region_start_addr);
600 }
601 HeapWord* region_end_addr = mr.end();
602 MemRegion derived_mr(region_start_addr, region_end_addr);
603 while (blk_start_addr < region_end_addr) {
604 const size_t size = block_size(blk_start_addr);
605 if (block_is_obj(blk_start_addr)) {
606 last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
607 } else {
608 last_was_obj_array = false;
609 }
610 blk_start_addr += size;
611 }
612 if (!last_was_obj_array) {
613 assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
614 "Should be within (closed) used space");
615 assert(blk_start_addr > prev, "Invariant");
616 cl->set_previous(blk_start_addr); // min address for next time
617 }
618 }
620 bool Space::obj_is_alive(const HeapWord* p) const {
621 assert (block_is_obj(p), "The address should point to an object");
622 return true;
623 }
625 void ContiguousSpace::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
626 assert(!mr.is_empty(), "Should be non-empty");
627 assert(used_region().contains(mr), "Should be within used space");
628 HeapWord* prev = cl->previous(); // max address from last time
629 if (prev >= mr.end()) { // nothing to do
630 return;
631 }
632 // See the comment in the more general method above in case you
633 // happen to use this method.
634 assert(prev == NULL || is_in_reserved(prev), "Should be within space");
636 bool last_was_obj_array = false;
637 HeapWord *obj_start_addr, *region_start_addr;
638 if (prev > mr.start()) {
639 region_start_addr = prev;
640 obj_start_addr = prev;
641 assert(obj_start_addr == block_start(region_start_addr), "invariant");
642 } else {
643 region_start_addr = mr.start();
644 obj_start_addr = block_start(region_start_addr);
645 }
646 HeapWord* region_end_addr = mr.end();
647 MemRegion derived_mr(region_start_addr, region_end_addr);
648 while (obj_start_addr < region_end_addr) {
649 oop obj = oop(obj_start_addr);
650 const size_t size = obj->size();
651 last_was_obj_array = cl->do_object_bm(obj, derived_mr);
652 obj_start_addr += size;
653 }
654 if (!last_was_obj_array) {
655 assert((bottom() <= obj_start_addr) && (obj_start_addr <= end()),
656 "Should be within (closed) used space");
657 assert(obj_start_addr > prev, "Invariant");
658 cl->set_previous(obj_start_addr); // min address for next time
659 }
660 }
662 #ifndef SERIALGC
663 #define ContigSpace_PAR_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
664 \
665 void ContiguousSpace::par_oop_iterate(MemRegion mr, OopClosureType* blk) {\
666 HeapWord* obj_addr = mr.start(); \
667 HeapWord* t = mr.end(); \
668 while (obj_addr < t) { \
669 assert(oop(obj_addr)->is_oop(), "Should be an oop"); \
670 obj_addr += oop(obj_addr)->oop_iterate(blk); \
671 } \
672 }
674 ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DEFN)
676 #undef ContigSpace_PAR_OOP_ITERATE_DEFN
677 #endif // SERIALGC
679 void ContiguousSpace::oop_iterate(OopClosure* blk) {
680 if (is_empty()) return;
681 HeapWord* obj_addr = bottom();
682 HeapWord* t = top();
683 // Could call object_iterate(), but this is easier.
684 while (obj_addr < t) {
685 obj_addr += oop(obj_addr)->oop_iterate(blk);
686 }
687 }
689 void ContiguousSpace::oop_iterate(MemRegion mr, OopClosure* blk) {
690 if (is_empty()) {
691 return;
692 }
693 MemRegion cur = MemRegion(bottom(), top());
694 mr = mr.intersection(cur);
695 if (mr.is_empty()) {
696 return;
697 }
698 if (mr.equals(cur)) {
699 oop_iterate(blk);
700 return;
701 }
702 assert(mr.end() <= top(), "just took an intersection above");
703 HeapWord* obj_addr = block_start(mr.start());
704 HeapWord* t = mr.end();
706 // Handle first object specially.
707 oop obj = oop(obj_addr);
708 SpaceMemRegionOopsIterClosure smr_blk(blk, mr);
709 obj_addr += obj->oop_iterate(&smr_blk);
710 while (obj_addr < t) {
711 oop obj = oop(obj_addr);
712 assert(obj->is_oop(), "expected an oop");
713 obj_addr += obj->size();
714 // If "obj_addr" is not greater than the region's end, then the
715 // entire object "obj" is within the region.
716 if (obj_addr <= t) {
717 obj->oop_iterate(blk);
718 } else {
719 // "obj" extends beyond end of region
720 obj->oop_iterate(&smr_blk);
721 break;
722 }
723 };
724 }
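// Note on the loop above: only the first and the last object can straddle
// the region boundary, so those are visited through the bounded
// SpaceMemRegionOopsIterClosure, while objects lying entirely inside the
// region use the plain closure. A hypothetical layout, for illustration:
//
//        mr.start()                                  mr.end()
//           |                                           |
//   ...[ obj0  ][   obj1   ][   obj2   ][     obj3      ]...
//       bounded     plain       plain        bounded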
726 void ContiguousSpace::object_iterate(ObjectClosure* blk) {
727 if (is_empty()) return;
728 WaterMark bm = bottom_mark();
729 object_iterate_from(bm, blk);
730 }
732 // For a contiguous space, object_iterate() and safe_object_iterate()
733 // are the same.
734 void ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
735 object_iterate(blk);
736 }
738 void ContiguousSpace::object_iterate_from(WaterMark mark, ObjectClosure* blk) {
739 assert(mark.space() == this, "Mark does not match space");
740 HeapWord* p = mark.point();
741 while (p < top()) {
742 blk->do_object(oop(p));
743 p += oop(p)->size();
744 }
745 }
747 HeapWord*
748 ContiguousSpace::object_iterate_careful(ObjectClosureCareful* blk) {
749 HeapWord * limit = concurrent_iteration_safe_limit();
750 assert(limit <= top(), "sanity check");
751 for (HeapWord* p = bottom(); p < limit;) {
752 size_t size = blk->do_object_careful(oop(p));
753 if (size == 0) {
754 return p; // failed at p
755 } else {
756 p += size;
757 }
758 }
759 return NULL; // all done
760 }
762 #define ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
763 \
764 void ContiguousSpace:: \
765 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \
766 HeapWord* t; \
767 HeapWord* p = saved_mark_word(); \
768 assert(p != NULL, "expected saved mark"); \
769 \
770 const intx interval = PrefetchScanIntervalInBytes; \
771 do { \
772 t = top(); \
773 while (p < t) { \
774 Prefetch::write(p, interval); \
775 debug_only(HeapWord* prev = p); \
776 oop m = oop(p); \
777 p += m->oop_iterate(blk); \
778 } \
779 } while (t < top()); \
780 \
781 set_saved_mark_word(p); \
782 }
784 ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN)
786 #undef ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN
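// Rough usage sketch for the iterators generated above (hedged: the real
// callers live in the generation code, and the method name carries the
// closure-specific suffix, shown here as "_v"):
//
//   sp->set_saved_mark();                       // remember the current top()
//   /* ... allocations raise top() ... */
//   sp->oop_since_save_marks_iterate_v(&blk);   // visit only objects allocated
//                                               // since the mark
//
// The do/while in the macro re-reads top() because applying the closure can
// itself cause new objects to appear in this space (for example via
// promotion), so the window being scanned may grow while the loop runs.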
788 // Very general, slow implementation.
789 HeapWord* ContiguousSpace::block_start_const(const void* p) const {
790 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
791 if (p >= top()) {
792 return top();
793 } else {
794 HeapWord* last = bottom();
795 HeapWord* cur = last;
796 while (cur <= p) {
797 last = cur;
798 cur += oop(cur)->size();
799 }
800 assert(oop(last)->is_oop(), "Should be an object start");
801 return last;
802 }
803 }
805 size_t ContiguousSpace::block_size(const HeapWord* p) const {
806 assert(MemRegion(bottom(), end()).contains(p), "p not in space");
807 HeapWord* current_top = top();
808 assert(p <= current_top, "p is not a block start");
809 assert(p == current_top || oop(p)->is_oop(), "p is not a block start");
810 if (p < current_top)
811 return oop(p)->size();
812 else {
813 assert(p == current_top, "just checking");
814 return pointer_delta(end(), (HeapWord*) p);
815 }
816 }
818 // This version requires locking.
819 inline HeapWord* ContiguousSpace::allocate_impl(size_t size,
820 HeapWord* const end_value) {
821 assert(Heap_lock->owned_by_self() ||
822 (SafepointSynchronize::is_at_safepoint() &&
823 Thread::current()->is_VM_thread()),
824 "not locked");
825 HeapWord* obj = top();
826 if (pointer_delta(end_value, obj) >= size) {
827 HeapWord* new_top = obj + size;
828 set_top(new_top);
829 assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
830 return obj;
831 } else {
832 return NULL;
833 }
834 }
836 // This version is lock-free.
837 inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size,
838 HeapWord* const end_value) {
839 do {
840 HeapWord* obj = top();
841 if (pointer_delta(end_value, obj) >= size) {
842 HeapWord* new_top = obj + size;
843 HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
844 // result can be one of two:
845 // the old top value: the exchange succeeded
846 // otherwise: the new value of the top is returned.
847 if (result == obj) {
848 assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
849 return obj;
850 }
851 } else {
852 return NULL;
853 }
854 } while (true);
855 }
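// Race sketch for the CAS loop above (illustrative): two threads read the
// same top() == P and each tries to install its own new_top.
//
//   T1: cmpxchg_ptr(P + s1, top_addr(), P) -> returns P      (success: T1 owns [P, P + s1))
//   T2: cmpxchg_ptr(P + s2, top_addr(), P) -> returns P + s1 (failure: T2 retries with
//                                                             the freshly read top)
//
// The loop therefore never hands out overlapping blocks, and it terminates
// either with a successful exchange or with NULL once the remaining space is
// smaller than the request.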
857 // Requires locking.
858 HeapWord* ContiguousSpace::allocate(size_t size) {
859 return allocate_impl(size, end());
860 }
862 // Lock-free.
863 HeapWord* ContiguousSpace::par_allocate(size_t size) {
864 return par_allocate_impl(size, end());
865 }
867 void ContiguousSpace::allocate_temporary_filler(int factor) {
868 // Allocate a temporary type array, decreasing the free size by a factor of 'factor'.
869 assert(factor >= 0, "just checking");
870 size_t size = pointer_delta(end(), top());
872 // if space is full, return
873 if (size == 0) return;
875 if (factor > 0) {
876 size -= size/factor;
877 }
878 size = align_object_size(size);
880 const size_t array_header_size = typeArrayOopDesc::header_size(T_INT);
881 if (size >= (size_t)align_object_size(array_header_size)) {
882 size_t length = (size - array_header_size) * (HeapWordSize / sizeof(jint));
883 // allocate uninitialized int array
884 typeArrayOop t = (typeArrayOop) allocate(size);
885 assert(t != NULL, "allocation should succeed");
886 t->set_mark(markOopDesc::prototype());
887 t->set_klass(Universe::intArrayKlassObj());
888 t->set_length((int)length);
889 } else {
890 assert(size == CollectedHeap::min_fill_size(),
891 "size for smallest fake object doesn't match");
892 instanceOop obj = (instanceOop) allocate(size);
893 obj->set_mark(markOopDesc::prototype());
894 obj->set_klass_gap(0);
895 obj->set_klass(SystemDictionary::Object_klass());
896 }
897 }
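// Worked example for the length computation above (illustrative numbers:
// assume a 64-bit VM with HeapWordSize == 8, sizeof(jint) == 4, and an
// array_header_size of 2 words): a filler of size == 10 words gives
//
//   length = (10 - 2) * (8 / 4) = 16 ints
//
// i.e. 16 * 4 = 64 bytes = 8 words of array body, exactly the space left
// after the header.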
899 void EdenSpace::clear(bool mangle_space) {
900 ContiguousSpace::clear(mangle_space);
901 set_soft_end(end());
902 }
904 // Requires locking.
905 HeapWord* EdenSpace::allocate(size_t size) {
906 return allocate_impl(size, soft_end());
907 }
909 // Lock-free.
910 HeapWord* EdenSpace::par_allocate(size_t size) {
911 return par_allocate_impl(size, soft_end());
912 }
914 HeapWord* ConcEdenSpace::par_allocate(size_t size)
915 {
916 do {
917 // The invariant is that top() must be read before end(), because
918 // top() can't be greater than end(). If an update of _soft_end
919 // occurred between 'end_val = end();' and 'top_val = top();', top()
920 // could also have grown up to the new end() and the condition
921 // 'top_val > end_val' would be true. To enforce this load order,
922 // OrderAccess::loadload() is required after the top() read.
923 HeapWord* obj = top();
924 OrderAccess::loadload();
925 if (pointer_delta(*soft_end_addr(), obj) >= size) {
926 HeapWord* new_top = obj + size;
927 HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
928 // result can be one of two:
929 // the old top value: the exchange succeeded
930 // otherwise: the new value of the top is returned.
931 if (result == obj) {
932 assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
933 return obj;
934 }
935 } else {
936 return NULL;
937 }
938 } while (true);
939 }
942 HeapWord* OffsetTableContigSpace::initialize_threshold() {
943 return _offsets.initialize_threshold();
944 }
946 HeapWord* OffsetTableContigSpace::cross_threshold(HeapWord* start, HeapWord* end) {
947 _offsets.alloc_block(start, end);
948 return _offsets.threshold();
949 }
951 OffsetTableContigSpace::OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
952 MemRegion mr) :
953 _offsets(sharedOffsetArray, mr),
954 _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true)
955 {
956 _offsets.set_contig_space(this);
957 initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
958 }
961 class VerifyOldOopClosure : public OopClosure {
962 public:
963 oop _the_obj;
964 bool _allow_dirty;
965 void do_oop(oop* p) {
966 _the_obj->verify_old_oop(p, _allow_dirty);
967 }
968 void do_oop(narrowOop* p) {
969 _the_obj->verify_old_oop(p, _allow_dirty);
970 }
971 };
973 #define OBJ_SAMPLE_INTERVAL 0
974 #define BLOCK_SAMPLE_INTERVAL 100
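// With the values above, the verification loop below checks every object
// (the object counter is compared against OBJ_SAMPLE_INTERVAL == 0 and reset
// each time it matches), while the block offset table lookup is only
// spot-checked roughly once every BLOCK_SAMPLE_INTERVAL objects.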
976 void OffsetTableContigSpace::verify(bool allow_dirty) const {
977 HeapWord* p = bottom();
978 HeapWord* prev_p = NULL;
979 VerifyOldOopClosure blk; // Does this do anything?
980 blk._allow_dirty = allow_dirty;
981 int objs = 0;
982 int blocks = 0;
984 if (VerifyObjectStartArray) {
985 _offsets.verify();
986 }
988 while (p < top()) {
989 size_t size = oop(p)->size();
990 // For a sampling of objects in the space, find each one using the
991 // block offset table.
992 if (blocks == BLOCK_SAMPLE_INTERVAL) {
993 guarantee(p == block_start_const(p + (size/2)),
994 "check offset computation");
995 blocks = 0;
996 } else {
997 blocks++;
998 }
1000 if (objs == OBJ_SAMPLE_INTERVAL) {
1001 oop(p)->verify();
1002 blk._the_obj = oop(p);
1003 oop(p)->oop_iterate(&blk);
1004 objs = 0;
1005 } else {
1006 objs++;
1007 }
1008 prev_p = p;
1009 p += size;
1010 }
1011 guarantee(p == top(), "end of last object must match end of space");
1012 }
1014 void OffsetTableContigSpace::serialize_block_offset_array_offsets(
1015 SerializeOopClosure* soc) {
1016 _offsets.serialize(soc);
1017 }
1020 size_t TenuredSpace::allowed_dead_ratio() const {
1021 return MarkSweepDeadRatio;
1022 }
1025 size_t ContigPermSpace::allowed_dead_ratio() const {
1026 return PermMarkSweepDeadRatio;
1027 }