Tue, 29 Apr 2014 15:17:27 +0200
8042195: Introduce umbrella header orderAccess.inline.hpp.
Reviewed-by: dholmes, kvn, stefank, twisti
/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc_implementation/shared/liveRange.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/defNewGeneration.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/space.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
void SpaceMemRegionOopsIterClosure::do_oop(oop* p)       { SpaceMemRegionOopsIterClosure::do_oop_work(p); }
void SpaceMemRegionOopsIterClosure::do_oop(narrowOop* p) { SpaceMemRegionOopsIterClosure::do_oop_work(p); }

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
                                                HeapWord* top_obj) {
  if (top_obj != NULL) {
    if (_sp->block_is_obj(top_obj)) {
      if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
        if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
          // An arrayOop is starting on the dirty card - since we do exact
          // store checks for objArrays we are done.
        } else {
          // Otherwise, it is possible that the object starting on the dirty
          // card spans the entire card, and that the store happened on a
          // later card.  Figure out where the object ends.
          // Use the block_size() method of the space over which
          // the iteration is being done.  That space (e.g. CMS) may have
          // specific requirements on object sizes which will
          // be reflected in the block_size() method.
          top = top_obj + oop(top_obj)->size();
        }
      }
    } else {
      top = top_obj;
    }
  } else {
    assert(top == _sp->end(), "only case where top_obj == NULL");
  }
  return top;
}
void DirtyCardToOopClosure::walk_mem_region(MemRegion mr,
                                            HeapWord* bottom,
                                            HeapWord* top) {
  // 1. Blocks may or may not be objects.
  // 2. Even when a block_is_obj(), it may not entirely
  //    occupy the block if the block quantum is larger than
  //    the object size.
  // We can and should try to optimize by calling the non-MemRegion
  // version of oop_iterate() for all but the extremal objects
  // (for which we need to call the MemRegion version of
  // oop_iterate()).  To be done post-beta XXX
  for (; bottom < top; bottom += _sp->block_size(bottom)) {
    // As in the case of contiguous space above, we'd like to
    // just use the value returned by oop_iterate to increment the
    // current pointer; unfortunately, that won't work in CMS because
    // we'd need an interface change (it seems) to have the space
    // "adjust the object size" (for instance pad it up to its
    // block alignment or minimum block size restrictions). XXX
    if (_sp->block_is_obj(bottom) &&
        !_sp->obj_allocated_since_save_marks(oop(bottom))) {
      oop(bottom)->oop_iterate(_cl, mr);
    }
  }
}
// We get called with "mr" representing the dirty region
// that we want to process.  Because of imprecise marking,
// we may need to extend the incoming "mr" to the right,
// and scan more.  However, because we may already have
// scanned some of that extended region, we may need to
// trim its right end back so that we do not rescan what
// we (or another worker thread) have already scanned
// or are planning to scan.
void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {

  // Some collectors need to do special things whenever their dirty
  // cards are processed.  For instance, CMS must remember mutator updates
  // (i.e. dirty cards) so as to re-scan mutated objects.
  // Such work can be piggy-backed here on dirty card scanning, so as to make
  // it slightly more efficient than doing a complete non-destructive pre-scan
  // of the card table.
  MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure();
  if (pCl != NULL) {
    pCl->do_MemRegion(mr);
  }

  HeapWord* bottom = mr.start();
  HeapWord* last   = mr.last();
  HeapWord* top    = mr.end();
  HeapWord* bottom_obj;
  HeapWord* top_obj;

  assert(_precision == CardTableModRefBS::ObjHeadPreciseArray ||
         _precision == CardTableModRefBS::Precise,
         "Only ones we deal with for now.");

  assert(_precision != CardTableModRefBS::ObjHeadPreciseArray ||
         _cl->idempotent() || _last_bottom == NULL ||
         top <= _last_bottom,
         "Not decreasing");
  NOT_PRODUCT(_last_bottom = mr.start());

  bottom_obj = _sp->block_start(bottom);
  top_obj    = _sp->block_start(last);

  assert(bottom_obj <= bottom, "just checking");
  assert(top_obj    <= top,    "just checking");

  // Given what we think is the top of the memory region and
  // the start of the object at the top, get the actual
  // value of the top.
  top = get_actual_top(top, top_obj);

  // If the previous call did some part of this region, don't redo.
  if (_precision == CardTableModRefBS::ObjHeadPreciseArray &&
      _min_done != NULL &&
      _min_done < top) {
    top = _min_done;
  }

  // Top may have been reset, and in fact may be below bottom,
  // e.g. the dirty card region is entirely in a now free object
  // -- something that could happen with a concurrent sweeper.
  bottom = MIN2(bottom, top);
  MemRegion extended_mr = MemRegion(bottom, top);
  assert(bottom <= top &&
         (_precision != CardTableModRefBS::ObjHeadPreciseArray ||
          _min_done == NULL ||
          top <= _min_done),
         "overlap!");

  // Walk the region if it is not empty; otherwise there is nothing to do.
  if (!extended_mr.is_empty()) {
    walk_mem_region(extended_mr, bottom_obj, top);
  }

  // An idempotent closure might be applied in any order, so we don't
  // record a _min_done for it.
  if (!_cl->idempotent()) {
    _min_done = bottom;
  } else {
    assert(_min_done == _last_explicit_min_done,
           "Don't update _min_done for idempotent cl");
  }
}
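// An illustrative walk-through of the extend-then-trim logic above
// (addresses are made up for illustration, not taken from any real heap):
// suppose the dirty card region is [0x1000, 0x1200) and the object at
// block_start(last) runs to 0x1800.  get_actual_top() extends top to
// 0x1800; but if a previous call had already recorded _min_done == 0x1400
// (scans proceed downward, per the "Not decreasing" assert), top is pulled
// back to 0x1400 so that [0x1400, 0x1800) is not scanned a second time.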
DirtyCardToOopClosure* Space::new_dcto_cl(ExtendedOopClosure* cl,
                                          CardTableModRefBS::PrecisionStyle precision,
                                          HeapWord* boundary) {
  return new DirtyCardToOopClosure(this, cl, precision, boundary);
}
HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top,
                                               HeapWord* top_obj) {
  if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) {
    if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
      if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
        // An arrayOop is starting on the dirty card - since we do exact
        // store checks for objArrays we are done.
      } else {
        // Otherwise, it is possible that the object starting on the dirty
        // card spans the entire card, and that the store happened on a
        // later card.  Figure out where the object ends.
        assert(_sp->block_size(top_obj) == (size_t) oop(top_obj)->size(),
               "Block size and object size mismatch");
        top = top_obj + oop(top_obj)->size();
      }
    }
  } else {
    top = (_sp->toContiguousSpace())->top();
  }
  return top;
}
void Filtering_DCTOC::walk_mem_region(MemRegion mr,
                                      HeapWord* bottom,
                                      HeapWord* top) {
  // Note that this assumption won't hold if we have a concurrent
  // collector in this space, which may have freed up objects after
  // they were dirtied and before the stop-the-world GC that is
  // examining cards here.
  assert(bottom < top, "ought to be at least one obj on a dirty card.");

  if (_boundary != NULL) {
    // We have a boundary outside of which we don't want to look
    // at objects, so create a filtering closure around the
    // oop closure before walking the region.
    FilteringClosure filter(_boundary, _cl);
    walk_mem_region_with_cl(mr, bottom, top, &filter);
  } else {
    // No boundary, simply walk the heap with the oop closure.
    walk_mem_region_with_cl(mr, bottom, top, _cl);
  }
}
// We must replicate this so that the static type of "FilteringClosure"
// (see above) is apparent at the oop_iterate calls.
#define ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,        \
                                                   HeapWord* bottom,    \
                                                   HeapWord* top,       \
                                                   ClosureType* cl) {   \
  bottom += oop(bottom)->oop_iterate(cl, mr);                           \
  if (bottom < top) {                                                   \
    HeapWord* next_obj = bottom + oop(bottom)->size();                  \
    while (next_obj < top) {                                            \
      /* Bottom lies entirely below top, so we can call the */          \
      /* non-memRegion version of oop_iterate below. */                 \
      oop(bottom)->oop_iterate(cl);                                     \
      bottom = next_obj;                                                \
      next_obj = bottom + oop(bottom)->size();                          \
    }                                                                   \
    /* Last object. */                                                  \
    oop(bottom)->oop_iterate(cl, mr);                                   \
  }                                                                     \
}
// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
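// For reference, a hand-expanded sketch of the FilteringClosure
// instantiation above (illustrative only; the macro text is authoritative):
//
//   void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,
//                                                      HeapWord* bottom,
//                                                      HeapWord* top,
//                                                      FilteringClosure* cl) {
//     bottom += oop(bottom)->oop_iterate(cl, mr);
//     ...
//   }
//
// With the static type spelled out like this, the oop_iterate() calls can
// bind to the overload specialized for FilteringClosure rather than going
// through the generic ExtendedOopClosure interface.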
DirtyCardToOopClosure*
ContiguousSpace::new_dcto_cl(ExtendedOopClosure* cl,
                             CardTableModRefBS::PrecisionStyle precision,
                             HeapWord* boundary) {
  return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
}
void Space::initialize(MemRegion mr,
                       bool clear_space,
                       bool mangle_space) {
  HeapWord* bottom = mr.start();
  HeapWord* end    = mr.end();
  assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),
         "invalid space boundaries");
  set_bottom(bottom);
  set_end(end);
  if (clear_space) clear(mangle_space);
}

void Space::clear(bool mangle_space) {
  if (ZapUnusedHeapArea && mangle_space) {
    mangle_unused_area();
  }
}
ContiguousSpace::ContiguousSpace(): CompactibleSpace(), _top(NULL),
    _concurrent_iteration_safe_limit(NULL) {
  _mangler = new GenSpaceMangler(this);
}

ContiguousSpace::~ContiguousSpace() {
  delete _mangler;
}

void ContiguousSpace::initialize(MemRegion mr,
                                 bool clear_space,
                                 bool mangle_space)
{
  CompactibleSpace::initialize(mr, clear_space, mangle_space);
  set_concurrent_iteration_safe_limit(top());
}

void ContiguousSpace::clear(bool mangle_space) {
  set_top(bottom());
  set_saved_mark();
  CompactibleSpace::clear(mangle_space);
}

bool ContiguousSpace::is_in(const void* p) const {
  return _bottom <= p && p < _top;
}

bool ContiguousSpace::is_free_block(const HeapWord* p) const {
  return p >= _top;
}

void OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.initialize_threshold();
}
void OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}
void OffsetTableContigSpace::set_end(HeapWord* new_end) {
  // Space should not advertise an increase in size
  // until after the underlying offset table has been enlarged.
  _offsets.resize(pointer_delta(new_end, bottom()));
  Space::set_end(new_end);
}
#ifndef PRODUCT

void ContiguousSpace::set_top_for_allocations(HeapWord* v) {
  mangler()->set_top_for_allocations(v);
}
void ContiguousSpace::set_top_for_allocations() {
  mangler()->set_top_for_allocations(top());
}
void ContiguousSpace::check_mangled_unused_area(HeapWord* limit) {
  mangler()->check_mangled_unused_area(limit);
}

void ContiguousSpace::check_mangled_unused_area_complete() {
  mangler()->check_mangled_unused_area_complete();
}
// Mangle only the unused space that has not previously
// been mangled and that has not been allocated since being
// mangled.
void ContiguousSpace::mangle_unused_area() {
  mangler()->mangle_unused_area();
}
void ContiguousSpace::mangle_unused_area_complete() {
  mangler()->mangle_unused_area_complete();
}
void ContiguousSpace::mangle_region(MemRegion mr) {
  // Although this method uses SpaceMangler::mangle_region(), which
  // is not specific to a space, when the ContiguousSpace version
  // is called it is always with regard to a space, so this
  // bounds checking is appropriate.
  MemRegion space_mr(bottom(), end());
  assert(space_mr.contains(mr), "Mangling outside space");
  SpaceMangler::mangle_region(mr);
}
#endif  // NOT_PRODUCT
void CompactibleSpace::initialize(MemRegion mr,
                                  bool clear_space,
                                  bool mangle_space) {
  Space::initialize(mr, clear_space, mangle_space);
  set_compaction_top(bottom());
  _next_compaction_space = NULL;
}

void CompactibleSpace::clear(bool mangle_space) {
  Space::clear(mangle_space);
  _compaction_top = bottom();
}
HeapWord* CompactibleSpace::forward(oop q, size_t size,
                                    CompactPoint* cp, HeapWord* compact_top) {
  // q is alive
  // First check if we should switch compaction space
  assert(this == cp->space, "'this' should be current compaction space.");
  size_t compaction_max_size = pointer_delta(end(), compact_top);
  while (size > compaction_max_size) {
    // switch to next compaction space
    cp->space->set_compaction_top(compact_top);
    cp->space = cp->space->next_compaction_space();
    if (cp->space == NULL) {
      cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
      assert(cp->gen != NULL, "compaction must succeed");
      cp->space = cp->gen->first_compaction_space();
      assert(cp->space != NULL, "generation must have a first compaction space");
    }
    compact_top = cp->space->bottom();
    cp->space->set_compaction_top(compact_top);
    cp->threshold = cp->space->initialize_threshold();
    compaction_max_size = pointer_delta(cp->space->end(), compact_top);
  }

  // store the forwarding pointer into the mark word
  if ((HeapWord*)q != compact_top) {
    q->forward_to(oop(compact_top));
    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
  } else {
    // if the object isn't moving we can just set the mark to the default
    // mark and handle it specially later on.
    q->init_mark();
    assert(q->forwardee() == NULL, "should be forwarded to NULL");
  }

  compact_top += size;

  // We need to update the offset table so that the beginnings of objects can be
  // found during scavenge.  Note that we are updating the offset table based on
  // where the object will be once the compaction phase finishes.
  if (compact_top > cp->threshold) {
    cp->threshold =
      cp->space->cross_threshold(compact_top - size, compact_top);
  }
  return compact_top;
}
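// An informal summary of the contract above: on return, either
// q->forwardee() points at the destination address in the (possibly
// switched) compaction space, or, when the object does not move (q was
// already at compact_top), its mark has been re-initialized and
// q->forwardee() is NULL.  In both cases the returned compact_top has
// advanced past the forwarding decision point by exactly "size" words.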
bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words,
                                        HeapWord* q, size_t deadlength) {
  if (allowed_deadspace_words >= deadlength) {
    allowed_deadspace_words -= deadlength;
    CollectedHeap::fill_with_object(q, deadlength);
    oop(q)->set_mark(oop(q)->mark()->set_marked());
    assert((int) deadlength == oop(q)->size(), "bad filler object size");
    // Recall that we required "q == compaction_top".
    return true;
  } else {
    allowed_deadspace_words = 0;
    return false;
  }
}
#define block_is_always_obj(q) true
#define obj_size(q) oop(q)->size()
#define adjust_obj_size(s) s

void CompactibleSpace::prepare_for_compaction(CompactPoint* cp) {
  SCAN_AND_FORWARD(cp, end, block_is_obj, block_size);
}

// Faster object search.
void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
  SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size);
}
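// The two SCAN_AND_FORWARD calls above differ only in the macro arguments:
// a general CompactibleSpace must consult block_is_obj() and block_size()
// because a block may be a free chunk rather than an object, while a
// ContiguousSpace is densely packed with objects, so block_is_always_obj(q)
// is simply "true" and obj_size(q) is oop(q)->size() (see the #defines
// above).  That is what makes the ContiguousSpace version the faster
// object search.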
void Space::adjust_pointers() {
  // Adjust all the interior pointers to point at the new locations of objects.
  // Used by MarkSweep::mark_sweep_phase3().

  // First check to see if there is any work to be done.
  if (used() == 0) {
    return;   // Nothing to do.
  }

  // Otherwise...
  HeapWord* q = bottom();
  HeapWord* t = end();

  debug_only(HeapWord* prev_q = NULL);
  while (q < t) {
    if (oop(q)->is_gc_marked()) {
      // q is alive

      // point all the oops to the new location
      size_t size = oop(q)->adjust_pointers();

      debug_only(prev_q = q);

      q += size;
    } else {
      // q is not a live object, but we're not in a compactible space,
      // so we don't have live ranges.
      debug_only(prev_q = q);
      q += block_size(q);
      assert(q > prev_q, "we should be moving forward through memory");
    }
  }
  assert(q == t, "just checking");
}
void CompactibleSpace::adjust_pointers() {
  // First check if there is any work to do.
  if (used() == 0) {
    return;   // Nothing to do.
  }

  SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
}
void CompactibleSpace::compact() {
  SCAN_AND_COMPACT(obj_size);
}
void Space::print_short() const { print_short_on(tty); }

void Space::print_short_on(outputStream* st) const {
  st->print(" space " SIZE_FORMAT "K, %3d%% used", capacity() / K,
            (int) ((double) used() * 100 / capacity()));
}

void Space::print() const { print_on(tty); }

void Space::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               bottom(), end());
}

void ContiguousSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               bottom(), top(), end());
}

void OffsetTableContigSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
               INTPTR_FORMAT ", " INTPTR_FORMAT ")",
               bottom(), top(), _offsets.threshold(), end());
}
void ContiguousSpace::verify() const {
  HeapWord* p = bottom();
  HeapWord* t = top();
  HeapWord* prev_p = NULL;
  while (p < t) {
    oop(p)->verify();
    prev_p = p;
    p += oop(p)->size();
  }
  guarantee(p == top(), "end of last object must match end of space");
  if (top() != end()) {
    guarantee(top() == block_start_const(end()-1) &&
              top() == block_start_const(top()),
              "top should be start of unallocated block, if it exists");
  }
}
void Space::oop_iterate(ExtendedOopClosure* blk) {
  ObjectToOopClosure blk2(blk);
  object_iterate(&blk2);
}

HeapWord* Space::object_iterate_careful(ObjectClosureCareful* cl) {
  guarantee(false, "NYI");
  return bottom();
}

HeapWord* Space::object_iterate_careful_m(MemRegion mr,
                                          ObjectClosureCareful* cl) {
  guarantee(false, "NYI");
  return bottom();
}
void Space::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
  assert(!mr.is_empty(), "Should be non-empty");
  // We use MemRegion(bottom(), end()) rather than used_region() below
  // because the two are not necessarily equal for some kinds of
  // spaces, in particular, certain kinds of free list spaces.
  // We could use the more complicated but more precise:
  // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
  // but the slight imprecision seems acceptable in the assertion check.
  assert(MemRegion(bottom(), end()).contains(mr),
         "Should be within used space");
  HeapWord* prev = cl->previous();   // max address from last time
  if (prev >= mr.end()) { // nothing to do
    return;
  }
  // This assert will not work when we go from cms space to perm
  // space, and use same closure. Easy fix deferred for later. XXX YSR
  // assert(prev == NULL || contains(prev), "Should be within space");

  bool last_was_obj_array = false;
  HeapWord *blk_start_addr, *region_start_addr;
  if (prev > mr.start()) {
    region_start_addr = prev;
    blk_start_addr    = prev;
    // The previous invocation may have pushed "prev" beyond the
    // last allocated block yet there may still be blocks
    // in this region due to a particular coalescing policy.
    // Relax the assertion so that the case where the unallocated
    // block is maintained and "prev" is beyond the unallocated
    // block does not cause the assertion to fire.
    assert((BlockOffsetArrayUseUnallocatedBlock &&
            (!is_in(prev))) ||
           (blk_start_addr == block_start(region_start_addr)), "invariant");
  } else {
    region_start_addr = mr.start();
    blk_start_addr    = block_start(region_start_addr);
  }
  HeapWord* region_end_addr = mr.end();
  MemRegion derived_mr(region_start_addr, region_end_addr);
  while (blk_start_addr < region_end_addr) {
    const size_t size = block_size(blk_start_addr);
    if (block_is_obj(blk_start_addr)) {
      last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
    } else {
      last_was_obj_array = false;
    }
    blk_start_addr += size;
  }
  if (!last_was_obj_array) {
    assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
           "Should be within (closed) used space");
    assert(blk_start_addr > prev, "Invariant");
    cl->set_previous(blk_start_addr); // min address for next time
  }
}
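// A small usage sketch of the watermark protocol above (illustrative only,
// with a hypothetical closure): the closure remembers the highest address
// already processed, so repeated calls over overlapping dirty regions
// resume where the previous call stopped:
//
//   UpwardsObjectClosure* cl = ...;   // cl->previous() below mr1 initially
//   sp->object_iterate_mem(mr1, cl);  // advances cl->previous()
//   sp->object_iterate_mem(mr2, cl);  // skips the part already covered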
bool Space::obj_is_alive(const HeapWord* p) const {
  assert(block_is_obj(p), "The address should point to an object");
  return true;
}
void ContiguousSpace::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
  assert(!mr.is_empty(), "Should be non-empty");
  assert(used_region().contains(mr), "Should be within used space");
  HeapWord* prev = cl->previous();   // max address from last time
  if (prev >= mr.end()) { // nothing to do
    return;
  }
  // See comment above (in more general method above) in case you
  // happen to use this method.
  assert(prev == NULL || is_in_reserved(prev), "Should be within space");

  bool last_was_obj_array = false;
  HeapWord *obj_start_addr, *region_start_addr;
  if (prev > mr.start()) {
    region_start_addr = prev;
    obj_start_addr    = prev;
    assert(obj_start_addr == block_start(region_start_addr), "invariant");
  } else {
    region_start_addr = mr.start();
    obj_start_addr    = block_start(region_start_addr);
  }
  HeapWord* region_end_addr = mr.end();
  MemRegion derived_mr(region_start_addr, region_end_addr);
  while (obj_start_addr < region_end_addr) {
    oop obj = oop(obj_start_addr);
    const size_t size = obj->size();
    last_was_obj_array = cl->do_object_bm(obj, derived_mr);
    obj_start_addr += size;
  }
  if (!last_was_obj_array) {
    assert((bottom() <= obj_start_addr) && (obj_start_addr <= end()),
           "Should be within (closed) used space");
    assert(obj_start_addr > prev, "Invariant");
    cl->set_previous(obj_start_addr); // min address for next time
  }
}
#if INCLUDE_ALL_GCS
#define ContigSpace_PAR_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)          \
                                                                             \
  void ContiguousSpace::par_oop_iterate(MemRegion mr, OopClosureType* blk) { \
    HeapWord* obj_addr = mr.start();                                         \
    HeapWord* t = mr.end();                                                  \
    while (obj_addr < t) {                                                   \
      assert(oop(obj_addr)->is_oop(), "Should be an oop");                   \
      obj_addr += oop(obj_addr)->oop_iterate(blk);                           \
    }                                                                        \
  }

  ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DEFN)

#undef ContigSpace_PAR_OOP_ITERATE_DEFN
#endif // INCLUDE_ALL_GCS
void ContiguousSpace::oop_iterate(ExtendedOopClosure* blk) {
  if (is_empty()) return;
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();
  // Could call object_iterate(), but this is easier.
  while (obj_addr < t) {
    obj_addr += oop(obj_addr)->oop_iterate(blk);
  }
}
void ContiguousSpace::oop_iterate(MemRegion mr, ExtendedOopClosure* blk) {
  if (is_empty()) {
    return;
  }
  MemRegion cur = MemRegion(bottom(), top());
  mr = mr.intersection(cur);
  if (mr.is_empty()) {
    return;
  }
  if (mr.equals(cur)) {
    oop_iterate(blk);
    return;
  }
  assert(mr.end() <= top(), "just took an intersection above");
  HeapWord* obj_addr = block_start(mr.start());
  HeapWord* t = mr.end();

  // Handle first object specially.
  oop obj = oop(obj_addr);
  SpaceMemRegionOopsIterClosure smr_blk(blk, mr);
  obj_addr += obj->oop_iterate(&smr_blk);
  while (obj_addr < t) {
    oop obj = oop(obj_addr);
    assert(obj->is_oop(), "expected an oop");
    obj_addr += obj->size();
    // If "obj_addr" is not greater than top, then the
    // entire object "obj" is within the region.
    if (obj_addr <= t) {
      obj->oop_iterate(blk);
    } else {
      // "obj" extends beyond end of region
      obj->oop_iterate(&smr_blk);
      break;
    }
  }
}
void ContiguousSpace::object_iterate(ObjectClosure* blk) {
  if (is_empty()) return;
  WaterMark bm = bottom_mark();
  object_iterate_from(bm, blk);
}

// For a contiguous space object_iterate() and safe_object_iterate()
// are the same.
void ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
  object_iterate(blk);
}
void ContiguousSpace::object_iterate_from(WaterMark mark, ObjectClosure* blk) {
  assert(mark.space() == this, "Mark does not match space");
  HeapWord* p = mark.point();
  while (p < top()) {
    blk->do_object(oop(p));
    p += oop(p)->size();
  }
}

HeapWord*
ContiguousSpace::object_iterate_careful(ObjectClosureCareful* blk) {
  HeapWord* limit = concurrent_iteration_safe_limit();
  assert(limit <= top(), "sanity check");
  for (HeapWord* p = bottom(); p < limit;) {
    size_t size = blk->do_object_careful(oop(p));
    if (size == 0) {
      return p;  // failed at p
    } else {
      p += size;
    }
  }
  return NULL; // all done
}
#define ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
                                                                         \
void ContiguousSpace::                                                   \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {           \
  HeapWord* t;                                                           \
  HeapWord* p = saved_mark_word();                                       \
  assert(p != NULL, "expected saved mark");                              \
                                                                         \
  const intx interval = PrefetchScanIntervalInBytes;                     \
  do {                                                                   \
    t = top();                                                           \
    while (p < t) {                                                      \
      Prefetch::write(p, interval);                                      \
      debug_only(HeapWord* prev = p);                                    \
      oop m = oop(p);                                                    \
      p += m->oop_iterate(blk);                                          \
    }                                                                    \
  } while (t < top());                                                   \
                                                                         \
  set_saved_mark_word(p);                                                \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN)

#undef ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN
// Very general, slow implementation.
HeapWord* ContiguousSpace::block_start_const(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         err_msg("p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
                 p, bottom(), end()));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size();
    }
    assert(oop(last)->is_oop(),
           err_msg(PTR_FORMAT " should be an object start", last));
    return last;
  }
}
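// Complexity note: the loop above starts at bottom() and walks object by
// object, so this lookup is linear in the number of objects below p.
// Spaces backed by a block offset table (e.g. OffsetTableContigSpace
// below) can answer the same query in near-constant time.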
size_t ContiguousSpace::block_size(const HeapWord* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         err_msg("p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
                 p, bottom(), end()));
  HeapWord* current_top = top();
  assert(p <= current_top,
         err_msg("p > current top - p: " PTR_FORMAT ", current top: " PTR_FORMAT,
                 p, current_top));
  assert(p == current_top || oop(p)->is_oop(),
         err_msg("p (" PTR_FORMAT ") is not a block start - "
                 "current_top: " PTR_FORMAT ", is_oop: %s",
                 p, current_top, BOOL_TO_STR(oop(p)->is_oop())));
  if (p < current_top) {
    return oop(p)->size();
  } else {
    assert(p == current_top, "just checking");
    return pointer_delta(end(), (HeapWord*) p);
  }
}
// This version requires locking.
inline HeapWord* ContiguousSpace::allocate_impl(size_t size,
                                                HeapWord* const end_value) {
  // In G1 there are places where a GC worker can allocate into a
  // region using this serial allocation code without being prone to a
  // race with other GC workers (we ensure that no other GC worker can
  // access the same region at the same time). So the assert below is
  // too strong in the case of G1.
  assert(Heap_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() &&
          (Thread::current()->is_VM_thread() || UseG1GC)),
         "not locked");
  HeapWord* obj = top();
  if (pointer_delta(end_value, obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
    return obj;
  } else {
    return NULL;
  }
}
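// In words: this is a classic serial bump-pointer allocation.  A minimal
// equivalent sketch (assuming the locking described above is in effect):
//
//   HeapWord* obj = top();
//   if (obj + size <= end_value) { set_top(obj + size); return obj; }
//   return NULL;
//
// The real code compares via pointer_delta() rather than "obj + size" so
// the check cannot be confused by pointer arithmetic past end_value.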
// This version is lock-free.
inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size,
                                                    HeapWord* const end_value) {
  do {
    HeapWord* obj = top();
    if (pointer_delta(end_value, obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // "result" can be one of two values:
      //   the old top value, in which case the exchange succeeded, or
      //   the unexpected current value of top, in which case we retry.
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}
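// The loop above is the standard lock-free bump-pointer pattern: read top,
// compute new_top, and try to install it with a compare-and-swap; if
// another thread won the race (result != obj), retry with the freshly
// observed top.  Since concurrent allocators only ever advance top here,
// the retry loop terminates once the space fills up and the
// pointer_delta() check fails.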
// Requires locking.
HeapWord* ContiguousSpace::allocate(size_t size) {
  return allocate_impl(size, end());
}

// Lock-free.
HeapWord* ContiguousSpace::par_allocate(size_t size) {
  return par_allocate_impl(size, end());
}
void ContiguousSpace::allocate_temporary_filler(int factor) {
  // Allocate a temporary type array (or a minimal object) as filler,
  // leaving roughly 1/factor of the current free space unallocated.
  assert(factor >= 0, "just checking");
  size_t size = pointer_delta(end(), top());

  // if space is full, return
  if (size == 0) return;

  if (factor > 0) {
    size -= size/factor;
  }
  size = align_object_size(size);

  const size_t array_header_size = typeArrayOopDesc::header_size(T_INT);
  if (size >= (size_t)align_object_size(array_header_size)) {
    size_t length = (size - array_header_size) * (HeapWordSize / sizeof(jint));
    // allocate uninitialized int array
    typeArrayOop t = (typeArrayOop) allocate(size);
    assert(t != NULL, "allocation should succeed");
    t->set_mark(markOopDesc::prototype());
    t->set_klass(Universe::intArrayKlassObj());
    t->set_length((int)length);
  } else {
    assert(size == CollectedHeap::min_fill_size(),
           "size for smallest fake object doesn't match");
    instanceOop obj = (instanceOop) allocate(size);
    obj->set_mark(markOopDesc::prototype());
    obj->set_klass_gap(0);
    obj->set_klass(SystemDictionary::Object_klass());
  }
}
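// Worked example of the sizing above (illustrative, assuming a 64-bit VM
// with HeapWordSize == 8, sizeof(jint) == 4, and an int-array header of,
// say, 2 words): with 100 words free and factor == 4, size becomes
// 100 - 25 = 75 words, and the filler array is given
// length = (75 - 2) * (8 / 4) = 146 ints.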
void EdenSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  set_soft_end(end());
}

// Requires locking.
HeapWord* EdenSpace::allocate(size_t size) {
  return allocate_impl(size, soft_end());
}

// Lock-free.
HeapWord* EdenSpace::par_allocate(size_t size) {
  return par_allocate_impl(size, soft_end());
}
HeapWord* ConcEdenSpace::par_allocate(size_t size)
{
  do {
    // The invariant is that top() must be read before soft_end(),
    // because top() can never be greater than soft_end().  If the loads
    // were done in the other order and _soft_end were updated between
    // 'end_val = end();' and 'top_val = top();', top() could have grown
    // up to the new end() and the inconsistent state 'top_val > end_val'
    // would be observed.  The OrderAccess::loadload() after the top()
    // read enforces that load order.
    HeapWord* obj = top();
    OrderAccess::loadload();
    if (pointer_delta(*soft_end_addr(), obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // "result" can be one of two values:
      //   the old top value, in which case the exchange succeeded, or
      //   the unexpected current value of top, in which case we retry.
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}
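// A hedged sketch of the reordering the loadload() above rules out
// (thread interleaving shown as comments; not real code):
//
//   allocator                          concurrent updater
//   ---------                          ------------------
//   end_val = soft_end();  // stale
//                                      _soft_end = new_end;  // grows
//   top_val = top();       // may now exceed end_val
//
// With the stale pair, 'top_val > end_val' can hold even though top never
// passes the real soft end.  Reading top() first, fenced by
// OrderAccess::loadload() before the _soft_end load, prevents this.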
HeapWord* OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* OffsetTableContigSpace::cross_threshold(HeapWord* start, HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}

OffsetTableContigSpace::OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                                               MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true)
{
  _offsets.set_contig_space(this);
  initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
}
#define OBJ_SAMPLE_INTERVAL 0
#define BLOCK_SAMPLE_INTERVAL 100

void OffsetTableContigSpace::verify() const {
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  int objs = 0;
  int blocks = 0;

  if (VerifyObjectStartArray) {
    _offsets.verify();
  }

  while (p < top()) {
    size_t size = oop(p)->size();
    // For a sampling of objects in the space, find it using the
    // block offset table.
    if (blocks == BLOCK_SAMPLE_INTERVAL) {
      guarantee(p == block_start_const(p + (size/2)),
                "check offset computation");
      blocks = 0;
    } else {
      blocks++;
    }

    if (objs == OBJ_SAMPLE_INTERVAL) {
      oop(p)->verify();
      objs = 0;
    } else {
      objs++;
    }
    prev_p = p;
    p += size;
  }
  guarantee(p == top(), "end of last object must match end of space");
}
size_t TenuredSpace::allowed_dead_ratio() const {
  return MarkSweepDeadRatio;
}