src/share/vm/memory/space.cpp

author:      stefank
date:        Tue, 24 Jun 2014 16:20:15 +0200
changeset:   6982:4c1b88a53c74
parent:      6981:ff1e37e7eb83
child:       6990:1526a938e670
permissions: -rw-r--r--

8046670: Make CMS metadata aware closures applicable for other collectors
Reviewed-by: ehelin, mgerdin

duke@435 1 /*
drchase@6680 2 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
duke@435 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
duke@435 4 *
duke@435 5 * This code is free software; you can redistribute it and/or modify it
duke@435 6 * under the terms of the GNU General Public License version 2 only, as
duke@435 7 * published by the Free Software Foundation.
duke@435 8 *
duke@435 9 * This code is distributed in the hope that it will be useful, but WITHOUT
duke@435 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
duke@435 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
duke@435 12 * version 2 for more details (a copy is included in the LICENSE file that
duke@435 13 * accompanied this code).
duke@435 14 *
duke@435 15 * You should have received a copy of the GNU General Public License version
duke@435 16 * 2 along with this work; if not, write to the Free Software Foundation,
duke@435 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
duke@435 18 *
trims@1907 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
trims@1907 20 * or visit www.oracle.com if you need additional information or have any
trims@1907 21 * questions.
duke@435 22 *
duke@435 23 */
duke@435 24
stefank@2314 25 #include "precompiled.hpp"
stefank@2314 26 #include "classfile/systemDictionary.hpp"
stefank@2314 27 #include "classfile/vmSymbols.hpp"
stefank@2314 28 #include "gc_implementation/shared/liveRange.hpp"
stefank@2314 29 #include "gc_implementation/shared/markSweep.hpp"
stefank@2314 30 #include "gc_implementation/shared/spaceDecorator.hpp"
stefank@2314 31 #include "memory/blockOffsetTable.inline.hpp"
stefank@2314 32 #include "memory/defNewGeneration.hpp"
stefank@2314 33 #include "memory/genCollectedHeap.hpp"
stefank@2314 34 #include "memory/space.hpp"
stefank@2314 35 #include "memory/space.inline.hpp"
stefank@2314 36 #include "memory/universe.inline.hpp"
stefank@2314 37 #include "oops/oop.inline.hpp"
stefank@2314 38 #include "oops/oop.inline2.hpp"
stefank@2314 39 #include "runtime/java.hpp"
goetz@6912 40 #include "runtime/prefetch.inline.hpp"
goetz@6911 41 #include "runtime/orderAccess.inline.hpp"
stefank@2314 42 #include "runtime/safepoint.hpp"
stefank@2314 43 #include "utilities/copy.hpp"
stefank@2314 44 #include "utilities/globalDefinitions.hpp"
jprovino@4542 45 #include "utilities/macros.hpp"
duke@435 46
drchase@6680 47 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
drchase@6680 48
duke@435 49 HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
duke@435 50 HeapWord* top_obj) {
duke@435 51 if (top_obj != NULL) {
duke@435 52 if (_sp->block_is_obj(top_obj)) {
duke@435 53 if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
duke@435 54 if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
duke@435 55 // An arrayOop is starting on the dirty card - since we do exact
duke@435 56 // store checks for objArrays we are done.
duke@435 57 } else {
duke@435 58 // Otherwise, it is possible that the object starting on the dirty
duke@435 59 // card spans the entire card, and that the store happened on a
duke@435 60 // later card. Figure out where the object ends.
duke@435 61 // Use the block_size() method of the space over which
duke@435 62 // the iteration is being done. That space (e.g. CMS) may have
duke@435 63 // specific requirements on object sizes which will
duke@435 64 // be reflected in the block_size() method.
duke@435 65 top = top_obj + oop(top_obj)->size();
duke@435 66 }
duke@435 67 }
duke@435 68 } else {
duke@435 69 top = top_obj;
duke@435 70 }
duke@435 71 } else {
duke@435 72 assert(top == _sp->end(), "only case where top_obj == NULL");
duke@435 73 }
duke@435 74 return top;
duke@435 75 }
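
// A minimal standalone sketch (not part of the original file) of the
// spanning-object case handled above: when the object starting on the
// dirty card is not an array, the scanned top is pushed out to the end
// of that object. 'Word', 'is_array' and 'obj_size_words' are
// hypothetical stand-ins for HeapWord* arithmetic and oop introspection.
#include <cstddef>

typedef size_t* Word;  // stand-in for HeapWord*

static Word toy_actual_top(Word top, Word top_obj, bool is_array,
                           size_t obj_size_words) {
  if (top_obj != NULL && !is_array) {
    return top_obj + obj_size_words;  // the object may span past 'top'
  }
  return top;  // arrays get exact store checks, so 'top' is already right
}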
duke@435 76
duke@435 77 void DirtyCardToOopClosure::walk_mem_region(MemRegion mr,
duke@435 78 HeapWord* bottom,
duke@435 79 HeapWord* top) {
duke@435 80 // 1. Blocks may or may not be objects.
duke@435 81 // 2. Even when a block_is_obj(), it may not entirely
duke@435 82 // occupy the block if the block quantum is larger than
duke@435 83 // the object size.
duke@435 84 // We can and should try to optimize by calling the non-MemRegion
duke@435 85 // version of oop_iterate() for all but the extremal objects
duke@435 86 // (for which we need to call the MemRegion version of
duke@435 87 // oop_iterate()). To be done post-beta. XXX
duke@435 88 for (; bottom < top; bottom += _sp->block_size(bottom)) {
duke@435 89 // As in the case of contiguous space above, we'd like to
duke@435 90 // just use the value returned by oop_iterate to increment the
duke@435 91 // current pointer; unfortunately, that won't work in CMS because
duke@435 92 // we'd need an interface change (it seems) to have the space
duke@435 93 // "adjust the object size" (for instance pad it up to its
duke@435 94 // block alignment or minimum block size restrictions). XXX
duke@435 95 if (_sp->block_is_obj(bottom) &&
duke@435 96 !_sp->obj_allocated_since_save_marks(oop(bottom))) {
duke@435 97 oop(bottom)->oop_iterate(_cl, mr);
duke@435 98 }
duke@435 99 }
duke@435 100 }
duke@435 101
ysr@2889 102 // We get called with "mr" representing the dirty region
ysr@2889 103 // that we want to process. Because of imprecise marking,
ysr@2889 104 // we may need to extend the incoming "mr" to the right,
ysr@2889 105 // and scan more. However, because we may already have
ysr@2889 106 // scanned some of that extended region, we may need to
ysr@2889 107 // trim its right end back so we do not scan what
ysr@2889 108 // we (or another worker thread) may already have scanned
ysr@2889 109 // or are planning to scan.
duke@435 110 void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {
duke@435 111
duke@435 112 // Some collectors need to do special things whenever their dirty
duke@435 113 // cards are processed. For instance, CMS must remember mutator updates
duke@435 114 // (i.e. dirty cards) so as to re-scan mutated objects.
duke@435 115 // Such work can be piggy-backed here on dirty card scanning, so as to make
duke@435 116 // it slightly more efficient than doing a complete non-destructive pre-scan
duke@435 117 // of the card table.
duke@435 118 MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure();
duke@435 119 if (pCl != NULL) {
duke@435 120 pCl->do_MemRegion(mr);
duke@435 121 }
duke@435 122
duke@435 123 HeapWord* bottom = mr.start();
duke@435 124 HeapWord* last = mr.last();
duke@435 125 HeapWord* top = mr.end();
duke@435 126 HeapWord* bottom_obj;
duke@435 127 HeapWord* top_obj;
duke@435 128
duke@435 129 assert(_precision == CardTableModRefBS::ObjHeadPreciseArray ||
duke@435 130 _precision == CardTableModRefBS::Precise,
duke@435 131 "Only ones we deal with for now.");
duke@435 132
duke@435 133 assert(_precision != CardTableModRefBS::ObjHeadPreciseArray ||
ysr@777 134 _cl->idempotent() || _last_bottom == NULL ||
duke@435 135 top <= _last_bottom,
duke@435 136 "Not decreasing");
duke@435 137 NOT_PRODUCT(_last_bottom = mr.start());
duke@435 138
duke@435 139 bottom_obj = _sp->block_start(bottom);
duke@435 140 top_obj = _sp->block_start(last);
duke@435 141
duke@435 142 assert(bottom_obj <= bottom, "just checking");
duke@435 143 assert(top_obj <= top, "just checking");
duke@435 144
duke@435 145 // Given what we think is the top of the memory region and
duke@435 146 // the start of the object at the top, get the actual
duke@435 147 // value of the top.
duke@435 148 top = get_actual_top(top, top_obj);
duke@435 149
duke@435 150 // If the previous call did some part of this region, don't redo.
duke@435 151 if (_precision == CardTableModRefBS::ObjHeadPreciseArray &&
duke@435 152 _min_done != NULL &&
duke@435 153 _min_done < top) {
duke@435 154 top = _min_done;
duke@435 155 }
duke@435 156
duke@435 157 // Top may have been reset, and in fact may be below bottom,
duke@435 158 // e.g. the dirty card region is entirely in a now free object
duke@435 159 // -- something that could happen with a concurrent sweeper.
duke@435 160 bottom = MIN2(bottom, top);
ysr@2889 161 MemRegion extended_mr = MemRegion(bottom, top);
duke@435 162 assert(bottom <= top &&
duke@435 163 (_precision != CardTableModRefBS::ObjHeadPreciseArray ||
duke@435 164 _min_done == NULL ||
duke@435 165 top <= _min_done),
duke@435 166 "overlap!");
duke@435 167
duke@435 168 // Walk the region if it is not empty; otherwise there is nothing to do.
ysr@2889 169 if (!extended_mr.is_empty()) {
ysr@2889 170 walk_mem_region(extended_mr, bottom_obj, top);
duke@435 171 }
duke@435 172
ysr@777 173 // An idempotent closure might be applied in any order, so we don't
ysr@777 174 // record a _min_done for it.
ysr@777 175 if (!_cl->idempotent()) {
ysr@777 176 _min_done = bottom;
ysr@777 177 } else {
ysr@777 178 assert(_min_done == _last_explicit_min_done,
ysr@777 179 "Don't update _min_done for idempotent cl");
ysr@777 180 }
duke@435 181 }
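
// A standalone sketch (hypothetical names) of the extend-then-trim dance
// performed by do_MemRegion() above: the dirty region is first extended
// to the right so it covers the object spanning its end, then trimmed
// back to _min_done so work claimed by a previous call is not repeated;
// finally bottom is clamped because top may drop below it.
#include <algorithm>
#include <cstddef>

struct ToyRegion { size_t start; size_t end; };  // [start, end) in words

static ToyRegion toy_plan_scan(ToyRegion dirty, size_t spanning_obj_end,
                               size_t min_done, bool have_min_done) {
  size_t top = std::max(dirty.end, spanning_obj_end);  // extend right
  if (have_min_done && min_done < top) {
    top = min_done;                  // trim off what was already scanned
  }
  size_t bottom = std::min(dirty.start, top);  // clamp, as with MIN2 above
  ToyRegion planned = { bottom, top };
  return planned;
}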
duke@435 182
coleenp@4037 183 DirtyCardToOopClosure* Space::new_dcto_cl(ExtendedOopClosure* cl,
duke@435 184 CardTableModRefBS::PrecisionStyle precision,
duke@435 185 HeapWord* boundary) {
duke@435 186 return new DirtyCardToOopClosure(this, cl, precision, boundary);
duke@435 187 }
duke@435 188
duke@435 189 HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top,
duke@435 190 HeapWord* top_obj) {
duke@435 191 if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) {
duke@435 192 if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
duke@435 193 if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
duke@435 194 // An arrayOop is starting on the dirty card - since we do exact
duke@435 195 // store checks for objArrays we are done.
duke@435 196 } else {
duke@435 197 // Otherwise, it is possible that the object starting on the dirty
duke@435 198 // card spans the entire card, and that the store happened on a
duke@435 199 // later card. Figure out where the object ends.
duke@435 200 assert(_sp->block_size(top_obj) == (size_t) oop(top_obj)->size(),
duke@435 201 "Block size and object size mismatch");
duke@435 202 top = top_obj + oop(top_obj)->size();
duke@435 203 }
duke@435 204 }
duke@435 205 } else {
duke@435 206 top = (_sp->toContiguousSpace())->top();
duke@435 207 }
duke@435 208 return top;
duke@435 209 }
duke@435 210
duke@435 211 void Filtering_DCTOC::walk_mem_region(MemRegion mr,
duke@435 212 HeapWord* bottom,
duke@435 213 HeapWord* top) {
duke@435 214 // Note that this assumption won't hold if we have a concurrent
duke@435 215 // collector in this space, which may have freed up objects after
duke@435 216 // they were dirtied and before the stop-the-world GC that is
duke@435 217 // examining cards here.
duke@435 218 assert(bottom < top, "ought to be at least one obj on a dirty card.");
duke@435 219
duke@435 220 if (_boundary != NULL) {
duke@435 221 // We have a boundary outside of which we don't want to look
duke@435 222 // at objects, so create a filtering closure around the
duke@435 223 // oop closure before walking the region.
duke@435 224 FilteringClosure filter(_boundary, _cl);
duke@435 225 walk_mem_region_with_cl(mr, bottom, top, &filter);
duke@435 226 } else {
duke@435 227 // No boundary, simply walk the heap with the oop closure.
duke@435 228 walk_mem_region_with_cl(mr, bottom, top, _cl);
duke@435 229 }
duke@435 230
duke@435 231 }
duke@435 232
duke@435 233 // We must replicate this so that the static type of "FilteringClosure"
duke@435 234 // (see above) is apparent at the oop_iterate calls.
duke@435 235 #define ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
duke@435 236 void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr, \
duke@435 237 HeapWord* bottom, \
duke@435 238 HeapWord* top, \
duke@435 239 ClosureType* cl) { \
duke@435 240 bottom += oop(bottom)->oop_iterate(cl, mr); \
duke@435 241 if (bottom < top) { \
duke@435 242 HeapWord* next_obj = bottom + oop(bottom)->size(); \
duke@435 243 while (next_obj < top) { \
duke@435 244 /* Bottom lies entirely below top, so we can call the */ \
duke@435 245 /* non-memRegion version of oop_iterate below. */ \
duke@435 246 oop(bottom)->oop_iterate(cl); \
duke@435 247 bottom = next_obj; \
duke@435 248 next_obj = bottom + oop(bottom)->size(); \
duke@435 249 } \
duke@435 250 /* Last object. */ \
duke@435 251 oop(bottom)->oop_iterate(cl, mr); \
duke@435 252 } \
duke@435 253 }
duke@435 254
duke@435 255 // (There are only two of these, rather than N, because the split is due
duke@435 256 // only to the introduction of the FilteringClosure, a local part of the
duke@435 257 // impl of this abstraction.)
coleenp@4037 258 ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
duke@435 259 ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
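
// The macro above exists so that the closure's *static* type reaches the
// oop_iterate() call, letting the compiler bind and inline the per-oop
// callback instead of going through a virtual dispatch. A toy sketch of
// the same stamping pattern (hypothetical types, not HotSpot's):
struct ToyClosureA { void do_word(int* p) { *p += 1; } };
struct ToyClosureB { void do_word(int* p) { *p *= 2; } };

#define TOY_WALK_DEFN(ClosureType)                                \
  static void toy_walk(int* bottom, int* top, ClosureType* cl) {  \
    for (int* p = bottom; p < top; p++) {                         \
      cl->do_word(p);  /* statically bound, inlinable call */     \
    }                                                             \
  }

// One overload per concrete closure type, mirroring the two DEFNs above.
TOY_WALK_DEFN(ToyClosureA)
TOY_WALK_DEFN(ToyClosureB)
#undef TOY_WALK_DEFN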
duke@435 260
duke@435 261 DirtyCardToOopClosure*
coleenp@4037 262 ContiguousSpace::new_dcto_cl(ExtendedOopClosure* cl,
duke@435 263 CardTableModRefBS::PrecisionStyle precision,
duke@435 264 HeapWord* boundary) {
duke@435 265 return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
duke@435 266 }
duke@435 267
jmasa@698 268 void Space::initialize(MemRegion mr,
jmasa@698 269 bool clear_space,
jmasa@698 270 bool mangle_space) {
duke@435 271 HeapWord* bottom = mr.start();
duke@435 272 HeapWord* end = mr.end();
duke@435 273 assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),
duke@435 274 "invalid space boundaries");
duke@435 275 set_bottom(bottom);
duke@435 276 set_end(end);
jmasa@698 277 if (clear_space) clear(mangle_space);
duke@435 278 }
duke@435 279
jmasa@698 280 void Space::clear(bool mangle_space) {
jmasa@698 281 if (ZapUnusedHeapArea && mangle_space) {
jmasa@698 282 mangle_unused_area();
jmasa@698 283 }
duke@435 284 }
duke@435 285
tonyp@791 286 ContiguousSpace::ContiguousSpace(): CompactibleSpace(), _top(NULL),
tonyp@791 287 _concurrent_iteration_safe_limit(NULL) {
jmasa@698 288 _mangler = new GenSpaceMangler(this);
jmasa@698 289 }
jmasa@698 290
jmasa@698 291 ContiguousSpace::~ContiguousSpace() {
jmasa@698 292 delete _mangler;
jmasa@698 293 }
jmasa@698 294
jmasa@698 295 void ContiguousSpace::initialize(MemRegion mr,
jmasa@698 296 bool clear_space,
jmasa@698 297 bool mangle_space)
duke@435 298 {
jmasa@698 299 CompactibleSpace::initialize(mr, clear_space, mangle_space);
ysr@782 300 set_concurrent_iteration_safe_limit(top());
duke@435 301 }
duke@435 302
jmasa@698 303 void ContiguousSpace::clear(bool mangle_space) {
duke@435 304 set_top(bottom());
duke@435 305 set_saved_mark();
tonyp@791 306 CompactibleSpace::clear(mangle_space);
duke@435 307 }
duke@435 308
duke@435 309 bool ContiguousSpace::is_free_block(const HeapWord* p) const {
duke@435 310 return p >= _top;
duke@435 311 }
duke@435 312
jmasa@698 313 void OffsetTableContigSpace::clear(bool mangle_space) {
jmasa@698 314 ContiguousSpace::clear(mangle_space);
duke@435 315 _offsets.initialize_threshold();
duke@435 316 }
duke@435 317
duke@435 318 void OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
duke@435 319 Space::set_bottom(new_bottom);
duke@435 320 _offsets.set_bottom(new_bottom);
duke@435 321 }
duke@435 322
duke@435 323 void OffsetTableContigSpace::set_end(HeapWord* new_end) {
duke@435 324 // Space should not advertise an increase in size
duke@435 325 // until after the underlying offset table has been enlarged.
duke@435 326 _offsets.resize(pointer_delta(new_end, bottom()));
duke@435 327 Space::set_end(new_end);
duke@435 328 }
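
// A minimal sketch (hypothetical types) of the ordering rule enforced by
// set_end() above: grow the side table first, then publish the larger
// size, so no reader ever sees an end() the offset table cannot cover.
#include <cstddef>

struct ToySideTable {
  size_t covered_words;
  void resize(size_t n) { covered_words = n; }
};

struct ToySpace {
  size_t end_words;
  ToySideTable offsets;
  void set_end(size_t new_end_words) {
    offsets.resize(new_end_words);  // 1. enlarge the table first
    end_words = new_end_words;      // 2. only then advertise the growth
  }
};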
duke@435 329
jmasa@698 330 #ifndef PRODUCT
jmasa@698 331
jmasa@698 332 void ContiguousSpace::set_top_for_allocations(HeapWord* v) {
jmasa@698 333 mangler()->set_top_for_allocations(v);
jmasa@698 334 }
jmasa@698 335 void ContiguousSpace::set_top_for_allocations() {
jmasa@698 336 mangler()->set_top_for_allocations(top());
jmasa@698 337 }
jmasa@698 338 void ContiguousSpace::check_mangled_unused_area(HeapWord* limit) {
jmasa@698 339 mangler()->check_mangled_unused_area(limit);
duke@435 340 }
duke@435 341
jmasa@698 342 void ContiguousSpace::check_mangled_unused_area_complete() {
jmasa@698 343 mangler()->check_mangled_unused_area_complete();
duke@435 344 }
duke@435 345
jmasa@698 346 // Mangle only the unused space that has not previously
jmasa@698 347 // been mangled and that has not been allocated since being
jmasa@698 348 // mangled.
jmasa@698 349 void ContiguousSpace::mangle_unused_area() {
jmasa@698 350 mangler()->mangle_unused_area();
jmasa@698 351 }
jmasa@698 352 void ContiguousSpace::mangle_unused_area_complete() {
jmasa@698 353 mangler()->mangle_unused_area_complete();
jmasa@698 354 }
jmasa@698 355 void ContiguousSpace::mangle_region(MemRegion mr) {
jmasa@698 356 // Although this method uses SpaceMangler::mangle_region(), which
jmasa@698 357 // is not specific to a space, when the ContiguousSpace version
jmasa@698 358 // is called it is always with regard to a space, so this
jmasa@698 359 // bounds checking is appropriate.
jmasa@698 360 MemRegion space_mr(bottom(), end());
jmasa@698 361 assert(space_mr.contains(mr), "Mangling outside space");
jmasa@698 362 SpaceMangler::mangle_region(mr);
jmasa@698 363 }
jmasa@698 364 #endif // NOT_PRODUCT
jmasa@698 365
jmasa@698 366 void CompactibleSpace::initialize(MemRegion mr,
jmasa@698 367 bool clear_space,
jmasa@698 368 bool mangle_space) {
jmasa@698 369 Space::initialize(mr, clear_space, mangle_space);
tonyp@791 370 set_compaction_top(bottom());
tonyp@791 371 _next_compaction_space = NULL;
tonyp@791 372 }
tonyp@791 373
tonyp@791 374 void CompactibleSpace::clear(bool mangle_space) {
tonyp@791 375 Space::clear(mangle_space);
duke@435 376 _compaction_top = bottom();
duke@435 377 }
duke@435 378
duke@435 379 HeapWord* CompactibleSpace::forward(oop q, size_t size,
duke@435 380 CompactPoint* cp, HeapWord* compact_top) {
duke@435 381 // q is alive
duke@435 382 // First check if we should switch compaction space
duke@435 383 assert(this == cp->space, "'this' should be current compaction space.");
duke@435 384 size_t compaction_max_size = pointer_delta(end(), compact_top);
duke@435 385 while (size > compaction_max_size) {
duke@435 386 // switch to next compaction space
duke@435 387 cp->space->set_compaction_top(compact_top);
duke@435 388 cp->space = cp->space->next_compaction_space();
duke@435 389 if (cp->space == NULL) {
duke@435 390 cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
duke@435 391 assert(cp->gen != NULL, "compaction must succeed");
duke@435 392 cp->space = cp->gen->first_compaction_space();
duke@435 393 assert(cp->space != NULL, "generation must have a first compaction space");
duke@435 394 }
duke@435 395 compact_top = cp->space->bottom();
duke@435 396 cp->space->set_compaction_top(compact_top);
duke@435 397 cp->threshold = cp->space->initialize_threshold();
duke@435 398 compaction_max_size = pointer_delta(cp->space->end(), compact_top);
duke@435 399 }
duke@435 400
duke@435 401 // store the forwarding pointer into the mark word
duke@435 402 if ((HeapWord*)q != compact_top) {
duke@435 403 q->forward_to(oop(compact_top));
duke@435 404 assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
duke@435 405 } else {
duke@435 406 // if the object isn't moving we can just set the mark to the default
duke@435 407 // mark and handle it specially later on.
duke@435 408 q->init_mark();
duke@435 409 assert(q->forwardee() == NULL, "should be forwarded to NULL");
duke@435 410 }
duke@435 411
duke@435 412 compact_top += size;
duke@435 413
duke@435 414 // we need to update the offset table so that the beginnings of objects can be
duke@435 415 // found during scavenge. Note that we are updating the offset table based on
duke@435 416 // where the object will be once the compaction phase finishes.
duke@435 417 if (compact_top > cp->threshold)
duke@435 418 cp->threshold =
duke@435 419 cp->space->cross_threshold(compact_top - size, compact_top);
duke@435 420 return compact_top;
duke@435 421 }
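
// A toy sketch (hypothetical types) of the forwarding step above: a live
// object records its post-compaction address in its header and
// compact_top advances by the object's size; an object that is not
// moving keeps a NULL forwardee and is handled specially later.
#include <cstddef>

struct FwdObj { FwdObj* forwardee; size_t size_words; };

static void toy_forward(FwdObj* q, size_t*& compact_top) {
  if ((size_t*)q != compact_top) {
    q->forwardee = (FwdObj*)compact_top;  // new location, in the header
  } else {
    q->forwardee = NULL;                  // object stays where it is
  }
  compact_top += q->size_words;           // bump by the object's size
}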
duke@435 422
duke@435 423
duke@435 424 bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words,
duke@435 425 HeapWord* q, size_t deadlength) {
duke@435 426 if (allowed_deadspace_words >= deadlength) {
duke@435 427 allowed_deadspace_words -= deadlength;
jcoomes@916 428 CollectedHeap::fill_with_object(q, deadlength);
jcoomes@916 429 oop(q)->set_mark(oop(q)->mark()->set_marked());
jcoomes@916 430 assert((int) deadlength == oop(q)->size(), "bad filler object size");
duke@435 431 // Recall that we required "q == compaction_top".
duke@435 432 return true;
duke@435 433 } else {
duke@435 434 allowed_deadspace_words = 0;
duke@435 435 return false;
duke@435 436 }
duke@435 437 }
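
// A sketch of the deadspace budget decision above: a dead run is kept in
// place (as a filler object) only while the budget lasts; once exceeded,
// the budget drops to zero so later runs are compacted normally.
#include <cstddef>

static bool toy_insert_deadspace(size_t& budget_words, size_t dead_words) {
  if (budget_words >= dead_words) {
    budget_words -= dead_words;  // pay for the filler out of the budget
    return true;                 // caller leaves the filler in place
  }
  budget_words = 0;              // disallow further deadspace here
  return false;
}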
duke@435 438
duke@435 439 #define block_is_always_obj(q) true
duke@435 440 #define obj_size(q) oop(q)->size()
duke@435 441 #define adjust_obj_size(s) s
duke@435 442
duke@435 443 void CompactibleSpace::prepare_for_compaction(CompactPoint* cp) {
duke@435 444 SCAN_AND_FORWARD(cp, end, block_is_obj, block_size);
duke@435 445 }
duke@435 446
duke@435 447 // Faster object search.
duke@435 448 void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
duke@435 449 SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size);
duke@435 450 }
duke@435 451
duke@435 452 void Space::adjust_pointers() {
duke@435 453 // adjust all the interior pointers to point at the new locations of objects
duke@435 454 // Used by MarkSweep::mark_sweep_phase3()
duke@435 455
duke@435 456 // First check to see if there is any work to be done.
duke@435 457 if (used() == 0) {
duke@435 458 return; // Nothing to do.
duke@435 459 }
duke@435 460
duke@435 461 // Otherwise...
duke@435 462 HeapWord* q = bottom();
duke@435 463 HeapWord* t = end();
duke@435 464
duke@435 465 debug_only(HeapWord* prev_q = NULL);
duke@435 466 while (q < t) {
duke@435 467 if (oop(q)->is_gc_marked()) {
duke@435 468 // q is alive
duke@435 469
duke@435 470 // point all the oops to the new location
duke@435 471 size_t size = oop(q)->adjust_pointers();
duke@435 472
duke@435 473 debug_only(prev_q = q);
duke@435 474
duke@435 475 q += size;
duke@435 476 } else {
duke@435 477 // q is not a live object, but we're not in a compactible space,
duke@435 478 // so we don't have live ranges.
duke@435 479 debug_only(prev_q = q);
duke@435 480 q += block_size(q);
duke@435 481 assert(q > prev_q, "we should be moving forward through memory");
duke@435 482 }
duke@435 483 }
duke@435 484 assert(q == t, "just checking");
duke@435 485 }
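
// A sketch (hypothetical types) of the adjust phase above: each reference
// field of a marked object is rewritten to its target's forwardee, which
// the forwarding pass recorded earlier.
#include <cstddef>

struct AdjNode { bool marked; AdjNode* forwardee; AdjNode* ref; };

static void toy_adjust_pointers(AdjNode** objs, size_t n) {
  for (size_t i = 0; i < n; i++) {
    AdjNode* q = objs[i];
    if (q->marked && q->ref != NULL) {
      q->ref = q->ref->forwardee;  // point at the post-compaction home
    }
  }
}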
duke@435 486
duke@435 487 void CompactibleSpace::adjust_pointers() {
duke@435 488 // First check if there is any work to do.
duke@435 489 if (used() == 0) {
duke@435 490 return; // Nothing to do.
duke@435 491 }
duke@435 492
duke@435 493 SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
duke@435 494 }
duke@435 495
duke@435 496 void CompactibleSpace::compact() {
duke@435 497 SCAN_AND_COMPACT(obj_size);
duke@435 498 }
duke@435 499
duke@435 500 void Space::print_short() const { print_short_on(tty); }
duke@435 501
duke@435 502 void Space::print_short_on(outputStream* st) const {
duke@435 503 st->print(" space " SIZE_FORMAT "K, %3d%% used", capacity() / K,
duke@435 504 (int) ((double) used() * 100 / capacity()));
duke@435 505 }
duke@435 506
duke@435 507 void Space::print() const { print_on(tty); }
duke@435 508
duke@435 509 void Space::print_on(outputStream* st) const {
duke@435 510 print_short_on(st);
duke@435 511 st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ")",
duke@435 512 bottom(), end());
duke@435 513 }
duke@435 514
duke@435 515 void ContiguousSpace::print_on(outputStream* st) const {
duke@435 516 print_short_on(st);
duke@435 517 st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
duke@435 518 bottom(), top(), end());
duke@435 519 }
duke@435 520
duke@435 521 void OffsetTableContigSpace::print_on(outputStream* st) const {
duke@435 522 print_short_on(st);
duke@435 523 st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
duke@435 524 INTPTR_FORMAT ", " INTPTR_FORMAT ")",
duke@435 525 bottom(), top(), _offsets.threshold(), end());
duke@435 526 }
duke@435 527
brutisso@3711 528 void ContiguousSpace::verify() const {
duke@435 529 HeapWord* p = bottom();
duke@435 530 HeapWord* t = top();
duke@435 531 HeapWord* prev_p = NULL;
duke@435 532 while (p < t) {
duke@435 533 oop(p)->verify();
duke@435 534 prev_p = p;
duke@435 535 p += oop(p)->size();
duke@435 536 }
duke@435 537 guarantee(p == top(), "end of last object must match end of space");
duke@435 538 if (top() != end()) {
ysr@777 539 guarantee(top() == block_start_const(end()-1) &&
ysr@777 540 top() == block_start_const(top()),
duke@435 541 "top should be start of unallocated block, if it exists");
duke@435 542 }
duke@435 543 }
duke@435 544
coleenp@4037 545 void Space::oop_iterate(ExtendedOopClosure* blk) {
duke@435 546 ObjectToOopClosure blk2(blk);
duke@435 547 object_iterate(&blk2);
duke@435 548 }
duke@435 549
duke@435 550 bool Space::obj_is_alive(const HeapWord* p) const {
duke@435 551 assert (block_is_obj(p), "The address should point to an object");
duke@435 552 return true;
duke@435 553 }
duke@435 554
jprovino@4542 555 #if INCLUDE_ALL_GCS
duke@435 556 #define ContigSpace_PAR_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
duke@435 557 \
duke@435 558 void ContiguousSpace::par_oop_iterate(MemRegion mr, OopClosureType* blk) {\
duke@435 559 HeapWord* obj_addr = mr.start(); \
duke@435 560 HeapWord* t = mr.end(); \
duke@435 561 while (obj_addr < t) { \
duke@435 562 assert(oop(obj_addr)->is_oop(), "Should be an oop"); \
duke@435 563 obj_addr += oop(obj_addr)->oop_iterate(blk); \
duke@435 564 } \
duke@435 565 }
duke@435 566
duke@435 567 ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DEFN)
duke@435 568
duke@435 569 #undef ContigSpace_PAR_OOP_ITERATE_DEFN
jprovino@4542 570 #endif // INCLUDE_ALL_GCS
duke@435 571
coleenp@4037 572 void ContiguousSpace::oop_iterate(ExtendedOopClosure* blk) {
duke@435 573 if (is_empty()) return;
duke@435 574 HeapWord* obj_addr = bottom();
duke@435 575 HeapWord* t = top();
duke@435 576 // Could call object_iterate(), but this is easier.
duke@435 577 while (obj_addr < t) {
duke@435 578 obj_addr += oop(obj_addr)->oop_iterate(blk);
duke@435 579 }
duke@435 580 }
duke@435 581
duke@435 582 void ContiguousSpace::object_iterate(ObjectClosure* blk) {
duke@435 583 if (is_empty()) return;
duke@435 584 WaterMark bm = bottom_mark();
duke@435 585 object_iterate_from(bm, blk);
duke@435 586 }
duke@435 587
jmasa@952 588 // For a contiguous space, object_iterate() and safe_object_iterate()
jmasa@952 589 // are the same.
jmasa@952 590 void ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
jmasa@952 591 object_iterate(blk);
jmasa@952 592 }
jmasa@952 593
duke@435 594 void ContiguousSpace::object_iterate_from(WaterMark mark, ObjectClosure* blk) {
duke@435 595 assert(mark.space() == this, "Mark does not match space");
duke@435 596 HeapWord* p = mark.point();
duke@435 597 while (p < top()) {
duke@435 598 blk->do_object(oop(p));
duke@435 599 p += oop(p)->size();
duke@435 600 }
duke@435 601 }
duke@435 602
duke@435 603 HeapWord*
duke@435 604 ContiguousSpace::object_iterate_careful(ObjectClosureCareful* blk) {
duke@435 605 HeapWord * limit = concurrent_iteration_safe_limit();
duke@435 606 assert(limit <= top(), "sanity check");
duke@435 607 for (HeapWord* p = bottom(); p < limit;) {
duke@435 608 size_t size = blk->do_object_careful(oop(p));
duke@435 609 if (size == 0) {
duke@435 610 return p; // failed at p
duke@435 611 } else {
duke@435 612 p += size;
duke@435 613 }
duke@435 614 }
duke@435 615 return NULL; // all done
duke@435 616 }
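
// A sketch of the "careful" iteration contract above: the callback
// returns the object's size, or 0 to abort, in which case the address
// where iteration stopped is handed back so the caller can deal with it
// (and possibly resume) later.
#include <cstddef>

static const int* toy_iterate_careful(const int* p, const int* limit,
                                      size_t (*careful)(const int*)) {
  while (p < limit) {
    size_t size = careful(p);
    if (size == 0) return p;  // failed at p; caller may retry from here
    p += size;
  }
  return NULL;                // all done
}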
duke@435 617
duke@435 618 #define ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix) \
duke@435 619 \
duke@435 620 void ContiguousSpace:: \
duke@435 621 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) { \
duke@435 622 HeapWord* t; \
duke@435 623 HeapWord* p = saved_mark_word(); \
duke@435 624 assert(p != NULL, "expected saved mark"); \
duke@435 625 \
duke@435 626 const intx interval = PrefetchScanIntervalInBytes; \
duke@435 627 do { \
duke@435 628 t = top(); \
duke@435 629 while (p < t) { \
duke@435 630 Prefetch::write(p, interval); \
duke@435 631 debug_only(HeapWord* prev = p); \
duke@435 632 oop m = oop(p); \
duke@435 633 p += m->oop_iterate(blk); \
duke@435 634 } \
duke@435 635 } while (t < top()); \
duke@435 636 \
duke@435 637 set_saved_mark_word(p); \
duke@435 638 }
duke@435 639
duke@435 640 ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN)
duke@435 641
duke@435 642 #undef ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN
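
// A standalone sketch of the re-scan loop in the macro above: applying
// the closure may allocate new objects past the current top, so the walk
// repeats until top() stops moving. A std::vector stands in for the
// space and push_back for allocation.
#include <cstddef>
#include <vector>

static void toy_iterate_until_stable(std::vector<int>& heap,
                                     size_t saved_mark,
                                     void (*blk)(std::vector<int>&, size_t)) {
  size_t p = saved_mark;
  size_t t;
  do {
    t = heap.size();           // snapshot the current top
    while (p < t) {
      blk(heap, p);            // may push_back, i.e. grow past t
      p++;
    }
  } while (t < heap.size());   // new objects appeared; scan them too
}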
duke@435 643
duke@435 644 // Very general, slow implementation.
ysr@777 645 HeapWord* ContiguousSpace::block_start_const(const void* p) const {
johnc@4300 646 assert(MemRegion(bottom(), end()).contains(p),
johnc@4300 647 err_msg("p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
johnc@4300 648 p, bottom(), end()));
duke@435 649 if (p >= top()) {
duke@435 650 return top();
duke@435 651 } else {
duke@435 652 HeapWord* last = bottom();
duke@435 653 HeapWord* cur = last;
duke@435 654 while (cur <= p) {
duke@435 655 last = cur;
duke@435 656 cur += oop(cur)->size();
duke@435 657 }
johnc@4300 658 assert(oop(last)->is_oop(),
johnc@4300 659 err_msg(PTR_FORMAT " should be an object start", last));
duke@435 660 return last;
duke@435 661 }
duke@435 662 }
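
// A sketch of the linear block-start search above, over a toy heap laid
// out as contiguous variable-sized blocks (sizes in words): step from
// block to block, remembering the last start at or below p.
#include <cstddef>

static size_t toy_block_start(const size_t* sizes, size_t nblocks, size_t p) {
  size_t last = 0, cur = 0;
  for (size_t i = 0; cur <= p && i < nblocks; i++) {
    last = cur;
    cur += sizes[i];  // advance object by object, as oop(cur)->size() does
  }
  return last;
}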
duke@435 663
duke@435 664 size_t ContiguousSpace::block_size(const HeapWord* p) const {
johnc@4300 665 assert(MemRegion(bottom(), end()).contains(p),
johnc@4300 666 err_msg("p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
johnc@4300 667 p, bottom(), end()));
duke@435 668 HeapWord* current_top = top();
johnc@4300 669 assert(p <= current_top,
johnc@4300 670 err_msg("p > current top - p: " PTR_FORMAT ", current top: " PTR_FORMAT,
johnc@4300 671 p, current_top));
johnc@4300 672 assert(p == current_top || oop(p)->is_oop(),
johnc@4300 673 err_msg("p (" PTR_FORMAT ") is not a block start - "
johnc@4300 674 "current_top: " PTR_FORMAT ", is_oop: %s",
johnc@4300 675 p, current_top, BOOL_TO_STR(oop(p)->is_oop())));
johnc@4300 676 if (p < current_top) {
duke@435 677 return oop(p)->size();
johnc@4300 678 } else {
duke@435 679 assert(p == current_top, "just checking");
duke@435 680 return pointer_delta(end(), (HeapWord*) p);
duke@435 681 }
duke@435 682 }
duke@435 683
duke@435 684 // This version requires locking.
duke@435 685 inline HeapWord* ContiguousSpace::allocate_impl(size_t size,
duke@435 686 HeapWord* const end_value) {
tonyp@2715 687 // In G1 there are places where a GC worker can allocate into a
tonyp@2715 688 // region using this serial allocation code without being prone to a
tonyp@2715 689 // race with other GC workers (we ensure that no other GC worker can
tonyp@2715 690 // access the same region at the same time). So the assert below is
tonyp@2715 691 // too strong in the case of G1.
duke@435 692 assert(Heap_lock->owned_by_self() ||
duke@435 693 (SafepointSynchronize::is_at_safepoint() &&
tonyp@2715 694 (Thread::current()->is_VM_thread() || UseG1GC)),
duke@435 695 "not locked");
duke@435 696 HeapWord* obj = top();
duke@435 697 if (pointer_delta(end_value, obj) >= size) {
duke@435 698 HeapWord* new_top = obj + size;
duke@435 699 set_top(new_top);
duke@435 700 assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
duke@435 701 return obj;
duke@435 702 } else {
duke@435 703 return NULL;
duke@435 704 }
duke@435 705 }
duke@435 706
duke@435 707 // This version is lock-free.
duke@435 708 inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size,
duke@435 709 HeapWord* const end_value) {
duke@435 710 do {
duke@435 711 HeapWord* obj = top();
duke@435 712 if (pointer_delta(end_value, obj) >= size) {
duke@435 713 HeapWord* new_top = obj + size;
duke@435 714 HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
duke@435 715 // result can be one of two values:
duke@435 716 // the old top value: the exchange succeeded
duke@435 717 // otherwise: the current value of top is returned.
duke@435 718 if (result == obj) {
duke@435 719 assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
duke@435 720 return obj;
duke@435 721 }
duke@435 722 } else {
duke@435 723 return NULL;
duke@435 724 }
duke@435 725 } while (true);
duke@435 726 }
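
// A standalone sketch of the lock-free bump allocation above, using
// C++11 std::atomic in place of Atomic::cmpxchg_ptr. The CAS succeeds
// only if top is still the value we read; otherwise another thread won
// the race and we retry with the fresh top.
#include <atomic>
#include <cstddef>

struct ToyArena {
  std::atomic<char*> top;
  char* end;

  void* toy_par_allocate(size_t bytes) {
    while (true) {
      char* obj = top.load(std::memory_order_relaxed);
      if ((size_t)(end - obj) < bytes) {
        return NULL;                       // not enough room left
      }
      char* new_top = obj + bytes;
      if (top.compare_exchange_weak(obj, new_top)) {
        return obj;                        // we installed the new top
      }
      // CAS failed: another thread advanced top; loop and retry.
    }
  }
};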
duke@435 727
duke@435 728 // Requires locking.
duke@435 729 HeapWord* ContiguousSpace::allocate(size_t size) {
duke@435 730 return allocate_impl(size, end());
duke@435 731 }
duke@435 732
duke@435 733 // Lock-free.
duke@435 734 HeapWord* ContiguousSpace::par_allocate(size_t size) {
duke@435 735 return par_allocate_impl(size, end());
duke@435 736 }
duke@435 737
duke@435 738 void ContiguousSpace::allocate_temporary_filler(int factor) {
duke@435 739 // allocate a temporary type array, decreasing the free size by factor 'factor'
duke@435 740 assert(factor >= 0, "just checking");
duke@435 741 size_t size = pointer_delta(end(), top());
duke@435 742
duke@435 743 // if space is full, return
duke@435 744 if (size == 0) return;
duke@435 745
duke@435 746 if (factor > 0) {
duke@435 747 size -= size/factor;
duke@435 748 }
duke@435 749 size = align_object_size(size);
duke@435 750
kvn@1926 751 const size_t array_header_size = typeArrayOopDesc::header_size(T_INT);
kvn@1926 752 if (size >= (size_t)align_object_size(array_header_size)) {
kvn@1926 753 size_t length = (size - array_header_size) * (HeapWordSize / sizeof(jint));
duke@435 754 // allocate uninitialized int array
duke@435 755 typeArrayOop t = (typeArrayOop) allocate(size);
duke@435 756 assert(t != NULL, "allocation should succeed");
duke@435 757 t->set_mark(markOopDesc::prototype());
duke@435 758 t->set_klass(Universe::intArrayKlassObj());
duke@435 759 t->set_length((int)length);
duke@435 760 } else {
kvn@1926 761 assert(size == CollectedHeap::min_fill_size(),
duke@435 762 "size for smallest fake object doesn't match");
duke@435 763 instanceOop obj = (instanceOop) allocate(size);
duke@435 764 obj->set_mark(markOopDesc::prototype());
coleenp@602 765 obj->set_klass_gap(0);
never@1577 766 obj->set_klass(SystemDictionary::Object_klass());
duke@435 767 }
duke@435 768 }
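
// A sketch of the filler-length arithmetic above: for a block of 'size'
// words and an int-array header of 'header' words, the element count is
// the leftover words times the ints that fit in one word (e.g. on
// 64-bit: (size - header) * (8 / 4) ints). 'sizeof(int)' here is a
// stand-in for sizeof(jint).
#include <cstddef>

static size_t toy_filler_length(size_t size_words, size_t header_words,
                                size_t bytes_per_word) {
  return (size_words - header_words) * (bytes_per_word / sizeof(int));
}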
duke@435 769
jmasa@698 770 void EdenSpace::clear(bool mangle_space) {
jmasa@698 771 ContiguousSpace::clear(mangle_space);
duke@435 772 set_soft_end(end());
duke@435 773 }
duke@435 774
duke@435 775 // Requires locking.
duke@435 776 HeapWord* EdenSpace::allocate(size_t size) {
duke@435 777 return allocate_impl(size, soft_end());
duke@435 778 }
duke@435 779
duke@435 780 // Lock-free.
duke@435 781 HeapWord* EdenSpace::par_allocate(size_t size) {
duke@435 782 return par_allocate_impl(size, soft_end());
duke@435 783 }
duke@435 784
duke@435 785 HeapWord* ConcEdenSpace::par_allocate(size_t size)
duke@435 786 {
duke@435 787 do {
duke@435 788 // The invariant is that top() should be read before end(), because
duke@435 789 // top() can't be greater than end(): if an update of _soft_end
duke@435 790 // occurred between 'end_val = end();' and 'top_val = top();', top()
duke@435 791 // could grow up to the new end() and the condition
duke@435 792 // 'top_val > end_val' would be true. To ensure this load order,
duke@435 793 // OrderAccess::loadload() is required after the top() read.
duke@435 794 HeapWord* obj = top();
duke@435 795 OrderAccess::loadload();
duke@435 796 if (pointer_delta(*soft_end_addr(), obj) >= size) {
duke@435 797 HeapWord* new_top = obj + size;
duke@435 798 HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
duke@435 799 // result can be one of two values:
duke@435 800 // the old top value: the exchange succeeded
duke@435 801 // otherwise: the current value of top is returned.
duke@435 802 if (result == obj) {
duke@435 803 assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
duke@435 804 return obj;
duke@435 805 }
duke@435 806 } else {
duke@435 807 return NULL;
duke@435 808 }
duke@435 809 } while (true);
duke@435 810 }
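
// A sketch of the load-ordering requirement above, expressed with C++11
// std::atomic: the top load must not be reordered after the soft_end
// load, so a loadload barrier (here, acquire ordering on the top load)
// sits between the two reads. Hypothetical toy fields.
#include <atomic>
#include <cstddef>

static bool toy_fits(const std::atomic<char*>& top_addr,
                     const std::atomic<char*>& soft_end_addr,
                     size_t bytes) {
  char* obj = top_addr.load(std::memory_order_acquire);  // read top first
  char* lim = soft_end_addr.load(std::memory_order_relaxed);
  return (size_t)(lim - obj) >= bytes;  // safe: lim was read after obj
}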
duke@435 811
duke@435 812
duke@435 813 HeapWord* OffsetTableContigSpace::initialize_threshold() {
duke@435 814 return _offsets.initialize_threshold();
duke@435 815 }
duke@435 816
duke@435 817 HeapWord* OffsetTableContigSpace::cross_threshold(HeapWord* start, HeapWord* end) {
duke@435 818 _offsets.alloc_block(start, end);
duke@435 819 return _offsets.threshold();
duke@435 820 }
duke@435 821
duke@435 822 OffsetTableContigSpace::OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
duke@435 823 MemRegion mr) :
duke@435 824 _offsets(sharedOffsetArray, mr),
duke@435 825 _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true)
duke@435 826 {
duke@435 827 _offsets.set_contig_space(this);
jmasa@698 828 initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
duke@435 829 }
duke@435 830
duke@435 831 #define OBJ_SAMPLE_INTERVAL 0
duke@435 832 #define BLOCK_SAMPLE_INTERVAL 100
duke@435 833
brutisso@3711 834 void OffsetTableContigSpace::verify() const {
duke@435 835 HeapWord* p = bottom();
duke@435 836 HeapWord* prev_p = NULL;
duke@435 837 int objs = 0;
duke@435 838 int blocks = 0;
duke@435 839
duke@435 840 if (VerifyObjectStartArray) {
duke@435 841 _offsets.verify();
duke@435 842 }
duke@435 843
duke@435 844 while (p < top()) {
duke@435 845 size_t size = oop(p)->size();
duke@435 846 // For a sampling of objects in the space, find it using the
duke@435 847 // block offset table.
duke@435 848 if (blocks == BLOCK_SAMPLE_INTERVAL) {
ysr@777 849 guarantee(p == block_start_const(p + (size/2)),
ysr@777 850 "check offset computation");
duke@435 851 blocks = 0;
duke@435 852 } else {
duke@435 853 blocks++;
duke@435 854 }
duke@435 855
duke@435 856 if (objs == OBJ_SAMPLE_INTERVAL) {
duke@435 857 oop(p)->verify();
duke@435 858 objs = 0;
duke@435 859 } else {
duke@435 860 objs++;
duke@435 861 }
duke@435 862 prev_p = p;
duke@435 863 p += size;
duke@435 864 }
duke@435 865 guarantee(p == top(), "end of last object must match end of space");
duke@435 866 }
duke@435 867
duke@435 868
jcoomes@873 869 size_t TenuredSpace::allowed_dead_ratio() const {
duke@435 870 return MarkSweepDeadRatio;
duke@435 871 }
