src/share/vm/memory/space.cpp

author:      jmasa
date:        Mon, 04 Aug 2014 10:48:10 -0700
changeset:   7031:ee019285a52c
parent:      6990:1526a938e670
child:       7535:7ae4e26cb1e0
permissions: -rw-r--r--

8031323: Optionally align objects copied to survivor spaces
Reviewed-by: brutisso, tschatzl

/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc_implementation/shared/liveRange.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/defNewGeneration.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/space.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "runtime/java.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
                                                HeapWord* top_obj) {
  if (top_obj != NULL) {
    if (_sp->block_is_obj(top_obj)) {
      if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
        if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
          // An arrayOop is starting on the dirty card - since we do exact
          // store checks for objArrays we are done.
        } else {
          // Otherwise, it is possible that the object starting on the dirty
          // card spans the entire card, and that the store happened on a
          // later card.  Figure out where the object ends.
          // Use the block_size() method of the space over which
          // the iteration is being done.  That space (e.g. CMS) may have
          // specific requirements on object sizes which will
          // be reflected in the block_size() method.
          top = top_obj + oop(top_obj)->size();
        }
      }
    } else {
      top = top_obj;
    }
  } else {
    assert(top == _sp->end(), "only case where top_obj == NULL");
  }
  return top;
}

void DirtyCardToOopClosure::walk_mem_region(MemRegion mr,
                                            HeapWord* bottom,
                                            HeapWord* top) {
  // 1. Blocks may or may not be objects.
  // 2. Even when a block_is_obj(), it may not entirely
  //    occupy the block if the block quantum is larger than
  //    the object size.
  // We can and should try to optimize by calling the non-MemRegion
  // version of oop_iterate() for all but the extremal objects
  // (for which we need to call the MemRegion version of
  // oop_iterate()).  To be done post-beta XXX
  for (; bottom < top; bottom += _sp->block_size(bottom)) {
    // As in the case of contiguous space above, we'd like to
    // just use the value returned by oop_iterate to increment the
    // current pointer; unfortunately, that won't work in CMS because
    // we'd need an interface change (it seems) to have the space
    // "adjust the object size" (for instance pad it up to its
    // block alignment or minimum block size restrictions). XXX
    if (_sp->block_is_obj(bottom) &&
        !_sp->obj_allocated_since_save_marks(oop(bottom))) {
      oop(bottom)->oop_iterate(_cl, mr);
    }
  }
}

// We get called with "mr" representing the dirty region
// that we want to process. Because of imprecise marking,
// we may need to extend the incoming "mr" to the right,
// and scan more. However, because we may already have
// scanned some of that extended region, we may need to
// trim its right end back so we do not scan what we
// (or another worker thread) may already have scanned
// or may be planning to scan.
void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {

  // Some collectors need to do special things whenever their dirty
  // cards are processed. For instance, CMS must remember mutator updates
  // (i.e. dirty cards) so as to re-scan mutated objects.
  // Such work can be piggy-backed here on dirty card scanning, so as to make
  // it slightly more efficient than doing a complete non-destructive pre-scan
  // of the card table.
  MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure();
  if (pCl != NULL) {
    pCl->do_MemRegion(mr);
  }

  HeapWord* bottom = mr.start();
  HeapWord* last = mr.last();
  HeapWord* top = mr.end();
  HeapWord* bottom_obj;
  HeapWord* top_obj;

  assert(_precision == CardTableModRefBS::ObjHeadPreciseArray ||
         _precision == CardTableModRefBS::Precise,
         "Only ones we deal with for now.");

  assert(_precision != CardTableModRefBS::ObjHeadPreciseArray ||
         _cl->idempotent() || _last_bottom == NULL ||
         top <= _last_bottom,
         "Not decreasing");
  NOT_PRODUCT(_last_bottom = mr.start());

  bottom_obj = _sp->block_start(bottom);
  top_obj    = _sp->block_start(last);

  assert(bottom_obj <= bottom, "just checking");
  assert(top_obj    <= top,    "just checking");

  // Given what we think is the top of the memory region and
  // the start of the object at the top, get the actual
  // value of the top.
  top = get_actual_top(top, top_obj);

  // If the previous call did some part of this region, don't redo.
  if (_precision == CardTableModRefBS::ObjHeadPreciseArray &&
      _min_done != NULL &&
      _min_done < top) {
    top = _min_done;
  }

  // Top may have been reset, and in fact may be below bottom,
  // e.g. the dirty card region is entirely in a now free object
  // -- something that could happen with a concurrent sweeper.
  bottom = MIN2(bottom, top);
  MemRegion extended_mr = MemRegion(bottom, top);
  assert(bottom <= top &&
         (_precision != CardTableModRefBS::ObjHeadPreciseArray ||
          _min_done == NULL ||
          top <= _min_done),
         "overlap!");

  // Walk the region if it is not empty; otherwise there is nothing to do.
  if (!extended_mr.is_empty()) {
    walk_mem_region(extended_mr, bottom_obj, top);
  }

  // An idempotent closure might be applied in any order, so we don't
  // record a _min_done for it.
  if (!_cl->idempotent()) {
    _min_done = bottom;
  } else {
    assert(_min_done == _last_explicit_min_done,
           "Don't update _min_done for idempotent cl");
  }
}

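// A worked example of the extend-then-trim logic above (illustrative
// values, not taken from any particular run): with 512-byte cards,
// suppose a non-array object starts on dirty card C1 and spans through
// card C3. Under ObjHeadPreciseArray precision, get_actual_top()
// extends the region for C1 to the end of that object, into C3. If an
// earlier do_MemRegion() call already covered [C2, C3) and left
// _min_done at the start of C2, the extension is trimmed back to
// _min_done so that range is not scanned twice.
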
DirtyCardToOopClosure* Space::new_dcto_cl(ExtendedOopClosure* cl,
                                          CardTableModRefBS::PrecisionStyle precision,
                                          HeapWord* boundary) {
  return new DirtyCardToOopClosure(this, cl, precision, boundary);
}

HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top,
                                               HeapWord* top_obj) {
  if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) {
    if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
      if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
        // An arrayOop is starting on the dirty card - since we do exact
        // store checks for objArrays we are done.
      } else {
        // Otherwise, it is possible that the object starting on the dirty
        // card spans the entire card, and that the store happened on a
        // later card.  Figure out where the object ends.
        assert(_sp->block_size(top_obj) == (size_t) oop(top_obj)->size(),
          "Block size and object size mismatch");
        top = top_obj + oop(top_obj)->size();
      }
    }
  } else {
    top = (_sp->toContiguousSpace())->top();
  }
  return top;
}

void Filtering_DCTOC::walk_mem_region(MemRegion mr,
                                      HeapWord* bottom,
                                      HeapWord* top) {
  // Note that this assumption won't hold if we have a concurrent
  // collector in this space, which may have freed up objects after
  // they were dirtied and before the stop-the-world GC that is
  // examining cards here.
  assert(bottom < top, "ought to be at least one obj on a dirty card.");

  if (_boundary != NULL) {
    // We have a boundary outside of which we don't want to look
    // at objects, so create a filtering closure around the
    // oop closure before walking the region.
    FilteringClosure filter(_boundary, _cl);
    walk_mem_region_with_cl(mr, bottom, top, &filter);
  } else {
    // No boundary, simply walk the heap with the oop closure.
    walk_mem_region_with_cl(mr, bottom, top, _cl);
  }
}

// We must replicate this so that the static type of "FilteringClosure"
// (see above) is apparent at the oop_iterate calls.
#define ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,        \
                                                   HeapWord* bottom,    \
                                                   HeapWord* top,       \
                                                   ClosureType* cl) {   \
  bottom += oop(bottom)->oop_iterate(cl, mr);                           \
  if (bottom < top) {                                                   \
    HeapWord* next_obj = bottom + oop(bottom)->size();                  \
    while (next_obj < top) {                                            \
      /* Bottom lies entirely below top, so we can call the */          \
      /* non-memRegion version of oop_iterate below. */                 \
      oop(bottom)->oop_iterate(cl);                                     \
      bottom = next_obj;                                                \
      next_obj = bottom + oop(bottom)->size();                          \
    }                                                                   \
    /* Last object. */                                                  \
    oop(bottom)->oop_iterate(cl, mr);                                   \
  }                                                                     \
}

// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)

DirtyCardToOopClosure*
ContiguousSpace::new_dcto_cl(ExtendedOopClosure* cl,
                             CardTableModRefBS::PrecisionStyle precision,
                             HeapWord* boundary) {
  return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
}

void Space::initialize(MemRegion mr,
                       bool clear_space,
                       bool mangle_space) {
  HeapWord* bottom = mr.start();
  HeapWord* end    = mr.end();
  assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),
         "invalid space boundaries");
  set_bottom(bottom);
  set_end(end);
  if (clear_space) clear(mangle_space);
}

void Space::clear(bool mangle_space) {
  if (ZapUnusedHeapArea && mangle_space) {
    mangle_unused_area();
  }
}

ContiguousSpace::ContiguousSpace(): CompactibleSpace(), _top(NULL),
    _concurrent_iteration_safe_limit(NULL) {
  _mangler = new GenSpaceMangler(this);
}

ContiguousSpace::~ContiguousSpace() {
  delete _mangler;
}

void ContiguousSpace::initialize(MemRegion mr,
                                 bool clear_space,
                                 bool mangle_space)
{
  CompactibleSpace::initialize(mr, clear_space, mangle_space);
  set_concurrent_iteration_safe_limit(top());
}

void ContiguousSpace::clear(bool mangle_space) {
  set_top(bottom());
  set_saved_mark();
  CompactibleSpace::clear(mangle_space);
}

bool ContiguousSpace::is_free_block(const HeapWord* p) const {
  return p >= _top;
}

void OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.initialize_threshold();
}

void OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void OffsetTableContigSpace::set_end(HeapWord* new_end) {
  // Space should not advertise an increase in size
  // until after the underlying offset table has been enlarged.
  _offsets.resize(pointer_delta(new_end, bottom()));
  Space::set_end(new_end);
}

#ifndef PRODUCT

void ContiguousSpace::set_top_for_allocations(HeapWord* v) {
  mangler()->set_top_for_allocations(v);
}
void ContiguousSpace::set_top_for_allocations() {
  mangler()->set_top_for_allocations(top());
}
void ContiguousSpace::check_mangled_unused_area(HeapWord* limit) {
  mangler()->check_mangled_unused_area(limit);
}

void ContiguousSpace::check_mangled_unused_area_complete() {
  mangler()->check_mangled_unused_area_complete();
}

// Mangle only the unused space that has not previously
// been mangled and that has not been allocated since being
// mangled.
void ContiguousSpace::mangle_unused_area() {
  mangler()->mangle_unused_area();
}
void ContiguousSpace::mangle_unused_area_complete() {
  mangler()->mangle_unused_area_complete();
}
void ContiguousSpace::mangle_region(MemRegion mr) {
  // Although this method uses SpaceMangler::mangle_region(), which
  // is not specific to a space, when the ContiguousSpace version
  // is called it is always with regard to a space, so this
  // bounds checking is appropriate.
  MemRegion space_mr(bottom(), end());
  assert(space_mr.contains(mr), "Mangling outside space");
  SpaceMangler::mangle_region(mr);
}
#endif  // NOT_PRODUCT

void CompactibleSpace::initialize(MemRegion mr,
                                  bool clear_space,
                                  bool mangle_space) {
  Space::initialize(mr, clear_space, mangle_space);
  set_compaction_top(bottom());
  _next_compaction_space = NULL;
}

void CompactibleSpace::clear(bool mangle_space) {
  Space::clear(mangle_space);
  _compaction_top = bottom();
}

HeapWord* CompactibleSpace::forward(oop q, size_t size,
                                    CompactPoint* cp, HeapWord* compact_top) {
  // q is alive
  // First check if we should switch compaction space
  assert(this == cp->space, "'this' should be current compaction space.");
  size_t compaction_max_size = pointer_delta(end(), compact_top);
  while (size > compaction_max_size) {
    // switch to next compaction space
    cp->space->set_compaction_top(compact_top);
    cp->space = cp->space->next_compaction_space();
    if (cp->space == NULL) {
      cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
      assert(cp->gen != NULL, "compaction must succeed");
      cp->space = cp->gen->first_compaction_space();
      assert(cp->space != NULL, "generation must have a first compaction space");
    }
    compact_top = cp->space->bottom();
    cp->space->set_compaction_top(compact_top);
    cp->threshold = cp->space->initialize_threshold();
    compaction_max_size = pointer_delta(cp->space->end(), compact_top);
  }

  // store the forwarding pointer into the mark word
  if ((HeapWord*)q != compact_top) {
    q->forward_to(oop(compact_top));
    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
  } else {
    // if the object isn't moving we can just set the mark to the default
    // mark and handle it specially later on.
    q->init_mark();
    assert(q->forwardee() == NULL, "should be forwarded to NULL");
  }

  compact_top += size;

  // we need to update the offset table so that the beginnings of objects can be
  // found during scavenge.  Note that we are updating the offset table based on
  // where the object will be once the compaction phase finishes.
  if (compact_top > cp->threshold)
    cp->threshold =
      cp->space->cross_threshold(compact_top - size, compact_top);
  return compact_top;
}

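// A simplified view of the forwarding protocol above (not an exact
// description of the mark word encoding): forward_to() stores the
// destination address in q's mark word, tagged so the object still
// reads as GC-marked, while an object that will not move gets a clean
// default mark instead, and the later compact pass recognizes the NULL
// forwardee as "no copy required".
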
bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words,
                                        HeapWord* q, size_t deadlength) {
  if (allowed_deadspace_words >= deadlength) {
    allowed_deadspace_words -= deadlength;
    CollectedHeap::fill_with_object(q, deadlength);
    oop(q)->set_mark(oop(q)->mark()->set_marked());
    assert((int) deadlength == oop(q)->size(), "bad filler object size");
    // Recall that we required "q == compaction_top".
    return true;
  } else {
    allowed_deadspace_words = 0;
    return false;
  }
}

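// How the deadspace budget above is used (a summary; the percentage
// semantics follow from allowed_dead_ratio() at the end of this file):
// a compactible space may keep up to allowed_dead_ratio() percent of
// its capacity as unmoved filler "dead" objects rather than sliding
// every live object, trading some fragmentation for less copying work.
// Once a dead run exceeds the remaining budget, the budget is zeroed
// and all later objects are compacted normally.
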
#define block_is_always_obj(q) true
#define obj_size(q) oop(q)->size()
#define adjust_obj_size(s) s

void CompactibleSpace::prepare_for_compaction(CompactPoint* cp) {
  SCAN_AND_FORWARD(cp, end, block_is_obj, block_size);
}

// Faster object search.
void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
  SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size);
}

void Space::adjust_pointers() {
  // adjust all the interior pointers to point at the new locations of objects
  // Used by MarkSweep::mark_sweep_phase3()

  // First check to see if there is any work to be done.
  if (used() == 0) {
    return;  // Nothing to do.
  }

  // Otherwise...
  HeapWord* q = bottom();
  HeapWord* t = end();

  debug_only(HeapWord* prev_q = NULL);
  while (q < t) {
    if (oop(q)->is_gc_marked()) {
      // q is alive

      // point all the oops to the new location
      size_t size = oop(q)->adjust_pointers();

      debug_only(prev_q = q);

      q += size;
    } else {
      // q is not a live object, but we're not in a compactible space,
      // so we don't have live ranges.
      debug_only(prev_q = q);
      q += block_size(q);
      assert(q > prev_q, "we should be moving forward through memory");
    }
  }
  assert(q == t, "just checking");
}

void CompactibleSpace::adjust_pointers() {
  // Check first if there is any work to do.
  if (used() == 0) {
    return;   // Nothing to do.
  }

  SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
}

void CompactibleSpace::compact() {
  SCAN_AND_COMPACT(obj_size);
}

void Space::print_short() const { print_short_on(tty); }

void Space::print_short_on(outputStream* st) const {
  st->print(" space " SIZE_FORMAT "K, %3d%% used", capacity() / K,
              (int) ((double) used() * 100 / capacity()));
}

void Space::print() const { print_on(tty); }

void Space::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                bottom(), end());
}

void ContiguousSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                bottom(), top(), end());
}

void OffsetTableContigSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                INTPTR_FORMAT ", " INTPTR_FORMAT ")",
              bottom(), top(), _offsets.threshold(), end());
}

void ContiguousSpace::verify() const {
  HeapWord* p = bottom();
  HeapWord* t = top();
  HeapWord* prev_p = NULL;
  while (p < t) {
    oop(p)->verify();
    prev_p = p;
    p += oop(p)->size();
  }
  guarantee(p == top(), "end of last object must match end of space");
  if (top() != end()) {
    guarantee(top() == block_start_const(end()-1) &&
              top() == block_start_const(top()),
              "top should be start of unallocated block, if it exists");
  }
}

void Space::oop_iterate(ExtendedOopClosure* blk) {
  ObjectToOopClosure blk2(blk);
  object_iterate(&blk2);
}

bool Space::obj_is_alive(const HeapWord* p) const {
  assert(block_is_obj(p), "The address should point to an object");
  return true;
}

#if INCLUDE_ALL_GCS
#define ContigSpace_PAR_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)         \
                                                                            \
  void ContiguousSpace::par_oop_iterate(MemRegion mr, OopClosureType* blk) {\
    HeapWord* obj_addr = mr.start();                                        \
    HeapWord* t = mr.end();                                                 \
    while (obj_addr < t) {                                                  \
      assert(oop(obj_addr)->is_oop(), "Should be an oop");                  \
      obj_addr += oop(obj_addr)->oop_iterate(blk);                          \
    }                                                                       \
  }

  ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DEFN)

#undef ContigSpace_PAR_OOP_ITERATE_DEFN
#endif // INCLUDE_ALL_GCS

void ContiguousSpace::oop_iterate(ExtendedOopClosure* blk) {
  if (is_empty()) return;
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();
  // Could call object_iterate(), but this is easier.
  while (obj_addr < t) {
    obj_addr += oop(obj_addr)->oop_iterate(blk);
  }
}

void ContiguousSpace::object_iterate(ObjectClosure* blk) {
  if (is_empty()) return;
  WaterMark bm = bottom_mark();
  object_iterate_from(bm, blk);
}

// For a contiguous space, object_iterate() and safe_object_iterate()
// are the same.
void ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
  object_iterate(blk);
}

void ContiguousSpace::object_iterate_from(WaterMark mark, ObjectClosure* blk) {
  assert(mark.space() == this, "Mark does not match space");
  HeapWord* p = mark.point();
  while (p < top()) {
    blk->do_object(oop(p));
    p += oop(p)->size();
  }
}

HeapWord*
ContiguousSpace::object_iterate_careful(ObjectClosureCareful* blk) {
  HeapWord* limit = concurrent_iteration_safe_limit();
  assert(limit <= top(), "sanity check");
  for (HeapWord* p = bottom(); p < limit;) {
    size_t size = blk->do_object_careful(oop(p));
    if (size == 0) {
      return p;  // failed at p
    } else {
      p += size;
    }
  }
  return NULL; // all done
}

#define ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)  \
                                                                          \
void ContiguousSpace::                                                    \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {            \
  HeapWord* t;                                                            \
  HeapWord* p = saved_mark_word();                                        \
  assert(p != NULL, "expected saved mark");                               \
                                                                          \
  const intx interval = PrefetchScanIntervalInBytes;                      \
  do {                                                                    \
    t = top();                                                            \
    while (p < t) {                                                       \
      Prefetch::write(p, interval);                                       \
      debug_only(HeapWord* prev = p);                                     \
      oop m = oop(p);                                                     \
      p += m->oop_iterate(blk);                                           \
    }                                                                     \
  } while (t < top());                                                    \
                                                                          \
  set_saved_mark_word(p);                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN)

#undef ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN

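// A reading of the do/while in the macro above: applying the closure
// may itself allocate new objects in this space (for instance, objects
// promoted during a scavenge land past the previously read top()), so
// the loop re-reads top() and keeps scanning until no new objects
// appear above the last position reached. This is why the iteration
// cannot simply cache top() once.
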
// Very general, slow implementation.
HeapWord* ContiguousSpace::block_start_const(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         err_msg("p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
                  p, bottom(), end()));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size();
    }
    assert(oop(last)->is_oop(),
           err_msg(PTR_FORMAT " should be an object start", last));
    return last;
  }
}

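// Subclasses backed by a block offset table (e.g. OffsetTableContigSpace)
// are expected to override this with a near-constant-time lookup; the
// linear walk above is the fallback for plain contiguous spaces.
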
size_t ContiguousSpace::block_size(const HeapWord* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         err_msg("p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
                  p, bottom(), end()));
  HeapWord* current_top = top();
  assert(p <= current_top,
         err_msg("p > current top - p: " PTR_FORMAT ", current top: " PTR_FORMAT,
                  p, current_top));
  assert(p == current_top || oop(p)->is_oop(),
         err_msg("p (" PTR_FORMAT ") is not a block start - "
                 "current_top: " PTR_FORMAT ", is_oop: %s",
                 p, current_top, BOOL_TO_STR(oop(p)->is_oop())));
  if (p < current_top) {
    return oop(p)->size();
  } else {
    assert(p == current_top, "just checking");
    return pointer_delta(end(), (HeapWord*) p);
  }
}

// This version requires locking.
inline HeapWord* ContiguousSpace::allocate_impl(size_t size,
                                                HeapWord* const end_value) {
  assert(Heap_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()),
         "not locked");
  HeapWord* obj = top();
  if (pointer_delta(end_value, obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
    return obj;
  } else {
    return NULL;
  }
}

// This version is lock-free.
inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size,
                                                    HeapWord* const end_value) {
  do {
    HeapWord* obj = top();
    if (pointer_delta(end_value, obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // result can be one of two values:
      //  the old top value: the exchange succeeded
      //  otherwise: the new value of top was returned and the exchange
      //    failed, so retry with the updated top.
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}

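// A sketch of the CAS loop above under contention (a hypothetical
// interleaving, for illustration only):
//   Thread A reads top == 0x1000 and computes new_top = 0x1010.
//   Thread B reads top == 0x1000, computes new_top = 0x1008, and its
//   cmpxchg succeeds first, advancing top to 0x1008.
//   Thread A's cmpxchg then returns 0x1008 (!= 0x1000), so A retries
//   and allocates at 0x1008 on the next iteration.
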
HeapWord* ContiguousSpace::allocate_aligned(size_t size) {
  assert(Heap_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()),
         "not locked");
  HeapWord* end_value = end();

  HeapWord* obj = CollectedHeap::align_allocation_or_fail(top(), end_value, SurvivorAlignmentInBytes);
  if (obj == NULL) {
    return NULL;
  }

  if (pointer_delta(end_value, obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_ptr_aligned(obj, SurvivorAlignmentInBytes) && is_aligned(new_top),
      "checking alignment");
    return obj;
  } else {
    set_top(obj);
    return NULL;
  }
}

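// A worked example of the alignment step above (illustrative numbers,
// assuming SurvivorAlignmentInBytes == 64): if top() ends in 0x...28,
// align_allocation_or_fail() returns the next 64-byte boundary,
// 0x...40, plugging the gap with a filler object. The request is then
// satisfied at the aligned address if it still fits below end();
// otherwise top is still advanced past the filler (the set_top(obj) in
// the failure path) and NULL is returned so the caller can fall back
// to an unaligned allocation.
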
// Requires locking.
HeapWord* ContiguousSpace::allocate(size_t size) {
  return allocate_impl(size, end());
}

// Lock-free.
HeapWord* ContiguousSpace::par_allocate(size_t size) {
  return par_allocate_impl(size, end());
}

void ContiguousSpace::allocate_temporary_filler(int factor) {
  // Allocate a temporary type array that fills the free space, leaving
  // roughly 1/'factor' of it unallocated (all of it is consumed when
  // factor == 0).
  assert(factor >= 0, "just checking");
  size_t size = pointer_delta(end(), top());

  // if space is full, return
  if (size == 0) return;

  if (factor > 0) {
    size -= size/factor;
  }
  size = align_object_size(size);

  const size_t array_header_size = typeArrayOopDesc::header_size(T_INT);
  if (size >= (size_t)align_object_size(array_header_size)) {
    size_t length = (size - array_header_size) * (HeapWordSize / sizeof(jint));
    // allocate uninitialized int array
    typeArrayOop t = (typeArrayOop) allocate(size);
    assert(t != NULL, "allocation should succeed");
    t->set_mark(markOopDesc::prototype());
    t->set_klass(Universe::intArrayKlassObj());
    t->set_length((int)length);
  } else {
    assert(size == CollectedHeap::min_fill_size(),
           "size for smallest fake object doesn't match");
    instanceOop obj = (instanceOop) allocate(size);
    obj->set_mark(markOopDesc::prototype());
    obj->set_klass_gap(0);
    obj->set_klass(SystemDictionary::Object_klass());
  }
}

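// A worked sizing example for the filler above (illustrative numbers on
// a 64-bit VM): with 1000 words free and factor == 4, the filler size
// is 1000 - 1000/4 = 750 words. Assuming an int-array header of 2
// words, the array length becomes (750 - 2) * (8 / 4) = 1496 jint
// elements.
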
void EdenSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  set_soft_end(end());
}

// Requires locking.
HeapWord* EdenSpace::allocate(size_t size) {
  return allocate_impl(size, soft_end());
}

// Lock-free.
HeapWord* EdenSpace::par_allocate(size_t size) {
  return par_allocate_impl(size, soft_end());
}

HeapWord* ConcEdenSpace::par_allocate(size_t size)
{
  do {
    // The invariant is that top() is read before the soft end, because
    // top() can't be greater than end(). If an update of _soft_end
    // occurred between 'end_val = end();' and 'top_val = top();', top()
    // could have grown up to the new end() and the condition
    // 'top_val > end_val' would be true. To enforce this loading order,
    // OrderAccess::loadload() is required after the top() read.
    HeapWord* obj = top();
    OrderAccess::loadload();
    if (pointer_delta(*soft_end_addr(), obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // result can be one of two values:
      //  the old top value: the exchange succeeded
      //  otherwise: the new value of top was returned and the exchange
      //    failed, so retry with the updated top.
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}

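// An illustration of the hazard the loadload() above prevents (a
// hypothetical reordering): if the read of _soft_end were allowed to
// float above the read of top(), a concurrent update could raise
// _soft_end in between, and the stale soft end paired with a fresh,
// larger top could make pointer_delta() underflow, letting the bounds
// check pass spuriously. Reading top() first and fencing keeps the
// pair consistent.
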
HeapWord* OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* OffsetTableContigSpace::cross_threshold(HeapWord* start, HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}

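// Context for cross_threshold() (an interpretation, see forward()
// earlier in this file): it is invoked each time allocation or
// compaction fills past the next offset-table threshold, and
// alloc_block() records the block start so block_start() queries over
// this space stay cheap afterwards.
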
OffsetTableContigSpace::OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                                               MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true)
{
  _offsets.set_contig_space(this);
  initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
}

#define OBJ_SAMPLE_INTERVAL 0
#define BLOCK_SAMPLE_INTERVAL 100

void OffsetTableContigSpace::verify() const {
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  int objs = 0;
  int blocks = 0;

  if (VerifyObjectStartArray) {
    _offsets.verify();
  }

  while (p < top()) {
    size_t size = oop(p)->size();
    // For a sampling of objects in the space, find each one using the
    // block offset table.
    if (blocks == BLOCK_SAMPLE_INTERVAL) {
      guarantee(p == block_start_const(p + (size/2)),
                "check offset computation");
      blocks = 0;
    } else {
      blocks++;
    }

    if (objs == OBJ_SAMPLE_INTERVAL) {
      oop(p)->verify();
      objs = 0;
    } else {
      objs++;
    }
    prev_p = p;
    p += size;
  }
  guarantee(p == top(), "end of last object must match end of space");
}

size_t TenuredSpace::allowed_dead_ratio() const {
  return MarkSweepDeadRatio;
}
