src/share/vm/memory/space.cpp

author: aoqi
date: Wed, 27 Apr 2016 01:25:04 +0800

Initial load of http://hg.openjdk.java.net/jdk8u/jdk8u/hotspot/
changeset 6782:28b50d07f6f8 (tag: jdk8u25-b17)

/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "gc_implementation/shared/liveRange.hpp"
#include "gc_implementation/shared/markSweep.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/defNewGeneration.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/space.hpp"
#include "memory/space.inline.hpp"
#include "memory/universe.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.inline2.hpp"
#include "runtime/java.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/copy.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

void SpaceMemRegionOopsIterClosure::do_oop(oop* p)       { SpaceMemRegionOopsIterClosure::do_oop_work(p); }
void SpaceMemRegionOopsIterClosure::do_oop(narrowOop* p) { SpaceMemRegionOopsIterClosure::do_oop_work(p); }

PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top,
                                                HeapWord* top_obj) {
  if (top_obj != NULL) {
    if (_sp->block_is_obj(top_obj)) {
      if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
        if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
          // An arrayOop is starting on the dirty card - since we do exact
          // store checks for objArrays we are done.
        } else {
          // Otherwise, it is possible that the object starting on the dirty
          // card spans the entire card, and that the store happened on a
          // later card.  Figure out where the object ends.
          // Use the block_size() method of the space over which
          // the iteration is being done.  That space (e.g. CMS) may have
          // specific requirements on object sizes which will
          // be reflected in the block_size() method.
          top = top_obj + oop(top_obj)->size();
        }
      }
    } else {
      top = top_obj;
    }
  } else {
    assert(top == _sp->end(), "only case where top_obj == NULL");
  }
  return top;
}
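
// A note on the precision styles handled above: with ObjHeadPreciseArray,
// stores into object arrays dirty the card of the element actually written,
// so an array whose header starts on a dirty card need not be scanned past
// that card; for all other objects only the card holding the object's head
// is dirtied, so the object may have to be scanned to its end, as computed
// above. (typeArrays contain no oops, so they need no further scanning
// either.)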
void DirtyCardToOopClosure::walk_mem_region(MemRegion mr,
                                            HeapWord* bottom,
                                            HeapWord* top) {
  // 1. Blocks may or may not be objects.
  // 2. Even when a block_is_obj(), it may not entirely
  //    occupy the block if the block quantum is larger than
  //    the object size.
  // We can and should try to optimize by calling the non-MemRegion
  // version of oop_iterate() for all but the extremal objects
  // (for which we need to call the MemRegion version of
  // oop_iterate()). To be done post-beta XXX
  for (; bottom < top; bottom += _sp->block_size(bottom)) {
    // As in the case of contiguous space above, we'd like to
    // just use the value returned by oop_iterate to increment the
    // current pointer; unfortunately, that won't work in CMS because
    // we'd need an interface change (it seems) to have the space
    // "adjust the object size" (for instance, pad it up to its
    // block alignment or minimum block size restrictions). XXX
    if (_sp->block_is_obj(bottom) &&
        !_sp->obj_allocated_since_save_marks(oop(bottom))) {
      oop(bottom)->oop_iterate(_cl, mr);
    }
  }
}
// We get called with "mr" representing the dirty region
// that we want to process. Because of imprecise marking,
// we may need to extend the incoming "mr" to the right,
// and scan more. However, because we may already have
// scanned some of that extended region, we may need to
// trim its right end back some so we do not scan what
// we (or another worker thread) may already have scanned
// or be planning to scan.
void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) {

  // Some collectors need to do special things whenever their dirty
  // cards are processed. For instance, CMS must remember mutator updates
  // (i.e. dirty cards) so as to re-scan mutated objects.
  // Such work can be piggy-backed here on dirty card scanning, so as to make
  // it slightly more efficient than doing a complete non-destructive pre-scan
  // of the card table.
  MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure();
  if (pCl != NULL) {
    pCl->do_MemRegion(mr);
  }

  HeapWord* bottom = mr.start();
  HeapWord* last = mr.last();
  HeapWord* top = mr.end();
  HeapWord* bottom_obj;
  HeapWord* top_obj;

  assert(_precision == CardTableModRefBS::ObjHeadPreciseArray ||
         _precision == CardTableModRefBS::Precise,
         "Only ones we deal with for now.");

  assert(_precision != CardTableModRefBS::ObjHeadPreciseArray ||
         _cl->idempotent() || _last_bottom == NULL ||
         top <= _last_bottom,
         "Not decreasing");
  NOT_PRODUCT(_last_bottom = mr.start());

  bottom_obj = _sp->block_start(bottom);
  top_obj    = _sp->block_start(last);

  assert(bottom_obj <= bottom, "just checking");
  assert(top_obj    <= top,    "just checking");

  // Given what we think is the top of the memory region and
  // the start of the object at the top, get the actual
  // value of the top.
  top = get_actual_top(top, top_obj);

  // If the previous call did some part of this region, don't redo.
  if (_precision == CardTableModRefBS::ObjHeadPreciseArray &&
      _min_done != NULL &&
      _min_done < top) {
    top = _min_done;
  }

  // Top may have been reset, and in fact may be below bottom,
  // e.g. the dirty card region is entirely in a now free object
  // -- something that could happen with a concurrent sweeper.
  bottom = MIN2(bottom, top);
  MemRegion extended_mr = MemRegion(bottom, top);
  assert(bottom <= top &&
         (_precision != CardTableModRefBS::ObjHeadPreciseArray ||
          _min_done == NULL ||
          top <= _min_done),
         "overlap!");

  // Walk the region if it is not empty; otherwise there is nothing to do.
  if (!extended_mr.is_empty()) {
    walk_mem_region(extended_mr, bottom_obj, top);
  }

  // An idempotent closure might be applied in any order, so we don't
  // record a _min_done for it.
  if (!_cl->idempotent()) {
    _min_done = bottom;
  } else {
    assert(_min_done == _last_explicit_min_done,
           "Don't update _min_done for idempotent cl");
  }
}
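
// Illustrative scenario (assuming cards are processed from higher to lower
// addresses, as the "Not decreasing" assert above suggests): if a non-array
// object whose head lies on this dirty card also spans the two following
// cards, get_actual_top() extends "top" so the object's tail is scanned in
// this call, and _min_done records "bottom" so that the region for an
// earlier card, processed later, is trimmed at _min_done and the tail is
// not scanned twice.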
DirtyCardToOopClosure* Space::new_dcto_cl(ExtendedOopClosure* cl,
                                          CardTableModRefBS::PrecisionStyle precision,
                                          HeapWord* boundary) {
  return new DirtyCardToOopClosure(this, cl, precision, boundary);
}

HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top,
                                               HeapWord* top_obj) {
  if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) {
    if (_precision == CardTableModRefBS::ObjHeadPreciseArray) {
      if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) {
        // An arrayOop is starting on the dirty card - since we do exact
        // store checks for objArrays we are done.
      } else {
        // Otherwise, it is possible that the object starting on the dirty
        // card spans the entire card, and that the store happened on a
        // later card.  Figure out where the object ends.
        assert(_sp->block_size(top_obj) == (size_t) oop(top_obj)->size(),
          "Block size and object size mismatch");
        top = top_obj + oop(top_obj)->size();
      }
    }
  } else {
    top = (_sp->toContiguousSpace())->top();
  }
  return top;
}

void Filtering_DCTOC::walk_mem_region(MemRegion mr,
                                      HeapWord* bottom,
                                      HeapWord* top) {
  // Note that this assumption won't hold if we have a concurrent
  // collector in this space, which may have freed up objects after
  // they were dirtied and before the stop-the-world GC that is
  // examining cards here.
  assert(bottom < top, "ought to be at least one obj on a dirty card.");

  if (_boundary != NULL) {
    // We have a boundary outside of which we don't want to look
    // at objects, so create a filtering closure around the
    // oop closure before walking the region.
    FilteringClosure filter(_boundary, _cl);
    walk_mem_region_with_cl(mr, bottom, top, &filter);
  } else {
    // No boundary, simply walk the heap with the oop closure.
    walk_mem_region_with_cl(mr, bottom, top, _cl);
  }
}

// We must replicate this so that the static type of "FilteringClosure"
// (see above) is apparent at the oop_iterate calls.
#define ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \
void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,        \
                                                   HeapWord* bottom,    \
                                                   HeapWord* top,       \
                                                   ClosureType* cl) {   \
  bottom += oop(bottom)->oop_iterate(cl, mr);                           \
  if (bottom < top) {                                                   \
    HeapWord* next_obj = bottom + oop(bottom)->size();                  \
    while (next_obj < top) {                                            \
      /* Bottom lies entirely below top, so we can call the */          \
      /* non-memRegion version of oop_iterate below. */                 \
      oop(bottom)->oop_iterate(cl);                                     \
      bottom = next_obj;                                                \
      next_obj = bottom + oop(bottom)->size();                          \
    }                                                                   \
    /* Last object. */                                                  \
    oop(bottom)->oop_iterate(cl, mr);                                   \
  }                                                                     \
}

// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)

DirtyCardToOopClosure*
ContiguousSpace::new_dcto_cl(ExtendedOopClosure* cl,
                             CardTableModRefBS::PrecisionStyle precision,
                             HeapWord* boundary) {
  return new ContiguousSpaceDCTOC(this, cl, precision, boundary);
}
void Space::initialize(MemRegion mr,
                       bool clear_space,
                       bool mangle_space) {
  HeapWord* bottom = mr.start();
  HeapWord* end    = mr.end();
  assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end),
         "invalid space boundaries");
  set_bottom(bottom);
  set_end(end);
  if (clear_space) clear(mangle_space);
}

void Space::clear(bool mangle_space) {
  if (ZapUnusedHeapArea && mangle_space) {
    mangle_unused_area();
  }
}

ContiguousSpace::ContiguousSpace(): CompactibleSpace(), _top(NULL),
    _concurrent_iteration_safe_limit(NULL) {
  _mangler = new GenSpaceMangler(this);
}

ContiguousSpace::~ContiguousSpace() {
  delete _mangler;
}

void ContiguousSpace::initialize(MemRegion mr,
                                 bool clear_space,
                                 bool mangle_space)
{
  CompactibleSpace::initialize(mr, clear_space, mangle_space);
  set_concurrent_iteration_safe_limit(top());
}

void ContiguousSpace::clear(bool mangle_space) {
  set_top(bottom());
  set_saved_mark();
  CompactibleSpace::clear(mangle_space);
}

bool ContiguousSpace::is_in(const void* p) const {
  return _bottom <= p && p < _top;
}

bool ContiguousSpace::is_free_block(const HeapWord* p) const {
  return p >= _top;
}
void OffsetTableContigSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  _offsets.initialize_threshold();
}

void OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) {
  Space::set_bottom(new_bottom);
  _offsets.set_bottom(new_bottom);
}

void OffsetTableContigSpace::set_end(HeapWord* new_end) {
  // The space should not advertise an increase in size
  // until after the underlying offset table has been enlarged.
  _offsets.resize(pointer_delta(new_end, bottom()));
  Space::set_end(new_end);
}
#ifndef PRODUCT

void ContiguousSpace::set_top_for_allocations(HeapWord* v) {
  mangler()->set_top_for_allocations(v);
}
void ContiguousSpace::set_top_for_allocations() {
  mangler()->set_top_for_allocations(top());
}
void ContiguousSpace::check_mangled_unused_area(HeapWord* limit) {
  mangler()->check_mangled_unused_area(limit);
}

void ContiguousSpace::check_mangled_unused_area_complete() {
  mangler()->check_mangled_unused_area_complete();
}

// Mangle only the unused space that has not previously
// been mangled and that has not been allocated since being
// mangled.
void ContiguousSpace::mangle_unused_area() {
  mangler()->mangle_unused_area();
}
void ContiguousSpace::mangle_unused_area_complete() {
  mangler()->mangle_unused_area_complete();
}
void ContiguousSpace::mangle_region(MemRegion mr) {
  // Although this method uses SpaceMangler::mangle_region(), which
  // is not specific to a space, when the ContiguousSpace version
  // is called it is always with regard to a space, so this
  // bounds check is appropriate.
  MemRegion space_mr(bottom(), end());
  assert(space_mr.contains(mr), "Mangling outside space");
  SpaceMangler::mangle_region(mr);
}
#endif  // NOT_PRODUCT
void CompactibleSpace::initialize(MemRegion mr,
                                  bool clear_space,
                                  bool mangle_space) {
  Space::initialize(mr, clear_space, mangle_space);
  set_compaction_top(bottom());
  _next_compaction_space = NULL;
}

void CompactibleSpace::clear(bool mangle_space) {
  Space::clear(mangle_space);
  _compaction_top = bottom();
}

HeapWord* CompactibleSpace::forward(oop q, size_t size,
                                    CompactPoint* cp, HeapWord* compact_top) {
  // q is alive
  // First check if we should switch compaction space
  assert(this == cp->space, "'this' should be current compaction space.");
  size_t compaction_max_size = pointer_delta(end(), compact_top);
  while (size > compaction_max_size) {
    // switch to next compaction space
    cp->space->set_compaction_top(compact_top);
    cp->space = cp->space->next_compaction_space();
    if (cp->space == NULL) {
      cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
      assert(cp->gen != NULL, "compaction must succeed");
      cp->space = cp->gen->first_compaction_space();
      assert(cp->space != NULL, "generation must have a first compaction space");
    }
    compact_top = cp->space->bottom();
    cp->space->set_compaction_top(compact_top);
    cp->threshold = cp->space->initialize_threshold();
    compaction_max_size = pointer_delta(cp->space->end(), compact_top);
  }

  // store the forwarding pointer into the mark word
  if ((HeapWord*)q != compact_top) {
    q->forward_to(oop(compact_top));
    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
  } else {
    // if the object isn't moving we can just set the mark to the default
    // mark and handle it specially later on.
    q->init_mark();
    assert(q->forwardee() == NULL, "should be forwarded to NULL");
  }

  compact_top += size;

  // we need to update the offset table so that the beginnings of objects can be
  // found during scavenge.  Note that we are updating the offset table based on
  // where the object will be once the compaction phase finishes.
  if (compact_top > cp->threshold)
    cp->threshold =
      cp->space->cross_threshold(compact_top - size, compact_top);
  return compact_top;
}

bool CompactibleSpace::insert_deadspace(size_t& allowed_deadspace_words,
                                        HeapWord* q, size_t deadlength) {
  if (allowed_deadspace_words >= deadlength) {
    allowed_deadspace_words -= deadlength;
    CollectedHeap::fill_with_object(q, deadlength);
    oop(q)->set_mark(oop(q)->mark()->set_marked());
    assert((int) deadlength == oop(q)->size(), "bad filler object size");
    // Recall that we required "q == compaction_top".
    return true;
  } else {
    allowed_deadspace_words = 0;
    return false;
  }
}

#define block_is_always_obj(q) true
#define obj_size(q) oop(q)->size()
#define adjust_obj_size(s) s

void CompactibleSpace::prepare_for_compaction(CompactPoint* cp) {
  SCAN_AND_FORWARD(cp, end, block_is_obj, block_size);
}

// Faster object search.
void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) {
  SCAN_AND_FORWARD(cp, top, block_is_always_obj, obj_size);
}
void Space::adjust_pointers() {
  // adjust all the interior pointers to point at the new locations of objects
  // Used by MarkSweep::mark_sweep_phase3()

  // First check to see if there is any work to be done.
  if (used() == 0) {
    return;  // Nothing to do.
  }

  // Otherwise...
  HeapWord* q = bottom();
  HeapWord* t = end();

  debug_only(HeapWord* prev_q = NULL);
  while (q < t) {
    if (oop(q)->is_gc_marked()) {
      // q is alive

      // point all the oops to the new location
      size_t size = oop(q)->adjust_pointers();

      debug_only(prev_q = q);

      q += size;
    } else {
      // q is not a live object, but we're not in a compactible space,
      // so we don't have live ranges.
      debug_only(prev_q = q);
      q += block_size(q);
      assert(q > prev_q, "we should be moving forward through memory");
    }
  }
  assert(q == t, "just checking");
}
void CompactibleSpace::adjust_pointers() {
  // Check first if there is any work to do.
  if (used() == 0) {
    return;   // Nothing to do.
  }

  SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
}

void CompactibleSpace::compact() {
  SCAN_AND_COMPACT(obj_size);
}
void Space::print_short() const { print_short_on(tty); }

void Space::print_short_on(outputStream* st) const {
  st->print(" space " SIZE_FORMAT "K, %3d%% used", capacity() / K,
              (int) ((double) used() * 100 / capacity()));
}

void Space::print() const { print_on(tty); }

void Space::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                bottom(), end());
}

void ContiguousSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")",
                bottom(), top(), end());
}

void OffsetTableContigSpace::print_on(outputStream* st) const {
  print_short_on(st);
  st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", "
                INTPTR_FORMAT ", " INTPTR_FORMAT ")",
              bottom(), top(), _offsets.threshold(), end());
}

void ContiguousSpace::verify() const {
  HeapWord* p = bottom();
  HeapWord* t = top();
  HeapWord* prev_p = NULL;
  while (p < t) {
    oop(p)->verify();
    prev_p = p;
    p += oop(p)->size();
  }
  guarantee(p == top(), "end of last object must match end of space");
  if (top() != end()) {
    guarantee(top() == block_start_const(end()-1) &&
              top() == block_start_const(top()),
              "top should be start of unallocated block, if it exists");
  }
}

void Space::oop_iterate(ExtendedOopClosure* blk) {
  ObjectToOopClosure blk2(blk);
  object_iterate(&blk2);
}

HeapWord* Space::object_iterate_careful(ObjectClosureCareful* cl) {
  guarantee(false, "NYI");
  return bottom();
}

HeapWord* Space::object_iterate_careful_m(MemRegion mr,
                                          ObjectClosureCareful* cl) {
  guarantee(false, "NYI");
  return bottom();
}
void Space::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
  assert(!mr.is_empty(), "Should be non-empty");
  // We use MemRegion(bottom(), end()) rather than used_region() below
  // because the two are not necessarily equal for some kinds of
  // spaces, in particular, certain kinds of free list spaces.
  // We could use the more complicated but more precise:
  // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
  // but the slight imprecision seems acceptable in the assertion check.
  assert(MemRegion(bottom(), end()).contains(mr),
         "Should be within used space");
  HeapWord* prev = cl->previous();   // max address from last time
  if (prev >= mr.end()) { // nothing to do
    return;
  }
  // This assert will not work when we go from cms space to perm
  // space, and use same closure. Easy fix deferred for later. XXX YSR
  // assert(prev == NULL || contains(prev), "Should be within space");

  bool last_was_obj_array = false;
  HeapWord *blk_start_addr, *region_start_addr;
  if (prev > mr.start()) {
    region_start_addr = prev;
    blk_start_addr    = prev;
    // The previous invocation may have pushed "prev" beyond the
    // last allocated block, yet there may still be blocks
    // in this region due to a particular coalescing policy.
    // Relax the assertion so that the case where the unallocated
    // block is maintained and "prev" is beyond the unallocated
    // block does not cause the assertion to fire.
    assert((BlockOffsetArrayUseUnallocatedBlock &&
            (!is_in(prev))) ||
           (blk_start_addr == block_start(region_start_addr)), "invariant");
  } else {
    region_start_addr = mr.start();
    blk_start_addr    = block_start(region_start_addr);
  }
  HeapWord* region_end_addr = mr.end();
  MemRegion derived_mr(region_start_addr, region_end_addr);
  while (blk_start_addr < region_end_addr) {
    const size_t size = block_size(blk_start_addr);
    if (block_is_obj(blk_start_addr)) {
      last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
    } else {
      last_was_obj_array = false;
    }
    blk_start_addr += size;
  }
  if (!last_was_obj_array) {
    assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
           "Should be within (closed) used space");
    assert(blk_start_addr > prev, "Invariant");
    cl->set_previous(blk_start_addr); // min address for next time
  }
}
bool Space::obj_is_alive(const HeapWord* p) const {
  assert(block_is_obj(p), "The address should point to an object");
  return true;
}

void ContiguousSpace::object_iterate_mem(MemRegion mr, UpwardsObjectClosure* cl) {
  assert(!mr.is_empty(), "Should be non-empty");
  assert(used_region().contains(mr), "Should be within used space");
  HeapWord* prev = cl->previous();   // max address from last time
  if (prev >= mr.end()) { // nothing to do
    return;
  }
  // See comment above (in more general method above) in case you
  // happen to use this method.
  assert(prev == NULL || is_in_reserved(prev), "Should be within space");

  bool last_was_obj_array = false;
  HeapWord *obj_start_addr, *region_start_addr;
  if (prev > mr.start()) {
    region_start_addr = prev;
    obj_start_addr    = prev;
    assert(obj_start_addr == block_start(region_start_addr), "invariant");
  } else {
    region_start_addr = mr.start();
    obj_start_addr    = block_start(region_start_addr);
  }
  HeapWord* region_end_addr = mr.end();
  MemRegion derived_mr(region_start_addr, region_end_addr);
  while (obj_start_addr < region_end_addr) {
    oop obj = oop(obj_start_addr);
    const size_t size = obj->size();
    last_was_obj_array = cl->do_object_bm(obj, derived_mr);
    obj_start_addr += size;
  }
  if (!last_was_obj_array) {
    assert((bottom() <= obj_start_addr) && (obj_start_addr <= end()),
           "Should be within (closed) used space");
    assert(obj_start_addr > prev, "Invariant");
    cl->set_previous(obj_start_addr); // min address for next time
  }
}
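
// In the ContiguousSpace variant above, every block is an object, so the
// general block_is_obj()/block_size() probing done by the Space version
// reduces to stepping from one object to the next by its size.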
#if INCLUDE_ALL_GCS
#define ContigSpace_PAR_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)         \
                                                                            \
  void ContiguousSpace::par_oop_iterate(MemRegion mr, OopClosureType* blk) {\
    HeapWord* obj_addr = mr.start();                                        \
    HeapWord* t = mr.end();                                                 \
    while (obj_addr < t) {                                                  \
      assert(oop(obj_addr)->is_oop(), "Should be an oop");                  \
      obj_addr += oop(obj_addr)->oop_iterate(blk);                          \
    }                                                                       \
  }

  ALL_PAR_OOP_ITERATE_CLOSURES(ContigSpace_PAR_OOP_ITERATE_DEFN)

#undef ContigSpace_PAR_OOP_ITERATE_DEFN
#endif // INCLUDE_ALL_GCS
void ContiguousSpace::oop_iterate(ExtendedOopClosure* blk) {
  if (is_empty()) return;
  HeapWord* obj_addr = bottom();
  HeapWord* t = top();
  // Could call object_iterate(), but this is easier.
  while (obj_addr < t) {
    obj_addr += oop(obj_addr)->oop_iterate(blk);
  }
}
void ContiguousSpace::oop_iterate(MemRegion mr, ExtendedOopClosure* blk) {
  if (is_empty()) {
    return;
  }
  MemRegion cur = MemRegion(bottom(), top());
  mr = mr.intersection(cur);
  if (mr.is_empty()) {
    return;
  }
  if (mr.equals(cur)) {
    oop_iterate(blk);
    return;
  }
  assert(mr.end() <= top(), "just took an intersection above");
  HeapWord* obj_addr = block_start(mr.start());
  HeapWord* t = mr.end();

  // Handle first object specially.
  oop obj = oop(obj_addr);
  SpaceMemRegionOopsIterClosure smr_blk(blk, mr);
  obj_addr += obj->oop_iterate(&smr_blk);
  while (obj_addr < t) {
    oop obj = oop(obj_addr);
    assert(obj->is_oop(), "expected an oop");
    obj_addr += obj->size();
    // If "obj_addr" is not greater than top, then the
    // entire object "obj" is within the region.
    if (obj_addr <= t) {
      obj->oop_iterate(blk);
    } else {
      // "obj" extends beyond end of region
      obj->oop_iterate(&smr_blk);
      break;
    }
  }
}
void ContiguousSpace::object_iterate(ObjectClosure* blk) {
  if (is_empty()) return;
  WaterMark bm = bottom_mark();
  object_iterate_from(bm, blk);
}

// For a contiguous space object_iterate() and safe_object_iterate()
// are the same.
void ContiguousSpace::safe_object_iterate(ObjectClosure* blk) {
  object_iterate(blk);
}
void ContiguousSpace::object_iterate_from(WaterMark mark, ObjectClosure* blk) {
  assert(mark.space() == this, "Mark does not match space");
  HeapWord* p = mark.point();
  while (p < top()) {
    blk->do_object(oop(p));
    p += oop(p)->size();
  }
}

HeapWord*
ContiguousSpace::object_iterate_careful(ObjectClosureCareful* blk) {
  HeapWord* limit = concurrent_iteration_safe_limit();
  assert(limit <= top(), "sanity check");
  for (HeapWord* p = bottom(); p < limit;) {
    size_t size = blk->do_object_careful(oop(p));
    if (size == 0) {
      return p;  // failed at p
    } else {
      p += size;
    }
  }
  return NULL; // all done
}
#define ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)  \
                                                                          \
void ContiguousSpace::                                                    \
oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {            \
  HeapWord* t;                                                            \
  HeapWord* p = saved_mark_word();                                        \
  assert(p != NULL, "expected saved mark");                               \
                                                                          \
  const intx interval = PrefetchScanIntervalInBytes;                      \
  do {                                                                    \
    t = top();                                                            \
    while (p < t) {                                                       \
      Prefetch::write(p, interval);                                       \
      debug_only(HeapWord* prev = p);                                     \
      oop m = oop(p);                                                     \
      p += m->oop_iterate(blk);                                           \
    }                                                                     \
  } while (t < top());                                                    \
                                                                          \
  set_saved_mark_word(p);                                                 \
}

ALL_SINCE_SAVE_MARKS_CLOSURES(ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN)

#undef ContigSpace_OOP_SINCE_SAVE_MARKS_DEFN
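
// The do/while in the macro above re-reads top() on every pass because the
// closure may itself allocate in this space (e.g. objects promoted here
// during a scavenge), advancing top; iteration stops only when no new
// objects have appeared above the point already scanned.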
// Very general, slow implementation.
HeapWord* ContiguousSpace::block_start_const(const void* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         err_msg("p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
                  p, bottom(), end()));
  if (p >= top()) {
    return top();
  } else {
    HeapWord* last = bottom();
    HeapWord* cur = last;
    while (cur <= p) {
      last = cur;
      cur += oop(cur)->size();
    }
    assert(oop(last)->is_oop(),
           err_msg(PTR_FORMAT " should be an object start", last));
    return last;
  }
}

size_t ContiguousSpace::block_size(const HeapWord* p) const {
  assert(MemRegion(bottom(), end()).contains(p),
         err_msg("p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")",
                  p, bottom(), end()));
  HeapWord* current_top = top();
  assert(p <= current_top,
         err_msg("p > current top - p: " PTR_FORMAT ", current top: " PTR_FORMAT,
                  p, current_top));
  assert(p == current_top || oop(p)->is_oop(),
         err_msg("p (" PTR_FORMAT ") is not a block start - "
                 "current_top: " PTR_FORMAT ", is_oop: %s",
                 p, current_top, BOOL_TO_STR(oop(p)->is_oop())));
  if (p < current_top) {
    return oop(p)->size();
  } else {
    assert(p == current_top, "just checking");
    return pointer_delta(end(), (HeapWord*) p);
  }
}
// This version requires locking.
inline HeapWord* ContiguousSpace::allocate_impl(size_t size,
                                                HeapWord* const end_value) {
  // In G1 there are places where a GC worker can allocate into a
  // region using this serial allocation code without being prone to a
  // race with other GC workers (we ensure that no other GC worker can
  // access the same region at the same time). So the assert below is
  // too strong in the case of G1.
  assert(Heap_lock->owned_by_self() ||
         (SafepointSynchronize::is_at_safepoint() &&
                               (Thread::current()->is_VM_thread() || UseG1GC)),
         "not locked");
  HeapWord* obj = top();
  if (pointer_delta(end_value, obj) >= size) {
    HeapWord* new_top = obj + size;
    set_top(new_top);
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
    return obj;
  } else {
    return NULL;
  }
}
// This version is lock-free.
inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size,
                                                    HeapWord* const end_value) {
  do {
    HeapWord* obj = top();
    if (pointer_delta(end_value, obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // If the exchange succeeded, result is the old top value, i.e. obj;
      // otherwise result is the value of top installed by some other
      // thread, and we retry with a freshly read top.
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}
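
// This is the standard CAS-based bump-pointer allocation pattern: read top,
// compute the new top, and publish it with a single compare-and-swap,
// retrying on contention. A NULL return means the space is exhausted, and
// callers fall back to a slower path, e.g. (illustrative only):
//
//   HeapWord* p = space->par_allocate(word_size);
//   if (p == NULL) {
//     // hypothetical caller policy: expand the space or trigger a
//     // collection, then retry the allocation
//   }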
// Requires locking.
HeapWord* ContiguousSpace::allocate(size_t size) {
  return allocate_impl(size, end());
}

// Lock-free.
HeapWord* ContiguousSpace::par_allocate(size_t size) {
  return par_allocate_impl(size, end());
}
void ContiguousSpace::allocate_temporary_filler(int factor) {
  // Allocate a temporary filler array that consumes all but roughly
  // 1/factor of the current free space (all of it if factor == 0).
  assert(factor >= 0, "just checking");
  size_t size = pointer_delta(end(), top());

  // if space is full, return
  if (size == 0) return;

  if (factor > 0) {
    size -= size/factor;
  }
  size = align_object_size(size);

  const size_t array_header_size = typeArrayOopDesc::header_size(T_INT);
  if (size >= (size_t)align_object_size(array_header_size)) {
    size_t length = (size - array_header_size) * (HeapWordSize / sizeof(jint));
    // allocate uninitialized int array
    typeArrayOop t = (typeArrayOop) allocate(size);
    assert(t != NULL, "allocation should succeed");
    t->set_mark(markOopDesc::prototype());
    t->set_klass(Universe::intArrayKlassObj());
    t->set_length((int)length);
  } else {
    assert(size == CollectedHeap::min_fill_size(),
           "size for smallest fake object doesn't match");
    instanceOop obj = (instanceOop) allocate(size);
    obj->set_mark(markOopDesc::prototype());
    obj->set_klass_gap(0);
    obj->set_klass(SystemDictionary::Object_klass());
  }
}
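
// Worked example of the sizing above: with 1024 free words and factor == 2
// the filler occupies 512 words and leaves 512 free; with factor == 4 it
// occupies 768 words and leaves 256 free; with factor == 0 it fills the
// entire free space. The int-array length is whatever word count remains
// after the array header, converted to a jint element count.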
void EdenSpace::clear(bool mangle_space) {
  ContiguousSpace::clear(mangle_space);
  set_soft_end(end());
}

// Requires locking.
HeapWord* EdenSpace::allocate(size_t size) {
  return allocate_impl(size, soft_end());
}

// Lock-free.
HeapWord* EdenSpace::par_allocate(size_t size) {
  return par_allocate_impl(size, soft_end());
}
HeapWord* ConcEdenSpace::par_allocate(size_t size)
{
  do {
    // The invariant is that top() is read before the soft end: top()
    // can never be greater than the soft end, but if the soft end were
    // read first and _soft_end were updated in between, top() could
    // advance past the stale soft-end value already read, making
    // 'top_val > end_val' observable. OrderAccess::loadload() after the
    // top() read enforces this load ordering.
    HeapWord* obj = top();
    OrderAccess::loadload();
    if (pointer_delta(*soft_end_addr(), obj) >= size) {
      HeapWord* new_top = obj + size;
      HeapWord* result = (HeapWord*)Atomic::cmpxchg_ptr(new_top, top_addr(), obj);
      // If the exchange succeeded, result is the old top value, i.e. obj;
      // otherwise another thread advanced top first and we retry.
      if (result == obj) {
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
        return obj;
      }
    } else {
      return NULL;
    }
  } while (true);
}
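
// ConcEdenSpace differs from EdenSpace::par_allocate() in that the soft end
// may move concurrently with allocation (per the comment above), so the
// bound is re-read through soft_end_addr() on every retry and the load
// ordering against top() matters; EdenSpace reads soft_end() once per call
// instead.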
HeapWord* OffsetTableContigSpace::initialize_threshold() {
  return _offsets.initialize_threshold();
}

HeapWord* OffsetTableContigSpace::cross_threshold(HeapWord* start, HeapWord* end) {
  _offsets.alloc_block(start, end);
  return _offsets.threshold();
}
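
// The offset table maintained here is what makes block_start() fast for
// this space: cross_threshold() records where a block that crosses a
// card-sized threshold begins, so card scanning can later locate the first
// object intersecting a card without walking the space from bottom().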
OffsetTableContigSpace::OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray,
                                               MemRegion mr) :
  _offsets(sharedOffsetArray, mr),
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true)
{
  _offsets.set_contig_space(this);
  initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
}
#define OBJ_SAMPLE_INTERVAL 0
#define BLOCK_SAMPLE_INTERVAL 100

void OffsetTableContigSpace::verify() const {
  HeapWord* p = bottom();
  HeapWord* prev_p = NULL;
  int objs = 0;
  int blocks = 0;

  if (VerifyObjectStartArray) {
    _offsets.verify();
  }

  while (p < top()) {
    size_t size = oop(p)->size();
    // For a sampling of objects in the space, find it using the
    // block offset table.
    if (blocks == BLOCK_SAMPLE_INTERVAL) {
      guarantee(p == block_start_const(p + (size/2)),
                "check offset computation");
      blocks = 0;
    } else {
      blocks++;
    }

    if (objs == OBJ_SAMPLE_INTERVAL) {
      oop(p)->verify();
      objs = 0;
    } else {
      objs++;
    }

    prev_p = p;
    p += size;
  }
  guarantee(p == top(), "end of last object must match end of space");
}
size_t TenuredSpace::allowed_dead_ratio() const {
  return MarkSweepDeadRatio;
}
