Merge

changeset   2956:cfcf2ba8f3eb
author      never
date        Wed, 15 Jun 2011 10:20:03 -0700
parents     2955:e2ce15aa3daf
            2948:ae1d716e395c
children    2957:e2af886d540b
            2978:d83ac25d0304
            3012:1744e37e032b
files       src/share/vm/prims/methodHandleWalk.cpp
     1.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Tue Jun 14 15:20:55 2011 -0700
     1.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp	Wed Jun 15 10:20:03 2011 -0700
     1.3 @@ -1,5 +1,5 @@
     1.4  /*
     1.5 - * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
     1.6 + * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
     1.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.8   *
     1.9   * This code is free software; you can redistribute it and/or modify it
    1.10 @@ -407,6 +407,11 @@
    1.11    void save_sweep_limit() {
    1.12      _sweep_limit = BlockOffsetArrayUseUnallocatedBlock ?
    1.13                     unallocated_block() : end();
    1.14 +    if (CMSTraceSweeper) {
    1.15 +      gclog_or_tty->print_cr(">>>>> Saving sweep limit " PTR_FORMAT
    1.16 +                             "  for space [" PTR_FORMAT "," PTR_FORMAT ") <<<<<<",
    1.17 +                             _sweep_limit, bottom(), end());
    1.18 +    }
    1.19    }
    1.20    NOT_PRODUCT(
    1.21      void clear_sweep_limit() { _sweep_limit = NULL; }
     2.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Jun 14 15:20:55 2011 -0700
     2.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Wed Jun 15 10:20:03 2011 -0700
     2.3 @@ -7888,60 +7888,64 @@
     2.4    assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
     2.5           "sweep _limit out of bounds");
     2.6    if (CMSTraceSweeper) {
     2.7 -    gclog_or_tty->print("\n====================\nStarting new sweep\n");
     2.8 -  }
     2.9 -}
    2.10 -
    2.11 -// We need this destructor to reclaim any space at the end
    2.12 -// of the space, which do_blk below may not yet have added back to
    2.13 -// the free lists.
    2.14 +    gclog_or_tty->print_cr("\n====================\nStarting new sweep with limit " PTR_FORMAT,
    2.15 +                        _limit);
    2.16 +  }
    2.17 +}
    2.18 +
    2.19 +void SweepClosure::print_on(outputStream* st) const {
    2.20 +  tty->print_cr("_sp = [" PTR_FORMAT "," PTR_FORMAT ")",
    2.21 +                _sp->bottom(), _sp->end());
    2.22 +  tty->print_cr("_limit = " PTR_FORMAT, _limit);
    2.23 +  tty->print_cr("_freeFinger = " PTR_FORMAT, _freeFinger);
    2.24 +  NOT_PRODUCT(tty->print_cr("_last_fc = " PTR_FORMAT, _last_fc);)
    2.25 +  tty->print_cr("_inFreeRange = %d, _freeRangeInFreeLists = %d, _lastFreeRangeCoalesced = %d",
    2.26 +                _inFreeRange, _freeRangeInFreeLists, _lastFreeRangeCoalesced);
    2.27 +}
    2.28 +
    2.29 +#ifndef PRODUCT
    2.30 +// Assertion checking only:  no useful work in product mode --
    2.31 +// however, if any of the flags below become product flags,
    2.32 +// you may need to review this code to see if it needs to be
    2.33 +// enabled in product mode.
    2.34  SweepClosure::~SweepClosure() {
    2.35    assert_lock_strong(_freelistLock);
    2.36    assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
    2.37           "sweep _limit out of bounds");
    2.38 -  // Flush any remaining coterminal free run as a single
    2.39 -  // coalesced chunk to the appropriate free list.
    2.40    if (inFreeRange()) {
    2.41 -    assert(freeFinger() < _limit, "freeFinger points too high");
    2.42 -    flush_cur_free_chunk(freeFinger(), pointer_delta(_limit, freeFinger()));
    2.43 -    if (CMSTraceSweeper) {
    2.44 -      gclog_or_tty->print("Sweep: last chunk: ");
    2.45 -      gclog_or_tty->print("put_free_blk 0x%x ("SIZE_FORMAT") [coalesced:"SIZE_FORMAT"]\n",
    2.46 -                          freeFinger(), pointer_delta(_limit, freeFinger()), lastFreeRangeCoalesced());
    2.47 -    }
    2.48 -  } // else nothing to flush
    2.49 -  NOT_PRODUCT(
    2.50 -    if (Verbose && PrintGC) {
    2.51 -      gclog_or_tty->print("Collected "SIZE_FORMAT" objects, "
    2.52 -                          SIZE_FORMAT " bytes",
    2.53 -                 _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
    2.54 -      gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects,  "
    2.55 -                             SIZE_FORMAT" bytes  "
    2.56 -        "Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes",
    2.57 -        _numObjectsLive, _numWordsLive*sizeof(HeapWord),
    2.58 -        _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
    2.59 -      size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree) *
    2.60 -        sizeof(HeapWord);
    2.61 -      gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes);
    2.62 -
    2.63 -      if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
    2.64 -        size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
    2.65 -        size_t dictReturnedBytes = _sp->dictionary()->sumDictReturnedBytes();
    2.66 -        size_t returnedBytes = indexListReturnedBytes + dictReturnedBytes;
    2.67 -        gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returnedBytes);
    2.68 -        gclog_or_tty->print("   Indexed List Returned "SIZE_FORMAT" bytes",
    2.69 -          indexListReturnedBytes);
    2.70 -        gclog_or_tty->print_cr("        Dictionary Returned "SIZE_FORMAT" bytes",
    2.71 -          dictReturnedBytes);
    2.72 -      }
    2.73 -    }
    2.74 -  )
    2.75 -  // Now, in debug mode, just null out the sweep_limit
    2.76 -  NOT_PRODUCT(_sp->clear_sweep_limit();)
    2.77 +    warning("inFreeRange() should have been reset; dumping state of SweepClosure");
    2.78 +    print();
    2.79 +    ShouldNotReachHere();
    2.80 +  }
    2.81 +  if (Verbose && PrintGC) {
    2.82 +    gclog_or_tty->print("Collected "SIZE_FORMAT" objects, " SIZE_FORMAT " bytes",
    2.83 +                        _numObjectsFreed, _numWordsFreed*sizeof(HeapWord));
    2.84 +    gclog_or_tty->print_cr("\nLive "SIZE_FORMAT" objects,  "
    2.85 +                           SIZE_FORMAT" bytes  "
    2.86 +      "Already free "SIZE_FORMAT" objects, "SIZE_FORMAT" bytes",
    2.87 +      _numObjectsLive, _numWordsLive*sizeof(HeapWord),
    2.88 +      _numObjectsAlreadyFree, _numWordsAlreadyFree*sizeof(HeapWord));
    2.89 +    size_t totalBytes = (_numWordsFreed + _numWordsLive + _numWordsAlreadyFree)
    2.90 +                        * sizeof(HeapWord);
    2.91 +    gclog_or_tty->print_cr("Total sweep: "SIZE_FORMAT" bytes", totalBytes);
    2.92 +
    2.93 +    if (PrintCMSStatistics && CMSVerifyReturnedBytes) {
    2.94 +      size_t indexListReturnedBytes = _sp->sumIndexedFreeListArrayReturnedBytes();
    2.95 +      size_t dictReturnedBytes = _sp->dictionary()->sumDictReturnedBytes();
    2.96 +      size_t returnedBytes = indexListReturnedBytes + dictReturnedBytes;
    2.97 +      gclog_or_tty->print("Returned "SIZE_FORMAT" bytes", returnedBytes);
    2.98 +      gclog_or_tty->print("   Indexed List Returned "SIZE_FORMAT" bytes",
    2.99 +        indexListReturnedBytes);
   2.100 +      gclog_or_tty->print_cr("        Dictionary Returned "SIZE_FORMAT" bytes",
   2.101 +        dictReturnedBytes);
   2.102 +    }
   2.103 +  }
   2.104    if (CMSTraceSweeper) {
   2.105 -    gclog_or_tty->print("end of sweep\n================\n");
   2.106 -  }
   2.107 -}
   2.108 +    gclog_or_tty->print_cr("end of sweep with _limit = " PTR_FORMAT "\n================",
   2.109 +                           _limit);
   2.110 +  }
   2.111 +}
   2.112 +#endif  // PRODUCT
   2.113  
   2.114  void SweepClosure::initialize_free_range(HeapWord* freeFinger,
   2.115      bool freeRangeInFreeLists) {
   2.116 @@ -8001,15 +8005,17 @@
   2.117    // we started the sweep, it may no longer be one because heap expansion
   2.118    // may have caused us to coalesce the block ending at the address _limit
   2.119    // with a newly expanded chunk (this happens when _limit was set to the
   2.120 -  // previous _end of the space), so we may have stepped past _limit; see CR 6977970.
   2.121 +  // previous _end of the space), so we may have stepped past _limit:
   2.122 +  // see the following Zeno-like trail of CRs 6977970, 7008136, 7042740.
   2.123    if (addr >= _limit) { // we have swept up to or past the limit: finish up
   2.124      assert(_limit >= _sp->bottom() && _limit <= _sp->end(),
   2.125             "sweep _limit out of bounds");
   2.126      assert(addr < _sp->end(), "addr out of bounds");
   2.127 -    // Flush any remaining coterminal free run as a single
   2.128 +    // Flush any free range we might be holding as a single
   2.129      // coalesced chunk to the appropriate free list.
   2.130      if (inFreeRange()) {
   2.131 -      assert(freeFinger() < _limit, "finger points too high");
   2.132 +      assert(freeFinger() >= _sp->bottom() && freeFinger() < _limit,
   2.133 +             err_msg("freeFinger() " PTR_FORMAT" is out-of-bounds", freeFinger()));
   2.134        flush_cur_free_chunk(freeFinger(),
   2.135                             pointer_delta(addr, freeFinger()));
   2.136        if (CMSTraceSweeper) {
   2.137 @@ -8033,7 +8039,16 @@
   2.138      res = fc->size();
   2.139      do_already_free_chunk(fc);
   2.140      debug_only(_sp->verifyFreeLists());
   2.141 -    assert(res == fc->size(), "Don't expect the size to change");
   2.142 +    // If we flush the chunk at hand in lookahead_and_flush()
   2.143 +    // and it's coalesced with a preceding chunk, then the
   2.144 +    // process of "mangling" the payload of the coalesced block
   2.145 +    // will cause erasure of the size information from the
   2.146 +    // (erstwhile) header of all the coalesced blocks but the
   2.147 +    // first, so the first disjunct in the assert will not hold
   2.148 +    // in that specific case (in which case the second disjunct
   2.149 +    // will hold).
   2.150 +    assert(res == fc->size() || ((HeapWord*)fc) + res >= _limit,
   2.151 +           "Otherwise the size info doesn't change at this step");
   2.152      NOT_PRODUCT(
   2.153        _numObjectsAlreadyFree++;
   2.154        _numWordsAlreadyFree += res;
   2.155 @@ -8103,7 +8118,7 @@
   2.156  //
   2.157  
   2.158  void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
   2.159 -  size_t size = fc->size();
   2.160 +  const size_t size = fc->size();
   2.161    // Chunks that cannot be coalesced are not in the
   2.162    // free lists.
   2.163    if (CMSTestInFreeList && !fc->cantCoalesce()) {
   2.164 @@ -8112,7 +8127,7 @@
   2.165    }
   2.166    // a chunk that is already free, should not have been
   2.167    // marked in the bit map
   2.168 -  HeapWord* addr = (HeapWord*) fc;
   2.169 +  HeapWord* const addr = (HeapWord*) fc;
   2.170    assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
   2.171    // Verify that the bit map has no bits marked between
   2.172    // addr and purported end of this block.
   2.173 @@ -8149,7 +8164,7 @@
   2.174          }
   2.175        } else {
   2.176          // the midst of a free range, we are coalescing
   2.177 -        debug_only(record_free_block_coalesced(fc);)
   2.178 +        print_free_block_coalesced(fc);
   2.179          if (CMSTraceSweeper) {
   2.180            gclog_or_tty->print("  -- pick up free block 0x%x (%d)\n", fc, size);
   2.181          }
   2.182 @@ -8173,6 +8188,10 @@
   2.183          }
   2.184        }
   2.185      }
   2.186 +    // Note that if the chunk is not coalescable (the else arm
   2.187 +    // below), we unconditionally flush, without needing to do
   2.188 +    // a "lookahead," as we do below.
   2.189 +    if (inFreeRange()) lookahead_and_flush(fc, size);
   2.190    } else {
   2.191      // Code path common to both original and adaptive free lists.
   2.192  
   2.193 @@ -8191,8 +8210,8 @@
   2.194    // This is a chunk of garbage.  It is not in any free list.
   2.195    // Add it to a free list or let it possibly be coalesced into
   2.196    // a larger chunk.
   2.197 -  HeapWord* addr = (HeapWord*) fc;
   2.198 -  size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
   2.199 +  HeapWord* const addr = (HeapWord*) fc;
   2.200 +  const size_t size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
   2.201  
   2.202    if (_sp->adaptive_freelists()) {
   2.203      // Verify that the bit map has no bits marked between
   2.204 @@ -8205,7 +8224,6 @@
   2.205        // start of a new free range
   2.206        assert(size > 0, "A free range should have a size");
   2.207        initialize_free_range(addr, false);
   2.208 -
   2.209      } else {
   2.210        // this will be swept up when we hit the end of the
   2.211        // free range
   2.212 @@ -8235,6 +8253,9 @@
   2.213      // addr and purported end of just dead object.
   2.214      _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
   2.215    }
   2.216 +  assert(_limit >= addr + size,
   2.217 +         "A freshly garbage chunk can't possibly straddle over _limit");
   2.218 +  if (inFreeRange()) lookahead_and_flush(fc, size);
   2.219    return size;
   2.220  }
   2.221  
   2.222 @@ -8284,8 +8305,8 @@
   2.223             (!_collector->should_unload_classes()
   2.224              || oop(addr)->is_parsable()),
   2.225             "Should be an initialized object");
   2.226 -    // Note that there are objects used during class redefinition
   2.227 -    // (e.g., merge_cp in VM_RedefineClasses::merge_cp_and_rewrite()
   2.228 +    // Note that there are objects used during class redefinition,
   2.229 +    // e.g. merge_cp in VM_RedefineClasses::merge_cp_and_rewrite(),
   2.230      // which are discarded with their is_conc_safe state still
   2.231      // false.  These object may be floating garbage so may be
   2.232      // seen here.  If they are floating garbage their size
   2.233 @@ -8307,7 +8328,7 @@
   2.234                                                   size_t chunkSize) {
   2.235    // do_post_free_or_garbage_chunk() should only be called in the case
   2.236    // of the adaptive free list allocator.
   2.237 -  bool fcInFreeLists = fc->isFree();
   2.238 +  const bool fcInFreeLists = fc->isFree();
   2.239    assert(_sp->adaptive_freelists(), "Should only be used in this case.");
   2.240    assert((HeapWord*)fc <= _limit, "sweep invariant");
   2.241    if (CMSTestInFreeList && fcInFreeLists) {
   2.242 @@ -8318,11 +8339,11 @@
   2.243      gclog_or_tty->print_cr("  -- pick up another chunk at 0x%x (%d)", fc, chunkSize);
   2.244    }
   2.245  
   2.246 -  HeapWord* addr = (HeapWord*) fc;
   2.247 +  HeapWord* const fc_addr = (HeapWord*) fc;
   2.248  
   2.249    bool coalesce;
   2.250 -  size_t left  = pointer_delta(addr, freeFinger());
   2.251 -  size_t right = chunkSize;
   2.252 +  const size_t left  = pointer_delta(fc_addr, freeFinger());
   2.253 +  const size_t right = chunkSize;
   2.254    switch (FLSCoalescePolicy) {
   2.255      // numeric value forms a coalition aggressiveness metric
   2.256      case 0:  { // never coalesce
   2.257 @@ -8355,15 +8376,15 @@
   2.258    // If the chunk is in a free range and either we decided to coalesce above
   2.259    // or the chunk is near the large block at the end of the heap
   2.260    // (isNearLargestChunk() returns true), then coalesce this chunk.
   2.261 -  bool doCoalesce = inFreeRange() &&
   2.262 -    (coalesce || _g->isNearLargestChunk((HeapWord*)fc));
   2.263 +  const bool doCoalesce = inFreeRange()
   2.264 +                          && (coalesce || _g->isNearLargestChunk(fc_addr));
   2.265    if (doCoalesce) {
   2.266      // Coalesce the current free range on the left with the new
   2.267      // chunk on the right.  If either is on a free list,
   2.268      // it must be removed from the list and stashed in the closure.
   2.269      if (freeRangeInFreeLists()) {
   2.270 -      FreeChunk* ffc = (FreeChunk*)freeFinger();
   2.271 -      assert(ffc->size() == pointer_delta(addr, freeFinger()),
   2.272 +      FreeChunk* const ffc = (FreeChunk*)freeFinger();
   2.273 +      assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
   2.274          "Size of free range is inconsistent with chunk size.");
   2.275        if (CMSTestInFreeList) {
   2.276          assert(_sp->verifyChunkInFreeLists(ffc),
   2.277 @@ -8380,13 +8401,14 @@
   2.278        _sp->removeFreeChunkFromFreeLists(fc);
   2.279      }
   2.280      set_lastFreeRangeCoalesced(true);
   2.281 +    print_free_block_coalesced(fc);
   2.282    } else {  // not in a free range and/or should not coalesce
   2.283      // Return the current free range and start a new one.
   2.284      if (inFreeRange()) {
   2.285        // In a free range but cannot coalesce with the right hand chunk.
   2.286        // Put the current free range into the free lists.
   2.287        flush_cur_free_chunk(freeFinger(),
   2.288 -                           pointer_delta(addr, freeFinger()));
   2.289 +                           pointer_delta(fc_addr, freeFinger()));
   2.290      }
   2.291      // Set up for new free range.  Pass along whether the right hand
   2.292      // chunk is in the free lists.
   2.293 @@ -8394,6 +8416,42 @@
   2.294    }
   2.295  }
   2.296  
   2.297 +// Lookahead flush:
   2.298 +// If we are tracking a free range, and this is the last chunk that
   2.299 +// we'll look at because its end crosses past _limit, we'll preemptively
   2.300 +// flush it along with any free range we may be holding on to. Note that
   2.301 +// this can be the case only for an already free or freshly garbage
   2.302 +// chunk. If this block is an object, it can never straddle
   2.303 +// over _limit. The "straddling" occurs when _limit is set at
   2.304 +// the previous end of the space when this cycle started, and
   2.305 +// a subsequent heap expansion caused the previously co-terminal
   2.306 +// free block to be coalesced with the newly expanded portion,
    2.307 +// thus rendering _limit a non-block-boundary, making it dangerous
   2.308 +// for the sweeper to step over and examine.
   2.309 +void SweepClosure::lookahead_and_flush(FreeChunk* fc, size_t chunk_size) {
   2.310 +  assert(inFreeRange(), "Should only be called if currently in a free range.");
   2.311 +  HeapWord* const eob = ((HeapWord*)fc) + chunk_size;
   2.312 +  assert(_sp->used_region().contains(eob - 1),
   2.313 +         err_msg("eob = " PTR_FORMAT " out of bounds wrt _sp = [" PTR_FORMAT "," PTR_FORMAT ")"
   2.314 +                 " when examining fc = " PTR_FORMAT "(" SIZE_FORMAT ")",
    2.315 +                 eob, _sp->bottom(), _sp->end(), fc, chunk_size));
   2.316 +  if (eob >= _limit) {
   2.317 +    assert(eob == _limit || fc->isFree(), "Only a free chunk should allow us to cross over the limit");
   2.318 +    if (CMSTraceSweeper) {
   2.319 +      gclog_or_tty->print_cr("_limit " PTR_FORMAT " reached or crossed by block "
   2.320 +                             "[" PTR_FORMAT "," PTR_FORMAT ") in space "
   2.321 +                             "[" PTR_FORMAT "," PTR_FORMAT ")",
   2.322 +                             _limit, fc, eob, _sp->bottom(), _sp->end());
   2.323 +    }
   2.324 +    // Return the storage we are tracking back into the free lists.
   2.325 +    if (CMSTraceSweeper) {
   2.326 +      gclog_or_tty->print_cr("Flushing ... ");
   2.327 +    }
   2.328 +    assert(freeFinger() < eob, "Error");
    2.329 +    flush_cur_free_chunk(freeFinger(), pointer_delta(eob, freeFinger()));
   2.330 +  }
   2.331 +}
   2.332 +
   2.333  void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
   2.334    assert(inFreeRange(), "Should only be called if currently in a free range.");
   2.335    assert(size > 0,
   2.336 @@ -8419,6 +8477,8 @@
   2.337      }
   2.338      _sp->addChunkAndRepairOffsetTable(chunk, size,
   2.339              lastFreeRangeCoalesced());
   2.340 +  } else if (CMSTraceSweeper) {
   2.341 +    gclog_or_tty->print_cr("Already in free list: nothing to flush");
   2.342    }
   2.343    set_inFreeRange(false);
   2.344    set_freeRangeInFreeLists(false);
   2.345 @@ -8477,13 +8537,14 @@
   2.346  bool debug_verifyChunkInFreeLists(FreeChunk* fc) {
   2.347    return debug_cms_space->verifyChunkInFreeLists(fc);
   2.348  }
   2.349 -
   2.350 -void SweepClosure::record_free_block_coalesced(FreeChunk* fc) const {
   2.351 +#endif
   2.352 +
   2.353 +void SweepClosure::print_free_block_coalesced(FreeChunk* fc) const {
   2.354    if (CMSTraceSweeper) {
   2.355 -    gclog_or_tty->print("Sweep:coal_free_blk 0x%x (%d)\n", fc, fc->size());
   2.356 -  }
   2.357 -}
   2.358 -#endif
   2.359 +    gclog_or_tty->print_cr("Sweep:coal_free_blk " PTR_FORMAT " (" SIZE_FORMAT ")",
   2.360 +                           fc, fc->size());
   2.361 +  }
   2.362 +}
   2.363  
   2.364  // CMSIsAliveClosure
   2.365  bool CMSIsAliveClosure::do_object_b(oop obj) {
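
The heart of the sweeper change is the new SweepClosure::lookahead_and_flush() above: if the chunk in hand is the last one the sweeper will look at because its end reaches or crosses _limit, any open free range is flushed to the free lists right away instead of letting the sweep step over a boundary that may no longer be block-aligned. A minimal standalone sketch of that rule, with simplified stand-in types (none of these names are the HotSpot API):

#include <cassert>
#include <cstddef>

typedef const char* HeapWord;          // simplified: a raw byte pointer

struct SweepState {
  HeapWord free_finger;                // start of the open free range
  bool     in_free_range;              // are we tracking a range at all?
  HeapWord limit;                      // sweep must not examine blocks past here
};

// Called while a free range is open, for an already-free or freshly
// garbage chunk. If the chunk's end-of-block (eob) reaches or crosses
// the limit, return the whole coalesced run [free_finger, eob) to the
// free lists now, because the next block header past `limit` may not
// be parsable after a heap expansion.
template <typename FlushFn>
void lookahead_and_flush(SweepState& s, HeapWord chunk, size_t size, FlushFn flush) {
  assert(s.in_free_range && "only called while tracking a free range");
  HeapWord eob = chunk + size;
  if (eob >= s.limit) {
    assert(s.free_finger < eob && "flush must cover a non-empty run");
    flush(s.free_finger, static_cast<size_t>(eob - s.free_finger));
    s.in_free_range = false;
  }
}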
     3.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Tue Jun 14 15:20:55 2011 -0700
     3.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.hpp	Wed Jun 15 10:20:03 2011 -0700
     3.3 @@ -1701,9 +1701,9 @@
     3.4    CMSCollector*                  _collector;  // collector doing the work
     3.5    ConcurrentMarkSweepGeneration* _g;    // Generation being swept
     3.6    CompactibleFreeListSpace*      _sp;   // Space being swept
     3.7 -  HeapWord*                      _limit;// the address at which the sweep should stop because
     3.8 -                                        // we do not expect blocks eligible for sweeping past
     3.9 -                                        // that address.
    3.10 +  HeapWord*                      _limit;// the address at or above which the sweep should stop
    3.11 +                                        // because we do not expect newly garbage blocks
    3.12 +                                        // eligible for sweeping past that address.
    3.13    Mutex*                         _freelistLock; // Free list lock (in space)
    3.14    CMSBitMap*                     _bitMap;       // Marking bit map (in
    3.15                                                  // generation)
    3.16 @@ -1750,6 +1750,10 @@
    3.17    void do_post_free_or_garbage_chunk(FreeChunk *fc, size_t chunkSize);
    3.18    // Process a free chunk during sweeping.
    3.19    void do_already_free_chunk(FreeChunk *fc);
    3.20 +  // Work method called when processing an already free or a
    3.21 +  // freshly garbage chunk to do a lookahead and possibly a
     3.22 +  // preemptive flush if crossing over _limit.
    3.23 +  void lookahead_and_flush(FreeChunk* fc, size_t chunkSize);
    3.24    // Process a garbage chunk during sweeping.
    3.25    size_t do_garbage_chunk(FreeChunk *fc);
    3.26    // Process a live chunk during sweeping.
    3.27 @@ -1758,8 +1762,6 @@
    3.28    // Accessors.
    3.29    HeapWord* freeFinger() const          { return _freeFinger; }
    3.30    void set_freeFinger(HeapWord* v)      { _freeFinger = v; }
    3.31 -  size_t freeRangeSize() const          { return _freeRangeSize; }
    3.32 -  void set_freeRangeSize(size_t v)      { _freeRangeSize = v; }
    3.33    bool inFreeRange()    const           { return _inFreeRange; }
    3.34    void set_inFreeRange(bool v)          { _inFreeRange = v; }
    3.35    bool lastFreeRangeCoalesced() const    { return _lastFreeRangeCoalesced; }
    3.36 @@ -1779,14 +1781,16 @@
    3.37    void do_yield_work(HeapWord* addr);
    3.38  
    3.39    // Debugging/Printing
    3.40 -  void record_free_block_coalesced(FreeChunk* fc) const PRODUCT_RETURN;
    3.41 +  void print_free_block_coalesced(FreeChunk* fc) const;
    3.42  
    3.43   public:
    3.44    SweepClosure(CMSCollector* collector, ConcurrentMarkSweepGeneration* g,
    3.45                 CMSBitMap* bitMap, bool should_yield);
    3.46 -  ~SweepClosure();
    3.47 +  ~SweepClosure() PRODUCT_RETURN;
    3.48  
    3.49    size_t       do_blk_careful(HeapWord* addr);
    3.50 +  void         print() const { print_on(tty); }
    3.51 +  void         print_on(outputStream *st) const;
    3.52  };
    3.53  
    3.54  // Closures related to weak references processing
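
The `~SweepClosure() PRODUCT_RETURN;` declaration uses HotSpot's PRODUCT_RETURN macro so the destructor only has a real body in debug builds, matching the #ifndef PRODUCT definition added in the .cpp file. A sketch of the idiom, with the macro reproduced approximately from utilities/macros.hpp (treat the exact definition as an assumption):

#ifdef PRODUCT
#define PRODUCT_RETURN {}              // product: declaration becomes an empty inline body
#else
#define PRODUCT_RETURN                 // debug: the trailing ';' leaves a plain declaration
#endif

struct SweepClosureLike {
  ~SweepClosureLike() PRODUCT_RETURN;  // empty in product, defined out of line in debug
};

#ifndef PRODUCT
// Assertion checking only, as in the diff above; compiled out of
// product builds entirely.
SweepClosureLike::~SweepClosureLike() { }
#endif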
     4.1 --- a/src/share/vm/interpreter/rewriter.cpp	Tue Jun 14 15:20:55 2011 -0700
     4.2 +++ b/src/share/vm/interpreter/rewriter.cpp	Wed Jun 15 10:20:03 2011 -0700
     4.3 @@ -63,6 +63,15 @@
     4.4    _have_invoke_dynamic = ((tag_mask & (1 << JVM_CONSTANT_InvokeDynamic)) != 0);
     4.5  }
     4.6  
     4.7 +// Unrewrite the bytecodes if an error occurs.
     4.8 +void Rewriter::restore_bytecodes() {
     4.9 +  int len = _methods->length();
    4.10 +
    4.11 +  for (int i = len-1; i >= 0; i--) {
    4.12 +    methodOop method = (methodOop)_methods->obj_at(i);
    4.13 +    scan_method(method, true);
    4.14 +  }
    4.15 +}
    4.16  
    4.17  // Creates a constant pool cache given a CPC map
    4.18  void Rewriter::make_constant_pool_cache(TRAPS) {
    4.19 @@ -133,57 +142,94 @@
    4.20  
    4.21  
    4.22  // Rewrite a classfile-order CP index into a native-order CPC index.
    4.23 -void Rewriter::rewrite_member_reference(address bcp, int offset) {
    4.24 +void Rewriter::rewrite_member_reference(address bcp, int offset, bool reverse) {
    4.25    address p = bcp + offset;
    4.26 -  int  cp_index    = Bytes::get_Java_u2(p);
    4.27 -  int  cache_index = cp_entry_to_cp_cache(cp_index);
    4.28 -  Bytes::put_native_u2(p, cache_index);
    4.29 +  if (!reverse) {
    4.30 +    int  cp_index    = Bytes::get_Java_u2(p);
    4.31 +    int  cache_index = cp_entry_to_cp_cache(cp_index);
    4.32 +    Bytes::put_native_u2(p, cache_index);
    4.33 +  } else {
    4.34 +    int cache_index = Bytes::get_native_u2(p);
    4.35 +    int pool_index = cp_cache_entry_pool_index(cache_index);
    4.36 +    Bytes::put_Java_u2(p, pool_index);
    4.37 +  }
    4.38  }
    4.39  
    4.40  
    4.41 -void Rewriter::rewrite_invokedynamic(address bcp, int offset) {
    4.42 +void Rewriter::rewrite_invokedynamic(address bcp, int offset, bool reverse) {
    4.43    address p = bcp + offset;
    4.44 -  assert(p[-1] == Bytecodes::_invokedynamic, "");
    4.45 -  int cp_index = Bytes::get_Java_u2(p);
    4.46 -  int cpc  = maybe_add_cp_cache_entry(cp_index);  // add lazily
    4.47 -  int cpc2 = add_secondary_cp_cache_entry(cpc);
    4.48 +  assert(p[-1] == Bytecodes::_invokedynamic, "not invokedynamic bytecode");
    4.49 +  if (!reverse) {
    4.50 +    int cp_index = Bytes::get_Java_u2(p);
    4.51 +    int cpc  = maybe_add_cp_cache_entry(cp_index);  // add lazily
    4.52 +    int cpc2 = add_secondary_cp_cache_entry(cpc);
    4.53  
    4.54 -  // Replace the trailing four bytes with a CPC index for the dynamic
    4.55 -  // call site.  Unlike other CPC entries, there is one per bytecode,
    4.56 -  // not just one per distinct CP entry.  In other words, the
    4.57 -  // CPC-to-CP relation is many-to-one for invokedynamic entries.
    4.58 -  // This means we must use a larger index size than u2 to address
    4.59 -  // all these entries.  That is the main reason invokedynamic
    4.60 -  // must have a five-byte instruction format.  (Of course, other JVM
    4.61 -  // implementations can use the bytes for other purposes.)
    4.62 -  Bytes::put_native_u4(p, constantPoolCacheOopDesc::encode_secondary_index(cpc2));
    4.63 -  // Note: We use native_u4 format exclusively for 4-byte indexes.
    4.64 +    // Replace the trailing four bytes with a CPC index for the dynamic
    4.65 +    // call site.  Unlike other CPC entries, there is one per bytecode,
    4.66 +    // not just one per distinct CP entry.  In other words, the
    4.67 +    // CPC-to-CP relation is many-to-one for invokedynamic entries.
    4.68 +    // This means we must use a larger index size than u2 to address
    4.69 +    // all these entries.  That is the main reason invokedynamic
    4.70 +    // must have a five-byte instruction format.  (Of course, other JVM
    4.71 +    // implementations can use the bytes for other purposes.)
    4.72 +    Bytes::put_native_u4(p, constantPoolCacheOopDesc::encode_secondary_index(cpc2));
    4.73 +    // Note: We use native_u4 format exclusively for 4-byte indexes.
    4.74 +  } else {
    4.75 +    int cache_index = constantPoolCacheOopDesc::decode_secondary_index(
    4.76 +                        Bytes::get_native_u4(p));
    4.77 +    int secondary_index = cp_cache_secondary_entry_main_index(cache_index);
    4.78 +    int pool_index = cp_cache_entry_pool_index(secondary_index);
    4.79 +    assert(_pool->tag_at(pool_index).is_invoke_dynamic(), "wrong index");
    4.80 +    // zero out 4 bytes
    4.81 +    Bytes::put_Java_u4(p, 0);
    4.82 +    Bytes::put_Java_u2(p, pool_index);
    4.83 +  }
    4.84  }
    4.85  
    4.86  
    4.87  // Rewrite some ldc bytecodes to _fast_aldc
    4.88 -void Rewriter::maybe_rewrite_ldc(address bcp, int offset, bool is_wide) {
    4.89 -  assert((*bcp) == (is_wide ? Bytecodes::_ldc_w : Bytecodes::_ldc), "");
    4.90 -  address p = bcp + offset;
    4.91 -  int cp_index = is_wide ? Bytes::get_Java_u2(p) : (u1)(*p);
    4.92 -  constantTag tag = _pool->tag_at(cp_index).value();
    4.93 -  if (tag.is_method_handle() || tag.is_method_type()) {
    4.94 -    int cache_index = cp_entry_to_cp_cache(cp_index);
    4.95 -    if (is_wide) {
    4.96 -      (*bcp) = Bytecodes::_fast_aldc_w;
    4.97 -      assert(cache_index == (u2)cache_index, "");
    4.98 -      Bytes::put_native_u2(p, cache_index);
    4.99 -    } else {
   4.100 -      (*bcp) = Bytecodes::_fast_aldc;
   4.101 -      assert(cache_index == (u1)cache_index, "");
   4.102 -      (*p) = (u1)cache_index;
   4.103 +void Rewriter::maybe_rewrite_ldc(address bcp, int offset, bool is_wide,
   4.104 +                                 bool reverse) {
   4.105 +  if (!reverse) {
   4.106 +    assert((*bcp) == (is_wide ? Bytecodes::_ldc_w : Bytecodes::_ldc), "not ldc bytecode");
   4.107 +    address p = bcp + offset;
   4.108 +    int cp_index = is_wide ? Bytes::get_Java_u2(p) : (u1)(*p);
   4.109 +    constantTag tag = _pool->tag_at(cp_index).value();
   4.110 +    if (tag.is_method_handle() || tag.is_method_type()) {
   4.111 +      int cache_index = cp_entry_to_cp_cache(cp_index);
   4.112 +      if (is_wide) {
   4.113 +        (*bcp) = Bytecodes::_fast_aldc_w;
   4.114 +        assert(cache_index == (u2)cache_index, "index overflow");
   4.115 +        Bytes::put_native_u2(p, cache_index);
   4.116 +      } else {
   4.117 +        (*bcp) = Bytecodes::_fast_aldc;
   4.118 +        assert(cache_index == (u1)cache_index, "index overflow");
   4.119 +        (*p) = (u1)cache_index;
   4.120 +      }
   4.121 +    }
   4.122 +  } else {
   4.123 +    Bytecodes::Code rewritten_bc =
   4.124 +              (is_wide ? Bytecodes::_fast_aldc_w : Bytecodes::_fast_aldc);
   4.125 +    if ((*bcp) == rewritten_bc) {
   4.126 +      address p = bcp + offset;
   4.127 +      int cache_index = is_wide ? Bytes::get_native_u2(p) : (u1)(*p);
   4.128 +      int pool_index = cp_cache_entry_pool_index(cache_index);
   4.129 +      if (is_wide) {
   4.130 +        (*bcp) = Bytecodes::_ldc_w;
   4.131 +        assert(pool_index == (u2)pool_index, "index overflow");
   4.132 +        Bytes::put_Java_u2(p, pool_index);
   4.133 +      } else {
   4.134 +        (*bcp) = Bytecodes::_ldc;
   4.135 +        assert(pool_index == (u1)pool_index, "index overflow");
   4.136 +        (*p) = (u1)pool_index;
   4.137 +      }
   4.138      }
   4.139    }
   4.140  }
   4.141  
   4.142  
   4.143  // Rewrites a method given the index_map information
   4.144 -void Rewriter::scan_method(methodOop method) {
   4.145 +void Rewriter::scan_method(methodOop method, bool reverse) {
   4.146  
   4.147    int nof_jsrs = 0;
   4.148    bool has_monitor_bytecodes = false;
   4.149 @@ -236,6 +282,13 @@
   4.150  #endif
   4.151            break;
   4.152          }
   4.153 +        case Bytecodes::_fast_linearswitch:
   4.154 +        case Bytecodes::_fast_binaryswitch: {
   4.155 +#ifndef CC_INTERP
   4.156 +          (*bcp) = Bytecodes::_lookupswitch;
   4.157 +#endif
   4.158 +          break;
   4.159 +        }
   4.160          case Bytecodes::_getstatic      : // fall through
   4.161          case Bytecodes::_putstatic      : // fall through
   4.162          case Bytecodes::_getfield       : // fall through
   4.163 @@ -244,16 +297,18 @@
   4.164          case Bytecodes::_invokespecial  : // fall through
   4.165          case Bytecodes::_invokestatic   :
   4.166          case Bytecodes::_invokeinterface:
   4.167 -          rewrite_member_reference(bcp, prefix_length+1);
   4.168 +          rewrite_member_reference(bcp, prefix_length+1, reverse);
   4.169            break;
   4.170          case Bytecodes::_invokedynamic:
   4.171 -          rewrite_invokedynamic(bcp, prefix_length+1);
   4.172 +          rewrite_invokedynamic(bcp, prefix_length+1, reverse);
   4.173            break;
   4.174          case Bytecodes::_ldc:
   4.175 -          maybe_rewrite_ldc(bcp, prefix_length+1, false);
   4.176 +        case Bytecodes::_fast_aldc:
   4.177 +          maybe_rewrite_ldc(bcp, prefix_length+1, false, reverse);
   4.178            break;
   4.179          case Bytecodes::_ldc_w:
   4.180 -          maybe_rewrite_ldc(bcp, prefix_length+1, true);
   4.181 +        case Bytecodes::_fast_aldc_w:
   4.182 +          maybe_rewrite_ldc(bcp, prefix_length+1, true, reverse);
   4.183            break;
   4.184          case Bytecodes::_jsr            : // fall through
   4.185          case Bytecodes::_jsr_w          : nof_jsrs++;                   break;
   4.186 @@ -273,12 +328,13 @@
   4.187    if (nof_jsrs > 0) {
   4.188      method->set_has_jsrs();
   4.189      // Second pass will revisit this method.
   4.190 -    assert(method->has_jsrs(), "");
   4.191 +    assert(method->has_jsrs(), "didn't we just set this?");
   4.192    }
   4.193  }
   4.194  
   4.195  // After constant pool is created, revisit methods containing jsrs.
   4.196  methodHandle Rewriter::rewrite_jsrs(methodHandle method, TRAPS) {
   4.197 +  ResourceMark rm(THREAD);
   4.198    ResolveOopMapConflicts romc(method);
   4.199    methodHandle original_method = method;
   4.200    method = romc.do_potential_rewrite(CHECK_(methodHandle()));
   4.201 @@ -300,7 +356,6 @@
   4.202    return method;
   4.203  }
   4.204  
   4.205 -
   4.206  void Rewriter::rewrite(instanceKlassHandle klass, TRAPS) {
   4.207    ResourceMark rm(THREAD);
   4.208    Rewriter     rw(klass, klass->constants(), klass->methods(), CHECK);
   4.209 @@ -343,34 +398,57 @@
   4.210    }
   4.211  
   4.212    // rewrite methods, in two passes
   4.213 -  int i, len = _methods->length();
   4.214 +  int len = _methods->length();
   4.215  
   4.216 -  for (i = len; --i >= 0; ) {
   4.217 +  for (int i = len-1; i >= 0; i--) {
   4.218      methodOop method = (methodOop)_methods->obj_at(i);
   4.219      scan_method(method);
   4.220    }
   4.221  
   4.222    // allocate constant pool cache, now that we've seen all the bytecodes
   4.223 -  make_constant_pool_cache(CHECK);
   4.224 +  make_constant_pool_cache(THREAD);
   4.225  
   4.226 -  for (i = len; --i >= 0; ) {
   4.227 -    methodHandle m(THREAD, (methodOop)_methods->obj_at(i));
   4.228 +  // Restore bytecodes to their unrewritten state if there are exceptions
   4.229 +  // rewriting bytecodes or allocating the cpCache
   4.230 +  if (HAS_PENDING_EXCEPTION) {
   4.231 +    restore_bytecodes();
   4.232 +    return;
   4.233 +  }
   4.234 +}
   4.235 +
   4.236 +// Relocate jsr/rets in a method.  This can't be done with the rewriter
   4.237 +// stage because it can throw other exceptions, leaving the bytecodes
   4.238 +// pointing at constant pool cache entries.
   4.239 +// Link and check jvmti dependencies while we're iterating over the methods.
   4.240 +// JSR292 code calls with a different set of methods, so two entry points.
   4.241 +void Rewriter::relocate_and_link(instanceKlassHandle this_oop, TRAPS) {
   4.242 +  objArrayHandle methods(THREAD, this_oop->methods());
   4.243 +  relocate_and_link(this_oop, methods, THREAD);
   4.244 +}
   4.245 +
   4.246 +void Rewriter::relocate_and_link(instanceKlassHandle this_oop,
   4.247 +                                 objArrayHandle methods, TRAPS) {
   4.248 +  int len = methods->length();
   4.249 +  for (int i = len-1; i >= 0; i--) {
   4.250 +    methodHandle m(THREAD, (methodOop)methods->obj_at(i));
   4.251  
   4.252      if (m->has_jsrs()) {
   4.253        m = rewrite_jsrs(m, CHECK);
   4.254        // Method might have gotten rewritten.
   4.255 -      _methods->obj_at_put(i, m());
   4.256 +      methods->obj_at_put(i, m());
   4.257      }
   4.258  
   4.259 -    // Set up method entry points for compiler and interpreter.
    4.260 +    // Set up method entry points for compiler and interpreter.
   4.261      m->link_method(m, CHECK);
   4.262  
    4.263 +    // This is for JVMTI and unrelated to the relocator, but it is the last thing we do
   4.264  #ifdef ASSERT
   4.265      if (StressMethodComparator) {
   4.266        static int nmc = 0;
   4.267        for (int j = i; j >= 0 && j >= i-4; j--) {
   4.268          if ((++nmc % 1000) == 0)  tty->print_cr("Have run MethodComparator %d times...", nmc);
   4.269 -        bool z = MethodComparator::methods_EMCP(m(), (methodOop)_methods->obj_at(j));
   4.270 +        bool z = MethodComparator::methods_EMCP(m(),
   4.271 +                   (methodOop)methods->obj_at(j));
   4.272          if (j == i && !z) {
   4.273            tty->print("MethodComparator FAIL: "); m->print(); m->print_codes();
   4.274            assert(z, "method must compare equal to itself");
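
The recurring pattern in the rewriter changes is the new `reverse` flag: each rewrite_* method can now undo itself, mapping a native-order constant-pool-cache index back to the classfile-order constant-pool index, which is what restore_bytecodes() relies on when rewriting fails midway. A sketch of the two directions for the plain u2 operand case (the byte helpers and the index-map callbacks below are simplified stand-ins, not HotSpot's Bytes class):

#include <cstdint>
#include <cstring>

// Classfile ("Java") order is big-endian; "native" order is whatever
// the host uses, written with memcpy to sidestep alignment issues.
static uint16_t get_Java_u2(const uint8_t* p)         { return uint16_t((p[0] << 8) | p[1]); }
static void     put_Java_u2(uint8_t* p, uint16_t v)   { p[0] = uint8_t(v >> 8); p[1] = uint8_t(v); }
static uint16_t get_native_u2(const uint8_t* p)       { uint16_t v; std::memcpy(&v, p, 2); return v; }
static void     put_native_u2(uint8_t* p, uint16_t v) { std::memcpy(p, &v, 2); }

// Forward: classfile-order CP index -> native-order CP-cache index.
// Reverse: CP-cache index -> original CP index, so a failed rewrite
// can leave the method's bytecodes exactly as they started.
void rewrite_u2_operand(uint8_t* p, bool reverse,
                        uint16_t (*cp_to_cache)(uint16_t),
                        uint16_t (*cache_to_cp)(uint16_t)) {
  if (!reverse) {
    put_native_u2(p, cp_to_cache(get_Java_u2(p)));
  } else {
    put_Java_u2(p, cache_to_cp(get_native_u2(p)));
  }
}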
     5.1 --- a/src/share/vm/interpreter/rewriter.hpp	Tue Jun 14 15:20:55 2011 -0700
     5.2 +++ b/src/share/vm/interpreter/rewriter.hpp	Wed Jun 15 10:20:03 2011 -0700
     5.3 @@ -85,13 +85,15 @@
     5.4  
     5.5    void compute_index_maps();
     5.6    void make_constant_pool_cache(TRAPS);
     5.7 -  void scan_method(methodOop m);
     5.8 -  methodHandle rewrite_jsrs(methodHandle m, TRAPS);
     5.9 +  void scan_method(methodOop m, bool reverse = false);
    5.10    void rewrite_Object_init(methodHandle m, TRAPS);
    5.11 -  void rewrite_member_reference(address bcp, int offset);
    5.12 -  void rewrite_invokedynamic(address bcp, int offset);
    5.13 -  void maybe_rewrite_ldc(address bcp, int offset, bool is_wide);
    5.14 +  void rewrite_member_reference(address bcp, int offset, bool reverse = false);
    5.15 +  void rewrite_invokedynamic(address bcp, int offset, bool reverse = false);
    5.16 +  void maybe_rewrite_ldc(address bcp, int offset, bool is_wide, bool reverse = false);
    5.17 +  // Revert bytecodes in case of an exception.
    5.18 +  void restore_bytecodes();
    5.19  
    5.20 +  static methodHandle rewrite_jsrs(methodHandle m, TRAPS);
    5.21   public:
    5.22    // Driver routine:
    5.23    static void rewrite(instanceKlassHandle klass, TRAPS);
    5.24 @@ -100,6 +102,13 @@
    5.25    enum {
    5.26      _secondary_entry_tag = nth_bit(30)
    5.27    };
    5.28 +
    5.29 +  // Second pass, not gated by is_rewritten flag
    5.30 +  static void relocate_and_link(instanceKlassHandle klass, TRAPS);
     5.31 +  // JSR292 version to call with its own methods.
    5.32 +  static void relocate_and_link(instanceKlassHandle klass,
    5.33 +                                objArrayHandle methods, TRAPS);
    5.34 +
    5.35  };
    5.36  
    5.37  #endif // SHARE_VM_INTERPRETER_REWRITER_HPP
     6.1 --- a/src/share/vm/memory/blockOffsetTable.cpp	Tue Jun 14 15:20:55 2011 -0700
     6.2 +++ b/src/share/vm/memory/blockOffsetTable.cpp	Wed Jun 15 10:20:03 2011 -0700
     6.3 @@ -1,5 +1,5 @@
     6.4  /*
     6.5 - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
     6.6 + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved.
     6.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     6.8   *
     6.9   * This code is free software; you can redistribute it and/or modify it
    6.10 @@ -566,11 +566,17 @@
    6.11      q = n;
    6.12      n += _sp->block_size(n);
    6.13      assert(n > q,
    6.14 -           err_msg("Looping at n = " PTR_FORMAT " with last = " PTR_FORMAT " _sp = [" PTR_FORMAT "," PTR_FORMAT ")",
    6.15 -                   n, last, _sp->bottom(), _sp->end()));
    6.16 +           err_msg("Looping at n = " PTR_FORMAT " with last = " PTR_FORMAT","
    6.17 +                   " while querying blk_start(" PTR_FORMAT ")"
    6.18 +                   " on _sp = [" PTR_FORMAT "," PTR_FORMAT ")",
    6.19 +                   n, last, addr, _sp->bottom(), _sp->end()));
    6.20    }
    6.21 -  assert(q <= addr, err_msg("wrong order for current (" INTPTR_FORMAT ") <= arg (" INTPTR_FORMAT ")", q, addr));
    6.22 -  assert(addr <= n, err_msg("wrong order for arg (" INTPTR_FORMAT ") <= next (" INTPTR_FORMAT ")", addr, n));
    6.23 +  assert(q <= addr,
    6.24 +         err_msg("wrong order for current (" INTPTR_FORMAT ")" " <= arg (" INTPTR_FORMAT ")",
    6.25 +                 q, addr));
    6.26 +  assert(addr <= n,
    6.27 +         err_msg("wrong order for arg (" INTPTR_FORMAT ") <= next (" INTPTR_FORMAT ")",
    6.28 +                 addr, n));
    6.29    return q;
    6.30  }
    6.31  
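
The assertions strengthened above guard a forward walk over block starts; on exit the invariant is q <= addr <= n, i.e. the returned block start brackets the query address. A toy model of that loop (block_size is a caller-supplied stand-in for _sp->block_size(); the function name is illustrative):

#include <cassert>
#include <cstddef>

// Walk forward from a known block start `q` until the block containing
// `addr` is found; returns that block's start. Mirrors the invariants
// the new err_msg assertions document.
const char* blk_start_linear(const char* q, const char* addr,
                             size_t (*block_size)(const char*)) {
  const char* n = q + block_size(q);
  while (n <= addr) {
    q = n;
    n += block_size(n);
    assert(n > q && "block_size() must make progress or we loop forever");
  }
  assert(q <= addr && "wrong order for current <= arg");
  assert(addr <= n && "wrong order for arg <= next");
  return q;
}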
     7.1 --- a/src/share/vm/oops/instanceKlass.cpp	Tue Jun 14 15:20:55 2011 -0700
     7.2 +++ b/src/share/vm/oops/instanceKlass.cpp	Wed Jun 15 10:20:03 2011 -0700
     7.3 @@ -335,6 +335,9 @@
     7.4          this_oop->rewrite_class(CHECK_false);
     7.5        }
     7.6  
     7.7 +      // relocate jsrs and link methods after they are all rewritten
     7.8 +      this_oop->relocate_and_link_methods(CHECK_false);
     7.9 +
    7.10        // Initialize the vtable and interface table after
    7.11        // methods have been rewritten since rewrite may
    7.12        // fabricate new methodOops.
    7.13 @@ -365,17 +368,8 @@
    7.14  
    7.15  
    7.16  // Rewrite the byte codes of all of the methods of a class.
    7.17 -// Three cases:
    7.18 -//    During the link of a newly loaded class.
    7.19 -//    During the preloading of classes to be written to the shared spaces.
    7.20 -//      - Rewrite the methods and update the method entry points.
    7.21 -//
    7.22 -//    During the link of a class in the shared spaces.
    7.23 -//      - The methods were already rewritten, update the metho entry points.
    7.24 -//
    7.25  // The rewriter must be called exactly once. Rewriting must happen after
    7.26  // verification but before the first method of the class is executed.
    7.27 -
    7.28  void instanceKlass::rewrite_class(TRAPS) {
    7.29    assert(is_loaded(), "must be loaded");
    7.30    instanceKlassHandle this_oop(THREAD, this->as_klassOop());
    7.31 @@ -383,10 +377,19 @@
    7.32      assert(this_oop()->is_shared(), "rewriting an unshared class?");
    7.33      return;
    7.34    }
    7.35 -  Rewriter::rewrite(this_oop, CHECK); // No exception can happen here
    7.36 +  Rewriter::rewrite(this_oop, CHECK);
    7.37    this_oop->set_rewritten();
    7.38  }
    7.39  
    7.40 +// Now relocate and link method entry points after class is rewritten.
     7.41 +// This is outside the is_rewritten flag. In case of an exception, it can be
    7.42 +// executed more than once.
    7.43 +void instanceKlass::relocate_and_link_methods(TRAPS) {
    7.44 +  assert(is_loaded(), "must be loaded");
    7.45 +  instanceKlassHandle this_oop(THREAD, this->as_klassOop());
    7.46 +  Rewriter::relocate_and_link(this_oop, CHECK);
    7.47 +}
    7.48 +
    7.49  
    7.50  void instanceKlass::initialize_impl(instanceKlassHandle this_oop, TRAPS) {
    7.51    // Make sure klass is linked (verified) before initialization
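
Taken together, the instanceKlass changes split linking into a one-shot rewrite (gated by is_rewritten and unwound by restore_bytecodes() on failure) followed by relocation and entry-point setup that may safely run again after a transient failure such as a full code cache. A condensed sketch of that ordering (illustrative names, not the HotSpot signatures):

struct KlassLike {
  bool rewritten = false;
  void rewrite_class()             { /* rewrite bytecodes; all-or-nothing */ rewritten = true; }
  void relocate_and_link_methods() { /* jsr relocation + method entry points */ }
  bool is_rewritten() const        { return rewritten; }
};

// Rewriting happens exactly once; relocation/linking sits outside the
// is_rewritten gate, and methodOopDesc::link_method()'s new early
// return makes re-running it harmless for already-linked methods.
void link_class_sketch(KlassLike& k) {
  if (!k.is_rewritten()) k.rewrite_class();
  k.relocate_and_link_methods();
}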
     8.1 --- a/src/share/vm/oops/instanceKlass.hpp	Tue Jun 14 15:20:55 2011 -0700
     8.2 +++ b/src/share/vm/oops/instanceKlass.hpp	Wed Jun 15 10:20:03 2011 -0700
     8.3 @@ -392,6 +392,7 @@
     8.4    bool link_class_or_fail(TRAPS); // returns false on failure
     8.5    void unlink_class();
     8.6    void rewrite_class(TRAPS);
     8.7 +  void relocate_and_link_methods(TRAPS);
     8.8    methodOop class_initializer();
     8.9  
    8.10    // set the class to initialized if no static initializer is present
     9.1 --- a/src/share/vm/oops/methodOop.cpp	Tue Jun 14 15:20:55 2011 -0700
     9.2 +++ b/src/share/vm/oops/methodOop.cpp	Wed Jun 15 10:20:03 2011 -0700
     9.3 @@ -693,7 +693,10 @@
     9.4  // Called when the method_holder is getting linked. Setup entrypoints so the method
     9.5  // is ready to be called from interpreter, compiler, and vtables.
     9.6  void methodOopDesc::link_method(methodHandle h_method, TRAPS) {
     9.7 -  assert(_i2i_entry == NULL, "should only be called once");
     9.8 +  // If the code cache is full, we may reenter this function for the
     9.9 +  // leftover methods that weren't linked.
    9.10 +  if (_i2i_entry != NULL) return;
    9.11 +
    9.12    assert(_adapter == NULL, "init'd to NULL" );
    9.13    assert( _code == NULL, "nothing compiled yet" );
    9.14  
    10.1 --- a/src/share/vm/prims/jvmtiRedefineClasses.cpp	Tue Jun 14 15:20:55 2011 -0700
    10.2 +++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp	Wed Jun 15 10:20:03 2011 -0700
    10.3 @@ -992,6 +992,9 @@
    10.4      }
    10.5  
    10.6      Rewriter::rewrite(scratch_class, THREAD);
    10.7 +    if (!HAS_PENDING_EXCEPTION) {
    10.8 +      Rewriter::relocate_and_link(scratch_class, THREAD);
    10.9 +    }
   10.10      if (HAS_PENDING_EXCEPTION) {
   10.11        Symbol* ex_name = PENDING_EXCEPTION->klass()->klass_part()->name();
   10.12        CLEAR_PENDING_EXCEPTION;
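
The redefine path above deliberately calls Rewriter::rewrite with THREAD rather than CHECK so it can inspect the pending exception itself before deciding whether to run relocate_and_link. A compilable caricature of HotSpot's TRAPS/CHECK convention (the real macros in utilities/exceptions.hpp are more elaborate; this is an approximation):

struct Thread { const void* pending_exception = nullptr; };

#define TRAPS                   Thread* THREAD
#define HAS_PENDING_EXCEPTION   (THREAD->pending_exception != nullptr)
#define CLEAR_PENDING_EXCEPTION (THREAD->pending_exception = nullptr)
// The CHECK trick: the macro closes the call's parenthesis itself and
// injects an early return when an exception is pending.
#define CHECK                   THREAD); if (HAS_PENDING_EXCEPTION) return; (void)(0

void may_throw(TRAPS) { /* may set THREAD->pending_exception */ }

void caller(TRAPS) {
  may_throw(CHECK);             // propagate: return early on exception
  may_throw(THREAD);            // or handle it here instead, as the diff does:
  if (HAS_PENDING_EXCEPTION) {
    CLEAR_PENDING_EXCEPTION;    // e.g. record the failure and fall back
  }
}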
    11.1 --- a/src/share/vm/prims/methodHandleWalk.cpp	Tue Jun 14 15:20:55 2011 -0700
    11.2 +++ b/src/share/vm/prims/methodHandleWalk.cpp	Wed Jun 15 10:20:03 2011 -0700
    11.3 @@ -1604,6 +1604,7 @@
    11.4    objArrayHandle methods(THREAD, m_array);
    11.5    methods->obj_at_put(0, m());
    11.6    Rewriter::rewrite(_target_klass(), cpool, methods, CHECK_(empty));  // Use fake class.
    11.7 +  Rewriter::relocate_and_link(_target_klass(), methods, CHECK_(empty));  // Use fake class.
    11.8  
    11.9    // Set the invocation counter's count to the invoke count of the
   11.10    // original call site.
