Merge

Wed, 18 May 2011 11:45:06 -0700

author
never
date
Wed, 18 May 2011 11:45:06 -0700
changeset 2922
1be2f0c40a34
parent 2921
b79e8b4ecd76
parent 2914
3f3325361b86
child 2923
62f39d40ebf1

Merge

     1.1 --- a/make/jprt.gmk	Tue May 17 19:15:34 2011 -0700
     1.2 +++ b/make/jprt.gmk	Wed May 18 11:45:06 2011 -0700
     1.3 @@ -1,5 +1,5 @@
     1.4  #
     1.5 -# Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
     1.6 +# Copyright (c) 2006, 2011, Oracle and/or its affiliates. All rights reserved.
     1.7  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.8  #
     1.9  # This code is free software; you can redistribute it and/or modify it
    1.10 @@ -33,6 +33,24 @@
    1.11    ZIPFLAGS=-q -y
    1.12  endif
    1.13  
    1.14 +jprt_build_productEmb:
    1.15 +	$(MAKE) JAVASE_EMBEDDED=true jprt_build_product
    1.16 +
    1.17 +jprt_build_debugEmb:
    1.18 +	$(MAKE) JAVASE_EMBEDDED=true jprt_build_debug
    1.19 +
    1.20 +jprt_build_fastdebugEmb:
    1.21 +	$(MAKE) JAVASE_EMBEDDED=true jprt_build_fastdebug
    1.22 +
    1.23 +jprt_build_productOpen:
    1.24 +	$(MAKE) OPENJDK=true jprt_build_product
    1.25 +
    1.26 +jprt_build_debugOpen:
    1.27 +	$(MAKE) OPENJDK=true jprt_build_debug
    1.28 +
    1.29 +jprt_build_fastdebugOpen:
    1.30 +	$(MAKE) OPENJDK=true jprt_build_fastdebug
    1.31 +
    1.32  jprt_build_product: all_product copy_product_jdk export_product_jdk
    1.33  	( $(CD) $(JDK_IMAGE_DIR) && \
    1.34  	  $(ZIPEXE) $(ZIPFLAGS) -r $(JPRT_ARCHIVE_BUNDLE) . )
     2.1 --- a/make/jprt.properties	Tue May 17 19:15:34 2011 -0700
     2.2 +++ b/make/jprt.properties	Wed May 18 11:45:06 2011 -0700
     2.3 @@ -202,16 +202,21 @@
     2.4      ${jprt.my.windows.i586}-{product|fastdebug|debug}, \
     2.5      ${jprt.my.windows.x64}-{product|fastdebug|debug}
     2.6  
     2.7 +jprt.build.targets.open= \
     2.8 +    ${jprt.my.solaris.i586}-{productOpen}, \
     2.9 +    ${jprt.my.solaris.x64}-{debugOpen}, \
    2.10 +    ${jprt.my.linux.x64}-{productOpen}
    2.11 +
    2.12  jprt.build.targets.embedded= \
    2.13 -    ${jprt.my.linux.i586}-{product|fastdebug|debug}, \
    2.14 -    ${jprt.my.linux.ppc}-{product|fastdebug}, \
    2.15 -    ${jprt.my.linux.ppcv2}-{product|fastdebug}, \
    2.16 -    ${jprt.my.linux.ppcsflt}-{product|fastdebug}, \
    2.17 -    ${jprt.my.linux.armvfp}-{product|fastdebug}, \
    2.18 -    ${jprt.my.linux.armsflt}-{product|fastdebug}
    2.19 +    ${jprt.my.linux.i586}-{productEmb|fastdebugEmb|debugEmb}, \
    2.20 +    ${jprt.my.linux.ppc}-{productEmb|fastdebugEmb}, \
    2.21 +    ${jprt.my.linux.ppcv2}-{productEmb|fastdebugEmb}, \
    2.22 +    ${jprt.my.linux.ppcsflt}-{productEmb|fastdebugEmb}, \
    2.23 +    ${jprt.my.linux.armvfp}-{productEmb|fastdebugEmb}, \
    2.24 +    ${jprt.my.linux.armsflt}-{productEmb|fastdebugEmb}
    2.25  
    2.26  jprt.build.targets.all=${jprt.build.targets.standard}, \
    2.27 -    ${jprt.build.targets.embedded}
    2.28 +    ${jprt.build.targets.embedded}, ${jprt.build.targets.open}
    2.29  
    2.30  jprt.build.targets.jdk7=${jprt.build.targets.all}
    2.31  jprt.build.targets.jdk7temp=${jprt.build.targets.all}
    2.32 @@ -453,6 +458,12 @@
    2.33      ${jprt.my.windows.x64}-product-c2-jbb_G1, \
    2.34      ${jprt.my.windows.x64}-product-c2-jbb_ParOldGC
    2.35  
    2.36 +# Some basic "smoke" tests for OpenJDK builds
    2.37 +jprt.test.targets.open = \
    2.38 +    ${jprt.my.solaris.x64}-{productOpen|debugOpen|fastdebugOpen}-c2-jvm98_tiered, \
    2.39 +    ${jprt.my.solaris.i586}-{productOpen|fastdebugOpen}-c2-jvm98_tiered, \
    2.40 +    ${jprt.my.linux.x64}-{productOpen|fastdebugOpen}-c2-jvm98_tiered
    2.41 +
    2.42  # Testing for actual embedded builds is different to standard
    2.43  jprt.my.linux.i586.test.targets.embedded = \
    2.44      linux_i586_2.6-product-c1-scimark
    2.45 @@ -461,6 +472,7 @@
    2.46  # Note: no PPC or ARM tests at this stage
    2.47  
    2.48  jprt.test.targets.standard = \
    2.49 +  ${jprt.my.linux.i586.test.targets.embedded}, \
    2.50    ${jprt.my.solaris.sparc.test.targets}, \
    2.51    ${jprt.my.solaris.sparcv9.test.targets}, \
    2.52    ${jprt.my.solaris.i586.test.targets}, \
    2.53 @@ -468,7 +480,8 @@
    2.54    ${jprt.my.linux.i586.test.targets}, \
    2.55    ${jprt.my.linux.x64.test.targets}, \
    2.56    ${jprt.my.windows.i586.test.targets}, \
    2.57 -  ${jprt.my.windows.x64.test.targets}
    2.58 +  ${jprt.my.windows.x64.test.targets}, \
    2.59 +  ${jprt.test.targets.open}
    2.60  
    2.61  jprt.test.targets.embedded= 		\
    2.62    ${jprt.my.linux.i586.test.targets.embedded}, \
     3.1 --- a/src/share/vm/code/nmethod.cpp	Tue May 17 19:15:34 2011 -0700
     3.2 +++ b/src/share/vm/code/nmethod.cpp	Wed May 18 11:45:06 2011 -0700
     3.3 @@ -1810,7 +1810,7 @@
     3.4    void maybe_print(oop* p) {
     3.5      if (_print_nm == NULL)  return;
     3.6      if (!_detected_scavenge_root)  _print_nm->print_on(tty, "new scavenge root");
     3.7 -    tty->print_cr(""PTR_FORMAT"[offset=%d] detected non-perm oop "PTR_FORMAT" (found at "PTR_FORMAT")",
     3.8 +    tty->print_cr(""PTR_FORMAT"[offset=%d] detected scavengable oop "PTR_FORMAT" (found at "PTR_FORMAT")",
     3.9                    _print_nm, (int)((intptr_t)p - (intptr_t)_print_nm),
    3.10                    (intptr_t)(*p), (intptr_t)p);
    3.11      (*p)->print();
    3.12 @@ -2311,7 +2311,7 @@
    3.13        _nm->print_nmethod(true);
    3.14        _ok = false;
    3.15      }
    3.16 -    tty->print_cr("*** non-perm oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
    3.17 +    tty->print_cr("*** scavengable oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
    3.18                    (intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
    3.19      (*p)->print();
    3.20    }
    3.21 @@ -2324,7 +2324,7 @@
    3.22      DebugScavengeRoot debug_scavenge_root(this);
    3.23      oops_do(&debug_scavenge_root);
    3.24      if (!debug_scavenge_root.ok())
    3.25 -      fatal("found an unadvertised bad non-perm oop in the code cache");
    3.26 +      fatal("found an unadvertised bad scavengable oop in the code cache");
    3.27    }
    3.28    assert(scavenge_root_not_marked(), "");
    3.29  }
     4.1 --- a/src/share/vm/code/nmethod.hpp	Tue May 17 19:15:34 2011 -0700
     4.2 +++ b/src/share/vm/code/nmethod.hpp	Wed May 18 11:45:06 2011 -0700
     4.3 @@ -109,7 +109,7 @@
     4.4  class nmethod : public CodeBlob {
     4.5    friend class VMStructs;
     4.6    friend class NMethodSweeper;
     4.7 -  friend class CodeCache;  // non-perm oops
     4.8 +  friend class CodeCache;  // scavengable oops
     4.9   private:
    4.10    // Shared fields for all nmethod's
    4.11    methodOop _method;
    4.12 @@ -466,17 +466,17 @@
    4.13    bool is_at_poll_return(address pc);
    4.14    bool is_at_poll_or_poll_return(address pc);
    4.15  
    4.16 -  // Non-perm oop support
    4.17 +  // Scavengable oop support
    4.18    bool  on_scavenge_root_list() const                  { return (_scavenge_root_state & 1) != 0; }
    4.19   protected:
    4.20 -  enum { npl_on_list = 0x01, npl_marked = 0x10 };
    4.21 -  void  set_on_scavenge_root_list()                    { _scavenge_root_state = npl_on_list; }
    4.22 +  enum { sl_on_list = 0x01, sl_marked = 0x10 };
    4.23 +  void  set_on_scavenge_root_list()                    { _scavenge_root_state = sl_on_list; }
    4.24    void  clear_on_scavenge_root_list()                  { _scavenge_root_state = 0; }
    4.25    // assertion-checking and pruning logic uses the bits of _scavenge_root_state
    4.26  #ifndef PRODUCT
    4.27 -  void  set_scavenge_root_marked()                     { _scavenge_root_state |= npl_marked; }
    4.28 -  void  clear_scavenge_root_marked()                   { _scavenge_root_state &= ~npl_marked; }
    4.29 -  bool  scavenge_root_not_marked()                     { return (_scavenge_root_state &~ npl_on_list) == 0; }
    4.30 +  void  set_scavenge_root_marked()                     { _scavenge_root_state |= sl_marked; }
    4.31 +  void  clear_scavenge_root_marked()                   { _scavenge_root_state &= ~sl_marked; }
    4.32 +  bool  scavenge_root_not_marked()                     { return (_scavenge_root_state &~ sl_on_list) == 0; }
    4.33    // N.B. there is no positive marked query, and we only use the not_marked query for asserts.
    4.34  #endif //PRODUCT
    4.35    nmethod* scavenge_root_link() const                  { return _scavenge_root_link; }
     5.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue May 17 19:15:34 2011 -0700
     5.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Wed May 18 11:45:06 2011 -0700
     5.3 @@ -3054,6 +3054,28 @@
     5.4      _should_gray_objects = true;
     5.5  }
     5.6  
     5.7 +// Resets the region fields of active CMTasks whose values point
     5.8 +// into the collection set.
     5.9 +void ConcurrentMark::reset_active_task_region_fields_in_cset() {
    5.10 +  assert(SafepointSynchronize::is_at_safepoint(), "should be in STW");
    5.11 +  assert(parallel_marking_threads() <= _max_task_num, "sanity");
    5.12 +
    5.13 +  for (int i = 0; i < (int)parallel_marking_threads(); i += 1) {
    5.14 +    CMTask* task = _tasks[i];
    5.15 +    HeapWord* task_finger = task->finger();
    5.16 +    if (task_finger != NULL) {
    5.17 +      assert(_g1h->is_in_g1_reserved(task_finger), "not in heap");
    5.18 +      HeapRegion* finger_region = _g1h->heap_region_containing(task_finger);
    5.19 +      if (finger_region->in_collection_set()) {
    5.20 +        // The task's current region is in the collection set.
    5.21 +        // This region will be evacuated in the current GC and
    5.22 +        // the region fields in the task will be stale.
    5.23 +        task->giveup_current_region();
    5.24 +      }
    5.25 +    }
    5.26 +  }
    5.27 +}
    5.28 +
    5.29  // abandon current marking iteration due to a Full GC
    5.30  void ConcurrentMark::abort() {
    5.31    // Clear all marks to force marking thread to do nothing
     6.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Tue May 17 19:15:34 2011 -0700
     6.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.hpp	Wed May 18 11:45:06 2011 -0700
     6.3 @@ -809,10 +809,19 @@
     6.4  
     6.5    // It indicates that a new collection set is being chosen.
     6.6    void newCSet();
     6.7 +
     6.8    // It registers a collection set heap region with CM. This is used
     6.9    // to determine whether any heap regions are located above the finger.
    6.10    void registerCSetRegion(HeapRegion* hr);
    6.11  
    6.12 +  // Resets the region fields of any active CMTask whose region fields
    6.13 +  // are in the collection set (i.e. the region currently claimed by
    6.14 +  // the CMTask will be evacuated and may be used, subsequently, as
    6.15 +  // an alloc region). When this happens the region fields in the CMTask
    6.16 +  // are stale and, hence, should be cleared causing the worker thread
    6.17 +  // to claim a new region.
    6.18 +  void reset_active_task_region_fields_in_cset();
    6.19 +
    6.20    // Registers the maximum region-end associated with a set of
    6.21    // regions with CM. Again this is used to determine whether any
    6.22    // heap regions are located above the finger.
    6.23 @@ -1039,9 +1048,6 @@
    6.24    void setup_for_region(HeapRegion* hr);
    6.25    // it brings up-to-date the limit of the region
    6.26    void update_region_limit();
    6.27 -  // it resets the local fields after a task has finished scanning a
    6.28 -  // region
    6.29 -  void giveup_current_region();
    6.30  
    6.31    // called when either the words scanned or the refs visited limit
    6.32    // has been reached
    6.33 @@ -1094,6 +1100,11 @@
    6.34    // exit the termination protocol after it's entered it.
    6.35    virtual bool should_exit_termination();
    6.36  
    6.37 +  // Resets the local region fields after a task has finished scanning a
    6.38 +  // region; or when they have become stale as a result of the region
    6.39 +  // being evacuated.
    6.40 +  void giveup_current_region();
    6.41 +
    6.42    HeapWord* finger()            { return _finger; }
    6.43  
    6.44    bool has_aborted()            { return _has_aborted; }
     7.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue May 17 19:15:34 2011 -0700
     7.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Wed May 18 11:45:06 2011 -0700
     7.3 @@ -428,6 +428,37 @@
     7.4    _cmThread->stop();
     7.5  }
     7.6  
     7.7 +#ifdef ASSERT
     7.8 +// A region is added to the collection set as it is retired
     7.9 +// so an address p can point to a region which will be in the
    7.10 +// collection set but has not yet been retired.  This method
    7.11 +// therefore is only accurate during a GC pause after all
    7.12 +// regions have been retired.  It is used for debugging
    7.13 +// to check if an nmethod has references to objects that can
     7.14 +// be moved during a partial collection.  Though it can be
    7.15 +// inaccurate, it is sufficient for G1 because the conservative
    7.16 +// implementation of is_scavengable() for G1 will indicate that
    7.17 +// all nmethods must be scanned during a partial collection.
    7.18 +bool G1CollectedHeap::is_in_partial_collection(const void* p) {
    7.19 +  HeapRegion* hr = heap_region_containing(p);
    7.20 +  return hr != NULL && hr->in_collection_set();
    7.21 +}
    7.22 +#endif
    7.23 +
    7.24 +// Returns true if the reference points to an object that
     7.25 +// can move in an incremental collection.
    7.26 +bool G1CollectedHeap::is_scavengable(const void* p) {
    7.27 +  G1CollectedHeap* g1h = G1CollectedHeap::heap();
    7.28 +  G1CollectorPolicy* g1p = g1h->g1_policy();
    7.29 +  HeapRegion* hr = heap_region_containing(p);
    7.30 +  if (hr == NULL) {
    7.31 +     // perm gen (or null)
    7.32 +     return false;
    7.33 +  } else {
    7.34 +    return !hr->isHumongous();
    7.35 +  }
    7.36 +}
    7.37 +
    7.38  void G1CollectedHeap::check_ct_logs_at_safepoint() {
    7.39    DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
    7.40    CardTableModRefBS* ct_bs = (CardTableModRefBS*)barrier_set();
    7.41 @@ -3292,8 +3323,9 @@
    7.42        // progress, this will be zero.
    7.43        _cm->set_oops_do_bound();
    7.44  
    7.45 -      if (mark_in_progress())
    7.46 +      if (mark_in_progress()) {
    7.47          concurrent_mark()->newCSet();
    7.48 +      }
    7.49  
    7.50  #if YOUNG_LIST_VERBOSE
    7.51        gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
    7.52 @@ -3303,6 +3335,16 @@
    7.53  
    7.54        g1_policy()->choose_collection_set(target_pause_time_ms);
    7.55  
    7.56 +      // We have chosen the complete collection set. If marking is
    7.57 +      // active then, we clear the region fields of any of the
    7.58 +      // concurrent marking tasks whose region fields point into
    7.59 +      // the collection set as these values will become stale. This
    7.60 +      // will cause the owning marking threads to claim a new region
    7.61 +      // when marking restarts.
    7.62 +      if (mark_in_progress()) {
    7.63 +        concurrent_mark()->reset_active_task_region_fields_in_cset();
    7.64 +      }
    7.65 +
    7.66        // Nothing to do if we were unable to choose a collection set.
    7.67  #if G1_REM_SET_LOGGING
    7.68        gclog_or_tty->print_cr("\nAfter pause, heap:");
     8.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Tue May 17 19:15:34 2011 -0700
     8.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Wed May 18 11:45:06 2011 -0700
     8.3 @@ -1254,6 +1254,12 @@
     8.4      return hr != NULL && hr->is_young();
     8.5    }
     8.6  
     8.7 +#ifdef ASSERT
     8.8 +  virtual bool is_in_partial_collection(const void* p);
     8.9 +#endif
    8.10 +
    8.11 +  virtual bool is_scavengable(const void* addr);
    8.12 +
    8.13    // We don't need barriers for initializing stores to objects
    8.14    // in the young gen: for the SATB pre-barrier, there is no
    8.15    // pre-value that needs to be remembered; for the remembered-set
     9.1 --- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Tue May 17 19:15:34 2011 -0700
     9.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp	Wed May 18 11:45:06 2011 -0700
     9.3 @@ -339,6 +339,21 @@
     9.4    return false;
     9.5  }
     9.6  
     9.7 +bool ParallelScavengeHeap::is_scavengable(const void* addr) {
     9.8 +  return is_in_young((oop)addr);
     9.9 +}
    9.10 +
    9.11 +#ifdef ASSERT
    9.12 +// Don't implement this by using is_in_young().  This method is used
    9.13 +// in some cases to check that is_in_young() is correct.
    9.14 +bool ParallelScavengeHeap::is_in_partial_collection(const void *p) {
    9.15 +  assert(is_in_reserved(p) || p == NULL,
    9.16 +    "Does not work if address is non-null and outside of the heap");
    9.17 +  // The order of the generations is perm (low addr), old, young (high addr)
    9.18 +  return p >= old_gen()->reserved().end();
    9.19 +}
    9.20 +#endif
    9.21 +
    9.22  // There are two levels of allocation policy here.
    9.23  //
    9.24  // When an allocation request fails, the requesting thread must invoke a VM
    10.1 --- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Tue May 17 19:15:34 2011 -0700
    10.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.hpp	Wed May 18 11:45:06 2011 -0700
    10.3 @@ -127,6 +127,12 @@
    10.4    // collection.
    10.5    virtual bool is_maximal_no_gc() const;
    10.6  
    10.7 +  // Return true if the reference points to an object that
    10.8 +  // can be moved in a partial collection.  For currently implemented
    10.9 +  // generational collectors that means during a collection of
   10.10 +  // the young gen.
   10.11 +  virtual bool is_scavengable(const void* addr);
   10.12 +
   10.13    // Does this heap support heap inspection? (+PrintClassHistogram)
   10.14    bool supports_heap_inspection() const { return true; }
   10.15  
   10.16 @@ -143,6 +149,10 @@
   10.17      return perm_gen()->reserved().contains(p);
   10.18    }
   10.19  
   10.20 +#ifdef ASSERT
   10.21 +  virtual bool is_in_partial_collection(const void *p);
   10.22 +#endif
   10.23 +
   10.24    bool is_permanent(const void *p) const {    // committed part
   10.25      return perm_gen()->is_in(p);
   10.26    }
    11.1 --- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp	Tue May 17 19:15:34 2011 -0700
    11.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp	Wed May 18 11:45:06 2011 -0700
    11.3 @@ -51,7 +51,12 @@
    11.4  }
    11.5  
    11.6  inline bool ParallelScavengeHeap::is_in_young(oop p) {
    11.7 -  return young_gen()->is_in_reserved(p);
    11.8 +  // Assumes the old gen address range is lower than that of the young gen.
    11.9 +  const void* loc = (void*) p;
   11.10 +  bool result = ((HeapWord*)p) >= young_gen()->reserved().start();
   11.11 +  assert(result == young_gen()->is_in_reserved(p),
   11.12 +        err_msg("incorrect test - result=%d, p=" PTR_FORMAT, result, (void*)p));
   11.13 +  return result;
   11.14  }
   11.15  
   11.16  inline bool ParallelScavengeHeap::is_in_old_or_perm(oop p) {
    12.1 --- a/src/share/vm/gc_interface/collectedHeap.hpp	Tue May 17 19:15:34 2011 -0700
    12.2 +++ b/src/share/vm/gc_interface/collectedHeap.hpp	Wed May 18 11:45:06 2011 -0700
    12.3 @@ -269,6 +269,13 @@
    12.4    // space). If you need the more conservative answer use is_permanent().
    12.5    virtual bool is_in_permanent(const void *p) const = 0;
    12.6  
    12.7 +
    12.8 +#ifdef ASSERT
    12.9 +  // Returns true if "p" is in the part of the
   12.10 +  // heap being collected.
   12.11 +  virtual bool is_in_partial_collection(const void *p) = 0;
   12.12 +#endif
   12.13 +
   12.14    bool is_in_permanent_or_null(const void *p) const {
   12.15      return p == NULL || is_in_permanent(p);
   12.16    }
   12.17 @@ -284,11 +291,7 @@
   12.18  
   12.19    // An object is scavengable if its location may move during a scavenge.
   12.20    // (A scavenge is a GC which is not a full GC.)
   12.21 -  // Currently, this just means it is not perm (and not null).
   12.22 -  // This could change if we rethink what's in perm-gen.
   12.23 -  bool is_scavengable(const void *p) const {
   12.24 -    return !is_in_permanent_or_null(p);
   12.25 -  }
   12.26 +  virtual bool is_scavengable(const void *p) = 0;
   12.27  
   12.28    // Returns "TRUE" if "p" is a method oop in the
   12.29    // current heap, with high probability. This predicate
    13.1 --- a/src/share/vm/memory/genCollectedHeap.cpp	Tue May 17 19:15:34 2011 -0700
    13.2 +++ b/src/share/vm/memory/genCollectedHeap.cpp	Wed May 18 11:45:06 2011 -0700
    13.3 @@ -711,15 +711,6 @@
    13.4    _gen_process_strong_tasks->set_n_threads(t);
    13.5  }
    13.6  
    13.7 -class AssertIsPermClosure: public OopClosure {
    13.8 -public:
    13.9 -  void do_oop(oop* p) {
   13.10 -    assert((*p) == NULL || (*p)->is_perm(), "Referent should be perm.");
   13.11 -  }
   13.12 -  void do_oop(narrowOop* p) { ShouldNotReachHere(); }
   13.13 -};
   13.14 -static AssertIsPermClosure assert_is_perm_closure;
   13.15 -
   13.16  void GenCollectedHeap::
   13.17  gen_process_strong_roots(int level,
   13.18                           bool younger_gens_as_roots,
   13.19 @@ -962,6 +953,13 @@
   13.20    }
   13.21  }
   13.22  
   13.23 +bool GenCollectedHeap::is_in_young(oop p) {
   13.24 +  bool result = ((HeapWord*)p) < _gens[_n_gens - 1]->reserved().start();
   13.25 +  assert(result == _gens[0]->is_in_reserved(p),
   13.26 +         err_msg("incorrect test - result=%d, p=" PTR_FORMAT, result, (void*)p));
   13.27 +  return result;
   13.28 +}
   13.29 +
   13.30  // Returns "TRUE" iff "p" points into the allocated area of the heap.
   13.31  bool GenCollectedHeap::is_in(const void* p) const {
   13.32    #ifndef ASSERT
   13.33 @@ -984,10 +982,16 @@
   13.34    return false;
   13.35  }
   13.36  
   13.37 -// Returns "TRUE" iff "p" points into the allocated area of the heap.
   13.38 -bool GenCollectedHeap::is_in_youngest(void* p) {
   13.39 -  return _gens[0]->is_in(p);
   13.40 +#ifdef ASSERT
   13.41 +// Don't implement this by using is_in_young().  This method is used
   13.42 +// in some cases to check that is_in_young() is correct.
   13.43 +bool GenCollectedHeap::is_in_partial_collection(const void* p) {
   13.44 +  assert(is_in_reserved(p) || p == NULL,
   13.45 +    "Does not work if address is non-null and outside of the heap");
   13.46 +  // The order of the generations is young (low addr), old, perm (high addr)
   13.47 +  return p < _gens[_n_gens - 2]->reserved().end() && p != NULL;
   13.48  }
   13.49 +#endif
   13.50  
   13.51  void GenCollectedHeap::oop_iterate(OopClosure* cl) {
   13.52    for (int i = 0; i < _n_gens; i++) {
    14.1 --- a/src/share/vm/memory/genCollectedHeap.hpp	Tue May 17 19:15:34 2011 -0700
    14.2 +++ b/src/share/vm/memory/genCollectedHeap.hpp	Wed May 18 11:45:06 2011 -0700
    14.3 @@ -216,8 +216,18 @@
    14.4      }
    14.5    }
    14.6  
    14.7 -  // Returns "TRUE" iff "p" points into the youngest generation.
    14.8 -  bool is_in_youngest(void* p);
    14.9 +  // Returns true if the reference is to an object in the reserved space
   14.10 +  // for the young generation.
   14.11 +  // Assumes the young gen address range is less than that of the old gen.
   14.12 +  bool is_in_young(oop p);
   14.13 +
   14.14 +#ifdef ASSERT
   14.15 +  virtual bool is_in_partial_collection(const void* p);
   14.16 +#endif
   14.17 +
   14.18 +  virtual bool is_scavengable(const void* addr) {
   14.19 +    return is_in_young((oop)addr);
   14.20 +  }
   14.21  
   14.22    // Iteration functions.
   14.23    void oop_iterate(OopClosure* cl);
   14.24 @@ -283,7 +293,7 @@
   14.25      //       "Check can_elide_initializing_store_barrier() for this collector");
   14.26      // but unfortunately the flag UseSerialGC need not necessarily always
   14.27      // be set when DefNew+Tenured are being used.
   14.28 -    return is_in_youngest((void*)new_obj);
   14.29 +    return is_in_young(new_obj);
   14.30    }
   14.31  
   14.32    // Can a compiler elide a store barrier when it writes
    15.1 --- a/src/share/vm/memory/sharedHeap.cpp	Tue May 17 19:15:34 2011 -0700
    15.2 +++ b/src/share/vm/memory/sharedHeap.cpp	Wed May 18 11:45:06 2011 -0700
    15.3 @@ -102,6 +102,17 @@
    15.4  };
    15.5  static AssertIsPermClosure assert_is_perm_closure;
    15.6  
    15.7 +#ifdef ASSERT
    15.8 +class AssertNonScavengableClosure: public OopClosure {
    15.9 +public:
   15.10 +  virtual void do_oop(oop* p) {
   15.11 +    assert(!Universe::heap()->is_in_partial_collection(*p),
   15.12 +      "Referent should not be scavengable.");  }
   15.13 +  virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
   15.14 +};
   15.15 +static AssertNonScavengableClosure assert_is_non_scavengable_closure;
   15.16 +#endif
   15.17 +
   15.18  void SharedHeap::change_strong_roots_parity() {
   15.19    // Also set the new collection parity.
   15.20    assert(_strong_roots_parity >= 0 && _strong_roots_parity <= 2,
   15.21 @@ -196,9 +207,10 @@
   15.22          CodeCache::scavenge_root_nmethods_do(code_roots);
   15.23        }
   15.24      }
   15.25 -    // Verify if the code cache contents are in the perm gen
   15.26 -    NOT_PRODUCT(CodeBlobToOopClosure assert_code_is_perm(&assert_is_perm_closure, /*do_marking=*/ false));
   15.27 -    NOT_PRODUCT(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_perm));
   15.28 +    // Verify that the code cache contents are not subject to
   15.29 +    // movement by a scavenging collection.
   15.30 +    DEBUG_ONLY(CodeBlobToOopClosure assert_code_is_non_scavengable(&assert_is_non_scavengable_closure, /*do_marking=*/ false));
   15.31 +    DEBUG_ONLY(CodeCache::asserted_non_scavengable_nmethods_do(&assert_code_is_non_scavengable));
   15.32    }
   15.33  
   15.34    if (!collecting_perm_gen) {
    16.1 --- a/src/share/vm/oops/instanceRefKlass.cpp	Tue May 17 19:15:34 2011 -0700
    16.2 +++ b/src/share/vm/oops/instanceRefKlass.cpp	Wed May 18 11:45:06 2011 -0700
    16.3 @@ -397,7 +397,7 @@
    16.4  
    16.5    if (referent != NULL) {
    16.6      guarantee(referent->is_oop(), "referent field heap failed");
    16.7 -    if (gch != NULL && !gch->is_in_youngest(obj)) {
    16.8 +    if (gch != NULL && !gch->is_in_young(obj)) {
    16.9        // We do a specific remembered set check here since the referent
   16.10        // field is not part of the oop mask and therefore skipped by the
   16.11        // regular verify code.
   16.12 @@ -415,7 +415,7 @@
   16.13    if (next != NULL) {
   16.14      guarantee(next->is_oop(), "next field verify failed");
   16.15      guarantee(next->is_instanceRef(), "next field verify failed");
   16.16 -    if (gch != NULL && !gch->is_in_youngest(obj)) {
   16.17 +    if (gch != NULL && !gch->is_in_young(obj)) {
   16.18        // We do a specific remembered set check here since the next field is
   16.19        // not part of the oop mask and therefore skipped by the regular
   16.20        // verify code.

mercurial