src/share/vm/memory/space.hpp

changeset 777: 37f87013dfd8
parent    548: ba764ed4b6f2
child     782: 60fb9c4db4e6
     1.1 --- a/src/share/vm/memory/space.hpp	Wed Jun 04 13:51:09 2008 -0700
     1.2 +++ b/src/share/vm/memory/space.hpp	Thu Jun 05 15:57:56 2008 -0700
     1.3 @@ -105,7 +105,7 @@
     1.4    virtual void set_bottom(HeapWord* value) { _bottom = value; }
     1.5    virtual void set_end(HeapWord* value)    { _end = value; }
     1.6  
     1.7 -  HeapWord* saved_mark_word() const  { return _saved_mark_word; }
     1.8 +  virtual HeapWord* saved_mark_word() const  { return _saved_mark_word; }
     1.9    void set_saved_mark_word(HeapWord* p) { _saved_mark_word = p; }
    1.10  
    1.11    MemRegionClosure* preconsumptionDirtyCardClosure() const {
    1.12 @@ -131,8 +131,18 @@
    1.13      return MemRegion(bottom(), saved_mark_word());
    1.14    }
    1.15  
    1.16 -  // Initialization
    1.17 +  // Initialization.
    1.18 +  // "initialize" should be called once on a space, before it is used for
     1.19 +  // any purpose.  The "mr" argument gives the bounds of the space, and
    1.20 +  // the "clear_space" argument should be true unless the memory in "mr" is
    1.21 +  // known to be zeroed.
    1.22    virtual void initialize(MemRegion mr, bool clear_space);
    1.23 +
    1.24 +  // Sets the bounds (bottom and end) of the current space to those of "mr."
    1.25 +  void set_bounds(MemRegion mr);
    1.26 +
    1.27 +  // The "clear" method must be called on a region that may have
    1.28 +  // had allocation performed in it, but is now to be considered empty.
    1.29    virtual void clear();
    1.30  
    1.31    // For detecting GC bugs.  Should only be called at GC boundaries, since
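
The hunk above splits what used to be a single "Initialization" entry into a documented three-part contract: set_bounds() only records bottom and end, clear() resets a region that may already have had allocation performed in it, and initialize() is the one-time setup that takes the clear_space flag. The following is a rough, self-contained sketch of how those pieces might fit together; ToySpace, the char*-based HeapWord, and the simplified MemRegion are stand-ins for illustration, not the real HotSpot types.

    #include <cstddef>

    typedef char* HeapWord;                    // stand-in, not HotSpot's HeapWord

    struct MemRegion {                         // simplified stand-in
      HeapWord _start;
      size_t   _byte_size;
      HeapWord start() const { return _start; }
      HeapWord end()   const { return _start + _byte_size; }
    };

    class ToySpace {                           // hypothetical, for illustration only
      HeapWord _bottom, _end, _top;
    public:
      // Record the limits of the space; allocation state is untouched.
      void set_bounds(MemRegion mr) { _bottom = mr.start(); _end = mr.end(); }

      // Reset a region that may have had allocation performed in it.
      void clear() { _top = _bottom; }

      // Called once, before the space is used for any purpose.
      void initialize(MemRegion mr, bool clear_space) {
        set_bounds(mr);
        if (clear_space) {
          clear();             // a real space would also mangle/zero memory here
        } else {
          _top = _bottom;      // caller knows the memory in mr is already zeroed
        }
      }
    };
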
    1.32 @@ -216,7 +226,13 @@
    1.33    // "block" that contains "p".  We say "block" instead of "object" since
    1.34    // some heaps may not pack objects densely; a chunk may either be an
    1.35    // object or a non-object.  If "p" is not in the space, return NULL.
    1.36 -  virtual HeapWord* block_start(const void* p) const = 0;
    1.37 +  virtual HeapWord* block_start_const(const void* p) const = 0;
    1.38 +
    1.39 +  // The non-const version may have benevolent side effects on the data
    1.40 +  // structure supporting these calls, possibly speeding up future calls.
    1.41 +  // The default implementation, however, is simply to call the const
    1.42 +  // version.
    1.43 +  inline virtual HeapWord* block_start(const void* p);
    1.44  
    1.45    // Requires "addr" to be the start of a chunk, and returns its size.
    1.46    // "addr + size" is required to be the start of a new chunk, or the end
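
The renaming above separates the lookup into block_start_const(), the pure-virtual const query every subclass must provide, and a non-const block_start() that is allowed benevolent side effects such as updating an auxiliary lookup structure to speed up later queries. Per the comment, the default implementation simply forwards to the const version; its actual definition is presumably in an inline header that this hunk does not show, so the forwarding below is only a sketch of what the declaration implies.

    // Sketch of the default forwarding described above; the real definition is
    // expected to live in an inline header (e.g. space.inline.hpp), not shown here.
    inline HeapWord* Space::block_start(const void* p) {
      // No side effects by default; a subclass backed by a block-offset table
      // may override this to refine that table while answering the query.
      return block_start_const(p);
    }
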
    1.47 @@ -282,12 +298,13 @@
    1.48    CardTableModRefBS::PrecisionStyle _precision;
    1.49    HeapWord* _boundary;          // If non-NULL, process only non-NULL oops
    1.50                                  // pointing below boundary.
    1.51 -  HeapWord* _min_done;                // ObjHeadPreciseArray precision requires
    1.52 +  HeapWord* _min_done;          // ObjHeadPreciseArray precision requires
    1.53                                  // a downwards traversal; this is the
    1.54                                  // lowest location already done (or,
    1.55                                  // alternatively, the lowest address that
    1.56                                  // shouldn't be done again.  NULL means infinity.)
    1.57    NOT_PRODUCT(HeapWord* _last_bottom;)
    1.58 +  NOT_PRODUCT(HeapWord* _last_explicit_min_done;)
    1.59  
    1.60    // Get the actual top of the area on which the closure will
    1.61    // operate, given where the top is assumed to be (the end of the
    1.62 @@ -311,13 +328,15 @@
    1.63                          HeapWord* boundary) :
    1.64      _sp(sp), _cl(cl), _precision(precision), _boundary(boundary),
    1.65      _min_done(NULL) {
    1.66 -    NOT_PRODUCT(_last_bottom = NULL;)
    1.67 +    NOT_PRODUCT(_last_bottom = NULL);
    1.68 +    NOT_PRODUCT(_last_explicit_min_done = NULL);
    1.69    }
    1.70  
    1.71    void do_MemRegion(MemRegion mr);
    1.72  
    1.73    void set_min_done(HeapWord* min_done) {
    1.74      _min_done = min_done;
    1.75 +    NOT_PRODUCT(_last_explicit_min_done = _min_done);
    1.76    }
    1.77  #ifndef PRODUCT
    1.78    void set_last_bottom(HeapWord* last_bottom) {
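
The new _last_explicit_min_done field follows the same NOT_PRODUCT idiom as the existing _last_bottom: the declaration, its initialization, and the update in set_min_done() all compile away in product builds, so recording the last value a caller explicitly passed to set_min_done() costs nothing in shipping VMs. Whatever consumes the value (presumably a debug check in space.cpp) is not part of this hunk. A minimal, self-contained illustration of the idiom, with stand-in types:

    #include <cstddef>

    typedef char* HeapWord;          // stand-in, not HotSpot's HeapWord

    #ifdef PRODUCT                   // HotSpot defines NOT_PRODUCT in utilities/macros.hpp
    #define NOT_PRODUCT(code)
    #else
    #define NOT_PRODUCT(code) code
    #endif

    class ClosureSketch {            // hypothetical, for illustration only
      HeapWord* _min_done;                              // always present
      NOT_PRODUCT(HeapWord* _last_explicit_min_done;)   // debug builds only
    public:
      ClosureSketch() : _min_done(NULL) {
        NOT_PRODUCT(_last_explicit_min_done = NULL);
      }
      void set_min_done(HeapWord* min_done) {
        _min_done = min_done;
        NOT_PRODUCT(_last_explicit_min_done = _min_done);  // kept for debug checks
      }
    };
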
    1.79 @@ -355,6 +374,7 @@
    1.80  
    1.81  public:
    1.82    virtual void initialize(MemRegion mr, bool clear_space);
    1.83 +  virtual void clear();
    1.84  
    1.85    // Used temporarily during a compaction phase to hold the value
    1.86    // top should have when compaction is complete.
    1.87 @@ -511,7 +531,7 @@
    1.88        /* prefetch beyond q */                                                \
    1.89        Prefetch::write(q, interval);                                          \
    1.90        /* size_t size = oop(q)->size();  changing this for cms for perm gen */\
    1.91 -      size_t size = block_size(q);                                             \
    1.92 +      size_t size = block_size(q);                                           \
    1.93        compact_top = cp->space->forward(oop(q), size, cp, compact_top);       \
    1.94        q += size;                                                             \
    1.95        end_of_live = q;                                                       \
    1.96 @@ -575,68 +595,68 @@
    1.97    cp->space->set_compaction_top(compact_top);                                \
    1.98  }
    1.99  
   1.100 -#define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) {                                \
   1.101 -  /* adjust all the interior pointers to point at the new locations of objects        \
   1.102 -   * Used by MarkSweep::mark_sweep_phase3() */                                        \
   1.103 +#define SCAN_AND_ADJUST_POINTERS(adjust_obj_size) {                             \
   1.104 +  /* adjust all the interior pointers to point at the new locations of objects  \
   1.105 +   * Used by MarkSweep::mark_sweep_phase3() */                                  \
   1.106                                                                                  \
   1.107 -  HeapWord* q = bottom();                                                        \
   1.108 -  HeapWord* t = _end_of_live;  /* Established by "prepare_for_compaction". */        \
   1.109 +  HeapWord* q = bottom();                                                       \
   1.110 +  HeapWord* t = _end_of_live;  /* Established by "prepare_for_compaction". */   \
   1.111                                                                                  \
   1.112 -  assert(_first_dead <= _end_of_live, "Stands to reason, no?");                        \
   1.113 +  assert(_first_dead <= _end_of_live, "Stands to reason, no?");                 \
   1.114                                                                                  \
   1.115 -  if (q < t && _first_dead > q &&                                                \
   1.116 +  if (q < t && _first_dead > q &&                                               \
   1.117        !oop(q)->is_gc_marked()) {                                                \
   1.118      /* we have a chunk of the space which hasn't moved and we've                \
   1.119       * reinitialized the mark word during the previous pass, so we can't        \
   1.120 -     * use is_gc_marked for the traversal. */                                        \
   1.121 +     * use is_gc_marked for the traversal. */                                   \
   1.122      HeapWord* end = _first_dead;                                                \
   1.123                                                                                  \
   1.124 -    while (q < end) {                                                                \
   1.125 -      /* I originally tried to conjoin "block_start(q) == q" to the                \
   1.126 -       * assertion below, but that doesn't work, because you can't                \
   1.127 -       * accurately traverse previous objects to get to the current one                \
   1.128 -       * after their pointers (including pointers into permGen) have been        \
   1.129 -       * updated, until the actual compaction is done.  dld, 4/00 */                \
   1.130 -      assert(block_is_obj(q),                                                        \
   1.131 -             "should be at block boundaries, and should be looking at objs");        \
   1.132 +    while (q < end) {                                                           \
   1.133 +      /* I originally tried to conjoin "block_start(q) == q" to the             \
   1.134 +       * assertion below, but that doesn't work, because you can't              \
   1.135 +       * accurately traverse previous objects to get to the current one         \
   1.136 +       * after their pointers (including pointers into permGen) have been       \
   1.137 +       * updated, until the actual compaction is done.  dld, 4/00 */            \
   1.138 +      assert(block_is_obj(q),                                                   \
   1.139 +             "should be at block boundaries, and should be looking at objs");   \
   1.140                                                                                  \
   1.141        VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));     \
   1.142                                                                                  \
   1.143 -      /* point all the oops to the new location */                                \
   1.144 -      size_t size = oop(q)->adjust_pointers();                                        \
   1.145 -      size = adjust_obj_size(size);                                                \
   1.146 +      /* point all the oops to the new location */                              \
   1.147 +      size_t size = oop(q)->adjust_pointers();                                  \
   1.148 +      size = adjust_obj_size(size);                                             \
   1.149                                                                                  \
   1.150        VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());           \
   1.151 -                                                                                      \
   1.152 +                                                                                \
   1.153        VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));     \
   1.154 -                                                                                      \
   1.155 +                                                                                \
   1.156        q += size;                                                                \
   1.157 -    }                                                                                \
   1.158 +    }                                                                           \
   1.159                                                                                  \
   1.160 -    if (_first_dead == t) {                                                        \
   1.161 -      q = t;                                                                        \
   1.162 -    } else {                                                                        \
   1.163 -      /* $$$ This is funky.  Using this to read the previously written                \
   1.164 -       * LiveRange.  See also use below. */                                        \
   1.165 +    if (_first_dead == t) {                                                     \
   1.166 +      q = t;                                                                    \
   1.167 +    } else {                                                                    \
   1.168 +      /* $$$ This is funky.  Using this to read the previously written          \
   1.169 +       * LiveRange.  See also use below. */                                     \
   1.170        q = (HeapWord*)oop(_first_dead)->mark()->decode_pointer();                \
   1.171 -    }                                                                                \
   1.172 -  }                                                                                \
   1.173 +    }                                                                           \
   1.174 +  }                                                                             \
   1.175                                                                                  \
   1.176    const intx interval = PrefetchScanIntervalInBytes;                            \
   1.177                                                                                  \
   1.178 -  debug_only(HeapWord* prev_q = NULL);                                                \
   1.179 -  while (q < t) {                                                                \
   1.180 -    /* prefetch beyond q */                                                        \
   1.181 +  debug_only(HeapWord* prev_q = NULL);                                          \
   1.182 +  while (q < t) {                                                               \
   1.183 +    /* prefetch beyond q */                                                     \
   1.184      Prefetch::write(q, interval);                                               \
   1.185 -    if (oop(q)->is_gc_marked()) {                                                \
   1.186 -      /* q is alive */                                                                \
   1.187 +    if (oop(q)->is_gc_marked()) {                                               \
   1.188 +      /* q is alive */                                                          \
   1.189        VALIDATE_MARK_SWEEP_ONLY(MarkSweep::track_interior_pointers(oop(q)));     \
   1.190 -      /* point all the oops to the new location */                                \
   1.191 -      size_t size = oop(q)->adjust_pointers();                                        \
   1.192 -      size = adjust_obj_size(size);                                                \
   1.193 -      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());                \
   1.194 +      /* point all the oops to the new location */                              \
   1.195 +      size_t size = oop(q)->adjust_pointers();                                  \
   1.196 +      size = adjust_obj_size(size);                                             \
   1.197 +      VALIDATE_MARK_SWEEP_ONLY(MarkSweep::check_interior_pointers());           \
   1.198        VALIDATE_MARK_SWEEP_ONLY(MarkSweep::validate_live_oop(oop(q), size));     \
   1.199 -      debug_only(prev_q = q);                                                        \
   1.200 +      debug_only(prev_q = q);                                                   \
   1.201        q += size;                                                                \
   1.202      } else {                                                                        \
   1.203        /* q is not a live object, so its mark should point at the next                \
   1.204 @@ -716,6 +736,8 @@
   1.205      }                                                                                \
   1.206    }                                                                                \
   1.207                                                                                  \
   1.208 +  /* Let's remember if we were empty before we did the compaction. */           \
   1.209 +  bool was_empty = used_region().is_empty();                                    \
   1.210    /* Reset space after compaction is complete */                                \
   1.211    reset_after_compaction();                                                        \
   1.212    /* We do this clear, below, since it has overloaded meanings for some */      \
   1.213 @@ -723,8 +745,8 @@
   1.214    /* compacted into will have had their offset table thresholds updated */      \
   1.215    /* continuously, but those that weren't need to have their thresholds */      \
   1.216    /* re-initialized.  Also mangles unused area for debugging.           */      \
   1.217 -  if (is_empty()) {                                                             \
   1.218 -    clear();                                                                    \
   1.219 +  if (used_region().is_empty()) {                                               \
   1.220 +    if (!was_empty) clear();                                                    \
   1.221    } else {                                                                      \
   1.222      if (ZapUnusedHeapArea) mangle_unused_area();                                \
   1.223    }                                                                             \
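
The change above makes the post-compaction clear() conditional: the macro now snapshots whether the used region was empty before reset_after_compaction(), and calls clear() only when the space has just become empty, presumably because a space that was empty all along has no stale offset-table thresholds to re-initialize. In distilled form (not literal HotSpot code, with the macro line continuations stripped):

    bool was_empty = used_region().is_empty();   // snapshot before resetting top
    reset_after_compaction();
    if (used_region().is_empty()) {
      if (!was_empty) {
        clear();                 // just became empty: re-init offset-table
      }                          // thresholds, mangle unused area for debugging
    } else {
      if (ZapUnusedHeapArea) {
        mangle_unused_area();
      }
    }
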
   1.224 @@ -750,8 +772,8 @@
   1.225    HeapWord* top() const            { return _top;    }
   1.226    void set_top(HeapWord* value)    { _top = value; }
   1.227  
   1.228 -  void set_saved_mark()       { _saved_mark_word = top();    }
   1.229 -  void reset_saved_mark()     { _saved_mark_word = bottom(); }
   1.230 +  virtual void set_saved_mark()    { _saved_mark_word = top();    }
   1.231 +  void reset_saved_mark()          { _saved_mark_word = bottom(); }
   1.232  
   1.233    virtual void clear();
   1.234  
   1.235 @@ -843,7 +865,7 @@
   1.236    virtual void object_iterate_from(WaterMark mark, ObjectClosure* blk);
   1.237  
   1.238    // Very inefficient implementation.
   1.239 -  virtual HeapWord* block_start(const void* p) const;
   1.240 +  virtual HeapWord* block_start_const(const void* p) const;
   1.241    size_t block_size(const HeapWord* p) const;
   1.242    // If a block is in the allocated area, it is an object.
   1.243    bool block_is_obj(const HeapWord* p) const { return p < top(); }
   1.244 @@ -1000,9 +1022,10 @@
   1.245    void set_bottom(HeapWord* value);
   1.246    void set_end(HeapWord* value);
   1.247  
   1.248 +  virtual void initialize(MemRegion mr, bool clear_space);
   1.249    void clear();
   1.250  
   1.251 -  inline HeapWord* block_start(const void* p) const;
   1.252 +  inline HeapWord* block_start_const(const void* p) const;
   1.253  
   1.254    // Add offset table update.
   1.255    virtual inline HeapWord* allocate(size_t word_size);
