Merge

Fri, 10 Aug 2012 10:41:13 -0700

author
asaha
date
Fri, 10 Aug 2012 10:41:13 -0700
changeset 4215
defeb6dad7d5
parent 4214
000352e00389
parent 3967
abc951e44e1b
child 4216
e4d10261499c

Merge

     1.1 --- a/.hgtags	Thu Aug 02 22:23:28 2012 -0700
     1.2 +++ b/.hgtags	Fri Aug 10 10:41:13 2012 -0700
     1.3 @@ -266,3 +266,5 @@
     1.4  e3619706a7253540a2d94e9e841acaab8ace7038 jdk8-b49
     1.5  72e0362c3f0cfacbbac8af8a5b9d2e182f21c17b hs24-b18
     1.6  58f237a9e83af6ded0d2e2c81d252cd47c0f4c45 jdk8-b50
     1.7 +3b3ad16429701b2eb6712851c2f7c5a726eb2cbe hs24-b19
     1.8 +663fc23da8d51c4c0552cbcb17ffc85f5869d4fd jdk8-b51
     2.1 --- a/make/hotspot_version	Thu Aug 02 22:23:28 2012 -0700
     2.2 +++ b/make/hotspot_version	Fri Aug 10 10:41:13 2012 -0700
     2.3 @@ -35,7 +35,7 @@
     2.4  
     2.5  HS_MAJOR_VER=24
     2.6  HS_MINOR_VER=0
     2.7 -HS_BUILD_NUMBER=18
     2.8 +HS_BUILD_NUMBER=19
     2.9  
    2.10  JDK_MAJOR_VER=1
    2.11  JDK_MINOR_VER=8
     3.1 --- a/src/os/bsd/vm/decoder_machO.cpp	Thu Aug 02 22:23:28 2012 -0700
     3.2 +++ b/src/os/bsd/vm/decoder_machO.cpp	Fri Aug 10 10:41:13 2012 -0700
     3.3 @@ -26,6 +26,139 @@
     3.4  
     3.5  #ifdef __APPLE__
     3.6  #include "decoder_machO.hpp"
     3.7 +
     3.8 +#include <cxxabi.h>
     3.9 +#include <mach-o/loader.h>
    3.10 +#include <mach-o/nlist.h>
    3.11 +
    3.12 +
    3.13 +bool MachODecoder::demangle(const char* symbol, char *buf, int buflen) {
    3.14 +  int   status;
    3.15 +  char* result;
    3.16 +  size_t size = (size_t)buflen;
     3.17 +  // Don't pass buf to __cxa_demangle. In case 'buf' is too small,
     3.18 +  // __cxa_demangle will call system "realloc" for additional memory, which
     3.19 +  // may use a different malloc/realloc mechanism than the one that allocated 'buf'.
    3.20 +  if ((result = abi::__cxa_demangle(symbol, NULL, NULL, &status)) != NULL) {
    3.21 +    jio_snprintf(buf, buflen, "%s", result);
    3.22 +      // call c library's free
    3.23 +      ::free(result);
    3.24 +      return true;
    3.25 +  }
    3.26 +  return false;
    3.27 +}
    3.28 +
    3.29 +bool MachODecoder::decode(address addr, char *buf,
    3.30 +      int buflen, int *offset, const void *mach_base) {
    3.31 +  struct symtab_command * symt = (struct symtab_command *)
    3.32 +    mach_find_command((struct mach_header_64 *)mach_base, LC_SYMTAB);
    3.33 +  if (symt == NULL) {
    3.34 +    DEBUG_ONLY(tty->print_cr("no symtab in mach file at 0x%lx", mach_base));
    3.35 +    return false;
    3.36 +  }
    3.37 +  uint32_t off = symt->symoff;          /* symbol table offset (within this mach file) */
    3.38 +  uint32_t nsyms = symt->nsyms;         /* number of symbol table entries */
    3.39 +  uint32_t stroff = symt->stroff;       /* string table offset */
    3.40 +  uint32_t strsize = symt->strsize;     /* string table size in bytes */
    3.41 +
    3.42 +  // iterate through symbol table trying to match our offset
    3.43 +
    3.44 +  uint32_t addr_relative = (uintptr_t) mach_base - (uintptr_t) addr; // offset we seek in the symtab
    3.45 +  void * symtab_addr = (void*) ((uintptr_t) mach_base + off);
    3.46 +  struct nlist_64 *cur_nlist = (struct nlist_64 *) symtab_addr;
    3.47 +  struct nlist_64 *last_nlist = cur_nlist;  // no size stored in an entry, so keep previously seen nlist
    3.48 +
    3.49 +  int32_t found_strx = 0;
    3.50 +  int32_t found_symval = 0;
    3.51 +
    3.52 +  for (uint32_t i=0; i < nsyms; i++) {
    3.53 +    uint32_t this_value = cur_nlist->n_value;
    3.54 +
    3.55 +    if (addr_relative == this_value) {
    3.56 +      found_strx =  cur_nlist->n_un.n_strx;
    3.57 +      found_symval = this_value;
    3.58 +      break;
    3.59 +    } else if (addr_relative > this_value) {
    3.60 +      // gone past it, use previously seen nlist:
    3.61 +      found_strx = last_nlist->n_un.n_strx;
    3.62 +      found_symval = last_nlist->n_value;
    3.63 +      break;
    3.64 +    }
    3.65 +    last_nlist = cur_nlist;
    3.66 +    cur_nlist = cur_nlist + sizeof(struct nlist_64);
    3.67 +  }
    3.68 +  if (found_strx == 0) {
    3.69 +    return false;
    3.70 +  }
    3.71 +  // write the offset:
    3.72 +  *offset = addr_relative - found_symval;
    3.73 +
    3.74 +  // lookup found_strx in the string table
    3.75 +  char * symname = mach_find_in_stringtable((char*) ((uintptr_t)mach_base + stroff), strsize, found_strx);
    3.76 +  if (symname) {
    3.77 +      strncpy(buf, symname, buflen);
    3.78 +      return true;
    3.79 +  }
    3.80 +  DEBUG_ONLY(tty->print_cr("no string or null string found."));
    3.81 +  return false;
    3.82 +}
    3.83 +
    3.84 +void* MachODecoder::mach_find_command(struct mach_header_64 * mach_base, uint32_t command_wanted) {
    3.85 +  // possibly verify it is a mach_header, use magic number.
    3.86 +  // commands begin immediately after the header.
    3.87 +  struct load_command *pos = (struct load_command *) mach_base + sizeof(struct mach_header_64);
    3.88 +  for (uint32_t i = 0; i < mach_base->ncmds; i++) {
    3.89 +    struct load_command *this_cmd = (struct load_command *) pos;
    3.90 +    if (this_cmd->cmd == command_wanted) {
    3.91 +       return pos;
    3.92 +    }
    3.93 +    int cmdsize = this_cmd->cmdsize;
    3.94 +    pos += cmdsize;
    3.95 +  }
    3.96 +  return NULL;
    3.97 +}
    3.98 +
    3.99 +char* MachODecoder::mach_find_in_stringtable(char *strtab, uint32_t tablesize, int strx_wanted) {
   3.100 +
   3.101 +  if (strx_wanted == 0) {
   3.102 +    return NULL;
   3.103 +  }
   3.104 +  char *strtab_end = strtab + tablesize;
   3.105 +
   3.106 +  // find the first string, skip over the space char
   3.107 +  // (or the four zero bytes we see e.g. in libclient)
   3.108 +  if (*strtab == ' ') {
   3.109 +      strtab++;
   3.110 +      if (*strtab != 0) {
   3.111 +          DEBUG_ONLY(tty->print_cr("string table has leading space but no following zero."));
   3.112 +          return NULL;
   3.113 +      }
   3.114 +      strtab++;
   3.115 +  } else {
   3.116 +      if ((uint32_t) *strtab != 0) {
   3.117 +          DEBUG_ONLY(tty->print_cr("string table without leading space or leading int of zero."));
   3.118 +          return NULL;
   3.119 +      }
   3.120 +      strtab+=4;
   3.121 +  }
   3.122 +  // read the real strings starting at index 1
   3.123 +  int cur_strx = 1;
   3.124 +  while (strtab < strtab_end) {
   3.125 +    if (cur_strx == strx_wanted) {
   3.126 +        return strtab;
   3.127 +    }
   3.128 +    // find start of next string
   3.129 +    while (*strtab != 0) {
   3.130 +        strtab++;
   3.131 +    }
   3.132 +    strtab++; // skip the terminating zero
   3.133 +    cur_strx++;
   3.134 +  }
   3.135 +  DEBUG_ONLY(tty->print_cr("string number %d not found.", strx_wanted));
   3.136 +  return NULL;
   3.137 +}
   3.138 +
   3.139 +
   3.140  #endif
   3.141  
   3.142  
     4.1 --- a/src/os/bsd/vm/decoder_machO.hpp	Thu Aug 02 22:23:28 2012 -0700
     4.2 +++ b/src/os/bsd/vm/decoder_machO.hpp	Fri Aug 10 10:41:13 2012 -0700
     4.3 @@ -31,10 +31,25 @@
     4.4  
     4.5  // Just a placehold for now, a real implementation should derive
     4.6  // from AbstractDecoder
     4.7 -class MachODecoder : public NullDecoder {
     4.8 -public:
     4.9 +class MachODecoder : public AbstractDecoder {
    4.10 + public:
    4.11    MachODecoder() { }
    4.12    ~MachODecoder() { }
    4.13 +  virtual bool can_decode_C_frame_in_vm() const {
    4.14 +    return true;
    4.15 +  }
    4.16 +  virtual bool demangle(const char* symbol, char* buf, int buflen);
    4.17 +  virtual bool decode(address pc, char* buf, int buflen, int* offset,
    4.18 +                      const void* base);
    4.19 +  virtual bool decode(address pc, char* buf, int buflen, int* offset,
    4.20 +                      const char* module_path = NULL) {
    4.21 +    ShouldNotReachHere();
    4.22 +    return false;
    4.23 +  }
    4.24 +
    4.25 + private:
    4.26 +  void * mach_find_command(struct mach_header_64 * mach_base, uint32_t command_wanted);
    4.27 +  char * mach_find_in_stringtable(char *strtab, uint32_t tablesize, int strx_wanted);
    4.28  };
    4.29  
    4.30  #endif
     5.1 --- a/src/os/bsd/vm/os_bsd.cpp	Thu Aug 02 22:23:28 2012 -0700
     5.2 +++ b/src/os/bsd/vm/os_bsd.cpp	Fri Aug 10 10:41:13 2012 -0700
     5.3 @@ -1946,10 +1946,16 @@
     5.4    return false;
     5.5  }
     5.6  
     5.7 +
     5.8 +#define MACH_MAXSYMLEN 256
     5.9 +
    5.10  bool os::dll_address_to_function_name(address addr, char *buf,
    5.11                                        int buflen, int *offset) {
    5.12    Dl_info dlinfo;
    5.13 -
    5.14 +  char localbuf[MACH_MAXSYMLEN];
    5.15 +
     5.16 +  // dladdr will find names of dynamic functions only, but does
     5.17 +  // it set dli_fbase to the mach_header address when it "fails"?
    5.18    if (dladdr((void*)addr, &dlinfo) && dlinfo.dli_sname != NULL) {
    5.19      if (buf != NULL) {
    5.20        if(!Decoder::demangle(dlinfo.dli_sname, buf, buflen)) {
    5.21 @@ -1965,6 +1971,14 @@
    5.22      }
    5.23    }
    5.24  
     5.25 +  // Handle non-dynamic symbols manually:
    5.26 +  if (dlinfo.dli_fbase != NULL &&
    5.27 +      Decoder::decode(addr, localbuf, MACH_MAXSYMLEN, offset, dlinfo.dli_fbase)) {
    5.28 +    if(!Decoder::demangle(localbuf, buf, buflen)) {
    5.29 +      jio_snprintf(buf, buflen, "%s", localbuf);
    5.30 +    }
    5.31 +    return true;
    5.32 +  }
    5.33    if (buf != NULL) buf[0] = '\0';
    5.34    if (offset != NULL) *offset = -1;
    5.35    return false;
     6.1 --- a/src/os/windows/vm/decoder_windows.cpp	Thu Aug 02 22:23:28 2012 -0700
     6.2 +++ b/src/os/windows/vm/decoder_windows.cpp	Fri Aug 10 10:41:13 2012 -0700
     6.3 @@ -72,10 +72,10 @@
     6.4  
     6.5       // find out if jvm.dll contains private symbols, by decoding
     6.6       // current function and comparing the result
     6.7 -     address addr = (address)Decoder::decode;
     6.8 +     address addr = (address)Decoder::demangle;
     6.9       char buf[MAX_PATH];
    6.10       if (decode(addr, buf, sizeof(buf), NULL)) {
    6.11 -       _can_decode_in_vm = !strcmp(buf, "Decoder::decode");
    6.12 +       _can_decode_in_vm = !strcmp(buf, "Decoder::demangle");
    6.13       }
    6.14    }
    6.15  }
     7.1 --- a/src/os/windows/vm/decoder_windows.hpp	Thu Aug 02 22:23:28 2012 -0700
     7.2 +++ b/src/os/windows/vm/decoder_windows.hpp	Fri Aug 10 10:41:13 2012 -0700
     7.3 @@ -45,6 +45,10 @@
     7.4    bool can_decode_C_frame_in_vm() const;
     7.5    bool demangle(const char* symbol, char *buf, int buflen);
     7.6    bool decode(address addr, char *buf, int buflen, int* offset, const char* modulepath = NULL);
     7.7 +  bool decode(address addr, char *buf, int buflen, int* offset, const void* base) {
     7.8 +    ShouldNotReachHere();
     7.9 +    return false;
    7.10 +  }
    7.11  
    7.12  private:
    7.13    void initialize();
     8.1 --- a/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp	Thu Aug 02 22:23:28 2012 -0700
     8.2 +++ b/src/share/vm/gc_implementation/g1/collectionSetChooser.hpp	Fri Aug 10 10:41:13 2012 -0700
     8.3 @@ -153,4 +153,47 @@
     8.4    void verify() PRODUCT_RETURN;
     8.5  };
     8.6  
     8.7 +class CSetChooserParUpdater : public StackObj {
     8.8 +private:
     8.9 +  CollectionSetChooser* _chooser;
    8.10 +  bool _parallel;
    8.11 +  uint _chunk_size;
    8.12 +  uint _cur_chunk_idx;
    8.13 +  uint _cur_chunk_end;
    8.14 +  uint _regions_added;
    8.15 +  size_t _reclaimable_bytes_added;
    8.16 +
    8.17 +public:
    8.18 +  CSetChooserParUpdater(CollectionSetChooser* chooser,
    8.19 +                        bool parallel, uint chunk_size) :
    8.20 +    _chooser(chooser), _parallel(parallel), _chunk_size(chunk_size),
    8.21 +    _cur_chunk_idx(0), _cur_chunk_end(0),
    8.22 +    _regions_added(0), _reclaimable_bytes_added(0) { }
    8.23 +
    8.24 +  ~CSetChooserParUpdater() {
    8.25 +    if (_parallel && _regions_added > 0) {
    8.26 +      _chooser->update_totals(_regions_added, _reclaimable_bytes_added);
    8.27 +    }
    8.28 +  }
    8.29 +
    8.30 +  void add_region(HeapRegion* hr) {
    8.31 +    if (_parallel) {
    8.32 +      if (_cur_chunk_idx == _cur_chunk_end) {
    8.33 +        _cur_chunk_idx = _chooser->claim_array_chunk(_chunk_size);
    8.34 +        _cur_chunk_end = _cur_chunk_idx + _chunk_size;
    8.35 +      }
    8.36 +      assert(_cur_chunk_idx < _cur_chunk_end, "invariant");
    8.37 +      _chooser->set_region(_cur_chunk_idx, hr);
    8.38 +      _cur_chunk_idx += 1;
    8.39 +    } else {
    8.40 +      _chooser->add_region(hr);
    8.41 +    }
    8.42 +    _regions_added += 1;
    8.43 +    _reclaimable_bytes_added += hr->reclaimable_bytes();
    8.44 +  }
    8.45 +
    8.46 +  bool should_add(HeapRegion* hr) { return _chooser->should_add(hr); }
    8.47 +};
    8.48 +
    8.49  #endif // SHARE_VM_GC_IMPLEMENTATION_G1_COLLECTIONSETCHOOSER_HPP
    8.50 +
     9.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Thu Aug 02 22:23:28 2012 -0700
     9.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Fri Aug 10 10:41:13 2012 -0700
     9.3 @@ -1226,9 +1226,7 @@
     9.4      } else {
     9.5        // Starts humongous case: calculate how many regions are part of
     9.6        // this humongous region and then set the bit range.
     9.7 -      G1CollectedHeap* g1h = G1CollectedHeap::heap();
     9.8 -      HeapRegion *last_hr = g1h->heap_region_containing_raw(hr->end() - 1);
     9.9 -      BitMap::idx_t end_index = (BitMap::idx_t) last_hr->hrs_index() + 1;
    9.10 +      BitMap::idx_t end_index = (BitMap::idx_t) hr->last_hc_index();
    9.11        _region_bm->par_at_put_range(index, end_index, true);
    9.12      }
    9.13    }
    9.14 @@ -1645,26 +1643,27 @@
    9.15    size_t freed_bytes() { return _freed_bytes; }
    9.16  
    9.17    bool doHeapRegion(HeapRegion *hr) {
    9.18 +    if (hr->continuesHumongous()) {
    9.19 +      return false;
    9.20 +    }
    9.21      // We use a claim value of zero here because all regions
    9.22      // were claimed with value 1 in the FinalCount task.
    9.23 -    hr->reset_gc_time_stamp();
    9.24 -    if (!hr->continuesHumongous()) {
    9.25 -      double start = os::elapsedTime();
    9.26 -      _regions_claimed++;
    9.27 -      hr->note_end_of_marking();
    9.28 -      _max_live_bytes += hr->max_live_bytes();
    9.29 -      _g1->free_region_if_empty(hr,
    9.30 -                                &_freed_bytes,
    9.31 -                                _local_cleanup_list,
    9.32 -                                _old_proxy_set,
    9.33 -                                _humongous_proxy_set,
    9.34 -                                _hrrs_cleanup_task,
    9.35 -                                true /* par */);
    9.36 -      double region_time = (os::elapsedTime() - start);
    9.37 -      _claimed_region_time += region_time;
    9.38 -      if (region_time > _max_region_time) {
    9.39 -        _max_region_time = region_time;
    9.40 -      }
    9.41 +    _g1->reset_gc_time_stamps(hr);
    9.42 +    double start = os::elapsedTime();
    9.43 +    _regions_claimed++;
    9.44 +    hr->note_end_of_marking();
    9.45 +    _max_live_bytes += hr->max_live_bytes();
    9.46 +    _g1->free_region_if_empty(hr,
    9.47 +                              &_freed_bytes,
    9.48 +                              _local_cleanup_list,
    9.49 +                              _old_proxy_set,
    9.50 +                              _humongous_proxy_set,
    9.51 +                              _hrrs_cleanup_task,
    9.52 +                              true /* par */);
    9.53 +    double region_time = (os::elapsedTime() - start);
    9.54 +    _claimed_region_time += region_time;
    9.55 +    if (region_time > _max_region_time) {
    9.56 +      _max_region_time = region_time;
    9.57      }
    9.58      return false;
    9.59    }
    9.60 @@ -1881,6 +1880,7 @@
    9.61    } else {
    9.62      g1_par_note_end_task.work(0);
    9.63    }
    9.64 +  g1h->check_gc_time_stamps();
    9.65  
    9.66    if (!cleanup_list_is_empty()) {
    9.67      // The cleanup list is not empty, so we'll have to process it
    9.68 @@ -2449,24 +2449,8 @@
    9.69      } else {
    9.70        HeapRegion* hr  = _g1h->heap_region_containing(obj);
    9.71        guarantee(hr != NULL, "invariant");
    9.72 -      bool over_tams = false;
    9.73 -      bool marked = false;
    9.74 -
    9.75 -      switch (_vo) {
    9.76 -        case VerifyOption_G1UsePrevMarking:
    9.77 -          over_tams = hr->obj_allocated_since_prev_marking(obj);
    9.78 -          marked = _g1h->isMarkedPrev(obj);
    9.79 -          break;
    9.80 -        case VerifyOption_G1UseNextMarking:
    9.81 -          over_tams = hr->obj_allocated_since_next_marking(obj);
    9.82 -          marked = _g1h->isMarkedNext(obj);
    9.83 -          break;
    9.84 -        case VerifyOption_G1UseMarkWord:
    9.85 -          marked = obj->is_gc_marked();
    9.86 -          break;
    9.87 -        default:
    9.88 -          ShouldNotReachHere();
    9.89 -      }
    9.90 +      bool over_tams = _g1h->allocated_since_marking(obj, hr, _vo);
    9.91 +      bool marked = _g1h->is_marked(obj, _vo);
    9.92  
    9.93        if (over_tams) {
    9.94          str = " >";
    9.95 @@ -2502,24 +2486,8 @@
    9.96      _out(out), _vo(vo), _all(all), _hr(hr) { }
    9.97  
    9.98    void do_object(oop o) {
    9.99 -    bool over_tams = false;
   9.100 -    bool marked = false;
   9.101 -
   9.102 -    switch (_vo) {
   9.103 -      case VerifyOption_G1UsePrevMarking:
   9.104 -        over_tams = _hr->obj_allocated_since_prev_marking(o);
   9.105 -        marked = _g1h->isMarkedPrev(o);
   9.106 -        break;
   9.107 -      case VerifyOption_G1UseNextMarking:
   9.108 -        over_tams = _hr->obj_allocated_since_next_marking(o);
   9.109 -        marked = _g1h->isMarkedNext(o);
   9.110 -        break;
   9.111 -      case VerifyOption_G1UseMarkWord:
   9.112 -        marked = o->is_gc_marked();
   9.113 -        break;
   9.114 -      default:
   9.115 -        ShouldNotReachHere();
   9.116 -    }
   9.117 +    bool over_tams = _g1h->allocated_since_marking(o, _hr, _vo);
   9.118 +    bool marked = _g1h->is_marked(o, _vo);
   9.119      bool print_it = _all || over_tams || marked;
   9.120  
   9.121      if (print_it) {
   9.122 @@ -2533,32 +2501,17 @@
   9.123  
   9.124  class PrintReachableRegionClosure : public HeapRegionClosure {
   9.125  private:
   9.126 -  outputStream* _out;
   9.127 -  VerifyOption  _vo;
   9.128 -  bool          _all;
   9.129 +  G1CollectedHeap* _g1h;
   9.130 +  outputStream*    _out;
   9.131 +  VerifyOption     _vo;
   9.132 +  bool             _all;
   9.133  
   9.134  public:
   9.135    bool doHeapRegion(HeapRegion* hr) {
   9.136      HeapWord* b = hr->bottom();
   9.137      HeapWord* e = hr->end();
   9.138      HeapWord* t = hr->top();
   9.139 -    HeapWord* p = NULL;
   9.140 -
   9.141 -    switch (_vo) {
   9.142 -      case VerifyOption_G1UsePrevMarking:
   9.143 -        p = hr->prev_top_at_mark_start();
   9.144 -        break;
   9.145 -      case VerifyOption_G1UseNextMarking:
   9.146 -        p = hr->next_top_at_mark_start();
   9.147 -        break;
   9.148 -      case VerifyOption_G1UseMarkWord:
   9.149 -        // When we are verifying marking using the mark word
   9.150 -        // TAMS has no relevance.
   9.151 -        assert(p == NULL, "post-condition");
   9.152 -        break;
   9.153 -      default:
   9.154 -        ShouldNotReachHere();
   9.155 -    }
   9.156 +    HeapWord* p = _g1h->top_at_mark_start(hr, _vo);
   9.157      _out->print_cr("** ["PTR_FORMAT", "PTR_FORMAT"] top: "PTR_FORMAT" "
   9.158                     "TAMS: "PTR_FORMAT, b, e, t, p);
   9.159      _out->cr();
   9.160 @@ -2580,20 +2533,9 @@
   9.161    PrintReachableRegionClosure(outputStream* out,
   9.162                                VerifyOption  vo,
   9.163                                bool          all) :
   9.164 -    _out(out), _vo(vo), _all(all) { }
   9.165 +    _g1h(G1CollectedHeap::heap()), _out(out), _vo(vo), _all(all) { }
   9.166  };
   9.167  
   9.168 -static const char* verify_option_to_tams(VerifyOption vo) {
   9.169 -  switch (vo) {
   9.170 -    case VerifyOption_G1UsePrevMarking:
   9.171 -      return "PTAMS";
   9.172 -    case VerifyOption_G1UseNextMarking:
   9.173 -      return "NTAMS";
   9.174 -    default:
   9.175 -      return "NONE";
   9.176 -  }
   9.177 -}
   9.178 -
   9.179  void ConcurrentMark::print_reachable(const char* str,
   9.180                                       VerifyOption vo,
   9.181                                       bool all) {
   9.182 @@ -2622,7 +2564,7 @@
   9.183    }
   9.184  
   9.185    outputStream* out = &fout;
   9.186 -  out->print_cr("-- USING %s", verify_option_to_tams(vo));
   9.187 +  out->print_cr("-- USING %s", _g1h->top_at_mark_start_str(vo));
   9.188    out->cr();
   9.189  
   9.190    out->print_cr("--- ITERATING OVER REGIONS");
    10.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Aug 02 22:23:28 2012 -0700
    10.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Fri Aug 10 10:41:13 2012 -0700
    10.3 @@ -1149,13 +1149,16 @@
    10.4  }
    10.5  
    10.6  class PostMCRemSetClearClosure: public HeapRegionClosure {
    10.7 +  G1CollectedHeap* _g1h;
    10.8    ModRefBarrierSet* _mr_bs;
    10.9  public:
   10.10 -  PostMCRemSetClearClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {}
   10.11 +  PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :
   10.12 +    _g1h(g1h), _mr_bs(mr_bs) { }
   10.13    bool doHeapRegion(HeapRegion* r) {
   10.14 -    r->reset_gc_time_stamp();
   10.15 -    if (r->continuesHumongous())
   10.16 +    if (r->continuesHumongous()) {
   10.17        return false;
   10.18 +    }
   10.19 +    _g1h->reset_gc_time_stamps(r);
   10.20      HeapRegionRemSet* hrrs = r->rem_set();
   10.21      if (hrrs != NULL) hrrs->clear();
   10.22      // You might think here that we could clear just the cards
   10.23 @@ -1168,19 +1171,10 @@
   10.24    }
   10.25  };
   10.26  
   10.27 -
   10.28 -class PostMCRemSetInvalidateClosure: public HeapRegionClosure {
   10.29 -  ModRefBarrierSet* _mr_bs;
   10.30 -public:
   10.31 -  PostMCRemSetInvalidateClosure(ModRefBarrierSet* mr_bs) : _mr_bs(mr_bs) {}
   10.32 -  bool doHeapRegion(HeapRegion* r) {
   10.33 -    if (r->continuesHumongous()) return false;
   10.34 -    if (r->used_region().word_size() != 0) {
   10.35 -      _mr_bs->invalidate(r->used_region(), true /*whole heap*/);
   10.36 -    }
   10.37 -    return false;
   10.38 -  }
   10.39 -};
   10.40 +void G1CollectedHeap::clear_rsets_post_compaction() {
   10.41 +  PostMCRemSetClearClosure rs_clear(this, mr_bs());
   10.42 +  heap_region_iterate(&rs_clear);
   10.43 +}
   10.44  
   10.45  class RebuildRSOutOfRegionClosure: public HeapRegionClosure {
   10.46    G1CollectedHeap*   _g1h;
   10.47 @@ -1229,7 +1223,7 @@
   10.48        if (!hr->isHumongous()) {
   10.49          _hr_printer->post_compaction(hr, G1HRPrinter::Old);
   10.50        } else if (hr->startsHumongous()) {
   10.51 -        if (hr->capacity() == HeapRegion::GrainBytes) {
   10.52 +        if (hr->region_num() == 1) {
   10.53            // single humongous region
   10.54            _hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous);
   10.55          } else {
   10.56 @@ -1247,6 +1241,11 @@
   10.57      : _hr_printer(hr_printer) { }
   10.58  };
   10.59  
   10.60 +void G1CollectedHeap::print_hrs_post_compaction() {
   10.61 +  PostCompactionPrinterClosure cl(hr_printer());
   10.62 +  heap_region_iterate(&cl);
   10.63 +}
   10.64 +
   10.65  bool G1CollectedHeap::do_collection(bool explicit_gc,
   10.66                                      bool clear_all_soft_refs,
   10.67                                      size_t word_size) {
   10.68 @@ -1402,8 +1401,8 @@
   10.69      // Since everything potentially moved, we will clear all remembered
   10.70      // sets, and clear all cards.  Later we will rebuild remebered
   10.71      // sets. We will also reset the GC time stamps of the regions.
   10.72 -    PostMCRemSetClearClosure rs_clear(mr_bs());
   10.73 -    heap_region_iterate(&rs_clear);
   10.74 +    clear_rsets_post_compaction();
   10.75 +    check_gc_time_stamps();
   10.76  
   10.77      // Resize the heap if necessary.
   10.78      resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
   10.79 @@ -1413,9 +1412,7 @@
   10.80        // that all the COMMIT / UNCOMMIT events are generated before
   10.81        // the end GC event.
   10.82  
   10.83 -      PostCompactionPrinterClosure cl(hr_printer());
   10.84 -      heap_region_iterate(&cl);
   10.85 -
   10.86 +      print_hrs_post_compaction();
   10.87        _hr_printer.end_gc(true /* full */, (size_t) total_collections());
   10.88      }
   10.89  
   10.90 @@ -2263,6 +2260,51 @@
   10.91    return _g1_committed.byte_size();
   10.92  }
   10.93  
   10.94 +void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
   10.95 +  assert(!hr->continuesHumongous(), "pre-condition");
   10.96 +  hr->reset_gc_time_stamp();
   10.97 +  if (hr->startsHumongous()) {
   10.98 +    uint first_index = hr->hrs_index() + 1;
   10.99 +    uint last_index = hr->last_hc_index();
  10.100 +    for (uint i = first_index; i < last_index; i += 1) {
  10.101 +      HeapRegion* chr = region_at(i);
  10.102 +      assert(chr->continuesHumongous(), "sanity");
  10.103 +      chr->reset_gc_time_stamp();
  10.104 +    }
  10.105 +  }
  10.106 +}
  10.107 +
  10.108 +#ifndef PRODUCT
  10.109 +class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
  10.110 +private:
  10.111 +  unsigned _gc_time_stamp;
  10.112 +  bool _failures;
  10.113 +
  10.114 +public:
  10.115 +  CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) :
  10.116 +    _gc_time_stamp(gc_time_stamp), _failures(false) { }
  10.117 +
  10.118 +  virtual bool doHeapRegion(HeapRegion* hr) {
  10.119 +    unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
  10.120 +    if (_gc_time_stamp != region_gc_time_stamp) {
  10.121 +      gclog_or_tty->print_cr("Region "HR_FORMAT" has GC time stamp = %d, "
  10.122 +                             "expected %d", HR_FORMAT_PARAMS(hr),
  10.123 +                             region_gc_time_stamp, _gc_time_stamp);
  10.124 +      _failures = true;
  10.125 +    }
  10.126 +    return false;
  10.127 +  }
  10.128 +
  10.129 +  bool failures() { return _failures; }
  10.130 +};
  10.131 +
  10.132 +void G1CollectedHeap::check_gc_time_stamps() {
  10.133 +  CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
  10.134 +  heap_region_iterate(&cl);
  10.135 +  guarantee(!cl.failures(), "all GC time stamps should have been reset");
  10.136 +}
  10.137 +#endif // PRODUCT
  10.138 +
  10.139  void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
  10.140                                                   DirtyCardQueue* into_cset_dcq,
  10.141                                                   bool concurrent,
  10.142 @@ -2530,7 +2572,7 @@
  10.143    IterateOopClosureRegionClosure(MemRegion mr, OopClosure* cl)
  10.144      : _mr(mr), _cl(cl) {}
  10.145    bool doHeapRegion(HeapRegion* r) {
  10.146 -    if (! r->continuesHumongous()) {
  10.147 +    if (!r->continuesHumongous()) {
  10.148        r->oop_iterate(_cl);
  10.149      }
  10.150      return false;
  10.151 @@ -2601,14 +2643,9 @@
  10.152    _hrs.iterate(cl);
  10.153  }
  10.154  
  10.155 -void G1CollectedHeap::heap_region_iterate_from(HeapRegion* r,
  10.156 -                                               HeapRegionClosure* cl) const {
  10.157 -  _hrs.iterate_from(r, cl);
  10.158 -}
  10.159 -
  10.160  void
  10.161  G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
  10.162 -                                                 uint worker,
  10.163 +                                                 uint worker_id,
  10.164                                                   uint no_of_par_workers,
  10.165                                                   jint claim_value) {
  10.166    const uint regions = n_regions();
  10.167 @@ -2619,7 +2656,9 @@
  10.168           no_of_par_workers == workers()->total_workers(),
  10.169           "Non dynamic should use fixed number of workers");
  10.170    // try to spread out the starting points of the workers
  10.171 -  const uint start_index = regions / max_workers * worker;
  10.172 +  const HeapRegion* start_hr =
  10.173 +                        start_region_for_worker(worker_id, no_of_par_workers);
  10.174 +  const uint start_index = start_hr->hrs_index();
  10.175  
  10.176    // each worker will actually look at all regions
  10.177    for (uint count = 0; count < regions; ++count) {
  10.178 @@ -2861,6 +2900,17 @@
  10.179    return result;
  10.180  }
  10.181  
  10.182 +HeapRegion* G1CollectedHeap::start_region_for_worker(uint worker_i,
  10.183 +                                                     uint no_of_par_workers) {
  10.184 +  uint worker_num =
  10.185 +           G1CollectedHeap::use_parallel_gc_threads() ? no_of_par_workers : 1U;
  10.186 +  assert(UseDynamicNumberOfGCThreads ||
  10.187 +         no_of_par_workers == workers()->total_workers(),
  10.188 +         "Non dynamic should use fixed number of workers");
  10.189 +  const uint start_index = n_regions() * worker_i / worker_num;
  10.190 +  return region_at(start_index);
  10.191 +}
  10.192 +
  10.193  void G1CollectedHeap::collection_set_iterate(HeapRegionClosure* cl) {
  10.194    HeapRegion* r = g1_policy()->collection_set();
  10.195    while (r != NULL) {
  10.196 @@ -2974,6 +3024,51 @@
  10.197    g1_rem_set()->prepare_for_verify();
  10.198  }
  10.199  
  10.200 +bool G1CollectedHeap::allocated_since_marking(oop obj, HeapRegion* hr,
  10.201 +                                              VerifyOption vo) {
  10.202 +  switch (vo) {
  10.203 +  case VerifyOption_G1UsePrevMarking:
  10.204 +    return hr->obj_allocated_since_prev_marking(obj);
  10.205 +  case VerifyOption_G1UseNextMarking:
  10.206 +    return hr->obj_allocated_since_next_marking(obj);
  10.207 +  case VerifyOption_G1UseMarkWord:
  10.208 +    return false;
  10.209 +  default:
  10.210 +    ShouldNotReachHere();
  10.211 +  }
  10.212 +  return false; // keep some compilers happy
  10.213 +}
  10.214 +
  10.215 +HeapWord* G1CollectedHeap::top_at_mark_start(HeapRegion* hr, VerifyOption vo) {
  10.216 +  switch (vo) {
  10.217 +  case VerifyOption_G1UsePrevMarking: return hr->prev_top_at_mark_start();
  10.218 +  case VerifyOption_G1UseNextMarking: return hr->next_top_at_mark_start();
  10.219 +  case VerifyOption_G1UseMarkWord:    return NULL;
  10.220 +  default:                            ShouldNotReachHere();
  10.221 +  }
  10.222 +  return NULL; // keep some compilers happy
  10.223 +}
  10.224 +
  10.225 +bool G1CollectedHeap::is_marked(oop obj, VerifyOption vo) {
  10.226 +  switch (vo) {
  10.227 +  case VerifyOption_G1UsePrevMarking: return isMarkedPrev(obj);
  10.228 +  case VerifyOption_G1UseNextMarking: return isMarkedNext(obj);
  10.229 +  case VerifyOption_G1UseMarkWord:    return obj->is_gc_marked();
  10.230 +  default:                            ShouldNotReachHere();
  10.231 +  }
  10.232 +  return false; // keep some compilers happy
  10.233 +}
  10.234 +
  10.235 +const char* G1CollectedHeap::top_at_mark_start_str(VerifyOption vo) {
  10.236 +  switch (vo) {
  10.237 +  case VerifyOption_G1UsePrevMarking: return "PTAMS";
  10.238 +  case VerifyOption_G1UseNextMarking: return "NTAMS";
  10.239 +  case VerifyOption_G1UseMarkWord:    return "NONE";
  10.240 +  default:                            ShouldNotReachHere();
  10.241 +  }
  10.242 +  return NULL; // keep some compilers happy
  10.243 +}
  10.244 +
  10.245  class VerifyLivenessOopClosure: public OopClosure {
  10.246    G1CollectedHeap* _g1h;
  10.247    VerifyOption _vo;
  10.248 @@ -3061,9 +3156,9 @@
  10.249  
  10.250  class VerifyRegionClosure: public HeapRegionClosure {
  10.251  private:
  10.252 -  bool         _par;
  10.253 -  VerifyOption _vo;
  10.254 -  bool         _failures;
  10.255 +  bool             _par;
  10.256 +  VerifyOption     _vo;
  10.257 +  bool             _failures;
  10.258  public:
  10.259    // _vo == UsePrevMarking -> use "prev" marking information,
  10.260    // _vo == UseNextMarking -> use "next" marking information,
  10.261 @@ -3078,8 +3173,6 @@
  10.262    }
  10.263  
  10.264    bool doHeapRegion(HeapRegion* r) {
  10.265 -    guarantee(_par || r->claim_value() == HeapRegion::InitialClaimValue,
  10.266 -              "Should be unclaimed at verify points.");
  10.267      if (!r->continuesHumongous()) {
  10.268        bool failures = false;
  10.269        r->verify(_vo, &failures);
  10.270 @@ -5612,19 +5705,18 @@
  10.271    size_t hr_capacity = hr->capacity();
  10.272    size_t hr_pre_used = 0;
  10.273    _humongous_set.remove_with_proxy(hr, humongous_proxy_set);
  10.274 +  // We need to read this before we make the region non-humongous,
  10.275 +  // otherwise the information will be gone.
  10.276 +  uint last_index = hr->last_hc_index();
  10.277    hr->set_notHumongous();
  10.278    free_region(hr, &hr_pre_used, free_list, par);
  10.279  
  10.280    uint i = hr->hrs_index() + 1;
  10.281 -  uint num = 1;
  10.282 -  while (i < n_regions()) {
  10.283 +  while (i < last_index) {
  10.284      HeapRegion* curr_hr = region_at(i);
  10.285 -    if (!curr_hr->continuesHumongous()) {
  10.286 -      break;
  10.287 -    }
  10.288 +    assert(curr_hr->continuesHumongous(), "invariant");
  10.289      curr_hr->set_notHumongous();
  10.290      free_region(curr_hr, &hr_pre_used, free_list, par);
  10.291 -    num += 1;
  10.292      i += 1;
  10.293    }
  10.294    assert(hr_pre_used == hr_used,
  10.295 @@ -5732,7 +5824,6 @@
  10.296  
  10.297  void G1CollectedHeap::verify_dirty_young_regions() {
  10.298    verify_dirty_young_list(_young_list->first_region());
  10.299 -  verify_dirty_young_list(_young_list->first_survivor_region());
  10.300  }
  10.301  #endif
  10.302  
    11.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Thu Aug 02 22:23:28 2012 -0700
    11.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Fri Aug 10 10:41:13 2012 -0700
    11.3 @@ -375,6 +375,13 @@
    11.4    // this method will be found dead by the marking cycle).
    11.5    void allocate_dummy_regions() PRODUCT_RETURN;
    11.6  
    11.7 +  // Clear RSets after a compaction. It also resets the GC time stamps.
    11.8 +  void clear_rsets_post_compaction();
    11.9 +
   11.10 +  // If the HR printer is active, dump the state of the regions in the
   11.11 +  // heap after a compaction.
   11.12 +  void print_hrs_post_compaction();
   11.13 +
   11.14    // These are macros so that, if the assert fires, we get the correct
   11.15    // line number, file, etc.
   11.16  
   11.17 @@ -1061,11 +1068,18 @@
   11.18      clear_cset_start_regions();
   11.19    }
   11.20  
   11.21 +  void check_gc_time_stamps() PRODUCT_RETURN;
   11.22 +
   11.23    void increment_gc_time_stamp() {
   11.24      ++_gc_time_stamp;
   11.25      OrderAccess::fence();
   11.26    }
   11.27  
   11.28 +  // Reset the given region's GC timestamp. If it's starts humongous,
   11.29 +  // also reset the GC timestamp of its corresponding
   11.30 +  // continues humongous regions too.
   11.31 +  void reset_gc_time_stamps(HeapRegion* hr);
   11.32 +
   11.33    void iterate_dirty_card_closure(CardTableEntryClosure* cl,
   11.34                                    DirtyCardQueue* into_cset_dcq,
   11.35                                    bool concurrent, int worker_i);
   11.36 @@ -1302,11 +1316,6 @@
   11.37    // iteration early if the "doHeapRegion" method returns "true".
   11.38    void heap_region_iterate(HeapRegionClosure* blk) const;
   11.39  
   11.40 -  // Iterate over heap regions starting with r (or the first region if "r"
   11.41 -  // is NULL), in address order, terminating early if the "doHeapRegion"
   11.42 -  // method returns "true".
   11.43 -  void heap_region_iterate_from(HeapRegion* r, HeapRegionClosure* blk) const;
   11.44 -
   11.45    // Return the region with the given index. It assumes the index is valid.
   11.46    HeapRegion* region_at(uint index) const { return _hrs.at(index); }
   11.47  
   11.48 @@ -1351,6 +1360,11 @@
   11.49    // starting region for iterating over the current collection set.
   11.50    HeapRegion* start_cset_region_for_worker(int worker_i);
   11.51  
   11.52 +  // This is a convenience method that is used by the
   11.53 +  // HeapRegionIterator classes to calculate the starting region for
   11.54 +  // each worker so that they do not all start from the same region.
   11.55 +  HeapRegion* start_region_for_worker(uint worker_i, uint no_of_par_workers);
   11.56 +
   11.57    // Iterate over the regions (if any) in the current collection set.
   11.58    void collection_set_iterate(HeapRegionClosure* blk);
   11.59  
   11.60 @@ -1558,24 +1572,6 @@
   11.61    bool isMarkedPrev(oop obj) const;
   11.62    bool isMarkedNext(oop obj) const;
   11.63  
   11.64 -  // vo == UsePrevMarking -> use "prev" marking information,
   11.65 -  // vo == UseNextMarking -> use "next" marking information,
   11.66 -  // vo == UseMarkWord    -> use mark word from object header
   11.67 -  bool is_obj_dead_cond(const oop obj,
   11.68 -                        const HeapRegion* hr,
   11.69 -                        const VerifyOption vo) const {
   11.70 -
   11.71 -    switch (vo) {
   11.72 -      case VerifyOption_G1UsePrevMarking:
   11.73 -        return is_obj_dead(obj, hr);
   11.74 -      case VerifyOption_G1UseNextMarking:
   11.75 -        return is_obj_ill(obj, hr);
   11.76 -      default:
   11.77 -        assert(vo == VerifyOption_G1UseMarkWord, "must be");
   11.78 -        return !obj->is_gc_marked();
   11.79 -    }
   11.80 -  }
   11.81 -
   11.82    // Determine if an object is dead, given the object and also
   11.83    // the region to which the object belongs. An object is dead
   11.84    // iff a) it was not allocated since the last mark and b) it
   11.85 @@ -1587,15 +1583,6 @@
   11.86        !isMarkedPrev(obj);
   11.87    }
   11.88  
   11.89 -  // This is used when copying an object to survivor space.
   11.90 -  // If the object is marked live, then we mark the copy live.
   11.91 -  // If the object is allocated since the start of this mark
   11.92 -  // cycle, then we mark the copy live.
   11.93 -  // If the object has been around since the previous mark
   11.94 -  // phase, and hasn't been marked yet during this phase,
   11.95 -  // then we don't mark it, we just wait for the
   11.96 -  // current marking cycle to get to it.
   11.97 -
   11.98    // This function returns true when an object has been
   11.99    // around since the previous marking and hasn't yet
  11.100    // been marked during this marking.
  11.101 @@ -1613,23 +1600,6 @@
  11.102    // Added if it is in permanent gen it isn't dead.
  11.103    // Added if it is NULL it isn't dead.
  11.104  
  11.105 -  // vo == UsePrevMarking -> use "prev" marking information,
  11.106 -  // vo == UseNextMarking -> use "next" marking information,
  11.107 -  // vo == UseMarkWord    -> use mark word from object header
  11.108 -  bool is_obj_dead_cond(const oop obj,
  11.109 -                        const VerifyOption vo) const {
  11.110 -
  11.111 -    switch (vo) {
  11.112 -      case VerifyOption_G1UsePrevMarking:
  11.113 -        return is_obj_dead(obj);
  11.114 -      case VerifyOption_G1UseNextMarking:
  11.115 -        return is_obj_ill(obj);
  11.116 -      default:
  11.117 -        assert(vo == VerifyOption_G1UseMarkWord, "must be");
  11.118 -        return !obj->is_gc_marked();
  11.119 -    }
  11.120 -  }
  11.121 -
  11.122    bool is_obj_dead(const oop obj) const {
  11.123      const HeapRegion* hr = heap_region_containing(obj);
  11.124      if (hr == NULL) {
  11.125 @@ -1652,6 +1622,42 @@
  11.126      else return is_obj_ill(obj, hr);
  11.127    }
  11.128  
  11.129 +  // The methods below are here for convenience and dispatch the
  11.130 +  // appropriate method depending on value of the given VerifyOption
  11.131 +  // parameter. The options for that parameter are:
  11.132 +  //
  11.133 +  // vo == UsePrevMarking -> use "prev" marking information,
  11.134 +  // vo == UseNextMarking -> use "next" marking information,
  11.135 +  // vo == UseMarkWord    -> use mark word from object header
  11.136 +
  11.137 +  bool is_obj_dead_cond(const oop obj,
  11.138 +                        const HeapRegion* hr,
  11.139 +                        const VerifyOption vo) const {
  11.140 +    switch (vo) {
  11.141 +    case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
  11.142 +    case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
  11.143 +    case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
  11.144 +    default:                            ShouldNotReachHere();
  11.145 +    }
  11.146 +    return false; // keep some compilers happy
  11.147 +  }
  11.148 +
  11.149 +  bool is_obj_dead_cond(const oop obj,
  11.150 +                        const VerifyOption vo) const {
  11.151 +    switch (vo) {
  11.152 +    case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
  11.153 +    case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
  11.154 +    case VerifyOption_G1UseMarkWord:    return !obj->is_gc_marked();
  11.155 +    default:                            ShouldNotReachHere();
  11.156 +    }
  11.157 +    return false; // keep some compilers happy
  11.158 +  }
  11.159 +
  11.160 +  bool allocated_since_marking(oop obj, HeapRegion* hr, VerifyOption vo);
  11.161 +  HeapWord* top_at_mark_start(HeapRegion* hr, VerifyOption vo);
  11.162 +  bool is_marked(oop obj, VerifyOption vo);
  11.163 +  const char* top_at_mark_start_str(VerifyOption vo);
  11.164 +
  11.165    // The following is just to alert the verification code
  11.166    // that a full collection has occurred and that the
  11.167    // remembered sets are no longer up to date.
    12.1 --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Thu Aug 02 22:23:28 2012 -0700
    12.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Fri Aug 10 10:41:13 2012 -0700
    12.3 @@ -1528,35 +1528,13 @@
    12.4  
    12.5  class ParKnownGarbageHRClosure: public HeapRegionClosure {
    12.6    G1CollectedHeap* _g1h;
    12.7 -  CollectionSetChooser* _hrSorted;
    12.8 -  uint _marked_regions_added;
    12.9 -  size_t _reclaimable_bytes_added;
   12.10 -  uint _chunk_size;
   12.11 -  uint _cur_chunk_idx;
   12.12 -  uint _cur_chunk_end; // Cur chunk [_cur_chunk_idx, _cur_chunk_end)
   12.13 -
   12.14 -  void get_new_chunk() {
   12.15 -    _cur_chunk_idx = _hrSorted->claim_array_chunk(_chunk_size);
   12.16 -    _cur_chunk_end = _cur_chunk_idx + _chunk_size;
   12.17 -  }
   12.18 -  void add_region(HeapRegion* r) {
   12.19 -    if (_cur_chunk_idx == _cur_chunk_end) {
   12.20 -      get_new_chunk();
   12.21 -    }
   12.22 -    assert(_cur_chunk_idx < _cur_chunk_end, "postcondition");
   12.23 -    _hrSorted->set_region(_cur_chunk_idx, r);
   12.24 -    _marked_regions_added++;
   12.25 -    _reclaimable_bytes_added += r->reclaimable_bytes();
   12.26 -    _cur_chunk_idx++;
   12.27 -  }
   12.28 +  CSetChooserParUpdater _cset_updater;
   12.29  
   12.30  public:
   12.31    ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
   12.32                             uint chunk_size) :
   12.33 -      _g1h(G1CollectedHeap::heap()),
   12.34 -      _hrSorted(hrSorted), _chunk_size(chunk_size),
   12.35 -      _marked_regions_added(0), _reclaimable_bytes_added(0),
   12.36 -      _cur_chunk_idx(0), _cur_chunk_end(0) { }
   12.37 +    _g1h(G1CollectedHeap::heap()),
   12.38 +    _cset_updater(hrSorted, true /* parallel */, chunk_size) { }
   12.39  
   12.40    bool doHeapRegion(HeapRegion* r) {
   12.41      // Do we have any marking information for this region?
   12.42 @@ -1564,14 +1542,12 @@
   12.43        // We will skip any region that's currently used as an old GC
   12.44        // alloc region (we should not consider those for collection
   12.45        // before we fill them up).
   12.46 -      if (_hrSorted->should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
   12.47 -        add_region(r);
   12.48 +      if (_cset_updater.should_add(r) && !_g1h->is_old_gc_alloc_region(r)) {
   12.49 +        _cset_updater.add_region(r);
   12.50        }
   12.51      }
   12.52      return false;
   12.53    }
   12.54 -  uint marked_regions_added() { return _marked_regions_added; }
   12.55 -  size_t reclaimable_bytes_added() { return _reclaimable_bytes_added; }
   12.56  };
   12.57  
   12.58  class ParKnownGarbageTask: public AbstractGangTask {
   12.59 @@ -1591,10 +1567,6 @@
   12.60      _g1->heap_region_par_iterate_chunked(&parKnownGarbageCl, worker_id,
   12.61                                           _g1->workers()->active_workers(),
   12.62                                           HeapRegion::InitialClaimValue);
   12.63 -    uint regions_added = parKnownGarbageCl.marked_regions_added();
   12.64 -    size_t reclaimable_bytes_added =
   12.65 -                                   parKnownGarbageCl.reclaimable_bytes_added();
   12.66 -    _hrSorted->update_totals(regions_added, reclaimable_bytes_added);
   12.67    }
   12.68  };
   12.69  
    13.1 --- a/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Thu Aug 02 22:23:28 2012 -0700
    13.2 +++ b/src/share/vm/gc_implementation/g1/g1MarkSweep.cpp	Fri Aug 10 10:41:13 2012 -0700
    13.3 @@ -262,18 +262,6 @@
    13.4    }
    13.5  };
    13.6  
    13.7 -// Finds the first HeapRegion.
    13.8 -class FindFirstRegionClosure: public HeapRegionClosure {
    13.9 -  HeapRegion* _a_region;
   13.10 -public:
   13.11 -  FindFirstRegionClosure() : _a_region(NULL) {}
   13.12 -  bool doHeapRegion(HeapRegion* r) {
   13.13 -    _a_region = r;
   13.14 -    return true;
   13.15 -  }
   13.16 -  HeapRegion* result() { return _a_region; }
   13.17 -};
   13.18 -
   13.19  void G1MarkSweep::mark_sweep_phase2() {
   13.20    // Now all live objects are marked, compute the new object addresses.
   13.21  
   13.22 @@ -294,9 +282,8 @@
   13.23    TraceTime tm("phase 2", G1Log::fine() && Verbose, true, gclog_or_tty);
   13.24    GenMarkSweep::trace("2");
   13.25  
   13.26 -  FindFirstRegionClosure cl;
   13.27 -  g1h->heap_region_iterate(&cl);
   13.28 -  HeapRegion *r = cl.result();
   13.29 +  // find the first region
   13.30 +  HeapRegion* r = g1h->region_at(0);
   13.31    CompactibleSpace* sp = r;
   13.32    if (r->isHumongous() && oop(r->bottom())->is_gc_marked()) {
   13.33      sp = r->next_compaction_space();
   13.34 @@ -408,7 +395,3 @@
   13.35    g1h->heap_region_iterate(&blk);
   13.36  
   13.37  }
   13.38 -
   13.39 -// Local Variables: ***
   13.40 -// c-indentation-style: gnu ***
   13.41 -// End: ***
    14.1 --- a/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Thu Aug 02 22:23:28 2012 -0700
    14.2 +++ b/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Fri Aug 10 10:41:13 2012 -0700
    14.3 @@ -197,7 +197,6 @@
    14.4    HeapWord* _r_bottom;
    14.5    HeapWord* _r_end;
    14.6    OopClosure* _oc;
    14.7 -  int _out_of_region;
    14.8  public:
    14.9    FilterOutOfRegionClosure(HeapRegion* r, OopClosure* oc);
   14.10    template <class T> void do_oop_nv(T* p);
   14.11 @@ -205,7 +204,6 @@
   14.12    virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
   14.13    bool apply_to_weak_ref_discovered_field() { return true; }
   14.14    bool do_header() { return false; }
   14.15 -  int out_of_region() { return _out_of_region; }
   14.16  };
   14.17  
   14.18  // Closure for iterating over object fields during concurrent marking
    15.1 --- a/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Thu Aug 02 22:23:28 2012 -0700
    15.2 +++ b/src/share/vm/gc_implementation/g1/g1OopClosures.inline.hpp	Fri Aug 10 10:41:13 2012 -0700
    15.3 @@ -29,31 +29,22 @@
    15.4  #include "gc_implementation/g1/g1CollectedHeap.hpp"
    15.5  #include "gc_implementation/g1/g1OopClosures.hpp"
    15.6  #include "gc_implementation/g1/g1RemSet.hpp"
    15.7 +#include "gc_implementation/g1/heapRegionRemSet.hpp"
    15.8  
    15.9  /*
   15.10   * This really ought to be an inline function, but apparently the C++
   15.11   * compiler sometimes sees fit to ignore inline declarations.  Sigh.
   15.12   */
   15.13  
   15.14 -// This must a ifdef'ed because the counting it controls is in a
   15.15 -// perf-critical inner loop.
   15.16 -#define FILTERINTOCSCLOSURE_DOHISTOGRAMCOUNT 0
   15.17 -
   15.18  template <class T>
   15.19  inline void FilterIntoCSClosure::do_oop_nv(T* p) {
   15.20    T heap_oop = oopDesc::load_heap_oop(p);
   15.21    if (!oopDesc::is_null(heap_oop) &&
   15.22        _g1->obj_in_cs(oopDesc::decode_heap_oop_not_null(heap_oop))) {
   15.23      _oc->do_oop(p);
   15.24 -#if FILTERINTOCSCLOSURE_DOHISTOGRAMCOUNT
   15.25 -    if (_dcto_cl != NULL)
   15.26 -      _dcto_cl->incr_count();
   15.27 -#endif
   15.28    }
   15.29  }
   15.30  
   15.31 -#define FILTEROUTOFREGIONCLOSURE_DOHISTOGRAMCOUNT 0
   15.32 -
   15.33  template <class T>
   15.34  inline void FilterOutOfRegionClosure::do_oop_nv(T* p) {
   15.35    T heap_oop = oopDesc::load_heap_oop(p);
   15.36 @@ -61,9 +52,6 @@
   15.37      HeapWord* obj_hw = (HeapWord*)oopDesc::decode_heap_oop_not_null(heap_oop);
   15.38      if (obj_hw < _r_bottom || obj_hw >= _r_end) {
   15.39        _oc->do_oop(p);
   15.40 -#if FILTEROUTOFREGIONCLOSURE_DOHISTOGRAMCOUNT
   15.41 -      _out_of_region++;
   15.42 -#endif
   15.43      }
   15.44    }
   15.45  }
   15.46 @@ -182,6 +170,7 @@
   15.47  #endif // ASSERT
   15.48  
   15.49    assert(_from != NULL, "from region must be non-NULL");
   15.50 +  assert(_from->is_in_reserved(p), "p is not in from");
   15.51  
   15.52    HeapRegion* to = _g1->heap_region_containing(obj);
   15.53    if (to != NULL && _from != to) {
   15.54 @@ -212,14 +201,16 @@
   15.55        // or processed (if an evacuation failure occurs) at the end
   15.56        // of the collection.
   15.57        // See G1RemSet::cleanup_after_oops_into_collection_set_do().
   15.58 -    } else {
   15.59 -      // We either don't care about pushing references that point into the
   15.60 -      // collection set (i.e. we're not during an evacuation pause) _or_
   15.61 -      // the reference doesn't point into the collection set. Either way
   15.62 -      // we add the reference directly to the RSet of the region containing
   15.63 -      // the referenced object.
   15.64 -      _g1_rem_set->par_write_ref(_from, p, _worker_i);
   15.65 +      return;
   15.66      }
   15.67 +
   15.68 +    // We either don't care about pushing references that point into the
   15.69 +    // collection set (i.e. we're not during an evacuation pause) _or_
   15.70 +    // the reference doesn't point into the collection set. Either way
   15.71 +    // we add the reference directly to the RSet of the region containing
   15.72 +    // the referenced object.
   15.73 +    assert(to->rem_set() != NULL, "Need per-region 'into' remsets.");
   15.74 +    to->rem_set()->add_reference(p, _worker_i);
   15.75    }
   15.76  }
   15.77  
    16.1 --- a/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Thu Aug 02 22:23:28 2012 -0700
    16.2 +++ b/src/share/vm/gc_implementation/g1/g1RemSet.cpp	Fri Aug 10 10:41:13 2012 -0700
    16.3 @@ -280,62 +280,6 @@
    16.4    _g1p->phase_times()->record_update_rs_time(worker_i, (os::elapsedTime() - start) * 1000.0);
    16.5  }
    16.6  
    16.7 -class CountRSSizeClosure: public HeapRegionClosure {
    16.8 -  size_t _n;
    16.9 -  size_t _tot;
   16.10 -  size_t _max;
   16.11 -  HeapRegion* _max_r;
   16.12 -  enum {
   16.13 -    N = 20,
   16.14 -    MIN = 6
   16.15 -  };
   16.16 -  int _histo[N];
   16.17 -public:
   16.18 -  CountRSSizeClosure() : _n(0), _tot(0), _max(0), _max_r(NULL) {
   16.19 -    for (int i = 0; i < N; i++) _histo[i] = 0;
   16.20 -  }
   16.21 -  bool doHeapRegion(HeapRegion* r) {
   16.22 -    if (!r->continuesHumongous()) {
   16.23 -      size_t occ = r->rem_set()->occupied();
   16.24 -      _n++;
   16.25 -      _tot += occ;
   16.26 -      if (occ > _max) {
   16.27 -        _max = occ;
   16.28 -        _max_r = r;
   16.29 -      }
   16.30 -      // Fit it into a histo bin.
   16.31 -      int s = 1 << MIN;
   16.32 -      int i = 0;
   16.33 -      while (occ > (size_t) s && i < (N-1)) {
   16.34 -        s = s << 1;
   16.35 -        i++;
   16.36 -      }
   16.37 -      _histo[i]++;
   16.38 -    }
   16.39 -    return false;
   16.40 -  }
   16.41 -  size_t n() { return _n; }
   16.42 -  size_t tot() { return _tot; }
   16.43 -  size_t mx() { return _max; }
   16.44 -  HeapRegion* mxr() { return _max_r; }
   16.45 -  void print_histo() {
   16.46 -    int mx = N;
   16.47 -    while (mx >= 0) {
   16.48 -      if (_histo[mx-1] > 0) break;
   16.49 -      mx--;
   16.50 -    }
   16.51 -    gclog_or_tty->print_cr("Number of regions with given RS sizes:");
   16.52 -    gclog_or_tty->print_cr("           <= %8d   %8d", 1 << MIN, _histo[0]);
   16.53 -    for (int i = 1; i < mx-1; i++) {
   16.54 -      gclog_or_tty->print_cr("  %8d  - %8d   %8d",
   16.55 -                    (1 << (MIN + i - 1)) + 1,
   16.56 -                    1 << (MIN + i),
   16.57 -                    _histo[i]);
   16.58 -    }
   16.59 -    gclog_or_tty->print_cr("            > %8d   %8d", (1 << (MIN+mx-2))+1, _histo[mx-1]);
   16.60 -  }
   16.61 -};
   16.62 -
   16.63  void G1RemSet::cleanupHRRS() {
   16.64    HeapRegionRemSet::cleanup();
   16.65  }
   16.66 @@ -349,17 +293,6 @@
   16.67      _cg1r->clear_and_record_card_counts();
   16.68    }
   16.69  
   16.70 -  // Make this into a command-line flag...
   16.71 -  if (G1RSCountHisto && (ParallelGCThreads == 0 || worker_i == 0)) {
   16.72 -    CountRSSizeClosure count_cl;
   16.73 -    _g1->heap_region_iterate(&count_cl);
   16.74 -    gclog_or_tty->print_cr("Avg of %d RS counts is %f, max is %d, "
   16.75 -                  "max region is " PTR_FORMAT,
   16.76 -                  count_cl.n(), (float)count_cl.tot()/(float)count_cl.n(),
   16.77 -                  count_cl.mx(), count_cl.mxr());
   16.78 -    count_cl.print_histo();
   16.79 -  }
   16.80 -
   16.81    // We cache the value of 'oc' closure into the appropriate slot in the
   16.82    // _cset_rs_update_cl for this worker
   16.83    assert(worker_i < (int)n_workers(), "sanity");
   16.84 @@ -568,8 +501,6 @@
   16.85  }
   16.86  
   16.87  
   16.88 -static IntHistogram out_of_histo(50, 50);
   16.89 -
   16.90  
   16.91  G1TriggerClosure::G1TriggerClosure() :
   16.92    _triggered(false) { }
   16.93 @@ -671,7 +602,6 @@
   16.94        sdcq->enqueue(card_ptr);
   16.95      }
   16.96    } else {
   16.97 -    out_of_histo.add_entry(filter_then_update_rs_oop_cl.out_of_region());
   16.98      _conc_refine_cards++;
   16.99    }
  16.100  
  16.101 @@ -862,11 +792,6 @@
  16.102    card_repeat_count.print_on(gclog_or_tty);
  16.103  #endif
  16.104  
  16.105 -  if (FILTEROUTOFREGIONCLOSURE_DOHISTOGRAMCOUNT) {
  16.106 -    gclog_or_tty->print_cr("\nG1 rem-set out-of-region histogram: ");
  16.107 -    gclog_or_tty->print_cr("  # of CS ptrs --> # of cards with that number.");
  16.108 -    out_of_histo.print_on(gclog_or_tty);
  16.109 -  }
  16.110    gclog_or_tty->print_cr("\n Concurrent RS processed %d cards",
  16.111                           _conc_refine_cards);
  16.112    DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  16.113 @@ -889,21 +814,24 @@
  16.114  
  16.115    HRRSStatsIter blk;
  16.116    g1->heap_region_iterate(&blk);
  16.117 -  gclog_or_tty->print_cr("  Total heap region rem set sizes = " SIZE_FORMAT "K."
  16.118 -                         "  Max = " SIZE_FORMAT "K.",
  16.119 +  gclog_or_tty->print_cr("  Total heap region rem set sizes = "SIZE_FORMAT"K."
  16.120 +                         "  Max = "SIZE_FORMAT"K.",
  16.121                           blk.total_mem_sz()/K, blk.max_mem_sz()/K);
  16.122 -  gclog_or_tty->print_cr("  Static structures = " SIZE_FORMAT "K,"
  16.123 -                         " free_lists = " SIZE_FORMAT "K.",
  16.124 -                         HeapRegionRemSet::static_mem_size()/K,
  16.125 -                         HeapRegionRemSet::fl_mem_size()/K);
  16.126 -  gclog_or_tty->print_cr("    %d occupied cards represented.",
  16.127 +  gclog_or_tty->print_cr("  Static structures = "SIZE_FORMAT"K,"
  16.128 +                         " free_lists = "SIZE_FORMAT"K.",
  16.129 +                         HeapRegionRemSet::static_mem_size() / K,
  16.130 +                         HeapRegionRemSet::fl_mem_size() / K);
  16.131 +  gclog_or_tty->print_cr("    "SIZE_FORMAT" occupied cards represented.",
  16.132                           blk.occupied());
  16.133 -  gclog_or_tty->print_cr("    Max sz region = [" PTR_FORMAT ", " PTR_FORMAT " )"
  16.134 -                         ", cap = " SIZE_FORMAT "K, occ = " SIZE_FORMAT "K.",
  16.135 -                         blk.max_mem_sz_region()->bottom(), blk.max_mem_sz_region()->end(),
  16.136 -                         (blk.max_mem_sz_region()->rem_set()->mem_size() + K - 1)/K,
  16.137 -                         (blk.max_mem_sz_region()->rem_set()->occupied() + K - 1)/K);
  16.138 -  gclog_or_tty->print_cr("    Did %d coarsenings.", HeapRegionRemSet::n_coarsenings());
  16.139 +  HeapRegion* max_mem_sz_region = blk.max_mem_sz_region();
  16.140 +  HeapRegionRemSet* rem_set = max_mem_sz_region->rem_set();
  16.141 +  gclog_or_tty->print_cr("    Max size region = "HR_FORMAT", "
  16.142 +                         "size = "SIZE_FORMAT "K, occupied = "SIZE_FORMAT"K.",
  16.143 +                         HR_FORMAT_PARAMS(max_mem_sz_region),
  16.144 +                         (rem_set->mem_size() + K - 1)/K,
  16.145 +                         (rem_set->occupied() + K - 1)/K);
  16.146 +  gclog_or_tty->print_cr("    Did %d coarsenings.",
  16.147 +                         HeapRegionRemSet::n_coarsenings());
  16.148  }
  16.149  
  16.150  void G1RemSet::prepare_for_verify() {
    17.1 --- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Thu Aug 02 22:23:28 2012 -0700
    17.2 +++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Fri Aug 10 10:41:13 2012 -0700
    17.3 @@ -44,14 +44,11 @@
    17.4                                   CardTableModRefBS::PrecisionStyle precision,
    17.5                                   FilterKind fk) :
    17.6    ContiguousSpaceDCTOC(hr, cl, precision, NULL),
    17.7 -  _hr(hr), _fk(fk), _g1(g1)
    17.8 -{ }
    17.9 +  _hr(hr), _fk(fk), _g1(g1) { }
   17.10  
   17.11  FilterOutOfRegionClosure::FilterOutOfRegionClosure(HeapRegion* r,
   17.12                                                     OopClosure* oc) :
   17.13 -  _r_bottom(r->bottom()), _r_end(r->end()),
   17.14 -  _oc(oc), _out_of_region(0)
   17.15 -{}
   17.16 +  _r_bottom(r->bottom()), _r_end(r->end()), _oc(oc) { }
   17.17  
   17.18  class VerifyLiveClosure: public OopClosure {
   17.19  private:
   17.20 @@ -512,35 +509,19 @@
   17.21    assert(HeapRegionRemSet::num_par_rem_sets() > 0, "Invariant.");
   17.22  }
   17.23  
   17.24 -class NextCompactionHeapRegionClosure: public HeapRegionClosure {
   17.25 -  const HeapRegion* _target;
   17.26 -  bool _target_seen;
   17.27 -  HeapRegion* _last;
   17.28 -  CompactibleSpace* _res;
   17.29 -public:
   17.30 -  NextCompactionHeapRegionClosure(const HeapRegion* target) :
   17.31 -    _target(target), _target_seen(false), _res(NULL) {}
   17.32 -  bool doHeapRegion(HeapRegion* cur) {
   17.33 -    if (_target_seen) {
   17.34 -      if (!cur->isHumongous()) {
   17.35 -        _res = cur;
   17.36 -        return true;
   17.37 -      }
   17.38 -    } else if (cur == _target) {
   17.39 -      _target_seen = true;
   17.40 +CompactibleSpace* HeapRegion::next_compaction_space() const {
   17.41 +  // We're not using an iterator given that it will wrap around when
   17.42 +  // it reaches the last region and this is not what we want here.
   17.43 +  G1CollectedHeap* g1h = G1CollectedHeap::heap();
   17.44 +  uint index = hrs_index() + 1;
   17.45 +  while (index < g1h->n_regions()) {
   17.46 +    HeapRegion* hr = g1h->region_at(index);
   17.47 +    if (!hr->isHumongous()) {
   17.48 +      return hr;
   17.49      }
   17.50 -    return false;
   17.51 +    index += 1;
   17.52    }
   17.53 -  CompactibleSpace* result() { return _res; }
   17.54 -};
   17.55 -
   17.56 -CompactibleSpace* HeapRegion::next_compaction_space() const {
   17.57 -  G1CollectedHeap* g1h = G1CollectedHeap::heap();
   17.58 -  // cast away const-ness
   17.59 -  HeapRegion* r = (HeapRegion*) this;
   17.60 -  NextCompactionHeapRegionClosure blk(r);
   17.61 -  g1h->heap_region_iterate_from(r, &blk);
   17.62 -  return blk.result();
   17.63 +  return NULL;
   17.64  }
   17.65  
   17.66  void HeapRegion::save_marks() {
    18.1 --- a/src/share/vm/gc_implementation/g1/heapRegion.hpp	Thu Aug 02 22:23:28 2012 -0700
    18.2 +++ b/src/share/vm/gc_implementation/g1/heapRegion.hpp	Fri Aug 10 10:41:13 2012 -0700
    18.3 @@ -55,7 +55,10 @@
    18.4  #define HR_FORMAT "%u:(%s)["PTR_FORMAT","PTR_FORMAT","PTR_FORMAT"]"
    18.5  #define HR_FORMAT_PARAMS(_hr_) \
    18.6                  (_hr_)->hrs_index(), \
    18.7 -                (_hr_)->is_survivor() ? "S" : (_hr_)->is_young() ? "E" : "-", \
    18.8 +                (_hr_)->is_survivor() ? "S" : (_hr_)->is_young() ? "E" : \
    18.9 +                (_hr_)->startsHumongous() ? "HS" : \
   18.10 +                (_hr_)->continuesHumongous() ? "HC" : \
   18.11 +                !(_hr_)->is_empty() ? "O" : "F", \
   18.12                  (_hr_)->bottom(), (_hr_)->top(), (_hr_)->end()
   18.13  
   18.14  // sentinel value for hrs_index
   18.15 @@ -173,6 +176,7 @@
   18.16    virtual HeapWord* saved_mark_word() const;
   18.17    virtual void set_saved_mark();
   18.18    void reset_gc_time_stamp() { _gc_time_stamp = 0; }
   18.19 +  unsigned get_gc_time_stamp() { return _gc_time_stamp; }
   18.20  
   18.21    // See the comment above in the declaration of _pre_dummy_top for an
   18.22    // explanation of what it is.
   18.23 @@ -439,6 +443,25 @@
   18.24      return _humongous_start_region;
   18.25    }
   18.26  
   18.27 +  // Return the number of distinct regions that are covered by this region:
   18.28 +  // 1 if the region is not humongous, >= 1 if the region is humongous.
   18.29 +  uint region_num() const {
   18.30 +    if (!isHumongous()) {
   18.31 +      return 1U;
   18.32 +    } else {
   18.33 +      assert(startsHumongous(), "doesn't make sense on HC regions");
   18.34 +      assert(capacity() % HeapRegion::GrainBytes == 0, "sanity");
   18.35 +      return (uint) (capacity() >> HeapRegion::LogOfHRGrainBytes);
   18.36 +    }
   18.37 +  }
   18.38 +
   18.39 +  // Return the index + 1 of the last HC regions that's associated
   18.40 +  // with this HS region.
   18.41 +  uint last_hc_index() const {
   18.42 +    assert(startsHumongous(), "don't call this otherwise");
   18.43 +    return hrs_index() + region_num();
   18.44 +  }
   18.45 +
   18.46    // Same as Space::is_in_reserved, but will use the original size of the region.
   18.47    // The original size is different only for start humongous regions. They get
   18.48    // their _end set up to be the end of the last continues region of the
   18.49 @@ -622,8 +645,8 @@
   18.50    bool is_marked() { return _prev_top_at_mark_start != bottom(); }
   18.51  
   18.52    void reset_during_compaction() {
   18.53 -    guarantee( isHumongous() && startsHumongous(),
   18.54 -               "should only be called for humongous regions");
   18.55 +    assert(isHumongous() && startsHumongous(),
   18.56 +           "should only be called for starts humongous regions");
   18.57  
   18.58      zero_marked_bytes();
   18.59      init_top_at_mark_start();
   18.60 @@ -774,7 +797,7 @@
   18.61    virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
   18.62    SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(HeapRegion_OOP_SINCE_SAVE_MARKS_DECL)
   18.63  
   18.64 -  CompactibleSpace* next_compaction_space() const;
   18.65 +  virtual CompactibleSpace* next_compaction_space() const;
   18.66  
   18.67    virtual void reset_after_compaction();
   18.68  
    19.1 --- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Thu Aug 02 22:23:28 2012 -0700
    19.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Fri Aug 10 10:41:13 2012 -0700
    19.3 @@ -34,8 +34,6 @@
    19.4  #include "utilities/bitMap.inline.hpp"
    19.5  #include "utilities/globalDefinitions.hpp"
    19.6  
    19.7 -// OtherRegionsTable
    19.8 -
    19.9  class PerRegionTable: public CHeapObj<mtGC> {
   19.10    friend class OtherRegionsTable;
   19.11    friend class HeapRegionRemSetIterator;
   19.12 @@ -44,20 +42,18 @@
   19.13    BitMap          _bm;
   19.14    jint            _occupied;
   19.15  
   19.16 -  // next pointer for free/allocated lis
   19.17 +  // next pointer for free/allocated 'all' list
   19.18    PerRegionTable* _next;
   19.19  
   19.20 +  // prev pointer for the allocated 'all' list
   19.21 +  PerRegionTable* _prev;
   19.22 +
   19.23 +  // next pointer in collision list
   19.24 +  PerRegionTable * _collision_list_next;
   19.25 +
   19.26 +  // Global free list of PRTs
   19.27    static PerRegionTable* _free_list;
   19.28  
   19.29 -#ifdef _MSC_VER
   19.30 -  // For some reason even though the classes are marked as friend they are unable
   19.31 -  // to access CardsPerRegion when private/protected. Only the windows c++ compiler
   19.32 -  // says this Sun CC and linux gcc don't have a problem with access when private
   19.33 -
   19.34 -  public:
   19.35 -
   19.36 -#endif // _MSC_VER
   19.37 -
   19.38  protected:
   19.39    // We need access in order to union things into the base table.
   19.40    BitMap* bm() { return &_bm; }
   19.41 @@ -69,7 +65,8 @@
   19.42    PerRegionTable(HeapRegion* hr) :
   19.43      _hr(hr),
   19.44      _occupied(0),
   19.45 -    _bm(HeapRegion::CardsPerRegion, false /* in-resource-area */)
   19.46 +    _bm(HeapRegion::CardsPerRegion, false /* in-resource-area */),
   19.47 +    _collision_list_next(NULL), _next(NULL), _prev(NULL)
   19.48    {}
   19.49  
   19.50    void add_card_work(CardIdx_t from_card, bool par) {
   19.51 @@ -126,9 +123,13 @@
   19.52      return _occupied;
   19.53    }
   19.54  
   19.55 -  void init(HeapRegion* hr) {
   19.56 +  void init(HeapRegion* hr, bool clear_links_to_all_list) {
   19.57 +    if (clear_links_to_all_list) {
   19.58 +      set_next(NULL);
   19.59 +      set_prev(NULL);
   19.60 +    }
   19.61      _hr = hr;
   19.62 -    _next = NULL;
   19.63 +    _collision_list_next = NULL;
   19.64      _occupied = 0;
   19.65      _bm.clear();
   19.66    }
   19.67 @@ -175,22 +176,25 @@
   19.68      return _bm.at(card_ind);
   19.69    }
   19.70  
   19.71 -  PerRegionTable* next() const { return _next; }
   19.72 -  void set_next(PerRegionTable* nxt) { _next = nxt; }
   19.73 -  PerRegionTable** next_addr() { return &_next; }
   19.74 -
   19.75 -  static void free(PerRegionTable* prt) {
   19.76 +  // Bulk-free the PRTs from prt to last, assumes that they are
   19.77 +  // linked together using their _next field.
   19.78 +  static void bulk_free(PerRegionTable* prt, PerRegionTable* last) {
   19.79      while (true) {
   19.80        PerRegionTable* fl = _free_list;
   19.81 -      prt->set_next(fl);
   19.82 -      PerRegionTable* res =
   19.83 -        (PerRegionTable*)
   19.84 -        Atomic::cmpxchg_ptr(prt, &_free_list, fl);
   19.85 -      if (res == fl) return;
   19.86 +      last->set_next(fl);
   19.87 +      PerRegionTable* res = (PerRegionTable*) Atomic::cmpxchg_ptr(prt, &_free_list, fl);
   19.88 +      if (res == fl) {
   19.89 +        return;
   19.90 +      }
   19.91      }
   19.92      ShouldNotReachHere();
   19.93    }
   19.94  
   19.95 +  static void free(PerRegionTable* prt) {
   19.96 +    bulk_free(prt, prt);
   19.97 +  }
   19.98 +
   19.99 +  // Returns an initialized PerRegionTable instance.
  19.100    static PerRegionTable* alloc(HeapRegion* hr) {
  19.101      PerRegionTable* fl = _free_list;
  19.102      while (fl != NULL) {
  19.103 @@ -199,7 +203,7 @@
  19.104          (PerRegionTable*)
  19.105          Atomic::cmpxchg_ptr(nxt, &_free_list, fl);
  19.106        if (res == fl) {
  19.107 -        fl->init(hr);
  19.108 +        fl->init(hr, true);
  19.109          return fl;
  19.110        } else {
  19.111          fl = _free_list;
  19.112 @@ -209,6 +213,31 @@
  19.113      return new PerRegionTable(hr);
  19.114    }
  19.115  
  19.116 +  PerRegionTable* next() const { return _next; }
  19.117 +  void set_next(PerRegionTable* next) { _next = next; }
  19.118 +  PerRegionTable* prev() const { return _prev; }
  19.119 +  void set_prev(PerRegionTable* prev) { _prev = prev; }
  19.120 +
  19.121 +  // Accessor and Modification routines for the pointer for the
  19.122 +  // singly linked collision list that links the PRTs within the
  19.123 +  // OtherRegionsTable::_fine_grain_regions hash table.
  19.124 +  //
  19.125 +  // It might be useful to also make the collision list doubly linked
  19.126 +  // to avoid iteration over the collisions list during scrubbing/deletion.
  19.127 +  // OTOH there might not be many collisions.
  19.128 +
  19.129 +  PerRegionTable* collision_list_next() const {
  19.130 +    return _collision_list_next;
  19.131 +  }
  19.132 +
  19.133 +  void set_collision_list_next(PerRegionTable* next) {
  19.134 +    _collision_list_next = next;
  19.135 +  }
  19.136 +
  19.137 +  PerRegionTable** collision_list_next_addr() {
  19.138 +    return &_collision_list_next;
  19.139 +  }
  19.140 +
  19.141    static size_t fl_mem_size() {
  19.142      PerRegionTable* cur = _free_list;
  19.143      size_t res = 0;
  19.144 @@ -234,6 +263,7 @@
  19.145    _coarse_map(G1CollectedHeap::heap()->max_regions(),
  19.146                false /* in-resource-area */),
  19.147    _fine_grain_regions(NULL),
  19.148 +  _first_all_fine_prts(NULL), _last_all_fine_prts(NULL),
  19.149    _n_fine_entries(0), _n_coarse_entries(0),
  19.150    _fine_eviction_start(0),
  19.151    _sparse_table(hr)
  19.152 @@ -264,6 +294,66 @@
  19.153    }
  19.154  }
  19.155  
  19.156 +void OtherRegionsTable::link_to_all(PerRegionTable* prt) {
  19.157 +  // We always append to the beginning of the list for convenience;
  19.158 +  // the order of entries in this list does not matter.
  19.159 +  if (_first_all_fine_prts != NULL) {
  19.160 +    assert(_first_all_fine_prts->prev() == NULL, "invariant");
  19.161 +    _first_all_fine_prts->set_prev(prt);
  19.162 +    prt->set_next(_first_all_fine_prts);
  19.163 +  } else {
  19.164 +    // this is the first element we insert. Adjust the "last" pointer
  19.165 +    _last_all_fine_prts = prt;
  19.166 +    assert(prt->next() == NULL, "just checking");
  19.167 +  }
  19.168 +  // the new element is always the first element without a predecessor
  19.169 +  prt->set_prev(NULL);
  19.170 +  _first_all_fine_prts = prt;
  19.171 +
  19.172 +  assert(prt->prev() == NULL, "just checking");
  19.173 +  assert(_first_all_fine_prts == prt, "just checking");
  19.174 +  assert((_first_all_fine_prts == NULL && _last_all_fine_prts == NULL) ||
  19.175 +         (_first_all_fine_prts != NULL && _last_all_fine_prts != NULL),
  19.176 +         "just checking");
  19.177 +  assert(_last_all_fine_prts == NULL || _last_all_fine_prts->next() == NULL,
  19.178 +         "just checking");
  19.179 +  assert(_first_all_fine_prts == NULL || _first_all_fine_prts->prev() == NULL,
  19.180 +         "just checking");
  19.181 +}
  19.182 +
  19.183 +void OtherRegionsTable::unlink_from_all(PerRegionTable* prt) {
  19.184 +  if (prt->prev() != NULL) {
  19.185 +    assert(_first_all_fine_prts != prt, "just checking");
  19.186 +    prt->prev()->set_next(prt->next());
  19.187 +    // removing the last element in the list?
  19.188 +    if (_last_all_fine_prts == prt) {
  19.189 +      _last_all_fine_prts = prt->prev();
  19.190 +    }
  19.191 +  } else {
  19.192 +    assert(_first_all_fine_prts == prt, "just checking");
  19.193 +    _first_all_fine_prts = prt->next();
  19.194 +    // list is empty now?
  19.195 +    if (_first_all_fine_prts == NULL) {
  19.196 +      _last_all_fine_prts = NULL;
  19.197 +    }
  19.198 +  }
  19.199 +
  19.200 +  if (prt->next() != NULL) {
  19.201 +    prt->next()->set_prev(prt->prev());
  19.202 +  }
  19.203 +
  19.204 +  prt->set_next(NULL);
  19.205 +  prt->set_prev(NULL);
  19.206 +
  19.207 +  assert((_first_all_fine_prts == NULL && _last_all_fine_prts == NULL) ||
  19.208 +         (_first_all_fine_prts != NULL && _last_all_fine_prts != NULL),
  19.209 +         "just checking");
  19.210 +  assert(_last_all_fine_prts == NULL || _last_all_fine_prts->next() == NULL,
  19.211 +         "just checking");
  19.212 +  assert(_first_all_fine_prts == NULL || _first_all_fine_prts->prev() == NULL,
  19.213 +         "just checking");
  19.214 +}
  19.215 +
  19.216  int**  OtherRegionsTable::_from_card_cache = NULL;
  19.217  size_t OtherRegionsTable::_from_card_cache_max_regions = 0;
  19.218  size_t OtherRegionsTable::_from_card_cache_mem_size = 0;
  19.219 @@ -386,13 +476,16 @@
  19.220  
  19.221        if (_n_fine_entries == _max_fine_entries) {
  19.222          prt = delete_region_table();
  19.223 +        // There is no need to clear the links to the 'all' list here:
  19.224 +        // prt will be reused immediately, i.e. remain in the 'all' list.
  19.225 +        prt->init(from_hr, false /* clear_links_to_all_list */);
  19.226        } else {
  19.227          prt = PerRegionTable::alloc(from_hr);
  19.228 +        link_to_all(prt);
  19.229        }
  19.230 -      prt->init(from_hr);
  19.231  
  19.232        PerRegionTable* first_prt = _fine_grain_regions[ind];
  19.233 -      prt->set_next(first_prt);  // XXX Maybe move to init?
  19.234 +      prt->set_collision_list_next(first_prt);
  19.235        _fine_grain_regions[ind] = prt;
  19.236        _n_fine_entries++;
  19.237  
  19.238 @@ -438,7 +531,7 @@
  19.239    assert(0 <= ind && ind < _max_fine_entries, "Preconditions.");
  19.240    PerRegionTable* prt = _fine_grain_regions[ind];
  19.241    while (prt != NULL && prt->hr() != hr) {
  19.242 -    prt = prt->next();
  19.243 +    prt = prt->collision_list_next();
  19.244    }
  19.245    // Loop postcondition is the method postcondition.
  19.246    return prt;
  19.247 @@ -473,8 +566,8 @@
  19.248          max_ind = i;
  19.249          max_occ = cur_occ;
  19.250        }
  19.251 -      prev = cur->next_addr();
  19.252 -      cur = cur->next();
  19.253 +      prev = cur->collision_list_next_addr();
  19.254 +      cur = cur->collision_list_next();
  19.255      }
  19.256      i = i + _fine_eviction_stride;
  19.257      if (i >= _n_fine_entries) i = i - _n_fine_entries;
  19.258 @@ -503,7 +596,7 @@
  19.259    }
  19.260  
  19.261    // Unsplice.
  19.262 -  *max_prev = max->next();
  19.263 +  *max_prev = max->collision_list_next();
  19.264    Atomic::inc(&_n_coarsenings);
  19.265    _n_fine_entries--;
  19.266    return max;
  19.267 @@ -534,7 +627,7 @@
  19.268      PerRegionTable* cur = _fine_grain_regions[i];
  19.269      PerRegionTable** prev = &_fine_grain_regions[i];
  19.270      while (cur != NULL) {
  19.271 -      PerRegionTable* nxt = cur->next();
  19.272 +      PerRegionTable* nxt = cur->collision_list_next();
  19.273        // If the entire region is dead, eliminate.
  19.274        if (G1RSScrubVerbose) {
  19.275          gclog_or_tty->print_cr("     For other region %u:",
  19.276 @@ -542,11 +635,12 @@
  19.277        }
  19.278        if (!region_bm->at((size_t) cur->hr()->hrs_index())) {
  19.279          *prev = nxt;
  19.280 -        cur->set_next(NULL);
  19.281 +        cur->set_collision_list_next(NULL);
  19.282          _n_fine_entries--;
  19.283          if (G1RSScrubVerbose) {
  19.284            gclog_or_tty->print_cr("          deleted via region map.");
  19.285          }
  19.286 +        unlink_from_all(cur);
  19.287          PerRegionTable::free(cur);
  19.288        } else {
  19.289          // Do fine-grain elimination.
  19.290 @@ -560,11 +654,12 @@
  19.291          // Did that empty the table completely?
  19.292          if (cur->occupied() == 0) {
  19.293            *prev = nxt;
  19.294 -          cur->set_next(NULL);
  19.295 +          cur->set_collision_list_next(NULL);
  19.296            _n_fine_entries--;
  19.297 +          unlink_from_all(cur);
  19.298            PerRegionTable::free(cur);
  19.299          } else {
  19.300 -          prev = cur->next_addr();
  19.301 +          prev = cur->collision_list_next_addr();
  19.302          }
  19.303        }
  19.304        cur = nxt;
  19.305 @@ -587,13 +682,15 @@
  19.306  
  19.307  size_t OtherRegionsTable::occ_fine() const {
  19.308    size_t sum = 0;
  19.309 -  for (size_t i = 0; i < _max_fine_entries; i++) {
  19.310 -    PerRegionTable* cur = _fine_grain_regions[i];
  19.311 -    while (cur != NULL) {
  19.312 -      sum += cur->occupied();
  19.313 -      cur = cur->next();
  19.314 -    }
  19.315 +
  19.316 +  size_t num = 0;
  19.317 +  PerRegionTable * cur = _first_all_fine_prts;
  19.318 +  while (cur != NULL) {
  19.319 +    sum += cur->occupied();
  19.320 +    cur = cur->next();
  19.321 +    num++;
  19.322    }
  19.323 +  guarantee(num == _n_fine_entries, "just checking");
  19.324    return sum;
  19.325  }
  19.326  
  19.327 @@ -609,12 +706,10 @@
  19.328    // Cast away const in this case.
  19.329    MutexLockerEx x((Mutex*)&_m, Mutex::_no_safepoint_check_flag);
  19.330    size_t sum = 0;
  19.331 -  for (size_t i = 0; i < _max_fine_entries; i++) {
  19.332 -    PerRegionTable* cur = _fine_grain_regions[i];
  19.333 -    while (cur != NULL) {
  19.334 -      sum += cur->mem_size();
  19.335 -      cur = cur->next();
  19.336 -    }
  19.337 +  PerRegionTable * cur = _first_all_fine_prts;
  19.338 +  while (cur != NULL) {
  19.339 +    sum += cur->mem_size();
  19.340 +    cur = cur->next();
  19.341    }
  19.342    sum += (sizeof(PerRegionTable*) * _max_fine_entries);
  19.343    sum += (_coarse_map.size_in_words() * HeapWordSize);
  19.344 @@ -632,22 +727,24 @@
  19.345  }
  19.346  
  19.347  void OtherRegionsTable::clear_fcc() {
  19.348 +  size_t hrs_idx = hr()->hrs_index();
  19.349    for (int i = 0; i < HeapRegionRemSet::num_par_rem_sets(); i++) {
  19.350 -    _from_card_cache[i][hr()->hrs_index()] = -1;
  19.351 +    _from_card_cache[i][hrs_idx] = -1;
  19.352    }
  19.353  }
  19.354  
  19.355  void OtherRegionsTable::clear() {
  19.356    MutexLockerEx x(&_m, Mutex::_no_safepoint_check_flag);
  19.357 -  for (size_t i = 0; i < _max_fine_entries; i++) {
  19.358 -    PerRegionTable* cur = _fine_grain_regions[i];
  19.359 -    while (cur != NULL) {
  19.360 -      PerRegionTable* nxt = cur->next();
  19.361 -      PerRegionTable::free(cur);
  19.362 -      cur = nxt;
  19.363 -    }
  19.364 -    _fine_grain_regions[i] = NULL;
  19.365 +  // if there are no entries, skip this step
  19.366 +  if (_first_all_fine_prts != NULL) {
  19.367 +    guarantee(_first_all_fine_prts != NULL && _last_all_fine_prts != NULL, "just checking");
  19.368 +    PerRegionTable::bulk_free(_first_all_fine_prts, _last_all_fine_prts);
  19.369 +    memset(_fine_grain_regions, 0, _max_fine_entries * sizeof(_fine_grain_regions[0]));
  19.370 +  } else {
  19.371 +    guarantee(_first_all_fine_prts == NULL && _last_all_fine_prts == NULL, "just checking");
  19.372    }
  19.373 +
  19.374 +  _first_all_fine_prts = _last_all_fine_prts = NULL;
  19.375    _sparse_table.clear();
  19.376    _coarse_map.clear();
  19.377    _n_fine_entries = 0;
  19.378 @@ -686,12 +783,13 @@
  19.379    PerRegionTable** prev_addr = &_fine_grain_regions[ind];
  19.380    PerRegionTable* prt = *prev_addr;
  19.381    while (prt != NULL && prt->hr() != hr) {
  19.382 -    prev_addr = prt->next_addr();
  19.383 -    prt = prt->next();
  19.384 +    prev_addr = prt->collision_list_next_addr();
  19.385 +    prt = prt->collision_list_next();
  19.386    }
  19.387    if (prt != NULL) {
  19.388      assert(prt->hr() == hr, "Loop postcondition.");
  19.389 -    *prev_addr = prt->next();
  19.390 +    *prev_addr = prt->collision_list_next();
  19.391 +    unlink_from_all(prt);
  19.392      PerRegionTable::free(prt);
  19.393      _n_fine_entries--;
  19.394      return true;
  19.395 @@ -793,7 +891,6 @@
  19.396        G1CollectedHeap::heap()->bot_shared()->address_for_index(card_index);
  19.397      gclog_or_tty->print_cr("  Card " PTR_FORMAT, card_start);
  19.398    }
  19.399 -
  19.400    if (iter.n_yielded() != occupied()) {
  19.401      gclog_or_tty->print_cr("Yielded disagrees with occupied:");
  19.402      gclog_or_tty->print_cr("  %6d yielded (%6d coarse, %6d fine).",
  19.403 @@ -905,7 +1002,7 @@
  19.404    while (!fine_has_next()) {
  19.405      if (_cur_region_cur_card == (size_t) HeapRegion::CardsPerRegion) {
  19.406        _cur_region_cur_card = 0;
  19.407 -      _fine_cur_prt = _fine_cur_prt->next();
  19.408 +      _fine_cur_prt = _fine_cur_prt->collision_list_next();
  19.409      }
  19.410      if (_fine_cur_prt == NULL) {
  19.411        fine_find_next_non_null_prt();
    20.1 --- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Thu Aug 02 22:23:28 2012 -0700
    20.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.hpp	Fri Aug 10 10:41:13 2012 -0700
    20.3 @@ -82,6 +82,14 @@
    20.4    PerRegionTable** _fine_grain_regions;
    20.5    size_t           _n_fine_entries;
    20.6  
    20.7 +  // The fine grain remembered sets are doubly linked together using
    20.8 +  // their 'next' and 'prev' fields.
    20.9 +  // This allows fast bulk freeing of all the fine grain remembered
   20.10 +  // set entries, and fast finding of all of them without iterating
   20.11 +  // over the _fine_grain_regions table.
   20.12 +  PerRegionTable * _first_all_fine_prts;
   20.13 +  PerRegionTable * _last_all_fine_prts;
   20.14 +
   20.15    // Used to sample a subset of the fine grain PRTs to determine which
   20.16    // PRT to evict and coarsen.
   20.17    size_t        _fine_eviction_start;
   20.18 @@ -114,6 +122,11 @@
   20.19    static size_t _from_card_cache_max_regions;
   20.20    static size_t _from_card_cache_mem_size;
   20.21  
   20.22 +  // link/add the given fine grain remembered set into the "all" list
   20.23 +  void link_to_all(PerRegionTable * prt);
   20.24 +  // unlink/remove the given fine grain remembered set into the "all" list
   20.25 +  void unlink_from_all(PerRegionTable * prt);
   20.26 +
   20.27  public:
   20.28    OtherRegionsTable(HeapRegion* hr);
   20.29  
    21.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSet.cpp	Thu Aug 02 22:23:28 2012 -0700
    21.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSet.cpp	Fri Aug 10 10:41:13 2012 -0700
    21.3 @@ -35,14 +35,6 @@
    21.4    _unrealistically_long_length = len;
    21.5  }
    21.6  
    21.7 -uint HeapRegionSetBase::calculate_region_num(HeapRegion* hr) {
    21.8 -  assert(hr->startsHumongous(), "pre-condition");
    21.9 -  assert(hr->capacity() % HeapRegion::GrainBytes == 0, "invariant");
   21.10 -  uint region_num = (uint) (hr->capacity() >> HeapRegion::LogOfHRGrainBytes);
   21.11 -  assert(region_num > 0, "sanity");
   21.12 -  return region_num;
   21.13 -}
   21.14 -
   21.15  void HeapRegionSetBase::fill_in_ext_msg(hrs_ext_msg* msg, const char* message) {
   21.16    msg->append("[%s] %s ln: %u rn: %u cy: "SIZE_FORMAT" ud: "SIZE_FORMAT,
   21.17                name(), message, length(), region_num(),
   21.18 @@ -152,11 +144,7 @@
   21.19    guarantee(verify_region(hr, this), hrs_ext_msg(this, "region verification"));
   21.20  
   21.21    _calc_length               += 1;
   21.22 -  if (!hr->isHumongous()) {
   21.23 -    _calc_region_num         += 1;
   21.24 -  } else {
   21.25 -    _calc_region_num         += calculate_region_num(hr);
   21.26 -  }
   21.27 +  _calc_region_num           += hr->region_num();
   21.28    _calc_total_capacity_bytes += hr->capacity();
   21.29    _calc_total_used_bytes     += hr->used();
   21.30  }
   21.31 @@ -292,7 +280,7 @@
   21.32      assert(length() >  0 && _tail != NULL, hrs_ext_msg(this, "invariant"));
   21.33      from_list->_tail->set_next(_head);
   21.34    } else {
   21.35 -    assert(length() == 0 && _head == NULL, hrs_ext_msg(this, "invariant"));
   21.36 +    assert(length() == 0 && _tail == NULL, hrs_ext_msg(this, "invariant"));
   21.37      _tail = from_list->_tail;
   21.38    }
   21.39    _head = from_list->_head;
    22.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSet.hpp	Thu Aug 02 22:23:28 2012 -0700
    22.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSet.hpp	Fri Aug 10 10:41:13 2012 -0700
    22.3 @@ -62,8 +62,6 @@
    22.4    friend class VMStructs;
    22.5  
    22.6  protected:
    22.7 -  static uint calculate_region_num(HeapRegion* hr);
    22.8 -
    22.9    static uint _unrealistically_long_length;
   22.10  
   22.11    // The number of regions added to the set. If the set contains
    23.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp	Thu Aug 02 22:23:28 2012 -0700
    23.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSet.inline.hpp	Fri Aug 10 10:41:13 2012 -0700
    23.3 @@ -33,11 +33,7 @@
    23.4    // Assumes the caller has already verified the region.
    23.5  
    23.6    _length           += 1;
    23.7 -  if (!hr->isHumongous()) {
    23.8 -    _region_num     += 1;
    23.9 -  } else {
   23.10 -    _region_num     += calculate_region_num(hr);
   23.11 -  }
   23.12 +  _region_num       += hr->region_num();
   23.13    _total_used_bytes += hr->used();
   23.14  }
   23.15  
   23.16 @@ -54,12 +50,7 @@
   23.17    assert(_length > 0, hrs_ext_msg(this, "pre-condition"));
   23.18    _length -= 1;
   23.19  
   23.20 -  uint region_num_diff;
   23.21 -  if (!hr->isHumongous()) {
   23.22 -    region_num_diff = 1;
   23.23 -  } else {
   23.24 -    region_num_diff = calculate_region_num(hr);
   23.25 -  }
   23.26 +  uint region_num_diff = hr->region_num();
   23.27    assert(region_num_diff <= _region_num,
   23.28           hrs_err_msg("[%s] region's region num: %u "
   23.29                       "should be <= region num: %u",
    24.1 --- a/src/share/vm/utilities/decoder.cpp	Thu Aug 02 22:23:28 2012 -0700
    24.2 +++ b/src/share/vm/utilities/decoder.cpp	Fri Aug 10 10:41:13 2012 -0700
    24.3 @@ -91,6 +91,18 @@
    24.4    return decoder->decode(addr, buf, buflen, offset, modulepath);
    24.5  }
    24.6  
    24.7 +bool Decoder::decode(address addr, char* buf, int buflen, int* offset, const void* base) {
    24.8 +  assert(_shared_decoder_lock != NULL, "Just check");
    24.9 +  bool error_handling_thread = os::current_thread_id() == VMError::first_error_tid;
   24.10 +  MutexLockerEx locker(error_handling_thread ? NULL : _shared_decoder_lock, true);
   24.11 +  AbstractDecoder* decoder = error_handling_thread ?
   24.12 +    get_error_handler_instance(): get_shared_instance();
   24.13 +  assert(decoder != NULL, "null decoder");
   24.14 +
   24.15 +  return decoder->decode(addr, buf, buflen, offset, base);
   24.16 +}
   24.17 +
   24.18 +
   24.19  bool Decoder::demangle(const char* symbol, char* buf, int buflen) {
   24.20    assert(_shared_decoder_lock != NULL, "Just check");
   24.21    bool error_handling_thread = os::current_thread_id() == VMError::first_error_tid;
    25.1 --- a/src/share/vm/utilities/decoder.hpp	Thu Aug 02 22:23:28 2012 -0700
    25.2 +++ b/src/share/vm/utilities/decoder.hpp	Fri Aug 10 10:41:13 2012 -0700
    25.3 @@ -47,6 +47,8 @@
    25.4    // the function
    25.5    virtual bool decode(address pc, char* buf, int buflen, int* offset,
    25.6      const char* modulepath = NULL) = 0;
    25.7 +  virtual bool decode(address pc, char* buf, int buflen, int* offset, const void* base) = 0;
    25.8 +
    25.9    // demangle a C++ symbol
   25.10    virtual bool demangle(const char* symbol, char* buf, int buflen) = 0;
   25.11    // if the decoder can decode symbols in vm
   25.12 @@ -82,6 +84,10 @@
   25.13      return false;
   25.14    }
   25.15  
   25.16 +  virtual bool decode(address pc, char* buf, int buflen, int* offset, const void* base) {
   25.17 +    return false;
   25.18 +  }
   25.19 +
   25.20    virtual bool demangle(const char* symbol, char* buf, int buflen) {
   25.21      return false;
   25.22    }
   25.23 @@ -95,6 +101,7 @@
   25.24  class Decoder : AllStatic {
   25.25  public:
   25.26    static bool decode(address pc, char* buf, int buflen, int* offset, const char* modulepath = NULL);
   25.27 +  static bool decode(address pc, char* buf, int buflen, int* offset, const void* base);
   25.28    static bool demangle(const char* symbol, char* buf, int buflen);
   25.29    static bool can_decode_C_frame_in_vm();
   25.30  
    26.1 --- a/src/share/vm/utilities/decoder_elf.hpp	Thu Aug 02 22:23:28 2012 -0700
    26.2 +++ b/src/share/vm/utilities/decoder_elf.hpp	Fri Aug 10 10:41:13 2012 -0700
    26.3 @@ -43,6 +43,10 @@
    26.4  
    26.5    bool demangle(const char* symbol, char *buf, int buflen);
    26.6    bool decode(address addr, char *buf, int buflen, int* offset, const char* filepath = NULL);
    26.7 +  bool decode(address addr, char *buf, int buflen, int* offset, const void *base) {
    26.8 +    ShouldNotReachHere();
    26.9 +    return false;
   26.10 +  }
   26.11  
   26.12  private:
   26.13    ElfFile*         get_elf_file(const char* filepath);
    27.1 --- a/src/share/vm/utilities/hashtable.cpp	Thu Aug 02 22:23:28 2012 -0700
    27.2 +++ b/src/share/vm/utilities/hashtable.cpp	Fri Aug 10 10:41:13 2012 -0700
    27.3 @@ -135,7 +135,7 @@
    27.4        // walking the hashtable past these entries requires
    27.5        // BasicHashtableEntry::make_ptr() call.
    27.6        bool keep_shared = p->is_shared();
    27.7 -      unlink_entry(p);
    27.8 +      this->unlink_entry(p);
    27.9        new_table->add_entry(index, p);
   27.10        if (keep_shared) {
   27.11          p->set_shared();
    28.1 --- a/src/share/vm/utilities/hashtable.hpp	Thu Aug 02 22:23:28 2012 -0700
    28.2 +++ b/src/share/vm/utilities/hashtable.hpp	Fri Aug 10 10:41:13 2012 -0700
    28.3 @@ -260,7 +260,7 @@
    28.4    }
    28.5  
    28.6    int index_for(Symbol* name) {
    28.7 -    return hash_to_index(compute_hash(name));
    28.8 +    return this->hash_to_index(compute_hash(name));
    28.9    }
   28.10  
   28.11    // Table entry management

mercurial