Merge

Thu, 17 Oct 2013 14:20:57 -0700

author
lana
date
Thu, 17 Oct 2013 14:20:57 -0700
changeset 5882
1a93f2c5945a
parent 5881
8321dcc18438
parent 5868
aeae561a6d0b
child 5950
4589b398ab03
child 6019
e39b138b2518

Merge

make/windows/makefiles/compile.make file | annotate | diff | comparison | revisions
make/windows/makefiles/sa.make file | annotate | diff | comparison | revisions
src/share/vm/classfile/classFileParser.cpp file | annotate | diff | comparison | revisions
     1.1 --- a/.hgtags	Sun Oct 13 21:14:04 2013 +0100
     1.2 +++ b/.hgtags	Thu Oct 17 14:20:57 2013 -0700
     1.3 @@ -384,3 +384,5 @@
     1.4  6209b0ed51c086d4127bac0e086c8f326d1764d7 jdk8-b110
     1.5  562a3d356de67670b4172b82aca2d30743449e04 hs25-b53
     1.6  f6962730bbde82f279a0ae3a1c14bc5e58096c6e jdk8-b111
     1.7 +4a845c7a463844cead9e1e1641d6bcfb8a77f1c7 hs25-b54
     1.8 +0ed9a90f45e1b392c671005f9ee22ce1acf02984 jdk8-b112
     2.1 --- a/agent/src/os/bsd/ps_core.c	Sun Oct 13 21:14:04 2013 +0100
     2.2 +++ b/agent/src/os/bsd/ps_core.c	Thu Oct 17 14:20:57 2013 -0700
     2.3 @@ -44,6 +44,7 @@
     2.4  // close all file descriptors
     2.5  static void close_files(struct ps_prochandle* ph) {
     2.6    lib_info* lib = NULL;
     2.7 +
     2.8    // close core file descriptor
     2.9    if (ph->core->core_fd >= 0)
    2.10      close(ph->core->core_fd);
    2.11 @@ -149,8 +150,7 @@
    2.12  
    2.13  // Return the map_info for the given virtual address.  We keep a sorted
    2.14  // array of pointers in ph->map_array, so we can binary search.
    2.15 -static map_info* core_lookup(struct ps_prochandle *ph, uintptr_t addr)
    2.16 -{
    2.17 +static map_info* core_lookup(struct ps_prochandle *ph, uintptr_t addr) {
    2.18    int mid, lo = 0, hi = ph->core->num_maps - 1;
    2.19    map_info *mp;
    2.20  
    2.21 @@ -230,9 +230,9 @@
    2.22      size_t _used;            // for setting space top on read
    2.23  
    2.24      // 4991491 NOTICE These are C++ bool's in filemap.hpp and must match up with
    2.25 -    // the C type matching the C++ bool type on any given platform. For
    2.26 -    // Hotspot on BSD we assume the corresponding C type is char but
    2.27 -    // licensees on BSD versions may need to adjust the type of these fields.
    2.28 +    // the C type matching the C++ bool type on any given platform.
    2.29 +    // We assume the corresponding C type is char but licensees
    2.30 +    // may need to adjust the type of these fields.
    2.31      char   _read_only;       // read only space?
    2.32      char   _allow_exec;      // executable code in space?
    2.33  
    2.34 @@ -286,10 +286,12 @@
    2.35  #define USE_SHARED_SPACES_SYM "_UseSharedSpaces"
    2.36  // mangled name of Arguments::SharedArchivePath
    2.37  #define SHARED_ARCHIVE_PATH_SYM "_ZN9Arguments17SharedArchivePathE"
    2.38 +#define LIBJVM_NAME "/libjvm.dylib"
    2.39  #else
    2.40  #define USE_SHARED_SPACES_SYM "UseSharedSpaces"
    2.41  // mangled name of Arguments::SharedArchivePath
    2.42  #define SHARED_ARCHIVE_PATH_SYM "__ZN9Arguments17SharedArchivePathE"
    2.43 +#define LIBJVM_NAME "/libjvm.so"
    2.44  #endif // __APPLE_
    2.45  
    2.46  static bool init_classsharing_workaround(struct ps_prochandle* ph) {
    2.47 @@ -300,12 +302,7 @@
    2.48      // we are iterating over shared objects from the core dump. look for
    2.49      // libjvm.so.
    2.50      const char *jvm_name = 0;
    2.51 -#ifdef __APPLE__
    2.52 -    if ((jvm_name = strstr(lib->name, "/libjvm.dylib")) != 0)
    2.53 -#else
    2.54 -    if ((jvm_name = strstr(lib->name, "/libjvm.so")) != 0)
    2.55 -#endif // __APPLE__
    2.56 -    {
    2.57 +    if ((jvm_name = strstr(lib->name, LIBJVM_NAME)) != 0) {
    2.58        char classes_jsa[PATH_MAX];
    2.59        struct FileMapHeader header;
    2.60        int fd = -1;
    2.61 @@ -399,8 +396,8 @@
    2.62          }
    2.63        }
    2.64        return true;
    2.65 -    }
    2.66 -    lib = lib->next;
    2.67 +   }
    2.68 +   lib = lib->next;
    2.69    }
    2.70    return true;
    2.71  }
    2.72 @@ -432,8 +429,8 @@
    2.73    // allocate map_array
    2.74    map_info** array;
    2.75    if ( (array = (map_info**) malloc(sizeof(map_info*) * num_maps)) == NULL) {
    2.76 -     print_debug("can't allocate memory for map array\n");
    2.77 -     return false;
    2.78 +    print_debug("can't allocate memory for map array\n");
    2.79 +    return false;
    2.80    }
    2.81  
    2.82    // add maps to array
    2.83 @@ -450,7 +447,7 @@
    2.84    ph->core->map_array = array;
    2.85    // sort the map_info array by base virtual address.
    2.86    qsort(ph->core->map_array, ph->core->num_maps, sizeof (map_info*),
    2.87 -           core_cmp_mapping);
    2.88 +        core_cmp_mapping);
    2.89  
    2.90    // print map
    2.91    if (is_debug()) {
    2.92 @@ -458,7 +455,7 @@
    2.93      print_debug("---- sorted virtual address map ----\n");
    2.94      for (j = 0; j < ph->core->num_maps; j++) {
    2.95        print_debug("base = 0x%lx\tsize = %d\n", ph->core->map_array[j]->vaddr,
    2.96 -                                       ph->core->map_array[j]->memsz);
    2.97 +                  ph->core->map_array[j]->memsz);
    2.98      }
    2.99    }
   2.100  
   2.101 @@ -1091,9 +1088,9 @@
   2.102                                     notep->n_type, notep->n_descsz);
   2.103  
   2.104        if (notep->n_type == NT_PRSTATUS) {
   2.105 -         if (core_handle_prstatus(ph, descdata, notep->n_descsz) != true) {
   2.106 -            return false;
   2.107 -         }
   2.108 +        if (core_handle_prstatus(ph, descdata, notep->n_descsz) != true) {
   2.109 +          return false;
   2.110 +        }
   2.111        }
   2.112        p = descdata + ROUNDUP(notep->n_descsz, 4);
   2.113     }
   2.114 @@ -1121,7 +1118,7 @@
   2.115      * contains a set of saved /proc structures), and PT_LOAD (which
   2.116      * represents a memory mapping from the process's address space).
   2.117      *
   2.118 -    * Difference b/w Solaris PT_NOTE and BSD PT_NOTE:
   2.119 +    * Difference b/w Solaris PT_NOTE and Linux/BSD PT_NOTE:
   2.120      *
   2.121      *     In Solaris there are two PT_NOTE segments the first PT_NOTE (if present)
   2.122      *     contains /proc structs in the pre-2.6 unstructured /proc format. the last
   2.123 @@ -1167,32 +1164,61 @@
   2.124  
   2.125  // read segments of a shared object
   2.126  static bool read_lib_segments(struct ps_prochandle* ph, int lib_fd, ELF_EHDR* lib_ehdr, uintptr_t lib_base) {
   2.127 -   int i = 0;
   2.128 -   ELF_PHDR* phbuf;
   2.129 -   ELF_PHDR* lib_php = NULL;
   2.130 +  int i = 0;
   2.131 +  ELF_PHDR* phbuf;
   2.132 +  ELF_PHDR* lib_php = NULL;
   2.133  
   2.134 -   if ((phbuf = read_program_header_table(lib_fd, lib_ehdr)) == NULL)
   2.135 -      return false;
   2.136 +  int page_size=sysconf(_SC_PAGE_SIZE);
   2.137  
   2.138 -   // we want to process only PT_LOAD segments that are not writable.
   2.139 -   // i.e., text segments. The read/write/exec (data) segments would
   2.140 -   // have been already added from core file segments.
   2.141 -   for (lib_php = phbuf, i = 0; i < lib_ehdr->e_phnum; i++) {
   2.142 -      if ((lib_php->p_type == PT_LOAD) && !(lib_php->p_flags & PF_W) && (lib_php->p_filesz != 0)) {
   2.143 -         if (add_map_info(ph, lib_fd, lib_php->p_offset, lib_php->p_vaddr + lib_base, lib_php->p_filesz) == NULL)
   2.144 -            goto err;
   2.145 +  if ((phbuf = read_program_header_table(lib_fd, lib_ehdr)) == NULL) {
   2.146 +    return false;
   2.147 +  }
   2.148 +
   2.149 +  // we want to process only PT_LOAD segments that are not writable.
   2.150 +  // i.e., text segments. The read/write/exec (data) segments would
   2.151 +  // have been already added from core file segments.
   2.152 +  for (lib_php = phbuf, i = 0; i < lib_ehdr->e_phnum; i++) {
   2.153 +    if ((lib_php->p_type == PT_LOAD) && !(lib_php->p_flags & PF_W) && (lib_php->p_filesz != 0)) {
   2.154 +
   2.155 +      uintptr_t target_vaddr = lib_php->p_vaddr + lib_base;
   2.156 +      map_info *existing_map = core_lookup(ph, target_vaddr);
   2.157 +
   2.158 +      if (existing_map == NULL){
   2.159 +        if (add_map_info(ph, lib_fd, lib_php->p_offset,
   2.160 +                          target_vaddr, lib_php->p_filesz) == NULL) {
   2.161 +          goto err;
   2.162 +        }
   2.163 +      } else {
   2.164 +        if ((existing_map->memsz != page_size) &&
   2.165 +            (existing_map->fd != lib_fd) &&
   2.166 +            (existing_map->memsz != lib_php->p_filesz)){
   2.167 +
   2.168 +          print_debug("address conflict @ 0x%lx (size = %ld, flags = %d\n)",
   2.169 +                        target_vaddr, lib_php->p_filesz, lib_php->p_flags);
   2.170 +          goto err;
   2.171 +        }
   2.172 +
   2.173 +        /* replace PT_LOAD segment with library segment */
   2.174 +        print_debug("overwrote with new address mapping (memsz %ld -> %ld)\n",
   2.175 +                     existing_map->memsz, lib_php->p_filesz);
   2.176 +
   2.177 +        existing_map->fd = lib_fd;
   2.178 +        existing_map->offset = lib_php->p_offset;
   2.179 +        existing_map->memsz = lib_php->p_filesz;
   2.180        }
   2.181 -      lib_php++;
   2.182 -   }
   2.183 +    }
   2.184  
   2.185 -   free(phbuf);
   2.186 -   return true;
   2.187 +    lib_php++;
   2.188 +  }
   2.189 +
   2.190 +  free(phbuf);
   2.191 +  return true;
   2.192  err:
   2.193 -   free(phbuf);
   2.194 -   return false;
   2.195 +  free(phbuf);
   2.196 +  return false;
   2.197  }
   2.198  
   2.199 -// process segments from interpreter (ld-elf.so.1)
   2.200 +// process segments from interpreter (ld.so or ld-linux.so or ld-elf.so)
   2.201  static bool read_interp_segments(struct ps_prochandle* ph) {
   2.202     ELF_EHDR interp_ehdr;
   2.203  
   2.204 @@ -1303,32 +1329,34 @@
   2.205    debug_base = dyn.d_un.d_ptr;
   2.206    // at debug_base we have struct r_debug. This has first link map in r_map field
   2.207    if (ps_pread(ph, (psaddr_t) debug_base + FIRST_LINK_MAP_OFFSET,
   2.208 -                  &first_link_map_addr, sizeof(uintptr_t)) != PS_OK) {
   2.209 +                 &first_link_map_addr, sizeof(uintptr_t)) != PS_OK) {
   2.210      print_debug("can't read first link map address\n");
   2.211      return false;
   2.212    }
   2.213  
   2.214    // read ld_base address from struct r_debug
   2.215 -  // XXX: There is no r_ldbase member on BSD
   2.216 -  /*
   2.217 +#if 0  // There is no r_ldbase member on BSD
   2.218    if (ps_pread(ph, (psaddr_t) debug_base + LD_BASE_OFFSET, &ld_base_addr,
   2.219                    sizeof(uintptr_t)) != PS_OK) {
   2.220      print_debug("can't read ld base address\n");
   2.221      return false;
   2.222    }
   2.223    ph->core->ld_base_addr = ld_base_addr;
   2.224 -  */
   2.225 +#else
   2.226    ph->core->ld_base_addr = 0;
   2.227 +#endif
   2.228  
   2.229    print_debug("interpreter base address is 0x%lx\n", ld_base_addr);
   2.230  
   2.231 -  // now read segments from interp (i.e ld-elf.so.1)
   2.232 -  if (read_interp_segments(ph) != true)
   2.233 +  // now read segments from interp (i.e ld.so or ld-linux.so or ld-elf.so)
   2.234 +  if (read_interp_segments(ph) != true) {
   2.235      return false;
   2.236 +  }
   2.237  
   2.238    // after adding interpreter (ld.so) mappings sort again
   2.239 -  if (sort_map_array(ph) != true)
   2.240 +  if (sort_map_array(ph) != true) {
   2.241      return false;
   2.242 +  }
   2.243  
   2.244    print_debug("first link map is at 0x%lx\n", first_link_map_addr);
   2.245  
   2.246 @@ -1380,8 +1408,9 @@
   2.247            add_lib_info_fd(ph, lib_name, lib_fd, lib_base);
   2.248            // Map info is added for the library (lib_name) so
   2.249            // we need to re-sort it before calling the p_pdread.
   2.250 -          if (sort_map_array(ph) != true)
   2.251 +          if (sort_map_array(ph) != true) {
   2.252              return false;
   2.253 +          }
   2.254          } else {
   2.255            print_debug("can't read ELF header for shared object %s\n", lib_name);
   2.256            close(lib_fd);
   2.257 @@ -1392,7 +1421,7 @@
   2.258  
   2.259      // read next link_map address
   2.260      if (ps_pread(ph, (psaddr_t) link_map_addr + LINK_MAP_NEXT_OFFSET,
   2.261 -                 &link_map_addr, sizeof(uintptr_t)) != PS_OK) {
   2.262 +                  &link_map_addr, sizeof(uintptr_t)) != PS_OK) {
   2.263        print_debug("can't read next link in link_map\n");
   2.264        return false;
   2.265      }
   2.266 @@ -1408,7 +1437,7 @@
   2.267  
   2.268    struct ps_prochandle* ph = (struct ps_prochandle*) calloc(1, sizeof(struct ps_prochandle));
   2.269    if (ph == NULL) {
   2.270 -    print_debug("cant allocate ps_prochandle\n");
   2.271 +    print_debug("can't allocate ps_prochandle\n");
   2.272      return NULL;
   2.273    }
   2.274  
   2.275 @@ -1444,38 +1473,45 @@
   2.276    }
   2.277  
   2.278    if (read_elf_header(ph->core->exec_fd, &exec_ehdr) != true || exec_ehdr.e_type != ET_EXEC) {
   2.279 -     print_debug("executable file is not a valid ELF ET_EXEC file\n");
   2.280 -     goto err;
   2.281 +    print_debug("executable file is not a valid ELF ET_EXEC file\n");
   2.282 +    goto err;
   2.283    }
   2.284  
   2.285    // process core file segments
   2.286 -  if (read_core_segments(ph, &core_ehdr) != true)
   2.287 -     goto err;
   2.288 +  if (read_core_segments(ph, &core_ehdr) != true) {
   2.289 +    goto err;
   2.290 +  }
   2.291  
   2.292    // process exec file segments
   2.293 -  if (read_exec_segments(ph, &exec_ehdr) != true)
   2.294 -     goto err;
   2.295 +  if (read_exec_segments(ph, &exec_ehdr) != true) {
   2.296 +    goto err;
   2.297 +  }
   2.298  
   2.299    // exec file is also treated like a shared object for symbol search
   2.300    if (add_lib_info_fd(ph, exec_file, ph->core->exec_fd,
   2.301 -                      (uintptr_t)0 + find_base_address(ph->core->exec_fd, &exec_ehdr)) == NULL)
   2.302 -     goto err;
   2.303 +                      (uintptr_t)0 + find_base_address(ph->core->exec_fd, &exec_ehdr)) == NULL) {
   2.304 +    goto err;
   2.305 +  }
   2.306  
   2.307    // allocate and sort maps into map_array, we need to do this
   2.308    // here because read_shared_lib_info needs to read from debuggee
   2.309    // address space
   2.310 -  if (sort_map_array(ph) != true)
   2.311 +  if (sort_map_array(ph) != true) {
   2.312      goto err;
   2.313 +  }
   2.314  
   2.315 -  if (read_shared_lib_info(ph) != true)
   2.316 +  if (read_shared_lib_info(ph) != true) {
   2.317      goto err;
   2.318 +  }
   2.319  
   2.320    // sort again because we have added more mappings from shared objects
   2.321 -  if (sort_map_array(ph) != true)
   2.322 +  if (sort_map_array(ph) != true) {
   2.323      goto err;
   2.324 +  }
   2.325  
   2.326 -  if (init_classsharing_workaround(ph) != true)
   2.327 +  if (init_classsharing_workaround(ph) != true) {
   2.328      goto err;
   2.329 +  }
   2.330  
   2.331    print_debug("Leave Pgrab_core\n");
   2.332    return ph;
     3.1 --- a/agent/src/os/linux/ps_core.c	Sun Oct 13 21:14:04 2013 +0100
     3.2 +++ b/agent/src/os/linux/ps_core.c	Thu Oct 17 14:20:57 2013 -0700
     3.3 @@ -41,155 +41,158 @@
     3.4  // ps_prochandle cleanup helper functions
     3.5  
     3.6  // close all file descriptors
     3.7 -static void close_elf_files(struct ps_prochandle* ph) {
     3.8 -   lib_info* lib = NULL;
     3.9 +static void close_files(struct ps_prochandle* ph) {
    3.10 +  lib_info* lib = NULL;
    3.11  
    3.12 -   // close core file descriptor
    3.13 -   if (ph->core->core_fd >= 0)
    3.14 -     close(ph->core->core_fd);
    3.15 +  // close core file descriptor
    3.16 +  if (ph->core->core_fd >= 0)
    3.17 +    close(ph->core->core_fd);
    3.18  
    3.19 -   // close exec file descriptor
    3.20 -   if (ph->core->exec_fd >= 0)
    3.21 -     close(ph->core->exec_fd);
    3.22 +  // close exec file descriptor
    3.23 +  if (ph->core->exec_fd >= 0)
    3.24 +    close(ph->core->exec_fd);
    3.25  
    3.26 -   // close interp file descriptor
    3.27 -   if (ph->core->interp_fd >= 0)
    3.28 -     close(ph->core->interp_fd);
    3.29 +  // close interp file descriptor
    3.30 +  if (ph->core->interp_fd >= 0)
    3.31 +    close(ph->core->interp_fd);
    3.32  
    3.33 -   // close class share archive file
    3.34 -   if (ph->core->classes_jsa_fd >= 0)
    3.35 -     close(ph->core->classes_jsa_fd);
    3.36 +  // close class share archive file
    3.37 +  if (ph->core->classes_jsa_fd >= 0)
    3.38 +    close(ph->core->classes_jsa_fd);
    3.39  
    3.40 -   // close all library file descriptors
    3.41 -   lib = ph->libs;
    3.42 -   while (lib) {
    3.43 -      int fd = lib->fd;
    3.44 -      if (fd >= 0 && fd != ph->core->exec_fd) close(fd);
    3.45 -      lib = lib->next;
    3.46 -   }
    3.47 +  // close all library file descriptors
    3.48 +  lib = ph->libs;
    3.49 +  while (lib) {
    3.50 +    int fd = lib->fd;
    3.51 +    if (fd >= 0 && fd != ph->core->exec_fd) {
    3.52 +      close(fd);
    3.53 +    }
    3.54 +    lib = lib->next;
    3.55 +  }
    3.56  }
    3.57  
    3.58  // clean all map_info stuff
    3.59  static void destroy_map_info(struct ps_prochandle* ph) {
    3.60    map_info* map = ph->core->maps;
    3.61    while (map) {
    3.62 -     map_info* next = map->next;
    3.63 -     free(map);
    3.64 -     map = next;
    3.65 +    map_info* next = map->next;
    3.66 +    free(map);
    3.67 +    map = next;
    3.68    }
    3.69  
    3.70    if (ph->core->map_array) {
    3.71 -     free(ph->core->map_array);
    3.72 +    free(ph->core->map_array);
    3.73    }
    3.74  
    3.75    // Part of the class sharing workaround
    3.76    map = ph->core->class_share_maps;
    3.77    while (map) {
    3.78 -     map_info* next = map->next;
    3.79 -     free(map);
    3.80 -     map = next;
    3.81 +    map_info* next = map->next;
    3.82 +    free(map);
    3.83 +    map = next;
    3.84    }
    3.85  }
    3.86  
    3.87  // ps_prochandle operations
    3.88  static void core_release(struct ps_prochandle* ph) {
    3.89 -   if (ph->core) {
    3.90 -      close_elf_files(ph);
    3.91 -      destroy_map_info(ph);
    3.92 -      free(ph->core);
    3.93 -   }
    3.94 +  if (ph->core) {
    3.95 +    close_files(ph);
    3.96 +    destroy_map_info(ph);
    3.97 +    free(ph->core);
    3.98 +  }
    3.99  }
   3.100  
   3.101  static map_info* allocate_init_map(int fd, off_t offset, uintptr_t vaddr, size_t memsz) {
   3.102 -   map_info* map;
   3.103 -   if ( (map = (map_info*) calloc(1, sizeof(map_info))) == NULL) {
   3.104 -      print_debug("can't allocate memory for map_info\n");
   3.105 -      return NULL;
   3.106 -   }
   3.107 +  map_info* map;
   3.108 +  if ( (map = (map_info*) calloc(1, sizeof(map_info))) == NULL) {
   3.109 +    print_debug("can't allocate memory for map_info\n");
   3.110 +    return NULL;
   3.111 +  }
   3.112  
   3.113 -   // initialize map
   3.114 -   map->fd     = fd;
   3.115 -   map->offset = offset;
   3.116 -   map->vaddr  = vaddr;
   3.117 -   map->memsz  = memsz;
   3.118 -   return map;
   3.119 +  // initialize map
   3.120 +  map->fd     = fd;
   3.121 +  map->offset = offset;
   3.122 +  map->vaddr  = vaddr;
   3.123 +  map->memsz  = memsz;
   3.124 +  return map;
   3.125  }
   3.126  
   3.127  // add map info with given fd, offset, vaddr and memsz
   3.128  static map_info* add_map_info(struct ps_prochandle* ph, int fd, off_t offset,
   3.129                               uintptr_t vaddr, size_t memsz) {
   3.130 -   map_info* map;
   3.131 -   if ((map = allocate_init_map(fd, offset, vaddr, memsz)) == NULL) {
   3.132 -      return NULL;
   3.133 -   }
   3.134 +  map_info* map;
   3.135 +  if ((map = allocate_init_map(fd, offset, vaddr, memsz)) == NULL) {
   3.136 +    return NULL;
   3.137 +  }
   3.138  
   3.139 -   // add this to map list
   3.140 -   map->next  = ph->core->maps;
   3.141 -   ph->core->maps   = map;
   3.142 -   ph->core->num_maps++;
   3.143 +  // add this to map list
   3.144 +  map->next  = ph->core->maps;
   3.145 +  ph->core->maps   = map;
   3.146 +  ph->core->num_maps++;
   3.147  
   3.148 -   return map;
   3.149 +  return map;
   3.150  }
   3.151  
   3.152  // Part of the class sharing workaround
   3.153 -static void add_class_share_map_info(struct ps_prochandle* ph, off_t offset,
   3.154 +static map_info* add_class_share_map_info(struct ps_prochandle* ph, off_t offset,
   3.155                               uintptr_t vaddr, size_t memsz) {
   3.156 -   map_info* map;
   3.157 -   if ((map = allocate_init_map(ph->core->classes_jsa_fd,
   3.158 -                                offset, vaddr, memsz)) == NULL) {
   3.159 -      return;
   3.160 -   }
   3.161 +  map_info* map;
   3.162 +  if ((map = allocate_init_map(ph->core->classes_jsa_fd,
   3.163 +                               offset, vaddr, memsz)) == NULL) {
   3.164 +    return NULL;
   3.165 +  }
   3.166  
   3.167 -   map->next = ph->core->class_share_maps;
   3.168 -   ph->core->class_share_maps = map;
   3.169 +  map->next = ph->core->class_share_maps;
   3.170 +  ph->core->class_share_maps = map;
   3.171 +  return map;
   3.172  }
   3.173  
   3.174  // Return the map_info for the given virtual address.  We keep a sorted
   3.175  // array of pointers in ph->map_array, so we can binary search.
   3.176 -static map_info* core_lookup(struct ps_prochandle *ph, uintptr_t addr)
   3.177 -{
   3.178 -   int mid, lo = 0, hi = ph->core->num_maps - 1;
   3.179 -   map_info *mp;
   3.180 +static map_info* core_lookup(struct ps_prochandle *ph, uintptr_t addr) {
   3.181 +  int mid, lo = 0, hi = ph->core->num_maps - 1;
   3.182 +  map_info *mp;
   3.183  
   3.184 -   while (hi - lo > 1) {
   3.185 -     mid = (lo + hi) / 2;
   3.186 -      if (addr >= ph->core->map_array[mid]->vaddr)
   3.187 -         lo = mid;
   3.188 -      else
   3.189 -         hi = mid;
   3.190 -   }
   3.191 +  while (hi - lo > 1) {
   3.192 +    mid = (lo + hi) / 2;
   3.193 +    if (addr >= ph->core->map_array[mid]->vaddr) {
   3.194 +      lo = mid;
   3.195 +    } else {
   3.196 +      hi = mid;
   3.197 +    }
   3.198 +  }
   3.199  
   3.200 -   if (addr < ph->core->map_array[hi]->vaddr)
   3.201 -      mp = ph->core->map_array[lo];
   3.202 -   else
   3.203 -      mp = ph->core->map_array[hi];
   3.204 +  if (addr < ph->core->map_array[hi]->vaddr) {
   3.205 +    mp = ph->core->map_array[lo];
   3.206 +  } else {
   3.207 +    mp = ph->core->map_array[hi];
   3.208 +  }
   3.209  
   3.210 -   if (addr >= mp->vaddr && addr < mp->vaddr + mp->memsz)
   3.211 +  if (addr >= mp->vaddr && addr < mp->vaddr + mp->memsz) {
   3.212 +    return (mp);
   3.213 +  }
   3.214 +
   3.215 +
   3.216 +  // Part of the class sharing workaround
   3.217 +  // Unfortunately, we have no way of detecting -Xshare state.
   3.218 +  // Check out the share maps atlast, if we don't find anywhere.
   3.219 +  // This is done this way so to avoid reading share pages
   3.220 +  // ahead of other normal maps. For eg. with -Xshare:off we don't
   3.221 +  // want to prefer class sharing data to data from core.
   3.222 +  mp = ph->core->class_share_maps;
   3.223 +  if (mp) {
   3.224 +    print_debug("can't locate map_info at 0x%lx, trying class share maps\n", addr);
   3.225 +  }
   3.226 +  while (mp) {
   3.227 +    if (addr >= mp->vaddr && addr < mp->vaddr + mp->memsz) {
   3.228 +      print_debug("located map_info at 0x%lx from class share maps\n", addr);
   3.229        return (mp);
   3.230 +    }
   3.231 +    mp = mp->next;
   3.232 +  }
   3.233  
   3.234 -
   3.235 -   // Part of the class sharing workaround
   3.236 -   // Unfortunately, we have no way of detecting -Xshare state.
   3.237 -   // Check out the share maps atlast, if we don't find anywhere.
   3.238 -   // This is done this way so to avoid reading share pages
   3.239 -   // ahead of other normal maps. For eg. with -Xshare:off we don't
   3.240 -   // want to prefer class sharing data to data from core.
   3.241 -   mp = ph->core->class_share_maps;
   3.242 -   if (mp) {
   3.243 -      print_debug("can't locate map_info at 0x%lx, trying class share maps\n",
   3.244 -             addr);
   3.245 -   }
   3.246 -   while (mp) {
   3.247 -      if (addr >= mp->vaddr && addr < mp->vaddr + mp->memsz) {
   3.248 -         print_debug("located map_info at 0x%lx from class share maps\n",
   3.249 -                  addr);
   3.250 -         return (mp);
   3.251 -      }
   3.252 -      mp = mp->next;
   3.253 -   }
   3.254 -
   3.255 -   print_debug("can't locate map_info at 0x%lx\n", addr);
   3.256 -   return (NULL);
   3.257 +  print_debug("can't locate map_info at 0x%lx\n", addr);
   3.258 +  return (NULL);
   3.259  }
   3.260  
   3.261  //---------------------------------------------------------------
   3.262 @@ -226,9 +229,9 @@
   3.263      size_t _used;            // for setting space top on read
   3.264  
   3.265      // 4991491 NOTICE These are C++ bool's in filemap.hpp and must match up with
   3.266 -    // the C type matching the C++ bool type on any given platform. For
   3.267 -    // Hotspot on Linux we assume the corresponding C type is char but
   3.268 -    // licensees on Linux versions may need to adjust the type of these fields.
   3.269 +    // the C type matching the C++ bool type on any given platform.
   3.270 +    // We assume the corresponding C type is char but licensees
   3.271 +    // may need to adjust the type of these fields.
   3.272      char   _read_only;       // read only space?
   3.273      char   _allow_exec;      // executable code in space?
   3.274  
   3.275 @@ -238,154 +241,159 @@
   3.276  };
   3.277  
   3.278  static bool read_jboolean(struct ps_prochandle* ph, uintptr_t addr, jboolean* pvalue) {
   3.279 -   jboolean i;
   3.280 -   if (ps_pdread(ph, (psaddr_t) addr, &i, sizeof(i)) == PS_OK) {
   3.281 -      *pvalue = i;
   3.282 -      return true;
   3.283 -   } else {
   3.284 -      return false;
   3.285 -   }
   3.286 +  jboolean i;
   3.287 +  if (ps_pdread(ph, (psaddr_t) addr, &i, sizeof(i)) == PS_OK) {
   3.288 +    *pvalue = i;
   3.289 +    return true;
   3.290 +  } else {
   3.291 +    return false;
   3.292 +  }
   3.293  }
   3.294  
   3.295  static bool read_pointer(struct ps_prochandle* ph, uintptr_t addr, uintptr_t* pvalue) {
   3.296 -   uintptr_t uip;
   3.297 -   if (ps_pdread(ph, (psaddr_t) addr, &uip, sizeof(uip)) == PS_OK) {
   3.298 -      *pvalue = uip;
   3.299 -      return true;
   3.300 -   } else {
   3.301 -      return false;
   3.302 -   }
   3.303 +  uintptr_t uip;
   3.304 +  if (ps_pdread(ph, (psaddr_t) addr, (char *)&uip, sizeof(uip)) == PS_OK) {
   3.305 +    *pvalue = uip;
   3.306 +    return true;
   3.307 +  } else {
   3.308 +    return false;
   3.309 +  }
   3.310  }
   3.311  
   3.312  // used to read strings from debuggee
   3.313  static bool read_string(struct ps_prochandle* ph, uintptr_t addr, char* buf, size_t size) {
   3.314 -   size_t i = 0;
   3.315 -   char  c = ' ';
   3.316 +  size_t i = 0;
   3.317 +  char  c = ' ';
   3.318  
   3.319 -   while (c != '\0') {
   3.320 -     if (ps_pdread(ph, (psaddr_t) addr, &c, sizeof(char)) != PS_OK)
   3.321 -         return false;
   3.322 -      if (i < size - 1)
   3.323 -         buf[i] = c;
   3.324 -      else // smaller buffer
   3.325 -         return false;
   3.326 -      i++; addr++;
   3.327 -   }
   3.328 +  while (c != '\0') {
   3.329 +    if (ps_pdread(ph, (psaddr_t) addr, &c, sizeof(char)) != PS_OK) {
   3.330 +      return false;
   3.331 +    }
   3.332 +    if (i < size - 1) {
   3.333 +      buf[i] = c;
   3.334 +    } else {
   3.335 +      // smaller buffer
   3.336 +      return false;
   3.337 +    }
   3.338 +    i++; addr++;
   3.339 +  }
   3.340  
   3.341 -   buf[i] = '\0';
   3.342 -   return true;
   3.343 +  buf[i] = '\0';
   3.344 +  return true;
   3.345  }
   3.346  
   3.347  #define USE_SHARED_SPACES_SYM "UseSharedSpaces"
   3.348  // mangled name of Arguments::SharedArchivePath
   3.349  #define SHARED_ARCHIVE_PATH_SYM "_ZN9Arguments17SharedArchivePathE"
   3.350 +#define LIBJVM_NAME "/libjvm.so"
   3.351  
   3.352  static bool init_classsharing_workaround(struct ps_prochandle* ph) {
   3.353 -   lib_info* lib = ph->libs;
   3.354 -   while (lib != NULL) {
   3.355 -      // we are iterating over shared objects from the core dump. look for
   3.356 -      // libjvm.so.
   3.357 -      const char *jvm_name = 0;
   3.358 -      if ((jvm_name = strstr(lib->name, "/libjvm.so")) != 0) {
   3.359 -         char classes_jsa[PATH_MAX];
   3.360 -         struct FileMapHeader header;
   3.361 -         size_t n = 0;
   3.362 -         int fd = -1, m = 0;
   3.363 -         uintptr_t base = 0, useSharedSpacesAddr = 0;
   3.364 -         uintptr_t sharedArchivePathAddrAddr = 0, sharedArchivePathAddr = 0;
   3.365 -         jboolean useSharedSpaces = 0;
   3.366 -         map_info* mi = 0;
   3.367 +  lib_info* lib = ph->libs;
   3.368 +  while (lib != NULL) {
   3.369 +    // we are iterating over shared objects from the core dump. look for
   3.370 +    // libjvm.so.
   3.371 +    const char *jvm_name = 0;
   3.372 +    if ((jvm_name = strstr(lib->name, LIBJVM_NAME)) != 0) {
   3.373 +      char classes_jsa[PATH_MAX];
   3.374 +      struct FileMapHeader header;
   3.375 +      int fd = -1;
   3.376 +      int m = 0;
   3.377 +      size_t n = 0;
   3.378 +      uintptr_t base = 0, useSharedSpacesAddr = 0;
   3.379 +      uintptr_t sharedArchivePathAddrAddr = 0, sharedArchivePathAddr = 0;
   3.380 +      jboolean useSharedSpaces = 0;
   3.381 +      map_info* mi = 0;
   3.382  
   3.383 -         memset(classes_jsa, 0, sizeof(classes_jsa));
   3.384 -         jvm_name = lib->name;
   3.385 -         useSharedSpacesAddr = lookup_symbol(ph, jvm_name, USE_SHARED_SPACES_SYM);
   3.386 -         if (useSharedSpacesAddr == 0) {
   3.387 -            print_debug("can't lookup 'UseSharedSpaces' flag\n");
   3.388 -            return false;
   3.389 -         }
   3.390 +      memset(classes_jsa, 0, sizeof(classes_jsa));
   3.391 +      jvm_name = lib->name;
   3.392 +      useSharedSpacesAddr = lookup_symbol(ph, jvm_name, USE_SHARED_SPACES_SYM);
   3.393 +      if (useSharedSpacesAddr == 0) {
   3.394 +        print_debug("can't lookup 'UseSharedSpaces' flag\n");
   3.395 +        return false;
   3.396 +      }
   3.397  
   3.398 -         // Hotspot vm types are not exported to build this library. So
   3.399 -         // using equivalent type jboolean to read the value of
   3.400 -         // UseSharedSpaces which is same as hotspot type "bool".
   3.401 -         if (read_jboolean(ph, useSharedSpacesAddr, &useSharedSpaces) != true) {
   3.402 -            print_debug("can't read the value of 'UseSharedSpaces' flag\n");
   3.403 -            return false;
   3.404 -         }
   3.405 +      // Hotspot vm types are not exported to build this library. So
   3.406 +      // using equivalent type jboolean to read the value of
   3.407 +      // UseSharedSpaces which is same as hotspot type "bool".
   3.408 +      if (read_jboolean(ph, useSharedSpacesAddr, &useSharedSpaces) != true) {
   3.409 +        print_debug("can't read the value of 'UseSharedSpaces' flag\n");
   3.410 +        return false;
   3.411 +      }
   3.412  
   3.413 -         if ((int)useSharedSpaces == 0) {
   3.414 -            print_debug("UseSharedSpaces is false, assuming -Xshare:off!\n");
   3.415 -            return true;
   3.416 -         }
   3.417 +      if ((int)useSharedSpaces == 0) {
   3.418 +        print_debug("UseSharedSpaces is false, assuming -Xshare:off!\n");
   3.419 +        return true;
   3.420 +      }
   3.421  
   3.422 -         sharedArchivePathAddrAddr = lookup_symbol(ph, jvm_name, SHARED_ARCHIVE_PATH_SYM);
   3.423 -         if (sharedArchivePathAddrAddr == 0) {
   3.424 -            print_debug("can't lookup shared archive path symbol\n");
   3.425 -            return false;
   3.426 -         }
   3.427 +      sharedArchivePathAddrAddr = lookup_symbol(ph, jvm_name, SHARED_ARCHIVE_PATH_SYM);
   3.428 +      if (sharedArchivePathAddrAddr == 0) {
   3.429 +        print_debug("can't lookup shared archive path symbol\n");
   3.430 +        return false;
   3.431 +      }
   3.432  
   3.433 -         if (read_pointer(ph, sharedArchivePathAddrAddr, &sharedArchivePathAddr) != true) {
   3.434 -            print_debug("can't read shared archive path pointer\n");
   3.435 -            return false;
   3.436 -         }
   3.437 +      if (read_pointer(ph, sharedArchivePathAddrAddr, &sharedArchivePathAddr) != true) {
   3.438 +        print_debug("can't read shared archive path pointer\n");
   3.439 +        return false;
   3.440 +      }
   3.441  
   3.442 -         if (read_string(ph, sharedArchivePathAddr, classes_jsa, sizeof(classes_jsa)) != true) {
   3.443 -            print_debug("can't read shared archive path value\n");
   3.444 -            return false;
   3.445 -         }
   3.446 +      if (read_string(ph, sharedArchivePathAddr, classes_jsa, sizeof(classes_jsa)) != true) {
   3.447 +        print_debug("can't read shared archive path value\n");
   3.448 +        return false;
   3.449 +      }
   3.450  
   3.451 -         print_debug("looking for %s\n", classes_jsa);
   3.452 -         // open the class sharing archive file
   3.453 -         fd = pathmap_open(classes_jsa);
   3.454 -         if (fd < 0) {
   3.455 -            print_debug("can't open %s!\n", classes_jsa);
   3.456 -            ph->core->classes_jsa_fd = -1;
   3.457 -            return false;
   3.458 -         } else {
   3.459 -            print_debug("opened %s\n", classes_jsa);
   3.460 -         }
   3.461 +      print_debug("looking for %s\n", classes_jsa);
   3.462 +      // open the class sharing archive file
   3.463 +      fd = pathmap_open(classes_jsa);
   3.464 +      if (fd < 0) {
   3.465 +        print_debug("can't open %s!\n", classes_jsa);
   3.466 +        ph->core->classes_jsa_fd = -1;
   3.467 +        return false;
   3.468 +      } else {
   3.469 +        print_debug("opened %s\n", classes_jsa);
   3.470 +      }
   3.471  
   3.472 -         // read FileMapHeader from the file
   3.473 -         memset(&header, 0, sizeof(struct FileMapHeader));
   3.474 -         if ((n = read(fd, &header, sizeof(struct FileMapHeader)))
   3.475 -              != sizeof(struct FileMapHeader)) {
   3.476 -            print_debug("can't read shared archive file map header from %s\n", classes_jsa);
   3.477 -            close(fd);
   3.478 -            return false;
   3.479 -         }
   3.480 +      // read FileMapHeader from the file
   3.481 +      memset(&header, 0, sizeof(struct FileMapHeader));
   3.482 +      if ((n = read(fd, &header, sizeof(struct FileMapHeader)))
   3.483 +           != sizeof(struct FileMapHeader)) {
   3.484 +        print_debug("can't read shared archive file map header from %s\n", classes_jsa);
   3.485 +        close(fd);
   3.486 +        return false;
   3.487 +      }
   3.488  
   3.489 -         // check file magic
   3.490 -         if (header._magic != 0xf00baba2) {
   3.491 -            print_debug("%s has bad shared archive file magic number 0x%x, expecing 0xf00baba2\n",
   3.492 -                        classes_jsa, header._magic);
   3.493 -            close(fd);
   3.494 -            return false;
   3.495 -         }
   3.496 +      // check file magic
   3.497 +      if (header._magic != 0xf00baba2) {
   3.498 +        print_debug("%s has bad shared archive file magic number 0x%x, expecing 0xf00baba2\n",
   3.499 +                     classes_jsa, header._magic);
   3.500 +        close(fd);
   3.501 +        return false;
   3.502 +      }
   3.503  
   3.504 -         // check version
   3.505 -         if (header._version != CURRENT_ARCHIVE_VERSION) {
   3.506 -            print_debug("%s has wrong shared archive file version %d, expecting %d\n",
   3.507 -                        classes_jsa, header._version, CURRENT_ARCHIVE_VERSION);
   3.508 -            close(fd);
   3.509 -            return false;
   3.510 -         }
   3.511 +      // check version
   3.512 +      if (header._version != CURRENT_ARCHIVE_VERSION) {
   3.513 +        print_debug("%s has wrong shared archive file version %d, expecting %d\n",
   3.514 +                     classes_jsa, header._version, CURRENT_ARCHIVE_VERSION);
   3.515 +        close(fd);
   3.516 +        return false;
   3.517 +      }
   3.518  
   3.519 -         ph->core->classes_jsa_fd = fd;
   3.520 -         // add read-only maps from classes.jsa to the list of maps
   3.521 -         for (m = 0; m < NUM_SHARED_MAPS; m++) {
   3.522 -            if (header._space[m]._read_only) {
   3.523 -               base = (uintptr_t) header._space[m]._base;
   3.524 -               // no need to worry about the fractional pages at-the-end.
   3.525 -               // possible fractional pages are handled by core_read_data.
   3.526 -               add_class_share_map_info(ph, (off_t) header._space[m]._file_offset,
   3.527 -                         base, (size_t) header._space[m]._used);
   3.528 -               print_debug("added a share archive map at 0x%lx\n", base);
   3.529 -            }
   3.530 -         }
   3.531 -         return true;
   3.532 +      ph->core->classes_jsa_fd = fd;
   3.533 +      // add read-only maps from classes.jsa to the list of maps
   3.534 +      for (m = 0; m < NUM_SHARED_MAPS; m++) {
   3.535 +        if (header._space[m]._read_only) {
   3.536 +          base = (uintptr_t) header._space[m]._base;
   3.537 +          // no need to worry about the fractional pages at-the-end.
   3.538 +          // possible fractional pages are handled by core_read_data.
   3.539 +          add_class_share_map_info(ph, (off_t) header._space[m]._file_offset,
   3.540 +                                   base, (size_t) header._space[m]._used);
   3.541 +          print_debug("added a share archive map at 0x%lx\n", base);
   3.542 +        }
   3.543        }
   3.544 -      lib = lib->next;
   3.545 +      return true;
   3.546     }
   3.547 -   return true;
   3.548 +   lib = lib->next;
   3.549 +  }
   3.550 +  return true;
   3.551  }
   3.552  
   3.553  
   3.554 @@ -396,54 +404,58 @@
   3.555  // callback for sorting the array of map_info pointers.
   3.556  static int core_cmp_mapping(const void *lhsp, const void *rhsp)
   3.557  {
   3.558 -   const map_info *lhs = *((const map_info **)lhsp);
   3.559 -   const map_info *rhs = *((const map_info **)rhsp);
   3.560 +  const map_info *lhs = *((const map_info **)lhsp);
   3.561 +  const map_info *rhs = *((const map_info **)rhsp);
   3.562  
   3.563 -   if (lhs->vaddr == rhs->vaddr)
   3.564 -      return (0);
   3.565 +  if (lhs->vaddr == rhs->vaddr) {
   3.566 +    return (0);
   3.567 +  }
   3.568  
   3.569 -   return (lhs->vaddr < rhs->vaddr ? -1 : 1);
   3.570 +  return (lhs->vaddr < rhs->vaddr ? -1 : 1);
   3.571  }
   3.572  
   3.573  // we sort map_info by starting virtual address so that we can do
   3.574  // binary search to read from an address.
   3.575  static bool sort_map_array(struct ps_prochandle* ph) {
   3.576 -   size_t num_maps = ph->core->num_maps;
   3.577 -   map_info* map = ph->core->maps;
   3.578 -   int i = 0;
   3.579 +  size_t num_maps = ph->core->num_maps;
   3.580 +  map_info* map = ph->core->maps;
   3.581 +  int i = 0;
   3.582  
   3.583 -   // allocate map_array
   3.584 -   map_info** array;
   3.585 -   if ( (array = (map_info**) malloc(sizeof(map_info*) * num_maps)) == NULL) {
   3.586 -      print_debug("can't allocate memory for map array\n");
   3.587 -      return false;
   3.588 -   }
   3.589 +  // allocate map_array
   3.590 +  map_info** array;
   3.591 +  if ( (array = (map_info**) malloc(sizeof(map_info*) * num_maps)) == NULL) {
   3.592 +    print_debug("can't allocate memory for map array\n");
   3.593 +    return false;
   3.594 +  }
   3.595  
   3.596 -   // add maps to array
   3.597 -   while (map) {
   3.598 -      array[i] = map;
   3.599 -      i++;
   3.600 -      map = map->next;
   3.601 -   }
   3.602 +  // add maps to array
   3.603 +  while (map) {
   3.604 +    array[i] = map;
   3.605 +    i++;
   3.606 +    map = map->next;
   3.607 +  }
   3.608  
   3.609 -   // sort is called twice. If this is second time, clear map array
   3.610 -   if (ph->core->map_array) free(ph->core->map_array);
   3.611 -   ph->core->map_array = array;
   3.612 -   // sort the map_info array by base virtual address.
   3.613 -   qsort(ph->core->map_array, ph->core->num_maps, sizeof (map_info*),
   3.614 -            core_cmp_mapping);
   3.615 +  // sort is called twice. If this is second time, clear map array
   3.616 +  if (ph->core->map_array) {
   3.617 +    free(ph->core->map_array);
   3.618 +  }
   3.619  
   3.620 -   // print map
   3.621 -   if (is_debug()) {
   3.622 -      int j = 0;
   3.623 -      print_debug("---- sorted virtual address map ----\n");
   3.624 -      for (j = 0; j < ph->core->num_maps; j++) {
   3.625 -        print_debug("base = 0x%lx\tsize = %zu\n", ph->core->map_array[j]->vaddr,
   3.626 -                                         ph->core->map_array[j]->memsz);
   3.627 -      }
   3.628 -   }
   3.629 +  ph->core->map_array = array;
   3.630 +  // sort the map_info array by base virtual address.
   3.631 +  qsort(ph->core->map_array, ph->core->num_maps, sizeof (map_info*),
   3.632 +        core_cmp_mapping);
   3.633  
   3.634 -   return true;
   3.635 +  // print map
   3.636 +  if (is_debug()) {
   3.637 +    int j = 0;
   3.638 +    print_debug("---- sorted virtual address map ----\n");
   3.639 +    for (j = 0; j < ph->core->num_maps; j++) {
   3.640 +      print_debug("base = 0x%lx\tsize = %zu\n", ph->core->map_array[j]->vaddr,
   3.641 +                  ph->core->map_array[j]->memsz);
   3.642 +    }
   3.643 +  }
   3.644 +
   3.645 +  return true;
   3.646  }
   3.647  
   3.648  #ifndef MIN
   3.649 @@ -460,16 +472,18 @@
   3.650        off_t off;
   3.651        int fd;
   3.652  
   3.653 -      if (mp == NULL)
   3.654 +      if (mp == NULL) {
   3.655           break;  /* No mapping for this address */
   3.656 +      }
   3.657  
   3.658        fd = mp->fd;
   3.659        mapoff = addr - mp->vaddr;
   3.660        len = MIN(resid, mp->memsz - mapoff);
   3.661        off = mp->offset + mapoff;
   3.662  
   3.663 -      if ((len = pread(fd, buf, len, off)) <= 0)
   3.664 +      if ((len = pread(fd, buf, len, off)) <= 0) {
   3.665           break;
   3.666 +      }
   3.667  
   3.668        resid -= len;
   3.669        addr += len;
   3.670 @@ -625,8 +639,9 @@
   3.671                                     notep->n_type, notep->n_descsz);
   3.672  
   3.673        if (notep->n_type == NT_PRSTATUS) {
   3.674 -         if (core_handle_prstatus(ph, descdata, notep->n_descsz) != true)
   3.675 -            return false;
   3.676 +        if (core_handle_prstatus(ph, descdata, notep->n_descsz) != true) {
   3.677 +          return false;
   3.678 +        }
   3.679        }
   3.680        p = descdata + ROUNDUP(notep->n_descsz, 4);
   3.681     }
   3.682 @@ -654,7 +669,7 @@
   3.683      * contains a set of saved /proc structures), and PT_LOAD (which
   3.684      * represents a memory mapping from the process's address space).
   3.685      *
   3.686 -    * Difference b/w Solaris PT_NOTE and Linux PT_NOTE:
   3.687 +    * Difference b/w Solaris PT_NOTE and Linux/BSD PT_NOTE:
   3.688      *
   3.689      *     In Solaris there are two PT_NOTE segments the first PT_NOTE (if present)
   3.690      *     contains /proc structs in the pre-2.6 unstructured /proc format. the last
   3.691 @@ -674,7 +689,9 @@
   3.692      for (core_php = phbuf, i = 0; i < core_ehdr->e_phnum; i++) {
   3.693        switch (core_php->p_type) {
   3.694           case PT_NOTE:
   3.695 -            if (core_handle_note(ph, core_php) != true) goto err;
   3.696 +            if (core_handle_note(ph, core_php) != true) {
   3.697 +              goto err;
   3.698 +            }
   3.699              break;
   3.700  
   3.701           case PT_LOAD: {
   3.702 @@ -832,60 +849,62 @@
   3.703  // read shared library info from runtime linker's data structures.
   3.704  // This work is done by librtlb_db in Solaris
   3.705  static bool read_shared_lib_info(struct ps_prochandle* ph) {
   3.706 -   uintptr_t addr = ph->core->dynamic_addr;
   3.707 -   uintptr_t debug_base;
   3.708 -   uintptr_t first_link_map_addr;
   3.709 -   uintptr_t ld_base_addr;
   3.710 -   uintptr_t link_map_addr;
   3.711 -   uintptr_t lib_base_diff;
   3.712 -   uintptr_t lib_base;
   3.713 -   uintptr_t lib_name_addr;
   3.714 -   char lib_name[BUF_SIZE];
   3.715 -   ELF_DYN dyn;
   3.716 -   ELF_EHDR elf_ehdr;
   3.717 -   int lib_fd;
   3.718 +  uintptr_t addr = ph->core->dynamic_addr;
   3.719 +  uintptr_t debug_base;
   3.720 +  uintptr_t first_link_map_addr;
   3.721 +  uintptr_t ld_base_addr;
   3.722 +  uintptr_t link_map_addr;
   3.723 +  uintptr_t lib_base_diff;
   3.724 +  uintptr_t lib_base;
   3.725 +  uintptr_t lib_name_addr;
   3.726 +  char lib_name[BUF_SIZE];
   3.727 +  ELF_DYN dyn;
   3.728 +  ELF_EHDR elf_ehdr;
   3.729 +  int lib_fd;
   3.730  
   3.731 -   // _DYNAMIC has information of the form
   3.732 -   //         [tag] [data] [tag] [data] .....
   3.733 -   // Both tag and data are pointer sized.
   3.734 -   // We look for dynamic info with DT_DEBUG. This has shared object info.
   3.735 -   // refer to struct r_debug in link.h
   3.736 +  // _DYNAMIC has information of the form
   3.737 +  //         [tag] [data] [tag] [data] .....
   3.738 +  // Both tag and data are pointer sized.
   3.739 +  // We look for dynamic info with DT_DEBUG. This has shared object info.
   3.740 +  // refer to struct r_debug in link.h
   3.741  
   3.742 -   dyn.d_tag = DT_NULL;
   3.743 -   while (dyn.d_tag != DT_DEBUG) {
   3.744 -      if (ps_pdread(ph, (psaddr_t) addr, &dyn, sizeof(ELF_DYN)) != PS_OK) {
   3.745 -         print_debug("can't read debug info from _DYNAMIC\n");
   3.746 -         return false;
   3.747 -      }
   3.748 -      addr += sizeof(ELF_DYN);
   3.749 -   }
   3.750 +  dyn.d_tag = DT_NULL;
   3.751 +  while (dyn.d_tag != DT_DEBUG) {
   3.752 +    if (ps_pdread(ph, (psaddr_t) addr, &dyn, sizeof(ELF_DYN)) != PS_OK) {
   3.753 +      print_debug("can't read debug info from _DYNAMIC\n");
   3.754 +      return false;
   3.755 +    }
   3.756 +    addr += sizeof(ELF_DYN);
   3.757 +  }
   3.758  
   3.759 -   // we have got Dyn entry with DT_DEBUG
   3.760 -   debug_base = dyn.d_un.d_ptr;
   3.761 -   // at debug_base we have struct r_debug. This has first link map in r_map field
   3.762 -   if (ps_pdread(ph, (psaddr_t) debug_base + FIRST_LINK_MAP_OFFSET,
   3.763 +  // we have got Dyn entry with DT_DEBUG
   3.764 +  debug_base = dyn.d_un.d_ptr;
   3.765 +  // at debug_base we have struct r_debug. This has first link map in r_map field
   3.766 +  if (ps_pdread(ph, (psaddr_t) debug_base + FIRST_LINK_MAP_OFFSET,
   3.767                   &first_link_map_addr, sizeof(uintptr_t)) != PS_OK) {
   3.768 -      print_debug("can't read first link map address\n");
   3.769 +    print_debug("can't read first link map address\n");
   3.770 +    return false;
   3.771 +  }
   3.772 +
   3.773 +  // read ld_base address from struct r_debug
   3.774 +  if (ps_pdread(ph, (psaddr_t) debug_base + LD_BASE_OFFSET, &ld_base_addr,
   3.775 +                 sizeof(uintptr_t)) != PS_OK) {
   3.776 +    print_debug("can't read ld base address\n");
   3.777 +    return false;
   3.778 +  }
   3.779 +  ph->core->ld_base_addr = ld_base_addr;
   3.780 +
   3.781 +  print_debug("interpreter base address is 0x%lx\n", ld_base_addr);
   3.782 +
   3.783 +  // now read segments from interp (i.e ld.so or ld-linux.so or ld-elf.so)
   3.784 +  if (read_interp_segments(ph) != true) {
   3.785        return false;
   3.786 -   }
   3.787 +  }
   3.788  
   3.789 -   // read ld_base address from struct r_debug
   3.790 -   if (ps_pdread(ph, (psaddr_t) debug_base + LD_BASE_OFFSET, &ld_base_addr,
   3.791 -                 sizeof(uintptr_t)) != PS_OK) {
   3.792 -      print_debug("can't read ld base address\n");
   3.793 -      return false;
   3.794 -   }
   3.795 -   ph->core->ld_base_addr = ld_base_addr;
   3.796 -
   3.797 -   print_debug("interpreter base address is 0x%lx\n", ld_base_addr);
   3.798 -
   3.799 -   // now read segments from interp (i.e ld.so or ld-linux.so)
   3.800 -   if (read_interp_segments(ph) != true)
   3.801 -      return false;
   3.802 -
   3.803 -   // after adding interpreter (ld.so) mappings sort again
   3.804 -   if (sort_map_array(ph) != true)
   3.805 -      return false;
   3.806 +  // after adding interpreter (ld.so) mappings sort again
   3.807 +  if (sort_map_array(ph) != true) {
   3.808 +    return false;
   3.809 +  }
   3.810  
   3.811     print_debug("first link map is at 0x%lx\n", first_link_map_addr);
   3.812  
   3.813 @@ -950,95 +969,102 @@
   3.814           }
   3.815        }
   3.816  
   3.817 -      // read next link_map address
   3.818 -      if (ps_pdread(ph, (psaddr_t) link_map_addr + LINK_MAP_NEXT_OFFSET,
   3.819 -                        &link_map_addr, sizeof(uintptr_t)) != PS_OK) {
   3.820 -         print_debug("can't read next link in link_map\n");
   3.821 -         return false;
   3.822 -      }
   3.823 -   }
   3.824 +    // read next link_map address
   3.825 +    if (ps_pdread(ph, (psaddr_t) link_map_addr + LINK_MAP_NEXT_OFFSET,
   3.826 +                   &link_map_addr, sizeof(uintptr_t)) != PS_OK) {
   3.827 +      print_debug("can't read next link in link_map\n");
   3.828 +      return false;
   3.829 +    }
   3.830 +  }
   3.831  
   3.832 -   return true;
   3.833 +  return true;
   3.834  }
   3.835  
   3.836  // the one and only one exposed stuff from this file
   3.837  struct ps_prochandle* Pgrab_core(const char* exec_file, const char* core_file) {
   3.838 -   ELF_EHDR core_ehdr;
   3.839 -   ELF_EHDR exec_ehdr;
   3.840 -   ELF_EHDR lib_ehdr;
   3.841 +  ELF_EHDR core_ehdr;
   3.842 +  ELF_EHDR exec_ehdr;
   3.843 +  ELF_EHDR lib_ehdr;
   3.844  
   3.845 -   struct ps_prochandle* ph = (struct ps_prochandle*) calloc(1, sizeof(struct ps_prochandle));
   3.846 -   if (ph == NULL) {
   3.847 -      print_debug("can't allocate ps_prochandle\n");
   3.848 -      return NULL;
   3.849 -   }
   3.850 +  struct ps_prochandle* ph = (struct ps_prochandle*) calloc(1, sizeof(struct ps_prochandle));
   3.851 +  if (ph == NULL) {
   3.852 +    print_debug("can't allocate ps_prochandle\n");
   3.853 +    return NULL;
   3.854 +  }
   3.855  
   3.856 -   if ((ph->core = (struct core_data*) calloc(1, sizeof(struct core_data))) == NULL) {
   3.857 -      free(ph);
   3.858 -      print_debug("can't allocate ps_prochandle\n");
   3.859 -      return NULL;
   3.860 -   }
   3.861 +  if ((ph->core = (struct core_data*) calloc(1, sizeof(struct core_data))) == NULL) {
   3.862 +    free(ph);
   3.863 +    print_debug("can't allocate ps_prochandle\n");
   3.864 +    return NULL;
   3.865 +  }
   3.866  
   3.867 -   // initialize ph
   3.868 -   ph->ops = &core_ops;
   3.869 -   ph->core->core_fd   = -1;
   3.870 -   ph->core->exec_fd   = -1;
   3.871 -   ph->core->interp_fd = -1;
   3.872 +  // initialize ph
   3.873 +  ph->ops = &core_ops;
   3.874 +  ph->core->core_fd   = -1;
   3.875 +  ph->core->exec_fd   = -1;
   3.876 +  ph->core->interp_fd = -1;
   3.877  
   3.878 -   // open the core file
   3.879 -   if ((ph->core->core_fd = open(core_file, O_RDONLY)) < 0) {
   3.880 -      print_debug("can't open core file\n");
   3.881 -      goto err;
   3.882 -   }
   3.883 +  // open the core file
   3.884 +  if ((ph->core->core_fd = open(core_file, O_RDONLY)) < 0) {
   3.885 +    print_debug("can't open core file\n");
   3.886 +    goto err;
   3.887 +  }
   3.888  
   3.889 -   // read core file ELF header
   3.890 -   if (read_elf_header(ph->core->core_fd, &core_ehdr) != true || core_ehdr.e_type != ET_CORE) {
   3.891 -      print_debug("core file is not a valid ELF ET_CORE file\n");
   3.892 -      goto err;
   3.893 -   }
   3.894 +  // read core file ELF header
   3.895 +  if (read_elf_header(ph->core->core_fd, &core_ehdr) != true || core_ehdr.e_type != ET_CORE) {
   3.896 +    print_debug("core file is not a valid ELF ET_CORE file\n");
   3.897 +    goto err;
   3.898 +  }
   3.899  
   3.900 -   if ((ph->core->exec_fd = open(exec_file, O_RDONLY)) < 0) {
   3.901 -      print_debug("can't open executable file\n");
   3.902 -      goto err;
   3.903 -   }
   3.904 +  if ((ph->core->exec_fd = open(exec_file, O_RDONLY)) < 0) {
   3.905 +    print_debug("can't open executable file\n");
   3.906 +    goto err;
   3.907 +  }
   3.908  
   3.909 -   if (read_elf_header(ph->core->exec_fd, &exec_ehdr) != true || exec_ehdr.e_type != ET_EXEC) {
   3.910 -      print_debug("executable file is not a valid ELF ET_EXEC file\n");
   3.911 -      goto err;
   3.912 -   }
   3.913 +  if (read_elf_header(ph->core->exec_fd, &exec_ehdr) != true || exec_ehdr.e_type != ET_EXEC) {
   3.914 +    print_debug("executable file is not a valid ELF ET_EXEC file\n");
   3.915 +    goto err;
   3.916 +  }
   3.917  
   3.918 -   // process core file segments
   3.919 -   if (read_core_segments(ph, &core_ehdr) != true)
   3.920 -      goto err;
   3.921 +  // process core file segments
   3.922 +  if (read_core_segments(ph, &core_ehdr) != true) {
   3.923 +    goto err;
   3.924 +  }
   3.925  
   3.926 -   // process exec file segments
   3.927 -   if (read_exec_segments(ph, &exec_ehdr) != true)
   3.928 -      goto err;
   3.929 +  // process exec file segments
   3.930 +  if (read_exec_segments(ph, &exec_ehdr) != true) {
   3.931 +    goto err;
   3.932 +  }
   3.933  
   3.934 -   // exec file is also treated like a shared object for symbol search
   3.935 -   if (add_lib_info_fd(ph, exec_file, ph->core->exec_fd,
   3.936 -                       (uintptr_t)0 + find_base_address(ph->core->exec_fd, &exec_ehdr)) == NULL)
   3.937 -      goto err;
   3.938 +  // exec file is also treated like a shared object for symbol search
   3.939 +  if (add_lib_info_fd(ph, exec_file, ph->core->exec_fd,
   3.940 +                      (uintptr_t)0 + find_base_address(ph->core->exec_fd, &exec_ehdr)) == NULL) {
   3.941 +    goto err;
   3.942 +  }
   3.943  
   3.944 -   // allocate and sort maps into map_array, we need to do this
   3.945 -   // here because read_shared_lib_info needs to read from debuggee
   3.946 -   // address space
   3.947 -   if (sort_map_array(ph) != true)
   3.948 -      goto err;
   3.949 +  // allocate and sort maps into map_array, we need to do this
   3.950 +  // here because read_shared_lib_info needs to read from debuggee
   3.951 +  // address space
   3.952 +  if (sort_map_array(ph) != true) {
   3.953 +    goto err;
   3.954 +  }
   3.955  
   3.956 -   if (read_shared_lib_info(ph) != true)
   3.957 -      goto err;
   3.958 +  if (read_shared_lib_info(ph) != true) {
   3.959 +    goto err;
   3.960 +  }
   3.961  
   3.962 -   // sort again because we have added more mappings from shared objects
   3.963 -   if (sort_map_array(ph) != true)
   3.964 -      goto err;
   3.965 +  // sort again because we have added more mappings from shared objects
   3.966 +  if (sort_map_array(ph) != true) {
   3.967 +    goto err;
   3.968 +  }
   3.969  
   3.970 -   if (init_classsharing_workaround(ph) != true)
   3.971 -      goto err;
   3.972 +  if (init_classsharing_workaround(ph) != true) {
   3.973 +    goto err;
   3.974 +  }
   3.975  
   3.976 -   return ph;
   3.977 +  return ph;
   3.978  
   3.979  err:
   3.980 -   Prelease(ph);
   3.981 -   return NULL;
   3.982 +  Prelease(ph);
   3.983 +  return NULL;
   3.984  }
     4.1 --- a/agent/src/share/classes/sun/jvm/hotspot/asm/Disassembler.java	Sun Oct 13 21:14:04 2013 +0100
     4.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/asm/Disassembler.java	Thu Oct 17 14:20:57 2013 -0700
     4.3 @@ -67,6 +67,13 @@
     4.4           String libname = "hsdis";
     4.5           String arch = System.getProperty("os.arch");
     4.6           if (os.lastIndexOf("Windows", 0) != -1) {
     4.7 +            if (arch.equals("x86")) {
     4.8 +               libname +=  "-i386";
     4.9 +            } else if (arch.equals("amd64")) {
    4.10 +               libname +=  "-amd64";
    4.11 +            } else {
    4.12 +               libname +=  "-" + arch;
    4.13 +            }
    4.14              path.append(sep + "bin" + sep);
    4.15              libname += ".dll";
    4.16           } else if (os.lastIndexOf("SunOS", 0) != -1) {
     5.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     5.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/memory/ProtectionDomainCacheEntry.java	Thu Oct 17 14:20:57 2013 -0700
     5.3 @@ -0,0 +1,56 @@
     5.4 +/*
     5.5 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
     5.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     5.7 + *
     5.8 + * This code is free software; you can redistribute it and/or modify it
     5.9 + * under the terms of the GNU General Public License version 2 only, as
    5.10 + * published by the Free Software Foundation.
    5.11 + *
    5.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    5.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    5.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    5.15 + * version 2 for more details (a copy is included in the LICENSE file that
    5.16 + * accompanied this code).
    5.17 + *
    5.18 + * You should have received a copy of the GNU General Public License version
    5.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    5.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    5.21 + *
    5.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    5.23 + * or visit www.oracle.com if you need additional information or have any
    5.24 + * questions.
    5.25 + *
    5.26 + */
    5.27 +
    5.28 +package sun.jvm.hotspot.memory;
    5.29 +
    5.30 +import java.util.*;
    5.31 +import sun.jvm.hotspot.debugger.*;
    5.32 +import sun.jvm.hotspot.oops.*;
    5.33 +import sun.jvm.hotspot.runtime.*;
    5.34 +import sun.jvm.hotspot.types.*;
    5.35 +
    5.36 +public class ProtectionDomainCacheEntry extends VMObject {
    5.37 +  private static sun.jvm.hotspot.types.OopField protectionDomainField;
    5.38 +
    5.39 +  static {
    5.40 +    VM.registerVMInitializedObserver(new Observer() {
    5.41 +        public void update(Observable o, Object data) {
    5.42 +          initialize(VM.getVM().getTypeDataBase());
    5.43 +        }
    5.44 +      });
    5.45 +  }
    5.46 +
    5.47 +  private static synchronized void initialize(TypeDataBase db) {
    5.48 +    Type type = db.lookupType("ProtectionDomainCacheEntry");
    5.49 +    protectionDomainField = type.getOopField("_literal");
    5.50 +  }
    5.51 +
    5.52 +  public ProtectionDomainCacheEntry(Address addr) {
    5.53 +    super(addr);
    5.54 +  }
    5.55 +
    5.56 +  public Oop protectionDomain() {
    5.57 +    return VM.getVM().getObjectHeap().newOop(protectionDomainField.getValue(addr));
    5.58 +  }
    5.59 +}
     6.1 --- a/agent/src/share/classes/sun/jvm/hotspot/memory/ProtectionDomainEntry.java	Sun Oct 13 21:14:04 2013 +0100
     6.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/memory/ProtectionDomainEntry.java	Thu Oct 17 14:20:57 2013 -0700
     6.3 @@ -1,5 +1,5 @@
     6.4  /*
     6.5 - * Copyright (c) 2001, Oracle and/or its affiliates. All rights reserved.
     6.6 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
     6.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     6.8   *
     6.9   * This code is free software; you can redistribute it and/or modify it
    6.10 @@ -32,7 +32,7 @@
    6.11  
    6.12  public class ProtectionDomainEntry extends VMObject {
    6.13    private static AddressField nextField;
    6.14 -  private static sun.jvm.hotspot.types.OopField protectionDomainField;
    6.15 +  private static AddressField pdCacheField;
    6.16  
    6.17    static {
    6.18      VM.registerVMInitializedObserver(new Observer() {
    6.19 @@ -46,7 +46,7 @@
    6.20      Type type = db.lookupType("ProtectionDomainEntry");
    6.21  
    6.22      nextField = type.getAddressField("_next");
    6.23 -    protectionDomainField = type.getOopField("_protection_domain");
    6.24 +    pdCacheField = type.getAddressField("_pd_cache");
    6.25    }
    6.26  
    6.27    public ProtectionDomainEntry(Address addr) {
    6.28 @@ -54,10 +54,12 @@
    6.29    }
    6.30  
    6.31    public ProtectionDomainEntry next() {
    6.32 -    return (ProtectionDomainEntry) VMObjectFactory.newObject(ProtectionDomainEntry.class, addr);
    6.33 +    return (ProtectionDomainEntry) VMObjectFactory.newObject(ProtectionDomainEntry.class, nextField.getValue(addr));
    6.34    }
    6.35  
    6.36    public Oop protectionDomain() {
    6.37 -    return VM.getVM().getObjectHeap().newOop(protectionDomainField.getValue(addr));
    6.38 +    ProtectionDomainCacheEntry pd_cache = (ProtectionDomainCacheEntry)
    6.39 +      VMObjectFactory.newObject(ProtectionDomainCacheEntry.class, pdCacheField.getValue(addr));
    6.40 +    return pd_cache.protectionDomain();
    6.41    }
    6.42  }
     7.1 --- a/agent/src/share/classes/sun/jvm/hotspot/memory/SymbolTable.java	Sun Oct 13 21:14:04 2013 +0100
     7.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/memory/SymbolTable.java	Thu Oct 17 14:20:57 2013 -0700
     7.3 @@ -44,12 +44,10 @@
     7.4    private static synchronized void initialize(TypeDataBase db) {
     7.5      Type type = db.lookupType("SymbolTable");
     7.6      theTableField  = type.getAddressField("_the_table");
     7.7 -    symbolTableSize = db.lookupIntConstant("SymbolTable::symbol_table_size").intValue();
     7.8    }
     7.9  
    7.10    // Fields
    7.11    private static AddressField theTableField;
    7.12 -  private static int symbolTableSize;
    7.13  
    7.14    // Accessors
    7.15    public static SymbolTable getTheTable() {
    7.16 @@ -57,10 +55,6 @@
    7.17      return (SymbolTable) VMObjectFactory.newObject(SymbolTable.class, tmp);
    7.18    }
    7.19  
    7.20 -  public static int getSymbolTableSize() {
    7.21 -    return symbolTableSize;
    7.22 -  }
    7.23 -
    7.24    public SymbolTable(Address addr) {
    7.25      super(addr);
    7.26    }
     8.1 --- a/agent/src/share/classes/sun/jvm/hotspot/utilities/AbstractHeapGraphWriter.java	Sun Oct 13 21:14:04 2013 +0100
     8.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/AbstractHeapGraphWriter.java	Thu Oct 17 14:20:57 2013 -0700
     8.3 @@ -59,6 +59,7 @@
     8.4  
     8.5                      public boolean doObj(Oop oop) {
     8.6                          try {
     8.7 +                            writeHeapRecordPrologue();
     8.8                              if (oop instanceof TypeArray) {
     8.9                                  writePrimitiveArray((TypeArray)oop);
    8.10                              } else if (oop instanceof ObjArray) {
    8.11 @@ -97,6 +98,7 @@
    8.12                                  // not-a-Java-visible oop
    8.13                                  writeInternalObject(oop);
    8.14                              }
    8.15 +                            writeHeapRecordEpilogue();
    8.16                          } catch (IOException exp) {
    8.17                              throw new RuntimeException(exp);
    8.18                          }
    8.19 @@ -416,6 +418,12 @@
    8.20      protected void writeHeapFooter() throws IOException {
    8.21      }
    8.22  
    8.23 +    protected void writeHeapRecordPrologue() throws IOException {
    8.24 +    }
    8.25 +
    8.26 +    protected void writeHeapRecordEpilogue() throws IOException {
    8.27 +    }
    8.28 +
    8.29      // HeapVisitor, OopVisitor methods can't throw any non-runtime
    8.30      // exception. But, derived class write methods (which are called
    8.31      // from visitor callbacks) may throw IOException. Hence, we throw
     9.1 --- a/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java	Sun Oct 13 21:14:04 2013 +0100
     9.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/HeapHprofBinWriter.java	Thu Oct 17 14:20:57 2013 -0700
     9.3 @@ -44,7 +44,7 @@
     9.4   * WARNING: This format is still under development, and is subject to
     9.5   * change without notice.
     9.6   *
     9.7 - * header    "JAVA PROFILE 1.0.1" (0-terminated)
     9.8 + * header    "JAVA PROFILE 1.0.1" or "JAVA PROFILE 1.0.2" (0-terminated)
     9.9   * u4        size of identifiers. Identifiers are used to represent
    9.10   *            UTF8 strings, objects, stack traces, etc. They usually
    9.11   *            have the same size as host pointers. For example, on
    9.12 @@ -292,11 +292,34 @@
    9.13   *                          0x00000002: cpu sampling on/off
    9.14   *                u2        stack trace depth
    9.15   *
    9.16 + *
    9.17 + * When the header is "JAVA PROFILE 1.0.2" a heap dump can optionally
    9.18 + * be generated as a sequence of heap dump segments. This sequence is
    9.19 + * terminated by an end record. The additional tags allowed by format
    9.20 + * "JAVA PROFILE 1.0.2" are:
    9.21 + *
    9.22 + * HPROF_HEAP_DUMP_SEGMENT  denote a heap dump segment
    9.23 + *
    9.24 + *               [heap dump sub-records]*
    9.25 + *               The same sub-record types allowed by HPROF_HEAP_DUMP
    9.26 + *
    9.27 + * HPROF_HEAP_DUMP_END      denotes the end of a heap dump
    9.28 + *
    9.29   */
    9.30  
    9.31  public class HeapHprofBinWriter extends AbstractHeapGraphWriter {
    9.32 +
    9.33 +    // The heap size threshold used to determine if segmented format
    9.34 +    // ("JAVA PROFILE 1.0.2") should be used.
    9.35 +    private static final long HPROF_SEGMENTED_HEAP_DUMP_THRESHOLD = 2L * 0x40000000;
    9.36 +
    9.37 +    // The approximate size of a heap segment. Used to calculate when to create
    9.38 +    // a new segment.
    9.39 +    private static final long HPROF_SEGMENTED_HEAP_DUMP_SEGMENT_SIZE = 1L * 0x40000000;
    9.40 +
    9.41      // hprof binary file header
    9.42 -    private static final String HPROF_HEADER = "JAVA PROFILE 1.0.1";
    9.43 +    private static final String HPROF_HEADER_1_0_1 = "JAVA PROFILE 1.0.1";
    9.44 +    private static final String HPROF_HEADER_1_0_2 = "JAVA PROFILE 1.0.2";
    9.45  
    9.46      // constants in enum HprofTag
    9.47      private static final int HPROF_UTF8             = 0x01;
    9.48 @@ -312,6 +335,10 @@
    9.49      private static final int HPROF_CPU_SAMPLES      = 0x0D;
    9.50      private static final int HPROF_CONTROL_SETTINGS = 0x0E;
    9.51  
    9.52 +    // 1.0.2 record types
    9.53 +    private static final int HPROF_HEAP_DUMP_SEGMENT = 0x1C;
    9.54 +    private static final int HPROF_HEAP_DUMP_END     = 0x2C;
    9.55 +
    9.56      // Heap dump constants
    9.57      // constants in enum HprofGcTag
    9.58      private static final int HPROF_GC_ROOT_UNKNOWN       = 0xFF;
    9.59 @@ -352,11 +379,9 @@
    9.60      private static final int JVM_SIGNATURE_ARRAY   = '[';
    9.61      private static final int JVM_SIGNATURE_CLASS   = 'L';
    9.62  
    9.63 -
    9.64      public synchronized void write(String fileName) throws IOException {
    9.65          // open file stream and create buffered data output stream
    9.66 -        FileOutputStream fos = new FileOutputStream(fileName);
    9.67 -        FileChannel chn = fos.getChannel();
    9.68 +        fos = new FileOutputStream(fileName);
    9.69          out = new DataOutputStream(new BufferedOutputStream(fos));
    9.70  
    9.71          VM vm = VM.getVM();
    9.72 @@ -385,6 +410,9 @@
    9.73          FLOAT_SIZE = objectHeap.getFloatSize();
    9.74          DOUBLE_SIZE = objectHeap.getDoubleSize();
    9.75  
     9.76 +        // Check whether we should dump the heap as segments
    9.77 +        useSegmentedHeapDump = vm.getUniverse().heap().used() > HPROF_SEGMENTED_HEAP_DUMP_THRESHOLD;
    9.78 +
    9.79          // hprof bin format header
    9.80          writeFileHeader();
    9.81  
    9.82 @@ -394,38 +422,87 @@
    9.83  
    9.84          // hprof UTF-8 symbols section
    9.85          writeSymbols();
    9.86 +
    9.87          // HPROF_LOAD_CLASS records for all classes
    9.88          writeClasses();
    9.89  
    9.90 -        // write heap data now
    9.91 -        out.writeByte((byte)HPROF_HEAP_DUMP);
    9.92 -        out.writeInt(0); // relative timestamp
    9.93 -
    9.94 -        // remember position of dump length, we will fixup
    9.95 -        // length later - hprof format requires length.
    9.96 -        out.flush();
    9.97 -        long dumpStart = chn.position();
    9.98 -
    9.99 -        // write dummy length of 0 and we'll fix it later.
   9.100 -        out.writeInt(0);
   9.101 -
   9.102          // write CLASS_DUMP records
   9.103          writeClassDumpRecords();
   9.104  
   9.105          // this will write heap data into the buffer stream
   9.106          super.write();
   9.107  
   9.108 +        // flush buffer stream.
   9.109 +        out.flush();
   9.110 +
   9.111 +        // Fill in final length
   9.112 +        fillInHeapRecordLength();
   9.113 +
   9.114 +        if (useSegmentedHeapDump) {
   9.115 +            // Write heap segment-end record
   9.116 +            out.writeByte((byte) HPROF_HEAP_DUMP_END);
   9.117 +            out.writeInt(0);
   9.118 +            out.writeInt(0);
   9.119 +        }
   9.120 +
   9.121          // flush buffer stream and throw it.
   9.122          out.flush();
   9.123          out = null;
   9.124  
   9.125 +        // close the file stream
   9.126 +        fos.close();
   9.127 +    }
   9.128 +
   9.129 +    @Override
   9.130 +    protected void writeHeapRecordPrologue() throws IOException {
   9.131 +        if (currentSegmentStart == 0) {
   9.132 +            // write heap data header, depending on heap size use segmented heap
   9.133 +            // format
   9.134 +            out.writeByte((byte) (useSegmentedHeapDump ? HPROF_HEAP_DUMP_SEGMENT
   9.135 +                    : HPROF_HEAP_DUMP));
   9.136 +            out.writeInt(0);
   9.137 +
   9.138 +            // remember position of dump length, we will fixup
   9.139 +            // length later - hprof format requires length.
   9.140 +            out.flush();
   9.141 +            currentSegmentStart = fos.getChannel().position();
   9.142 +
   9.143 +            // write dummy length of 0 and we'll fix it later.
   9.144 +            out.writeInt(0);
   9.145 +        }
   9.146 +    }
   9.147 +
   9.148 +    @Override
   9.149 +    protected void writeHeapRecordEpilogue() throws IOException {
   9.150 +        if (useSegmentedHeapDump) {
   9.151 +            out.flush();
   9.152 +            if ((fos.getChannel().position() - currentSegmentStart - 4) >= HPROF_SEGMENTED_HEAP_DUMP_SEGMENT_SIZE) {
   9.153 +                fillInHeapRecordLength();
   9.154 +                currentSegmentStart = 0;
   9.155 +            }
   9.156 +        }
   9.157 +    }
   9.158 +
   9.159 +    private void fillInHeapRecordLength() throws IOException {
   9.160 +
   9.161          // now get current position to calculate length
   9.162 -        long dumpEnd = chn.position();
   9.163 +        long dumpEnd = fos.getChannel().position();
   9.164 +
   9.165          // calculate length of heap data
   9.166 -        int dumpLen = (int) (dumpEnd - dumpStart - 4);
   9.167 +        long dumpLenLong = (dumpEnd - currentSegmentStart - 4L);
   9.168 +
   9.169 +        // Check length boundary, overflow could happen but is _very_ unlikely
   9.170 +        if(dumpLenLong >= (4L * 0x40000000)){
   9.171 +            throw new RuntimeException("Heap segment size overflow.");
   9.172 +        }
   9.173 +
   9.174 +        // Save the current position
   9.175 +        long currentPosition = fos.getChannel().position();
   9.176  
   9.177          // seek the position to write length
   9.178 -        chn.position(dumpStart);
   9.179 +        fos.getChannel().position(currentSegmentStart);
   9.180 +
   9.181 +        int dumpLen = (int) dumpLenLong;
   9.182  
   9.183          // write length as integer
   9.184          fos.write((dumpLen >>> 24) & 0xFF);
   9.185 @@ -433,8 +510,8 @@
   9.186          fos.write((dumpLen >>> 8) & 0xFF);
   9.187          fos.write((dumpLen >>> 0) & 0xFF);
   9.188  
   9.189 -        // close the file stream
   9.190 -        fos.close();
    9.191 +        // Reset to the previously saved current position
   9.192 +        fos.getChannel().position(currentPosition);
   9.193      }
   9.194  
   9.195      private void writeClassDumpRecords() throws IOException {
   9.196 @@ -443,7 +520,9 @@
   9.197              sysDict.allClassesDo(new SystemDictionary.ClassVisitor() {
   9.198                              public void visit(Klass k) {
   9.199                                  try {
   9.200 +                                    writeHeapRecordPrologue();
   9.201                                      writeClassDumpRecord(k);
   9.202 +                                    writeHeapRecordEpilogue();
   9.203                                  } catch (IOException e) {
   9.204                                      throw new RuntimeException(e);
   9.205                                  }
   9.206 @@ -884,7 +963,12 @@
   9.207      // writes hprof binary file header
   9.208      private void writeFileHeader() throws IOException {
   9.209          // version string
   9.210 -        out.writeBytes(HPROF_HEADER);
   9.211 +        if(useSegmentedHeapDump) {
   9.212 +            out.writeBytes(HPROF_HEADER_1_0_2);
   9.213 +        }
   9.214 +        else {
   9.215 +            out.writeBytes(HPROF_HEADER_1_0_1);
   9.216 +        }
   9.217          out.writeByte((byte)'\0');
   9.218  
   9.219          // write identifier size. we use pointers as identifiers.
   9.220 @@ -976,6 +1060,7 @@
   9.221      private static final int EMPTY_FRAME_DEPTH = -1;
   9.222  
   9.223      private DataOutputStream out;
   9.224 +    private FileOutputStream fos;
   9.225      private Debugger dbg;
   9.226      private ObjectHeap objectHeap;
   9.227      private SymbolTable symTbl;
   9.228 @@ -983,6 +1068,10 @@
   9.229      // oopSize of the debuggee
   9.230      private int OBJ_ID_SIZE;
   9.231  
   9.232 +    // Added for hprof file format 1.0.2 support
   9.233 +    private boolean useSegmentedHeapDump;
   9.234 +    private long currentSegmentStart;
   9.235 +
   9.236      private long BOOLEAN_BASE_OFFSET;
   9.237      private long BYTE_BASE_OFFSET;
   9.238      private long CHAR_BASE_OFFSET;
   9.239 @@ -1005,6 +1094,7 @@
   9.240      private static class ClassData {
   9.241          int instSize;
   9.242          List fields;
   9.243 +
   9.244          ClassData(int instSize, List fields) {
   9.245              this.instSize = instSize;
   9.246              this.fields = fields;
    10.1 --- a/make/hotspot_version	Sun Oct 13 21:14:04 2013 +0100
    10.2 +++ b/make/hotspot_version	Thu Oct 17 14:20:57 2013 -0700
    10.3 @@ -35,7 +35,7 @@
    10.4  
    10.5  HS_MAJOR_VER=25
    10.6  HS_MINOR_VER=0
    10.7 -HS_BUILD_NUMBER=53
    10.8 +HS_BUILD_NUMBER=54
    10.9  
   10.10  JDK_MAJOR_VER=1
   10.11  JDK_MINOR_VER=8
    11.1 --- a/make/windows/makefiles/compile.make	Sun Oct 13 21:14:04 2013 +0100
    11.2 +++ b/make/windows/makefiles/compile.make	Thu Oct 17 14:20:57 2013 -0700
    11.3 @@ -44,6 +44,7 @@
    11.4  #   /GS       Inserts security stack checks in some functions (VS2005 default)
    11.5  #   /Oi       Use intrinsics (in /O2)
    11.6  #   /Od       Disable all optimizations
    11.7 +#   /MP       Use multiple cores for compilation
    11.8  #
    11.9  # NOTE: Normally following any of the above with a '-' will turn off that flag
   11.10  #
   11.11 @@ -208,6 +209,7 @@
   11.12  DEBUG_OPT_OPTION     = /Od
   11.13  GX_OPTION = /EHsc
   11.14  LD_FLAGS = /manifest $(LD_FLAGS)
   11.15 +MP_FLAG = /MP
   11.16  # Manifest Tool - used in VS2005 and later to adjust manifests stored
   11.17  # as resources inside build artifacts.
   11.18  !if "x$(MT)" == "x"
   11.19 @@ -222,6 +224,7 @@
   11.20  DEBUG_OPT_OPTION     = /Od
   11.21  GX_OPTION = /EHsc
   11.22  LD_FLAGS = /manifest $(LD_FLAGS)
   11.23 +MP_FLAG = /MP
   11.24  # Manifest Tool - used in VS2005 and later to adjust manifests stored
   11.25  # as resources inside build artifacts.
   11.26  !if "x$(MT)" == "x"
   11.27 @@ -238,6 +241,7 @@
   11.28  DEBUG_OPT_OPTION     = /Od
   11.29  GX_OPTION = /EHsc
   11.30  LD_FLAGS = /manifest $(LD_FLAGS)
   11.31 +MP_FLAG = /MP
   11.32  # Manifest Tool - used in VS2005 and later to adjust manifests stored
   11.33  # as resources inside build artifacts.
   11.34  !if "x$(MT)" == "x"
   11.35 @@ -250,6 +254,8 @@
   11.36  LD_FLAGS = $(SAFESEH_FLAG) $(LD_FLAGS)
   11.37  !endif
   11.38  
   11.39 +CXX_FLAGS = $(CXX_FLAGS) $(MP_FLAG)
   11.40 +
   11.41  # If NO_OPTIMIZATIONS is defined in the environment, turn everything off
   11.42  !ifdef NO_OPTIMIZATIONS
   11.43  PRODUCT_OPT_OPTION   = $(DEBUG_OPT_OPTION)
    12.1 --- a/make/windows/makefiles/fastdebug.make	Sun Oct 13 21:14:04 2013 +0100
    12.2 +++ b/make/windows/makefiles/fastdebug.make	Thu Oct 17 14:20:57 2013 -0700
    12.3 @@ -38,7 +38,7 @@
    12.4  !include ../local.make
    12.5  !include compile.make
    12.6  
    12.7 -CXX_FLAGS=$(CXX_FLAGS) $(FASTDEBUG_OPT_OPTION) /D "CHECK_UNHANDLED_OOPS"
    12.8 +CXX_FLAGS=$(CXX_FLAGS) $(FASTDEBUG_OPT_OPTION)
    12.9  
   12.10  !include $(WorkSpace)/make/windows/makefiles/vm.make
   12.11  !include local.make
    13.1 --- a/make/windows/makefiles/sa.make	Sun Oct 13 21:14:04 2013 +0100
    13.2 +++ b/make/windows/makefiles/sa.make	Thu Oct 17 14:20:57 2013 -0700
    13.3 @@ -102,7 +102,10 @@
    13.4  !if "$(MT)" != ""
    13.5  SA_LD_FLAGS = -manifest $(SA_LD_FLAGS)
    13.6  !endif
    13.7 -SASRCFILE = $(AGENT_DIR)/src/os/win32/windbg/sawindbg.cpp
    13.8 +
    13.9 +SASRCFILES = $(AGENT_DIR)/src/os/win32/windbg/sawindbg.cpp \
   13.10 +		$(AGENT_DIR)/src/share/native/sadis.c
   13.11 +		            
   13.12  SA_LFLAGS = $(SA_LD_FLAGS) -nologo -subsystem:console -machine:$(MACHINE)
   13.13  !if "$(ENABLE_FULL_DEBUG_SYMBOLS)" == "1"
   13.14  SA_LFLAGS = $(SA_LFLAGS) -map -debug
   13.15 @@ -111,22 +114,24 @@
   13.16  SA_LFLAGS = $(SAFESEH_FLAG) $(SA_LFLAGS)
   13.17  !endif
   13.18  
   13.19 +SA_CFLAGS = $(SA_CFLAGS) $(MP_FLAG)
   13.20 +
    13.21 +# Note that we do not keep sawindbg.obj around as it would then
   13.22  # get included in the dumpbin command in build_vm_def.sh
   13.23  
   13.24  # In VS2005 or VS2008 the link command creates a .manifest file that we want
   13.25  # to insert into the linked artifact so we do not need to track it separately.
   13.26  # Use ";#2" for .dll and ";#1" for .exe in the MT command below:
   13.27 -$(SAWINDBG): $(SASRCFILE)
   13.28 +$(SAWINDBG): $(SASRCFILES)
   13.29  	set INCLUDE=$(SA_INCLUDE)$(INCLUDE)
   13.30  	$(CXX) @<<
   13.31  	  -I"$(BootStrapDir)/include" -I"$(BootStrapDir)/include/win32" 
   13.32  	  -I"$(GENERATED)" $(SA_CFLAGS)
   13.33 -	  $(SASRCFILE)
   13.34 +	  $(SASRCFILES)
   13.35  	  -out:$*.obj
   13.36  <<
   13.37  	set LIB=$(SA_LIB)$(LIB)
   13.38 -	$(LD) -out:$@ -DLL $*.obj dbgeng.lib $(SA_LFLAGS)
   13.39 +	$(LD) -out:$@ -DLL sawindbg.obj sadis.obj dbgeng.lib $(SA_LFLAGS)
   13.40  !if "$(MT)" != ""
   13.41  	$(MT) -manifest $(@F).manifest -outputresource:$(@F);#2
   13.42  !endif
    14.1 --- a/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Sun Oct 13 21:14:04 2013 +0100
    14.2 +++ b/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Thu Oct 17 14:20:57 2013 -0700
    14.3 @@ -37,6 +37,9 @@
    14.4  #include "runtime/vframeArray.hpp"
    14.5  #include "utilities/macros.hpp"
    14.6  #include "vmreg_sparc.inline.hpp"
    14.7 +#if INCLUDE_ALL_GCS
    14.8 +#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
    14.9 +#endif
   14.10  
   14.11  // Implementation of StubAssembler
   14.12  
   14.13 @@ -912,7 +915,7 @@
   14.14          Register tmp2 = G3_scratch;
   14.15          jbyte* byte_map_base = ((CardTableModRefBS*)bs)->byte_map_base;
   14.16  
   14.17 -        Label not_already_dirty, restart, refill;
   14.18 +        Label not_already_dirty, restart, refill, young_card;
   14.19  
   14.20  #ifdef _LP64
   14.21          __ srlx(addr, CardTableModRefBS::card_shift, addr);
   14.22 @@ -924,9 +927,15 @@
   14.23          __ set(rs, cardtable);         // cardtable := <card table base>
   14.24          __ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]
   14.25  
   14.26 +        __ cmp_and_br_short(tmp, G1SATBCardTableModRefBS::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);
   14.27 +
   14.28 +        __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
   14.29 +        __ ldub(addr, cardtable, tmp); // tmp := [addr + cardtable]
   14.30 +
   14.31          assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");
   14.32          __ cmp_and_br_short(tmp, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);
   14.33  
   14.34 +        __ bind(young_card);
   14.35          // We didn't take the branch, so we're already dirty: return.
   14.36          // Use return-from-leaf
   14.37          __ retl();
    15.1 --- a/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Sun Oct 13 21:14:04 2013 +0100
    15.2 +++ b/src/cpu/sparc/vm/macroAssembler_sparc.cpp	Thu Oct 17 14:20:57 2013 -0700
    15.3 @@ -3752,7 +3752,7 @@
    15.4  #define __ masm.
    15.5    address start = __ pc();
    15.6  
    15.7 -  Label not_already_dirty, restart, refill;
    15.8 +  Label not_already_dirty, restart, refill, young_card;
    15.9  
   15.10  #ifdef _LP64
   15.11    __ srlx(O0, CardTableModRefBS::card_shift, O0);
   15.12 @@ -3763,9 +3763,15 @@
   15.13    __ set(addrlit, O1); // O1 := <card table base>
   15.14    __ ldub(O0, O1, O2); // O2 := [O0 + O1]
   15.15  
   15.16 +  __ cmp_and_br_short(O2, G1SATBCardTableModRefBS::g1_young_card_val(), Assembler::equal, Assembler::pt, young_card);
   15.17 +
   15.18 +  __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
   15.19 +  __ ldub(O0, O1, O2); // O2 := [O0 + O1]
   15.20 +
   15.21    assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");
   15.22    __ cmp_and_br_short(O2, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);
   15.23  
   15.24 +  __ bind(young_card);
   15.25    // We didn't take the branch, so we're already dirty: return.
   15.26    // Use return-from-leaf
   15.27    __ retl();
    16.1 --- a/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Sun Oct 13 21:14:04 2013 +0100
    16.2 +++ b/src/cpu/x86/vm/c1_Runtime1_x86.cpp	Thu Oct 17 14:20:57 2013 -0700
    16.3 @@ -38,6 +38,9 @@
    16.4  #include "runtime/vframeArray.hpp"
    16.5  #include "utilities/macros.hpp"
    16.6  #include "vmreg_x86.inline.hpp"
    16.7 +#if INCLUDE_ALL_GCS
    16.8 +#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
    16.9 +#endif
   16.10  
   16.11  
   16.12  // Implementation of StubAssembler
   16.13 @@ -1753,13 +1756,17 @@
   16.14          __ leal(card_addr, __ as_Address(ArrayAddress(cardtable, index)));
   16.15  #endif
   16.16  
   16.17 -        __ cmpb(Address(card_addr, 0), 0);
   16.18 +        __ cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val());
   16.19 +        __ jcc(Assembler::equal, done);
   16.20 +
   16.21 +        __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
   16.22 +        __ cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
   16.23          __ jcc(Assembler::equal, done);
   16.24  
   16.25          // storing region crossing non-NULL, card is clean.
   16.26          // dirty card and log.
   16.27  
   16.28 -        __ movb(Address(card_addr, 0), 0);
   16.29 +        __ movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
   16.30  
   16.31          __ cmpl(queue_index, 0);
   16.32          __ jcc(Assembler::equal, runtime);
    17.1 --- a/src/cpu/x86/vm/macroAssembler_x86.cpp	Sun Oct 13 21:14:04 2013 +0100
    17.2 +++ b/src/cpu/x86/vm/macroAssembler_x86.cpp	Thu Oct 17 14:20:57 2013 -0700
    17.3 @@ -3389,13 +3389,18 @@
    17.4    const Register card_addr = tmp;
    17.5    lea(card_addr, as_Address(ArrayAddress(cardtable, index)));
    17.6  #endif
    17.7 -  cmpb(Address(card_addr, 0), 0);
    17.8 +  cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val());
    17.9    jcc(Assembler::equal, done);
   17.10  
   17.11 +  membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
   17.12 +  cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
   17.13 +  jcc(Assembler::equal, done);
   17.14 +
   17.15 +
   17.16    // storing a region crossing, non-NULL oop, card is clean.
   17.17    // dirty card and log.
   17.18  
   17.19 -  movb(Address(card_addr, 0), 0);
   17.20 +  movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
   17.21  
   17.22    cmpl(queue_index, 0);
   17.23    jcc(Assembler::equal, runtime);
    18.1 --- a/src/os/bsd/vm/osThread_bsd.hpp	Sun Oct 13 21:14:04 2013 +0100
    18.2 +++ b/src/os/bsd/vm/osThread_bsd.hpp	Thu Oct 17 14:20:57 2013 -0700
    18.3 @@ -42,7 +42,7 @@
    18.4  #ifdef __APPLE__
    18.5    typedef thread_t thread_id_t;
    18.6  #else
    18.7 -  typedef pthread_t thread_id_t;
    18.8 +  typedef pid_t thread_id_t;
    18.9  #endif
   18.10  
   18.11    // _pthread_id is the pthread id, which is used by library calls
    19.1 --- a/src/os/bsd/vm/os_bsd.cpp	Sun Oct 13 21:14:04 2013 +0100
    19.2 +++ b/src/os/bsd/vm/os_bsd.cpp	Thu Oct 17 14:20:57 2013 -0700
    19.3 @@ -100,6 +100,7 @@
    19.4  # include <stdint.h>
    19.5  # include <inttypes.h>
    19.6  # include <sys/ioctl.h>
    19.7 +# include <sys/syscall.h>
    19.8  
    19.9  #if defined(__FreeBSD__) || defined(__NetBSD__)
   19.10  # include <elf.h>
   19.11 @@ -152,6 +153,7 @@
   19.12  // utility functions
   19.13  
   19.14  static int SR_initialize();
   19.15 +static void unpackTime(timespec* absTime, bool isAbsolute, jlong time);
   19.16  
   19.17  julong os::available_memory() {
   19.18    return Bsd::available_memory();
   19.19 @@ -247,7 +249,17 @@
   19.20     * since it returns a 64 bit value)
   19.21     */
   19.22    mib[0] = CTL_HW;
   19.23 +
   19.24 +#if defined (HW_MEMSIZE) // Apple
   19.25    mib[1] = HW_MEMSIZE;
   19.26 +#elif defined(HW_PHYSMEM) // Most of BSD
   19.27 +  mib[1] = HW_PHYSMEM;
   19.28 +#elif defined(HW_REALMEM) // Old FreeBSD
   19.29 +  mib[1] = HW_REALMEM;
   19.30 +#else
   19.31 +  #error No ways to get physmem
   19.32 +#endif
   19.33 +
   19.34    len = sizeof(mem_val);
   19.35    if (sysctl(mib, 2, &mem_val, &len, NULL, 0) != -1) {
   19.36         assert(len == sizeof(mem_val), "unexpected data size");
   19.37 @@ -679,18 +691,12 @@
   19.38      return NULL;
   19.39    }
   19.40  
   19.41 +  osthread->set_thread_id(os::Bsd::gettid());
   19.42 +
   19.43  #ifdef __APPLE__
   19.44 -  // thread_id is mach thread on macos, which pthreads graciously caches and provides for us
   19.45 -  mach_port_t thread_id = ::pthread_mach_thread_np(::pthread_self());
   19.46 -  guarantee(thread_id != 0, "thread id missing from pthreads");
   19.47 -  osthread->set_thread_id(thread_id);
   19.48 -
   19.49 -  uint64_t unique_thread_id = locate_unique_thread_id(thread_id);
   19.50 +  uint64_t unique_thread_id = locate_unique_thread_id(osthread->thread_id());
   19.51    guarantee(unique_thread_id != 0, "unique thread id was not found");
   19.52    osthread->set_unique_thread_id(unique_thread_id);
   19.53 -#else
   19.54 -  // thread_id is pthread_id on BSD
   19.55 -  osthread->set_thread_id(::pthread_self());
   19.56  #endif
   19.57    // initialize signal mask for this thread
   19.58    os::Bsd::hotspot_sigmask(thread);
   19.59 @@ -847,18 +853,13 @@
   19.60      return false;
   19.61    }
   19.62  
   19.63 +  osthread->set_thread_id(os::Bsd::gettid());
   19.64 +
   19.65    // Store pthread info into the OSThread
   19.66  #ifdef __APPLE__
   19.67 -  // thread_id is mach thread on macos, which pthreads graciously caches and provides for us
   19.68 -  mach_port_t thread_id = ::pthread_mach_thread_np(::pthread_self());
   19.69 -  guarantee(thread_id != 0, "just checking");
   19.70 -  osthread->set_thread_id(thread_id);
   19.71 -
   19.72 -  uint64_t unique_thread_id = locate_unique_thread_id(thread_id);
   19.73 +  uint64_t unique_thread_id = locate_unique_thread_id(osthread->thread_id());
   19.74    guarantee(unique_thread_id != 0, "just checking");
   19.75    osthread->set_unique_thread_id(unique_thread_id);
   19.76 -#else
   19.77 -  osthread->set_thread_id(::pthread_self());
   19.78  #endif
   19.79    osthread->set_pthread_id(::pthread_self());
   19.80  
   19.81 @@ -1125,6 +1126,30 @@
   19.82    return n;
   19.83  }
   19.84  
   19.85 +// Information of current thread in variety of formats
   19.86 +pid_t os::Bsd::gettid() {
   19.87 +  int retval = -1;
   19.88 +
   19.89 +#ifdef __APPLE__ //XNU kernel
   19.90 +  // despite the fact mach port is actually not a thread id use it
   19.91 +  // instead of syscall(SYS_thread_selfid) as it certainly fits to u4
   19.92 +  retval = ::pthread_mach_thread_np(::pthread_self());
   19.93 +  guarantee(retval != 0, "just checking");
   19.94 +  return retval;
   19.95 +
   19.96 +#elif __FreeBSD__
   19.97 +  retval = syscall(SYS_thr_self);
   19.98 +#elif __OpenBSD__
   19.99 +  retval = syscall(SYS_getthrid);
  19.100 +#elif __NetBSD__
  19.101 +  retval = (pid_t) syscall(SYS__lwp_self);
  19.102 +#endif
  19.103 +
  19.104 +  if (retval == -1) {
  19.105 +    return getpid();
  19.106 +  }
  19.107 +}
  19.108 +
  19.109  intx os::current_thread_id() {
  19.110  #ifdef __APPLE__
  19.111    return (intx)::pthread_mach_thread_np(::pthread_self());
  19.112 @@ -1132,6 +1157,7 @@
  19.113    return (intx)::pthread_self();
  19.114  #endif
  19.115  }
  19.116 +
  19.117  int os::current_process_id() {
  19.118  
  19.119    // Under the old bsd thread library, bsd gives each thread
  19.120 @@ -1904,7 +1930,7 @@
  19.121      bool timedwait(unsigned int sec, int nsec);
  19.122    private:
  19.123      jlong currenttime() const;
  19.124 -    semaphore_t _semaphore;
  19.125 +    os_semaphore_t _semaphore;
  19.126  };
  19.127  
  19.128  Semaphore::Semaphore() : _semaphore(0) {
  19.129 @@ -1972,7 +1998,7 @@
  19.130  
  19.131  bool Semaphore::timedwait(unsigned int sec, int nsec) {
  19.132    struct timespec ts;
  19.133 -  jlong endtime = unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
  19.134 +  unpackTime(&ts, false, (sec * NANOSECS_PER_SEC) + nsec);
  19.135  
  19.136    while (1) {
  19.137      int result = sem_timedwait(&_semaphore, &ts);
    20.1 --- a/src/os/bsd/vm/os_bsd.hpp	Sun Oct 13 21:14:04 2013 +0100
    20.2 +++ b/src/os/bsd/vm/os_bsd.hpp	Thu Oct 17 14:20:57 2013 -0700
    20.3 @@ -84,6 +84,7 @@
    20.4    static void hotspot_sigmask(Thread* thread);
    20.5  
    20.6    static bool is_initial_thread(void);
    20.7 +  static pid_t gettid();
    20.8  
    20.9    static int page_size(void)                                        { return _page_size; }
   20.10    static void set_page_size(int val)                                { _page_size = val; }
    21.1 --- a/src/os/linux/vm/globals_linux.hpp	Sun Oct 13 21:14:04 2013 +0100
    21.2 +++ b/src/os/linux/vm/globals_linux.hpp	Thu Oct 17 14:20:57 2013 -0700
    21.3 @@ -53,7 +53,7 @@
    21.4  // Defines Linux-specific default values. The flags are available on all
    21.5  // platforms, but they may have different default values on other platforms.
    21.6  //
    21.7 -define_pd_global(bool, UseLargePages, true);
    21.8 +define_pd_global(bool, UseLargePages, false);
    21.9  define_pd_global(bool, UseLargePagesIndividualAllocation, false);
   21.10  define_pd_global(bool, UseOSErrorReporting, false);
   21.11  define_pd_global(bool, UseThreadPriorities, true) ;
    22.1 --- a/src/os/linux/vm/os_linux.cpp	Sun Oct 13 21:14:04 2013 +0100
    22.2 +++ b/src/os/linux/vm/os_linux.cpp	Thu Oct 17 14:20:57 2013 -0700
    22.3 @@ -3361,13 +3361,15 @@
    22.4    if (FLAG_IS_DEFAULT(UseHugeTLBFS) &&
    22.5        FLAG_IS_DEFAULT(UseSHM) &&
    22.6        FLAG_IS_DEFAULT(UseTransparentHugePages)) {
    22.7 -    // If UseLargePages is specified on the command line try all methods,
    22.8 -    // if it's default, then try only UseTransparentHugePages.
    22.9 -    if (FLAG_IS_DEFAULT(UseLargePages)) {
   22.10 -      UseTransparentHugePages = true;
   22.11 -    } else {
   22.12 -      UseHugeTLBFS = UseTransparentHugePages = UseSHM = true;
   22.13 -    }
   22.14 +
   22.15 +    // The type of large pages has not been specified by the user.
   22.16 +
   22.17 +    // Try UseHugeTLBFS and then UseSHM.
   22.18 +    UseHugeTLBFS = UseSHM = true;
   22.19 +
   22.20 +    // Don't try UseTransparentHugePages since there are known
   22.21 +    // performance issues with it turned on. This might change in the future.
   22.22 +    UseTransparentHugePages = false;
   22.23    }
   22.24  
   22.25    if (UseTransparentHugePages) {
   22.26 @@ -3393,9 +3395,19 @@
   22.27  }
   22.28  
   22.29  void os::large_page_init() {
   22.30 -  if (!UseLargePages) {
   22.31 +  if (!UseLargePages &&
   22.32 +      !UseTransparentHugePages &&
   22.33 +      !UseHugeTLBFS &&
   22.34 +      !UseSHM) {
   22.35 +    // Not using large pages.
   22.36 +    return;
   22.37 +  }
   22.38 +
   22.39 +  if (!FLAG_IS_DEFAULT(UseLargePages) && !UseLargePages) {
   22.40 +    // The user explicitly turned off large pages.
   22.41 +    // Ignore the rest of the large pages flags.
   22.42 +    UseTransparentHugePages = false;
   22.43      UseHugeTLBFS = false;
   22.44 -    UseTransparentHugePages = false;
   22.45      UseSHM = false;
   22.46      return;
   22.47    }
    23.1 --- a/src/share/vm/classfile/classFileParser.cpp	Sun Oct 13 21:14:04 2013 +0100
    23.2 +++ b/src/share/vm/classfile/classFileParser.cpp	Thu Oct 17 14:20:57 2013 -0700
    23.3 @@ -4080,8 +4080,7 @@
    23.4  
    23.5      // Generate any default methods - default methods are interface methods
    23.6      // that have a default implementation.  This is new with Lambda project.
    23.7 -    if (has_default_methods && !access_flags.is_interface() &&
    23.8 -        local_interfaces->length() > 0) {
    23.9 +    if (has_default_methods && !access_flags.is_interface() ) {
   23.10        DefaultMethods::generate_default_methods(
   23.11            this_klass(), &all_mirandas, CHECK_(nullHandle));
   23.12      }
    24.1 --- a/src/share/vm/classfile/defaultMethods.cpp	Sun Oct 13 21:14:04 2013 +0100
    24.2 +++ b/src/share/vm/classfile/defaultMethods.cpp	Thu Oct 17 14:20:57 2013 -0700
    24.3 @@ -345,7 +345,6 @@
    24.4    }
    24.5  
    24.6    Symbol* generate_no_defaults_message(TRAPS) const;
    24.7 -  Symbol* generate_abstract_method_message(Method* method, TRAPS) const;
    24.8    Symbol* generate_conflicts_message(GrowableArray<Method*>* methods, TRAPS) const;
    24.9  
   24.10   public:
   24.11 @@ -404,20 +403,19 @@
   24.12        _exception_message = generate_no_defaults_message(CHECK);
   24.13        _exception_name = vmSymbols::java_lang_AbstractMethodError();
   24.14      } else if (qualified_methods.length() == 1) {
   24.15 +      // leave abstract methods alone, they will be found via normal search path
   24.16        Method* method = qualified_methods.at(0);
   24.17 -      if (method->is_abstract()) {
   24.18 -        _exception_message = generate_abstract_method_message(method, CHECK);
   24.19 -        _exception_name = vmSymbols::java_lang_AbstractMethodError();
   24.20 -      } else {
   24.21 +      if (!method->is_abstract()) {
   24.22          _selected_target = qualified_methods.at(0);
   24.23        }
   24.24      } else {
   24.25        _exception_message = generate_conflicts_message(&qualified_methods,CHECK);
   24.26        _exception_name = vmSymbols::java_lang_IncompatibleClassChangeError();
   24.27 +      if (TraceDefaultMethods) {
   24.28 +        _exception_message->print_value_on(tty);
   24.29 +        tty->print_cr("");
   24.30 +      }
   24.31      }
   24.32 -
   24.33 -    assert((has_target() ^ throws_exception()) == 1,
   24.34 -           "One and only one must be true");
   24.35    }
   24.36  
   24.37    bool contains_signature(Symbol* query) {
   24.38 @@ -475,20 +473,6 @@
   24.39    return SymbolTable::new_symbol("No qualifying defaults found", CHECK_NULL);
   24.40  }
   24.41  
   24.42 -Symbol* MethodFamily::generate_abstract_method_message(Method* method, TRAPS) const {
   24.43 -  Symbol* klass = method->klass_name();
   24.44 -  Symbol* name = method->name();
   24.45 -  Symbol* sig = method->signature();
   24.46 -  stringStream ss;
   24.47 -  ss.print("Method ");
   24.48 -  ss.write((const char*)klass->bytes(), klass->utf8_length());
   24.49 -  ss.print(".");
   24.50 -  ss.write((const char*)name->bytes(), name->utf8_length());
   24.51 -  ss.write((const char*)sig->bytes(), sig->utf8_length());
   24.52 -  ss.print(" is abstract");
   24.53 -  return SymbolTable::new_symbol(ss.base(), (int)ss.size(), CHECK_NULL);
   24.54 -}
   24.55 -
   24.56  Symbol* MethodFamily::generate_conflicts_message(GrowableArray<Method*>* methods, TRAPS) const {
   24.57    stringStream ss;
   24.58    ss.print("Conflicting default methods:");
   24.59 @@ -595,6 +579,18 @@
   24.60  #endif // ndef PRODUCT
   24.61  };
   24.62  
   24.63 +static bool already_in_vtable_slots(GrowableArray<EmptyVtableSlot*>* slots, Method* m) {
   24.64 +  bool found = false;
   24.65 +  for (int j = 0; j < slots->length(); ++j) {
   24.66 +    if (slots->at(j)->name() == m->name() &&
   24.67 +        slots->at(j)->signature() == m->signature() ) {
   24.68 +      found = true;
   24.69 +      break;
   24.70 +    }
   24.71 +  }
   24.72 +  return found;
   24.73 +}
   24.74 +
   24.75  static GrowableArray<EmptyVtableSlot*>* find_empty_vtable_slots(
   24.76      InstanceKlass* klass, GrowableArray<Method*>* mirandas, TRAPS) {
   24.77  
   24.78 @@ -604,8 +600,10 @@
   24.79  
   24.80    // All miranda methods are obvious candidates
   24.81    for (int i = 0; i < mirandas->length(); ++i) {
   24.82 -    EmptyVtableSlot* slot = new EmptyVtableSlot(mirandas->at(i));
   24.83 -    slots->append(slot);
   24.84 +    Method* m = mirandas->at(i);
   24.85 +    if (!already_in_vtable_slots(slots, m)) {
   24.86 +      slots->append(new EmptyVtableSlot(m));
   24.87 +    }
   24.88    }
   24.89  
   24.90    // Also any overpasses in our superclasses, that we haven't implemented.
   24.91 @@ -621,7 +619,26 @@
   24.92          // unless we have a real implementation of it in the current class.
   24.93          Method* impl = klass->lookup_method(m->name(), m->signature());
   24.94          if (impl == NULL || impl->is_overpass()) {
   24.95 -          slots->append(new EmptyVtableSlot(m));
   24.96 +          if (!already_in_vtable_slots(slots, m)) {
   24.97 +            slots->append(new EmptyVtableSlot(m));
   24.98 +          }
   24.99 +        }
  24.100 +      }
  24.101 +    }
  24.102 +
  24.103 +    // also any default methods in our superclasses
  24.104 +    if (super->default_methods() != NULL) {
  24.105 +      for (int i = 0; i < super->default_methods()->length(); ++i) {
  24.106 +        Method* m = super->default_methods()->at(i);
  24.107 +        // m is a method that would have been a miranda if not for the
  24.108 +        // default method processing that occurred on behalf of our superclass,
  24.109 +        // so it's a method we want to re-examine in this new context.  That is,
  24.110 +        // unless we have a real implementation of it in the current class.
  24.111 +        Method* impl = klass->lookup_method(m->name(), m->signature());
  24.112 +        if (impl == NULL || impl->is_overpass()) {
  24.113 +          if (!already_in_vtable_slots(slots, m)) {
  24.114 +            slots->append(new EmptyVtableSlot(m));
  24.115 +          }
  24.116          }
  24.117        }
  24.118      }
  24.119 @@ -679,7 +696,7 @@
  24.120      // private interface methods are not candidates for default methods
  24.121      // invokespecial to private interface methods doesn't use default method logic
  24.122      // future: take access controls into account for superclass methods
  24.123 -    if (m != NULL && (!iklass->is_interface() || m->is_public())) {
  24.124 +    if (m != NULL && !m->is_static() && (!iklass->is_interface() || m->is_public())) {
  24.125        if (_family == NULL) {
  24.126          _family = new StatefulMethodFamily();
  24.127        }
  24.128 @@ -700,7 +717,7 @@
  24.129  
  24.130  
  24.131  
  24.132 -static void create_overpasses(
  24.133 +static void create_defaults_and_exceptions(
  24.134      GrowableArray<EmptyVtableSlot*>* slots, InstanceKlass* klass, TRAPS);
  24.135  
  24.136  static void generate_erased_defaults(
  24.137 @@ -721,6 +738,8 @@
  24.138  
  24.139  static void merge_in_new_methods(InstanceKlass* klass,
  24.140      GrowableArray<Method*>* new_methods, TRAPS);
  24.141 +static void create_default_methods( InstanceKlass* klass,
  24.142 +    GrowableArray<Method*>* new_methods, TRAPS);
  24.143  
  24.144  // This is the guts of the default methods implementation.  This is called just
  24.145  // after the classfile has been parsed if some ancestor has default methods.
  24.146 @@ -782,7 +801,7 @@
  24.147    }
  24.148  #endif // ndef PRODUCT
  24.149  
  24.150 -  create_overpasses(empty_slots, klass, CHECK);
  24.151 +  create_defaults_and_exceptions(empty_slots, klass, CHECK);
  24.152  
  24.153  #ifndef PRODUCT
  24.154    if (TraceDefaultMethods) {
  24.155 @@ -791,66 +810,6 @@
  24.156  #endif // ndef PRODUCT
  24.157  }
  24.158  
  24.159 -
  24.160 -
  24.161 -#ifdef ASSERT
  24.162 -// Return true is broad type is a covariant return of narrow type
  24.163 -static bool covariant_return_type(BasicType narrow, BasicType broad) {
  24.164 -  if (narrow == broad) {
  24.165 -    return true;
  24.166 -  }
  24.167 -  if (broad == T_OBJECT) {
  24.168 -    return true;
  24.169 -  }
  24.170 -  return false;
  24.171 -}
  24.172 -#endif
  24.173 -
  24.174 -static int assemble_redirect(
  24.175 -    BytecodeConstantPool* cp, BytecodeBuffer* buffer,
  24.176 -    Symbol* incoming, Method* target, TRAPS) {
  24.177 -
  24.178 -  BytecodeAssembler assem(buffer, cp);
  24.179 -
  24.180 -  SignatureStream in(incoming, true);
  24.181 -  SignatureStream out(target->signature(), true);
  24.182 -  u2 parameter_count = 0;
  24.183 -
  24.184 -  assem.aload(parameter_count++); // load 'this'
  24.185 -
  24.186 -  while (!in.at_return_type()) {
  24.187 -    assert(!out.at_return_type(), "Parameter counts do not match");
  24.188 -    BasicType bt = in.type();
  24.189 -    assert(out.type() == bt, "Parameter types are not compatible");
  24.190 -    assem.load(bt, parameter_count);
  24.191 -    if (in.is_object() && in.as_symbol(THREAD) != out.as_symbol(THREAD)) {
  24.192 -      assem.checkcast(out.as_symbol(THREAD));
  24.193 -    } else if (bt == T_LONG || bt == T_DOUBLE) {
  24.194 -      ++parameter_count; // longs and doubles use two slots
  24.195 -    }
  24.196 -    ++parameter_count;
  24.197 -    in.next();
  24.198 -    out.next();
  24.199 -  }
  24.200 -  assert(out.at_return_type(), "Parameter counts do not match");
  24.201 -  assert(covariant_return_type(out.type(), in.type()), "Return types are not compatible");
  24.202 -
  24.203 -  if (parameter_count == 1 && (in.type() == T_LONG || in.type() == T_DOUBLE)) {
  24.204 -    ++parameter_count; // need room for return value
  24.205 -  }
  24.206 -  if (target->method_holder()->is_interface()) {
  24.207 -    assem.invokespecial(target);
  24.208 -  } else {
  24.209 -    assem.invokevirtual(target);
  24.210 -  }
  24.211 -
  24.212 -  if (in.is_object() && in.as_symbol(THREAD) != out.as_symbol(THREAD)) {
  24.213 -    assem.checkcast(in.as_symbol(THREAD));
  24.214 -  }
  24.215 -  assem._return(in.type());
  24.216 -  return parameter_count;
  24.217 -}
  24.218 -
  24.219  static int assemble_method_error(
  24.220      BytecodeConstantPool* cp, BytecodeBuffer* buffer, Symbol* errorName, Symbol* message, TRAPS) {
  24.221  
  24.222 @@ -924,18 +883,18 @@
  24.223    }
  24.224  }
  24.225  
  24.226 -// A "bridge" is a method created by javac to bridge the gap between
  24.227 -// an implementation and a generically-compatible, but different, signature.
  24.228 -// Bridges have actual bytecode implementation in classfiles.
  24.229 -// An "overpass", on the other hand, performs the same function as a bridge
  24.230 -// but does not occur in a classfile; the VM creates overpass itself,
  24.231 -// when it needs a path to get from a call site to an default method, and
  24.232 -// a bridge doesn't exist.
  24.233 -static void create_overpasses(
  24.234 +// Create default_methods list for the current class.
  24.235 +// With the VM only processing erased signatures, the VM only
  24.236 +// creates an overpass in a conflict case or a case with no candidates.
  24.237 +// This allows virtual methods to override the overpass, but ensures
  24.238 +// that a local method search will find the exception rather than an abstract
  24.239 +// or default method that is not a valid candidate.
  24.240 +static void create_defaults_and_exceptions(
  24.241      GrowableArray<EmptyVtableSlot*>* slots,
  24.242      InstanceKlass* klass, TRAPS) {
  24.243  
  24.244    GrowableArray<Method*> overpasses;
  24.245 +  GrowableArray<Method*> defaults;
  24.246    BytecodeConstantPool bpool(klass->constants());
  24.247  
  24.248    for (int i = 0; i < slots->length(); ++i) {
  24.249 @@ -943,7 +902,6 @@
  24.250  
  24.251      if (slot->is_bound()) {
  24.252        MethodFamily* method = slot->get_binding();
  24.253 -      int max_stack = 0;
  24.254        BytecodeBuffer buffer;
  24.255  
  24.256  #ifndef PRODUCT
  24.257 @@ -953,26 +911,27 @@
  24.258          tty->print_cr("");
  24.259          if (method->has_target()) {
  24.260            method->print_selected(tty, 1);
  24.261 -        } else {
  24.262 +        } else if (method->throws_exception()) {
  24.263            method->print_exception(tty, 1);
  24.264          }
  24.265        }
  24.266  #endif // ndef PRODUCT
  24.267 +
  24.268        if (method->has_target()) {
  24.269          Method* selected = method->get_selected_target();
  24.270          if (selected->method_holder()->is_interface()) {
  24.271 -          max_stack = assemble_redirect(
  24.272 -            &bpool, &buffer, slot->signature(), selected, CHECK);
  24.273 +          defaults.push(selected);
  24.274          }
  24.275        } else if (method->throws_exception()) {
  24.276 -        max_stack = assemble_method_error(&bpool, &buffer, method->get_exception_name(), method->get_exception_message(), CHECK);
  24.277 -      }
  24.278 -      if (max_stack != 0) {
  24.279 +        int max_stack = assemble_method_error(&bpool, &buffer,
  24.280 +           method->get_exception_name(), method->get_exception_message(), CHECK);
  24.281          AccessFlags flags = accessFlags_from(
  24.282            JVM_ACC_PUBLIC | JVM_ACC_SYNTHETIC | JVM_ACC_BRIDGE);
  24.283 -        Method* m = new_method(&bpool, &buffer, slot->name(), slot->signature(),
  24.284 +         Method* m = new_method(&bpool, &buffer, slot->name(), slot->signature(),
  24.285            flags, max_stack, slot->size_of_parameters(),
  24.286            ConstMethod::OVERPASS, CHECK);
  24.287 +        // We push to the methods list:
  24.288 +        // overpass methods which are exception throwing methods
  24.289          if (m != NULL) {
  24.290            overpasses.push(m);
  24.291          }
  24.292 @@ -983,11 +942,31 @@
  24.293  #ifndef PRODUCT
  24.294    if (TraceDefaultMethods) {
  24.295      tty->print_cr("Created %d overpass methods", overpasses.length());
  24.296 +    tty->print_cr("Created %d default  methods", defaults.length());
  24.297    }
  24.298  #endif // ndef PRODUCT
  24.299  
  24.300 -  switchover_constant_pool(&bpool, klass, &overpasses, CHECK);
  24.301 -  merge_in_new_methods(klass, &overpasses, CHECK);
  24.302 +  if (overpasses.length() > 0) {
  24.303 +    switchover_constant_pool(&bpool, klass, &overpasses, CHECK);
  24.304 +    merge_in_new_methods(klass, &overpasses, CHECK);
  24.305 +  }
  24.306 +  if (defaults.length() > 0) {
  24.307 +    create_default_methods(klass, &defaults, CHECK);
  24.308 +  }
  24.309 +}
  24.310 +
  24.311 +static void create_default_methods( InstanceKlass* klass,
  24.312 +    GrowableArray<Method*>* new_methods, TRAPS) {
  24.313 +
  24.314 +  int new_size = new_methods->length();
  24.315 +  Array<Method*>* total_default_methods = MetadataFactory::new_array<Method*>(
  24.316 +      klass->class_loader_data(), new_size, NULL, CHECK);
  24.317 +  for (int index = 0; index < new_size; index++ ) {
  24.318 +    total_default_methods->at_put(index, new_methods->at(index));
  24.319 +  }
  24.320 +  Method::sort_methods(total_default_methods, false, false);
  24.321 +
  24.322 +  klass->set_default_methods(total_default_methods);
  24.323  }
  24.324  
  24.325  static void sort_methods(GrowableArray<Method*>* methods) {
    25.1 --- a/src/share/vm/classfile/dictionary.cpp	Sun Oct 13 21:14:04 2013 +0100
    25.2 +++ b/src/share/vm/classfile/dictionary.cpp	Thu Oct 17 14:20:57 2013 -0700
    25.3 @@ -1,5 +1,5 @@
    25.4  /*
    25.5 - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
    25.6 + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
    25.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    25.8   *
    25.9   * This code is free software; you can redistribute it and/or modify it
   25.10 @@ -25,6 +25,7 @@
   25.11  #include "precompiled.hpp"
   25.12  #include "classfile/dictionary.hpp"
   25.13  #include "classfile/systemDictionary.hpp"
   25.14 +#include "memory/iterator.hpp"
   25.15  #include "oops/oop.inline.hpp"
   25.16  #include "prims/jvmtiRedefineClassesTrace.hpp"
   25.17  #include "utilities/hashtable.inline.hpp"
   25.18 @@ -38,17 +39,21 @@
   25.19    : TwoOopHashtable<Klass*, mtClass>(table_size, sizeof(DictionaryEntry)) {
   25.20    _current_class_index = 0;
   25.21    _current_class_entry = NULL;
   25.22 +  _pd_cache_table = new ProtectionDomainCacheTable(defaultProtectionDomainCacheSize);
   25.23  };
   25.24  
   25.25  
   25.26 -
   25.27  Dictionary::Dictionary(int table_size, HashtableBucket<mtClass>* t,
   25.28                         int number_of_entries)
   25.29    : TwoOopHashtable<Klass*, mtClass>(table_size, sizeof(DictionaryEntry), t, number_of_entries) {
   25.30    _current_class_index = 0;
   25.31    _current_class_entry = NULL;
   25.32 +  _pd_cache_table = new ProtectionDomainCacheTable(defaultProtectionDomainCacheSize);
   25.33  };
   25.34  
   25.35 +ProtectionDomainCacheEntry* Dictionary::cache_get(oop protection_domain) {
   25.36 +  return _pd_cache_table->get(protection_domain);
   25.37 +}
   25.38  
   25.39  DictionaryEntry* Dictionary::new_entry(unsigned int hash, Klass* klass,
   25.40                                         ClassLoaderData* loader_data) {
   25.41 @@ -105,11 +110,12 @@
   25.42  }
   25.43  
   25.44  
   25.45 -void DictionaryEntry::add_protection_domain(oop protection_domain) {
   25.46 +void DictionaryEntry::add_protection_domain(Dictionary* dict, oop protection_domain) {
   25.47    assert_locked_or_safepoint(SystemDictionary_lock);
   25.48    if (!contains_protection_domain(protection_domain)) {
   25.49 +    ProtectionDomainCacheEntry* entry = dict->cache_get(protection_domain);
   25.50      ProtectionDomainEntry* new_head =
   25.51 -                new ProtectionDomainEntry(protection_domain, _pd_set);
   25.52 +                new ProtectionDomainEntry(entry, _pd_set);
   25.53      // Warning: Preserve store ordering.  The SystemDictionary is read
   25.54      //          without locks.  The new ProtectionDomainEntry must be
   25.55      //          complete before other threads can be allowed to see it
   25.56 @@ -193,7 +199,10 @@
   25.57  
   25.58  
   25.59  void Dictionary::always_strong_oops_do(OopClosure* blk) {
   25.60 -  // Follow all system classes and temporary placeholders in dictionary
   25.61 +  // Follow all system classes and temporary placeholders in dictionary; only
   25.62 +  // protection domain oops contain references into the heap. In a first
   25.63 +  // pass over the system dictionary determine which need to be treated as
   25.64 +  // strongly reachable and mark them as such.
   25.65    for (int index = 0; index < table_size(); index++) {
   25.66      for (DictionaryEntry *probe = bucket(index);
   25.67                            probe != NULL;
   25.68 @@ -201,10 +210,13 @@
   25.69        Klass* e = probe->klass();
   25.70        ClassLoaderData* loader_data = probe->loader_data();
   25.71        if (is_strongly_reachable(loader_data, e)) {
   25.72 -        probe->protection_domain_set_oops_do(blk);
   25.73 +        probe->set_strongly_reachable();
   25.74        }
   25.75      }
   25.76    }
   25.77 +  // Then iterate over the protection domain cache to apply the closure on the
   25.78 +  // previously marked ones.
   25.79 +  _pd_cache_table->always_strong_oops_do(blk);
   25.80  }
   25.81  
   25.82  
   25.83 @@ -266,18 +278,12 @@
   25.84    }
   25.85  }
   25.86  
   25.87 -
   25.88  void Dictionary::oops_do(OopClosure* f) {
   25.89 -  for (int index = 0; index < table_size(); index++) {
   25.90 -    for (DictionaryEntry* probe = bucket(index);
   25.91 -                          probe != NULL;
   25.92 -                          probe = probe->next()) {
   25.93 -      probe->protection_domain_set_oops_do(f);
   25.94 -    }
   25.95 -  }
   25.96 +  // Only the protection domain oops contain references into the heap. Iterate
   25.97 +  // over all of them.
   25.98 +  _pd_cache_table->oops_do(f);
   25.99  }
  25.100  
  25.101 -
  25.102  void Dictionary::methods_do(void f(Method*)) {
  25.103    for (int index = 0; index < table_size(); index++) {
  25.104      for (DictionaryEntry* probe = bucket(index);
  25.105 @@ -292,6 +298,11 @@
  25.106    }
  25.107  }
  25.108  
  25.109 +void Dictionary::unlink(BoolObjectClosure* is_alive) {
  25.110 +  // Only the protection domain cache table may contain references to the heap
  25.111 +  // that need to be unlinked.
  25.112 +  _pd_cache_table->unlink(is_alive);
  25.113 +}
  25.114  
  25.115  Klass* Dictionary::try_get_next_class() {
  25.116    while (true) {
  25.117 @@ -306,7 +317,6 @@
  25.118    // never reached
  25.119  }
  25.120  
  25.121 -
  25.122  // Add a loaded class to the system dictionary.
  25.123  // Readers of the SystemDictionary aren't always locked, so _buckets
  25.124  // is volatile. The store of the next field in the constructor is
  25.125 @@ -396,7 +406,7 @@
  25.126    assert(protection_domain() != NULL,
  25.127           "real protection domain should be present");
  25.128  
  25.129 -  entry->add_protection_domain(protection_domain());
  25.130 +  entry->add_protection_domain(this, protection_domain());
  25.131  
  25.132    assert(entry->contains_protection_domain(protection_domain()),
  25.133           "now protection domain should be present");
  25.134 @@ -446,6 +456,146 @@
  25.135    }
  25.136  }
  25.137  
  25.138 +ProtectionDomainCacheTable::ProtectionDomainCacheTable(int table_size)
  25.139 +  : Hashtable<oop, mtClass>(table_size, sizeof(ProtectionDomainCacheEntry))
  25.140 +{
  25.141 +}
  25.142 +
  25.143 +void ProtectionDomainCacheTable::unlink(BoolObjectClosure* is_alive) {
  25.144 +  assert(SafepointSynchronize::is_at_safepoint(), "must be");
  25.145 +  for (int i = 0; i < table_size(); ++i) {
  25.146 +    ProtectionDomainCacheEntry** p = bucket_addr(i);
  25.147 +    ProtectionDomainCacheEntry* entry = bucket(i);
  25.148 +    while (entry != NULL) {
  25.149 +      if (is_alive->do_object_b(entry->literal())) {
  25.150 +        p = entry->next_addr();
  25.151 +      } else {
  25.152 +        *p = entry->next();
  25.153 +        free_entry(entry);
  25.154 +      }
  25.155 +      entry = *p;
  25.156 +    }
  25.157 +  }
  25.158 +}
  25.159 +
  25.160 +void ProtectionDomainCacheTable::oops_do(OopClosure* f) {
  25.161 +  for (int index = 0; index < table_size(); index++) {
  25.162 +    for (ProtectionDomainCacheEntry* probe = bucket(index);
  25.163 +                                     probe != NULL;
  25.164 +                                     probe = probe->next()) {
  25.165 +      probe->oops_do(f);
  25.166 +    }
  25.167 +  }
  25.168 +}
  25.169 +
  25.170 +uint ProtectionDomainCacheTable::bucket_size() {
  25.171 +  return sizeof(ProtectionDomainCacheEntry);
  25.172 +}
  25.173 +
  25.174 +#ifndef PRODUCT
  25.175 +void ProtectionDomainCacheTable::print() {
  25.176 +  tty->print_cr("Protection domain cache table (table_size=%d, classes=%d)",
  25.177 +                table_size(), number_of_entries());
  25.178 +  for (int index = 0; index < table_size(); index++) {
  25.179 +    for (ProtectionDomainCacheEntry* probe = bucket(index);
  25.180 +                                     probe != NULL;
  25.181 +                                     probe = probe->next()) {
  25.182 +      probe->print();
  25.183 +    }
  25.184 +  }
  25.185 +}
  25.186 +
  25.187 +void ProtectionDomainCacheEntry::print() {
  25.188 +  tty->print_cr("entry "PTR_FORMAT" value "PTR_FORMAT" strongly_reachable %d next "PTR_FORMAT,
  25.189 +                this, (void*)literal(), _strongly_reachable, next());
  25.190 +}
  25.191 +#endif
  25.192 +
  25.193 +void ProtectionDomainCacheTable::verify() {
  25.194 +  int element_count = 0;
  25.195 +  for (int index = 0; index < table_size(); index++) {
  25.196 +    for (ProtectionDomainCacheEntry* probe = bucket(index);
  25.197 +                                     probe != NULL;
  25.198 +                                     probe = probe->next()) {
  25.199 +      probe->verify();
  25.200 +      element_count++;
  25.201 +    }
  25.202 +  }
  25.203 +  guarantee(number_of_entries() == element_count,
  25.204 +            "Verify of protection domain cache table failed");
  25.205 +  debug_only(verify_lookup_length((double)number_of_entries() / table_size()));
  25.206 +}
  25.207 +
  25.208 +void ProtectionDomainCacheEntry::verify() {
  25.209 +  guarantee(literal()->is_oop(), "must be an oop");
  25.210 +}
  25.211 +
  25.212 +void ProtectionDomainCacheTable::always_strong_oops_do(OopClosure* f) {
  25.213 +  // the caller marked the protection domain cache entries that we need to apply
  25.214 +  // the closure on. Only process them.
  25.215 +  for (int index = 0; index < table_size(); index++) {
  25.216 +    for (ProtectionDomainCacheEntry* probe = bucket(index);
  25.217 +                                     probe != NULL;
  25.218 +                                     probe = probe->next()) {
  25.219 +      if (probe->is_strongly_reachable()) {
  25.220 +        probe->reset_strongly_reachable();
  25.221 +        probe->oops_do(f);
  25.222 +      }
  25.223 +    }
  25.224 +  }
  25.225 +}
  25.226 +
  25.227 +ProtectionDomainCacheEntry* ProtectionDomainCacheTable::get(oop protection_domain) {
  25.228 +  unsigned int hash = compute_hash(protection_domain);
  25.229 +  int index = hash_to_index(hash);
  25.230 +
  25.231 +  ProtectionDomainCacheEntry* entry = find_entry(index, protection_domain);
  25.232 +  if (entry == NULL) {
  25.233 +    entry = add_entry(index, hash, protection_domain);
  25.234 +  }
  25.235 +  return entry;
  25.236 +}
  25.237 +
  25.238 +ProtectionDomainCacheEntry* ProtectionDomainCacheTable::find_entry(int index, oop protection_domain) {
  25.239 +  for (ProtectionDomainCacheEntry* e = bucket(index); e != NULL; e = e->next()) {
  25.240 +    if (e->protection_domain() == protection_domain) {
  25.241 +      return e;
  25.242 +    }
  25.243 +  }
  25.244 +
  25.245 +  return NULL;
  25.246 +}
  25.247 +
  25.248 +ProtectionDomainCacheEntry* ProtectionDomainCacheTable::add_entry(int index, unsigned int hash, oop protection_domain) {
  25.249 +  assert_locked_or_safepoint(SystemDictionary_lock);
  25.250 +  assert(index == index_for(protection_domain), "incorrect index?");
  25.251 +  assert(find_entry(index, protection_domain) == NULL, "no double entry");
  25.252 +
  25.253 +  ProtectionDomainCacheEntry* p = new_entry(hash, protection_domain);
  25.254 +  Hashtable<oop, mtClass>::add_entry(index, p);
  25.255 +  return p;
  25.256 +}
  25.257 +
  25.258 +void ProtectionDomainCacheTable::free(ProtectionDomainCacheEntry* to_delete) {
  25.259 +  unsigned int hash = compute_hash(to_delete->protection_domain());
  25.260 +  int index = hash_to_index(hash);
  25.261 +
  25.262 +  ProtectionDomainCacheEntry** p = bucket_addr(index);
  25.263 +  ProtectionDomainCacheEntry* entry = bucket(index);
  25.264 +  while (true) {
  25.265 +    assert(entry != NULL, "sanity");
  25.266 +
  25.267 +    if (entry == to_delete) {
  25.268 +      *p = entry->next();
  25.269 +      Hashtable<oop, mtClass>::free_entry(entry);
  25.270 +      break;
  25.271 +    } else {
  25.272 +      p = entry->next_addr();
  25.273 +      entry = *p;
  25.274 +    }
  25.275 +  }
  25.276 +}
  25.277 +
  25.278  SymbolPropertyTable::SymbolPropertyTable(int table_size)
  25.279    : Hashtable<Symbol*, mtSymbol>(table_size, sizeof(SymbolPropertyEntry))
  25.280  {
  25.281 @@ -532,11 +682,13 @@
  25.282        tty->cr();
  25.283      }
  25.284    }
  25.285 +  tty->cr();
  25.286 +  _pd_cache_table->print();
  25.287 +  tty->cr();
  25.288  }
  25.289  
  25.290  #endif
  25.291  
  25.292 -
  25.293  void Dictionary::verify() {
  25.294    guarantee(number_of_entries() >= 0, "Verify of system dictionary failed");
  25.295  
  25.296 @@ -563,5 +715,7 @@
  25.297    guarantee(number_of_entries() == element_count,
  25.298              "Verify of system dictionary failed");
  25.299    debug_only(verify_lookup_length((double)number_of_entries() / table_size()));
  25.300 +
  25.301 +  _pd_cache_table->verify();
  25.302  }
  25.303  
    26.1 --- a/src/share/vm/classfile/dictionary.hpp	Sun Oct 13 21:14:04 2013 +0100
    26.2 +++ b/src/share/vm/classfile/dictionary.hpp	Thu Oct 17 14:20:57 2013 -0700
    26.3 @@ -27,11 +27,14 @@
    26.4  
    26.5  #include "classfile/systemDictionary.hpp"
    26.6  #include "oops/instanceKlass.hpp"
    26.7 -#include "oops/oop.hpp"
    26.8 +#include "oops/oop.inline.hpp"
    26.9  #include "utilities/hashtable.hpp"
   26.10  
   26.11  class DictionaryEntry;
   26.12  class PSPromotionManager;
   26.13 +class ProtectionDomainCacheTable;
   26.14 +class ProtectionDomainCacheEntry;
   26.15 +class BoolObjectClosure;
   26.16  
   26.17  //~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
   26.18  // The data structure for the system dictionary (and the shared system
   26.19 @@ -45,6 +48,8 @@
   26.20    // pointer to the current hash table entry.
   26.21    static DictionaryEntry*       _current_class_entry;
   26.22  
   26.23 +  ProtectionDomainCacheTable*   _pd_cache_table;
   26.24 +
   26.25    DictionaryEntry* get_entry(int index, unsigned int hash,
   26.26                               Symbol* name, ClassLoaderData* loader_data);
   26.27  
   26.28 @@ -93,6 +98,7 @@
   26.29  
   26.30    void methods_do(void f(Method*));
   26.31  
   26.32 +  void unlink(BoolObjectClosure* is_alive);
   26.33  
   26.34    // Classes loaded by the bootstrap loader are always strongly reachable.
   26.35    // If we're not doing class unloading, all classes are strongly reachable.
   26.36 @@ -118,6 +124,7 @@
   26.37    // Sharing support
   26.38    void reorder_dictionary();
   26.39  
   26.40 +  ProtectionDomainCacheEntry* cache_get(oop protection_domain);
   26.41  
   26.42  #ifndef PRODUCT
   26.43    void print();
   26.44 @@ -126,21 +133,112 @@
   26.45  };
   26.46  
   26.47  // The following classes can be in dictionary.cpp, but we need these
   26.48 -// to be in header file so that SA's vmStructs can access.
   26.49 +// to be in header file so that SA's vmStructs can access them.
   26.50 +class ProtectionDomainCacheEntry : public HashtableEntry<oop, mtClass> {
   26.51 +  friend class VMStructs;
   26.52 + private:
   26.53 +  // Flag indicating whether this protection domain entry is strongly reachable.
   26.54 +  // Used during iterating over the system dictionary to remember oops that need
   26.55 +  // to be updated.
   26.56 +  bool _strongly_reachable;
   26.57 + public:
   26.58 +  oop protection_domain() { return literal(); }
   26.59 +
   26.60 +  void init() {
   26.61 +    _strongly_reachable = false;
   26.62 +  }
   26.63 +
   26.64 +  ProtectionDomainCacheEntry* next() {
   26.65 +    return (ProtectionDomainCacheEntry*)HashtableEntry<oop, mtClass>::next();
   26.66 +  }
   26.67 +
   26.68 +  ProtectionDomainCacheEntry** next_addr() {
   26.69 +    return (ProtectionDomainCacheEntry**)HashtableEntry<oop, mtClass>::next_addr();
   26.70 +  }
   26.71 +
   26.72 +  void oops_do(OopClosure* f) {
   26.73 +    f->do_oop(literal_addr());
   26.74 +  }
   26.75 +
   26.76 +  void set_strongly_reachable()   { _strongly_reachable = true; }
   26.77 +  bool is_strongly_reachable()    { return _strongly_reachable; }
   26.78 +  void reset_strongly_reachable() { _strongly_reachable = false; }
   26.79 +
   26.80 +  void print() PRODUCT_RETURN;
   26.81 +  void verify();
   26.82 +};
   26.83 +
   26.84 +// The ProtectionDomainCacheTable contains all protection domain oops. The system
   26.85 +// dictionary entries reference its entries instead of having references to oops
   26.86 +// directly.
   26.87 +// This is used to speed up system dictionary iteration: the oops in the
   26.88 +// protection domain are the only ones referring the Java heap. So when there is
   26.89 +// need to update these, instead of going over every entry of the system dictionary,
   26.90 +// we only need to iterate over this set.
   26.91 +// The amount of different protection domains used is typically magnitudes smaller
   26.92 +// than the number of system dictionary entries (loaded classes).
   26.93 +class ProtectionDomainCacheTable : public Hashtable<oop, mtClass> {
   26.94 +  friend class VMStructs;
   26.95 +private:
   26.96 +  ProtectionDomainCacheEntry* bucket(int i) {
   26.97 +    return (ProtectionDomainCacheEntry*) Hashtable<oop, mtClass>::bucket(i);
   26.98 +  }
   26.99 +
  26.100 +  // The following method is not MT-safe and must be done under lock.
  26.101 +  ProtectionDomainCacheEntry** bucket_addr(int i) {
  26.102 +    return (ProtectionDomainCacheEntry**) Hashtable<oop, mtClass>::bucket_addr(i);
  26.103 +  }
  26.104 +
  26.105 +  ProtectionDomainCacheEntry* new_entry(unsigned int hash, oop protection_domain) {
  26.106 +    ProtectionDomainCacheEntry* entry = (ProtectionDomainCacheEntry*) Hashtable<oop, mtClass>::new_entry(hash, protection_domain);
  26.107 +    entry->init();
  26.108 +    return entry;
  26.109 +  }
  26.110 +
  26.111 +  static unsigned int compute_hash(oop protection_domain) {
  26.112 +    return (unsigned int)(protection_domain->identity_hash());
  26.113 +  }
  26.114 +
  26.115 +  int index_for(oop protection_domain) {
  26.116 +    return hash_to_index(compute_hash(protection_domain));
  26.117 +  }
  26.118 +
  26.119 +  ProtectionDomainCacheEntry* add_entry(int index, unsigned int hash, oop protection_domain);
  26.120 +  ProtectionDomainCacheEntry* find_entry(int index, oop protection_domain);
  26.121 +
  26.122 +public:
  26.123 +
  26.124 +  ProtectionDomainCacheTable(int table_size);
  26.125 +
  26.126 +  ProtectionDomainCacheEntry* get(oop protection_domain);
  26.127 +  void free(ProtectionDomainCacheEntry* entry);
  26.128 +
  26.129 +  void unlink(BoolObjectClosure* cl);
  26.130 +
  26.131 +  // GC support
  26.132 +  void oops_do(OopClosure* f);
  26.133 +  void always_strong_oops_do(OopClosure* f);
  26.134 +
  26.135 +  static uint bucket_size();
  26.136 +
  26.137 +  void print() PRODUCT_RETURN;
  26.138 +  void verify();
  26.139 +};
  26.140 +
  26.141  
  26.142  class ProtectionDomainEntry :public CHeapObj<mtClass> {
  26.143    friend class VMStructs;
  26.144   public:
  26.145    ProtectionDomainEntry* _next;
  26.146 -  oop                    _protection_domain;
  26.147 +  ProtectionDomainCacheEntry* _pd_cache;
  26.148  
  26.149 -  ProtectionDomainEntry(oop protection_domain, ProtectionDomainEntry* next) {
  26.150 -    _protection_domain = protection_domain;
  26.151 -    _next              = next;
  26.152 +  ProtectionDomainEntry(ProtectionDomainCacheEntry* pd_cache, ProtectionDomainEntry* next) {
  26.153 +    _pd_cache = pd_cache;
  26.154 +    _next     = next;
  26.155    }
  26.156  
  26.157    ProtectionDomainEntry* next() { return _next; }
  26.158 -  oop protection_domain() { return _protection_domain; }
  26.159 +  oop protection_domain() { return _pd_cache->protection_domain(); }
  26.160  };
  26.161  
  26.162  // An entry in the system dictionary, this describes a class as
  26.163 @@ -151,6 +249,24 @@
  26.164   private:
  26.165    // Contains the set of approved protection domains that can access
  26.166    // this system dictionary entry.
  26.167 +  //
  26.168 +  // This protection domain set is a set of tuples:
  26.169 +  //
  26.170 +  // (InstanceKlass C, initiating class loader ICL, Protection Domain PD)
  26.171 +  //
  26.172 +  // [Note that C.protection_domain(), which is stored in the java.lang.Class
  26.173 +  // mirror of C, is NOT the same as PD]
  26.174 +  //
  26.175 +  // If such an entry (C, ICL, PD) exists in the table, it means that
  26.176 +  // it is okay for a class Foo to reference C, where
  26.177 +  //
  26.178 +  //    Foo.protection_domain() == PD, and
  26.179 +  //    Foo's defining class loader == ICL
  26.180 +  //
  26.181 +  // The usage of the PD set can be seen in SystemDictionary::validate_protection_domain()
  26.182 +  // It is essentially a cache to avoid repeated Java up-calls to
  26.183 +  // ClassLoader.checkPackageAccess().
  26.184 +  //
  26.185    ProtectionDomainEntry* _pd_set;
  26.186    ClassLoaderData*       _loader_data;
  26.187  
  26.188 @@ -158,7 +274,7 @@
  26.189    // Tells whether a protection is in the approved set.
  26.190    bool contains_protection_domain(oop protection_domain) const;
  26.191    // Adds a protection domain to the approved set.
  26.192 -  void add_protection_domain(oop protection_domain);
  26.193 +  void add_protection_domain(Dictionary* dict, oop protection_domain);
  26.194  
  26.195    Klass* klass() const { return (Klass*)literal(); }
  26.196    Klass** klass_addr() { return (Klass**)literal_addr(); }
  26.197 @@ -189,12 +305,11 @@
  26.198           : contains_protection_domain(protection_domain());
  26.199    }
  26.200  
  26.201 -
  26.202 -  void protection_domain_set_oops_do(OopClosure* f) {
  26.203 +  void set_strongly_reachable() {
  26.204      for (ProtectionDomainEntry* current = _pd_set;
  26.205                                  current != NULL;
  26.206                                  current = current->_next) {
  26.207 -      f->do_oop(&(current->_protection_domain));
  26.208 +      current->_pd_cache->set_strongly_reachable();
  26.209      }
  26.210    }
  26.211  
  26.212 @@ -202,7 +317,7 @@
  26.213      for (ProtectionDomainEntry* current = _pd_set;
  26.214                                  current != NULL;
  26.215                                  current = current->_next) {
  26.216 -      current->_protection_domain->verify();
  26.217 +      current->_pd_cache->protection_domain()->verify();
  26.218      }
  26.219    }
  26.220  
    27.1 --- a/src/share/vm/classfile/javaClasses.cpp	Sun Oct 13 21:14:04 2013 +0100
    27.2 +++ b/src/share/vm/classfile/javaClasses.cpp	Thu Oct 17 14:20:57 2013 -0700
    27.3 @@ -1376,8 +1376,15 @@
    27.4    const char* klass_name  = holder->external_name();
    27.5    int buf_len = (int)strlen(klass_name);
    27.6  
    27.7 -  // pushing to the stack trace added one.
    27.8 +  // The method id may point to an obsolete method, can't get more stack information
    27.9    Method* method = holder->method_with_idnum(method_id);
   27.10 +  if (method == NULL) {
   27.11 +    char* buf = NEW_RESOURCE_ARRAY(char, buf_len + 64);
   27.12 +    // This is what the java code prints in this case - added Redefined
   27.13 +    sprintf(buf, "\tat %s.null (Redefined)", klass_name);
   27.14 +    return buf;
   27.15 +  }
   27.16 +
   27.17    char* method_name = method->name()->as_C_string();
   27.18    buf_len += (int)strlen(method_name);
   27.19  
   27.20 @@ -1773,7 +1780,8 @@
   27.21    return element;
   27.22  }
   27.23  
   27.24 -oop java_lang_StackTraceElement::create(Handle mirror, int method_id, int version, int bci, TRAPS) {
   27.25 +oop java_lang_StackTraceElement::create(Handle mirror, int method_id,
   27.26 +                                        int version, int bci, TRAPS) {
   27.27    // Allocate java.lang.StackTraceElement instance
   27.28    Klass* k = SystemDictionary::StackTraceElement_klass();
   27.29    assert(k != NULL, "must be loaded in 1.4+");
   27.30 @@ -1790,8 +1798,16 @@
   27.31    oop classname = StringTable::intern((char*) str, CHECK_0);
   27.32    java_lang_StackTraceElement::set_declaringClass(element(), classname);
   27.33  
   27.34 +  Method* method = holder->method_with_idnum(method_id);
   27.35 +  // Method on stack may be obsolete because it was redefined so cannot be
   27.36 +  // found by idnum.
   27.37 +  if (method == NULL) {
   27.38 +    // leave name and fileName null
   27.39 +    java_lang_StackTraceElement::set_lineNumber(element(), -1);
   27.40 +    return element();
   27.41 +  }
   27.42 +
   27.43    // Fill in method name
   27.44 -  Method* method = holder->method_with_idnum(method_id);
   27.45    oop methodname = StringTable::intern(method->name(), CHECK_0);
   27.46    java_lang_StackTraceElement::set_methodName(element(), methodname);
   27.47  
    28.1 --- a/src/share/vm/classfile/symbolTable.hpp	Sun Oct 13 21:14:04 2013 +0100
    28.2 +++ b/src/share/vm/classfile/symbolTable.hpp	Thu Oct 17 14:20:57 2013 -0700
    28.3 @@ -107,18 +107,13 @@
    28.4      add(loader_data, cp, names_count, name, lengths, cp_indices, hashValues, THREAD);
    28.5    }
    28.6  
    28.7 -  // Table size
    28.8 -  enum {
    28.9 -    symbol_table_size = 20011
   28.10 -  };
   28.11 -
   28.12    Symbol* lookup(int index, const char* name, int len, unsigned int hash);
   28.13  
   28.14    SymbolTable()
   28.15 -    : Hashtable<Symbol*, mtSymbol>(symbol_table_size, sizeof (HashtableEntry<Symbol*, mtSymbol>)) {}
   28.16 +    : Hashtable<Symbol*, mtSymbol>(SymbolTableSize, sizeof (HashtableEntry<Symbol*, mtSymbol>)) {}
   28.17  
   28.18    SymbolTable(HashtableBucket<mtSymbol>* t, int number_of_entries)
   28.19 -    : Hashtable<Symbol*, mtSymbol>(symbol_table_size, sizeof (HashtableEntry<Symbol*, mtSymbol>), t,
   28.20 +    : Hashtable<Symbol*, mtSymbol>(SymbolTableSize, sizeof (HashtableEntry<Symbol*, mtSymbol>), t,
   28.21                  number_of_entries) {}
   28.22  
   28.23    // Arena for permanent symbols (null class loader) that are never unloaded
   28.24 @@ -136,6 +131,9 @@
   28.25    // The symbol table
   28.26    static SymbolTable* the_table() { return _the_table; }
   28.27  
   28.28 +  // Size of one bucket in the string table.  Used when checking for rollover.
   28.29 +  static uint bucket_size() { return sizeof(HashtableBucket<mtSymbol>); }
   28.30 +
   28.31    static void create_table() {
   28.32      assert(_the_table == NULL, "One symbol table allowed.");
   28.33      _the_table = new SymbolTable();
   28.34 @@ -145,8 +143,11 @@
   28.35    static void create_table(HashtableBucket<mtSymbol>* t, int length,
   28.36                             int number_of_entries) {
   28.37      assert(_the_table == NULL, "One symbol table allowed.");
   28.38 -    assert(length == symbol_table_size * sizeof(HashtableBucket<mtSymbol>),
   28.39 -           "bad shared symbol size.");
   28.40 +
   28.41 +    // If CDS archive used a different symbol table size, use that size instead
   28.42 +    // which is better than giving an error.
   28.43 +    SymbolTableSize = length/bucket_size();
   28.44 +
   28.45      _the_table = new SymbolTable(t, number_of_entries);
   28.46      // if CDS give symbol table a default arena size since most symbols
   28.47      // are already allocated in the shared misc section.
    29.1 --- a/src/share/vm/classfile/systemDictionary.cpp	Sun Oct 13 21:14:04 2013 +0100
    29.2 +++ b/src/share/vm/classfile/systemDictionary.cpp	Thu Oct 17 14:20:57 2013 -0700
    29.3 @@ -1697,6 +1697,24 @@
    29.4    return newsize;
    29.5  }
    29.6  
    29.7 +#ifdef ASSERT
    29.8 +class VerifySDReachableAndLiveClosure : public OopClosure {
    29.9 +private:
   29.10 +  BoolObjectClosure* _is_alive;
   29.11 +
   29.12 +  template <class T> void do_oop_work(T* p) {
   29.13 +    oop obj = oopDesc::load_decode_heap_oop(p);
   29.14 +    guarantee(_is_alive->do_object_b(obj), "Oop in system dictionary must be live");
   29.15 +  }
   29.16 +
   29.17 +public:
   29.18 +  VerifySDReachableAndLiveClosure(BoolObjectClosure* is_alive) : OopClosure(), _is_alive(is_alive) { }
   29.19 +
   29.20 +  virtual void do_oop(oop* p)       { do_oop_work(p); }
   29.21 +  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
   29.22 +};
   29.23 +#endif
   29.24 +
   29.25  // Assumes classes in the SystemDictionary are only unloaded at a safepoint
   29.26  // Note: anonymous classes are not in the SD.
   29.27  bool SystemDictionary::do_unloading(BoolObjectClosure* is_alive) {
   29.28 @@ -1707,7 +1725,15 @@
   29.29      unloading_occurred = dictionary()->do_unloading();
   29.30      constraints()->purge_loader_constraints();
   29.31      resolution_errors()->purge_resolution_errors();
   29.32 -}
   29.33 +  }
   29.34 +  // Oops referenced by the system dictionary may get unreachable independently
   29.35 +  // of the class loader (eg. cached protection domain oops). So we need to
   29.36 +  // explicitly unlink them here instead of in Dictionary::do_unloading.
   29.37 +  dictionary()->unlink(is_alive);
   29.38 +#ifdef ASSERT
   29.39 +  VerifySDReachableAndLiveClosure cl(is_alive);
   29.40 +  dictionary()->oops_do(&cl);
   29.41 +#endif
   29.42    return unloading_occurred;
   29.43  }
   29.44  
    30.1 --- a/src/share/vm/classfile/verifier.cpp	Sun Oct 13 21:14:04 2013 +0100
    30.2 +++ b/src/share/vm/classfile/verifier.cpp	Thu Oct 17 14:20:57 2013 -0700
    30.3 @@ -2442,10 +2442,16 @@
    30.4      bool subtype = ref_class_type.is_assignable_from(
    30.5        current_type(), this, CHECK_VERIFY(this));
    30.6      if (!subtype) {
    30.7 -      verify_error(ErrorContext::bad_code(bci),
    30.8 -          "Bad invokespecial instruction: "
    30.9 -          "current class isn't assignable to reference class.");
   30.10 -       return;
   30.11 +      if (current_class()->is_anonymous()) {
   30.12 +        subtype = ref_class_type.is_assignable_from(VerificationType::reference_type(
   30.13 +                   current_class()->host_klass()->name()), this, CHECK_VERIFY(this));
   30.14 +      }
   30.15 +      if (!subtype) {
   30.16 +        verify_error(ErrorContext::bad_code(bci),
   30.17 +            "Bad invokespecial instruction: "
   30.18 +            "current class isn't assignable to reference class.");
   30.19 +         return;
   30.20 +      }
   30.21      }
   30.22    }
   30.23    // Match method descriptor with operand stack
   30.24 @@ -2461,7 +2467,28 @@
   30.25      } else {   // other methods
   30.26        // Ensures that target class is assignable to method class.
   30.27        if (opcode == Bytecodes::_invokespecial) {
   30.28 -        current_frame->pop_stack(current_type(), CHECK_VERIFY(this));
   30.29 +        if (!current_class()->is_anonymous()) {
   30.30 +          current_frame->pop_stack(current_type(), CHECK_VERIFY(this));
   30.31 +        } else {
   30.32 +          // anonymous class invokespecial calls: either the
   30.33 +          // operand stack/objectref  is a subtype of the current class OR
   30.34 +          // the objectref is a subtype of the host_klass of the current class
   30.35 +          // to allow an anonymous class to reference methods in the host_klass
   30.36 +          VerificationType top = current_frame->pop_stack(CHECK_VERIFY(this));
   30.37 +          bool subtype = current_type().is_assignable_from(top, this, CHECK_VERIFY(this));
   30.38 +          if (!subtype) {
   30.39 +            VerificationType hosttype =
   30.40 +              VerificationType::reference_type(current_class()->host_klass()->name());
   30.41 +            subtype = hosttype.is_assignable_from(top, this, CHECK_VERIFY(this));
   30.42 +          }
   30.43 +          if (!subtype) {
   30.44 +            verify_error( ErrorContext::bad_type(current_frame->offset(),
   30.45 +              current_frame->stack_top_ctx(),
   30.46 +              TypeOrigin::implicit(top)),
   30.47 +              "Bad type on operand stack");
   30.48 +            return;
   30.49 +          }
   30.50 +        }
   30.51        } else if (opcode == Bytecodes::_invokevirtual) {
   30.52          VerificationType stack_object_type =
   30.53            current_frame->pop_stack(ref_class_type, CHECK_VERIFY(this));
    31.1 --- a/src/share/vm/code/dependencies.cpp	Sun Oct 13 21:14:04 2013 +0100
    31.2 +++ b/src/share/vm/code/dependencies.cpp	Thu Oct 17 14:20:57 2013 -0700
    31.3 @@ -1,5 +1,5 @@
    31.4  /*
    31.5 - * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
    31.6 + * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
    31.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    31.8   *
    31.9   * This code is free software; you can redistribute it and/or modify it
   31.10 @@ -812,8 +812,8 @@
   31.11      Klass* k = ctxk;
   31.12      Method* lm = k->lookup_method(m->name(), m->signature());
   31.13      if (lm == NULL && k->oop_is_instance()) {
   31.14 -      // It might be an abstract interface method, devoid of mirandas.
   31.15 -      lm = ((InstanceKlass*)k)->lookup_method_in_all_interfaces(m->name(),
   31.16 +      // It might be an interface method
   31.17 +        lm = ((InstanceKlass*)k)->lookup_method_in_ordered_interfaces(m->name(),
   31.18                                                                  m->signature());
   31.19      }
   31.20      if (lm == m)
    32.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Sun Oct 13 21:14:04 2013 +0100
    32.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Thu Oct 17 14:20:57 2013 -0700
    32.3 @@ -6035,7 +6035,11 @@
    32.4    // is dirty.
    32.5    G1SATBCardTableModRefBS* ct_bs = g1_barrier_set();
    32.6    MemRegion mr(hr->bottom(), hr->pre_dummy_top());
    32.7 -  ct_bs->verify_dirty_region(mr);
    32.8 +  if (hr->is_young()) {
    32.9 +    ct_bs->verify_g1_young_region(mr);
   32.10 +  } else {
   32.11 +    ct_bs->verify_dirty_region(mr);
   32.12 +  }
   32.13  }
   32.14  
   32.15  void G1CollectedHeap::verify_dirty_young_list(HeapRegion* head) {
    33.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Sun Oct 13 21:14:04 2013 +0100
    33.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.inline.hpp	Thu Oct 17 14:20:57 2013 -0700
    33.3 @@ -29,6 +29,7 @@
    33.4  #include "gc_implementation/g1/g1CollectedHeap.hpp"
    33.5  #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
    33.6  #include "gc_implementation/g1/g1CollectorPolicy.hpp"
    33.7 +#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
    33.8  #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
    33.9  #include "utilities/taskqueue.hpp"
   33.10  
   33.11 @@ -134,7 +135,7 @@
   33.12    assert(containing_hr->is_in(end - 1), "it should also contain end - 1");
   33.13  
   33.14    MemRegion mr(start, end);
   33.15 -  g1_barrier_set()->dirty(mr);
   33.16 +  g1_barrier_set()->g1_mark_as_young(mr);
   33.17  }
   33.18  
   33.19  inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
    34.1 --- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Sun Oct 13 21:14:04 2013 +0100
    34.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp	Thu Oct 17 14:20:57 2013 -0700
    34.3 @@ -319,10 +319,10 @@
    34.4  }
    34.5  
    34.6  void G1CollectorPolicy::initialize_flags() {
    34.7 -  set_min_alignment(HeapRegion::GrainBytes);
    34.8 +  _min_alignment = HeapRegion::GrainBytes;
    34.9    size_t card_table_alignment = GenRemSet::max_alignment_constraint(rem_set_name());
   34.10    size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
   34.11 -  set_max_alignment(MAX3(card_table_alignment, min_alignment(), page_size));
   34.12 +  _max_alignment = MAX3(card_table_alignment, _min_alignment, page_size);
   34.13    if (SurvivorRatio < 1) {
   34.14      vm_exit_during_initialization("Invalid survivor ratio specified");
   34.15    }
    35.1 --- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Sun Oct 13 21:14:04 2013 +0100
    35.2 +++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Thu Oct 17 14:20:57 2013 -0700
    35.3 @@ -70,6 +70,12 @@
    35.4    if ((val & (clean_card_mask_val() | deferred_card_val())) == deferred_card_val()) {
    35.5      return false;
    35.6    }
    35.7 +
    35.8 +  if  (val == g1_young_gen) {
    35.9 +    // the card is for a young gen region. We don't need to keep track of all pointers into young
   35.10 +    return false;
   35.11 +  }
   35.12 +
   35.13    // Cached bit can be installed either on a clean card or on a claimed card.
   35.14    jbyte new_val = val;
   35.15    if (val == clean_card_val()) {
   35.16 @@ -85,6 +91,19 @@
   35.17    return true;
   35.18  }
   35.19  
   35.20 +void G1SATBCardTableModRefBS::g1_mark_as_young(const MemRegion& mr) {
   35.21 +  jbyte *const first = byte_for(mr.start());
   35.22 +  jbyte *const last = byte_after(mr.last());
   35.23 +
   35.24 +  memset(first, g1_young_gen, last - first);
   35.25 +}
   35.26 +
   35.27 +#ifndef PRODUCT
   35.28 +void G1SATBCardTableModRefBS::verify_g1_young_region(MemRegion mr) {
   35.29 +  verify_region(mr, g1_young_gen,  true);
   35.30 +}
   35.31 +#endif
   35.32 +
   35.33  G1SATBCardTableLoggingModRefBS::
   35.34  G1SATBCardTableLoggingModRefBS(MemRegion whole_heap,
   35.35                                 int max_covered_regions) :
   35.36 @@ -97,7 +116,11 @@
   35.37  void
   35.38  G1SATBCardTableLoggingModRefBS::write_ref_field_work(void* field,
   35.39                                                       oop new_val) {
   35.40 -  jbyte* byte = byte_for(field);
   35.41 +  volatile jbyte* byte = byte_for(field);
   35.42 +  if (*byte == g1_young_gen) {
   35.43 +    return;
   35.44 +  }
   35.45 +  OrderAccess::storeload();
   35.46    if (*byte != dirty_card) {
   35.47      *byte = dirty_card;
   35.48      Thread* thr = Thread::current();
   35.49 @@ -129,7 +152,7 @@
   35.50  
   35.51  void
   35.52  G1SATBCardTableLoggingModRefBS::invalidate(MemRegion mr, bool whole_heap) {
   35.53 -  jbyte* byte = byte_for(mr.start());
   35.54 +  volatile jbyte* byte = byte_for(mr.start());
   35.55    jbyte* last_byte = byte_for(mr.last());
   35.56    Thread* thr = Thread::current();
   35.57    if (whole_heap) {
   35.58 @@ -138,25 +161,35 @@
   35.59        byte++;
   35.60      }
   35.61    } else {
   35.62 -    // Enqueue if necessary.
   35.63 -    if (thr->is_Java_thread()) {
   35.64 -      JavaThread* jt = (JavaThread*)thr;
   35.65 -      while (byte <= last_byte) {
   35.66 -        if (*byte != dirty_card) {
   35.67 -          *byte = dirty_card;
   35.68 -          jt->dirty_card_queue().enqueue(byte);
   35.69 +    // skip all consecutive young cards
   35.70 +    for (; byte <= last_byte && *byte == g1_young_gen; byte++);
   35.71 +
   35.72 +    if (byte <= last_byte) {
   35.73 +      OrderAccess::storeload();
   35.74 +      // Enqueue if necessary.
   35.75 +      if (thr->is_Java_thread()) {
   35.76 +        JavaThread* jt = (JavaThread*)thr;
   35.77 +        for (; byte <= last_byte; byte++) {
   35.78 +          if (*byte == g1_young_gen) {
   35.79 +            continue;
   35.80 +          }
   35.81 +          if (*byte != dirty_card) {
   35.82 +            *byte = dirty_card;
   35.83 +            jt->dirty_card_queue().enqueue(byte);
   35.84 +          }
   35.85          }
   35.86 -        byte++;
   35.87 -      }
   35.88 -    } else {
   35.89 -      MutexLockerEx x(Shared_DirtyCardQ_lock,
   35.90 -                      Mutex::_no_safepoint_check_flag);
   35.91 -      while (byte <= last_byte) {
   35.92 -        if (*byte != dirty_card) {
   35.93 -          *byte = dirty_card;
   35.94 -          _dcqs.shared_dirty_card_queue()->enqueue(byte);
   35.95 +      } else {
   35.96 +        MutexLockerEx x(Shared_DirtyCardQ_lock,
   35.97 +                        Mutex::_no_safepoint_check_flag);
   35.98 +        for (; byte <= last_byte; byte++) {
   35.99 +          if (*byte == g1_young_gen) {
  35.100 +            continue;
  35.101 +          }
  35.102 +          if (*byte != dirty_card) {
  35.103 +            *byte = dirty_card;
  35.104 +            _dcqs.shared_dirty_card_queue()->enqueue(byte);
  35.105 +          }
  35.106          }
  35.107 -        byte++;
  35.108        }
  35.109      }
  35.110    }
    36.1 --- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp	Sun Oct 13 21:14:04 2013 +0100
    36.2 +++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp	Thu Oct 17 14:20:57 2013 -0700
    36.3 @@ -38,7 +38,14 @@
    36.4  // snapshot-at-the-beginning marking.
    36.5  
    36.6  class G1SATBCardTableModRefBS: public CardTableModRefBSForCTRS {
    36.7 +protected:
    36.8 +  enum G1CardValues {
    36.9 +    g1_young_gen = CT_MR_BS_last_reserved << 1
   36.10 +  };
   36.11 +
   36.12  public:
   36.13 +  static int g1_young_card_val()   { return g1_young_gen; }
   36.14 +
   36.15    // Add "pre_val" to a set of objects that may have been disconnected from the
   36.16    // pre-marking object graph.
   36.17    static void enqueue(oop pre_val);
   36.18 @@ -118,6 +125,9 @@
   36.19        _byte_map[card_index] = val;
   36.20    }
   36.21  
   36.22 +  void verify_g1_young_region(MemRegion mr) PRODUCT_RETURN;
   36.23 +  void g1_mark_as_young(const MemRegion& mr);
   36.24 +
   36.25    bool mark_card_deferred(size_t card_index);
   36.26  
   36.27    bool is_card_deferred(size_t card_index) {
    37.1 --- a/src/share/vm/gc_implementation/g1/ptrQueue.hpp	Sun Oct 13 21:14:04 2013 +0100
    37.2 +++ b/src/share/vm/gc_implementation/g1/ptrQueue.hpp	Thu Oct 17 14:20:57 2013 -0700
    37.3 @@ -80,6 +80,10 @@
    37.4  
    37.5    void reset() { if (_buf != NULL) _index = _sz; }
    37.6  
    37.7 +  void enqueue(volatile void* ptr) {
    37.8 +    enqueue((void*)(ptr));
    37.9 +  }
   37.10 +
   37.11    // Enqueues the given "obj".
   37.12    void enqueue(void* ptr) {
   37.13      if (!_active) return;
    38.1 --- a/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Sun Oct 13 21:14:04 2013 +0100
    38.2 +++ b/src/share/vm/gc_implementation/shared/vmGCOperations.hpp	Thu Oct 17 14:20:57 2013 -0700
    38.3 @@ -214,9 +214,6 @@
    38.4      : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true),
    38.5        _loader_data(loader_data), _size(size), _mdtype(mdtype), _result(NULL) {
    38.6    }
    38.7 -  ~VM_CollectForMetadataAllocation()  {
    38.8 -    MetaspaceGC::set_expand_after_GC(false);
    38.9 -  }
   38.10    virtual VMOp_Type type() const { return VMOp_CollectForMetadataAllocation; }
   38.11    virtual void doit();
   38.12    MetaWord* result() const       { return _result; }
    39.1 --- a/src/share/vm/gc_interface/collectedHeap.cpp	Sun Oct 13 21:14:04 2013 +0100
    39.2 +++ b/src/share/vm/gc_interface/collectedHeap.cpp	Thu Oct 17 14:20:57 2013 -0700
    39.3 @@ -202,12 +202,6 @@
    39.4        ShouldNotReachHere(); // Unexpected use of this function
    39.5    }
    39.6  }
    39.7 -MetaWord* CollectedHeap::satisfy_failed_metadata_allocation(
    39.8 -                                              ClassLoaderData* loader_data,
    39.9 -                                              size_t size, Metaspace::MetadataType mdtype) {
   39.10 -  return collector_policy()->satisfy_failed_metadata_allocation(loader_data, size, mdtype);
   39.11 -}
   39.12 -
   39.13  
   39.14  void CollectedHeap::pre_initialize() {
   39.15    // Used for ReduceInitialCardMarks (when COMPILER2 is used);
    40.1 --- a/src/share/vm/gc_interface/collectedHeap.hpp	Sun Oct 13 21:14:04 2013 +0100
    40.2 +++ b/src/share/vm/gc_interface/collectedHeap.hpp	Thu Oct 17 14:20:57 2013 -0700
    40.3 @@ -475,11 +475,6 @@
    40.4    // the context of the vm thread.
    40.5    virtual void collect_as_vm_thread(GCCause::Cause cause);
    40.6  
    40.7 -  // Callback from VM_CollectForMetadataAllocation operation.
    40.8 -  MetaWord* satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
    40.9 -                                               size_t size,
   40.10 -                                               Metaspace::MetadataType mdtype);
   40.11 -
   40.12    // Returns the barrier set for this heap
   40.13    BarrierSet* barrier_set() { return _barrier_set; }
   40.14  
    41.1 --- a/src/share/vm/interpreter/linkResolver.cpp	Sun Oct 13 21:14:04 2013 +0100
    41.2 +++ b/src/share/vm/interpreter/linkResolver.cpp	Thu Oct 17 14:20:57 2013 -0700
    41.3 @@ -1,4 +1,5 @@
    41.4  /*
    41.5 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    41.6   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    41.7   *
    41.8   * This code is free software; you can redistribute it and/or modify it
    41.9 @@ -221,8 +222,17 @@
   41.10  //
   41.11  // According to JVM spec. $5.4.3c & $5.4.3d
   41.12  
   41.13 +// Look up method in klasses, including static methods
   41.14 +// Then look up local default methods
   41.15  void LinkResolver::lookup_method_in_klasses(methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS) {
   41.16    Method* result_oop = klass->uncached_lookup_method(name, signature);
   41.17 +  if (result_oop == NULL) {
   41.18 +    Array<Method*>* default_methods = InstanceKlass::cast(klass())->default_methods();
   41.19 +    if (default_methods != NULL) {
   41.20 +      result_oop = InstanceKlass::find_method(default_methods, name, signature);
   41.21 +    }
   41.22 +  }
   41.23 +
   41.24    if (EnableInvokeDynamic && result_oop != NULL) {
   41.25      vmIntrinsics::ID iid = result_oop->intrinsic_id();
   41.26      if (MethodHandles::is_signature_polymorphic(iid)) {
   41.27 @@ -234,6 +244,7 @@
   41.28  }
   41.29  
   41.30  // returns first instance method
   41.31 +// Looks up method in classes, then looks up local default methods
   41.32  void LinkResolver::lookup_instance_method_in_klasses(methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS) {
   41.33    Method* result_oop = klass->uncached_lookup_method(name, signature);
   41.34    result = methodHandle(THREAD, result_oop);
   41.35 @@ -241,13 +252,38 @@
   41.36      klass = KlassHandle(THREAD, result->method_holder()->super());
   41.37      result = methodHandle(THREAD, klass->uncached_lookup_method(name, signature));
   41.38    }
   41.39 +
   41.40 +  if (result.is_null()) {
   41.41 +    Array<Method*>* default_methods = InstanceKlass::cast(klass())->default_methods();
   41.42 +    if (default_methods != NULL) {
   41.43 +      result = methodHandle(InstanceKlass::find_method(default_methods, name, signature));
   41.44 +      assert(result.is_null() || !result->is_static(), "static defaults not allowed");
   41.45 +    }
   41.46 +  }
   41.47  }
   41.48  
   41.49 +int LinkResolver::vtable_index_of_interface_method(KlassHandle klass,
   41.50 +                                          methodHandle resolved_method, TRAPS) {
   41.51  
   41.52 -int LinkResolver::vtable_index_of_miranda_method(KlassHandle klass, Symbol* name, Symbol* signature, TRAPS) {
   41.53 -  ResourceMark rm(THREAD);
   41.54 -  klassVtable *vt = InstanceKlass::cast(klass())->vtable();
   41.55 -  return vt->index_of_miranda(name, signature);
   41.56 +  int vtable_index = Method::invalid_vtable_index;
   41.57 +  Symbol* name = resolved_method->name();
   41.58 +  Symbol* signature = resolved_method->signature();
   41.59 +
   41.60 +  // First check in default method array
   41.61 +  if (!resolved_method->is_abstract()  &&
   41.62 +    (InstanceKlass::cast(klass())->default_methods() != NULL)) {
   41.63 +    int index = InstanceKlass::find_method_index(InstanceKlass::cast(klass())->default_methods(), name, signature);
   41.64 +    if (index >= 0 ) {
   41.65 +      vtable_index = InstanceKlass::cast(klass())->default_vtable_indices()->at(index);
   41.66 +    }
   41.67 +  }
   41.68 +  if (vtable_index == Method::invalid_vtable_index) {
   41.69 +    // get vtable_index for miranda methods
   41.70 +    ResourceMark rm(THREAD);
   41.71 +    klassVtable *vt = InstanceKlass::cast(klass())->vtable();
   41.72 +    vtable_index = vt->index_of_miranda(name, signature);
   41.73 +  }
   41.74 +  return vtable_index;
   41.75  }
   41.76  
   41.77  void LinkResolver::lookup_method_in_interfaces(methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature, TRAPS) {
   41.78 @@ -625,6 +661,12 @@
   41.79                     resolved_method->method_holder()->internal_name()
   41.80                    );
   41.81      resolved_method->access_flags().print_on(tty);
   41.82 +    if (resolved_method->is_default_method()) {
   41.83 +      tty->print("default");
   41.84 +    }
   41.85 +    if (resolved_method->is_overpass()) {
   41.86 +      tty->print("overpass");
   41.87 +    }
   41.88      tty->cr();
   41.89    }
   41.90  }
   41.91 @@ -853,6 +895,7 @@
   41.92                                                           resolved_method->signature()));
   41.93      THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
   41.94    }
   41.95 +
   41.96    if (TraceItables && Verbose) {
   41.97      ResourceMark rm(THREAD);
   41.98      tty->print("invokespecial resolved method: caller-class:%s, compile-time-class:%s, method:%s, method_holder:%s, access_flags: ",
   41.99 @@ -864,8 +907,7 @@
  41.100                  resolved_method->method_holder()->internal_name()
  41.101                 );
  41.102      resolved_method->access_flags().print_on(tty);
  41.103 -    if (resolved_method->method_holder()->is_interface() &&
  41.104 -        !resolved_method->is_abstract()) {
  41.105 +    if (resolved_method->is_default_method()) {
  41.106        tty->print("default");
  41.107      }
  41.108      if (resolved_method->is_overpass()) {
  41.109 @@ -945,10 +987,12 @@
  41.110                   sel_method->method_holder()->internal_name()
  41.111                  );
  41.112      sel_method->access_flags().print_on(tty);
  41.113 -    if (sel_method->method_holder()->is_interface() &&
  41.114 -        !sel_method->is_abstract()) {
  41.115 +    if (sel_method->is_default_method()) {
  41.116        tty->print("default");
  41.117      }
  41.118 +    if (sel_method->is_overpass()) {
  41.119 +      tty->print("overpass");
  41.120 +    }
  41.121      tty->cr();
  41.122    }
  41.123  
  41.124 @@ -996,26 +1040,25 @@
  41.125      THROW_MSG(vmSymbols::java_lang_IncompatibleClassChangeError(), buf);
  41.126    }
  41.127  
  41.128 - if (PrintVtables && Verbose) {
  41.129 -   ResourceMark rm(THREAD);
  41.130 -   tty->print("invokevirtual resolved method: caller-class:%s, compile-time-class:%s, method:%s, method_holder:%s, access_flags: ",
  41.131 -                  (current_klass.is_null() ? "<NULL>" : current_klass->internal_name()),
  41.132 -                  (resolved_klass.is_null() ? "<NULL>" : resolved_klass->internal_name()),
  41.133 -                  Method::name_and_sig_as_C_string(resolved_klass(),
  41.134 -                                                   resolved_method->name(),
  41.135 -                                                   resolved_method->signature()),
  41.136 -                  resolved_method->method_holder()->internal_name()
  41.137 -                 );
  41.138 -   resolved_method->access_flags().print_on(tty);
  41.139 -   if (resolved_method->method_holder()->is_interface() &&
  41.140 -       !resolved_method->is_abstract()) {
  41.141 -     tty->print("default");
  41.142 -   }
  41.143 -   if (resolved_method->is_overpass()) {
  41.144 -     tty->print("overpass");
  41.145 -   }
  41.146 -   tty->cr();
  41.147 - }
  41.148 +  if (PrintVtables && Verbose) {
  41.149 +    ResourceMark rm(THREAD);
  41.150 +    tty->print("invokevirtual resolved method: caller-class:%s, compile-time-class:%s, method:%s, method_holder:%s, access_flags: ",
  41.151 +                   (current_klass.is_null() ? "<NULL>" : current_klass->internal_name()),
  41.152 +                   (resolved_klass.is_null() ? "<NULL>" : resolved_klass->internal_name()),
  41.153 +                   Method::name_and_sig_as_C_string(resolved_klass(),
  41.154 +                                                    resolved_method->name(),
  41.155 +                                                    resolved_method->signature()),
  41.156 +                   resolved_method->method_holder()->internal_name()
  41.157 +                  );
  41.158 +    resolved_method->access_flags().print_on(tty);
  41.159 +    if (resolved_method->is_default_method()) {
  41.160 +      tty->print("default");
  41.161 +    }
  41.162 +    if (resolved_method->is_overpass()) {
  41.163 +      tty->print("overpass");
  41.164 +    }
  41.165 +    tty->cr();
  41.166 +  }
  41.167  }
  41.168  
  41.169  // throws runtime exceptions
  41.170 @@ -1045,10 +1088,8 @@
  41.171  
  41.172    // do lookup based on receiver klass using the vtable index
  41.173    if (resolved_method->method_holder()->is_interface()) { // miranda method
  41.174 -    vtable_index = vtable_index_of_miranda_method(resolved_klass,
  41.175 -                           resolved_method->name(),
  41.176 -                           resolved_method->signature(), CHECK);
  41.177 -
  41.178 +    vtable_index = vtable_index_of_interface_method(resolved_klass,
  41.179 +                           resolved_method, CHECK);
  41.180      assert(vtable_index >= 0 , "we should have valid vtable index at this point");
  41.181  
  41.182      InstanceKlass* inst = InstanceKlass::cast(recv_klass());
  41.183 @@ -1104,11 +1145,10 @@
  41.184                     vtable_index
  41.185                    );
  41.186      selected_method->access_flags().print_on(tty);
  41.187 -    if (selected_method->method_holder()->is_interface() &&
  41.188 -        !selected_method->is_abstract()) {
  41.189 +    if (selected_method->is_default_method()) {
  41.190        tty->print("default");
  41.191      }
  41.192 -    if (resolved_method->is_overpass()) {
  41.193 +    if (selected_method->is_overpass()) {
  41.194        tty->print("overpass");
  41.195      }
  41.196      tty->cr();
  41.197 @@ -1191,7 +1231,6 @@
  41.198                                                 sel_method->name(),
  41.199                                                 sel_method->signature()));
  41.200    }
  41.201 -
  41.202    // check if abstract
  41.203    if (check_null_and_abstract && sel_method->is_abstract()) {
  41.204      ResourceMark rm(THREAD);
  41.205 @@ -1220,11 +1259,10 @@
  41.206                     sel_method->method_holder()->internal_name()
  41.207                    );
  41.208      sel_method->access_flags().print_on(tty);
  41.209 -    if (sel_method->method_holder()->is_interface() &&
  41.210 -        !sel_method->is_abstract()) {
  41.211 +    if (sel_method->is_default_method()) {
  41.212        tty->print("default");
  41.213      }
  41.214 -    if (resolved_method->is_overpass()) {
  41.215 +    if (sel_method->is_overpass()) {
  41.216        tty->print("overpass");
  41.217      }
  41.218      tty->cr();
    42.1 --- a/src/share/vm/interpreter/linkResolver.hpp	Sun Oct 13 21:14:04 2013 +0100
    42.2 +++ b/src/share/vm/interpreter/linkResolver.hpp	Thu Oct 17 14:20:57 2013 -0700
    42.3 @@ -130,8 +130,7 @@
    42.4    static void lookup_polymorphic_method         (methodHandle& result, KlassHandle klass, Symbol* name, Symbol* signature,
    42.5                                                   KlassHandle current_klass, Handle *appendix_result_or_null, Handle *method_type_result, TRAPS);
    42.6  
    42.7 -  static int vtable_index_of_miranda_method(KlassHandle klass, Symbol* name, Symbol* signature, TRAPS);
    42.8 -
    42.9 +  static int vtable_index_of_interface_method(KlassHandle klass, methodHandle resolved_method, TRAPS);
   42.10    static void resolve_klass           (KlassHandle& result, constantPoolHandle  pool, int index, TRAPS);
   42.11  
   42.12    static void resolve_pool  (KlassHandle& resolved_klass, Symbol*& method_name, Symbol*& method_signature, KlassHandle& current_klass, constantPoolHandle pool, int index, TRAPS);
    43.1 --- a/src/share/vm/memory/collectorPolicy.cpp	Sun Oct 13 21:14:04 2013 +0100
    43.2 +++ b/src/share/vm/memory/collectorPolicy.cpp	Thu Oct 17 14:20:57 2013 -0700
    43.3 @@ -47,85 +47,53 @@
    43.4  
    43.5  // CollectorPolicy methods.
    43.6  
    43.7 -// Align down. If the aligning result in 0, return 'alignment'.
    43.8 -static size_t restricted_align_down(size_t size, size_t alignment) {
    43.9 -  return MAX2(alignment, align_size_down_(size, alignment));
   43.10 -}
   43.11 -
   43.12  void CollectorPolicy::initialize_flags() {
   43.13 -  assert(max_alignment() >= min_alignment(),
   43.14 -      err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT,
   43.15 -          max_alignment(), min_alignment()));
   43.16 -  assert(max_alignment() % min_alignment() == 0,
   43.17 -      err_msg("max_alignment: " SIZE_FORMAT " not aligned by min_alignment: " SIZE_FORMAT,
   43.18 -          max_alignment(), min_alignment()));
   43.19 +  assert(_max_alignment >= _min_alignment,
   43.20 +         err_msg("max_alignment: " SIZE_FORMAT " less than min_alignment: " SIZE_FORMAT,
   43.21 +                 _max_alignment, _min_alignment));
   43.22 +  assert(_max_alignment % _min_alignment == 0,
   43.23 +         err_msg("max_alignment: " SIZE_FORMAT " not aligned by min_alignment: " SIZE_FORMAT,
   43.24 +                 _max_alignment, _min_alignment));
   43.25  
   43.26    if (MaxHeapSize < InitialHeapSize) {
   43.27      vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
   43.28    }
   43.29  
   43.30 -  // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will
   43.31 -  // override if MaxMetaspaceSize was set on the command line or not.
   43.32 -  // This information is needed later to conform to the specification of the
   43.33 -  // java.lang.management.MemoryUsage API.
   43.34 -  //
   43.35 -  // Ideally, we would be able to set the default value of MaxMetaspaceSize in
   43.36 -  // globals.hpp to the aligned value, but this is not possible, since the
   43.37 -  // alignment depends on other flags being parsed.
   43.38 -  MaxMetaspaceSize = restricted_align_down(MaxMetaspaceSize, max_alignment());
   43.39 -
   43.40 -  if (MetaspaceSize > MaxMetaspaceSize) {
   43.41 -    MetaspaceSize = MaxMetaspaceSize;
   43.42 -  }
   43.43 -
   43.44 -  MetaspaceSize = restricted_align_down(MetaspaceSize, min_alignment());
   43.45 -
   43.46 -  assert(MetaspaceSize <= MaxMetaspaceSize, "Must be");
   43.47 -
   43.48 -  MinMetaspaceExpansion = restricted_align_down(MinMetaspaceExpansion, min_alignment());
   43.49 -  MaxMetaspaceExpansion = restricted_align_down(MaxMetaspaceExpansion, min_alignment());
   43.50 -
   43.51 -  MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, min_alignment());
   43.52 -
   43.53 -  assert(MetaspaceSize    % min_alignment() == 0, "metapace alignment");
   43.54 -  assert(MaxMetaspaceSize % max_alignment() == 0, "maximum metaspace alignment");
   43.55 -  if (MetaspaceSize < 256*K) {
   43.56 -    vm_exit_during_initialization("Too small initial Metaspace size");
   43.57 -  }
   43.58 +  MinHeapDeltaBytes = align_size_up(MinHeapDeltaBytes, _min_alignment);
   43.59  }
   43.60  
   43.61  void CollectorPolicy::initialize_size_info() {
   43.62    // User inputs from -mx and ms must be aligned
   43.63 -  set_min_heap_byte_size(align_size_up(Arguments::min_heap_size(), min_alignment()));
   43.64 -  set_initial_heap_byte_size(align_size_up(InitialHeapSize, min_alignment()));
   43.65 -  set_max_heap_byte_size(align_size_up(MaxHeapSize, max_alignment()));
   43.66 +  _min_heap_byte_size = align_size_up(Arguments::min_heap_size(), _min_alignment);
   43.67 +  _initial_heap_byte_size = align_size_up(InitialHeapSize, _min_alignment);
   43.68 +  _max_heap_byte_size = align_size_up(MaxHeapSize, _max_alignment);
   43.69  
   43.70    // Check heap parameter properties
   43.71 -  if (initial_heap_byte_size() < M) {
   43.72 +  if (_initial_heap_byte_size < M) {
   43.73      vm_exit_during_initialization("Too small initial heap");
   43.74    }
   43.75    // Check heap parameter properties
   43.76 -  if (min_heap_byte_size() < M) {
   43.77 +  if (_min_heap_byte_size < M) {
   43.78      vm_exit_during_initialization("Too small minimum heap");
   43.79    }
   43.80 -  if (initial_heap_byte_size() <= NewSize) {
   43.81 +  if (_initial_heap_byte_size <= NewSize) {
   43.82       // make sure there is at least some room in old space
   43.83      vm_exit_during_initialization("Too small initial heap for new size specified");
   43.84    }
   43.85 -  if (max_heap_byte_size() < min_heap_byte_size()) {
   43.86 +  if (_max_heap_byte_size < _min_heap_byte_size) {
   43.87      vm_exit_during_initialization("Incompatible minimum and maximum heap sizes specified");
   43.88    }
   43.89 -  if (initial_heap_byte_size() < min_heap_byte_size()) {
   43.90 +  if (_initial_heap_byte_size < _min_heap_byte_size) {
   43.91      vm_exit_during_initialization("Incompatible minimum and initial heap sizes specified");
   43.92    }
   43.93 -  if (max_heap_byte_size() < initial_heap_byte_size()) {
   43.94 +  if (_max_heap_byte_size < _initial_heap_byte_size) {
   43.95      vm_exit_during_initialization("Incompatible initial and maximum heap sizes specified");
   43.96    }
   43.97  
   43.98    if (PrintGCDetails && Verbose) {
   43.99      gclog_or_tty->print_cr("Minimum heap " SIZE_FORMAT "  Initial heap "
  43.100        SIZE_FORMAT "  Maximum heap " SIZE_FORMAT,
  43.101 -      min_heap_byte_size(), initial_heap_byte_size(), max_heap_byte_size());
  43.102 +      _min_heap_byte_size, _initial_heap_byte_size, _max_heap_byte_size);
  43.103    }
  43.104  }
  43.105  
  43.106 @@ -180,15 +148,15 @@
  43.107  
  43.108  size_t GenCollectorPolicy::scale_by_NewRatio_aligned(size_t base_size) {
  43.109    size_t x = base_size / (NewRatio+1);
  43.110 -  size_t new_gen_size = x > min_alignment() ?
  43.111 -                     align_size_down(x, min_alignment()) :
  43.112 -                     min_alignment();
  43.113 +  size_t new_gen_size = x > _min_alignment ?
  43.114 +                     align_size_down(x, _min_alignment) :
  43.115 +                     _min_alignment;
  43.116    return new_gen_size;
  43.117  }
  43.118  
  43.119  size_t GenCollectorPolicy::bound_minus_alignment(size_t desired_size,
  43.120                                                   size_t maximum_size) {
  43.121 -  size_t alignment = min_alignment();
  43.122 +  size_t alignment = _min_alignment;
  43.123    size_t max_minus = maximum_size - alignment;
  43.124    return desired_size < max_minus ? desired_size : max_minus;
  43.125  }
  43.126 @@ -207,8 +175,8 @@
  43.127  
  43.128  void GenCollectorPolicy::initialize_flags() {
  43.129    // All sizes must be multiples of the generation granularity.
  43.130 -  set_min_alignment((uintx) Generation::GenGrain);
  43.131 -  set_max_alignment(compute_max_alignment());
  43.132 +  _min_alignment = (uintx) Generation::GenGrain;
  43.133 +  _max_alignment = compute_max_alignment();
  43.134  
  43.135    CollectorPolicy::initialize_flags();
  43.136  
  43.137 @@ -218,26 +186,26 @@
  43.138    if (NewSize > MaxNewSize) {
  43.139      MaxNewSize = NewSize;
  43.140    }
  43.141 -  NewSize = align_size_down(NewSize, min_alignment());
  43.142 -  MaxNewSize = align_size_down(MaxNewSize, min_alignment());
  43.143 +  NewSize = align_size_down(NewSize, _min_alignment);
  43.144 +  MaxNewSize = align_size_down(MaxNewSize, _min_alignment);
  43.145  
  43.146    // Check validity of heap flags
  43.147 -  assert(NewSize     % min_alignment() == 0, "eden space alignment");
  43.148 -  assert(MaxNewSize  % min_alignment() == 0, "survivor space alignment");
  43.149 +  assert(NewSize     % _min_alignment == 0, "eden space alignment");
  43.150 +  assert(MaxNewSize  % _min_alignment == 0, "survivor space alignment");
  43.151  
  43.152 -  if (NewSize < 3*min_alignment()) {
  43.153 +  if (NewSize < 3 * _min_alignment) {
  43.154       // make sure there room for eden and two survivor spaces
  43.155      vm_exit_during_initialization("Too small new size specified");
  43.156    }
  43.157    if (SurvivorRatio < 1 || NewRatio < 1) {
  43.158 -    vm_exit_during_initialization("Invalid heap ratio specified");
  43.159 +    vm_exit_during_initialization("Invalid young gen ratio specified");
  43.160    }
  43.161  }
  43.162  
  43.163  void TwoGenerationCollectorPolicy::initialize_flags() {
  43.164    GenCollectorPolicy::initialize_flags();
  43.165  
  43.166 -  OldSize = align_size_down(OldSize, min_alignment());
  43.167 +  OldSize = align_size_down(OldSize, _min_alignment);
  43.168  
  43.169    if (FLAG_IS_CMDLINE(OldSize) && FLAG_IS_DEFAULT(NewSize)) {
  43.170      // NewRatio will be used later to set the young generation size so we use
  43.171 @@ -246,11 +214,11 @@
  43.172      assert(NewRatio > 0, "NewRatio should have been set up earlier");
  43.173      size_t calculated_heapsize = (OldSize / NewRatio) * (NewRatio + 1);
  43.174  
  43.175 -    calculated_heapsize = align_size_up(calculated_heapsize, max_alignment());
  43.176 +    calculated_heapsize = align_size_up(calculated_heapsize, _max_alignment);
  43.177      MaxHeapSize = calculated_heapsize;
  43.178      InitialHeapSize = calculated_heapsize;
  43.179    }
  43.180 -  MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());
  43.181 +  MaxHeapSize = align_size_up(MaxHeapSize, _max_alignment);
  43.182  
  43.183    // adjust max heap size if necessary
  43.184    if (NewSize + OldSize > MaxHeapSize) {
  43.185 @@ -260,18 +228,18 @@
  43.186        uintx calculated_size = NewSize + OldSize;
  43.187        double shrink_factor = (double) MaxHeapSize / calculated_size;
  43.188        // align
  43.189 -      NewSize = align_size_down((uintx) (NewSize * shrink_factor), min_alignment());
  43.190 +      NewSize = align_size_down((uintx) (NewSize * shrink_factor), _min_alignment);
  43.191        // OldSize is already aligned because above we aligned MaxHeapSize to
  43.192 -      // max_alignment(), and we just made sure that NewSize is aligned to
  43.193 -      // min_alignment(). In initialize_flags() we verified that max_alignment()
  43.194 -      // is a multiple of min_alignment().
  43.195 +      // _max_alignment, and we just made sure that NewSize is aligned to
  43.196 +      // _min_alignment. In initialize_flags() we verified that _max_alignment
  43.197 +      // is a multiple of _min_alignment.
  43.198        OldSize = MaxHeapSize - NewSize;
  43.199      } else {
  43.200        MaxHeapSize = NewSize + OldSize;
  43.201      }
  43.202    }
  43.203    // need to do this again
  43.204 -  MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());
  43.205 +  MaxHeapSize = align_size_up(MaxHeapSize, _max_alignment);
  43.206  
  43.207    // adjust max heap size if necessary
  43.208    if (NewSize + OldSize > MaxHeapSize) {
  43.209 @@ -281,24 +249,24 @@
  43.210        uintx calculated_size = NewSize + OldSize;
  43.211        double shrink_factor = (double) MaxHeapSize / calculated_size;
  43.212        // align
  43.213 -      NewSize = align_size_down((uintx) (NewSize * shrink_factor), min_alignment());
  43.214 +      NewSize = align_size_down((uintx) (NewSize * shrink_factor), _min_alignment);
  43.215        // OldSize is already aligned because above we aligned MaxHeapSize to
  43.216 -      // max_alignment(), and we just made sure that NewSize is aligned to
  43.217 -      // min_alignment(). In initialize_flags() we verified that max_alignment()
  43.218 -      // is a multiple of min_alignment().
  43.219 +      // _max_alignment, and we just made sure that NewSize is aligned to
  43.220 +      // _min_alignment. In initialize_flags() we verified that _max_alignment
  43.221 +      // is a multiple of _min_alignment.
  43.222        OldSize = MaxHeapSize - NewSize;
  43.223      } else {
  43.224        MaxHeapSize = NewSize + OldSize;
  43.225      }
  43.226    }
  43.227    // need to do this again
  43.228 -  MaxHeapSize = align_size_up(MaxHeapSize, max_alignment());
  43.229 +  MaxHeapSize = align_size_up(MaxHeapSize, _max_alignment);
  43.230  
  43.231    always_do_update_barrier = UseConcMarkSweepGC;
  43.232  
  43.233    // Check validity of heap flags
  43.234 -  assert(OldSize     % min_alignment() == 0, "old space alignment");
  43.235 -  assert(MaxHeapSize % max_alignment() == 0, "maximum heap alignment");
  43.236 +  assert(OldSize     % _min_alignment == 0, "old space alignment");
  43.237 +  assert(MaxHeapSize % _max_alignment == 0, "maximum heap alignment");
  43.238  }
  43.239  
  43.240  // Values set on the command line win over any ergonomically
  43.241 @@ -313,7 +281,7 @@
  43.242  void GenCollectorPolicy::initialize_size_info() {
  43.243    CollectorPolicy::initialize_size_info();
  43.244  
  43.245 -  // min_alignment() is used for alignment within a generation.
  43.246 +  // _min_alignment is used for alignment within a generation.
  43.247    // There is additional alignment done down stream for some
  43.248    // collectors that sometimes causes unwanted rounding up of
  43.249    // generations sizes.
  43.250 @@ -322,18 +290,18 @@
  43.251  
  43.252    size_t max_new_size = 0;
  43.253    if (FLAG_IS_CMDLINE(MaxNewSize) || FLAG_IS_ERGO(MaxNewSize)) {
  43.254 -    if (MaxNewSize < min_alignment()) {
  43.255 -      max_new_size = min_alignment();
  43.256 +    if (MaxNewSize < _min_alignment) {
  43.257 +      max_new_size = _min_alignment;
  43.258      }
  43.259 -    if (MaxNewSize >= max_heap_byte_size()) {
  43.260 -      max_new_size = align_size_down(max_heap_byte_size() - min_alignment(),
  43.261 -                                     min_alignment());
  43.262 +    if (MaxNewSize >= _max_heap_byte_size) {
  43.263 +      max_new_size = align_size_down(_max_heap_byte_size - _min_alignment,
  43.264 +                                     _min_alignment);
  43.265        warning("MaxNewSize (" SIZE_FORMAT "k) is equal to or "
  43.266          "greater than the entire heap (" SIZE_FORMAT "k).  A "
  43.267          "new generation size of " SIZE_FORMAT "k will be used.",
  43.268 -        MaxNewSize/K, max_heap_byte_size()/K, max_new_size/K);
  43.269 +        MaxNewSize/K, _max_heap_byte_size/K, max_new_size/K);
  43.270      } else {
  43.271 -      max_new_size = align_size_down(MaxNewSize, min_alignment());
  43.272 +      max_new_size = align_size_down(MaxNewSize, _min_alignment);
  43.273      }
  43.274  
  43.275    // The case for FLAG_IS_ERGO(MaxNewSize) could be treated
  43.276 @@ -351,7 +319,7 @@
  43.277    // just accept those choices.  The choices currently made are
  43.278    // not always "wise".
  43.279    } else {
  43.280 -    max_new_size = scale_by_NewRatio_aligned(max_heap_byte_size());
  43.281 +    max_new_size = scale_by_NewRatio_aligned(_max_heap_byte_size);
  43.282      // Bound the maximum size by NewSize below (since it historically
  43.283      // would have been NewSize and because the NewRatio calculation could
  43.284      // yield a size that is too small) and bound it by MaxNewSize above.
  43.285 @@ -364,13 +332,13 @@
  43.286    // Given the maximum gen0 size, determine the initial and
  43.287    // minimum gen0 sizes.
  43.288  
  43.289 -  if (max_heap_byte_size() == min_heap_byte_size()) {
  43.290 +  if (_max_heap_byte_size == _min_heap_byte_size) {
  43.291      // The maximum and minimum heap sizes are the same so
  43.292      // the generations minimum and initial must be the
  43.293      // same as its maximum.
  43.294 -    set_min_gen0_size(max_new_size);
  43.295 -    set_initial_gen0_size(max_new_size);
  43.296 -    set_max_gen0_size(max_new_size);
  43.297 +    _min_gen0_size = max_new_size;
  43.298 +    _initial_gen0_size = max_new_size;
  43.299 +    _max_gen0_size = max_new_size;
  43.300    } else {
  43.301      size_t desired_new_size = 0;
  43.302      if (!FLAG_IS_DEFAULT(NewSize)) {
  43.303 @@ -391,43 +359,37 @@
  43.304        // Use the default NewSize as the floor for these values.  If
  43.305        // NewRatio is overly large, the resulting sizes can be too
  43.306        // small.
  43.307 -      _min_gen0_size = MAX2(scale_by_NewRatio_aligned(min_heap_byte_size()),
  43.308 -                          NewSize);
  43.309 +      _min_gen0_size = MAX2(scale_by_NewRatio_aligned(_min_heap_byte_size), NewSize);
  43.310        desired_new_size =
  43.311 -        MAX2(scale_by_NewRatio_aligned(initial_heap_byte_size()),
  43.312 -             NewSize);
  43.313 +        MAX2(scale_by_NewRatio_aligned(_initial_heap_byte_size), NewSize);
  43.314      }
  43.315  
  43.316      assert(_min_gen0_size > 0, "Sanity check");
  43.317 -    set_initial_gen0_size(desired_new_size);
  43.318 -    set_max_gen0_size(max_new_size);
  43.319 +    _initial_gen0_size = desired_new_size;
  43.320 +    _max_gen0_size = max_new_size;
  43.321  
  43.322      // At this point the desirable initial and minimum sizes have been
  43.323      // determined without regard to the maximum sizes.
  43.324  
  43.325      // Bound the sizes by the corresponding overall heap sizes.
  43.326 -    set_min_gen0_size(
  43.327 -      bound_minus_alignment(_min_gen0_size, min_heap_byte_size()));
  43.328 -    set_initial_gen0_size(
  43.329 -      bound_minus_alignment(_initial_gen0_size, initial_heap_byte_size()));
  43.330 -    set_max_gen0_size(
  43.331 -      bound_minus_alignment(_max_gen0_size, max_heap_byte_size()));
  43.332 +    _min_gen0_size = bound_minus_alignment(_min_gen0_size, _min_heap_byte_size);
  43.333 +    _initial_gen0_size = bound_minus_alignment(_initial_gen0_size, _initial_heap_byte_size);
  43.334 +    _max_gen0_size = bound_minus_alignment(_max_gen0_size, _max_heap_byte_size);
  43.335  
  43.336      // At this point all three sizes have been checked against the
  43.337      // maximum sizes but have not been checked for consistency
  43.338      // among the three.
  43.339  
  43.340      // Final check min <= initial <= max
  43.341 -    set_min_gen0_size(MIN2(_min_gen0_size, _max_gen0_size));
  43.342 -    set_initial_gen0_size(
  43.343 -      MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size));
  43.344 -    set_min_gen0_size(MIN2(_min_gen0_size, _initial_gen0_size));
  43.345 +    _min_gen0_size = MIN2(_min_gen0_size, _max_gen0_size);
  43.346 +    _initial_gen0_size = MAX2(MIN2(_initial_gen0_size, _max_gen0_size), _min_gen0_size);
  43.347 +    _min_gen0_size = MIN2(_min_gen0_size, _initial_gen0_size);
  43.348    }
  43.349  
  43.350    if (PrintGCDetails && Verbose) {
  43.351      gclog_or_tty->print_cr("1: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
  43.352        SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
  43.353 -      min_gen0_size(), initial_gen0_size(), max_gen0_size());
  43.354 +      _min_gen0_size, _initial_gen0_size, _max_gen0_size);
  43.355    }
  43.356  }
  43.357  
  43.358 @@ -447,19 +409,17 @@
  43.359  
  43.360    if ((*gen1_size_ptr + *gen0_size_ptr) > heap_size) {
  43.361      if ((heap_size < (*gen0_size_ptr + min_gen1_size)) &&
  43.362 -        (heap_size >= min_gen1_size + min_alignment())) {
  43.363 +        (heap_size >= min_gen1_size + _min_alignment)) {
  43.364        // Adjust gen0 down to accommodate min_gen1_size
  43.365        *gen0_size_ptr = heap_size - min_gen1_size;
  43.366        *gen0_size_ptr =
  43.367 -        MAX2((uintx)align_size_down(*gen0_size_ptr, min_alignment()),
  43.368 -             min_alignment());
  43.369 +        MAX2((uintx)align_size_down(*gen0_size_ptr, _min_alignment), _min_alignment);
  43.370        assert(*gen0_size_ptr > 0, "Min gen0 is too large");
  43.371        result = true;
  43.372      } else {
  43.373        *gen1_size_ptr = heap_size - *gen0_size_ptr;
  43.374        *gen1_size_ptr =
  43.375 -        MAX2((uintx)align_size_down(*gen1_size_ptr, min_alignment()),
  43.376 -                       min_alignment());
  43.377 +        MAX2((uintx)align_size_down(*gen1_size_ptr, _min_alignment), _min_alignment);
  43.378      }
  43.379    }
  43.380    return result;
  43.381 @@ -480,10 +440,9 @@
  43.382    // The maximum gen1 size can be determined from the maximum gen0
  43.383    // and maximum heap size since no explicit flags exits
  43.384    // for setting the gen1 maximum.
  43.385 -  _max_gen1_size = max_heap_byte_size() - _max_gen0_size;
  43.386 +  _max_gen1_size = _max_heap_byte_size - _max_gen0_size;
  43.387    _max_gen1_size =
  43.388 -    MAX2((uintx)align_size_down(_max_gen1_size, min_alignment()),
  43.389 -         min_alignment());
  43.390 +    MAX2((uintx)align_size_down(_max_gen1_size, _min_alignment), _min_alignment);
  43.391    // If no explicit command line flag has been set for the
  43.392    // gen1 size, use what is left for gen1.
  43.393    if (FLAG_IS_DEFAULT(OldSize) || FLAG_IS_ERGO(OldSize)) {
  43.394 @@ -492,70 +451,66 @@
  43.395      // with the overall heap size).  In either case make
  43.396      // the minimum, maximum and initial sizes consistent
  43.397      // with the gen0 sizes and the overall heap sizes.
  43.398 -    assert(min_heap_byte_size() > _min_gen0_size,
  43.399 +    assert(_min_heap_byte_size > _min_gen0_size,
  43.400        "gen0 has an unexpected minimum size");
  43.401 -    set_min_gen1_size(min_heap_byte_size() - min_gen0_size());
  43.402 -    set_min_gen1_size(
  43.403 -      MAX2((uintx)align_size_down(_min_gen1_size, min_alignment()),
  43.404 -           min_alignment()));
  43.405 -    set_initial_gen1_size(initial_heap_byte_size() - initial_gen0_size());
  43.406 -    set_initial_gen1_size(
  43.407 -      MAX2((uintx)align_size_down(_initial_gen1_size, min_alignment()),
  43.408 -           min_alignment()));
  43.409 -
  43.410 +    _min_gen1_size = _min_heap_byte_size - _min_gen0_size;
  43.411 +    _min_gen1_size =
  43.412 +      MAX2((uintx)align_size_down(_min_gen1_size, _min_alignment), _min_alignment);
  43.413 +    _initial_gen1_size = _initial_heap_byte_size - _initial_gen0_size;
  43.414 +    _initial_gen1_size =
  43.415 +      MAX2((uintx)align_size_down(_initial_gen1_size, _min_alignment), _min_alignment);
  43.416    } else {
  43.417      // It's been explicitly set on the command line.  Use the
  43.418      // OldSize and then determine the consequences.
  43.419 -    set_min_gen1_size(OldSize);
  43.420 -    set_initial_gen1_size(OldSize);
  43.421 +    _min_gen1_size = OldSize;
  43.422 +    _initial_gen1_size = OldSize;
  43.423  
  43.424      // If the user has explicitly set an OldSize that is inconsistent
  43.425      // with other command line flags, issue a warning.
  43.426      // The generation minimums and the overall heap mimimum should
  43.427      // be within one heap alignment.
  43.428 -    if ((_min_gen1_size + _min_gen0_size + min_alignment()) <
  43.429 -           min_heap_byte_size()) {
  43.430 +    if ((_min_gen1_size + _min_gen0_size + _min_alignment) < _min_heap_byte_size) {
  43.431        warning("Inconsistency between minimum heap size and minimum "
  43.432 -          "generation sizes: using minimum heap = " SIZE_FORMAT,
  43.433 -          min_heap_byte_size());
  43.434 +              "generation sizes: using minimum heap = " SIZE_FORMAT,
  43.435 +              _min_heap_byte_size);
  43.436      }
  43.437      if ((OldSize > _max_gen1_size)) {
  43.438        warning("Inconsistency between maximum heap size and maximum "
  43.439 -          "generation sizes: using maximum heap = " SIZE_FORMAT
  43.440 -          " -XX:OldSize flag is being ignored",
  43.441 -          max_heap_byte_size());
  43.442 +              "generation sizes: using maximum heap = " SIZE_FORMAT
  43.443 +              " -XX:OldSize flag is being ignored",
  43.444 +              _max_heap_byte_size);
  43.445      }
  43.446      // If there is an inconsistency between the OldSize and the minimum and/or
  43.447      // initial size of gen0, since OldSize was explicitly set, OldSize wins.
  43.448      if (adjust_gen0_sizes(&_min_gen0_size, &_min_gen1_size,
  43.449 -                          min_heap_byte_size(), OldSize)) {
  43.450 +                          _min_heap_byte_size, OldSize)) {
  43.451        if (PrintGCDetails && Verbose) {
  43.452          gclog_or_tty->print_cr("2: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
  43.453                SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
  43.454 -              min_gen0_size(), initial_gen0_size(), max_gen0_size());
  43.455 +              _min_gen0_size, _initial_gen0_size, _max_gen0_size);
  43.456        }
  43.457      }
  43.458      // Initial size
  43.459      if (adjust_gen0_sizes(&_initial_gen0_size, &_initial_gen1_size,
  43.460 -                         initial_heap_byte_size(), OldSize)) {
  43.461 +                          _initial_heap_byte_size, OldSize)) {
  43.462        if (PrintGCDetails && Verbose) {
  43.463          gclog_or_tty->print_cr("3: Minimum gen0 " SIZE_FORMAT "  Initial gen0 "
  43.464            SIZE_FORMAT "  Maximum gen0 " SIZE_FORMAT,
  43.465 -          min_gen0_size(), initial_gen0_size(), max_gen0_size());
  43.466 +          _min_gen0_size, _initial_gen0_size, _max_gen0_size);
  43.467        }
  43.468      }
  43.469    }
  43.470    // Enforce the maximum gen1 size.
  43.471 -  set_min_gen1_size(MIN2(_min_gen1_size, _max_gen1_size));
  43.472 +  _min_gen1_size = MIN2(_min_gen1_size, _max_gen1_size);
  43.473  
  43.474    // Check that min gen1 <= initial gen1 <= max gen1
  43.475 -  set_initial_gen1_size(MAX2(_initial_gen1_size, _min_gen1_size));
  43.476 -  set_initial_gen1_size(MIN2(_initial_gen1_size, _max_gen1_size));
  43.477 +  _initial_gen1_size = MAX2(_initial_gen1_size, _min_gen1_size);
  43.478 +  _initial_gen1_size = MIN2(_initial_gen1_size, _max_gen1_size);
  43.479  
  43.480    if (PrintGCDetails && Verbose) {
  43.481      gclog_or_tty->print_cr("Minimum gen1 " SIZE_FORMAT "  Initial gen1 "
  43.482        SIZE_FORMAT "  Maximum gen1 " SIZE_FORMAT,
  43.483 -      min_gen1_size(), initial_gen1_size(), max_gen1_size());
  43.484 +      _min_gen1_size, _initial_gen1_size, _max_gen1_size);
  43.485    }
  43.486  }
  43.487  
    44.1 --- a/src/share/vm/memory/collectorPolicy.hpp	Sun Oct 13 21:14:04 2013 +0100
    44.2 +++ b/src/share/vm/memory/collectorPolicy.hpp	Thu Oct 17 14:20:57 2013 -0700
    44.3 @@ -101,17 +101,12 @@
    44.4    // Return maximum heap alignment that may be imposed by the policy
    44.5    static size_t compute_max_alignment();
    44.6  
    44.7 -  void set_min_alignment(size_t align)         { _min_alignment = align; }
    44.8    size_t min_alignment()                       { return _min_alignment; }
    44.9 -  void set_max_alignment(size_t align)         { _max_alignment = align; }
   44.10    size_t max_alignment()                       { return _max_alignment; }
   44.11  
   44.12    size_t initial_heap_byte_size() { return _initial_heap_byte_size; }
   44.13 -  void set_initial_heap_byte_size(size_t v) { _initial_heap_byte_size = v; }
   44.14    size_t max_heap_byte_size()     { return _max_heap_byte_size; }
   44.15 -  void set_max_heap_byte_size(size_t v) { _max_heap_byte_size = v; }
   44.16    size_t min_heap_byte_size()     { return _min_heap_byte_size; }
   44.17 -  void set_min_heap_byte_size(size_t v) { _min_heap_byte_size = v; }
   44.18  
   44.19    enum Name {
   44.20      CollectorPolicyKind,
   44.21 @@ -248,12 +243,9 @@
   44.22  
   44.23   public:
   44.24    // Accessors
   44.25 -  size_t min_gen0_size() { return _min_gen0_size; }
   44.26 -  void set_min_gen0_size(size_t v) { _min_gen0_size = v; }
   44.27 +  size_t min_gen0_size()     { return _min_gen0_size; }
   44.28    size_t initial_gen0_size() { return _initial_gen0_size; }
   44.29 -  void set_initial_gen0_size(size_t v) { _initial_gen0_size = v; }
   44.30 -  size_t max_gen0_size() { return _max_gen0_size; }
   44.31 -  void set_max_gen0_size(size_t v) { _max_gen0_size = v; }
   44.32 +  size_t max_gen0_size()     { return _max_gen0_size; }
   44.33  
   44.34    virtual int number_of_generations() = 0;
   44.35  
   44.36 @@ -302,12 +294,9 @@
   44.37  
   44.38   public:
   44.39    // Accessors
   44.40 -  size_t min_gen1_size() { return _min_gen1_size; }
   44.41 -  void set_min_gen1_size(size_t v) { _min_gen1_size = v; }
   44.42 +  size_t min_gen1_size()     { return _min_gen1_size; }
   44.43    size_t initial_gen1_size() { return _initial_gen1_size; }
   44.44 -  void set_initial_gen1_size(size_t v) { _initial_gen1_size = v; }
   44.45 -  size_t max_gen1_size() { return _max_gen1_size; }
   44.46 -  void set_max_gen1_size(size_t v) { _max_gen1_size = v; }
   44.47 +  size_t max_gen1_size()     { return _max_gen1_size; }
   44.48  
   44.49    // Inherited methods
   44.50    TwoGenerationCollectorPolicy* as_two_generation_policy() { return this; }
    45.1 --- a/src/share/vm/memory/filemap.hpp	Sun Oct 13 21:14:04 2013 +0100
    45.2 +++ b/src/share/vm/memory/filemap.hpp	Thu Oct 17 14:20:57 2013 -0700
    45.3 @@ -26,6 +26,7 @@
    45.4  #define SHARE_VM_MEMORY_FILEMAP_HPP
    45.5  
    45.6  #include "memory/metaspaceShared.hpp"
    45.7 +#include "memory/metaspace.hpp"
    45.8  
    45.9  // Layout of the file:
   45.10  //  header: dump of archive instance plus versioning info, datestamp, etc.
    46.1 --- a/src/share/vm/memory/heapInspection.hpp	Sun Oct 13 21:14:04 2013 +0100
    46.2 +++ b/src/share/vm/memory/heapInspection.hpp	Thu Oct 17 14:20:57 2013 -0700
    46.3 @@ -73,6 +73,10 @@
    46.4          "Number of bytes used by the InstanceKlass::methods() array") \
    46.5      f(method_ordering_bytes, IK_method_ordering, \
    46.6          "Number of bytes used by the InstanceKlass::method_ordering() array") \
    46.7 +    f(default_methods_array_bytes, IK_default_methods, \
    46.8 +        "Number of bytes used by the InstanceKlass::default_methods() array") \
    46.9 +    f(default_vtable_indices_bytes, IK_default_vtable_indices, \
   46.10 +        "Number of bytes used by the InstanceKlass::default_vtable_indices() array") \
   46.11      f(local_interfaces_bytes, IK_local_interfaces, \
   46.12          "Number of bytes used by the InstanceKlass::local_interfaces() array") \
   46.13      f(transitive_interfaces_bytes, IK_transitive_interfaces, \
    47.1 --- a/src/share/vm/memory/metaspace.cpp	Sun Oct 13 21:14:04 2013 +0100
    47.2 +++ b/src/share/vm/memory/metaspace.cpp	Thu Oct 17 14:20:57 2013 -0700
    47.3 @@ -29,17 +29,21 @@
    47.4  #include "memory/collectorPolicy.hpp"
    47.5  #include "memory/filemap.hpp"
    47.6  #include "memory/freeList.hpp"
    47.7 +#include "memory/gcLocker.hpp"
    47.8  #include "memory/metablock.hpp"
    47.9  #include "memory/metachunk.hpp"
   47.10  #include "memory/metaspace.hpp"
   47.11  #include "memory/metaspaceShared.hpp"
   47.12  #include "memory/resourceArea.hpp"
   47.13  #include "memory/universe.hpp"
   47.14 +#include "runtime/atomic.inline.hpp"
   47.15  #include "runtime/globals.hpp"
   47.16 +#include "runtime/init.hpp"
   47.17  #include "runtime/java.hpp"
   47.18  #include "runtime/mutex.hpp"
   47.19  #include "runtime/orderAccess.hpp"
   47.20  #include "services/memTracker.hpp"
   47.21 +#include "services/memoryService.hpp"
   47.22  #include "utilities/copy.hpp"
   47.23  #include "utilities/debug.hpp"
   47.24  
   47.25 @@ -84,13 +88,7 @@
   47.26    return (ChunkIndex) (i+1);
   47.27  }
   47.28  
   47.29 -// Originally _capacity_until_GC was set to MetaspaceSize here but
   47.30 -// the default MetaspaceSize before argument processing was being
   47.31 -// used which was not the desired value.  See the code
   47.32 -// in should_expand() to see how the initialization is handled
   47.33 -// now.
   47.34 -size_t MetaspaceGC::_capacity_until_GC = 0;
   47.35 -bool MetaspaceGC::_expand_after_GC = false;
   47.36 +volatile intptr_t MetaspaceGC::_capacity_until_GC = 0;
   47.37  uint MetaspaceGC::_shrink_factor = 0;
   47.38  bool MetaspaceGC::_should_concurrent_collect = false;
   47.39  
   47.40 @@ -293,9 +291,10 @@
   47.41    MetaWord* end() const { return (MetaWord*) _virtual_space.high(); }
   47.42  
   47.43    size_t reserved_words() const  { return _virtual_space.reserved_size() / BytesPerWord; }
   47.44 -  size_t expanded_words() const  { return _virtual_space.committed_size() / BytesPerWord; }
   47.45    size_t committed_words() const { return _virtual_space.actual_committed_size() / BytesPerWord; }
   47.46  
   47.47 +  bool is_pre_committed() const { return _virtual_space.special(); }
   47.48 +
   47.49    // address of next available space in _virtual_space;
   47.50    // Accessors
   47.51    VirtualSpaceNode* next() { return _next; }
   47.52 @@ -337,7 +336,7 @@
   47.53  
   47.54    // Expands/shrinks the committed space in a virtual space.  Delegates
   47.55    // to Virtualspace
   47.56 -  bool expand_by(size_t words, bool pre_touch = false);
   47.57 +  bool expand_by(size_t min_words, size_t preferred_words);
   47.58  
   47.59    // In preparation for deleting this node, remove all the chunks
   47.60    // in the node from any freelist.
   47.61 @@ -351,29 +350,64 @@
   47.62    void print_on(outputStream* st) const;
   47.63  };
   47.64  
   47.65 +#define assert_is_ptr_aligned(ptr, alignment) \
   47.66 +  assert(is_ptr_aligned(ptr, alignment),      \
   47.67 +    err_msg(PTR_FORMAT " is not aligned to "  \
   47.68 +      SIZE_FORMAT, ptr, alignment))
   47.69 +
   47.70 +#define assert_is_size_aligned(size, alignment) \
   47.71 +  assert(is_size_aligned(size, alignment),      \
   47.72 +    err_msg(SIZE_FORMAT " is not aligned to "   \
   47.73 +       SIZE_FORMAT, size, alignment))
   47.74 +
   47.75 +
   47.76 +// Decide if large pages should be committed when the memory is reserved.
   47.77 +static bool should_commit_large_pages_when_reserving(size_t bytes) {
   47.78 +  if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) {
   47.79 +    size_t words = bytes / BytesPerWord;
   47.80 +    bool is_class = false; // We never reserve large pages for the class space.
   47.81 +    if (MetaspaceGC::can_expand(words, is_class) &&
   47.82 +        MetaspaceGC::allowed_expansion() >= words) {
   47.83 +      return true;
   47.84 +    }
   47.85 +  }
   47.86 +
   47.87 +  return false;
   47.88 +}
   47.89 +
   47.90    // byte_size is the size of the associated virtualspace.
   47.91 -VirtualSpaceNode::VirtualSpaceNode(size_t byte_size) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
   47.92 -  // align up to vm allocation granularity
   47.93 -  byte_size = align_size_up(byte_size, os::vm_allocation_granularity());
   47.94 +VirtualSpaceNode::VirtualSpaceNode(size_t bytes) : _top(NULL), _next(NULL), _rs(), _container_count(0) {
   47.95 +  assert_is_size_aligned(bytes, Metaspace::reserve_alignment());
   47.96  
   47.97    // This allocates memory with mmap.  For DumpSharedspaces, try to reserve
   47.98    // configurable address, generally at the top of the Java heap so other
   47.99    // memory addresses don't conflict.
  47.100    if (DumpSharedSpaces) {
  47.101 -    char* shared_base = (char*)SharedBaseAddress;
  47.102 -    _rs = ReservedSpace(byte_size, 0, false, shared_base, 0);
  47.103 +    bool large_pages = false; // No large pages when dumping the CDS archive.
  47.104 +    char* shared_base = (char*)align_ptr_up((char*)SharedBaseAddress, Metaspace::reserve_alignment());
  47.105 +
  47.106 +    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages, shared_base, 0);
  47.107      if (_rs.is_reserved()) {
  47.108        assert(shared_base == 0 || _rs.base() == shared_base, "should match");
  47.109      } else {
  47.110        // Get a mmap region anywhere if the SharedBaseAddress fails.
  47.111 -      _rs = ReservedSpace(byte_size);
  47.112 +      _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
  47.113      }
  47.114      MetaspaceShared::set_shared_rs(&_rs);
  47.115    } else {
  47.116 -    _rs = ReservedSpace(byte_size);
  47.117 +    bool large_pages = should_commit_large_pages_when_reserving(bytes);
  47.118 +
  47.119 +    _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages);
  47.120    }
  47.121  
  47.122 -  MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
  47.123 +  if (_rs.is_reserved()) {
  47.124 +    assert(_rs.base() != NULL, "Catch if we get a NULL address");
  47.125 +    assert(_rs.size() != 0, "Catch if we get a 0 size");
  47.126 +    assert_is_ptr_aligned(_rs.base(), Metaspace::reserve_alignment());
  47.127 +    assert_is_size_aligned(_rs.size(), Metaspace::reserve_alignment());
  47.128 +
  47.129 +    MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass);
  47.130 +  }
  47.131  }
  47.132  
  47.133  void VirtualSpaceNode::purge(ChunkManager* chunk_manager) {
  47.134 @@ -410,8 +444,6 @@
  47.135  #endif
  47.136  
  47.137  // List of VirtualSpaces for metadata allocation.
  47.138 -// It has a  _next link for singly linked list and a MemRegion
  47.139 -// for total space in the VirtualSpace.
  47.140  class VirtualSpaceList : public CHeapObj<mtClass> {
  47.141    friend class VirtualSpaceNode;
  47.142  
  47.143 @@ -419,16 +451,13 @@
  47.144      VirtualSpaceSize = 256 * K
  47.145    };
  47.146  
  47.147 -  // Global list of virtual spaces
  47.148    // Head of the list
  47.149    VirtualSpaceNode* _virtual_space_list;
  47.150    // virtual space currently being used for allocations
  47.151    VirtualSpaceNode* _current_virtual_space;
  47.152  
  47.153 -  // Can this virtual list allocate >1 spaces?  Also, used to determine
  47.154 -  // whether to allocate unlimited small chunks in this virtual space
  47.155 +  // Is this VirtualSpaceList used for the compressed class space
  47.156    bool _is_class;
  47.157 -  bool can_grow() const { return !is_class() || !UseCompressedClassPointers; }
  47.158  
  47.159    // Sum of reserved and committed memory in the virtual spaces
  47.160    size_t _reserved_words;
  47.161 @@ -453,7 +482,7 @@
  47.162    // Get another virtual space and add it to the list.  This
  47.163    // is typically prompted by a failed attempt to allocate a chunk
  47.164    // and is typically followed by the allocation of a chunk.
  47.165 -  bool grow_vs(size_t vs_word_size);
  47.166 +  bool create_new_virtual_space(size_t vs_word_size);
  47.167  
  47.168   public:
  47.169    VirtualSpaceList(size_t word_size);
  47.170 @@ -465,12 +494,12 @@
  47.171                             size_t grow_chunks_by_words,
  47.172                             size_t medium_chunk_bunch);
  47.173  
  47.174 -  bool expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch = false);
  47.175 -
  47.176 -  // Get the first chunk for a Metaspace.  Used for
  47.177 -  // special cases such as the boot class loader, reflection
  47.178 -  // class loader and anonymous class loader.
  47.179 -  Metachunk* get_initialization_chunk(size_t word_size, size_t chunk_bunch);
  47.180 +  bool expand_node_by(VirtualSpaceNode* node,
  47.181 +                      size_t min_words,
  47.182 +                      size_t preferred_words);
  47.183 +
  47.184 +  bool expand_by(size_t min_words,
  47.185 +                 size_t preferred_words);
  47.186  
  47.187    VirtualSpaceNode* current_virtual_space() {
  47.188      return _current_virtual_space;
  47.189 @@ -478,8 +507,7 @@
  47.190  
  47.191    bool is_class() const { return _is_class; }
  47.192  
  47.193 -  // Allocate the first virtualspace.
  47.194 -  void initialize(size_t word_size);
  47.195 +  bool initialization_succeeded() { return _virtual_space_list != NULL; }
  47.196  
  47.197    size_t reserved_words()  { return _reserved_words; }
  47.198    size_t reserved_bytes()  { return reserved_words() * BytesPerWord; }
  47.199 @@ -708,6 +736,9 @@
  47.200    // and allocates from that chunk.
  47.201    MetaWord* grow_and_allocate(size_t word_size);
  47.202  
  47.203 +  // Notify memory usage to MemoryService.
  47.204 +  void track_metaspace_memory_usage();
  47.205 +
  47.206    // debugging support.
  47.207  
  47.208    void dump(outputStream* const out) const;
  47.209 @@ -869,6 +900,12 @@
  47.210    MetaWord* chunk_limit = top();
  47.211    assert(chunk_limit != NULL, "Not safe to call this method");
  47.212  
  47.213 +  // The virtual spaces are always expanded by the
  47.214 +  // commit granularity to enforce the following condition.
  47.215 +  // Without this the is_available check will not work correctly.
  47.216 +  assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
  47.217 +      "The committed memory doesn't match the expanded memory.");
  47.218 +
  47.219    if (!is_available(chunk_word_size)) {
  47.220      if (TraceMetadataChunkAllocation) {
  47.221        gclog_or_tty->print("VirtualSpaceNode::take_from_committed() not available %d words ", chunk_word_size);
  47.222 @@ -888,14 +925,21 @@
  47.223  
  47.224  
  47.225  // Expand the virtual space (commit more of the reserved space)
  47.226 -bool VirtualSpaceNode::expand_by(size_t words, bool pre_touch) {
  47.227 -  size_t bytes = words * BytesPerWord;
  47.228 -  bool result =  virtual_space()->expand_by(bytes, pre_touch);
  47.229 -  if (TraceMetavirtualspaceAllocation && !result) {
  47.230 -    gclog_or_tty->print_cr("VirtualSpaceNode::expand_by() failed "
  47.231 -                           "for byte size " SIZE_FORMAT, bytes);
  47.232 -    virtual_space()->print_on(gclog_or_tty);
  47.233 +bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) {
  47.234 +  size_t min_bytes = min_words * BytesPerWord;
  47.235 +  size_t preferred_bytes = preferred_words * BytesPerWord;
  47.236 +
  47.237 +  size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size();
  47.238 +
  47.239 +  if (uncommitted < min_bytes) {
  47.240 +    return false;
  47.241    }
  47.242 +
  47.243 +  size_t commit = MIN2(preferred_bytes, uncommitted);
  47.244 +  bool result = virtual_space()->expand_by(commit, false);
  47.245 +
  47.246 +  assert(result, "Failed to commit memory");
  47.247 +
  47.248    return result;
  47.249  }
  47.250  
  47.251 @@ -914,12 +958,23 @@
  47.252      return false;
  47.253    }
  47.254  
  47.255 -  // An allocation out of this Virtualspace that is larger
  47.256 -  // than an initial commit size can waste that initial committed
  47.257 -  // space.
  47.258 -  size_t committed_byte_size = 0;
  47.259 -  bool result = virtual_space()->initialize(_rs, committed_byte_size);
  47.260 +  // These are necessary restriction to make sure that the virtual space always
  47.261 +  // grows in steps of Metaspace::commit_alignment(). If both base and size are
  47.262 +  // aligned only the middle alignment of the VirtualSpace is used.
  47.263 +  assert_is_ptr_aligned(_rs.base(), Metaspace::commit_alignment());
  47.264 +  assert_is_size_aligned(_rs.size(), Metaspace::commit_alignment());
  47.265 +
  47.266 +  // ReservedSpaces marked as special will have the entire memory
  47.267 +  // pre-committed. Setting a committed size will make sure that
  47.268 +  // committed_size and actual_committed_size agrees.
  47.269 +  size_t pre_committed_size = _rs.special() ? _rs.size() : 0;
  47.270 +
  47.271 +  bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size,
  47.272 +                                            Metaspace::commit_alignment());
  47.273    if (result) {
  47.274 +    assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
  47.275 +        "Checking that the pre-committed memory was registered by the VirtualSpace");
  47.276 +
  47.277      set_top((MetaWord*)virtual_space()->low());
  47.278      set_reserved(MemRegion((HeapWord*)_rs.base(),
  47.279                   (HeapWord*)(_rs.base() + _rs.size())));
  47.280 @@ -976,13 +1031,23 @@
  47.281    _reserved_words = _reserved_words - v;
  47.282  }
  47.283  
  47.284 +#define assert_committed_below_limit()                             \
  47.285 +  assert(MetaspaceAux::committed_bytes() <= MaxMetaspaceSize,      \
  47.286 +      err_msg("Too much committed memory. Committed: " SIZE_FORMAT \
  47.287 +              " limit (MaxMetaspaceSize): " SIZE_FORMAT,           \
  47.288 +          MetaspaceAux::committed_bytes(), MaxMetaspaceSize));
  47.289 +
  47.290  void VirtualSpaceList::inc_committed_words(size_t v) {
  47.291    assert_lock_strong(SpaceManager::expand_lock());
  47.292    _committed_words = _committed_words + v;
  47.293 +
  47.294 +  assert_committed_below_limit();
  47.295  }
  47.296  void VirtualSpaceList::dec_committed_words(size_t v) {
  47.297    assert_lock_strong(SpaceManager::expand_lock());
  47.298    _committed_words = _committed_words - v;
  47.299 +
  47.300 +  assert_committed_below_limit();
  47.301  }
  47.302  
  47.303  void VirtualSpaceList::inc_virtual_space_count() {
  47.304 @@ -1025,8 +1090,8 @@
  47.305      if (vsl->container_count() == 0 && vsl != current_virtual_space()) {
  47.306        // Unlink it from the list
  47.307        if (prev_vsl == vsl) {
  47.308 -        // This is the case of the current note being the first note.
  47.309 -        assert(vsl == virtual_space_list(), "Expected to be the first note");
  47.310 +        // This is the case of the current node being the first node.
  47.311 +        assert(vsl == virtual_space_list(), "Expected to be the first node");
  47.312          set_virtual_space_list(vsl->next());
  47.313        } else {
  47.314          prev_vsl->set_next(vsl->next());
  47.315 @@ -1054,7 +1119,7 @@
  47.316  #endif
  47.317  }
  47.318  
  47.319 -VirtualSpaceList::VirtualSpaceList(size_t word_size ) :
  47.320 +VirtualSpaceList::VirtualSpaceList(size_t word_size) :
  47.321                                     _is_class(false),
  47.322                                     _virtual_space_list(NULL),
  47.323                                     _current_virtual_space(NULL),
  47.324 @@ -1063,9 +1128,7 @@
  47.325                                     _virtual_space_count(0) {
  47.326    MutexLockerEx cl(SpaceManager::expand_lock(),
  47.327                     Mutex::_no_safepoint_check_flag);
  47.328 -  bool initialization_succeeded = grow_vs(word_size);
  47.329 -  assert(initialization_succeeded,
  47.330 -    " VirtualSpaceList initialization should not fail");
  47.331 +  create_new_virtual_space(word_size);
  47.332  }
  47.333  
  47.334  VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) :
  47.335 @@ -1079,8 +1142,9 @@
  47.336                     Mutex::_no_safepoint_check_flag);
  47.337    VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
  47.338    bool succeeded = class_entry->initialize();
  47.339 -  assert(succeeded, " VirtualSpaceList initialization should not fail");
  47.340 -  link_vs(class_entry);
  47.341 +  if (succeeded) {
  47.342 +    link_vs(class_entry);
  47.343 +  }
  47.344  }
  47.345  
  47.346  size_t VirtualSpaceList::free_bytes() {
  47.347 @@ -1088,14 +1152,24 @@
  47.348  }
  47.349  
  47.350  // Allocate another meta virtual space and add it to the list.
  47.351 -bool VirtualSpaceList::grow_vs(size_t vs_word_size) {
  47.352 +bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) {
  47.353    assert_lock_strong(SpaceManager::expand_lock());
  47.354 -  if (vs_word_size == 0) {
  47.355 +
  47.356 +  if (is_class()) {
  47.357 +    assert(false, "We currently don't support more than one VirtualSpace for"
  47.358 +                  " the compressed class space. The initialization of the"
  47.359 +                  " CCS uses another code path and should not hit this path.");
  47.360      return false;
  47.361    }
  47.362 +
  47.363 +  if (vs_word_size == 0) {
  47.364 +    assert(false, "vs_word_size should always be at least _reserve_alignment large.");
  47.365 +    return false;
  47.366 +  }
  47.367 +
  47.368    // Reserve the space
  47.369    size_t vs_byte_size = vs_word_size * BytesPerWord;
  47.370 -  assert(vs_byte_size % os::vm_allocation_granularity() == 0, "Not aligned");
  47.371 +  assert_is_size_aligned(vs_byte_size, Metaspace::reserve_alignment());
  47.372  
  47.373    // Allocate the meta virtual space and initialize it.
  47.374    VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
  47.375 @@ -1103,7 +1177,8 @@
  47.376      delete new_entry;
  47.377      return false;
  47.378    } else {
  47.379 -    assert(new_entry->reserved_words() == vs_word_size, "Must be");
  47.380 +    assert(new_entry->reserved_words() == vs_word_size,
  47.381 +        "Reserved memory size differs from requested memory size");
  47.382      // ensure lock-free iteration sees fully initialized node
  47.383      OrderAccess::storestore();
  47.384      link_vs(new_entry);
  47.385 @@ -1130,20 +1205,67 @@
  47.386    }
  47.387  }
  47.388  
  47.389 -bool VirtualSpaceList::expand_by(VirtualSpaceNode* node, size_t word_size, bool pre_touch) {
  47.390 +bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node,
  47.391 +                                      size_t min_words,
  47.392 +                                      size_t preferred_words) {
  47.393    size_t before = node->committed_words();
  47.394  
  47.395 -  bool result = node->expand_by(word_size, pre_touch);
  47.396 +  bool result = node->expand_by(min_words, preferred_words);
  47.397  
  47.398    size_t after = node->committed_words();
  47.399  
  47.400    // after and before can be the same if the memory was pre-committed.
  47.401 -  assert(after >= before, "Must be");
  47.402 +  assert(after >= before, "Inconsistency");
  47.403    inc_committed_words(after - before);
  47.404  
  47.405    return result;
  47.406  }
  47.407  
  47.408 +bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) {
  47.409 +  assert_is_size_aligned(min_words,       Metaspace::commit_alignment_words());
  47.410 +  assert_is_size_aligned(preferred_words, Metaspace::commit_alignment_words());
  47.411 +  assert(min_words <= preferred_words, "Invalid arguments");
  47.412 +
  47.413 +  if (!MetaspaceGC::can_expand(min_words, this->is_class())) {
  47.414 +    return  false;
  47.415 +  }
  47.416 +
  47.417 +  size_t allowed_expansion_words = MetaspaceGC::allowed_expansion();
  47.418 +  if (allowed_expansion_words < min_words) {
  47.419 +    return false;
  47.420 +  }
  47.421 +
  47.422 +  size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words);
  47.423 +
  47.424 +  // Commit more memory from the the current virtual space.
  47.425 +  bool vs_expanded = expand_node_by(current_virtual_space(),
  47.426 +                                    min_words,
  47.427 +                                    max_expansion_words);
  47.428 +  if (vs_expanded) {
  47.429 +    return true;
  47.430 +  }
  47.431 +
  47.432 +  // Get another virtual space.
  47.433 +  size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words);
  47.434 +  grow_vs_words = align_size_up(grow_vs_words, Metaspace::reserve_alignment_words());
  47.435 +
  47.436 +  if (create_new_virtual_space(grow_vs_words)) {
  47.437 +    if (current_virtual_space()->is_pre_committed()) {
  47.438 +      // The memory was pre-committed, so we are done here.
  47.439 +      assert(min_words <= current_virtual_space()->committed_words(),
  47.440 +          "The new VirtualSpace was pre-committed, so it"
  47.441 +          "should be large enough to fit the alloc request.");
  47.442 +      return true;
  47.443 +    }
  47.444 +
  47.445 +    return expand_node_by(current_virtual_space(),
  47.446 +                          min_words,
  47.447 +                          max_expansion_words);
  47.448 +  }
  47.449 +
  47.450 +  return false;
  47.451 +}
  47.452 +
  47.453  Metachunk* VirtualSpaceList::get_new_chunk(size_t word_size,
  47.454                                             size_t grow_chunks_by_words,
  47.455                                             size_t medium_chunk_bunch) {
  47.456 @@ -1151,63 +1273,27 @@
  47.457    // Allocate a chunk out of the current virtual space.
  47.458    Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
  47.459  
  47.460 -  if (next == NULL) {
  47.461 -    // Not enough room in current virtual space.  Try to commit
  47.462 -    // more space.
  47.463 -    size_t expand_vs_by_words = MAX2(medium_chunk_bunch,
  47.464 -                                     grow_chunks_by_words);
  47.465 -    size_t page_size_words = os::vm_page_size() / BytesPerWord;
  47.466 -    size_t aligned_expand_vs_by_words = align_size_up(expand_vs_by_words,
  47.467 -                                                        page_size_words);
  47.468 -    bool vs_expanded =
  47.469 -      expand_by(current_virtual_space(), aligned_expand_vs_by_words);
  47.470 -    if (!vs_expanded) {
  47.471 -      // Should the capacity of the metaspaces be expanded for
  47.472 -      // this allocation?  If it's the virtual space for classes and is
  47.473 -      // being used for CompressedHeaders, don't allocate a new virtualspace.
  47.474 -      if (can_grow() && MetaspaceGC::should_expand(this, word_size)) {
  47.475 -        // Get another virtual space.
  47.476 -        size_t allocation_aligned_expand_words =
  47.477 -            align_size_up(aligned_expand_vs_by_words, os::vm_allocation_granularity() / BytesPerWord);
  47.478 -        size_t grow_vs_words =
  47.479 -            MAX2((size_t)VirtualSpaceSize, allocation_aligned_expand_words);
  47.480 -        if (grow_vs(grow_vs_words)) {
  47.481 -          // Got it.  It's on the list now.  Get a chunk from it.
  47.482 -          assert(current_virtual_space()->expanded_words() == 0,
  47.483 -              "New virtual space nodes should not have expanded");
  47.484 -
  47.485 -          size_t grow_chunks_by_words_aligned = align_size_up(grow_chunks_by_words,
  47.486 -                                                              page_size_words);
  47.487 -          // We probably want to expand by aligned_expand_vs_by_words here.
  47.488 -          expand_by(current_virtual_space(), grow_chunks_by_words_aligned);
  47.489 -          next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
  47.490 -        }
  47.491 -      } else {
  47.492 -        // Allocation will fail and induce a GC
  47.493 -        if (TraceMetadataChunkAllocation && Verbose) {
  47.494 -          gclog_or_tty->print_cr("VirtualSpaceList::get_new_chunk():"
  47.495 -            " Fail instead of expand the metaspace");
  47.496 -        }
  47.497 -      }
  47.498 -    } else {
  47.499 -      // The virtual space expanded, get a new chunk
  47.500 -      next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
  47.501 -      assert(next != NULL, "Just expanded, should succeed");
  47.502 -    }
  47.503 +  if (next != NULL) {
  47.504 +    return next;
  47.505    }
  47.506  
  47.507 -  assert(next == NULL || (next->next() == NULL && next->prev() == NULL),
  47.508 -         "New chunk is still on some list");
  47.509 -  return next;
  47.510 -}
  47.511 -
  47.512 -Metachunk* VirtualSpaceList::get_initialization_chunk(size_t chunk_word_size,
  47.513 -                                                      size_t chunk_bunch) {
  47.514 -  // Get a chunk from the chunk freelist
  47.515 -  Metachunk* new_chunk = get_new_chunk(chunk_word_size,
  47.516 -                                       chunk_word_size,
  47.517 -                                       chunk_bunch);
  47.518 -  return new_chunk;
  47.519 +  // The expand amount is currently only determined by the requested sizes
  47.520 +  // and not how much committed memory is left in the current virtual space.
  47.521 +
  47.522 +  size_t min_word_size       = align_size_up(grow_chunks_by_words, Metaspace::commit_alignment_words());
  47.523 +  size_t preferred_word_size = align_size_up(medium_chunk_bunch,   Metaspace::commit_alignment_words());
  47.524 +  if (min_word_size >= preferred_word_size) {
  47.525 +    // Can happen when humongous chunks are allocated.
  47.526 +    preferred_word_size = min_word_size;
  47.527 +  }
  47.528 +
  47.529 +  bool expanded = expand_by(min_word_size, preferred_word_size);
  47.530 +  if (expanded) {
  47.531 +    next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
  47.532 +    assert(next != NULL, "The allocation was expected to succeed after the expansion");
  47.533 +  }
  47.534 +
  47.535 +   return next;
  47.536  }
  47.537  
  47.538  void VirtualSpaceList::print_on(outputStream* st) const {
  47.539 @@ -1256,96 +1342,96 @@
  47.540  // Calculate the amount to increase the high water mark (HWM).
  47.541  // Increase by a minimum amount (MinMetaspaceExpansion) so that
  47.542  // another expansion is not requested too soon.  If that is not
  47.543 -// enough to satisfy the allocation (i.e. big enough for a word_size
  47.544 -// allocation), increase by MaxMetaspaceExpansion.  If that is still
  47.545 -// not enough, expand by the size of the allocation (word_size) plus
  47.546 -// some.
  47.547 -size_t MetaspaceGC::delta_capacity_until_GC(size_t word_size) {
  47.548 -  size_t before_inc = MetaspaceGC::capacity_until_GC();
  47.549 -  size_t min_delta_words = MinMetaspaceExpansion / BytesPerWord;
  47.550 -  size_t max_delta_words = MaxMetaspaceExpansion / BytesPerWord;
  47.551 -  size_t page_size_words = os::vm_page_size() / BytesPerWord;
  47.552 -  size_t size_delta_words = align_size_up(word_size, page_size_words);
  47.553 -  size_t delta_words = MAX2(size_delta_words, min_delta_words);
  47.554 -  if (delta_words > min_delta_words) {
  47.555 +// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
  47.556 +// If that is still not enough, expand by the size of the allocation
  47.557 +// plus some.
  47.558 +size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
  47.559 +  size_t min_delta = MinMetaspaceExpansion;
  47.560 +  size_t max_delta = MaxMetaspaceExpansion;
  47.561 +  size_t delta = align_size_up(bytes, Metaspace::commit_alignment());
  47.562 +
  47.563 +  if (delta <= min_delta) {
  47.564 +    delta = min_delta;
  47.565 +  } else if (delta <= max_delta) {
  47.566      // Don't want to hit the high water mark on the next
  47.567      // allocation so make the delta greater than just enough
  47.568      // for this allocation.
  47.569 -    delta_words = MAX2(delta_words, max_delta_words);
  47.570 -    if (delta_words > max_delta_words) {
  47.571 -      // This allocation is large but the next ones are probably not
  47.572 -      // so increase by the minimum.
  47.573 -      delta_words = delta_words + min_delta_words;
  47.574 -    }
  47.575 +    delta = max_delta;
  47.576 +  } else {
  47.577 +    // This allocation is large but the next ones are probably not
  47.578 +    // so increase by the minimum.
  47.579 +    delta = delta + min_delta;
  47.580    }
  47.581 -  return delta_words;
  47.582 +
  47.583 +  assert_is_size_aligned(delta, Metaspace::commit_alignment());
  47.584 +
  47.585 +  return delta;
  47.586  }
  47.587  
  47.588 -bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) {
  47.589 -
  47.590 -  // If the user wants a limit, impose one.
  47.591 -  // The reason for someone using this flag is to limit reserved space.  So
  47.592 -  // for non-class virtual space, compare against virtual spaces that are reserved.
  47.593 -  // For class virtual space, we only compare against the committed space, not
  47.594 -  // reserved space, because this is a larger space prereserved for compressed
  47.595 -  // class pointers.
  47.596 -  if (!FLAG_IS_DEFAULT(MaxMetaspaceSize)) {
  47.597 -    size_t nonclass_allocated = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
  47.598 -    size_t class_allocated    = MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
  47.599 -    size_t real_allocated     = nonclass_allocated + class_allocated;
  47.600 -    if (real_allocated >= MaxMetaspaceSize) {
  47.601 +size_t MetaspaceGC::capacity_until_GC() {
  47.602 +  size_t value = (size_t)OrderAccess::load_ptr_acquire(&_capacity_until_GC);
  47.603 +  assert(value >= MetaspaceSize, "Not initialied properly?");
  47.604 +  return value;
  47.605 +}
  47.606 +
  47.607 +size_t MetaspaceGC::inc_capacity_until_GC(size_t v) {
  47.608 +  assert_is_size_aligned(v, Metaspace::commit_alignment());
  47.609 +
  47.610 +  return (size_t)Atomic::add_ptr(v, &_capacity_until_GC);
  47.611 +}
  47.612 +
  47.613 +size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
  47.614 +  assert_is_size_aligned(v, Metaspace::commit_alignment());
  47.615 +
  47.616 +  return (size_t)Atomic::add_ptr(-(intptr_t)v, &_capacity_until_GC);
  47.617 +}
  47.618 +
  47.619 +bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
  47.620 +  // Check if the compressed class space is full.
  47.621 +  if (is_class && Metaspace::using_class_space()) {
  47.622 +    size_t class_committed = MetaspaceAux::committed_bytes(Metaspace::ClassType);
  47.623 +    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
  47.624        return false;
  47.625      }
  47.626    }
  47.627  
  47.628 -  // Class virtual space should always be expanded.  Call GC for the other
  47.629 -  // metadata virtual space.
  47.630 -  if (Metaspace::using_class_space() &&
  47.631 -      (vsl == Metaspace::class_space_list())) return true;
  47.632 -
  47.633 -  // If this is part of an allocation after a GC, expand
  47.634 -  // unconditionally.
  47.635 -  if (MetaspaceGC::expand_after_GC()) {
  47.636 -    return true;
  47.637 +  // Check if the user has imposed a limit on the metaspace memory.
  47.638 +  size_t committed_bytes = MetaspaceAux::committed_bytes();
  47.639 +  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
  47.640 +    return false;
  47.641    }
  47.642  
  47.643 -
  47.644 -  // If the capacity is below the minimum capacity, allow the
  47.645 -  // expansion.  Also set the high-water-mark (capacity_until_GC)
  47.646 -  // to that minimum capacity so that a GC will not be induced
  47.647 -  // until that minimum capacity is exceeded.
  47.648 -  size_t committed_capacity_bytes = MetaspaceAux::allocated_capacity_bytes();
  47.649 -  size_t metaspace_size_bytes = MetaspaceSize;
  47.650 -  if (committed_capacity_bytes < metaspace_size_bytes ||
  47.651 -      capacity_until_GC() == 0) {
  47.652 -    set_capacity_until_GC(metaspace_size_bytes);
  47.653 -    return true;
  47.654 -  } else {
  47.655 -    if (committed_capacity_bytes < capacity_until_GC()) {
  47.656 -      return true;
  47.657 -    } else {
  47.658 -      if (TraceMetadataChunkAllocation && Verbose) {
  47.659 -        gclog_or_tty->print_cr("  allocation request size " SIZE_FORMAT
  47.660 -                        "  capacity_until_GC " SIZE_FORMAT
  47.661 -                        "  allocated_capacity_bytes " SIZE_FORMAT,
  47.662 -                        word_size,
  47.663 -                        capacity_until_GC(),
  47.664 -                        MetaspaceAux::allocated_capacity_bytes());
  47.665 -      }
  47.666 -      return false;
  47.667 -    }
  47.668 +  return true;
  47.669 +}
  47.670 +
  47.671 +size_t MetaspaceGC::allowed_expansion() {
  47.672 +  size_t committed_bytes = MetaspaceAux::committed_bytes();
  47.673 +
  47.674 +  size_t left_until_max  = MaxMetaspaceSize - committed_bytes;
  47.675 +
  47.676 +  // Always grant expansion if we are initiating the JVM,
  47.677 +  // or if the GC_locker is preventing GCs.
  47.678 +  if (!is_init_completed() || GC_locker::is_active_and_needs_gc()) {
  47.679 +    return left_until_max / BytesPerWord;
  47.680    }
  47.681 +
  47.682 +  size_t capacity_until_gc = capacity_until_GC();
  47.683 +
  47.684 +  if (capacity_until_gc <= committed_bytes) {
  47.685 +    return 0;
  47.686 +  }
  47.687 +
  47.688 +  size_t left_until_GC = capacity_until_gc - committed_bytes;
  47.689 +  size_t left_to_commit = MIN2(left_until_GC, left_until_max);
  47.690 +
  47.691 +  return left_to_commit / BytesPerWord;
  47.692  }
  47.693  
  47.694 -
  47.695 -
  47.696  void MetaspaceGC::compute_new_size() {
  47.697    assert(_shrink_factor <= 100, "invalid shrink factor");
  47.698    uint current_shrink_factor = _shrink_factor;
  47.699    _shrink_factor = 0;
  47.700  
  47.701 -  // Until a faster way of calculating the "used" quantity is implemented,
  47.702 -  // use "capacity".
  47.703    const size_t used_after_gc = MetaspaceAux::allocated_capacity_bytes();
  47.704    const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();
  47.705  
  47.706 @@ -1377,9 +1463,10 @@
  47.707      // If we have less capacity below the metaspace HWM, then
  47.708      // increment the HWM.
  47.709      size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
  47.710 +    expand_bytes = align_size_up(expand_bytes, Metaspace::commit_alignment());
  47.711      // Don't expand unless it's significant
  47.712      if (expand_bytes >= MinMetaspaceExpansion) {
  47.713 -      MetaspaceGC::set_capacity_until_GC(capacity_until_GC + expand_bytes);
  47.714 +      MetaspaceGC::inc_capacity_until_GC(expand_bytes);
  47.715      }
  47.716      if (PrintGCDetails && Verbose) {
  47.717        size_t new_capacity_until_GC = capacity_until_GC;
  47.718 @@ -1436,6 +1523,9 @@
  47.719        // on the third call, and 100% by the fourth call.  But if we recompute
  47.720        // size without shrinking, it goes back to 0%.
  47.721        shrink_bytes = shrink_bytes / 100 * current_shrink_factor;
  47.722 +
  47.723 +      shrink_bytes = align_size_down(shrink_bytes, Metaspace::commit_alignment());
  47.724 +
  47.725        assert(shrink_bytes <= max_shrink_bytes,
  47.726          err_msg("invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
  47.727            shrink_bytes, max_shrink_bytes));
  47.728 @@ -1467,7 +1557,7 @@
  47.729    // Don't shrink unless it's significant
  47.730    if (shrink_bytes >= MinMetaspaceExpansion &&
  47.731        ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
  47.732 -    MetaspaceGC::set_capacity_until_GC(capacity_until_GC - shrink_bytes);
  47.733 +    MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
  47.734    }
  47.735  }
  47.736  
  47.737 @@ -1700,7 +1790,6 @@
  47.738      assert(free_list != NULL, "Sanity check");
  47.739  
  47.740      chunk = free_list->head();
  47.741 -    debug_only(Metachunk* debug_head = chunk;)
  47.742  
  47.743      if (chunk == NULL) {
  47.744        return NULL;
  47.745 @@ -1709,9 +1798,6 @@
  47.746      // Remove the chunk as the head of the list.
  47.747      free_list->remove_chunk(chunk);
  47.748  
  47.749 -    // Chunk is being removed from the chunks free list.
  47.750 -    dec_free_chunks_total(chunk->capacity_word_size());
  47.751 -
  47.752      if (TraceMetadataChunkAllocation && Verbose) {
  47.753        gclog_or_tty->print_cr("ChunkManager::free_chunks_get: free_list "
  47.754                               PTR_FORMAT " head " PTR_FORMAT " size " SIZE_FORMAT,
  47.755 @@ -1722,21 +1808,22 @@
  47.756        word_size,
  47.757        FreeBlockDictionary<Metachunk>::atLeast);
  47.758  
  47.759 -    if (chunk != NULL) {
  47.760 -      if (TraceMetadataHumongousAllocation) {
  47.761 -        size_t waste = chunk->word_size() - word_size;
  47.762 -        gclog_or_tty->print_cr("Free list allocate humongous chunk size "
  47.763 -                               SIZE_FORMAT " for requested size " SIZE_FORMAT
  47.764 -                               " waste " SIZE_FORMAT,
  47.765 -                               chunk->word_size(), word_size, waste);
  47.766 -      }
  47.767 -      // Chunk is being removed from the chunks free list.
  47.768 -      dec_free_chunks_total(chunk->capacity_word_size());
  47.769 -    } else {
  47.770 +    if (chunk == NULL) {
  47.771        return NULL;
  47.772      }
  47.773 +
  47.774 +    if (TraceMetadataHumongousAllocation) {
  47.775 +      size_t waste = chunk->word_size() - word_size;
  47.776 +      gclog_or_tty->print_cr("Free list allocate humongous chunk size "
  47.777 +                             SIZE_FORMAT " for requested size " SIZE_FORMAT
  47.778 +                             " waste " SIZE_FORMAT,
  47.779 +                             chunk->word_size(), word_size, waste);
  47.780 +    }
  47.781    }
  47.782  
  47.783 +  // Chunk is being removed from the chunks free list.
  47.784 +  dec_free_chunks_total(chunk->capacity_word_size());
  47.785 +
  47.786    // Remove it from the links to this freelist
  47.787    chunk->set_next(NULL);
  47.788    chunk->set_prev(NULL);
  47.789 @@ -1977,6 +2064,15 @@
  47.790    return chunk_word_size;
  47.791  }
  47.792  
  47.793 +void SpaceManager::track_metaspace_memory_usage() {
  47.794 +  if (is_init_completed()) {
  47.795 +    if (is_class()) {
  47.796 +      MemoryService::track_compressed_class_memory_usage();
  47.797 +    }
  47.798 +    MemoryService::track_metaspace_memory_usage();
  47.799 +  }
  47.800 +}
  47.801 +
  47.802  MetaWord* SpaceManager::grow_and_allocate(size_t word_size) {
  47.803    assert(vs_list()->current_virtual_space() != NULL,
  47.804           "Should have been set");
  47.805 @@ -2002,15 +2098,24 @@
  47.806    size_t grow_chunks_by_words = calc_chunk_size(word_size);
  47.807    Metachunk* next = get_new_chunk(word_size, grow_chunks_by_words);
  47.808  
  47.809 +  if (next != NULL) {
  47.810 +    Metadebug::deallocate_chunk_a_lot(this, grow_chunks_by_words);
  47.811 +  }
  47.812 +
  47.813 +  MetaWord* mem = NULL;
  47.814 +
  47.815    // If a chunk was available, add it to the in-use chunk list
  47.816    // and do an allocation from it.
  47.817    if (next != NULL) {
  47.818 -    Metadebug::deallocate_chunk_a_lot(this, grow_chunks_by_words);
  47.819      // Add to this manager's list of chunks in use.
  47.820      add_chunk(next, false);
  47.821 -    return next->allocate(word_size);
  47.822 +    mem = next->allocate(word_size);
  47.823    }
  47.824 -  return NULL;
  47.825 +
  47.826 +  // Track metaspace memory usage statistic.
  47.827 +  track_metaspace_memory_usage();
  47.828 +
  47.829 +  return mem;
  47.830  }
  47.831  
  47.832  void SpaceManager::print_on(outputStream* st) const {
  47.833 @@ -2366,6 +2471,7 @@
  47.834      inc_used_metrics(word_size);
  47.835      return current_chunk()->allocate(word_size); // caller handles null result
  47.836    }
  47.837 +
  47.838    if (current_chunk() != NULL) {
  47.839      result = current_chunk()->allocate(word_size);
  47.840    }
  47.841 @@ -2373,7 +2479,8 @@
  47.842    if (result == NULL) {
  47.843      result = grow_and_allocate(word_size);
  47.844    }
  47.845 -  if (result != 0) {
  47.846 +
  47.847 +  if (result != NULL) {
  47.848      inc_used_metrics(word_size);
  47.849      assert(result != (MetaWord*) chunks_in_use(MediumIndex),
  47.850             "Head of the list is being allocated");
  47.851 @@ -2639,24 +2746,26 @@
  47.852  void MetaspaceAux::print_on(outputStream* out) {
  47.853    Metaspace::MetadataType nct = Metaspace::NonClassType;
  47.854  
  47.855 -  out->print_cr(" Metaspace total "
  47.856 -                SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
  47.857 -                " reserved " SIZE_FORMAT "K",
  47.858 -                allocated_capacity_bytes()/K, allocated_used_bytes()/K, reserved_bytes()/K);
  47.859 -
  47.860 -  out->print_cr("  data space     "
  47.861 -                SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
  47.862 -                " reserved " SIZE_FORMAT "K",
  47.863 -                allocated_capacity_bytes(nct)/K,
  47.864 -                allocated_used_bytes(nct)/K,
  47.865 -                reserved_bytes(nct)/K);
  47.866 +  out->print_cr(" Metaspace       "
  47.867 +                "used "      SIZE_FORMAT "K, "
  47.868 +                "capacity "  SIZE_FORMAT "K, "
  47.869 +                "committed " SIZE_FORMAT "K, "
  47.870 +                "reserved "  SIZE_FORMAT "K",
  47.871 +                allocated_used_bytes()/K,
  47.872 +                allocated_capacity_bytes()/K,
  47.873 +                committed_bytes()/K,
  47.874 +                reserved_bytes()/K);
  47.875 +
  47.876    if (Metaspace::using_class_space()) {
  47.877      Metaspace::MetadataType ct = Metaspace::ClassType;
  47.878      out->print_cr("  class space    "
  47.879 -                  SIZE_FORMAT "K, used " SIZE_FORMAT "K,"
  47.880 -                  " reserved " SIZE_FORMAT "K",
  47.881 +                  "used "      SIZE_FORMAT "K, "
  47.882 +                  "capacity "  SIZE_FORMAT "K, "
  47.883 +                  "committed " SIZE_FORMAT "K, "
  47.884 +                  "reserved "  SIZE_FORMAT "K",
  47.885 +                  allocated_used_bytes(ct)/K,
  47.886                    allocated_capacity_bytes(ct)/K,
  47.887 -                  allocated_used_bytes(ct)/K,
  47.888 +                  committed_bytes(ct)/K,
  47.889                    reserved_bytes(ct)/K);
  47.890    }
  47.891  }
  47.892 @@ -2808,6 +2917,9 @@
  47.893  size_t Metaspace::_first_chunk_word_size = 0;
  47.894  size_t Metaspace::_first_class_chunk_word_size = 0;
  47.895  
  47.896 +size_t Metaspace::_commit_alignment = 0;
  47.897 +size_t Metaspace::_reserve_alignment = 0;
  47.898 +
  47.899  Metaspace::Metaspace(Mutex* lock, MetaspaceType type) {
  47.900    initialize(lock, type);
  47.901  }
  47.902 @@ -2869,21 +2981,30 @@
  47.903    assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
  47.904    assert(class_metaspace_size() < KlassEncodingMetaspaceMax,
  47.905           "Metaspace size is too big");
  47.906 +  assert_is_ptr_aligned(requested_addr,          _reserve_alignment);
  47.907 +  assert_is_ptr_aligned(cds_base,                _reserve_alignment);
  47.908 +  assert_is_size_aligned(class_metaspace_size(), _reserve_alignment);
  47.909 +
  47.910 +  // Don't use large pages for the class space.
  47.911 +  bool large_pages = false;
  47.912  
  47.913    ReservedSpace metaspace_rs = ReservedSpace(class_metaspace_size(),
  47.914 -                                             os::vm_allocation_granularity(),
  47.915 -                                             false, requested_addr, 0);
  47.916 +                                             _reserve_alignment,
  47.917 +                                             large_pages,
  47.918 +                                             requested_addr, 0);
  47.919    if (!metaspace_rs.is_reserved()) {
  47.920      if (UseSharedSpaces) {
  47.921 +      size_t increment = align_size_up(1*G, _reserve_alignment);
  47.922 +
  47.923        // Keep trying to allocate the metaspace, increasing the requested_addr
  47.924        // by 1GB each time, until we reach an address that will no longer allow
  47.925        // use of CDS with compressed klass pointers.
  47.926        char *addr = requested_addr;
  47.927 -      while (!metaspace_rs.is_reserved() && (addr + 1*G > addr) &&
  47.928 -             can_use_cds_with_metaspace_addr(addr + 1*G, cds_base)) {
  47.929 -        addr = addr + 1*G;
  47.930 +      while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
  47.931 +             can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
  47.932 +        addr = addr + increment;
  47.933          metaspace_rs = ReservedSpace(class_metaspace_size(),
  47.934 -                                     os::vm_allocation_granularity(), false, addr, 0);
  47.935 +                                     _reserve_alignment, large_pages, addr, 0);
  47.936        }
  47.937      }
  47.938  
  47.939 @@ -2894,7 +3015,7 @@
  47.940      // So, UseCompressedClassPointers cannot be turned off at this point.
  47.941      if (!metaspace_rs.is_reserved()) {
  47.942        metaspace_rs = ReservedSpace(class_metaspace_size(),
  47.943 -                                   os::vm_allocation_granularity(), false);
  47.944 +                                   _reserve_alignment, large_pages);
  47.945        if (!metaspace_rs.is_reserved()) {
  47.946          vm_exit_during_initialization(err_msg("Could not allocate metaspace: %d bytes",
  47.947                                                class_metaspace_size()));
  47.948 @@ -2933,34 +3054,96 @@
  47.949    assert(using_class_space(), "Must be using class space");
  47.950    _class_space_list = new VirtualSpaceList(rs);
  47.951    _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);
  47.952 +
  47.953 +  if (!_class_space_list->initialization_succeeded()) {
  47.954 +    vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
  47.955 +  }
  47.956  }
  47.957  
  47.958  #endif
  47.959  
  47.960 +// Align down. If the aligning result in 0, return 'alignment'.
  47.961 +static size_t restricted_align_down(size_t size, size_t alignment) {
  47.962 +  return MAX2(alignment, align_size_down_(size, alignment));
  47.963 +}
  47.964 +
  47.965 +void Metaspace::ergo_initialize() {
  47.966 +  if (DumpSharedSpaces) {
  47.967 +    // Using large pages when dumping the shared archive is currently not implemented.
  47.968 +    FLAG_SET_ERGO(bool, UseLargePagesInMetaspace, false);
  47.969 +  }
  47.970 +
  47.971 +  size_t page_size = os::vm_page_size();
  47.972 +  if (UseLargePages && UseLargePagesInMetaspace) {
  47.973 +    page_size = os::large_page_size();
  47.974 +  }
  47.975 +
  47.976 +  _commit_alignment  = page_size;
  47.977 +  _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());
  47.978 +
  47.979 +  // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will
  47.980 +  // override if MaxMetaspaceSize was set on the command line or not.
  47.981 +  // This information is needed later to conform to the specification of the
  47.982 +  // java.lang.management.MemoryUsage API.
  47.983 +  //
  47.984 +  // Ideally, we would be able to set the default value of MaxMetaspaceSize in
  47.985 +  // globals.hpp to the aligned value, but this is not possible, since the
  47.986 +  // alignment depends on other flags being parsed.
  47.987 +  MaxMetaspaceSize = restricted_align_down(MaxMetaspaceSize, _reserve_alignment);
  47.988 +
  47.989 +  if (MetaspaceSize > MaxMetaspaceSize) {
  47.990 +    MetaspaceSize = MaxMetaspaceSize;
  47.991 +  }
  47.992 +
  47.993 +  MetaspaceSize = restricted_align_down(MetaspaceSize, _commit_alignment);
  47.994 +
  47.995 +  assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");
  47.996 +
  47.997 +  if (MetaspaceSize < 256*K) {
  47.998 +    vm_exit_during_initialization("Too small initial Metaspace size");
  47.999 +  }
 47.1000 +
 47.1001 +  MinMetaspaceExpansion = restricted_align_down(MinMetaspaceExpansion, _commit_alignment);
 47.1002 +  MaxMetaspaceExpansion = restricted_align_down(MaxMetaspaceExpansion, _commit_alignment);
 47.1003 +
 47.1004 +  CompressedClassSpaceSize = restricted_align_down(CompressedClassSpaceSize, _reserve_alignment);
 47.1005 +  set_class_metaspace_size(CompressedClassSpaceSize);
 47.1006 +}
 47.1007 +
 47.1008  void Metaspace::global_initialize() {
 47.1009    // Initialize the alignment for shared spaces.
 47.1010    int max_alignment = os::vm_page_size();
 47.1011    size_t cds_total = 0;
 47.1012  
 47.1013 -  set_class_metaspace_size(align_size_up(CompressedClassSpaceSize,
 47.1014 -                                         os::vm_allocation_granularity()));
 47.1015 -
 47.1016    MetaspaceShared::set_max_alignment(max_alignment);
 47.1017  
 47.1018    if (DumpSharedSpaces) {
 47.1019 -    SharedReadOnlySize = align_size_up(SharedReadOnlySize, max_alignment);
 47.1020 +    SharedReadOnlySize  = align_size_up(SharedReadOnlySize,  max_alignment);
 47.1021      SharedReadWriteSize = align_size_up(SharedReadWriteSize, max_alignment);
 47.1022 -    SharedMiscDataSize  = align_size_up(SharedMiscDataSize, max_alignment);
 47.1023 -    SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize, max_alignment);
 47.1024 +    SharedMiscDataSize  = align_size_up(SharedMiscDataSize,  max_alignment);
 47.1025 +    SharedMiscCodeSize  = align_size_up(SharedMiscCodeSize,  max_alignment);
 47.1026  
 47.1027      // Initialize with the sum of the shared space sizes.  The read-only
 47.1028      // and read write metaspace chunks will be allocated out of this and the
 47.1029      // remainder is the misc code and data chunks.
 47.1030      cds_total = FileMapInfo::shared_spaces_size();
 47.1031 +    cds_total = align_size_up(cds_total, _reserve_alignment);
 47.1032      _space_list = new VirtualSpaceList(cds_total/wordSize);
 47.1033      _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
 47.1034  
 47.1035 +    if (!_space_list->initialization_succeeded()) {
 47.1036 +      vm_exit_during_initialization("Unable to dump shared archive.", NULL);
 47.1037 +    }
 47.1038 +
 47.1039  #ifdef _LP64
 47.1040 +    if (cds_total + class_metaspace_size() > (uint64_t)max_juint) {
 47.1041 +      vm_exit_during_initialization("Unable to dump shared archive.",
 47.1042 +          err_msg("Size of archive (" SIZE_FORMAT ") + compressed class space ("
 47.1043 +                  SIZE_FORMAT ") == total (" SIZE_FORMAT ") is larger than compressed "
 47.1044 +                  "klass limit: " SIZE_FORMAT, cds_total, class_metaspace_size(),
 47.1045 +                  cds_total + class_metaspace_size(), (size_t)max_juint));
 47.1046 +    }
 47.1047 +
 47.1048      // Set the compressed klass pointer base so that decoding of these pointers works
 47.1049      // properly when creating the shared archive.
 47.1050      assert(UseCompressedOops && UseCompressedClassPointers,
 47.1051 @@ -2971,9 +3154,6 @@
 47.1052                               _space_list->current_virtual_space()->bottom());
 47.1053      }
 47.1054  
 47.1055 -    // Set the shift to zero.
 47.1056 -    assert(class_metaspace_size() < (uint64_t)(max_juint) - cds_total,
 47.1057 -           "CDS region is too large");
 47.1058      Universe::set_narrow_klass_shift(0);
 47.1059  #endif
 47.1060  
 47.1061 @@ -2992,12 +3172,12 @@
 47.1062        // Map in spaces now also
 47.1063        if (mapinfo->initialize() && MetaspaceShared::map_shared_spaces(mapinfo)) {
 47.1064          FileMapInfo::set_current_info(mapinfo);
 47.1065 +        cds_total = FileMapInfo::shared_spaces_size();
 47.1066 +        cds_address = (address)mapinfo->region_base(0);
 47.1067        } else {
 47.1068          assert(!mapinfo->is_open() && !UseSharedSpaces,
 47.1069                 "archive file not closed or shared spaces not disabled.");
 47.1070        }
 47.1071 -      cds_total = FileMapInfo::shared_spaces_size();
 47.1072 -      cds_address = (address)mapinfo->region_base(0);
 47.1073      }
 47.1074  
 47.1075  #ifdef _LP64
 47.1076 @@ -3005,7 +3185,9 @@
 47.1077      // above the heap and above the CDS area (if it exists).
 47.1078      if (using_class_space()) {
 47.1079        if (UseSharedSpaces) {
 47.1080 -        allocate_metaspace_compressed_klass_ptrs((char *)(cds_address + cds_total), cds_address);
 47.1081 +        char* cds_end = (char*)(cds_address + cds_total);
 47.1082 +        cds_end = (char *)align_ptr_up(cds_end, _reserve_alignment);
 47.1083 +        allocate_metaspace_compressed_klass_ptrs(cds_end, cds_address);
 47.1084        } else {
 47.1085          allocate_metaspace_compressed_klass_ptrs((char *)CompressedKlassPointersBase, 0);
 47.1086        }
 47.1087 @@ -3023,11 +3205,19 @@
 47.1088      _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
 47.1089      // Arbitrarily set the initial virtual space to a multiple
 47.1090      // of the boot class loader size.
 47.1091 -    size_t word_size = VIRTUALSPACEMULTIPLIER * first_chunk_word_size();
 47.1092 +    size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
 47.1093 +    word_size = align_size_up(word_size, Metaspace::reserve_alignment_words());
 47.1094 +
 47.1095      // Initialize the list of virtual spaces.
 47.1096      _space_list = new VirtualSpaceList(word_size);
 47.1097      _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
 47.1098 +
 47.1099 +    if (!_space_list->initialization_succeeded()) {
 47.1100 +      vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
 47.1101 +    }
 47.1102    }
 47.1103 +
 47.1104 +  MetaspaceGC::initialize();
 47.1105  }
 47.1106  
 47.1107  Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
 47.1108 @@ -3039,7 +3229,7 @@
 47.1109      return chunk;
 47.1110    }
 47.1111  
 47.1112 -  return get_space_list(mdtype)->get_initialization_chunk(chunk_word_size, chunk_bunch);
 47.1113 +  return get_space_list(mdtype)->get_new_chunk(chunk_word_size, chunk_word_size, chunk_bunch);
 47.1114  }
 47.1115  
 47.1116  void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
 47.1117 @@ -3112,19 +3302,18 @@
 47.1118  }
 47.1119  
 47.1120  MetaWord* Metaspace::expand_and_allocate(size_t word_size, MetadataType mdtype) {
 47.1121 -  MetaWord* result;
 47.1122 -  MetaspaceGC::set_expand_after_GC(true);
 47.1123 -  size_t before_inc = MetaspaceGC::capacity_until_GC();
 47.1124 -  size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size) * BytesPerWord;
 47.1125 -  MetaspaceGC::inc_capacity_until_GC(delta_bytes);
 47.1126 +  size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
 47.1127 +  assert(delta_bytes > 0, "Must be");
 47.1128 +
 47.1129 +  size_t after_inc = MetaspaceGC::inc_capacity_until_GC(delta_bytes);
 47.1130 +  size_t before_inc = after_inc - delta_bytes;
 47.1131 +
 47.1132    if (PrintGCDetails && Verbose) {
 47.1133      gclog_or_tty->print_cr("Increase capacity to GC from " SIZE_FORMAT
 47.1134 -      " to " SIZE_FORMAT, before_inc, MetaspaceGC::capacity_until_GC());
 47.1135 +        " to " SIZE_FORMAT, before_inc, after_inc);
 47.1136    }
 47.1137  
 47.1138 -  result = allocate(word_size, mdtype);
 47.1139 -
 47.1140 -  return result;
 47.1141 +  return allocate(word_size, mdtype);
 47.1142  }
 47.1143  
 47.1144  // Space allocated in the Metaspace.  This may
 47.1145 @@ -3206,6 +3395,7 @@
 47.1146    }
 47.1147  }
 47.1148  
 47.1149 +
 47.1150  Metablock* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
 47.1151                                bool read_only, MetaspaceObj::Type type, TRAPS) {
 47.1152    if (HAS_PENDING_EXCEPTION) {
 47.1153 @@ -3213,20 +3403,16 @@
 47.1154      return NULL;  // caller does a CHECK_NULL too
 47.1155    }
 47.1156  
 47.1157 -  MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
 47.1158 -
 47.1159 -  // SSS: Should we align the allocations and make sure the sizes are aligned.
 47.1160 -  MetaWord* result = NULL;
 47.1161 -
 47.1162    assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
 47.1163          "ClassLoaderData::the_null_class_loader_data() should have been used.");
 47.1164 +
 47.1165    // Allocate in metaspaces without taking out a lock, because it deadlocks
 47.1166    // with the SymbolTable_lock.  Dumping is single threaded for now.  We'll have
 47.1167    // to revisit this for application class data sharing.
 47.1168    if (DumpSharedSpaces) {
 47.1169      assert(type > MetaspaceObj::UnknownType && type < MetaspaceObj::_number_of_types, "sanity");
 47.1170      Metaspace* space = read_only ? loader_data->ro_metaspace() : loader_data->rw_metaspace();
 47.1171 -    result = space->allocate(word_size, NonClassType);
 47.1172 +    MetaWord* result = space->allocate(word_size, NonClassType);
 47.1173      if (result == NULL) {
 47.1174        report_out_of_shared_space(read_only ? SharedReadOnly : SharedReadWrite);
 47.1175      } else {
 47.1176 @@ -3235,42 +3421,64 @@
 47.1177      return Metablock::initialize(result, word_size);
 47.1178    }
 47.1179  
 47.1180 -  result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
 47.1181 +  MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;
 47.1182 +
 47.1183 +  // Try to allocate metadata.
 47.1184 +  MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);
 47.1185  
 47.1186    if (result == NULL) {
 47.1187 -    // Try to clean out some memory and retry.
 47.1188 -    result =
 47.1189 -      Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
 47.1190 -        loader_data, word_size, mdtype);
 47.1191 -
 47.1192 -    // If result is still null, we are out of memory.
 47.1193 -    if (result == NULL) {
 47.1194 -      if (Verbose && TraceMetadataChunkAllocation) {
 47.1195 -        gclog_or_tty->print_cr("Metaspace allocation failed for size "
 47.1196 -          SIZE_FORMAT, word_size);
 47.1197 -        if (loader_data->metaspace_or_null() != NULL) loader_data->dump(gclog_or_tty);
 47.1198 -        MetaspaceAux::dump(gclog_or_tty);
 47.1199 -      }
 47.1200 -      // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
 47.1201 -      const char* space_string = is_class_space_allocation(mdtype) ? "Compressed class space" :
 47.1202 -                                                                     "Metadata space";
 47.1203 -      report_java_out_of_memory(space_string);
 47.1204 -
 47.1205 -      if (JvmtiExport::should_post_resource_exhausted()) {
 47.1206 -        JvmtiExport::post_resource_exhausted(
 47.1207 -            JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
 47.1208 -            space_string);
 47.1209 -      }
 47.1210 -      if (is_class_space_allocation(mdtype)) {
 47.1211 -        THROW_OOP_0(Universe::out_of_memory_error_class_metaspace());
 47.1212 -      } else {
 47.1213 -        THROW_OOP_0(Universe::out_of_memory_error_metaspace());
 47.1214 -      }
 47.1215 +    // Allocation failed.
 47.1216 +    if (is_init_completed()) {
 47.1217 +      // Only start a GC if the bootstrapping has completed.
 47.1218 +
 47.1219 +      // Try to clean out some memory and retry.
 47.1220 +      result = Universe::heap()->collector_policy()->satisfy_failed_metadata_allocation(
 47.1221 +          loader_data, word_size, mdtype);
 47.1222      }
 47.1223    }
 47.1224 +
 47.1225 +  if (result == NULL) {
 47.1226 +    report_metadata_oome(loader_data, word_size, mdtype, THREAD);
 47.1227 +    // Will not reach here.
 47.1228 +    return NULL;
 47.1229 +  }
 47.1230 +
 47.1231    return Metablock::initialize(result, word_size);
 47.1232  }
 47.1233  
 47.1234 +void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetadataType mdtype, TRAPS) {
 47.1235 +  // If result is still null, we are out of memory.
 47.1236 +  if (Verbose && TraceMetadataChunkAllocation) {
 47.1237 +    gclog_or_tty->print_cr("Metaspace allocation failed for size "
 47.1238 +        SIZE_FORMAT, word_size);
 47.1239 +    if (loader_data->metaspace_or_null() != NULL) {
 47.1240 +      loader_data->dump(gclog_or_tty);
 47.1241 +    }
 47.1242 +    MetaspaceAux::dump(gclog_or_tty);
 47.1243 +  }
 47.1244 +
 47.1245 +  // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
 47.1246 +  const char* space_string = is_class_space_allocation(mdtype) ? "Compressed class space" :
 47.1247 +                                                                 "Metadata space";
 47.1248 +  report_java_out_of_memory(space_string);
 47.1249 +
 47.1250 +  if (JvmtiExport::should_post_resource_exhausted()) {
 47.1251 +    JvmtiExport::post_resource_exhausted(
 47.1252 +        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
 47.1253 +        space_string);
 47.1254 +  }
 47.1255 +
 47.1256 +  if (!is_init_completed()) {
 47.1257 +    vm_exit_during_initialization("OutOfMemoryError", space_string);
 47.1258 +  }
 47.1259 +
 47.1260 +  if (is_class_space_allocation(mdtype)) {
 47.1261 +    THROW_OOP(Universe::out_of_memory_error_class_metaspace());
 47.1262 +  } else {
 47.1263 +    THROW_OOP(Universe::out_of_memory_error_metaspace());
 47.1264 +  }
 47.1265 +}
 47.1266 +
 47.1267  void Metaspace::record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size) {
 47.1268    assert(DumpSharedSpaces, "sanity");
 47.1269  
    48.1 --- a/src/share/vm/memory/metaspace.hpp	Sun Oct 13 21:14:04 2013 +0100
    48.2 +++ b/src/share/vm/memory/metaspace.hpp	Thu Oct 17 14:20:57 2013 -0700
    48.3 @@ -87,9 +87,10 @@
    48.4    friend class MetaspaceAux;
    48.5  
    48.6   public:
    48.7 -  enum MetadataType {ClassType = 0,
    48.8 -                     NonClassType = ClassType + 1,
    48.9 -                     MetadataTypeCount = ClassType + 2
   48.10 +  enum MetadataType {
   48.11 +    ClassType,
   48.12 +    NonClassType,
   48.13 +    MetadataTypeCount
   48.14    };
   48.15    enum MetaspaceType {
   48.16      StandardMetaspaceType,
   48.17 @@ -103,6 +104,9 @@
   48.18   private:
   48.19    void initialize(Mutex* lock, MetaspaceType type);
   48.20  
   48.21 +  // Get the first chunk for a Metaspace.  Used for
   48.22 +  // special cases such as the boot class loader, reflection
   48.23 +  // class loader and anonymous class loader.
   48.24    Metachunk* get_initialization_chunk(MetadataType mdtype,
   48.25                                        size_t chunk_word_size,
   48.26                                        size_t chunk_bunch);
   48.27 @@ -123,6 +127,9 @@
   48.28    static size_t _first_chunk_word_size;
   48.29    static size_t _first_class_chunk_word_size;
   48.30  
   48.31 +  static size_t _commit_alignment;
   48.32 +  static size_t _reserve_alignment;
   48.33 +
   48.34    SpaceManager* _vsm;
   48.35    SpaceManager* vsm() const { return _vsm; }
   48.36  
   48.37 @@ -191,12 +198,17 @@
   48.38    Metaspace(Mutex* lock, MetaspaceType type);
   48.39    ~Metaspace();
   48.40  
   48.41 -  // Initialize globals for Metaspace
   48.42 +  static void ergo_initialize();
   48.43    static void global_initialize();
   48.44  
   48.45    static size_t first_chunk_word_size() { return _first_chunk_word_size; }
   48.46    static size_t first_class_chunk_word_size() { return _first_class_chunk_word_size; }
   48.47  
   48.48 +  static size_t reserve_alignment()       { return _reserve_alignment; }
   48.49 +  static size_t reserve_alignment_words() { return _reserve_alignment / BytesPerWord; }
   48.50 +  static size_t commit_alignment()        { return _commit_alignment; }
   48.51 +  static size_t commit_alignment_words()  { return _commit_alignment / BytesPerWord; }
   48.52 +
   48.53    char*  bottom() const;
   48.54    size_t used_words_slow(MetadataType mdtype) const;
   48.55    size_t free_words_slow(MetadataType mdtype) const;
   48.56 @@ -219,6 +231,9 @@
   48.57    static void purge(MetadataType mdtype);
   48.58    static void purge();
   48.59  
   48.60 +  static void report_metadata_oome(ClassLoaderData* loader_data, size_t word_size,
   48.61 +                                   MetadataType mdtype, TRAPS);
   48.62 +
   48.63    void print_on(outputStream* st) const;
   48.64    // Debugging support
   48.65    void verify();
   48.66 @@ -352,17 +367,10 @@
   48.67  
   48.68  class MetaspaceGC : AllStatic {
   48.69  
   48.70 -  // The current high-water-mark for inducing a GC.  When
   48.71 -  // the capacity of all space in the virtual lists reaches this value,
   48.72 -  // a GC is induced and the value is increased.  This should be changed
   48.73 -  // to the space actually used for allocations to avoid affects of
   48.74 -  // fragmentation losses to partially used chunks.  Size is in words.
   48.75 -  static size_t _capacity_until_GC;
   48.76 -
   48.77 -  // After a GC is done any allocation that fails should try to expand
   48.78 -  // the capacity of the Metaspaces.  This flag is set during attempts
   48.79 -  // to allocate in the VMGCOperation that does the GC.
   48.80 -  static bool _expand_after_GC;
   48.81 +  // The current high-water-mark for inducing a GC.
   48.82 +  // When committed memory of all metaspaces reaches this value,
   48.83 +  // a GC is induced and the value is increased. Size is in bytes.
   48.84 +  static volatile intptr_t _capacity_until_GC;
   48.85  
   48.86    // For a CMS collection, signal that a concurrent collection should
   48.87    // be started.
   48.88 @@ -370,20 +378,16 @@
   48.89  
   48.90    static uint _shrink_factor;
   48.91  
   48.92 -  static void set_capacity_until_GC(size_t v) { _capacity_until_GC = v; }
   48.93 -
   48.94    static size_t shrink_factor() { return _shrink_factor; }
   48.95    void set_shrink_factor(uint v) { _shrink_factor = v; }
   48.96  
   48.97   public:
   48.98  
   48.99 -  static size_t capacity_until_GC() { return _capacity_until_GC; }
  48.100 -  static void inc_capacity_until_GC(size_t v) { _capacity_until_GC += v; }
  48.101 -  static void dec_capacity_until_GC(size_t v) {
  48.102 -    _capacity_until_GC = _capacity_until_GC > v ? _capacity_until_GC - v : 0;
  48.103 -  }
  48.104 -  static bool expand_after_GC()           { return _expand_after_GC; }
  48.105 -  static void set_expand_after_GC(bool v) { _expand_after_GC = v; }
  48.106 +  static void initialize() { _capacity_until_GC = MetaspaceSize; }
  48.107 +
  48.108 +  static size_t capacity_until_GC();
  48.109 +  static size_t inc_capacity_until_GC(size_t v);
  48.110 +  static size_t dec_capacity_until_GC(size_t v);
  48.111  
  48.112    static bool should_concurrent_collect() { return _should_concurrent_collect; }
  48.113    static void set_should_concurrent_collect(bool v) {
  48.114 @@ -391,11 +395,14 @@
  48.115    }
  48.116  
  48.117    // The amount to increase the high-water-mark (_capacity_until_GC)
  48.118 -  static size_t delta_capacity_until_GC(size_t word_size);
  48.119 +  static size_t delta_capacity_until_GC(size_t bytes);
  48.120  
  48.121 -  // It is expected that this will be called when the current capacity
  48.122 -  // has been used and a GC should be considered.
  48.123 -  static bool should_expand(VirtualSpaceList* vsl, size_t word_size);
  48.124 +  // Tells if we have can expand metaspace without hitting set limits.
  48.125 +  static bool can_expand(size_t words, bool is_class);
  48.126 +
  48.127 +  // Returns amount that we can expand without hitting a GC,
  48.128 +  // measured in words.
  48.129 +  static size_t allowed_expansion();
  48.130  
  48.131    // Calculate the new high-water mark at which to induce
  48.132    // a GC.
    49.1 --- a/src/share/vm/oops/instanceKlass.cpp	Sun Oct 13 21:14:04 2013 +0100
    49.2 +++ b/src/share/vm/oops/instanceKlass.cpp	Thu Oct 17 14:20:57 2013 -0700
    49.3 @@ -238,6 +238,13 @@
    49.4    }
    49.5  }
    49.6  
    49.7 +// create a new array of vtable_indices for default methods
    49.8 +Array<int>* InstanceKlass::create_new_default_vtable_indices(int len, TRAPS) {
    49.9 +  Array<int>* vtable_indices = MetadataFactory::new_array<int>(class_loader_data(), len, CHECK_NULL);
   49.10 +  assert(default_vtable_indices() == NULL, "only create once");
   49.11 +  set_default_vtable_indices(vtable_indices);
   49.12 +  return vtable_indices;
   49.13 +}
   49.14  
   49.15  InstanceKlass::InstanceKlass(int vtable_len,
   49.16                               int itable_len,
   49.17 @@ -263,6 +270,8 @@
   49.18    set_array_klasses(NULL);
   49.19    set_methods(NULL);
   49.20    set_method_ordering(NULL);
   49.21 +  set_default_methods(NULL);
   49.22 +  set_default_vtable_indices(NULL);
   49.23    set_local_interfaces(NULL);
   49.24    set_transitive_interfaces(NULL);
   49.25    init_implementor();
   49.26 @@ -376,6 +385,21 @@
   49.27    }
   49.28    set_method_ordering(NULL);
   49.29  
   49.30 +  // default methods can be empty
   49.31 +  if (default_methods() != NULL &&
   49.32 +      default_methods() != Universe::the_empty_method_array()) {
   49.33 +    MetadataFactory::free_array<Method*>(loader_data, default_methods());
   49.34 +  }
   49.35 +  // Do NOT deallocate the default methods, they are owned by superinterfaces.
   49.36 +  set_default_methods(NULL);
   49.37 +
   49.38 +  // default methods vtable indices can be empty
   49.39 +  if (default_vtable_indices() != NULL) {
   49.40 +    MetadataFactory::free_array<int>(loader_data, default_vtable_indices());
   49.41 +  }
   49.42 +  set_default_vtable_indices(NULL);
   49.43 +
   49.44 +
   49.45    // This array is in Klass, but remove it with the InstanceKlass since
   49.46    // this place would be the only caller and it can share memory with transitive
   49.47    // interfaces.
   49.48 @@ -456,14 +480,14 @@
   49.49    return java_lang_Class::signers(java_mirror());
   49.50  }
   49.51  
   49.52 -volatile oop InstanceKlass::init_lock() const {
   49.53 +oop InstanceKlass::init_lock() const {
   49.54    // return the init lock from the mirror
   49.55    return java_lang_Class::init_lock(java_mirror());
   49.56  }
   49.57  
   49.58  void InstanceKlass::eager_initialize_impl(instanceKlassHandle this_oop) {
   49.59    EXCEPTION_MARK;
   49.60 -  volatile oop init_lock = this_oop->init_lock();
   49.61 +  oop init_lock = this_oop->init_lock();
   49.62    ObjectLocker ol(init_lock, THREAD);
   49.63  
   49.64    // abort if someone beat us to the initialization
   49.65 @@ -608,7 +632,7 @@
   49.66  
   49.67    // verification & rewriting
   49.68    {
   49.69 -    volatile oop init_lock = this_oop->init_lock();
   49.70 +    oop init_lock = this_oop->init_lock();
   49.71      ObjectLocker ol(init_lock, THREAD);
   49.72      // rewritten will have been set if loader constraint error found
   49.73      // on an earlier link attempt
   49.74 @@ -731,7 +755,7 @@
   49.75    // refer to the JVM book page 47 for description of steps
   49.76    // Step 1
   49.77    {
   49.78 -    volatile oop init_lock = this_oop->init_lock();
   49.79 +    oop init_lock = this_oop->init_lock();
   49.80      ObjectLocker ol(init_lock, THREAD);
   49.81  
   49.82      Thread *self = THREAD; // it's passed the current thread
   49.83 @@ -879,7 +903,7 @@
   49.84  }
   49.85  
   49.86  void InstanceKlass::set_initialization_state_and_notify_impl(instanceKlassHandle this_oop, ClassState state, TRAPS) {
   49.87 -  volatile oop init_lock = this_oop->init_lock();
   49.88 +  oop init_lock = this_oop->init_lock();
   49.89    ObjectLocker ol(init_lock, THREAD);
   49.90    this_oop->set_init_state(state);
   49.91    ol.notify_all(CHECK);
   49.92 @@ -1354,32 +1378,44 @@
   49.93    return -1;
   49.94  }
   49.95  
   49.96 +// find_method looks up the name/signature in the local methods array
   49.97  Method* InstanceKlass::find_method(Symbol* name, Symbol* signature) const {
   49.98    return InstanceKlass::find_method(methods(), name, signature);
   49.99  }
  49.100  
  49.101 +// find_method looks up the name/signature in the local methods array
  49.102  Method* InstanceKlass::find_method(
  49.103      Array<Method*>* methods, Symbol* name, Symbol* signature) {
  49.104 +  int hit = find_method_index(methods, name, signature);
  49.105 +  return hit >= 0 ? methods->at(hit): NULL;
  49.106 +}
  49.107 +
  49.108 +// Used directly for default_methods to find the index into the
  49.109 +// default_vtable_indices, and indirectly by find_method
  49.110 +// find_method_index looks in the local methods array to return the index
  49.111 +// of the matching name/signature
  49.112 +int InstanceKlass::find_method_index(
  49.113 +    Array<Method*>* methods, Symbol* name, Symbol* signature) {
  49.114    int hit = binary_search(methods, name);
  49.115    if (hit != -1) {
  49.116      Method* m = methods->at(hit);
  49.117      // Do linear search to find matching signature.  First, quick check
  49.118      // for common case
  49.119 -    if (m->signature() == signature) return m;
  49.120 +    if (m->signature() == signature) return hit;
  49.121      // search downwards through overloaded methods
  49.122      int i;
  49.123      for (i = hit - 1; i >= 0; --i) {
  49.124          Method* m = methods->at(i);
  49.125          assert(m->is_method(), "must be method");
  49.126          if (m->name() != name) break;
  49.127 -        if (m->signature() == signature) return m;
  49.128 +        if (m->signature() == signature) return i;
  49.129      }
  49.130      // search upwards
  49.131      for (i = hit + 1; i < methods->length(); ++i) {
  49.132          Method* m = methods->at(i);
  49.133          assert(m->is_method(), "must be method");
  49.134          if (m->name() != name) break;
  49.135 -        if (m->signature() == signature) return m;
  49.136 +        if (m->signature() == signature) return i;
  49.137      }
  49.138      // not found
  49.139  #ifdef ASSERT
  49.140 @@ -1387,9 +1423,8 @@
  49.141      assert(index == -1, err_msg("binary search should have found entry %d", index));
  49.142  #endif
  49.143    }
  49.144 -  return NULL;
  49.145 +  return -1;
  49.146  }
  49.147 -
  49.148  int InstanceKlass::find_method_by_name(Symbol* name, int* end) {
  49.149    return find_method_by_name(methods(), name, end);
  49.150  }
  49.151 @@ -1408,6 +1443,7 @@
  49.152    return -1;
  49.153  }
  49.154  
  49.155 +// lookup_method searches both the local methods array and all superclasses methods arrays
  49.156  Method* InstanceKlass::uncached_lookup_method(Symbol* name, Symbol* signature) const {
  49.157    Klass* klass = const_cast<InstanceKlass*>(this);
  49.158    while (klass != NULL) {
  49.159 @@ -1418,6 +1454,21 @@
  49.160    return NULL;
  49.161  }
  49.162  
  49.163 +// lookup a method in the default methods list then in all transitive interfaces
  49.164 +// Do NOT return private or static methods
  49.165 +Method* InstanceKlass::lookup_method_in_ordered_interfaces(Symbol* name,
  49.166 +                                                         Symbol* signature) const {
  49.167 +  Method* m = NULL;
  49.168 +  if (default_methods() != NULL) {
  49.169 +    m = find_method(default_methods(), name, signature);
  49.170 +  }
  49.171 +  // Look up interfaces
  49.172 +  if (m == NULL) {
  49.173 +    m = lookup_method_in_all_interfaces(name, signature);
  49.174 +  }
  49.175 +  return m;
  49.176 +}
  49.177 +
  49.178  // lookup a method in all the interfaces that this class implements
  49.179  // Do NOT return private or static methods, new in JDK8 which are not externally visible
  49.180  // They should only be found in the initial InterfaceMethodRef
  49.181 @@ -2548,6 +2599,42 @@
  49.182    return m;
  49.183  }
  49.184  
  49.185 +
  49.186 +#if INCLUDE_JVMTI
  49.187 +// update default_methods for redefineclasses for methods that are
  49.188 +// not yet in the vtable due to concurrent subclass define and superinterface
  49.189 +// redefinition
  49.190 +// Note: those in the vtable, should have been updated via adjust_method_entries
  49.191 +void InstanceKlass::adjust_default_methods(Method** old_methods, Method** new_methods,
  49.192 +                                           int methods_length, bool* trace_name_printed) {
  49.193 +  // search the default_methods for uses of either obsolete or EMCP methods
  49.194 +  if (default_methods() != NULL) {
  49.195 +    for (int j = 0; j < methods_length; j++) {
  49.196 +      Method* old_method = old_methods[j];
  49.197 +      Method* new_method = new_methods[j];
  49.198 +
  49.199 +      for (int index = 0; index < default_methods()->length(); index ++) {
  49.200 +        if (default_methods()->at(index) == old_method) {
  49.201 +          default_methods()->at_put(index, new_method);
  49.202 +          if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
  49.203 +            if (!(*trace_name_printed)) {
  49.204 +              // RC_TRACE_MESG macro has an embedded ResourceMark
  49.205 +              RC_TRACE_MESG(("adjust: klassname=%s default methods from name=%s",
  49.206 +                             external_name(),
  49.207 +                             old_method->method_holder()->external_name()));
  49.208 +              *trace_name_printed = true;
  49.209 +            }
  49.210 +            RC_TRACE(0x00100000, ("default method update: %s(%s) ",
  49.211 +                                  new_method->name()->as_C_string(),
  49.212 +                                  new_method->signature()->as_C_string()));
  49.213 +          }
  49.214 +        }
  49.215 +      }
  49.216 +    }
  49.217 +  }
  49.218 +}
  49.219 +#endif // INCLUDE_JVMTI
  49.220 +
  49.221  // On-stack replacement stuff
  49.222  void InstanceKlass::add_osr_nmethod(nmethod* n) {
  49.223    // only one compilation can be active
  49.224 @@ -2742,11 +2829,21 @@
  49.225    st->print(BULLET"methods:           "); methods()->print_value_on(st);                  st->cr();
  49.226    if (Verbose || WizardMode) {
  49.227      Array<Method*>* method_array = methods();
  49.228 -    for(int i = 0; i < method_array->length(); i++) {
  49.229 +    for (int i = 0; i < method_array->length(); i++) {
  49.230        st->print("%d : ", i); method_array->at(i)->print_value(); st->cr();
  49.231      }
  49.232    }
  49.233 -  st->print(BULLET"method ordering:   "); method_ordering()->print_value_on(st);       st->cr();
  49.234 +  st->print(BULLET"method ordering:   "); method_ordering()->print_value_on(st);      st->cr();
  49.235 +  st->print(BULLET"default_methods:   "); default_methods()->print_value_on(st);      st->cr();
  49.236 +  if (Verbose && default_methods() != NULL) {
  49.237 +    Array<Method*>* method_array = default_methods();
  49.238 +    for (int i = 0; i < method_array->length(); i++) {
  49.239 +      st->print("%d : ", i); method_array->at(i)->print_value(); st->cr();
  49.240 +    }
  49.241 +  }
  49.242 +  if (default_vtable_indices() != NULL) {
  49.243 +    st->print(BULLET"default vtable indices:   "); default_vtable_indices()->print_value_on(st);       st->cr();
  49.244 +  }
  49.245    st->print(BULLET"local interfaces:  "); local_interfaces()->print_value_on(st);      st->cr();
  49.246    st->print(BULLET"trans. interfaces: "); transitive_interfaces()->print_value_on(st); st->cr();
  49.247    st->print(BULLET"constants:         "); constants()->print_value_on(st);         st->cr();
  49.248 @@ -3099,6 +3196,19 @@
  49.249      }
  49.250    }
  49.251  
  49.252 +  // Verify default methods
  49.253 +  if (default_methods() != NULL) {
  49.254 +    Array<Method*>* methods = this->default_methods();
  49.255 +    for (int j = 0; j < methods->length(); j++) {
  49.256 +      guarantee(methods->at(j)->is_method(), "non-method in methods array");
  49.257 +    }
  49.258 +    for (int j = 0; j < methods->length() - 1; j++) {
  49.259 +      Method* m1 = methods->at(j);
  49.260 +      Method* m2 = methods->at(j + 1);
  49.261 +      guarantee(m1->name()->fast_compare(m2->name()) <= 0, "methods not sorted correctly");
  49.262 +    }
  49.263 +  }
  49.264 +
  49.265    // Verify JNI static field identifiers
  49.266    if (jni_ids() != NULL) {
  49.267      jni_ids()->verify(this);
    50.1 --- a/src/share/vm/oops/instanceKlass.hpp	Sun Oct 13 21:14:04 2013 +0100
    50.2 +++ b/src/share/vm/oops/instanceKlass.hpp	Thu Oct 17 14:20:57 2013 -0700
    50.3 @@ -269,12 +269,18 @@
    50.4  
    50.5    // Method array.
    50.6    Array<Method*>* _methods;
    50.7 +  // Default Method Array, concrete methods inherited from interfaces
    50.8 +  Array<Method*>* _default_methods;
    50.9    // Interface (Klass*s) this class declares locally to implement.
   50.10    Array<Klass*>* _local_interfaces;
   50.11    // Interface (Klass*s) this class implements transitively.
   50.12    Array<Klass*>* _transitive_interfaces;
   50.13    // Int array containing the original order of method in the class file (for JVMTI).
   50.14    Array<int>*     _method_ordering;
   50.15 +  // Int array containing the vtable_indices for default_methods
   50.16 +  // offset matches _default_methods offset
   50.17 +  Array<int>*     _default_vtable_indices;
   50.18 +
   50.19    // Instance and static variable information, starts with 6-tuples of shorts
   50.20    // [access, name index, sig index, initval index, low_offset, high_offset]
   50.21    // for all fields, followed by the generic signature data at the end of
   50.22 @@ -356,6 +362,15 @@
   50.23    void set_method_ordering(Array<int>* m) { _method_ordering = m; }
   50.24    void copy_method_ordering(intArray* m, TRAPS);
   50.25  
   50.26 +  // default_methods
   50.27 +  Array<Method*>* default_methods() const  { return _default_methods; }
   50.28 +  void set_default_methods(Array<Method*>* a) { _default_methods = a; }
   50.29 +
   50.30 +  // default method vtable_indices
   50.31 +  Array<int>* default_vtable_indices() const { return _default_vtable_indices; }
   50.32 +  void set_default_vtable_indices(Array<int>* v) { _default_vtable_indices = v; }
   50.33 +  Array<int>* create_new_default_vtable_indices(int len, TRAPS);
   50.34 +
   50.35    // interfaces
   50.36    Array<Klass*>* local_interfaces() const          { return _local_interfaces; }
   50.37    void set_local_interfaces(Array<Klass*>* a)      {
   50.38 @@ -501,12 +516,18 @@
   50.39    Method* find_method(Symbol* name, Symbol* signature) const;
   50.40    static Method* find_method(Array<Method*>* methods, Symbol* name, Symbol* signature);
   50.41  
   50.42 +  // find a local method index in default_methods (returns -1 if not found)
   50.43 +  static int find_method_index(Array<Method*>* methods, Symbol* name, Symbol* signature);
   50.44 +
   50.45    // lookup operation (returns NULL if not found)
   50.46    Method* uncached_lookup_method(Symbol* name, Symbol* signature) const;
   50.47  
   50.48    // lookup a method in all the interfaces that this class implements
   50.49    // (returns NULL if not found)
   50.50    Method* lookup_method_in_all_interfaces(Symbol* name, Symbol* signature) const;
   50.51 +  // lookup a method in local defaults then in all interfaces
   50.52 +  // (returns NULL if not found)
   50.53 +  Method* lookup_method_in_ordered_interfaces(Symbol* name, Symbol* signature) const;
   50.54  
   50.55    // Find method indices by name.  If a method with the specified name is
   50.56    // found the index to the first method is returned, and 'end' is filled in
   50.57 @@ -910,6 +931,11 @@
   50.58    klassItable* itable() const;        // return new klassItable wrapper
   50.59    Method* method_at_itable(Klass* holder, int index, TRAPS);
   50.60  
   50.61 +#if INCLUDE_JVMTI
   50.62 +  void adjust_default_methods(Method** old_methods, Method** new_methods,
   50.63 +                              int methods_length, bool* trace_name_printed);
   50.64 +#endif // INCLUDE_JVMTI
   50.65 +
   50.66    // Garbage collection
   50.67    void oop_follow_contents(oop obj);
   50.68    int  oop_adjust_pointers(oop obj);
   50.69 @@ -995,7 +1021,7 @@
   50.70    // Must be one per class and it has to be a VM internal object so java code
   50.71    // cannot lock it (like the mirror).
   50.72    // It has to be an object not a Mutex because it's held through java calls.
   50.73 -  volatile oop init_lock() const;
   50.74 +  oop init_lock() const;
   50.75  private:
   50.76  
   50.77    // Static methods that are used to implement member methods where an exposed this pointer
    51.1 --- a/src/share/vm/oops/klassVtable.cpp	Sun Oct 13 21:14:04 2013 +0100
    51.2 +++ b/src/share/vm/oops/klassVtable.cpp	Thu Oct 17 14:20:57 2013 -0700
    51.3 @@ -83,7 +83,7 @@
    51.4  
    51.5    GrowableArray<Method*> new_mirandas(20);
    51.6    // compute the number of mirandas methods that must be added to the end
    51.7 -  get_mirandas(&new_mirandas, all_mirandas, super, methods, local_interfaces);
    51.8 +  get_mirandas(&new_mirandas, all_mirandas, super, methods, NULL, local_interfaces);
    51.9    *num_new_mirandas = new_mirandas.length();
   51.10  
   51.11    vtable_length += *num_new_mirandas * vtableEntry::size();
   51.12 @@ -186,7 +186,7 @@
   51.13        assert(methods->at(i)->is_method(), "must be a Method*");
   51.14        methodHandle mh(THREAD, methods->at(i));
   51.15  
   51.16 -      bool needs_new_entry = update_inherited_vtable(ik(), mh, super_vtable_len, checkconstraints, CHECK);
   51.17 +      bool needs_new_entry = update_inherited_vtable(ik(), mh, super_vtable_len, -1, checkconstraints, CHECK);
   51.18  
   51.19        if (needs_new_entry) {
   51.20          put_method_at(mh(), initialized);
   51.21 @@ -195,7 +195,35 @@
   51.22        }
   51.23      }
   51.24  
   51.25 -    // add miranda methods to end of vtable.
   51.26 +    // update vtable with default_methods
   51.27 +    Array<Method*>* default_methods = ik()->default_methods();
   51.28 +    if (default_methods != NULL) {
   51.29 +      len = default_methods->length();
   51.30 +      if (len > 0) {
   51.31 +        Array<int>* def_vtable_indices = NULL;
   51.32 +        if ((def_vtable_indices = ik()->default_vtable_indices()) == NULL) {
   51.33 +          def_vtable_indices = ik()->create_new_default_vtable_indices(len, CHECK);
   51.34 +        } else {
   51.35 +          assert(def_vtable_indices->length() == len, "reinit vtable len?");
   51.36 +        }
   51.37 +        for (int i = 0; i < len; i++) {
   51.38 +          HandleMark hm(THREAD);
   51.39 +          assert(default_methods->at(i)->is_method(), "must be a Method*");
   51.40 +          methodHandle mh(THREAD, default_methods->at(i));
   51.41 +
   51.42 +          bool needs_new_entry = update_inherited_vtable(ik(), mh, super_vtable_len, i, checkconstraints, CHECK);
   51.43 +
   51.44 +          // needs new entry
   51.45 +          if (needs_new_entry) {
   51.46 +            put_method_at(mh(), initialized);
   51.47 +            def_vtable_indices->at_put(i, initialized); //set vtable index
   51.48 +            initialized++;
   51.49 +          }
   51.50 +        }
   51.51 +      }
   51.52 +    }
   51.53 +
   51.54 +    // add miranda methods; it will also return the updated initialized
   51.55      initialized = fill_in_mirandas(initialized);
   51.56  
   51.57      // In class hierarchies where the accessibility is not increasing (i.e., going from private ->
   51.58 @@ -230,14 +258,19 @@
   51.59  #ifndef PRODUCT
   51.60          if (PrintVtables && Verbose) {
   51.61            ResourceMark rm(THREAD);
   51.62 +          char* sig = target_method()->name_and_sig_as_C_string();
   51.63            tty->print("transitive overriding superclass %s with %s::%s index %d, original flags: ",
   51.64             supersuperklass->internal_name(),
   51.65 -           _klass->internal_name(), (target_method() != NULL) ?
   51.66 -           target_method()->name()->as_C_string() : "<NULL>", vtable_index);
   51.67 +           _klass->internal_name(), sig, vtable_index);
   51.68             super_method->access_flags().print_on(tty);
   51.69 +           if (super_method->is_default_method()) {
   51.70 +             tty->print("default");
   51.71 +           }
   51.72             tty->print("overriders flags: ");
   51.73             target_method->access_flags().print_on(tty);
   51.74 -           tty->cr();
   51.75 +           if (target_method->is_default_method()) {
   51.76 +             tty->print("default");
   51.77 +           }
   51.78          }
   51.79  #endif /*PRODUCT*/
   51.80          break; // return found superk
   51.81 @@ -258,16 +291,31 @@
   51.82  // OR return true if a new vtable entry is required.
   51.83  // Only called for InstanceKlass's, i.e. not for arrays
   51.84  // If that changed, could not use _klass as handle for klass
   51.85 -bool klassVtable::update_inherited_vtable(InstanceKlass* klass, methodHandle target_method, int super_vtable_len,
   51.86 -                  bool checkconstraints, TRAPS) {
   51.87 +bool klassVtable::update_inherited_vtable(InstanceKlass* klass, methodHandle target_method,
   51.88 +                                          int super_vtable_len, int default_index,
   51.89 +                                          bool checkconstraints, TRAPS) {
   51.90    ResourceMark rm;
   51.91    bool allocate_new = true;
   51.92    assert(klass->oop_is_instance(), "must be InstanceKlass");
   51.93 -  assert(klass == target_method()->method_holder(), "caller resp.");
   51.94  
   51.95 -  // Initialize the method's vtable index to "nonvirtual".
   51.96 -  // If we allocate a vtable entry, we will update it to a non-negative number.
   51.97 -  target_method()->set_vtable_index(Method::nonvirtual_vtable_index);
   51.98 +  Array<int>* def_vtable_indices = NULL;
   51.99 +  bool is_default = false;
  51.100 +  // default methods are concrete methods in superinterfaces which are added to the vtable
  51.101 +  // with their real method_holder
  51.102 +  // Since vtable and itable indices share the same storage, don't touch
  51.103 +  // the default method's real vtable/itable index
  51.104 +  // default_vtable_indices stores the vtable value relative to this inheritor
  51.105 +  if (default_index >= 0 ) {
  51.106 +    is_default = true;
  51.107 +    def_vtable_indices = klass->default_vtable_indices();
  51.108 +    assert(def_vtable_indices != NULL, "def vtable alloc?");
  51.109 +    assert(default_index <= def_vtable_indices->length(), "def vtable len?");
  51.110 +  } else {
  51.111 +    assert(klass == target_method()->method_holder(), "caller resp.");
  51.112 +    // Initialize the method's vtable index to "nonvirtual".
  51.113 +    // If we allocate a vtable entry, we will update it to a non-negative number.
  51.114 +    target_method()->set_vtable_index(Method::nonvirtual_vtable_index);
  51.115 +  }
  51.116  
  51.117    // Static and <init> methods are never in
  51.118    if (target_method()->is_static() || target_method()->name() ==  vmSymbols::object_initializer_name()) {
  51.119 @@ -284,6 +332,8 @@
  51.120      // An interface never allocates new vtable slots, only inherits old ones.
  51.121      // This method will either be assigned its own itable index later,
  51.122      // or be assigned an inherited vtable index in the loop below.
  51.123 +    // default methods store their vtable indices in the inheritors default_vtable_indices
  51.124 +    assert (default_index == -1, "interfaces don't store resolved default methods");
  51.125      target_method()->set_vtable_index(Method::pending_itable_index);
  51.126    }
  51.127  
  51.128 @@ -307,8 +357,15 @@
  51.129  
  51.130    Symbol* name = target_method()->name();
  51.131    Symbol* signature = target_method()->signature();
  51.132 -  Handle target_loader(THREAD, _klass()->class_loader());
  51.133 -  Symbol*  target_classname = _klass->name();
  51.134 +
  51.135 +  KlassHandle target_klass(THREAD, target_method()->method_holder());
  51.136 +  if (target_klass == NULL) {
  51.137 +    target_klass = _klass;
  51.138 +  }
  51.139 +
  51.140 +  Handle target_loader(THREAD, target_klass->class_loader());
  51.141 +
  51.142 +  Symbol* target_classname = target_klass->name();
  51.143    for(int i = 0; i < super_vtable_len; i++) {
  51.144      Method* super_method = method_at(i);
  51.145      // Check if method name matches
  51.146 @@ -317,10 +374,14 @@
  51.147        // get super_klass for method_holder for the found method
  51.148        InstanceKlass* super_klass =  super_method->method_holder();
  51.149  
  51.150 -      if ((super_klass->is_override(super_method, target_loader, target_classname, THREAD)) ||
  51.151 -      ((klass->major_version() >= VTABLE_TRANSITIVE_OVERRIDE_VERSION)
  51.152 -        && ((super_klass = find_transitive_override(super_klass, target_method, i, target_loader,
  51.153 -             target_classname, THREAD)) != (InstanceKlass*)NULL))) {
  51.154 +      if (is_default
  51.155 +          || ((super_klass->is_override(super_method, target_loader, target_classname, THREAD))
  51.156 +          || ((klass->major_version() >= VTABLE_TRANSITIVE_OVERRIDE_VERSION)
  51.157 +          && ((super_klass = find_transitive_override(super_klass,
  51.158 +                             target_method, i, target_loader,
  51.159 +                             target_classname, THREAD))
  51.160 +                             != (InstanceKlass*)NULL))))
  51.161 +        {
  51.162          // overriding, so no new entry
  51.163          allocate_new = false;
  51.164  
  51.165 @@ -347,7 +408,7 @@
  51.166                  "%s used in the signature";
  51.167                char* sig = target_method()->name_and_sig_as_C_string();
  51.168                const char* loader1 = SystemDictionary::loader_name(target_loader());
  51.169 -              char* current = _klass->name()->as_C_string();
  51.170 +              char* current = target_klass->name()->as_C_string();
  51.171                const char* loader2 = SystemDictionary::loader_name(super_loader());
  51.172                char* failed_type_name = failed_type_symbol->as_C_string();
  51.173                size_t buflen = strlen(msg) + strlen(sig) + strlen(loader1) +
  51.174 @@ -360,16 +421,39 @@
  51.175            }
  51.176         }
  51.177  
  51.178 -        put_method_at(target_method(), i);
  51.179 -        target_method()->set_vtable_index(i);
  51.180 +       put_method_at(target_method(), i);
  51.181 +       if (!is_default) {
  51.182 +         target_method()->set_vtable_index(i);
  51.183 +       } else {
  51.184 +         if (def_vtable_indices != NULL) {
  51.185 +           def_vtable_indices->at_put(default_index, i);
  51.186 +         }
  51.187 +         assert(super_method->is_default_method() || super_method->is_overpass()
  51.188 +                || super_method->is_abstract(), "default override error");
  51.189 +       }
  51.190 +
  51.191 +
  51.192  #ifndef PRODUCT
  51.193          if (PrintVtables && Verbose) {
  51.194 +          ResourceMark rm(THREAD);
  51.195 +          char* sig = target_method()->name_and_sig_as_C_string();
  51.196            tty->print("overriding with %s::%s index %d, original flags: ",
  51.197 -           _klass->internal_name(), (target_method() != NULL) ?
  51.198 -           target_method()->name()->as_C_string() : "<NULL>", i);
  51.199 +           target_klass->internal_name(), sig, i);
  51.200             super_method->access_flags().print_on(tty);
  51.201 +           if (super_method->is_default_method()) {
  51.202 +             tty->print("default");
  51.203 +           }
  51.204 +           if (super_method->is_overpass()) {
  51.205 +             tty->print("overpass");
  51.206 +           }
  51.207             tty->print("overriders flags: ");
  51.208             target_method->access_flags().print_on(tty);
  51.209 +           if (target_method->is_default_method()) {
  51.210 +             tty->print("default");
  51.211 +           }
  51.212 +           if (target_method->is_overpass()) {
  51.213 +             tty->print("overpass");
  51.214 +           }
  51.215             tty->cr();
  51.216          }
  51.217  #endif /*PRODUCT*/
  51.218 @@ -378,12 +462,25 @@
  51.219          // but not override another. Once we override one, not need new
  51.220  #ifndef PRODUCT
  51.221          if (PrintVtables && Verbose) {
  51.222 +          ResourceMark rm(THREAD);
  51.223 +          char* sig = target_method()->name_and_sig_as_C_string();
  51.224            tty->print("NOT overriding with %s::%s index %d, original flags: ",
  51.225 -           _klass->internal_name(), (target_method() != NULL) ?
  51.226 -           target_method()->name()->as_C_string() : "<NULL>", i);
  51.227 +           target_klass->internal_name(), sig,i);
  51.228             super_method->access_flags().print_on(tty);
  51.229 +           if (super_method->is_default_method()) {
  51.230 +             tty->print("default");
  51.231 +           }
  51.232 +           if (super_method->is_overpass()) {
  51.233 +             tty->print("overpass");
  51.234 +           }
  51.235             tty->print("overriders flags: ");
  51.236             target_method->access_flags().print_on(tty);
  51.237 +           if (target_method->is_default_method()) {
  51.238 +             tty->print("default");
  51.239 +           }
  51.240 +           if (target_method->is_overpass()) {
  51.241 +             tty->print("overpass");
  51.242 +           }
  51.243             tty->cr();
  51.244          }
  51.245  #endif /*PRODUCT*/
  51.246 @@ -438,6 +535,14 @@
  51.247      return false;
  51.248    }
  51.249  
  51.250 +  // Concrete interface methods do not need new entries, they override
  51.251 +  // abstract method entries using default inheritance rules
  51.252 +  if (target_method()->method_holder() != NULL &&
  51.253 +      target_method()->method_holder()->is_interface()  &&
  51.254 +      !target_method()->is_abstract() ) {
  51.255 +    return false;
  51.256 +  }
  51.257 +
  51.258    // we need a new entry if there is no superclass
  51.259    if (super == NULL) {
  51.260      return true;
  51.261 @@ -446,7 +551,7 @@
  51.262    // private methods in classes always have a new entry in the vtable
  51.263    // specification interpretation since classic has
  51.264    // private methods not overriding
  51.265 -  // JDK8 adds private methods in interfaces which require invokespecial
  51.266 +  // JDK8 adds private  methods in interfaces which require invokespecial
  51.267    if (target_method()->is_private()) {
  51.268      return true;
  51.269    }
  51.270 @@ -526,35 +631,40 @@
  51.271    if (mhk->is_interface()) {
  51.272      assert(m->is_public(), "should be public");
  51.273      assert(ik()->implements_interface(method_holder) , "this class should implement the interface");
  51.274 -    assert(is_miranda(m, ik()->methods(), ik()->super()), "should be a miranda_method");
  51.275 +    assert(is_miranda(m, ik()->methods(), ik()->default_methods(), ik()->super()), "should be a miranda_method");
  51.276      return true;
  51.277    }
  51.278    return false;
  51.279  }
  51.280  
  51.281 -// check if a method is a miranda method, given a class's methods table and its super
  51.282 -// "miranda" means not static, not defined by this class, and not defined
  51.283 -// in super unless it is private and therefore inaccessible to this class.
  51.284 +// check if a method is a miranda method, given a class's methods table,
  51.285 +// its default_method table  and its super
  51.286 +// "miranda" means not static, not defined by this class.
  51.287 +// private methods in interfaces do not belong in the miranda list.
  51.288  // the caller must make sure that the method belongs to an interface implemented by the class
  51.289  // Miranda methods only include public interface instance methods
  51.290 -// Not private methods, not static methods, not default = concrete abstract
  51.291 -bool klassVtable::is_miranda(Method* m, Array<Method*>* class_methods, Klass* super) {
  51.292 -  if (m->is_static()) {
  51.293 +// Not private methods, not static methods, not default == concrete abstract
  51.294 +bool klassVtable::is_miranda(Method* m, Array<Method*>* class_methods,
  51.295 +                             Array<Method*>* default_methods, Klass* super) {
  51.296 +  if (m->is_static() || m->is_private()) {
  51.297      return false;
  51.298    }
  51.299    Symbol* name = m->name();
  51.300    Symbol* signature = m->signature();
  51.301    if (InstanceKlass::find_method(class_methods, name, signature) == NULL) {
  51.302      // did not find it in the method table of the current class
  51.303 -    if (super == NULL) {
  51.304 -      // super doesn't exist
  51.305 -      return true;
  51.306 -    }
  51.307 +    if ((default_methods == NULL) ||
  51.308 +        InstanceKlass::find_method(default_methods, name, signature) == NULL) {
  51.309 +      if (super == NULL) {
  51.310 +        // super doesn't exist
  51.311 +        return true;
  51.312 +      }
  51.313  
  51.314 -    Method* mo = InstanceKlass::cast(super)->lookup_method(name, signature);
  51.315 -    if (mo == NULL || mo->access_flags().is_private() ) {
  51.316 -      // super class hierarchy does not implement it or protection is different
  51.317 -      return true;
  51.318 +      Method* mo = InstanceKlass::cast(super)->lookup_method(name, signature);
  51.319 +      if (mo == NULL || mo->access_flags().is_private() ) {
  51.320 +        // super class hierarchy does not implement it or protection is different
  51.321 +        return true;
  51.322 +      }
  51.323      }
  51.324    }
  51.325  
  51.326 @@ -562,7 +672,7 @@
  51.327  }
  51.328  
  51.329  // Scans current_interface_methods for miranda methods that do not
  51.330 -// already appear in new_mirandas and are also not defined-and-non-private
  51.331 +// already appear in new_mirandas, or default methods,  and are also not defined-and-non-private
  51.332  // in super (superclass).  These mirandas are added to all_mirandas if it is
  51.333  // not null; in addition, those that are not duplicates of miranda methods
  51.334  // inherited by super from its interfaces are added to new_mirandas.
  51.335 @@ -572,7 +682,8 @@
  51.336  void klassVtable::add_new_mirandas_to_lists(
  51.337      GrowableArray<Method*>* new_mirandas, GrowableArray<Method*>* all_mirandas,
  51.338      Array<Method*>* current_interface_methods, Array<Method*>* class_methods,
  51.339 -    Klass* super) {
  51.340 +    Array<Method*>* default_methods, Klass* super) {
  51.341 +
  51.342    // iterate thru the current interface's method to see if it a miranda
  51.343    int num_methods = current_interface_methods->length();
  51.344    for (int i = 0; i < num_methods; i++) {
  51.345 @@ -590,7 +701,7 @@
  51.346      }
  51.347  
  51.348      if (!is_duplicate) { // we don't want duplicate miranda entries in the vtable
  51.349 -      if (is_miranda(im, class_methods, super)) { // is it a miranda at all?
  51.350 +      if (is_miranda(im, class_methods, default_methods, super)) { // is it a miranda at all?
  51.351          InstanceKlass *sk = InstanceKlass::cast(super);
  51.352          // check if it is a duplicate of a super's miranda
  51.353          if (sk->lookup_method_in_all_interfaces(im->name(), im->signature()) == NULL) {
  51.354 @@ -607,6 +718,7 @@
  51.355  void klassVtable::get_mirandas(GrowableArray<Method*>* new_mirandas,
  51.356                                 GrowableArray<Method*>* all_mirandas,
  51.357                                 Klass* super, Array<Method*>* class_methods,
  51.358 +                               Array<Method*>* default_methods,
  51.359                                 Array<Klass*>* local_interfaces) {
  51.360    assert((new_mirandas->length() == 0) , "current mirandas must be 0");
  51.361  
  51.362 @@ -615,14 +727,16 @@
  51.363    for (int i = 0; i < num_local_ifs; i++) {
  51.364      InstanceKlass *ik = InstanceKlass::cast(local_interfaces->at(i));
  51.365      add_new_mirandas_to_lists(new_mirandas, all_mirandas,
  51.366 -                              ik->methods(), class_methods, super);
  51.367 +                              ik->methods(), class_methods,
  51.368 +                              default_methods, super);
  51.369      // iterate thru each local's super interfaces
  51.370      Array<Klass*>* super_ifs = ik->transitive_interfaces();
  51.371      int num_super_ifs = super_ifs->length();
  51.372      for (int j = 0; j < num_super_ifs; j++) {
  51.373        InstanceKlass *sik = InstanceKlass::cast(super_ifs->at(j));
  51.374        add_new_mirandas_to_lists(new_mirandas, all_mirandas,
  51.375 -                                sik->methods(), class_methods, super);
  51.376 +                                sik->methods(), class_methods,
  51.377 +                                default_methods, super);
  51.378      }
  51.379    }
  51.380  }
  51.381 @@ -633,8 +747,22 @@
  51.382  int klassVtable::fill_in_mirandas(int initialized) {
  51.383    GrowableArray<Method*> mirandas(20);
  51.384    get_mirandas(&mirandas, NULL, ik()->super(), ik()->methods(),
  51.385 -               ik()->local_interfaces());
  51.386 +               ik()->default_methods(), ik()->local_interfaces());
  51.387    for (int i = 0; i < mirandas.length(); i++) {
  51.388 +    if (PrintVtables && Verbose) {
  51.389 +      Method* meth = mirandas.at(i);
  51.390 +      ResourceMark rm(Thread::current());
  51.391 +      if (meth != NULL) {
  51.392 +        char* sig = meth->name_and_sig_as_C_string();
  51.393 +        tty->print("fill in mirandas with %s index %d, flags: ",
  51.394 +          sig, initialized);
  51.395 +        meth->access_flags().print_on(tty);
  51.396 +        if (meth->is_default_method()) {
  51.397 +          tty->print("default");
  51.398 +        }
  51.399 +        tty->cr();
  51.400 +      }
  51.401 +    }
  51.402      put_method_at(mirandas.at(i), initialized);
  51.403      ++initialized;
  51.404    }
  51.405 @@ -648,6 +776,26 @@
  51.406  }
  51.407  
  51.408  #if INCLUDE_JVMTI
  51.409 +bool klassVtable::adjust_default_method(int vtable_index, Method* old_method, Method* new_method) {
  51.410 +  // If old_method is default, find this vtable index in default_vtable_indices
  51.411 +  // and replace that method in the _default_methods list
  51.412 +  bool updated = false;
  51.413 +
  51.414 +  Array<Method*>* default_methods = ik()->default_methods();
  51.415 +  if (default_methods != NULL) {
  51.416 +    int len = default_methods->length();
  51.417 +    for (int idx = 0; idx < len; idx++) {
  51.418 +      if (vtable_index == ik()->default_vtable_indices()->at(idx)) {
  51.419 +        if (default_methods->at(idx) == old_method) {
  51.420 +          default_methods->at_put(idx, new_method);
  51.421 +          updated = true;
  51.422 +        }
  51.423 +        break;
  51.424 +      }
  51.425 +    }
  51.426 +  }
  51.427 +  return updated;
  51.428 +}
  51.429  void klassVtable::adjust_method_entries(Method** old_methods, Method** new_methods,
  51.430                                          int methods_length, bool * trace_name_printed) {
  51.431    // search the vtable for uses of either obsolete or EMCP methods
  51.432 @@ -663,18 +811,26 @@
  51.433      for (int index = 0; index < length(); index++) {
  51.434        if (unchecked_method_at(index) == old_method) {
  51.435          put_method_at(new_method, index);
  51.436 +          // For default methods, need to update the _default_methods array
  51.437 +          // which can only have one method entry for a given signature
  51.438 +          bool updated_default = false;
  51.439 +          if (old_method->is_default_method()) {
  51.440 +            updated_default = adjust_default_method(index, old_method, new_method);
  51.441 +          }
  51.442  
  51.443          if (RC_TRACE_IN_RANGE(0x00100000, 0x00400000)) {
  51.444            if (!(*trace_name_printed)) {
  51.445              // RC_TRACE_MESG macro has an embedded ResourceMark
  51.446 -            RC_TRACE_MESG(("adjust: name=%s",
  51.447 +            RC_TRACE_MESG(("adjust: klassname=%s for methods from name=%s",
  51.448 +                           klass()->external_name(),
  51.449                             old_method->method_holder()->external_name()));
  51.450              *trace_name_printed = true;
  51.451            }
  51.452            // RC_TRACE macro has an embedded ResourceMark
  51.453 -          RC_TRACE(0x00100000, ("vtable method update: %s(%s)",
  51.454 +          RC_TRACE(0x00100000, ("vtable method update: %s(%s), updated default = %s",
  51.455                                  new_method->name()->as_C_string(),
  51.456 -                                new_method->signature()->as_C_string()));
  51.457 +                                new_method->signature()->as_C_string(),
  51.458 +                                updated_default ? "true" : "false"));
  51.459          }
  51.460          // cannot 'break' here; see for-loop comment above.
  51.461        }
  51.462 @@ -701,6 +857,12 @@
  51.463      if (m != NULL) {
  51.464        tty->print("      (%5d)  ", i);
  51.465        m->access_flags().print_on(tty);
  51.466 +      if (m->is_default_method()) {
  51.467 +        tty->print("default");
  51.468 +      }
  51.469 +      if (m->is_overpass()) {
  51.470 +        tty->print("overpass");
  51.471 +      }
  51.472        tty->print(" --  ");
  51.473        m->print_name(tty);
  51.474        tty->cr();
  51.475 @@ -757,9 +919,9 @@
  51.476  // Initialization
  51.477  void klassItable::initialize_itable(bool checkconstraints, TRAPS) {
  51.478    if (_klass->is_interface()) {
  51.479 -    // This needs to go after vtable indexes are assigned but
  51.480 -    // before implementors need to know the number of itable indexes.
  51.481 -    assign_itable_indexes_for_interface(_klass());
  51.482 +    // This needs to go after vtable indices are assigned but
  51.483 +    // before implementors need to know the number of itable indices.
  51.484 +    assign_itable_indices_for_interface(_klass());
  51.485    }
  51.486  
  51.487    // Cannot be setup doing bootstrapping, interfaces don't have
  51.488 @@ -803,7 +965,7 @@
  51.489    return true;
  51.490  }
  51.491  
  51.492 -int klassItable::assign_itable_indexes_for_interface(Klass* klass) {
  51.493 +int klassItable::assign_itable_indices_for_interface(Klass* klass) {
  51.494    // an interface does not have an itable, but its methods need to be numbered
  51.495    if (TraceItables) tty->print_cr("%3d: Initializing itable for interface %s", ++initialize_count,
  51.496                                    klass->name()->as_C_string());
  51.497 @@ -846,7 +1008,7 @@
  51.498      }
  51.499      nof_methods -= 1;
  51.500    }
  51.501 -  // no methods have itable indexes
  51.502 +  // no methods have itable indices
  51.503    return 0;
  51.504  }
  51.505  
  51.506 @@ -907,6 +1069,21 @@
  51.507        int ime_num = m->itable_index();
  51.508        assert(ime_num < ime_count, "oob");
  51.509        itableOffsetEntry::method_entry(_klass(), method_table_offset)[ime_num].initialize(target());
  51.510 +      if (TraceItables && Verbose) {
  51.511 +        ResourceMark rm(THREAD);
  51.512 +        if (target() != NULL) {
  51.513 +          char* sig = target()->name_and_sig_as_C_string();
  51.514 +          tty->print("interface: %s, ime_num: %d, target: %s, method_holder: %s ",
  51.515 +                    interf_h()->internal_name(), ime_num, sig,
  51.516 +                    target()->method_holder()->internal_name());
  51.517 +          tty->print("target_method flags: ");
  51.518 +          target()->access_flags().print_on(tty);
  51.519 +          if (target()->is_default_method()) {
  51.520 +            tty->print("default");
  51.521 +          }
  51.522 +          tty->cr();
  51.523 +        }
  51.524 +      }
  51.525      }
  51.526    }
  51.527  }
  51.528 @@ -980,6 +1157,9 @@
  51.529      if (m != NULL) {
  51.530        tty->print("      (%5d)  ", i);
  51.531        m->access_flags().print_on(tty);
  51.532 +      if (m->is_default_method()) {
  51.533 +        tty->print("default");
  51.534 +      }
  51.535        tty->print(" --  ");
  51.536        m->print_name(tty);
  51.537        tty->cr();
  51.538 @@ -1116,7 +1296,7 @@
  51.539    Array<Method*>* methods = InstanceKlass::cast(intf)->methods();
  51.540  
  51.541    if (itable_index < 0 || itable_index >= method_count_for_interface(intf))
  51.542 -    return NULL;                // help caller defend against bad indexes
  51.543 +    return NULL;                // help caller defend against bad indices
  51.544  
  51.545    int index = itable_index;
  51.546    Method* m = methods->at(index);
    52.1 --- a/src/share/vm/oops/klassVtable.hpp	Sun Oct 13 21:14:04 2013 +0100
    52.2 +++ b/src/share/vm/oops/klassVtable.hpp	Thu Oct 17 14:20:57 2013 -0700
    52.3 @@ -97,6 +97,7 @@
    52.4    // trace_name_printed is set to true if the current call has
    52.5    // printed the klass name so that other routines in the adjust_*
    52.6    // group don't print the klass name.
    52.7 +  bool adjust_default_method(int vtable_index, Method* old_method, Method* new_method);
    52.8    void adjust_method_entries(Method** old_methods, Method** new_methods,
    52.9                               int methods_length, bool * trace_name_printed);
   52.10    bool check_no_old_or_obsolete_entries();
   52.11 @@ -118,24 +119,28 @@
   52.12    void put_method_at(Method* m, int index);
   52.13    static bool needs_new_vtable_entry(methodHandle m, Klass* super, Handle classloader, Symbol* classname, AccessFlags access_flags, TRAPS);
   52.14  
   52.15 -  bool update_inherited_vtable(InstanceKlass* klass, methodHandle target_method, int super_vtable_len, bool checkconstraints, TRAPS);
   52.16 +  bool update_inherited_vtable(InstanceKlass* klass, methodHandle target_method, int super_vtable_len, int default_index, bool checkconstraints, TRAPS);
   52.17   InstanceKlass* find_transitive_override(InstanceKlass* initialsuper, methodHandle target_method, int vtable_index,
   52.18                                           Handle target_loader, Symbol* target_classname, Thread* THREAD);
   52.19  
   52.20    // support for miranda methods
   52.21    bool is_miranda_entry_at(int i);
   52.22    int fill_in_mirandas(int initialized);
   52.23 -  static bool is_miranda(Method* m, Array<Method*>* class_methods, Klass* super);
   52.24 +  static bool is_miranda(Method* m, Array<Method*>* class_methods,
   52.25 +                         Array<Method*>* default_methods, Klass* super);
   52.26    static void add_new_mirandas_to_lists(
   52.27        GrowableArray<Method*>* new_mirandas,
   52.28        GrowableArray<Method*>* all_mirandas,
   52.29 -      Array<Method*>* current_interface_methods, Array<Method*>* class_methods,
   52.30 +      Array<Method*>* current_interface_methods,
   52.31 +      Array<Method*>* class_methods,
   52.32 +      Array<Method*>* default_methods,
   52.33        Klass* super);
   52.34    static void get_mirandas(
   52.35        GrowableArray<Method*>* new_mirandas,
   52.36        GrowableArray<Method*>* all_mirandas, Klass* super,
   52.37 -      Array<Method*>* class_methods, Array<Klass*>* local_interfaces);
   52.38 -
   52.39 +      Array<Method*>* class_methods,
   52.40 +      Array<Method*>* default_methods,
   52.41 +      Array<Klass*>* local_interfaces);
   52.42    void verify_against(outputStream* st, klassVtable* vt, int index);
   52.43    inline InstanceKlass* ik() const;
   52.44  };
   52.45 @@ -290,7 +295,7 @@
   52.46  #endif // INCLUDE_JVMTI
   52.47  
   52.48    // Setup of itable
   52.49 -  static int assign_itable_indexes_for_interface(Klass* klass);
   52.50 +  static int assign_itable_indices_for_interface(Klass* klass);
   52.51    static int method_count_for_interface(Klass* klass);
   52.52    static int compute_itable_size(Array<Klass*>* transitive_interfaces);
   52.53    static void setup_itable_offset_table(instanceKlassHandle klass);
    53.1 --- a/src/share/vm/oops/method.cpp	Sun Oct 13 21:14:04 2013 +0100
    53.2 +++ b/src/share/vm/oops/method.cpp	Thu Oct 17 14:20:57 2013 -0700
    53.3 @@ -511,9 +511,9 @@
    53.4  
    53.5  bool Method::is_final_method(AccessFlags class_access_flags) const {
    53.6    // or "does_not_require_vtable_entry"
    53.7 -  // overpass can occur, is not final (reuses vtable entry)
    53.8 +  // default method or overpass can occur, is not final (reuses vtable entry)
    53.9    // private methods get vtable entries for backward class compatibility.
   53.10 -  if (is_overpass())  return false;
   53.11 +  if (is_overpass() || is_default_method())  return false;
   53.12    return is_final() || class_access_flags.is_final();
   53.13  }
   53.14  
   53.15 @@ -521,11 +521,24 @@
   53.16    return is_final_method(method_holder()->access_flags());
   53.17  }
   53.18  
   53.19 +bool Method::is_default_method() const {
   53.20 +  if (method_holder() != NULL &&
   53.21 +      method_holder()->is_interface() &&
   53.22 +      !is_abstract()) {
   53.23 +    return true;
   53.24 +  } else {
   53.25 +    return false;
   53.26 +  }
   53.27 +}
   53.28 +
   53.29  bool Method::can_be_statically_bound(AccessFlags class_access_flags) const {
   53.30    if (is_final_method(class_access_flags))  return true;
   53.31  #ifdef ASSERT
   53.32 +  ResourceMark rm;
   53.33    bool is_nonv = (vtable_index() == nonvirtual_vtable_index);
   53.34 -  if (class_access_flags.is_interface())  assert(is_nonv == is_static(), err_msg("is_nonv=%s", is_nonv));
   53.35 +  if (class_access_flags.is_interface()) {
   53.36 +      assert(is_nonv == is_static(), err_msg("is_nonv=%s", name_and_sig_as_C_string()));
   53.37 +  }
   53.38  #endif
   53.39    assert(valid_vtable_index() || valid_itable_index(), "method must be linked before we ask this question");
   53.40    return vtable_index() == nonvirtual_vtable_index;
   53.41 @@ -1371,7 +1384,8 @@
   53.42  }
   53.43  
   53.44  // This is only done during class loading, so it is OK to assume method_idnum matches the methods() array
   53.45 -void Method::sort_methods(Array<Method*>* methods, bool idempotent) {
   53.46 +// default_methods also uses this without the ordering for fast find_method
   53.47 +void Method::sort_methods(Array<Method*>* methods, bool idempotent, bool set_idnums) {
   53.48    int length = methods->length();
   53.49    if (length > 1) {
   53.50      {
   53.51 @@ -1379,14 +1393,15 @@
   53.52        QuickSort::sort<Method*>(methods->data(), length, method_comparator, idempotent);
   53.53      }
   53.54      // Reset method ordering
   53.55 -    for (int i = 0; i < length; i++) {
   53.56 -      Method* m = methods->at(i);
   53.57 -      m->set_method_idnum(i);
   53.58 +    if (set_idnums) {
   53.59 +      for (int i = 0; i < length; i++) {
   53.60 +        Method* m = methods->at(i);
   53.61 +        m->set_method_idnum(i);
   53.62 +      }
   53.63      }
   53.64    }
   53.65  }
   53.66  
   53.67 -
   53.68  //-----------------------------------------------------------------------------------
   53.69  // Non-product code unless JVM/TI needs it
   53.70  
    54.1 --- a/src/share/vm/oops/method.hpp	Sun Oct 13 21:14:04 2013 +0100
    54.2 +++ b/src/share/vm/oops/method.hpp	Thu Oct 17 14:20:57 2013 -0700
    54.3 @@ -567,6 +567,7 @@
    54.4    // checks method and its method holder
    54.5    bool is_final_method() const;
    54.6    bool is_final_method(AccessFlags class_access_flags) const;
    54.7 +  bool is_default_method() const;
    54.8  
    54.9    // true if method needs no dynamic dispatch (final and/or no vtable entry)
   54.10    bool can_be_statically_bound() const;
   54.11 @@ -846,7 +847,7 @@
   54.12  #endif
   54.13  
   54.14    // Helper routine used for method sorting
   54.15 -  static void sort_methods(Array<Method*>* methods, bool idempotent = false);
   54.16 +  static void sort_methods(Array<Method*>* methods, bool idempotent = false, bool set_idnums = true);
   54.17  
   54.18    // Deallocation function for redefine classes or if an error occurs
   54.19    void deallocate_contents(ClassLoaderData* loader_data);
    55.1 --- a/src/share/vm/opto/graphKit.cpp	Sun Oct 13 21:14:04 2013 +0100
    55.2 +++ b/src/share/vm/opto/graphKit.cpp	Thu Oct 17 14:20:57 2013 -0700
    55.3 @@ -3713,7 +3713,8 @@
    55.4    Node* no_base = __ top();
    55.5    float likely  = PROB_LIKELY(0.999);
    55.6    float unlikely  = PROB_UNLIKELY(0.999);
    55.7 -  Node* zero = __ ConI(0);
    55.8 +  Node* young_card = __ ConI((jint)G1SATBCardTableModRefBS::g1_young_card_val());
    55.9 +  Node* dirty_card = __ ConI((jint)CardTableModRefBS::dirty_card_val());
   55.10    Node* zeroX = __ ConX(0);
   55.11  
   55.12    // Get the alias_index for raw card-mark memory
   55.13 @@ -3769,8 +3770,16 @@
   55.14          // load the original value of the card
   55.15          Node* card_val = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
   55.16  
   55.17 -        __ if_then(card_val, BoolTest::ne, zero); {
   55.18 -          g1_mark_card(ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
   55.19 +        __ if_then(card_val, BoolTest::ne, young_card); {
   55.20 +          sync_kit(ideal);
   55.21 +          // Use Op_MemBarVolatile to achieve the effect of a StoreLoad barrier.
   55.22 +          insert_mem_bar(Op_MemBarVolatile, oop_store);
   55.23 +          __ sync_kit(this);
   55.24 +
   55.25 +          Node* card_val_reload = __ load(__ ctrl(), card_adr, TypeInt::INT, T_BYTE, Compile::AliasIdxRaw);
   55.26 +          __ if_then(card_val_reload, BoolTest::ne, dirty_card); {
   55.27 +            g1_mark_card(ideal, card_adr, oop_store, alias_idx, index, index_adr, buffer, tf);
   55.28 +          } __ end_if();
   55.29          } __ end_if();
   55.30        } __ end_if();
   55.31      } __ end_if();
    56.1 --- a/src/share/vm/prims/jni.cpp	Sun Oct 13 21:14:04 2013 +0100
    56.2 +++ b/src/share/vm/prims/jni.cpp	Thu Oct 17 14:20:57 2013 -0700
    56.3 @@ -1591,10 +1591,8 @@
    56.4      }
    56.5    } else {
    56.6      m = klass->lookup_method(name, signature);
    56.7 -    // Look up interfaces
    56.8 -    if (m == NULL && klass->oop_is_instance()) {
    56.9 -      m = InstanceKlass::cast(klass())->lookup_method_in_all_interfaces(name,
   56.10 -                                                                   signature);
   56.11 +    if (m == NULL &&  klass->oop_is_instance()) {
   56.12 +      m = InstanceKlass::cast(klass())->lookup_method_in_ordered_interfaces(name, signature);
   56.13      }
   56.14    }
   56.15    if (m == NULL || (m->is_static() != is_static)) {
   56.16 @@ -3210,7 +3208,11 @@
   56.17    HOTSPOT_JNI_GETSTRINGLENGTH_ENTRY(
   56.18                                      env, string);
   56.19  #endif /* USDT2 */
   56.20 -  jsize ret = java_lang_String::length(JNIHandles::resolve_non_null(string));
   56.21 +  jsize ret = 0;
   56.22 +  oop s = JNIHandles::resolve_non_null(string);
   56.23 +  if (java_lang_String::value(s) != NULL) {
   56.24 +    ret = java_lang_String::length(s);
   56.25 +  }
   56.26  #ifndef USDT2
   56.27    DTRACE_PROBE1(hotspot_jni, GetStringLength__return, ret);
   56.28  #else /* USDT2 */
   56.29 @@ -3230,20 +3232,23 @@
   56.30   HOTSPOT_JNI_GETSTRINGCHARS_ENTRY(
   56.31                                    env, string, (uintptr_t *) isCopy);
   56.32  #endif /* USDT2 */
   56.33 +  jchar* buf = NULL;
   56.34    oop s = JNIHandles::resolve_non_null(string);
   56.35 -  int s_len = java_lang_String::length(s);
   56.36    typeArrayOop s_value = java_lang_String::value(s);
   56.37 -  int s_offset = java_lang_String::offset(s);
   56.38 -  jchar* buf = NEW_C_HEAP_ARRAY_RETURN_NULL(jchar, s_len + 1, mtInternal);  // add one for zero termination
   56.39 -  /* JNI Specification states return NULL on OOM */
   56.40 -  if (buf != NULL) {
   56.41 -    if (s_len > 0) {
   56.42 -      memcpy(buf, s_value->char_at_addr(s_offset), sizeof(jchar)*s_len);
   56.43 -    }
   56.44 -    buf[s_len] = 0;
   56.45 -    //%note jni_5
   56.46 -    if (isCopy != NULL) {
   56.47 -      *isCopy = JNI_TRUE;
   56.48 +  if (s_value != NULL) {
   56.49 +    int s_len = java_lang_String::length(s);
   56.50 +    int s_offset = java_lang_String::offset(s);
   56.51 +    buf = NEW_C_HEAP_ARRAY_RETURN_NULL(jchar, s_len + 1, mtInternal);  // add one for zero termination
   56.52 +    /* JNI Specification states return NULL on OOM */
   56.53 +    if (buf != NULL) {
   56.54 +      if (s_len > 0) {
   56.55 +        memcpy(buf, s_value->char_at_addr(s_offset), sizeof(jchar)*s_len);
   56.56 +      }
   56.57 +      buf[s_len] = 0;
   56.58 +      //%note jni_5
   56.59 +      if (isCopy != NULL) {
   56.60 +        *isCopy = JNI_TRUE;
   56.61 +      }
   56.62      }
   56.63    }
   56.64  #ifndef USDT2
   56.65 @@ -3313,7 +3318,11 @@
   56.66   HOTSPOT_JNI_GETSTRINGUTFLENGTH_ENTRY(
   56.67                                        env, string);
   56.68  #endif /* USDT2 */
   56.69 -  jsize ret = java_lang_String::utf8_length(JNIHandles::resolve_non_null(string));
   56.70 +  jsize ret = 0;
   56.71 +  oop java_string = JNIHandles::resolve_non_null(string);
   56.72 +  if (java_lang_String::value(java_string) != NULL) {
   56.73 +    ret = java_lang_String::utf8_length(java_string);
   56.74 +  }
   56.75  #ifndef USDT2
   56.76    DTRACE_PROBE1(hotspot_jni, GetStringUTFLength__return, ret);
   56.77  #else /* USDT2 */
   56.78 @@ -3332,14 +3341,17 @@
   56.79   HOTSPOT_JNI_GETSTRINGUTFCHARS_ENTRY(
   56.80                                       env, string, (uintptr_t *) isCopy);
   56.81  #endif /* USDT2 */
   56.82 +  char* result = NULL;
   56.83    oop java_string = JNIHandles::resolve_non_null(string);
   56.84 -  size_t length = java_lang_String::utf8_length(java_string);
   56.85 -  /* JNI Specification states return NULL on OOM */
   56.86 -  char* result = AllocateHeap(length + 1, mtInternal, 0, AllocFailStrategy::RETURN_NULL);
   56.87 -  if (result != NULL) {
   56.88 -    java_lang_String::as_utf8_string(java_string, result, (int) length + 1);
   56.89 -    if (isCopy != NULL) {
   56.90 -      *isCopy = JNI_TRUE;
   56.91 +  if (java_lang_String::value(java_string) != NULL) {
   56.92 +    size_t length = java_lang_String::utf8_length(java_string);
   56.93 +    /* JNI Specification states return NULL on OOM */
   56.94 +    result = AllocateHeap(length + 1, mtInternal, 0, AllocFailStrategy::RETURN_NULL);
   56.95 +    if (result != NULL) {
   56.96 +      java_lang_String::as_utf8_string(java_string, result, (int) length + 1);
   56.97 +      if (isCopy != NULL) {
   56.98 +        *isCopy = JNI_TRUE;
   56.99 +      }
  56.100      }
  56.101    }
  56.102  #ifndef USDT2
    57.1 --- a/src/share/vm/prims/jniCheck.cpp	Sun Oct 13 21:14:04 2013 +0100
    57.2 +++ b/src/share/vm/prims/jniCheck.cpp	Thu Oct 17 14:20:57 2013 -0700
    57.3 @@ -1324,18 +1324,19 @@
    57.4      IN_VM(
    57.5        checkString(thr, str);
    57.6      )
    57.7 +    jchar* newResult = NULL;
    57.8      const jchar *result = UNCHECKED()->GetStringChars(env,str,isCopy);
    57.9      assert (isCopy == NULL || *isCopy == JNI_TRUE, "GetStringChars didn't return a copy as expected");
   57.10 -
   57.11 -    size_t len = UNCHECKED()->GetStringLength(env,str) + 1; // + 1 for NULL termination
   57.12 -    jint* tagLocation = (jint*) AllocateHeap(len * sizeof(jchar) + sizeof(jint), mtInternal);
   57.13 -    *tagLocation = STRING_TAG;
   57.14 -    jchar* newResult = (jchar*) (tagLocation + 1);
   57.15 -    memcpy(newResult, result, len * sizeof(jchar));
   57.16 -    // Avoiding call to UNCHECKED()->ReleaseStringChars() since that will fire unexpected dtrace probes
   57.17 -    // Note that the dtrace arguments for the allocated memory will not match up with this solution.
   57.18 -    FreeHeap((char*)result);
   57.19 -
   57.20 +    if (result != NULL) {
   57.21 +      size_t len = UNCHECKED()->GetStringLength(env,str) + 1; // + 1 for NULL termination
   57.22 +      jint* tagLocation = (jint*) AllocateHeap(len * sizeof(jchar) + sizeof(jint), mtInternal);
   57.23 +      *tagLocation = STRING_TAG;
   57.24 +      newResult = (jchar*) (tagLocation + 1);
   57.25 +      memcpy(newResult, result, len * sizeof(jchar));
   57.26 +      // Avoiding call to UNCHECKED()->ReleaseStringChars() since that will fire unexpected dtrace probes
   57.27 +      // Note that the dtrace arguments for the allocated memory will not match up with this solution.
   57.28 +      FreeHeap((char*)result);
   57.29 +    }
   57.30      functionExit(env);
   57.31      return newResult;
   57.32  JNI_END
   57.33 @@ -1394,18 +1395,19 @@
   57.34      IN_VM(
   57.35        checkString(thr, str);
   57.36      )
   57.37 +    char* newResult = NULL;
   57.38      const char *result = UNCHECKED()->GetStringUTFChars(env,str,isCopy);
   57.39      assert (isCopy == NULL || *isCopy == JNI_TRUE, "GetStringUTFChars didn't return a copy as expected");
   57.40 -
   57.41 -    size_t len = strlen(result) + 1; // + 1 for NULL termination
   57.42 -    jint* tagLocation = (jint*) AllocateHeap(len + sizeof(jint), mtInternal);
   57.43 -    *tagLocation = STRING_UTF_TAG;
   57.44 -    char* newResult = (char*) (tagLocation + 1);
   57.45 -    strcpy(newResult, result);
   57.46 -    // Avoiding call to UNCHECKED()->ReleaseStringUTFChars() since that will fire unexpected dtrace probes
   57.47 -    // Note that the dtrace arguments for the allocated memory will not match up with this solution.
   57.48 -    FreeHeap((char*)result, mtInternal);
   57.49 -
   57.50 +    if (result != NULL) {
   57.51 +      size_t len = strlen(result) + 1; // + 1 for NULL termination
   57.52 +      jint* tagLocation = (jint*) AllocateHeap(len + sizeof(jint), mtInternal);
   57.53 +      *tagLocation = STRING_UTF_TAG;
   57.54 +      newResult = (char*) (tagLocation + 1);
   57.55 +      strcpy(newResult, result);
   57.56 +      // Avoiding call to UNCHECKED()->ReleaseStringUTFChars() since that will fire unexpected dtrace probes
   57.57 +      // Note that the dtrace arguments for the allocated memory will not match up with this solution.
   57.58 +      FreeHeap((char*)result, mtInternal);
   57.59 +    }
   57.60      functionExit(env);
   57.61      return newResult;
   57.62  JNI_END
    58.1 --- a/src/share/vm/prims/jvm.cpp	Sun Oct 13 21:14:04 2013 +0100
    58.2 +++ b/src/share/vm/prims/jvm.cpp	Thu Oct 17 14:20:57 2013 -0700
    58.3 @@ -668,13 +668,12 @@
    58.4  JVM_ENTRY(jclass, JVM_GetCallerClass(JNIEnv* env, int depth))
    58.5    JVMWrapper("JVM_GetCallerClass");
    58.6  
    58.7 -  // Pre-JDK 8 and early builds of JDK 8 don't have a CallerSensitive annotation.
    58.8 -  if (SystemDictionary::reflect_CallerSensitive_klass() == NULL) {
    58.9 +  // Pre-JDK 8 and early builds of JDK 8 don't have a CallerSensitive annotation; or
   58.10 +  // sun.reflect.Reflection.getCallerClass with a depth parameter is provided
   58.11 +  // temporarily for existing code to use until a replacement API is defined.
   58.12 +  if (SystemDictionary::reflect_CallerSensitive_klass() == NULL || depth != JVM_CALLER_DEPTH) {
   58.13      Klass* k = thread->security_get_caller_class(depth);
   58.14      return (k == NULL) ? NULL : (jclass) JNIHandles::make_local(env, k->java_mirror());
   58.15 -  } else {
   58.16 -    // Basic handshaking with Java_sun_reflect_Reflection_getCallerClass
   58.17 -    assert(depth == -1, "wrong handshake depth");
   58.18    }
   58.19  
   58.20    // Getting the class of the caller frame.
   58.21 @@ -3954,248 +3953,6 @@
   58.22  }
   58.23  
   58.24  
   58.25 -// Serialization
   58.26 -JVM_ENTRY(void, JVM_SetPrimitiveFieldValues(JNIEnv *env, jclass cb, jobject obj,
   58.27 -                                            jlongArray fieldIDs, jcharArray typecodes, jbyteArray data))
   58.28 -  assert(!JDK_Version::is_gte_jdk14x_version(), "should only be used in 1.3.1 and earlier");
   58.29 -
   58.30 -  typeArrayOop tcodes = typeArrayOop(JNIHandles::resolve(typecodes));
   58.31 -  typeArrayOop dbuf   = typeArrayOop(JNIHandles::resolve(data));
   58.32 -  typeArrayOop fids   = typeArrayOop(JNIHandles::resolve(fieldIDs));
   58.33 -  oop          o      = JNIHandles::resolve(obj);
   58.34 -
   58.35 -  if (o == NULL || fids == NULL  || dbuf == NULL  || tcodes == NULL) {
   58.36 -    THROW(vmSymbols::java_lang_NullPointerException());
   58.37 -  }
   58.38 -
   58.39 -  jsize nfids = fids->length();
   58.40 -  if (nfids == 0) return;
   58.41 -
   58.42 -  if (tcodes->length() < nfids) {
   58.43 -    THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException());
   58.44 -  }
   58.45 -
   58.46 -  jsize off = 0;
   58.47 -  /* loop through fields, setting values */
   58.48 -  for (jsize i = 0; i < nfids; i++) {
   58.49 -    jfieldID fid = (jfieldID)(intptr_t) fids->long_at(i);
   58.50 -    int field_offset;
   58.51 -    if (fid != NULL) {
   58.52 -      // NULL is a legal value for fid, but retrieving the field offset
   58.53 -      // trigger assertion in that case
   58.54 -      field_offset = jfieldIDWorkaround::from_instance_jfieldID(o->klass(), fid);
   58.55 -    }
   58.56 -
   58.57 -    switch (tcodes->char_at(i)) {
   58.58 -      case 'Z':
   58.59 -        if (fid != NULL) {
   58.60 -          jboolean val = (dbuf->byte_at(off) != 0) ? JNI_TRUE : JNI_FALSE;
   58.61 -          o->bool_field_put(field_offset, val);
   58.62 -        }
   58.63 -        off++;
   58.64 -        break;
   58.65 -
   58.66 -      case 'B':
   58.67 -        if (fid != NULL) {
   58.68 -          o->byte_field_put(field_offset, dbuf->byte_at(off));
   58.69 -        }
   58.70 -        off++;
   58.71 -        break;
   58.72 -
   58.73 -      case 'C':
   58.74 -        if (fid != NULL) {
   58.75 -          jchar val = ((dbuf->byte_at(off + 0) & 0xFF) << 8)
   58.76 -                    + ((dbuf->byte_at(off + 1) & 0xFF) << 0);
   58.77 -          o->char_field_put(field_offset, val);
   58.78 -        }
   58.79 -        off += 2;
   58.80 -        break;
   58.81 -
   58.82 -      case 'S':
   58.83 -        if (fid != NULL) {
   58.84 -          jshort val = ((dbuf->byte_at(off + 0) & 0xFF) << 8)
   58.85 -                     + ((dbuf->byte_at(off + 1) & 0xFF) << 0);
   58.86 -          o->short_field_put(field_offset, val);
   58.87 -        }
   58.88 -        off += 2;
   58.89 -        break;
   58.90 -
   58.91 -      case 'I':
   58.92 -        if (fid != NULL) {
   58.93 -          jint ival = ((dbuf->byte_at(off + 0) & 0xFF) << 24)
   58.94 -                    + ((dbuf->byte_at(off + 1) & 0xFF) << 16)
   58.95 -                    + ((dbuf->byte_at(off + 2) & 0xFF) << 8)
   58.96 -                    + ((dbuf->byte_at(off + 3) & 0xFF) << 0);
   58.97 -          o->int_field_put(field_offset, ival);
   58.98 -        }
   58.99 -        off += 4;
  58.100 -        break;
  58.101 -
  58.102 -      case 'F':
  58.103 -        if (fid != NULL) {
  58.104 -          jint ival = ((dbuf->byte_at(off + 0) & 0xFF) << 24)
  58.105 -                    + ((dbuf->byte_at(off + 1) & 0xFF) << 16)
  58.106 -                    + ((dbuf->byte_at(off + 2) & 0xFF) << 8)
  58.107 -                    + ((dbuf->byte_at(off + 3) & 0xFF) << 0);
  58.108 -          jfloat fval = (*int_bits_to_float_fn)(env, NULL, ival);
  58.109 -          o->float_field_put(field_offset, fval);
  58.110 -        }
  58.111 -        off += 4;
  58.112 -        break;
  58.113 -
  58.114 -      case 'J':
  58.115 -        if (fid != NULL) {
  58.116 -          jlong lval = (((jlong) dbuf->byte_at(off + 0) & 0xFF) << 56)
  58.117 -                     + (((jlong) dbuf->byte_at(off + 1) & 0xFF) << 48)
  58.118 -                     + (((jlong) dbuf->byte_at(off + 2) & 0xFF) << 40)
  58.119 -                     + (((jlong) dbuf->byte_at(off + 3) & 0xFF) << 32)
  58.120 -                     + (((jlong) dbuf->byte_at(off + 4) & 0xFF) << 24)
  58.121 -                     + (((jlong) dbuf->byte_at(off + 5) & 0xFF) << 16)
  58.122 -                     + (((jlong) dbuf->byte_at(off + 6) & 0xFF) << 8)
  58.123 -                     + (((jlong) dbuf->byte_at(off + 7) & 0xFF) << 0);
  58.124 -          o->long_field_put(field_offset, lval);
  58.125 -        }
  58.126 -        off += 8;
  58.127 -        break;
  58.128 -
  58.129 -      case 'D':
  58.130 -        if (fid != NULL) {
  58.131 -          jlong lval = (((jlong) dbuf->byte_at(off + 0) & 0xFF) << 56)
  58.132 -                     + (((jlong) dbuf->byte_at(off + 1) & 0xFF) << 48)
  58.133 -                     + (((jlong) dbuf->byte_at(off + 2) & 0xFF) << 40)
  58.134 -                     + (((jlong) dbuf->byte_at(off + 3) & 0xFF) << 32)
  58.135 -                     + (((jlong) dbuf->byte_at(off + 4) & 0xFF) << 24)
  58.136 -                     + (((jlong) dbuf->byte_at(off + 5) & 0xFF) << 16)
  58.137 -                     + (((jlong) dbuf->byte_at(off + 6) & 0xFF) << 8)
  58.138 -                     + (((jlong) dbuf->byte_at(off + 7) & 0xFF) << 0);
  58.139 -          jdouble dval = (*long_bits_to_double_fn)(env, NULL, lval);
  58.140 -          o->double_field_put(field_offset, dval);
  58.141 -        }
  58.142 -        off += 8;
  58.143 -        break;
  58.144 -
  58.145 -      default:
  58.146 -        // Illegal typecode
  58.147 -        THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "illegal typecode");
  58.148 -    }
  58.149 -  }
  58.150 -JVM_END
  58.151 -
  58.152 -
  58.153 -JVM_ENTRY(void, JVM_GetPrimitiveFieldValues(JNIEnv *env, jclass cb, jobject obj,
  58.154 -                            jlongArray fieldIDs, jcharArray typecodes, jbyteArray data))
  58.155 -  assert(!JDK_Version::is_gte_jdk14x_version(), "should only be used in 1.3.1 and earlier");
  58.156 -
  58.157 -  typeArrayOop tcodes = typeArrayOop(JNIHandles::resolve(typecodes));
  58.158 -  typeArrayOop dbuf   = typeArrayOop(JNIHandles::resolve(data));
  58.159 -  typeArrayOop fids   = typeArrayOop(JNIHandles::resolve(fieldIDs));
  58.160 -  oop          o      = JNIHandles::resolve(obj);
  58.161 -
  58.162 -  if (o == NULL || fids == NULL  || dbuf == NULL  || tcodes == NULL) {
  58.163 -    THROW(vmSymbols::java_lang_NullPointerException());
  58.164 -  }
  58.165 -
  58.166 -  jsize nfids = fids->length();
  58.167 -  if (nfids == 0) return;
  58.168 -
  58.169 -  if (tcodes->length() < nfids) {
  58.170 -    THROW(vmSymbols::java_lang_ArrayIndexOutOfBoundsException());
  58.171 -  }
  58.172 -
  58.173 -  /* loop through fields, fetching values */
  58.174 -  jsize off = 0;
  58.175 -  for (jsize i = 0; i < nfids; i++) {
  58.176 -    jfieldID fid = (jfieldID)(intptr_t) fids->long_at(i);
  58.177 -    if (fid == NULL) {
  58.178 -      THROW(vmSymbols::java_lang_NullPointerException());
  58.179 -    }
  58.180 -    int field_offset = jfieldIDWorkaround::from_instance_jfieldID(o->klass(), fid);
  58.181 -
  58.182 -     switch (tcodes->char_at(i)) {
  58.183 -       case 'Z':
  58.184 -         {
  58.185 -           jboolean val = o->bool_field(field_offset);
  58.186 -           dbuf->byte_at_put(off++, (val != 0) ? 1 : 0);
  58.187 -         }
  58.188 -         break;
  58.189 -
  58.190 -       case 'B':
  58.191 -         dbuf->byte_at_put(off++, o->byte_field(field_offset));
  58.192 -         break;
  58.193 -
  58.194 -       case 'C':
  58.195 -         {
  58.196 -           jchar val = o->char_field(field_offset);
  58.197 -           dbuf->byte_at_put(off++, (val >> 8) & 0xFF);
  58.198 -           dbuf->byte_at_put(off++, (val >> 0) & 0xFF);
  58.199 -         }
  58.200 -         break;
  58.201 -
  58.202 -       case 'S':
  58.203 -         {
  58.204 -           jshort val = o->short_field(field_offset);
  58.205 -           dbuf->byte_at_put(off++, (val >> 8) & 0xFF);
  58.206 -           dbuf->byte_at_put(off++, (val >> 0) & 0xFF);
  58.207 -         }
  58.208 -         break;
  58.209 -
  58.210 -       case 'I':
  58.211 -         {
  58.212 -           jint val = o->int_field(field_offset);
  58.213 -           dbuf->byte_at_put(off++, (val >> 24) & 0xFF);
  58.214 -           dbuf->byte_at_put(off++, (val >> 16) & 0xFF);
  58.215 -           dbuf->byte_at_put(off++, (val >> 8)  & 0xFF);
  58.216 -           dbuf->byte_at_put(off++, (val >> 0)  & 0xFF);
  58.217 -         }
  58.218 -         break;
  58.219 -
  58.220 -       case 'F':
  58.221 -         {
  58.222 -           jfloat fval = o->float_field(field_offset);
  58.223 -           jint ival = (*float_to_int_bits_fn)(env, NULL, fval);
  58.224 -           dbuf->byte_at_put(off++, (ival >> 24) & 0xFF);
  58.225 -           dbuf->byte_at_put(off++, (ival >> 16) & 0xFF);
  58.226 -           dbuf->byte_at_put(off++, (ival >> 8)  & 0xFF);
  58.227 -           dbuf->byte_at_put(off++, (ival >> 0)  & 0xFF);
  58.228 -         }
  58.229 -         break;
  58.230 -
  58.231 -       case 'J':
  58.232 -         {
  58.233 -           jlong val = o->long_field(field_offset);
  58.234 -           dbuf->byte_at_put(off++, (val >> 56) & 0xFF);
  58.235 -           dbuf->byte_at_put(off++, (val >> 48) & 0xFF);
  58.236 -           dbuf->byte_at_put(off++, (val >> 40) & 0xFF);
  58.237 -           dbuf->byte_at_put(off++, (val >> 32) & 0xFF);
  58.238 -           dbuf->byte_at_put(off++, (val >> 24) & 0xFF);
  58.239 -           dbuf->byte_at_put(off++, (val >> 16) & 0xFF);
  58.240 -           dbuf->byte_at_put(off++, (val >> 8)  & 0xFF);
  58.241 -           dbuf->byte_at_put(off++, (val >> 0)  & 0xFF);
  58.242 -         }
  58.243 -         break;
  58.244 -
  58.245 -       case 'D':
  58.246 -         {
  58.247 -           jdouble dval = o->double_field(field_offset);
  58.248 -           jlong lval = (*double_to_long_bits_fn)(env, NULL, dval);
  58.249 -           dbuf->byte_at_put(off++, (lval >> 56) & 0xFF);
  58.250 -           dbuf->byte_at_put(off++, (lval >> 48) & 0xFF);
  58.251 -           dbuf->byte_at_put(off++, (lval >> 40) & 0xFF);
  58.252 -           dbuf->byte_at_put(off++, (lval >> 32) & 0xFF);
  58.253 -           dbuf->byte_at_put(off++, (lval >> 24) & 0xFF);
  58.254 -           dbuf->byte_at_put(off++, (lval >> 16) & 0xFF);
  58.255 -           dbuf->byte_at_put(off++, (lval >> 8)  & 0xFF);
  58.256 -           dbuf->byte_at_put(off++, (lval >> 0)  & 0xFF);
  58.257 -         }
  58.258 -         break;
  58.259 -
  58.260 -       default:
  58.261 -         // Illegal typecode
  58.262 -         THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "illegal typecode");
  58.263 -     }
  58.264 -  }
  58.265 -JVM_END
  58.266 -
  58.267  
  58.268  // Shared JNI/JVM entry points //////////////////////////////////////////////////////////////
  58.269  
    59.1 --- a/src/share/vm/prims/jvm.h	Sun Oct 13 21:14:04 2013 +0100
    59.2 +++ b/src/share/vm/prims/jvm.h	Thu Oct 17 14:20:57 2013 -0700
    59.3 @@ -374,6 +374,9 @@
    59.4  /*
    59.5   * java.lang.Class and java.lang.ClassLoader
    59.6   */
    59.7 +
    59.8 +#define JVM_CALLER_DEPTH -1
    59.9 +
   59.10  /*
   59.11   * Returns the class in which the code invoking the native method
   59.12   * belongs.
    60.1 --- a/src/share/vm/prims/jvm_misc.hpp	Sun Oct 13 21:14:04 2013 +0100
    60.2 +++ b/src/share/vm/prims/jvm_misc.hpp	Thu Oct 17 14:20:57 2013 -0700
    60.3 @@ -36,22 +36,6 @@
    60.4  void trace_class_resolution(Klass* to_class);
    60.5  
    60.6  /*
    60.7 - * Support for Serialization and RMI. Currently used by HotSpot only.
    60.8 - */
    60.9 -
   60.10 -extern "C" {
   60.11 -
   60.12 -void JNICALL
   60.13 -JVM_SetPrimitiveFieldValues(JNIEnv *env, jclass cb, jobject obj,
   60.14 -                            jlongArray fieldIDs, jcharArray typecodes, jbyteArray data);
   60.15 -
   60.16 -void JNICALL
   60.17 -JVM_GetPrimitiveFieldValues(JNIEnv *env, jclass cb, jobject obj,
   60.18 -                            jlongArray fieldIDs, jcharArray typecodes, jbyteArray data);
   60.19 -
   60.20 -}
   60.21 -
   60.22 -/*
   60.23   * Support for -Xcheck:jni
   60.24   */
   60.25  
    61.1 --- a/src/share/vm/prims/jvmtiRedefineClasses.cpp	Sun Oct 13 21:14:04 2013 +0100
    61.2 +++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp	Thu Oct 17 14:20:57 2013 -0700
    61.3 @@ -2755,13 +2755,26 @@
    61.4      // InstanceKlass around to hold obsolete methods so we don't have
    61.5      // any other InstanceKlass embedded vtables to update. The vtable
    61.6      // holds the Method*s for virtual (but not final) methods.
    61.7 -    if (ik->vtable_length() > 0 && ik->is_subtype_of(_the_class_oop)) {
    61.8 +    // Default methods, or concrete methods in interfaces are stored
    61.9 +    // in the vtable, so if an interface changes we need to check
   61.10 +    // adjust_method_entries() for every InstanceKlass, which will also
   61.11 +    // adjust the default method vtable indices.
   61.12 +    // We also need to adjust any default method entries that are
   61.13 +    // not yet in the vtable, because the vtable setup is in progress.
   61.14 +    // This must be done after we adjust the default_methods and
   61.15 +    // default_vtable_indices for methods already in the vtable.
   61.16 +    if (ik->vtable_length() > 0 && (_the_class_oop->is_interface()
   61.17 +        || ik->is_subtype_of(_the_class_oop))) {
   61.18        // ik->vtable() creates a wrapper object; rm cleans it up
   61.19        ResourceMark rm(_thread);
   61.20        ik->vtable()->adjust_method_entries(_matching_old_methods,
   61.21                                            _matching_new_methods,
   61.22                                            _matching_methods_length,
   61.23                                            &trace_name_printed);
   61.24 +      ik->adjust_default_methods(_matching_old_methods,
   61.25 +                                 _matching_new_methods,
   61.26 +                                 _matching_methods_length,
   61.27 +                                 &trace_name_printed);
   61.28      }
   61.29  
   61.30      // If the current class has an itable and we are either redefining an
   61.31 @@ -2931,7 +2944,8 @@
   61.32        old_method->set_is_obsolete();
   61.33        obsolete_count++;
   61.34  
   61.35 -      // obsolete methods need a unique idnum
   61.36 +      // obsolete methods need a unique idnum so they become new entries in
   61.37 +      // the jmethodID cache in InstanceKlass
   61.38        u2 num = InstanceKlass::cast(_the_class_oop)->next_method_idnum();
   61.39        if (num != ConstMethod::UNSET_IDNUM) {
   61.40          old_method->set_method_idnum(num);
    62.1 --- a/src/share/vm/prims/methodHandles.cpp	Sun Oct 13 21:14:04 2013 +0100
    62.2 +++ b/src/share/vm/prims/methodHandles.cpp	Thu Oct 17 14:20:57 2013 -0700
    62.3 @@ -187,12 +187,34 @@
    62.4      receiver_limit = m->method_holder();
    62.5      assert(receiver_limit->verify_itable_index(vmindex), "");
    62.6      flags |= IS_METHOD | (JVM_REF_invokeInterface << REFERENCE_KIND_SHIFT);
    62.7 +    if (TraceInvokeDynamic) {
    62.8 +      ResourceMark rm;
    62.9 +      tty->print_cr("memberName: invokeinterface method_holder::method: %s, receiver: %s, itableindex: %d, access_flags:",
   62.10 +            Method::name_and_sig_as_C_string(receiver_limit(), m->name(), m->signature()),
   62.11 +            receiver_limit()->internal_name(), vmindex);
   62.12 +       m->access_flags().print_on(tty);
   62.13 +       if (!m->is_abstract()) {
   62.14 +         tty->print("default");
   62.15 +       }
   62.16 +       tty->cr();
   62.17 +    }
   62.18      break;
   62.19  
   62.20    case CallInfo::vtable_call:
   62.21      vmindex = info.vtable_index();
   62.22      flags |= IS_METHOD | (JVM_REF_invokeVirtual << REFERENCE_KIND_SHIFT);
   62.23      assert(receiver_limit->is_subtype_of(m->method_holder()), "virtual call must be type-safe");
   62.24 +    if (TraceInvokeDynamic) {
   62.25 +      ResourceMark rm;
   62.26 +      tty->print_cr("memberName: invokevirtual method_holder::method: %s, receiver: %s, vtableindex: %d, access_flags:",
   62.27 +            Method::name_and_sig_as_C_string(receiver_limit(), m->name(), m->signature()),
   62.28 +            receiver_limit()->internal_name(), vmindex);
   62.29 +       m->access_flags().print_on(tty);
   62.30 +       if (m->is_default_method()) {
   62.31 +         tty->print("default");
   62.32 +       }
   62.33 +       tty->cr();
   62.34 +    }
   62.35      break;
   62.36  
   62.37    case CallInfo::direct_call:
    63.1 --- a/src/share/vm/prims/nativeLookup.cpp	Sun Oct 13 21:14:04 2013 +0100
    63.2 +++ b/src/share/vm/prims/nativeLookup.cpp	Thu Oct 17 14:20:57 2013 -0700
    63.3 @@ -129,10 +129,6 @@
    63.4  #define FN_PTR(f) CAST_FROM_FN_PTR(void*, &f)
    63.5  
    63.6  static JNINativeMethod lookup_special_native_methods[] = {
    63.7 -  // Next two functions only exist for compatibility with 1.3.1 and earlier.
    63.8 -  { CC"Java_java_io_ObjectOutputStream_getPrimitiveFieldValues",   NULL, FN_PTR(JVM_GetPrimitiveFieldValues)     },  // intercept ObjectOutputStream getPrimitiveFieldValues for faster serialization
    63.9 -  { CC"Java_java_io_ObjectInputStream_setPrimitiveFieldValues",    NULL, FN_PTR(JVM_SetPrimitiveFieldValues)     },  // intercept ObjectInputStream setPrimitiveFieldValues for faster serialization
   63.10 -
   63.11    { CC"Java_sun_misc_Unsafe_registerNatives",                      NULL, FN_PTR(JVM_RegisterUnsafeMethods)       },
   63.12    { CC"Java_java_lang_invoke_MethodHandleNatives_registerNatives", NULL, FN_PTR(JVM_RegisterMethodHandleMethods) },
   63.13    { CC"Java_sun_misc_Perf_registerNatives",                        NULL, FN_PTR(JVM_RegisterPerfMethods)         },
   63.14 @@ -140,9 +136,8 @@
   63.15  };
   63.16  
   63.17  static address lookup_special_native(char* jni_name) {
   63.18 -  int i = !JDK_Version::is_gte_jdk14x_version() ? 0 : 2;  // see comment in lookup_special_native_methods
   63.19    int count = sizeof(lookup_special_native_methods) / sizeof(JNINativeMethod);
   63.20 -  for (; i < count; i++) {
   63.21 +  for (int i = 0; i < count; i++) {
   63.22      // NB: To ignore the jni prefix and jni postfix strstr is used matching.
   63.23      if (strstr(jni_name, lookup_special_native_methods[i].name) != NULL) {
   63.24        return CAST_FROM_FN_PTR(address, lookup_special_native_methods[i].fnPtr);
    64.1 --- a/src/share/vm/prims/wbtestmethods/parserTests.cpp	Sun Oct 13 21:14:04 2013 +0100
    64.2 +++ b/src/share/vm/prims/wbtestmethods/parserTests.cpp	Thu Oct 17 14:20:57 2013 -0700
    64.3 @@ -1,5 +1,5 @@
    64.4  /*
    64.5 - * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
    64.6 + * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
    64.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    64.8   *
    64.9   * This code is free software; you can redistribute it and/or modify it
   64.10 @@ -117,11 +117,12 @@
   64.11  
   64.12    const char* c_cmdline = java_lang_String::as_utf8_string(JNIHandles::resolve(j_cmdline));
   64.13    objArrayOop argumentArray = objArrayOop(JNIHandles::resolve_non_null(arguments));
   64.14 +  objArrayHandle argumentArray_ah(THREAD, argumentArray);
   64.15  
   64.16 -  int length = argumentArray->length();
   64.17 +  int length = argumentArray_ah->length();
   64.18  
   64.19    for (int i = 0; i < length; i++) {
   64.20 -    oop argument_oop = argumentArray->obj_at(i);
   64.21 +    oop argument_oop = argumentArray_ah->obj_at(i);
   64.22      fill_in_parser(&parser, argument_oop);
   64.23    }
   64.24  
   64.25 @@ -130,19 +131,20 @@
   64.26  
   64.27    Klass* k = SystemDictionary::Object_klass();
   64.28    objArrayOop returnvalue_array = oopFactory::new_objArray(k, parser.num_arguments() * 2, CHECK_NULL);
   64.29 +  objArrayHandle returnvalue_array_ah(THREAD, returnvalue_array);
   64.30  
   64.31    GrowableArray<const char *>*parsedArgNames = parser.argument_name_array();
   64.32  
   64.33    for (int i = 0; i < parser.num_arguments(); i++) {
   64.34      oop parsedName = java_lang_String::create_oop_from_str(parsedArgNames->at(i), CHECK_NULL);
   64.35 -    returnvalue_array->obj_at_put(i*2, parsedName);
   64.36 +    returnvalue_array_ah->obj_at_put(i*2, parsedName);
   64.37      GenDCmdArgument* arg = parser.lookup_dcmd_option(parsedArgNames->at(i), strlen(parsedArgNames->at(i)));
   64.38      char buf[VALUE_MAXLEN];
   64.39      arg->value_as_str(buf, sizeof(buf));
   64.40      oop parsedValue = java_lang_String::create_oop_from_str(buf, CHECK_NULL);
   64.41 -    returnvalue_array->obj_at_put(i*2+1, parsedValue);
   64.42 +    returnvalue_array_ah->obj_at_put(i*2+1, parsedValue);
   64.43    }
   64.44  
   64.45 -  return (jobjectArray) JNIHandles::make_local(returnvalue_array);
   64.46 +  return (jobjectArray) JNIHandles::make_local(returnvalue_array_ah());
   64.47  
   64.48  WB_END
    65.1 --- a/src/share/vm/runtime/arguments.cpp	Sun Oct 13 21:14:04 2013 +0100
    65.2 +++ b/src/share/vm/runtime/arguments.cpp	Thu Oct 17 14:20:57 2013 -0700
    65.3 @@ -2045,6 +2045,9 @@
    65.4    status = status && verify_interval(StringTableSize, minimumStringTableSize,
    65.5      (max_uintx / StringTable::bucket_size()), "StringTable size");
    65.6  
    65.7 +  status = status && verify_interval(SymbolTableSize, minimumSymbolTableSize,
    65.8 +    (max_uintx / SymbolTable::bucket_size()), "SymbolTable size");
    65.9 +
   65.10    if (MinHeapFreeRatio > MaxHeapFreeRatio) {
   65.11      jio_fprintf(defaultStream::error_stream(),
   65.12                  "MinHeapFreeRatio (" UINTX_FORMAT ") must be less than or "
   65.13 @@ -2654,16 +2657,16 @@
   65.14        FLAG_SET_CMDLINE(bool, BackgroundCompilation, false);
   65.15      // -Xmn for compatibility with other JVM vendors
   65.16      } else if (match_option(option, "-Xmn", &tail)) {
   65.17 -      julong long_initial_eden_size = 0;
   65.18 -      ArgsRange errcode = parse_memory_size(tail, &long_initial_eden_size, 1);
   65.19 +      julong long_initial_young_size = 0;
   65.20 +      ArgsRange errcode = parse_memory_size(tail, &long_initial_young_size, 1);
   65.21        if (errcode != arg_in_range) {
   65.22          jio_fprintf(defaultStream::error_stream(),
   65.23 -                    "Invalid initial eden size: %s\n", option->optionString);
   65.24 +                    "Invalid initial young generation size: %s\n", option->optionString);
   65.25          describe_range_error(errcode);
   65.26          return JNI_EINVAL;
   65.27        }
   65.28 -      FLAG_SET_CMDLINE(uintx, MaxNewSize, (uintx)long_initial_eden_size);
   65.29 -      FLAG_SET_CMDLINE(uintx, NewSize, (uintx)long_initial_eden_size);
   65.30 +      FLAG_SET_CMDLINE(uintx, MaxNewSize, (uintx)long_initial_young_size);
   65.31 +      FLAG_SET_CMDLINE(uintx, NewSize, (uintx)long_initial_young_size);
   65.32      // -Xms
   65.33      } else if (match_option(option, "-Xms", &tail)) {
   65.34        julong long_initial_heap_size = 0;
   65.35 @@ -3663,6 +3666,9 @@
   65.36    assert(verify_serial_gc_flags(), "SerialGC unset");
   65.37  #endif // INCLUDE_ALL_GCS
   65.38  
   65.39 +  // Initialize Metaspace flags and alignments.
   65.40 +  Metaspace::ergo_initialize();
   65.41 +
   65.42    // Set bytecode rewriting flags
   65.43    set_bytecode_flags();
   65.44  
    66.1 --- a/src/share/vm/runtime/globals.hpp	Sun Oct 13 21:14:04 2013 +0100
    66.2 +++ b/src/share/vm/runtime/globals.hpp	Thu Oct 17 14:20:57 2013 -0700
    66.3 @@ -481,21 +481,21 @@
    66.4  #define RUNTIME_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct, manageable, product_rw, lp64_product) \
    66.5                                                                              \
    66.6    lp64_product(bool, UseCompressedOops, false,                              \
    66.7 -            "Use 32-bit object references in 64-bit VM  "                   \
    66.8 -            "lp64_product means flag is always constant in 32 bit VM")      \
    66.9 +          "Use 32-bit object references in 64-bit VM. "                     \
   66.10 +          "lp64_product means flag is always constant in 32 bit VM")        \
   66.11                                                                              \
   66.12    lp64_product(bool, UseCompressedClassPointers, false,                     \
   66.13 -            "Use 32-bit class pointers in 64-bit VM  "                      \
   66.14 -            "lp64_product means flag is always constant in 32 bit VM")      \
   66.15 +          "Use 32-bit class pointers in 64-bit VM. "                        \
   66.16 +          "lp64_product means flag is always constant in 32 bit VM")        \
   66.17                                                                              \
   66.18    notproduct(bool, CheckCompressedOops, true,                               \
   66.19 -            "generate checks in encoding/decoding code in debug VM")        \
   66.20 +          "Generate checks in encoding/decoding code in debug VM")          \
   66.21                                                                              \
   66.22    product_pd(uintx, HeapBaseMinAddress,                                     \
   66.23 -            "OS specific low limit for heap base address")                  \
   66.24 +          "OS specific low limit for heap base address")                    \
   66.25                                                                              \
   66.26    diagnostic(bool, PrintCompressedOopsMode, false,                          \
   66.27 -            "Print compressed oops base address and encoding mode")         \
   66.28 +          "Print compressed oops base address and encoding mode")           \
   66.29                                                                              \
   66.30    lp64_product(intx, ObjectAlignmentInBytes, 8,                             \
   66.31            "Default object alignment in bytes, 8 is minimum")                \
   66.32 @@ -517,7 +517,7 @@
   66.33            "Use lwsync instruction if true, else use slower sync")           \
   66.34                                                                              \
   66.35    develop(bool, CleanChunkPoolAsync, falseInEmbedded,                       \
   66.36 -          "Whether to clean the chunk pool asynchronously")                 \
   66.37 +          "Clean the chunk pool asynchronously")                            \
   66.38                                                                              \
   66.39    /* Temporary: See 6948537 */                                              \
   66.40    experimental(bool, UseMemSetInBOT, true,                                  \
   66.41 @@ -527,10 +527,12 @@
   66.42            "Enable normal processing of flags relating to field diagnostics")\
   66.43                                                                              \
   66.44    experimental(bool, UnlockExperimentalVMOptions, false,                    \
   66.45 -          "Enable normal processing of flags relating to experimental features")\
   66.46 +          "Enable normal processing of flags relating to experimental "     \
   66.47 +          "features")                                                       \
   66.48                                                                              \
   66.49    product(bool, JavaMonitorsInStackTrace, true,                             \
   66.50 -          "Print info. about Java monitor locks when the stacks are dumped")\
   66.51 +          "Print information about Java monitor locks when the stacks are"  \
   66.52 +          "dumped")                                                         \
   66.53                                                                              \
   66.54    product_pd(bool, UseLargePages,                                           \
   66.55            "Use large page memory")                                          \
   66.56 @@ -541,8 +543,12 @@
   66.57    develop(bool, LargePagesIndividualAllocationInjectError, false,           \
   66.58            "Fail large pages individual allocation")                         \
   66.59                                                                              \
   66.60 +  product(bool, UseLargePagesInMetaspace, false,                            \
   66.61 +          "Use large page memory in metaspace. "                            \
   66.62 +          "Only used if UseLargePages is enabled.")                         \
   66.63 +                                                                            \
   66.64    develop(bool, TracePageSizes, false,                                      \
   66.65 -          "Trace page size selection and usage.")                           \
   66.66 +          "Trace page size selection and usage")                            \
   66.67                                                                              \
   66.68    product(bool, UseNUMA, false,                                             \
   66.69            "Use NUMA if available")                                          \
   66.70 @@ -557,12 +563,12 @@
   66.71            "Force NUMA optimizations on single-node/UMA systems")            \
   66.72                                                                              \
   66.73    product(uintx, NUMAChunkResizeWeight, 20,                                 \
   66.74 -          "Percentage (0-100) used to weigh the current sample when "      \
   66.75 +          "Percentage (0-100) used to weigh the current sample when "       \
   66.76            "computing exponentially decaying average for "                   \
   66.77            "AdaptiveNUMAChunkSizing")                                        \
   66.78                                                                              \
   66.79    product(uintx, NUMASpaceResizeRate, 1*G,                                  \
   66.80 -          "Do not reallocate more that this amount per collection")         \
   66.81 +          "Do not reallocate more than this amount per collection")         \
   66.82                                                                              \
   66.83    product(bool, UseAdaptiveNUMAChunkSizing, true,                           \
   66.84            "Enable adaptive chunk sizing for NUMA")                          \
   66.85 @@ -579,17 +585,17 @@
   66.86    product(intx, UseSSE, 99,                                                 \
   66.87            "Highest supported SSE instructions set on x86/x64")              \
   66.88                                                                              \
   66.89 -  product(bool, UseAES, false,                                               \
   66.90 +  product(bool, UseAES, false,                                              \
   66.91            "Control whether AES instructions can be used on x86/x64")        \
   66.92                                                                              \
   66.93    product(uintx, LargePageSizeInBytes, 0,                                   \
   66.94 -          "Large page size (0 to let VM choose the page size")              \
   66.95 +          "Large page size (0 to let VM choose the page size)")             \
   66.96                                                                              \
   66.97    product(uintx, LargePageHeapSizeThreshold, 128*M,                         \
   66.98 -          "Use large pages if max heap is at least this big")               \
   66.99 +          "Use large pages if maximum heap is at least this big")           \
  66.100                                                                              \
  66.101    product(bool, ForceTimeHighResolution, false,                             \
  66.102 -          "Using high time resolution(For Win32 only)")                     \
  66.103 +          "Using high time resolution (for Win32 only)")                    \
  66.104                                                                              \
  66.105    develop(bool, TraceItables, false,                                        \
  66.106            "Trace initialization and use of itables")                        \
  66.107 @@ -605,10 +611,10 @@
  66.108                                                                              \
  66.109    develop(bool, TraceLongCompiles, false,                                   \
  66.110            "Print out every time compilation is longer than "                \
  66.111 -          "a given threashold")                                             \
  66.112 +          "a given threshold")                                              \
  66.113                                                                              \
  66.114    develop(bool, SafepointALot, false,                                       \
  66.115 -          "Generates a lot of safepoints. Works with "                      \
  66.116 +          "Generate a lot of safepoints. This works with "                  \
  66.117            "GuaranteedSafepointInterval")                                    \
  66.118                                                                              \
  66.119    product_pd(bool, BackgroundCompilation,                                   \
  66.120 @@ -616,13 +622,13 @@
  66.121            "compilation")                                                    \
  66.122                                                                              \
  66.123    product(bool, PrintVMQWaitTime, false,                                    \
  66.124 -          "Prints out the waiting time in VM operation queue")              \
  66.125 +          "Print out the waiting time in VM operation queue")               \
  66.126                                                                              \
  66.127    develop(bool, NoYieldsInMicrolock, false,                                 \
  66.128            "Disable yields in microlock")                                    \
  66.129                                                                              \
  66.130    develop(bool, TraceOopMapGeneration, false,                               \
  66.131 -          "Shows oopmap generation")                                        \
  66.132 +          "Show OopMapGeneration")                                          \
  66.133                                                                              \
  66.134    product(bool, MethodFlushing, true,                                       \
  66.135            "Reclamation of zombie and not-entrant methods")                  \
  66.136 @@ -631,10 +637,11 @@
  66.137            "Verify stack of each thread when it is entering a runtime call") \
  66.138                                                                              \
  66.139    diagnostic(bool, ForceUnreachable, false,                                 \
  66.140 -          "Make all non code cache addresses to be unreachable with forcing use of 64bit literal fixups") \
  66.141 +          "Make all non code cache addresses to be unreachable by "         \
  66.142 +          "forcing use of 64bit literal fixups")                            \
  66.143                                                                              \
  66.144    notproduct(bool, StressDerivedPointers, false,                            \
  66.145 -          "Force scavenge when a derived pointers is detected on stack "    \
  66.146 +          "Force scavenge when a derived pointer is detected on stack "     \
  66.147            "after rtm call")                                                 \
  66.148                                                                              \
  66.149    develop(bool, TraceDerivedPointers, false,                                \
  66.150 @@ -653,86 +660,86 @@
  66.151            "Use Inline Caches for virtual calls ")                           \
  66.152                                                                              \
  66.153    develop(bool, InlineArrayCopy, true,                                      \
  66.154 -          "inline arraycopy native that is known to be part of "            \
  66.155 +          "Inline arraycopy native that is known to be part of "            \
  66.156            "base library DLL")                                               \
  66.157                                                                              \
  66.158    develop(bool, InlineObjectHash, true,                                     \
  66.159 -          "inline Object::hashCode() native that is known to be part "      \
  66.160 +          "Inline Object::hashCode() native that is known to be part "      \
  66.161            "of base library DLL")                                            \
  66.162                                                                              \
  66.163    develop(bool, InlineNatives, true,                                        \
  66.164 -          "inline natives that are known to be part of base library DLL")   \
  66.165 +          "Inline natives that are known to be part of base library DLL")   \
  66.166                                                                              \
  66.167    develop(bool, InlineMathNatives, true,                                    \
  66.168 -          "inline SinD, CosD, etc.")                                        \
  66.169 +          "Inline SinD, CosD, etc.")                                        \
  66.170                                                                              \
  66.171    develop(bool, InlineClassNatives, true,                                   \
  66.172 -          "inline Class.isInstance, etc")                                   \
  66.173 +          "Inline Class.isInstance, etc")                                   \
  66.174                                                                              \
  66.175    develop(bool, InlineThreadNatives, true,                                  \
  66.176 -          "inline Thread.currentThread, etc")                               \
  66.177 +          "Inline Thread.currentThread, etc")                               \
  66.178                                                                              \
  66.179    develop(bool, InlineUnsafeOps, true,                                      \
  66.180 -          "inline memory ops (native methods) from sun.misc.Unsafe")        \
  66.181 +          "Inline memory ops (native methods) from sun.misc.Unsafe")        \
  66.182                                                                              \
  66.183    product(bool, CriticalJNINatives, true,                                   \
  66.184 -          "check for critical JNI entry points")                            \
  66.185 +          "Check for critical JNI entry points")                            \
  66.186                                                                              \
  66.187    notproduct(bool, StressCriticalJNINatives, false,                         \
  66.188 -            "Exercise register saving code in critical natives")            \
  66.189 +          "Exercise register saving code in critical natives")              \
  66.190                                                                              \
  66.191    product(bool, UseSSE42Intrinsics, false,                                  \
  66.192            "SSE4.2 versions of intrinsics")                                  \
  66.193                                                                              \
  66.194    product(bool, UseAESIntrinsics, false,                                    \
  66.195 -          "use intrinsics for AES versions of crypto")                      \
  66.196 +          "Use intrinsics for AES versions of crypto")                      \
  66.197                                                                              \
  66.198    product(bool, UseCRC32Intrinsics, false,                                  \
  66.199            "use intrinsics for java.util.zip.CRC32")                         \
  66.200                                                                              \
  66.201    develop(bool, TraceCallFixup, false,                                      \
  66.202 -          "traces all call fixups")                                         \
  66.203 +          "Trace all call fixups")                                          \
  66.204                                                                              \
  66.205    develop(bool, DeoptimizeALot, false,                                      \
  66.206 -          "deoptimize at every exit from the runtime system")               \
  66.207 +          "Deoptimize at every exit from the runtime system")               \
  66.208                                                                              \
  66.209    notproduct(ccstrlist, DeoptimizeOnlyAt, "",                               \
  66.210 -          "a comma separated list of bcis to deoptimize at")                \
  66.211 +          "A comma separated list of bcis to deoptimize at")                \
  66.212                                                                              \
  66.213    product(bool, DeoptimizeRandom, false,                                    \
  66.214 -          "deoptimize random frames on random exit from the runtime system")\
  66.215 +          "Deoptimize random frames on random exit from the runtime system")\
  66.216                                                                              \
  66.217    notproduct(bool, ZombieALot, false,                                       \
  66.218 -          "creates zombies (non-entrant) at exit from the runt. system")    \
  66.219 +          "Create zombies (non-entrant) at exit from the runtime system")   \
  66.220                                                                              \
  66.221    product(bool, UnlinkSymbolsALot, false,                                   \
  66.222 -          "unlink unreferenced symbols from the symbol table at safepoints")\
  66.223 +          "Unlink unreferenced symbols from the symbol table at safepoints")\
  66.224                                                                              \
  66.225    notproduct(bool, WalkStackALot, false,                                    \
  66.226 -          "trace stack (no print) at every exit from the runtime system")   \
  66.227 +          "Trace stack (no print) at every exit from the runtime system")   \
  66.228                                                                              \
  66.229    product(bool, Debugging, false,                                           \
  66.230 -          "set when executing debug methods in debug.ccp "                  \
  66.231 +          "Set when executing debug methods in debug.cpp "                  \
  66.232            "(to prevent triggering assertions)")                             \
  66.233                                                                              \
  66.234    notproduct(bool, StrictSafepointChecks, trueInDebug,                      \
  66.235            "Enable strict checks that safepoints cannot happen for threads " \
  66.236 -          "that used No_Safepoint_Verifier")                                \
  66.237 +          "that use No_Safepoint_Verifier")                                 \
  66.238                                                                              \
  66.239    notproduct(bool, VerifyLastFrame, false,                                  \
  66.240            "Verify oops on last frame on entry to VM")                       \
  66.241                                                                              \
  66.242    develop(bool, TraceHandleAllocation, false,                               \
  66.243 -          "Prints out warnings when suspicious many handles are allocated") \
  66.244 +          "Print out warnings when suspiciously many handles are allocated")\
  66.245                                                                              \
  66.246    product(bool, UseCompilerSafepoints, true,                                \
  66.247            "Stop at safepoints in compiled code")                            \
  66.248                                                                              \
  66.249    product(bool, FailOverToOldVerifier, true,                                \
  66.250 -          "fail over to old verifier when split verifier fails")            \
  66.251 +          "Fail over to old verifier when split verifier fails")            \
  66.252                                                                              \
  66.253    develop(bool, ShowSafepointMsgs, false,                                   \
  66.254 -          "Show msg. about safepoint synch.")                               \
  66.255 +          "Show message about safepoint synchronization")                   \
  66.256                                                                              \
  66.257    product(bool, SafepointTimeout, false,                                    \
  66.258            "Time out and warn or fail after SafepointTimeoutDelay "          \
  66.259 @@ -756,19 +763,19 @@
  66.260            "Trace external suspend wait failures")                           \
  66.261                                                                              \
  66.262    product(bool, MaxFDLimit, true,                                           \
  66.263 -          "Bump the number of file descriptors to max in solaris.")         \
  66.264 +          "Bump the number of file descriptors to maximum in Solaris")      \
  66.265                                                                              \
  66.266    diagnostic(bool, LogEvents, true,                                         \
  66.267 -             "Enable the various ring buffer event logs")                   \
  66.268 +          "Enable the various ring buffer event logs")                      \
  66.269                                                                              \
  66.270    diagnostic(uintx, LogEventsBufferEntries, 10,                             \
  66.271 -             "Enable the various ring buffer event logs")                   \
  66.272 +          "Number of ring buffer event logs")                               \
  66.273                                                                              \
  66.274    product(bool, BytecodeVerificationRemote, true,                           \
  66.275 -          "Enables the Java bytecode verifier for remote classes")          \
  66.276 +          "Enable the Java bytecode verifier for remote classes")           \
  66.277                                                                              \
  66.278    product(bool, BytecodeVerificationLocal, false,                           \
  66.279 -          "Enables the Java bytecode verifier for local classes")           \
  66.280 +          "Enable the Java bytecode verifier for local classes")            \
  66.281                                                                              \
  66.282    develop(bool, ForceFloatExceptions, trueInDebug,                          \
  66.283            "Force exceptions on FP stack under/overflow")                    \
  66.284 @@ -780,7 +787,7 @@
  66.285            "Trace java language assertions")                                 \
  66.286                                                                              \
  66.287    notproduct(bool, CheckAssertionStatusDirectives, false,                   \
  66.288 -          "temporary - see javaClasses.cpp")                                \
  66.289 +          "Temporary - see javaClasses.cpp")                                \
  66.290                                                                              \
  66.291    notproduct(bool, PrintMallocFree, false,                                  \
  66.292            "Trace calls to C heap malloc/free allocation")                   \
  66.293 @@ -799,16 +806,16 @@
  66.294            "entering the VM")                                                \
  66.295                                                                              \
  66.296    notproduct(bool, CheckOopishValues, false,                                \
  66.297 -          "Warn if value contains oop ( requires ZapDeadLocals)")           \
  66.298 +          "Warn if value contains oop (requires ZapDeadLocals)")            \
  66.299                                                                              \
  66.300    develop(bool, UseMallocOnly, false,                                       \
  66.301 -          "use only malloc/free for allocation (no resource area/arena)")   \
  66.302 +          "Use only malloc/free for allocation (no resource area/arena)")   \
  66.303                                                                              \
  66.304    develop(bool, PrintMalloc, false,                                         \
  66.305 -          "print all malloc/free calls")                                    \
  66.306 +          "Print all malloc/free calls")                                    \
  66.307                                                                              \
  66.308    develop(bool, PrintMallocStatistics, false,                               \
  66.309 -          "print malloc/free statistics")                                   \
  66.310 +          "Print malloc/free statistics")                                   \
  66.311                                                                              \
  66.312    develop(bool, ZapResourceArea, trueInDebug,                               \
  66.313            "Zap freed resource/arena space with 0xABABABAB")                 \
  66.314 @@ -820,7 +827,7 @@
  66.315            "Zap freed JNI handle space with 0xFEFEFEFE")                     \
  66.316                                                                              \
  66.317    notproduct(bool, ZapStackSegments, trueInDebug,                           \
  66.318 -             "Zap allocated/freed Stack segments with 0xFADFADED")          \
  66.319 +          "Zap allocated/freed stack segments with 0xFADFADED")             \
  66.320                                                                              \
  66.321    develop(bool, ZapUnusedHeapArea, trueInDebug,                             \
  66.322            "Zap unused heap space with 0xBAADBABE")                          \
  66.323 @@ -835,7 +842,7 @@
  66.324            "Zap filler objects with 0xDEAFBABE")                             \
  66.325                                                                              \
  66.326    develop(bool, PrintVMMessages, true,                                      \
  66.327 -          "Print vm messages on console")                                   \
  66.328 +          "Print VM messages on console")                                   \
  66.329                                                                              \
  66.330    product(bool, PrintGCApplicationConcurrentTime, false,                    \
  66.331            "Print the time the application has been running")                \
  66.332 @@ -844,21 +851,21 @@
  66.333            "Print the time the application has been stopped")                \
  66.334                                                                              \
  66.335    diagnostic(bool, VerboseVerification, false,                              \
  66.336 -             "Display detailed verification details")                       \
  66.337 +          "Display detailed verification details")                          \
  66.338                                                                              \
  66.339    notproduct(uintx, ErrorHandlerTest, 0,                                    \
  66.340 -          "If > 0, provokes an error after VM initialization; the value"    \
  66.341 -          "determines which error to provoke.  See test_error_handler()"    \
  66.342 +          "If > 0, provokes an error after VM initialization; the value "   \
  66.343 +          "determines which error to provoke. See test_error_handler() "    \
  66.344            "in debug.cpp.")                                                  \
  66.345                                                                              \
  66.346    develop(bool, Verbose, false,                                             \
  66.347 -          "Prints additional debugging information from other modes")       \
  66.348 +          "Print additional debugging information from other modes")        \
  66.349                                                                              \
  66.350    develop(bool, PrintMiscellaneous, false,                                  \
  66.351 -          "Prints uncategorized debugging information (requires +Verbose)") \
  66.352 +          "Print uncategorized debugging information (requires +Verbose)")  \
  66.353                                                                              \
  66.354    develop(bool, WizardMode, false,                                          \
  66.355 -          "Prints much more debugging information")                         \
  66.356 +          "Print much more debugging information")                          \
  66.357                                                                              \
  66.358    product(bool, ShowMessageBoxOnError, false,                               \
  66.359            "Keep process alive on VM fatal error")                           \
  66.360 @@ -870,7 +877,7 @@
  66.361            "Let VM fatal error propagate to the OS (ie. WER on Windows)")    \
  66.362                                                                              \
  66.363    product(bool, SuppressFatalErrorMessage, false,                           \
  66.364 -          "Do NO Fatal Error report [Avoid deadlock]")                      \
  66.365 +          "Report NO fatal error message (avoid deadlock)")                 \
  66.366                                                                              \
  66.367    product(ccstrlist, OnError, "",                                           \
  66.368            "Run user-defined commands on fatal error; see VMError.cpp "      \
  66.369 @@ -880,17 +887,17 @@
  66.370            "Run user-defined commands on first java.lang.OutOfMemoryError")  \
  66.371                                                                              \
  66.372    manageable(bool, HeapDumpBeforeFullGC, false,                             \
  66.373 -          "Dump heap to file before any major stop-world GC")               \
  66.374 +          "Dump heap to file before any major stop-the-world GC")           \
  66.375                                                                              \
  66.376    manageable(bool, HeapDumpAfterFullGC, false,                              \
  66.377 -          "Dump heap to file after any major stop-world GC")                \
  66.378 +          "Dump heap to file after any major stop-the-world GC")            \
  66.379                                                                              \
  66.380    manageable(bool, HeapDumpOnOutOfMemoryError, false,                       \
  66.381            "Dump heap to file when java.lang.OutOfMemoryError is thrown")    \
  66.382                                                                              \
  66.383    manageable(ccstr, HeapDumpPath, NULL,                                     \
  66.384 -          "When HeapDumpOnOutOfMemoryError is on, the path (filename or"    \
  66.385 -          "directory) of the dump file (defaults to java_pid<pid>.hprof"    \
  66.386 +          "When HeapDumpOnOutOfMemoryError is on, the path (filename or "   \
  66.387 +          "directory) of the dump file (defaults to java_pid<pid>.hprof "   \
  66.388            "in the working directory)")                                      \
  66.389                                                                              \
  66.390    develop(uintx, SegmentedHeapDumpThreshold, 2*G,                           \
  66.391 @@ -904,10 +911,10 @@
  66.392            "Execute breakpoint upon encountering VM warning")                \
  66.393                                                                              \
  66.394    develop(bool, TraceVMOperation, false,                                    \
  66.395 -          "Trace vm operations")                                            \
  66.396 +          "Trace VM operations")                                            \
  66.397                                                                              \
  66.398    develop(bool, UseFakeTimers, false,                                       \
  66.399 -          "Tells whether the VM should use system time or a fake timer")    \
  66.400 +          "Tell whether the VM should use system time or a fake timer")     \
  66.401                                                                              \
  66.402    product(ccstr, NativeMemoryTracking, "off",                               \
  66.403            "Native memory tracking options")                                 \
  66.404 @@ -917,7 +924,7 @@
  66.405                                                                              \
  66.406    diagnostic(bool, AutoShutdownNMT, true,                                   \
  66.407            "Automatically shutdown native memory tracking under stress "     \
  66.408 -          "situation. When set to false, native memory tracking tries to "  \
  66.409 +          "situations. When set to false, native memory tracking tries to " \
  66.410            "stay alive at the expense of JVM performance")                   \
  66.411                                                                              \
  66.412    diagnostic(bool, LogCompilation, false,                                   \
  66.413 @@ -927,12 +934,12 @@
  66.414            "Print compilations")                                             \
  66.415                                                                              \
  66.416    diagnostic(bool, TraceNMethodInstalls, false,                             \
  66.417 -             "Trace nmethod intallation")                                   \
  66.418 +          "Trace nmethod installation")                                     \
  66.419                                                                              \
  66.420    diagnostic(intx, ScavengeRootsInCode, 2,                                  \
  66.421 -             "0: do not allow scavengable oops in the code cache; "         \
  66.422 -             "1: allow scavenging from the code cache; "                    \
  66.423 -             "2: emit as many constants as the compiler can see")           \
  66.424 +          "0: do not allow scavengable oops in the code cache; "            \
  66.425 +          "1: allow scavenging from the code cache; "                       \
  66.426 +          "2: emit as many constants as the compiler can see")              \
  66.427                                                                              \
  66.428    product(bool, AlwaysRestoreFPU, false,                                    \
  66.429            "Restore the FPU control word after every JNI call (expensive)")  \
  66.430 @@ -953,7 +960,7 @@
  66.431            "Print assembly code (using external disassembler.so)")           \
  66.432                                                                              \
  66.433    diagnostic(ccstr, PrintAssemblyOptions, NULL,                             \
  66.434 -          "Options string passed to disassembler.so")                       \
  66.435 +          "Print options string passed to disassembler.so")                 \
  66.436                                                                              \
  66.437    diagnostic(bool, PrintNMethods, false,                                    \
  66.438            "Print assembly code for nmethods when generated")                \
  66.439 @@ -974,20 +981,21 @@
  66.440            "Print exception handler tables for all nmethods when generated") \
  66.441                                                                              \
  66.442    develop(bool, StressCompiledExceptionHandlers, false,                     \
  66.443 -         "Exercise compiled exception handlers")                            \
  66.444 +          "Exercise compiled exception handlers")                           \
  66.445                                                                              \
  66.446    develop(bool, InterceptOSException, false,                                \
  66.447 -          "Starts debugger when an implicit OS (e.g., NULL) "               \
  66.448 +          "Start debugger when an implicit OS (e.g. NULL) "                 \
  66.449            "exception happens")                                              \
  66.450                                                                              \
  66.451    product(bool, PrintCodeCache, false,                                      \
  66.452            "Print the code cache memory usage when exiting")                 \
  66.453                                                                              \
  66.454    develop(bool, PrintCodeCache2, false,                                     \
  66.455 -          "Print detailed usage info on the code cache when exiting")       \
  66.456 +          "Print detailed usage information on the code cache when exiting")\
  66.457                                                                              \
  66.458    product(bool, PrintCodeCacheOnCompilation, false,                         \
  66.459 -          "Print the code cache memory usage each time a method is compiled") \
  66.460 +          "Print the code cache memory usage each time a method is "        \
  66.461 +          "compiled")                                                       \
  66.462                                                                              \
  66.463    diagnostic(bool, PrintStubCode, false,                                    \
  66.464            "Print generated stub code")                                      \
  66.465 @@ -999,40 +1007,40 @@
  66.466            "Omit backtraces for some 'hot' exceptions in optimized code")    \
  66.467                                                                              \
  66.468    product(bool, ProfilerPrintByteCodeStatistics, false,                     \
  66.469 -          "Prints byte code statictics when dumping profiler output")       \
  66.470 +          "Print bytecode statistics when dumping profiler output")         \
  66.471                                                                              \
  66.472    product(bool, ProfilerRecordPC, false,                                    \
  66.473 -          "Collects tick for each 16 byte interval of compiled code")       \
  66.474 +          "Collect ticks for each 16 byte interval of compiled code")       \
  66.475                                                                              \
  66.476    product(bool, ProfileVM, false,                                           \
  66.477 -          "Profiles ticks that fall within VM (either in the VM Thread "    \
  66.478 +          "Profile ticks that fall within VM (either in the VM Thread "     \
  66.479            "or VM code called through stubs)")                               \
  66.480                                                                              \
  66.481    product(bool, ProfileIntervals, false,                                    \
  66.482 -          "Prints profiles for each interval (see ProfileIntervalsTicks)")  \
  66.483 +          "Print profiles for each interval (see ProfileIntervalsTicks)")   \
  66.484                                                                              \
  66.485    notproduct(bool, ProfilerCheckIntervals, false,                           \
  66.486 -          "Collect and print info on spacing of profiler ticks")            \
  66.487 +          "Collect and print information on spacing of profiler ticks")     \
  66.488                                                                              \
  66.489    develop(bool, PrintJVMWarnings, false,                                    \
  66.490 -          "Prints warnings for unimplemented JVM functions")                \
  66.491 +          "Print warnings for unimplemented JVM functions")                 \
  66.492                                                                              \
  66.493    product(bool, PrintWarnings, true,                                        \
  66.494 -          "Prints JVM warnings to output stream")                           \
  66.495 +          "Print JVM warnings to output stream")                            \
  66.496                                                                              \
  66.497    notproduct(uintx, WarnOnStalledSpinLock, 0,                               \
  66.498 -          "Prints warnings for stalled SpinLocks")                          \
  66.499 +          "Print warnings for stalled SpinLocks")                           \
  66.500                                                                              \
  66.501    product(bool, RegisterFinalizersAtInit, true,                             \
  66.502            "Register finalizable objects at end of Object.<init> or "        \
  66.503            "after allocation")                                               \
  66.504                                                                              \
  66.505    develop(bool, RegisterReferences, true,                                   \
  66.506 -          "Tells whether the VM should register soft/weak/final/phantom "   \
  66.507 +          "Tell whether the VM should register soft/weak/final/phantom "    \
  66.508            "references")                                                     \
  66.509                                                                              \
  66.510    develop(bool, IgnoreRewrites, false,                                      \
  66.511 -          "Supress rewrites of bytecodes in the oopmap generator. "         \
  66.512 +          "Suppress rewrites of bytecodes in the oopmap generator. "        \
  66.513            "This is unsafe!")                                                \
  66.514                                                                              \
  66.515    develop(bool, PrintCodeCacheExtension, false,                             \
  66.516 @@ -1042,8 +1050,7 @@
  66.517            "Enable the security JVM functions")                              \
  66.518                                                                              \
  66.519    develop(bool, ProtectionDomainVerification, true,                         \
  66.520 -          "Verifies protection domain before resolution in system "         \
  66.521 -          "dictionary")                                                     \
  66.522 +          "Verify protection domain before resolution in system dictionary")\
  66.523                                                                              \
  66.524    product(bool, ClassUnloading, true,                                       \
  66.525            "Do unloading of classes")                                        \
  66.526 @@ -1056,14 +1063,14 @@
  66.527            "Write memory usage profiling to log file")                       \
  66.528                                                                              \
  66.529    notproduct(bool, PrintSystemDictionaryAtExit, false,                      \
  66.530 -          "Prints the system dictionary at exit")                           \
  66.531 +          "Print the system dictionary at exit")                            \
  66.532                                                                              \
  66.533    experimental(intx, PredictedLoadedClassCount, 0,                          \
  66.534 -          "Experimental: Tune loaded class cache starting size.")           \
  66.535 +          "Experimental: Tune loaded class cache starting size")            \
  66.536                                                                              \
  66.537    diagnostic(bool, UnsyncloadClass, false,                                  \
  66.538            "Unstable: VM calls loadClass unsynchronized. Custom "            \
  66.539 -          "class loader  must call VM synchronized for findClass "          \
  66.540 +          "class loader must call VM synchronized for findClass "           \
  66.541            "and defineClass.")                                               \
  66.542                                                                              \
  66.543    product(bool, AlwaysLockClassLoader, false,                               \
  66.544 @@ -1079,22 +1086,22 @@
  66.545            "Call loadClassInternal() rather than loadClass()")               \
  66.546                                                                              \
  66.547    product_pd(bool, DontYieldALot,                                           \
  66.548 -          "Throw away obvious excess yield calls (for SOLARIS only)")       \
  66.549 +          "Throw away obvious excess yield calls (for Solaris only)")       \
  66.550                                                                              \
  66.551    product_pd(bool, ConvertSleepToYield,                                     \
  66.552 -          "Converts sleep(0) to thread yield "                              \
  66.553 -          "(may be off for SOLARIS to improve GUI)")                        \
  66.554 +          "Convert sleep(0) to thread yield "                               \
  66.555 +          "(may be off for Solaris to improve GUI)")                        \
  66.556                                                                              \
  66.557    product(bool, ConvertYieldToSleep, false,                                 \
  66.558 -          "Converts yield to a sleep of MinSleepInterval to simulate Win32 "\
  66.559 -          "behavior (SOLARIS only)")                                        \
  66.560 +          "Convert yield to a sleep of MinSleepInterval to simulate Win32 " \
  66.561 +          "behavior (Solaris only)")                                        \
  66.562                                                                              \
  66.563    product(bool, UseBoundThreads, true,                                      \
  66.564 -          "Bind user level threads to kernel threads (for SOLARIS only)")   \
  66.565 +          "Bind user level threads to kernel threads (for Solaris only)")   \
  66.566                                                                              \
  66.567    develop(bool, UseDetachedThreads, true,                                   \
  66.568            "Use detached threads that are recycled upon termination "        \
  66.569 -          "(for SOLARIS only)")                                             \
  66.570 +          "(for Solaris only)")                                             \
  66.571                                                                              \
  66.572    product(bool, UseLWPSynchronization, true,                                \
  66.573            "Use LWP-based instead of libthread-based synchronization "       \
  66.574 @@ -1104,41 +1111,43 @@
  66.575            "(Unstable) Various monitor synchronization tunables")            \
  66.576                                                                              \
  66.577    product(intx, EmitSync, 0,                                                \
  66.578 -          "(Unsafe,Unstable) "                                              \
  66.579 -          " Controls emission of inline sync fast-path code")               \
  66.580 +          "(Unsafe, Unstable) "                                             \
  66.581 +          "Control emission of inline sync fast-path code")                 \
  66.582                                                                              \
  66.583    product(intx, MonitorBound, 0, "Bound Monitor population")                \
  66.584                                                                              \
  66.585    product(bool, MonitorInUseLists, false, "Track Monitors for Deflation")   \
  66.586                                                                              \
  66.587 -  product(intx, SyncFlags, 0, "(Unsafe,Unstable) Experimental Sync flags" ) \
  66.588 -                                                                            \
  66.589 -  product(intx, SyncVerbose, 0, "(Unstable)" )                              \
  66.590 -                                                                            \
  66.591 -  product(intx, ClearFPUAtPark, 0, "(Unsafe,Unstable)" )                    \
  66.592 +  product(intx, SyncFlags, 0, "(Unsafe, Unstable) Experimental Sync flags") \
  66.593 +                                                                            \
  66.594 +  product(intx, SyncVerbose, 0, "(Unstable)")                               \
  66.595 +                                                                            \
  66.596 +  product(intx, ClearFPUAtPark, 0, "(Unsafe, Unstable)")                    \
  66.597                                                                              \
  66.598    product(intx, hashCode, 5,                                                \
  66.599 -         "(Unstable) select hashCode generation algorithm" )                \
  66.600 +          "(Unstable) select hashCode generation algorithm")                \
  66.601                                                                              \
  66.602    product(intx, WorkAroundNPTLTimedWaitHang, 1,                             \
  66.603 -         "(Unstable, Linux-specific)"                                       \
  66.604 -         " avoid NPTL-FUTEX hang pthread_cond_timedwait" )                  \
  66.605 +          "(Unstable, Linux-specific) "                                     \
  66.606 +          "avoid NPTL-FUTEX hang pthread_cond_timedwait")                   \
  66.607                                                                              \
  66.608    product(bool, FilterSpuriousWakeups, true,                                \
  66.609            "Prevent spurious or premature wakeups from object.wait "         \
  66.610            "(Solaris only)")                                                 \
  66.611                                                                              \
  66.612 -  product(intx, NativeMonitorTimeout, -1, "(Unstable)" )                    \
  66.613 -  product(intx, NativeMonitorFlags, 0, "(Unstable)" )                       \
  66.614 -  product(intx, NativeMonitorSpinLimit, 20, "(Unstable)" )                  \
  66.615 +  product(intx, NativeMonitorTimeout, -1, "(Unstable)")                     \
  66.616 +                                                                            \
  66.617 +  product(intx, NativeMonitorFlags, 0, "(Unstable)")                        \
  66.618 +                                                                            \
  66.619 +  product(intx, NativeMonitorSpinLimit, 20, "(Unstable)")                   \
  66.620                                                                              \
  66.621    develop(bool, UsePthreads, false,                                         \
  66.622            "Use pthread-based instead of libthread-based synchronization "   \
  66.623            "(SPARC only)")                                                   \
  66.624                                                                              \
  66.625    product(bool, AdjustConcurrency, false,                                   \
  66.626 -          "call thr_setconcurrency at thread create time to avoid "         \
  66.627 -          "LWP starvation on MP systems (For Solaris Only)")                \
  66.628 +          "Call thr_setconcurrency at thread creation time to avoid "       \
  66.629 +          "LWP starvation on MP systems (for Solaris Only)")                \
  66.630                                                                              \
  66.631    product(bool, ReduceSignalUsage, false,                                   \
  66.632            "Reduce the use of OS signals in Java and/or the VM")             \
  66.633 @@ -1147,13 +1156,14 @@
  66.634            "Share vtable stubs (smaller code but worse branch prediction")   \
  66.635                                                                              \
  66.636    develop(bool, LoadLineNumberTables, true,                                 \
  66.637 -          "Tells whether the class file parser loads line number tables")   \
  66.638 +          "Tell whether the class file parser loads line number tables")    \
  66.639                                                                              \
  66.640    develop(bool, LoadLocalVariableTables, true,                              \
  66.641 -          "Tells whether the class file parser loads local variable tables")\
  66.642 +          "Tell whether the class file parser loads local variable tables") \
  66.643                                                                              \
  66.644    develop(bool, LoadLocalVariableTypeTables, true,                          \
  66.645 -          "Tells whether the class file parser loads local variable type tables")\
  66.646 +          "Tell whether the class file parser loads local variable type"    \
  66.647 +          "tables")                                                         \
  66.648                                                                              \
  66.649    product(bool, AllowUserSignalHandlers, false,                             \
  66.650            "Do not complain if the application installs signal handlers "    \
  66.651 @@ -1184,10 +1194,12 @@
  66.652                                                                              \
  66.653    product(bool, EagerXrunInit, false,                                       \
  66.654            "Eagerly initialize -Xrun libraries; allows startup profiling, "  \
  66.655 -          " but not all -Xrun libraries may support the state of the VM at this time") \
  66.656 +          "but not all -Xrun libraries may support the state of the VM "    \
  66.657 +          "at this time")                                                   \
  66.658                                                                              \
  66.659    product(bool, PreserveAllAnnotations, false,                              \
  66.660 -          "Preserve RuntimeInvisibleAnnotations as well as RuntimeVisibleAnnotations") \
  66.661 +          "Preserve RuntimeInvisibleAnnotations as well "                   \
  66.662 +          "as RuntimeVisibleAnnotations")                                   \
  66.663                                                                              \
  66.664    develop(uintx, PreallocatedOutOfMemoryErrorCount, 4,                      \
  66.665            "Number of OutOfMemoryErrors preallocated with backtrace")        \
  66.666 @@ -1262,7 +1274,7 @@
  66.667            "Trace level for JVMTI RedefineClasses")                          \
  66.668                                                                              \
  66.669    develop(bool, StressMethodComparator, false,                              \
  66.670 -          "run the MethodComparator on all loaded methods")                 \
  66.671 +          "Run the MethodComparator on all loaded methods")                 \
  66.672                                                                              \
  66.673    /* change to false by default sometime after Mustang */                   \
  66.674    product(bool, VerifyMergedCPBytecodes, true,                              \
  66.675 @@ -1296,7 +1308,7 @@
  66.676            "Trace dependencies")                                             \
  66.677                                                                              \
  66.678    develop(bool, VerifyDependencies, trueInDebug,                            \
  66.679 -         "Exercise and verify the compilation dependency mechanism")        \
  66.680 +          "Exercise and verify the compilation dependency mechanism")       \
  66.681                                                                              \
  66.682    develop(bool, TraceNewOopMapGeneration, false,                            \
  66.683            "Trace OopMapGeneration")                                         \
  66.684 @@ -1314,7 +1326,7 @@
  66.685            "Trace monitor matching failures during OopMapGeneration")        \
  66.686                                                                              \
  66.687    develop(bool, TraceOopMapRewrites, false,                                 \
  66.688 -          "Trace rewritting of method oops during oop map generation")      \
  66.689 +          "Trace rewriting of method oops during oop map generation")       \
  66.690                                                                              \
  66.691    develop(bool, TraceSafepoint, false,                                      \
  66.692            "Trace safepoint operations")                                     \
  66.693 @@ -1332,10 +1344,10 @@
  66.694            "Trace setup time")                                               \
  66.695                                                                              \
  66.696    develop(bool, TraceProtectionDomainVerification, false,                   \
  66.697 -          "Trace protection domain verifcation")                            \
  66.698 +          "Trace protection domain verification")                           \
  66.699                                                                              \
  66.700    develop(bool, TraceClearedExceptions, false,                              \
  66.701 -          "Prints when an exception is forcibly cleared")                   \
  66.702 +          "Print when an exception is forcibly cleared")                    \
  66.703                                                                              \
  66.704    product(bool, TraceClassResolution, false,                                \
  66.705            "Trace all constant pool resolutions (for debugging)")            \
  66.706 @@ -1349,7 +1361,7 @@
  66.707    /* gc */                                                                  \
  66.708                                                                              \
  66.709    product(bool, UseSerialGC, false,                                         \
  66.710 -          "Use the serial garbage collector")                               \
  66.711 +          "Use the Serial garbage collector")                               \
  66.712                                                                              \
  66.713    product(bool, UseG1GC, false,                                             \
  66.714            "Use the Garbage-First garbage collector")                        \
  66.715 @@ -1368,16 +1380,16 @@
  66.716            "The collection count for the first maximum compaction")          \
  66.717                                                                              \
  66.718    product(bool, UseMaximumCompactionOnSystemGC, true,                       \
  66.719 -          "In the Parallel Old garbage collector maximum compaction for "   \
  66.720 -          "a system GC")                                                    \
  66.721 +          "Use maximum compaction in the Parallel Old garbage collector "   \
  66.722 +          "for a system GC")                                                \
  66.723                                                                              \
  66.724    product(uintx, ParallelOldDeadWoodLimiterMean, 50,                        \
  66.725 -          "The mean used by the par compact dead wood"                      \
  66.726 -          "limiter (a number between 0-100).")                              \
  66.727 +          "The mean used by the parallel compact dead wood "                \
  66.728 +          "limiter (a number between 0-100)")                               \
  66.729                                                                              \
  66.730    product(uintx, ParallelOldDeadWoodLimiterStdDev, 80,                      \
  66.731 -          "The standard deviation used by the par compact dead wood"        \
  66.732 -          "limiter (a number between 0-100).")                              \
  66.733 +          "The standard deviation used by the parallel compact dead wood "  \
  66.734 +          "limiter (a number between 0-100)")                               \
  66.735                                                                              \
  66.736    product(uintx, ParallelGCThreads, 0,                                      \
  66.737            "Number of parallel threads parallel gc will use")                \
  66.738 @@ -1387,7 +1399,7 @@
  66.739            "parallel gc will use")                                           \
  66.740                                                                              \
  66.741    diagnostic(bool, ForceDynamicNumberOfGCThreads, false,                    \
  66.742 -          "Force dynamic selection of the number of"                        \
  66.743 +          "Force dynamic selection of the number of "                       \
  66.744            "parallel threads parallel gc will use to aid debugging")         \
  66.745                                                                              \
  66.746    product(uintx, HeapSizePerGCThread, ScaleForWordSize(64*M),               \
  66.747 @@ -1398,7 +1410,7 @@
  66.748            "Trace the dynamic GC thread usage")                              \
  66.749                                                                              \
  66.750    develop(bool, ParallelOldGCSplitALot, false,                              \
  66.751 -          "Provoke splitting (copying data from a young gen space to"       \
  66.752 +          "Provoke splitting (copying data from a young gen space to "      \
  66.753            "multiple destination spaces)")                                   \
  66.754                                                                              \
  66.755    develop(uintx, ParallelOldGCSplitInterval, 3,                             \
  66.756 @@ -1408,19 +1420,19 @@
  66.757            "Number of threads concurrent gc will use")                       \
  66.758                                                                              \
  66.759    product(uintx, YoungPLABSize, 4096,                                       \
  66.760 -          "Size of young gen promotion labs (in HeapWords)")                \
  66.761 +          "Size of young gen promotion LAB's (in HeapWords)")               \
  66.762                                                                              \
  66.763    product(uintx, OldPLABSize, 1024,                                         \
  66.764 -          "Size of old gen promotion labs (in HeapWords)")                  \
  66.765 +          "Size of old gen promotion LAB's (in HeapWords)")                 \
  66.766                                                                              \
  66.767    product(uintx, GCTaskTimeStampEntries, 200,                               \
  66.768            "Number of time stamp entries per gc worker thread")              \
  66.769                                                                              \
  66.770    product(bool, AlwaysTenure, false,                                        \
  66.771 -          "Always tenure objects in eden. (ParallelGC only)")               \
  66.772 +          "Always tenure objects in eden (ParallelGC only)")                \
  66.773                                                                              \
  66.774    product(bool, NeverTenure, false,                                         \
  66.775 -          "Never tenure objects in eden, May tenure on overflow "           \
  66.776 +          "Never tenure objects in eden, may tenure on overflow "           \
  66.777            "(ParallelGC only)")                                              \
  66.778                                                                              \
  66.779    product(bool, ScavengeBeforeFullGC, true,                                 \
  66.780 @@ -1428,14 +1440,14 @@
  66.781            "used with UseParallelGC")                                        \
  66.782                                                                              \
  66.783    develop(bool, ScavengeWithObjectsInToSpace, false,                        \
  66.784 -          "Allow scavenges to occur when to_space contains objects.")       \
  66.785 +          "Allow scavenges to occur when to-space contains objects")        \
  66.786                                                                              \
  66.787    product(bool, UseConcMarkSweepGC, false,                                  \
  66.788            "Use Concurrent Mark-Sweep GC in the old generation")             \
  66.789                                                                              \
  66.790    product(bool, ExplicitGCInvokesConcurrent, false,                         \
  66.791 -          "A System.gc() request invokes a concurrent collection;"          \
  66.792 -          " (effective only when UseConcMarkSweepGC)")                      \
  66.793 +          "A System.gc() request invokes a concurrent collection; "         \
  66.794 +          "(effective only when UseConcMarkSweepGC)")                       \
  66.795                                                                              \
  66.796    product(bool, ExplicitGCInvokesConcurrentAndUnloadsClasses, false,        \
  66.797            "A System.gc() request invokes a concurrent collection and "      \
  66.798 @@ -1443,19 +1455,19 @@
  66.799            "(effective only when UseConcMarkSweepGC)")                       \
  66.800                                                                              \
  66.801    product(bool, GCLockerInvokesConcurrent, false,                           \
  66.802 -          "The exit of a JNI CS necessitating a scavenge also"              \
  66.803 -          " kicks off a bkgrd concurrent collection")                       \
  66.804 +          "The exit of a JNI critical section necessitating a scavenge, "   \
  66.805 +          "also kicks off a background concurrent collection")              \
  66.806                                                                              \
  66.807    product(uintx, GCLockerEdenExpansionPercent, 5,                           \
  66.808 -          "How much the GC can expand the eden by while the GC locker  "    \
  66.809 +          "How much the GC can expand the eden by while the GC locker "     \
  66.810            "is active (as a percentage)")                                    \
  66.811                                                                              \
  66.812    diagnostic(intx, GCLockerRetryAllocationCount, 2,                         \
  66.813 -          "Number of times to retry allocations when"                       \
  66.814 -          " blocked by the GC locker")                                      \
  66.815 +          "Number of times to retry allocations when "                      \
  66.816 +          "blocked by the GC locker")                                       \
  66.817                                                                              \
  66.818    develop(bool, UseCMSAdaptiveFreeLists, true,                              \
  66.819 -          "Use Adaptive Free Lists in the CMS generation")                  \
  66.820 +          "Use adaptive free lists in the CMS generation")                  \
  66.821                                                                              \
  66.822    develop(bool, UseAsyncConcMarkSweepGC, true,                              \
  66.823            "Use Asynchronous Concurrent Mark-Sweep GC in the old generation")\
  66.824 @@ -1470,44 +1482,46 @@
  66.825            "Use passing of collection from background to foreground")        \
  66.826                                                                              \
  66.827    product(bool, UseParNewGC, false,                                         \
  66.828 -          "Use parallel threads in the new generation.")                    \
  66.829 +          "Use parallel threads in the new generation")                     \
  66.830                                                                              \
  66.831    product(bool, ParallelGCVerbose, false,                                   \
  66.832 -          "Verbose output for parallel GC.")                                \
  66.833 +          "Verbose output for parallel gc")                                 \
  66.834                                                                              \
  66.835    product(uintx, ParallelGCBufferWastePct, 10,                              \
  66.836 -          "Wasted fraction of parallel allocation buffer.")                 \
  66.837 +          "Wasted fraction of parallel allocation buffer")                  \
  66.838                                                                              \
  66.839    diagnostic(bool, ParallelGCRetainPLAB, false,                             \
  66.840 -             "Retain parallel allocation buffers across scavenges; "        \
  66.841 -             " -- disabled because this currently conflicts with "          \
  66.842 -             " parallel card scanning under certain conditions ")           \
  66.843 +          "Retain parallel allocation buffers across scavenges; "           \
  66.844 +          "it is disabled because this currently conflicts with "           \
  66.845 +          "parallel card scanning under certain conditions.")               \
  66.846                                                                              \
  66.847    product(uintx, TargetPLABWastePct, 10,                                    \
  66.848            "Target wasted space in last buffer as percent of overall "       \
  66.849            "allocation")                                                     \
  66.850                                                                              \
  66.851    product(uintx, PLABWeight, 75,                                            \
  66.852 -          "Percentage (0-100) used to weight the current sample when"       \
  66.853 -          "computing exponentially decaying average for ResizePLAB.")       \
  66.854 +          "Percentage (0-100) used to weigh the current sample when "       \
  66.855 +          "computing exponentially decaying average for ResizePLAB")        \
  66.856                                                                              \
  66.857    product(bool, ResizePLAB, true,                                           \
  66.858 -          "Dynamically resize (survivor space) promotion labs")             \
  66.859 +          "Dynamically resize (survivor space) promotion LAB's")            \
  66.860                                                                              \
  66.861    product(bool, PrintPLAB, false,                                           \
  66.862 -          "Print (survivor space) promotion labs sizing decisions")         \
  66.863 +          "Print (survivor space) promotion LAB's sizing decisions")        \
  66.864                                                                              \
  66.865    product(intx, ParGCArrayScanChunk, 50,                                    \
  66.866 -          "Scan a subset and push remainder, if array is bigger than this") \
  66.867 +          "Scan a subset of object array and push remainder, if array is "  \
  66.868 +          "bigger than this")                                               \
  66.869                                                                              \
  66.870    product(bool, ParGCUseLocalOverflow, false,                               \
  66.871            "Instead of a global overflow list, use local overflow stacks")   \
  66.872                                                                              \
  66.873    product(bool, ParGCTrimOverflow, true,                                    \
  66.874 -          "Eagerly trim the local overflow lists (when ParGCUseLocalOverflow") \
  66.875 +          "Eagerly trim the local overflow lists "                          \
  66.876 +          "(when ParGCUseLocalOverflow)")                                   \
  66.877                                                                              \
  66.878    notproduct(bool, ParGCWorkQueueOverflowALot, false,                       \
  66.879 -          "Whether we should simulate work queue overflow in ParNew")       \
  66.880 +          "Simulate work queue overflow in ParNew")                         \
  66.881                                                                              \
  66.882    notproduct(uintx, ParGCWorkQueueOverflowInterval, 1000,                   \
  66.883            "An `interval' counter that determines how frequently "           \
  66.884 @@ -1525,43 +1539,46 @@
  66.885            "during card table scanning")                                     \
  66.886                                                                              \
  66.887    product(uintx, CMSParPromoteBlocksToClaim, 16,                            \
  66.888 -          "Number of blocks to attempt to claim when refilling CMS LAB for "\
  66.889 -          "parallel GC.")                                                   \
  66.890 +          "Number of blocks to attempt to claim when refilling CMS LAB's "  \
  66.891 +          "for parallel GC")                                                \
  66.892                                                                              \
  66.893    product(uintx, OldPLABWeight, 50,                                         \
  66.894 -          "Percentage (0-100) used to weight the current sample when"       \
  66.895 -          "computing exponentially decaying average for resizing CMSParPromoteBlocksToClaim.") \
  66.896 +          "Percentage (0-100) used to weight the current sample when "      \
  66.897 +          "computing exponentially decaying average for resizing "          \
  66.898 +          "CMSParPromoteBlocksToClaim")                                     \
  66.899                                                                              \
  66.900    product(bool, ResizeOldPLAB, true,                                        \
  66.901 -          "Dynamically resize (old gen) promotion labs")                    \
  66.902 +          "Dynamically resize (old gen) promotion LAB's")                   \
  66.903                                                                              \
  66.904    product(bool, PrintOldPLAB, false,                                        \
  66.905 -          "Print (old gen) promotion labs sizing decisions")                \
  66.906 +          "Print (old gen) promotion LAB's sizing decisions")               \
  66.907                                                                              \
  66.908    product(uintx, CMSOldPLABMin, 16,                                         \
  66.909 -          "Min size of CMS gen promotion lab caches per worker per blksize")\
  66.910 +          "Minimum size of CMS gen promotion LAB caches per worker "        \
  66.911 +          "per block size")                                                 \
  66.912                                                                              \
  66.913    product(uintx, CMSOldPLABMax, 1024,                                       \
  66.914 -          "Max size of CMS gen promotion lab caches per worker per blksize")\
  66.915 +          "Maximum size of CMS gen promotion LAB caches per worker "        \
  66.916 +          "per block size")                                                 \
  66.917                                                                              \
  66.918    product(uintx, CMSOldPLABNumRefills, 4,                                   \
  66.919 -          "Nominal number of refills of CMS gen promotion lab cache"        \
  66.920 -          " per worker per block size")                                     \
  66.921 +          "Nominal number of refills of CMS gen promotion LAB cache "       \
  66.922 +          "per worker per block size")                                      \
  66.923                                                                              \
  66.924    product(bool, CMSOldPLABResizeQuicker, false,                             \
  66.925 -          "Whether to react on-the-fly during a scavenge to a sudden"       \
  66.926 -          " change in block demand rate")                                   \
  66.927 +          "React on-the-fly during a scavenge to a sudden "                 \
  66.928 +          "change in block demand rate")                                    \
  66.929                                                                              \
  66.930    product(uintx, CMSOldPLABToleranceFactor, 4,                              \
  66.931 -          "The tolerance of the phase-change detector for on-the-fly"       \
  66.932 -          " PLAB resizing during a scavenge")                               \
  66.933 +          "The tolerance of the phase-change detector for on-the-fly "      \
  66.934 +          "PLAB resizing during a scavenge")                                \
  66.935                                                                              \
  66.936    product(uintx, CMSOldPLABReactivityFactor, 2,                             \
  66.937 -          "The gain in the feedback loop for on-the-fly PLAB resizing"      \
  66.938 -          " during a scavenge")                                             \
  66.939 +          "The gain in the feedback loop for on-the-fly PLAB resizing "     \
  66.940 +          "during a scavenge")                                              \
  66.941                                                                              \
  66.942    product(bool, AlwaysPreTouch, false,                                      \
  66.943 -          "It forces all freshly committed pages to be pre-touched.")       \
  66.944 +          "Force all freshly committed pages to be pre-touched")            \
  66.945                                                                              \
  66.946    product_pd(uintx, CMSYoungGenPerWorker,                                   \
  66.947            "The maximum size of young gen chosen by default per GC worker "  \
  66.948 @@ -1571,64 +1588,67 @@
  66.949            "Whether CMS GC should operate in \"incremental\" mode")          \
  66.950                                                                              \
  66.951    product(uintx, CMSIncrementalDutyCycle, 10,                               \
  66.952 -          "CMS incremental mode duty cycle (a percentage, 0-100).  If"      \
  66.953 -          "CMSIncrementalPacing is enabled, then this is just the initial"  \
  66.954 -          "value")                                                          \
  66.955 +          "Percentage (0-100) of CMS incremental mode duty cycle. If "      \
  66.956 +          "CMSIncrementalPacing is enabled, then this is just the initial " \
  66.957 +          "value.")                                                         \
  66.958                                                                              \
  66.959    product(bool, CMSIncrementalPacing, true,                                 \
  66.960            "Whether the CMS incremental mode duty cycle should be "          \
  66.961            "automatically adjusted")                                         \
  66.962                                                                              \
  66.963    product(uintx, CMSIncrementalDutyCycleMin, 0,                             \
  66.964 -          "Lower bound on the duty cycle when CMSIncrementalPacing is "     \
  66.965 -          "enabled (a percentage, 0-100)")                                  \
  66.966 +          "Minimum percentage (0-100) of the CMS incremental duty cycle "   \
  66.967 +          "used when CMSIncrementalPacing is enabled")                      \
  66.968                                                                              \
  66.969    product(uintx, CMSIncrementalSafetyFactor, 10,                            \
  66.970            "Percentage (0-100) used to add conservatism when computing the " \
  66.971            "duty cycle")                                                     \
  66.972                                                                              \
  66.973    product(uintx, CMSIncrementalOffset, 0,                                   \
  66.974 -          "Percentage (0-100) by which the CMS incremental mode duty cycle" \
  66.975 -          " is shifted to the right within the period between young GCs")   \
  66.976 +          "Percentage (0-100) by which the CMS incremental mode duty cycle "\
  66.977 +          "is shifted to the right within the period between young GCs")    \
  66.978                                                                              \
  66.979    product(uintx, CMSExpAvgFactor, 50,                                       \
  66.980 -          "Percentage (0-100) used to weight the current sample when"       \
  66.981 -          "computing exponential averages for CMS statistics.")             \
  66.982 +          "Percentage (0-100) used to weigh the current sample when "       \
  66.983 +          "computing exponential averages for CMS statistics")              \
  66.984                                                                              \
  66.985    product(uintx, CMS_FLSWeight, 75,                                         \
  66.986 -          "Percentage (0-100) used to weight the current sample when"       \
  66.987 -          "computing exponentially decating averages for CMS FLS statistics.") \
  66.988 +          "Percentage (0-100) used to weigh the current sample when "       \
  66.989 +          "computing exponentially decaying averages for CMS FLS "          \
  66.990 +          "statistics")                                                     \
  66.991                                                                              \
  66.992    product(uintx, CMS_FLSPadding, 1,                                         \
  66.993 -          "The multiple of deviation from mean to use for buffering"        \
  66.994 -          "against volatility in free list demand.")                        \
  66.995 +          "The multiple of deviation from mean to use for buffering "       \
  66.996 +          "against volatility in free list demand")                         \
  66.997                                                                              \
  66.998    product(uintx, FLSCoalescePolicy, 2,                                      \
  66.999 -          "CMS: Aggression level for coalescing, increasing from 0 to 4")   \
 66.1000 +          "CMS: aggressiveness level for coalescing, increasing "           \
 66.1001 +          "from 0 to 4")                                                    \
 66.1002                                                                              \
 66.1003    product(bool, FLSAlwaysCoalesceLarge, false,                              \
 66.1004 -          "CMS: Larger free blocks are always available for coalescing")    \
 66.1005 +          "CMS: larger free blocks are always available for coalescing")    \
 66.1006                                                                              \
 66.1007    product(double, FLSLargestBlockCoalesceProximity, 0.99,                   \
 66.1008 -          "CMS: the smaller the percentage the greater the coalition force")\
 66.1009 +          "CMS: the smaller the percentage the greater the coalescing "     \
 66.1010 +          "force")                                                          \
 66.1011                                                                              \
 66.1012    product(double, CMSSmallCoalSurplusPercent, 1.05,                         \
 66.1013 -          "CMS: the factor by which to inflate estimated demand of small"   \
 66.1014 -          " block sizes to prevent coalescing with an adjoining block")     \
 66.1015 +          "CMS: the factor by which to inflate estimated demand of small "  \
 66.1016 +          "block sizes to prevent coalescing with an adjoining block")      \
 66.1017                                                                              \
 66.1018    product(double, CMSLargeCoalSurplusPercent, 0.95,                         \
 66.1019 -          "CMS: the factor by which to inflate estimated demand of large"   \
 66.1020 -          " block sizes to prevent coalescing with an adjoining block")     \
 66.1021 +          "CMS: the factor by which to inflate estimated demand of large "  \
 66.1022 +          "block sizes to prevent coalescing with an adjoining block")      \
 66.1023                                                                              \
 66.1024    product(double, CMSSmallSplitSurplusPercent, 1.10,                        \
 66.1025 -          "CMS: the factor by which to inflate estimated demand of small"   \
 66.1026 -          " block sizes to prevent splitting to supply demand for smaller"  \
 66.1027 -          " blocks")                                                        \
 66.1028 +          "CMS: the factor by which to inflate estimated demand of small "  \
 66.1029 +          "block sizes to prevent splitting to supply demand for smaller "  \
 66.1030 +          "blocks")                                                         \
 66.1031                                                                              \
 66.1032    product(double, CMSLargeSplitSurplusPercent, 1.00,                        \
 66.1033 -          "CMS: the factor by which to inflate estimated demand of large"   \
 66.1034 -          " block sizes to prevent splitting to supply demand for smaller"  \
 66.1035 -          " blocks")                                                        \
 66.1036 +          "CMS: the factor by which to inflate estimated demand of large "  \
 66.1037 +          "block sizes to prevent splitting to supply demand for smaller "  \
 66.1038 +          "blocks")                                                         \
 66.1039                                                                              \
 66.1040    product(bool, CMSExtrapolateSweep, false,                                 \
 66.1041            "CMS: cushion for block demand during sweep")                     \
 66.1042 @@ -1640,11 +1660,11 @@
 66.1043                                                                              \
 66.1044    product(uintx, CMS_SweepPadding, 1,                                       \
 66.1045            "The multiple of deviation from mean to use for buffering "       \
 66.1046 -          "against volatility in inter-sweep duration.")                    \
 66.1047 +          "against volatility in inter-sweep duration")                     \
 66.1048                                                                              \
 66.1049    product(uintx, CMS_SweepTimerThresholdMillis, 10,                         \
 66.1050            "Skip block flux-rate sampling for an epoch unless inter-sweep "  \
 66.1051 -          "duration exceeds this threhold in milliseconds")                 \
 66.1052 +          "duration exceeds this threshold in milliseconds")                \
 66.1053                                                                              \
 66.1054    develop(bool, CMSTraceIncrementalMode, false,                             \
 66.1055            "Trace CMS incremental mode")                                     \
 66.1056 @@ -1659,14 +1679,15 @@
 66.1057            "Whether class unloading enabled when using CMS GC")              \
 66.1058                                                                              \
 66.1059    product(uintx, CMSClassUnloadingMaxInterval, 0,                           \
 66.1060 -          "When CMS class unloading is enabled, the maximum CMS cycle count"\
 66.1061 -          " for which classes may not be unloaded")                         \
 66.1062 +          "When CMS class unloading is enabled, the maximum CMS cycle "     \
 66.1063 +          "count for which classes may not be unloaded")                    \
 66.1064                                                                              \
 66.1065    product(bool, CMSCompactWhenClearAllSoftRefs, true,                       \
 66.1066 -          "Compact when asked to collect CMS gen with clear_all_soft_refs") \
 66.1067 +          "Compact when asked to collect CMS gen with "                     \
 66.1068 +          "clear_all_soft_refs()")                                          \
 66.1069                                                                              \
 66.1070    product(bool, UseCMSCompactAtFullCollection, true,                        \
 66.1071 -          "Use mark sweep compact at full collections")                     \
 66.1072 +          "Use Mark-Sweep-Compact algorithm at full collections")           \
 66.1073                                                                              \
 66.1074    product(uintx, CMSFullGCsBeforeCompaction, 0,                             \
 66.1075            "Number of CMS full collection done before compaction if > 0")    \
 66.1076 @@ -1688,38 +1709,37 @@
 66.1077            "Warn in case of excessive CMS looping")                          \
 66.1078                                                                              \
 66.1079    develop(bool, CMSOverflowEarlyRestoration, false,                         \
 66.1080 -          "Whether preserved marks should be restored early")               \
 66.1081 +          "Restore preserved marks early")                                  \
 66.1082                                                                              \
 66.1083    product(uintx, MarkStackSize, NOT_LP64(32*K) LP64_ONLY(4*M),              \
 66.1084            "Size of marking stack")                                          \
 66.1085                                                                              \
 66.1086    product(uintx, MarkStackSizeMax, NOT_LP64(4*M) LP64_ONLY(512*M),          \
 66.1087 -          "Max size of marking stack")                                      \
 66.1088 +          "Maximum size of marking stack")                                  \
 66.1089                                                                              \
 66.1090    notproduct(bool, CMSMarkStackOverflowALot, false,                         \
 66.1091 -          "Whether we should simulate frequent marking stack / work queue"  \
 66.1092 -          " overflow")                                                      \
 66.1093 +          "Simulate frequent marking stack / work queue overflow")          \
 66.1094                                                                              \
 66.1095    notproduct(uintx, CMSMarkStackOverflowInterval, 1000,                     \
 66.1096 -          "An `interval' counter that determines how frequently"            \
 66.1097 -          " we simulate overflow; a smaller number increases frequency")    \
 66.1098 +          "An \"interval\" counter that determines how frequently "         \
 66.1099 +          "to simulate overflow; a smaller number increases frequency")     \
 66.1100                                                                              \
 66.1101    product(uintx, CMSMaxAbortablePrecleanLoops, 0,                           \
 66.1102 -          "(Temporary, subject to experimentation)"                         \
 66.1103 +          "(Temporary, subject to experimentation) "                        \
 66.1104            "Maximum number of abortable preclean iterations, if > 0")        \
 66.1105                                                                              \
 66.1106    product(intx, CMSMaxAbortablePrecleanTime, 5000,                          \
 66.1107 -          "(Temporary, subject to experimentation)"                         \
 66.1108 -          "Maximum time in abortable preclean in ms")                       \
 66.1109 +          "(Temporary, subject to experimentation) "                        \
 66.1110 +          "Maximum time in abortable preclean (in milliseconds)")           \
 66.1111                                                                              \
 66.1112    product(uintx, CMSAbortablePrecleanMinWorkPerIteration, 100,              \
 66.1113 -          "(Temporary, subject to experimentation)"                         \
 66.1114 +          "(Temporary, subject to experimentation) "                        \
 66.1115            "Nominal minimum work per abortable preclean iteration")          \
 66.1116                                                                              \
 66.1117    manageable(intx, CMSAbortablePrecleanWaitMillis, 100,                     \
 66.1118 -          "(Temporary, subject to experimentation)"                         \
 66.1119 -          " Time that we sleep between iterations when not given"           \
 66.1120 -          " enough work per iteration")                                     \
 66.1121 +          "(Temporary, subject to experimentation) "                        \
 66.1122 +          "Time that we sleep between iterations when not given "           \
 66.1123 +          "enough work per iteration")                                      \
 66.1124                                                                              \
 66.1125    product(uintx, CMSRescanMultiple, 32,                                     \
 66.1126            "Size (in cards) of CMS parallel rescan task")                    \
 66.1127 @@ -1737,23 +1757,24 @@
 66.1128            "Whether parallel remark enabled (only if ParNewGC)")             \
 66.1129                                                                              \
 66.1130    product(bool, CMSParallelSurvivorRemarkEnabled, true,                     \
 66.1131 -          "Whether parallel remark of survivor space"                       \
 66.1132 -          " enabled (effective only if CMSParallelRemarkEnabled)")          \
 66.1133 +          "Whether parallel remark of survivor space "                      \
 66.1134 +          "enabled (effective only if CMSParallelRemarkEnabled)")           \
 66.1135                                                                              \
 66.1136    product(bool, CMSPLABRecordAlways, true,                                  \
 66.1137 -          "Whether to always record survivor space PLAB bdries"             \
 66.1138 -          " (effective only if CMSParallelSurvivorRemarkEnabled)")          \
 66.1139 +          "Always record survivor space PLAB boundaries (effective only "   \
 66.1140 +          "if CMSParallelSurvivorRemarkEnabled)")                           \
 66.1141                                                                              \
 66.1142    product(bool, CMSEdenChunksRecordAlways, true,                            \
 66.1143 -          "Whether to always record eden chunks used for "                  \
 66.1144 -          "the parallel initial mark or remark of eden" )                   \
 66.1145 +          "Always record eden chunks used for the parallel initial mark "   \
 66.1146 +          "or remark of eden")                                              \
 66.1147                                                                              \
 66.1148    product(bool, CMSPrintEdenSurvivorChunks, false,                          \
 66.1149            "Print the eden and the survivor chunks used for the parallel "   \
 66.1150            "initial mark or remark of the eden/survivor spaces")             \
 66.1151                                                                              \
 66.1152    product(bool, CMSConcurrentMTEnabled, true,                               \
 66.1153 -          "Whether multi-threaded concurrent work enabled (if ParNewGC)")   \
 66.1154 +          "Whether multi-threaded concurrent work enabled "                 \
 66.1155 +          "(effective only if ParNewGC)")                                   \
 66.1156                                                                              \
 66.1157    product(bool, CMSPrecleaningEnabled, true,                                \
 66.1158            "Whether concurrent precleaning enabled")                         \
 66.1159 @@ -1762,12 +1783,12 @@
 66.1160            "Maximum number of precleaning iteration passes")                 \
 66.1161                                                                              \
 66.1162    product(uintx, CMSPrecleanNumerator, 2,                                   \
 66.1163 -          "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence"  \
 66.1164 -          " ratio")                                                         \
 66.1165 +          "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence " \
 66.1166 +          "ratio")                                                          \
 66.1167                                                                              \
 66.1168    product(uintx, CMSPrecleanDenominator, 3,                                 \
 66.1169 -          "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence"  \
 66.1170 -          " ratio")                                                         \
 66.1171 +          "CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence " \
 66.1172 +          "ratio")                                                          \
 66.1173                                                                              \
 66.1174    product(bool, CMSPrecleanRefLists1, true,                                 \
 66.1175            "Preclean ref lists during (initial) preclean phase")             \
 66.1176 @@ -1782,7 +1803,7 @@
 66.1177            "Preclean survivors during abortable preclean phase")             \
 66.1178                                                                              \
 66.1179    product(uintx, CMSPrecleanThreshold, 1000,                                \
 66.1180 -          "Don't re-iterate if #dirty cards less than this")                \
 66.1181 +          "Do not iterate again if number of dirty cards is less than this")\
 66.1182                                                                              \
 66.1183    product(bool, CMSCleanOnEnter, true,                                      \
 66.1184            "Clean-on-enter optimization for reducing number of dirty cards") \
 66.1185 @@ -1791,14 +1812,16 @@
 66.1186            "Choose variant (1,2) of verification following remark")          \
 66.1187                                                                              \
 66.1188    product(uintx, CMSScheduleRemarkEdenSizeThreshold, 2*M,                   \
 66.1189 -          "If Eden used is below this value, don't try to schedule remark") \
 66.1190 +          "If Eden size is below this, do not try to schedule remark")      \
 66.1191                                                                              \
 66.1192    product(uintx, CMSScheduleRemarkEdenPenetration, 50,                      \
 66.1193 -          "The Eden occupancy % at which to try and schedule remark pause") \
 66.1194 +          "The Eden occupancy percentage (0-100) at which "                 \
 66.1195 +          "to try and schedule remark pause")                               \
 66.1196                                                                              \
 66.1197    product(uintx, CMSScheduleRemarkSamplingRatio, 5,                         \
 66.1198 -          "Start sampling Eden top at least before yg occupancy reaches"    \
 66.1199 -          " 1/<ratio> of the size at which we plan to schedule remark")     \
 66.1200 +          "Start sampling eden top at least before young gen "              \
 66.1201 +          "occupancy reaches 1/<ratio> of the size at which "               \
 66.1202 +          "we plan to schedule remark")                                     \
 66.1203                                                                              \
 66.1204    product(uintx, CMSSamplingGrain, 16*K,                                    \
 66.1205            "The minimum distance between eden samples for CMS (see above)")  \
 66.1206 @@ -1820,27 +1843,27 @@
 66.1207            "should start a collection cycle")                                \
 66.1208                                                                              \
 66.1209    product(bool, CMSYield, true,                                             \
 66.1210 -          "Yield between steps of concurrent mark & sweep")                 \
 66.1211 +          "Yield between steps of CMS")                                     \
 66.1212                                                                              \
 66.1213    product(uintx, CMSBitMapYieldQuantum, 10*M,                               \
 66.1214 -          "Bitmap operations should process at most this many bits"         \
 66.1215 +          "Bitmap operations should process at most this many bits "        \
 66.1216            "between yields")                                                 \
 66.1217                                                                              \
 66.1218    product(bool, CMSDumpAtPromotionFailure, false,                           \
 66.1219            "Dump useful information about the state of the CMS old "         \
 66.1220 -          " generation upon a promotion failure.")                          \
 66.1221 +          "generation upon a promotion failure")                            \
 66.1222                                                                              \
 66.1223    product(bool, CMSPrintChunksInDump, false,                                \
 66.1224            "In a dump enabled by CMSDumpAtPromotionFailure, include "        \
 66.1225 -          " more detailed information about the free chunks.")              \
 66.1226 +          "more detailed information about the free chunks")                \
 66.1227                                                                              \
 66.1228    product(bool, CMSPrintObjectsInDump, false,                               \
 66.1229            "In a dump enabled by CMSDumpAtPromotionFailure, include "        \
 66.1230 -          " more detailed information about the allocated objects.")        \
 66.1231 +          "more detailed information about the allocated objects")          \
 66.1232                                                                              \
 66.1233    diagnostic(bool, FLSVerifyAllHeapReferences, false,                       \
 66.1234 -          "Verify that all refs across the FLS boundary "                   \
 66.1235 -          " are to valid objects")                                          \
 66.1236 +          "Verify that all references across the FLS boundary "             \
 66.1237 +          "are to valid objects")                                           \
 66.1238                                                                              \
 66.1239    diagnostic(bool, FLSVerifyLists, false,                                   \
 66.1240            "Do lots of (expensive) FreeListSpace verification")              \
 66.1241 @@ -1852,17 +1875,18 @@
 66.1242            "Do lots of (expensive) FLS dictionary verification")             \
 66.1243                                                                              \
 66.1244    develop(bool, VerifyBlockOffsetArray, false,                              \
 66.1245 -          "Do (expensive!) block offset array verification")                \
 66.1246 +          "Do (expensive) block offset array verification")                 \
 66.1247                                                                              \
 66.1248    diagnostic(bool, BlockOffsetArrayUseUnallocatedBlock, false,              \
 66.1249 -          "Maintain _unallocated_block in BlockOffsetArray"                 \
 66.1250 -          " (currently applicable only to CMS collector)")                  \
 66.1251 +          "Maintain _unallocated_block in BlockOffsetArray "                \
 66.1252 +          "(currently applicable only to CMS collector)")                   \
 66.1253                                                                              \
 66.1254    develop(bool, TraceCMSState, false,                                       \
 66.1255            "Trace the state of the CMS collection")                          \
 66.1256                                                                              \
 66.1257    product(intx, RefDiscoveryPolicy, 0,                                      \
 66.1258 -          "Whether reference-based(0) or referent-based(1)")                \
 66.1259 +          "Select type of reference discovery policy: "                     \
 66.1260 +          "reference-based(0) or referent-based(1)")                        \
 66.1261                                                                              \
 66.1262    product(bool, ParallelRefProcEnabled, false,                              \
 66.1263            "Enable parallel reference processing whenever possible")         \
 66.1264 @@ -1890,7 +1914,7 @@
 66.1265            "denotes 'do constant GC cycles'.")                               \
 66.1266                                                                              \
 66.1267    product(bool, UseCMSInitiatingOccupancyOnly, false,                       \
 66.1268 -          "Only use occupancy as a crierion for starting a CMS collection") \
 66.1269 +          "Only use occupancy as a criterion for starting a CMS collection")\
 66.1270                                                                              \
 66.1271    product(uintx, CMSIsTooFullPercentage, 98,                                \
 66.1272            "An absolute ceiling above which CMS will always consider the "   \
 66.1273 @@ -1902,7 +1926,7 @@
 66.1274                                                                              \
 66.1275    notproduct(bool, CMSVerifyReturnedBytes, false,                           \
 66.1276            "Check that all the garbage collected was returned to the "       \
 66.1277 -          "free lists.")                                                    \
 66.1278 +          "free lists")                                                     \
 66.1279                                                                              \
 66.1280    notproduct(bool, ScavengeALot, false,                                     \
 66.1281            "Force scavenge at every Nth exit from the runtime system "       \
 66.1282 @@ -1917,16 +1941,16 @@
 66.1283                                                                              \
 66.1284    product(bool, PrintPromotionFailure, false,                               \
 66.1285            "Print additional diagnostic information following "              \
 66.1286 -          " promotion failure")                                             \
 66.1287 +          "promotion failure")                                              \
 66.1288                                                                              \
 66.1289    notproduct(bool, PromotionFailureALot, false,                             \
 66.1290            "Use promotion failure handling on every youngest generation "    \
 66.1291            "collection")                                                     \
 66.1292                                                                              \
 66.1293    develop(uintx, PromotionFailureALotCount, 1000,                           \
 66.1294 -          "Number of promotion failures occurring at ParGCAllocBuffer"      \
 66.1295 +          "Number of promotion failures occurring at ParGCAllocBuffer "     \
 66.1296            "refill attempts (ParNew) or promotion attempts "                 \
 66.1297 -          "(other young collectors) ")                                      \
 66.1298 +          "(other young collectors)")                                       \
 66.1299                                                                              \
 66.1300    develop(uintx, PromotionFailureALotInterval, 5,                           \
 66.1301            "Total collections between promotion failures alot")              \
 66.1302 @@ -1945,7 +1969,7 @@
 66.1303            "Ratio of hard spins to calls to yield")                          \
 66.1304                                                                              \
 66.1305    develop(uintx, ObjArrayMarkingStride, 512,                                \
 66.1306 -          "Number of ObjArray elements to push onto the marking stack"      \
 66.1307 +          "Number of object array elements to push onto the marking stack " \
 66.1308            "before pushing a continuation entry")                            \
 66.1309                                                                              \
 66.1310    develop(bool, MetadataAllocationFailALot, false,                          \
 66.1311 @@ -1953,7 +1977,7 @@
 66.1312            "MetadataAllocationFailALotInterval")                             \
 66.1313                                                                              \
 66.1314    develop(uintx, MetadataAllocationFailALotInterval, 1000,                  \
 66.1315 -          "metadata allocation failure alot interval")                      \
 66.1316 +          "Metadata allocation failure a lot interval")                     \
 66.1317                                                                              \
 66.1318    develop(bool, MetaDataDeallocateALot, false,                              \
 66.1319            "Deallocation bunches of metadata at intervals controlled by "    \
 66.1320 @@ -1972,7 +1996,7 @@
 66.1321            "Trace virtual space metadata allocations")                       \
 66.1322                                                                              \
 66.1323    notproduct(bool, ExecuteInternalVMTests, false,                           \
 66.1324 -          "Enable execution of internal VM tests.")                         \
 66.1325 +          "Enable execution of internal VM tests")                          \
 66.1326                                                                              \
 66.1327    notproduct(bool, VerboseInternalVMTests, false,                           \
 66.1328            "Turn on logging for internal VM tests.")                         \
 66.1329 @@ -1980,7 +2004,7 @@
 66.1330    product_pd(bool, UseTLAB, "Use thread-local object allocation")           \
 66.1331                                                                              \
 66.1332    product_pd(bool, ResizeTLAB,                                              \
 66.1333 -          "Dynamically resize tlab size for threads")                       \
 66.1334 +          "Dynamically resize TLAB size for threads")                       \
 66.1335                                                                              \
 66.1336    product(bool, ZeroTLAB, false,                                            \
 66.1337            "Zero out the newly created TLAB")                                \
 66.1338 @@ -1992,7 +2016,8 @@
 66.1339            "Print various TLAB related information")                         \
 66.1340                                                                              \
 66.1341    product(bool, TLABStats, true,                                            \
 66.1342 -          "Print various TLAB related information")                         \
 66.1343 +          "Provide more detailed and expensive TLAB statistics "            \
 66.1344 +          "(with PrintTLAB)")                                               \
 66.1345                                                                              \
 66.1346    EMBEDDED_ONLY(product(bool, LowMemoryProtection, true,                    \
 66.1347            "Enable LowMemoryProtection"))                                    \
 66.1348 @@ -2026,14 +2051,14 @@
 66.1349            "Fraction (1/n) of real memory used for initial heap size")       \
 66.1350                                                                              \
 66.1351    develop(uintx, MaxVirtMemFraction, 2,                                     \
 66.1352 -          "Maximum fraction (1/n) of virtual memory used for ergonomically" \
 66.1353 +          "Maximum fraction (1/n) of virtual memory used for ergonomically "\
 66.1354            "determining maximum heap size")                                  \
 66.1355                                                                              \
 66.1356    product(bool, UseAutoGCSelectPolicy, false,                               \
 66.1357            "Use automatic collection selection policy")                      \
 66.1358                                                                              \
 66.1359    product(uintx, AutoGCSelectPauseMillis, 5000,                             \
 66.1360 -          "Automatic GC selection pause threshhold in ms")                  \
 66.1361 +          "Automatic GC selection pause threshold in milliseconds")         \
 66.1362                                                                              \
 66.1363    product(bool, UseAdaptiveSizePolicy, true,                                \
 66.1364            "Use adaptive generation sizing policies")                        \
 66.1365 @@ -2048,7 +2073,7 @@
 66.1366            "Use adaptive young-old sizing policies at major collections")    \
 66.1367                                                                              \
 66.1368    product(bool, UseAdaptiveSizePolicyWithSystemGC, false,                   \
 66.1369 -          "Use statistics from System.GC for adaptive size policy")         \
 66.1370 +          "Include statistics from System.gc() for adaptive size policy")   \
 66.1371                                                                              \
 66.1372    product(bool, UseAdaptiveGCBoundary, false,                               \
 66.1373            "Allow young-old boundary to move")                               \
 66.1374 @@ -2060,16 +2085,16 @@
 66.1375            "Resize the virtual spaces of the young or old generations")      \
 66.1376                                                                              \
 66.1377    product(uintx, AdaptiveSizeThroughPutPolicy, 0,                           \
 66.1378 -          "Policy for changeing generation size for throughput goals")      \
 66.1379 +          "Policy for changing generation size for throughput goals")       \
 66.1380                                                                              \
 66.1381    product(uintx, AdaptiveSizePausePolicy, 0,                                \
 66.1382            "Policy for changing generation size for pause goals")            \
 66.1383                                                                              \
 66.1384    develop(bool, PSAdjustTenuredGenForMinorPause, false,                     \
 66.1385 -          "Adjust tenured generation to achive a minor pause goal")         \
 66.1386 +          "Adjust tenured generation to achieve a minor pause goal")        \
 66.1387                                                                              \
 66.1388    develop(bool, PSAdjustYoungGenForMajorPause, false,                       \
 66.1389 -          "Adjust young generation to achive a major pause goal")           \
 66.1390 +          "Adjust young generation to achieve a major pause goal")          \
 66.1391                                                                              \
 66.1392    product(uintx, AdaptiveSizePolicyInitializingSteps, 20,                   \
 66.1393            "Number of steps where heuristics is used before data is used")   \
 66.1394 @@ -2124,14 +2149,15 @@
 66.1395            "Decay factor to TenuredGenerationSizeIncrement")                 \
 66.1396                                                                              \
 66.1397    product(uintx, MaxGCPauseMillis, max_uintx,                               \
 66.1398 -          "Adaptive size policy maximum GC pause time goal in msec, "       \
 66.1399 -          "or (G1 Only) the max. GC time per MMU time slice")               \
 66.1400 +          "Adaptive size policy maximum GC pause time goal in millisecond, "\
 66.1401 +          "or (G1 Only) the maximum GC time per MMU time slice")            \
 66.1402                                                                              \
 66.1403    product(uintx, GCPauseIntervalMillis, 0,                                  \
 66.1404            "Time slice for MMU specification")                               \
 66.1405                                                                              \
 66.1406    product(uintx, MaxGCMinorPauseMillis, max_uintx,                          \
 66.1407 -          "Adaptive size policy maximum GC minor pause time goal in msec")  \
 66.1408 +          "Adaptive size policy maximum GC minor pause time goal "          \
 66.1409 +          "in millisecond")                                                 \
 66.1410                                                                              \
 66.1411    product(uintx, GCTimeRatio, 99,                                           \
 66.1412            "Adaptive size policy application time to GC time ratio")         \
 66.1413 @@ -2159,8 +2185,8 @@
 66.1414            "before an OutOfMemory error is thrown")                          \
 66.1415                                                                              \
 66.1416    product(uintx, GCTimeLimit, 98,                                           \
 66.1417 -          "Limit of proportion of time spent in GC before an OutOfMemory"   \
 66.1418 -          "error is thrown (used with GCHeapFreeLimit)")                    \
 66.1419 +          "Limit of the proportion of time spent in GC before "             \
 66.1420 +          "an OutOfMemoryError is thrown (used with GCHeapFreeLimit)")      \
 66.1421                                                                              \
 66.1422    product(uintx, GCHeapFreeLimit, 2,                                        \
 66.1423            "Minimum percentage of free space after a full GC before an "     \
 66.1424 @@ -2182,7 +2208,7 @@
 66.1425            "How many fields ahead to prefetch in oop scan (<= 0 means off)") \
 66.1426                                                                              \
 66.1427    diagnostic(bool, VerifySilently, false,                                   \
 66.1428 -          "Don't print print the verification progress")                    \
 66.1429 +          "Do not print the verification progress")                         \
 66.1430                                                                              \
 66.1431    diagnostic(bool, VerifyDuringStartup, false,                              \
 66.1432            "Verify memory system before executing any Java code "            \
 66.1433 @@ -2205,7 +2231,7 @@
 66.1434                                                                              \
 66.1435    diagnostic(bool, DeferInitialCardMark, false,                             \
 66.1436            "When +ReduceInitialCardMarks, explicitly defer any that "        \
 66.1437 -           "may arise from new_pre_store_barrier")                          \
 66.1438 +          "may arise from new_pre_store_barrier")                           \
 66.1439                                                                              \
 66.1440    diagnostic(bool, VerifyRememberedSets, false,                             \
 66.1441            "Verify GC remembered sets")                                      \
 66.1442 @@ -2214,10 +2240,10 @@
 66.1443            "Verify GC object start array if verify before/after")            \
 66.1444                                                                              \
 66.1445    product(bool, DisableExplicitGC, false,                                   \
 66.1446 -          "Tells whether calling System.gc() does a full GC")               \
 66.1447 +          "Ignore calls to System.gc()")                                    \
 66.1448                                                                              \
 66.1449    notproduct(bool, CheckMemoryInitialization, false,                        \
 66.1450 -          "Checks memory initialization")                                   \
 66.1451 +          "Check memory initialization")                                    \
 66.1452                                                                              \
 66.1453    product(bool, CollectGen0First, false,                                    \
 66.1454            "Collect youngest generation before each full GC")                \
 66.1455 @@ -2238,44 +2264,45 @@
 66.1456            "Stride through processors when distributing processes")          \
 66.1457                                                                              \
 66.1458    product(uintx, CMSCoordinatorYieldSleepCount, 10,                         \
 66.1459 -          "number of times the coordinator GC thread will sleep while "     \
 66.1460 +          "Number of times the coordinator GC thread will sleep while "     \
 66.1461            "yielding before giving up and resuming GC")                      \
 66.1462                                                                              \
 66.1463    product(uintx, CMSYieldSleepCount, 0,                                     \
 66.1464 -          "number of times a GC thread (minus the coordinator) "            \
 66.1465 +          "Number of times a GC thread (minus the coordinator) "            \
 66.1466            "will sleep while yielding before giving up and resuming GC")     \
 66.1467                                                                              \
 66.1468    /* gc tracing */                                                          \
 66.1469    manageable(bool, PrintGC, false,                                          \
 66.1470 -          "Print message at garbage collect")                               \
 66.1471 +          "Print message at garbage collection")                            \
 66.1472                                                                              \
 66.1473    manageable(bool, PrintGCDetails, false,                                   \
 66.1474 -          "Print more details at garbage collect")                          \
 66.1475 +          "Print more details at garbage collection")                       \
 66.1476                                                                              \
 66.1477    manageable(bool, PrintGCDateStamps, false,                                \
 66.1478 -          "Print date stamps at garbage collect")                           \
 66.1479 +          "Print date stamps at garbage collection")                        \
 66.1480                                                                              \
 66.1481    manageable(bool, PrintGCTimeStamps, false,                                \
 66.1482 -          "Print timestamps at garbage collect")                            \
 66.1483 +          "Print timestamps at garbage collection")                         \
 66.1484                                                                              \
 66.1485    product(bool, PrintGCTaskTimeStamps, false,                               \
 66.1486            "Print timestamps for individual gc worker thread tasks")         \
 66.1487                                                                              \
 66.1488    develop(intx, ConcGCYieldTimeout, 0,                                      \
 66.1489 -          "If non-zero, assert that GC threads yield within this # of ms.") \
 66.1490 +          "If non-zero, assert that GC threads yield within this "          \
 66.1491 +          "number of milliseconds")                                         \
 66.1492                                                                              \
 66.1493    notproduct(bool, TraceMarkSweep, false,                                   \
 66.1494            "Trace mark sweep")                                               \
 66.1495                                                                              \
 66.1496    product(bool, PrintReferenceGC, false,                                    \
 66.1497            "Print times spent handling reference objects during GC "         \
 66.1498 -          " (enabled only when PrintGCDetails)")                            \
 66.1499 +          "(enabled only when PrintGCDetails)")                             \
 66.1500                                                                              \
 66.1501    develop(bool, TraceReferenceGC, false,                                    \
 66.1502            "Trace handling of soft/weak/final/phantom references")           \
 66.1503                                                                              \
 66.1504    develop(bool, TraceFinalizerRegistration, false,                          \
 66.1505 -         "Trace registration of final references")                          \
 66.1506 +          "Trace registration of final references")                         \
 66.1507                                                                              \
 66.1508    notproduct(bool, TraceScavenge, false,                                    \
 66.1509            "Trace scavenge")                                                 \
 66.1510 @@ -2312,7 +2339,7 @@
 66.1511            "Print heap layout before and after each GC")                     \
 66.1512                                                                              \
 66.1513    product_rw(bool, PrintHeapAtGCExtended, false,                            \
 66.1514 -          "Prints extended information about the layout of the heap "       \
 66.1515 +          "Print extended information about the layout of the heap "        \
 66.1516            "when -XX:+PrintHeapAtGC is set")                                 \
 66.1517                                                                              \
 66.1518    product(bool, PrintHeapAtSIGBREAK, true,                                  \
 66.1519 @@ -2349,45 +2376,45 @@
 66.1520            "Trace actions of the GC task threads")                           \
 66.1521                                                                              \
 66.1522    product(bool, PrintParallelOldGCPhaseTimes, false,                        \
 66.1523 -          "Print the time taken by each parallel old gc phase."             \
 66.1524 -          "PrintGCDetails must also be enabled.")                           \
 66.1525 +          "Print the time taken by each phase in ParallelOldGC "            \
 66.1526 +          "(PrintGCDetails must also be enabled)")                          \
 66.1527                                                                              \
 66.1528    develop(bool, TraceParallelOldGCMarkingPhase, false,                      \
 66.1529 -          "Trace parallel old gc marking phase")                            \
 66.1530 +          "Trace marking phase in ParallelOldGC")                           \
 66.1531                                                                              \
 66.1532    develop(bool, TraceParallelOldGCSummaryPhase, false,                      \
 66.1533 -          "Trace parallel old gc summary phase")                            \
 66.1534 +          "Trace summary phase in ParallelOldGC")                           \
 66.1535                                                                              \
 66.1536    develop(bool, TraceParallelOldGCCompactionPhase, false,                   \
 66.1537 -          "Trace parallel old gc compaction phase")                         \
 66.1538 +          "Trace compaction phase in ParallelOldGC")                        \
 66.1539                                                                              \
 66.1540    develop(bool, TraceParallelOldGCDensePrefix, false,                       \
 66.1541 -          "Trace parallel old gc dense prefix computation")                 \
 66.1542 +          "Trace dense prefix computation for ParallelOldGC")               \
 66.1543                                                                              \
 66.1544    develop(bool, IgnoreLibthreadGPFault, false,                              \
 66.1545            "Suppress workaround for libthread GP fault")                     \
 66.1546                                                                              \
 66.1547    product(bool, PrintJNIGCStalls, false,                                    \
 66.1548 -          "Print diagnostic message when GC is stalled"                     \
 66.1549 +          "Print diagnostic message when GC is stalled "                    \
 66.1550            "by JNI critical section")                                        \
 66.1551                                                                              \
 66.1552    experimental(double, ObjectCountCutOffPercent, 0.5,                       \
 66.1553            "The percentage of the used heap that the instances of a class "  \
 66.1554 -          "must occupy for the class to generate a trace event.")           \
 66.1555 +          "must occupy for the class to generate a trace event")            \
 66.1556                                                                              \
 66.1557    /* GC log rotation setting */                                             \
 66.1558                                                                              \
 66.1559    product(bool, UseGCLogFileRotation, false,                                \
 66.1560 -          "Prevent large gclog file for long running app. "                 \
 66.1561 -          "Requires -Xloggc:<filename>")                                    \
 66.1562 +          "Rotate gclog files (for long running applications). It requires "\
 66.1563 +          "-Xloggc:<filename>")                                             \
 66.1564                                                                              \
 66.1565    product(uintx, NumberOfGCLogFiles, 0,                                     \
 66.1566 -          "Number of gclog files in rotation, "                             \
 66.1567 -          "Default: 0, no rotation")                                        \
 66.1568 +          "Number of gclog files in rotation "                              \
 66.1569 +          "(default: 0, no rotation)")                                      \
 66.1570                                                                              \
 66.1571    product(uintx, GCLogFileSize, 0,                                          \
 66.1572 -          "GC log file size, Default: 0 bytes, no rotation "                \
 66.1573 -          "Only valid with UseGCLogFileRotation")                           \
 66.1574 +          "GC log file size (default: 0 bytes, no rotation). "              \
 66.1575 +          "It requires UseGCLogFileRotation")                               \
 66.1576                                                                              \
 66.1577    /* JVMTI heap profiling */                                                \
 66.1578                                                                              \
 66.1579 @@ -2464,40 +2491,40 @@
 66.1580            "Generate range checks for array accesses")                       \
 66.1581                                                                              \
 66.1582    develop_pd(bool, ImplicitNullChecks,                                      \
 66.1583 -          "generate code for implicit null checks")                         \
 66.1584 +          "Generate code for implicit null checks")                         \
 66.1585                                                                              \
 66.1586    product(bool, PrintSafepointStatistics, false,                            \
 66.1587 -          "print statistics about safepoint synchronization")               \
 66.1588 +          "Print statistics about safepoint synchronization")               \
 66.1589                                                                              \
 66.1590    product(intx, PrintSafepointStatisticsCount, 300,                         \
 66.1591 -          "total number of safepoint statistics collected "                 \
 66.1592 +          "Total number of safepoint statistics collected "                 \
 66.1593            "before printing them out")                                       \
 66.1594                                                                              \
 66.1595    product(intx, PrintSafepointStatisticsTimeout,  -1,                       \
 66.1596 -          "print safepoint statistics only when safepoint takes"            \
 66.1597 -          " more than PrintSafepointSatisticsTimeout in millis")            \
 66.1598 +          "Print safepoint statistics only when safepoint takes "           \
 66.1599 +          "more than PrintSafepointSatisticsTimeout in millis")             \
 66.1600                                                                              \
 66.1601    product(bool, TraceSafepointCleanupTime, false,                           \
 66.1602 -          "print the break down of clean up tasks performed during"         \
 66.1603 -          " safepoint")                                                     \
 66.1604 +          "Print the break down of clean up tasks performed during "        \
 66.1605 +          "safepoint")                                                      \
 66.1606                                                                              \
 66.1607    product(bool, Inline, true,                                               \
 66.1608 -          "enable inlining")                                                \
 66.1609 +          "Enable inlining")                                                \
 66.1610                                                                              \
 66.1611    product(bool, ClipInlining, true,                                         \
 66.1612 -          "clip inlining if aggregate method exceeds DesiredMethodLimit")   \
 66.1613 +          "Clip inlining if aggregate method exceeds DesiredMethodLimit")   \
 66.1614                                                                              \
 66.1615    develop(bool, UseCHA, true,                                               \
 66.1616 -          "enable CHA")                                                     \
 66.1617 +          "Enable CHA")                                                     \
 66.1618                                                                              \
 66.1619    product(bool, UseTypeProfile, true,                                       \
 66.1620            "Check interpreter profile for historically monomorphic calls")   \
 66.1621                                                                              \
 66.1622    notproduct(bool, TimeCompiler, false,                                     \
 66.1623 -          "time the compiler")                                              \
 66.1624 +          "Time the compiler")                                              \
 66.1625                                                                              \
 66.1626    diagnostic(bool, PrintInlining, false,                                    \
 66.1627 -          "prints inlining optimizations")                                  \
 66.1628 +          "Print inlining optimizations")                                   \
 66.1629                                                                              \
 66.1630    product(bool, UsePopCountInstruction, false,                              \
 66.1631            "Use population count instruction")                               \
 66.1632 @@ -2509,57 +2536,59 @@
 66.1633            "Print when methods are replaced do to recompilation")            \
 66.1634                                                                              \
 66.1635    develop(bool, PrintMethodFlushing, false,                                 \
 66.1636 -          "print the nmethods being flushed")                               \
 66.1637 +          "Print the nmethods being flushed")                               \
 66.1638                                                                              \
 66.1639    develop(bool, UseRelocIndex, false,                                       \
 66.1640 -         "use an index to speed random access to relocations")              \
 66.1641 +          "Use an index to speed random access to relocations")             \
 66.1642                                                                              \
 66.1643    develop(bool, StressCodeBuffers, false,                                   \
 66.1644 -         "Exercise code buffer expansion and other rare state changes")     \
 66.1645 +          "Exercise code buffer expansion and other rare state changes")    \
 66.1646                                                                              \
 66.1647    diagnostic(bool, DebugNonSafepoints, trueInDebug,                         \
 66.1648 -         "Generate extra debugging info for non-safepoints in nmethods")    \
 66.1649 +          "Generate extra debugging information for non-safepoints in "     \
 66.1650 +          "nmethods")                                                       \
 66.1651                                                                              \
 66.1652    product(bool, PrintVMOptions, false,                                      \
 66.1653 -         "Print flags that appeared on the command line")                   \
 66.1654 +          "Print flags that appeared on the command line")                  \
 66.1655                                                                              \
 66.1656    product(bool, IgnoreUnrecognizedVMOptions, false,                         \
 66.1657 -         "Ignore unrecognized VM options")                                  \
 66.1658 +          "Ignore unrecognized VM options")                                 \
 66.1659                                                                              \
 66.1660    product(bool, PrintCommandLineFlags, false,                               \
 66.1661 -         "Print flags specified on command line or set by ergonomics")      \
 66.1662 +          "Print flags specified on command line or set by ergonomics")     \
 66.1663                                                                              \
 66.1664    product(bool, PrintFlagsInitial, false,                                   \
 66.1665 -         "Print all VM flags before argument processing and exit VM")       \
 66.1666 +          "Print all VM flags before argument processing and exit VM")      \
 66.1667                                                                              \
 66.1668    product(bool, PrintFlagsFinal, false,                                     \
 66.1669 -         "Print all VM flags after argument and ergonomic processing")      \
 66.1670 +          "Print all VM flags after argument and ergonomic processing")     \
 66.1671                                                                              \
 66.1672    notproduct(bool, PrintFlagsWithComments, false,                           \
 66.1673 -         "Print all VM flags with default values and descriptions and exit")\
 66.1674 +          "Print all VM flags with default values and descriptions and "    \
 66.1675 +          "exit")                                                           \
 66.1676                                                                              \
 66.1677    diagnostic(bool, SerializeVMOutput, true,                                 \
 66.1678 -         "Use a mutex to serialize output to tty and LogFile")              \
 66.1679 +          "Use a mutex to serialize output to tty and LogFile")             \
 66.1680                                                                              \
 66.1681    diagnostic(bool, DisplayVMOutput, true,                                   \
 66.1682 -         "Display all VM output on the tty, independently of LogVMOutput")  \
 66.1683 +          "Display all VM output on the tty, independently of LogVMOutput") \
 66.1684                                                                              \
 66.1685    diagnostic(bool, LogVMOutput, false,                                      \
 66.1686 -         "Save VM output to LogFile")                                       \
 66.1687 +          "Save VM output to LogFile")                                      \
 66.1688                                                                              \
 66.1689    diagnostic(ccstr, LogFile, NULL,                                          \
 66.1690 -         "If LogVMOutput or LogCompilation is on, save VM output to "       \
 66.1691 -         "this file [default: ./hotspot_pid%p.log] (%p replaced with pid)") \
 66.1692 +          "If LogVMOutput or LogCompilation is on, save VM output to "      \
 66.1693 +          "this file [default: ./hotspot_pid%p.log] (%p replaced with pid)")\
 66.1694                                                                              \
 66.1695    product(ccstr, ErrorFile, NULL,                                           \
 66.1696 -         "If an error occurs, save the error data to this file "            \
 66.1697 -         "[default: ./hs_err_pid%p.log] (%p replaced with pid)")            \
 66.1698 +          "If an error occurs, save the error data to this file "           \
 66.1699 +          "[default: ./hs_err_pid%p.log] (%p replaced with pid)")           \
 66.1700                                                                              \
 66.1701    product(bool, DisplayVMOutputToStderr, false,                             \
 66.1702 -         "If DisplayVMOutput is true, display all VM output to stderr")     \
 66.1703 +          "If DisplayVMOutput is true, display all VM output to stderr")    \
 66.1704                                                                              \
 66.1705    product(bool, DisplayVMOutputToStdout, false,                             \
 66.1706 -         "If DisplayVMOutput is true, display all VM output to stdout")     \
 66.1707 +          "If DisplayVMOutput is true, display all VM output to stdout")    \
 66.1708                                                                              \
 66.1709    product(bool, UseHeavyMonitors, false,                                    \
 66.1710            "use heavyweight instead of lightweight Java monitors")           \
 66.1711 @@ -2583,7 +2612,7 @@
 66.1712                                                                              \
 66.1713    notproduct(ccstr, AbortVMOnExceptionMessage, NULL,                        \
 66.1714            "Call fatal if the exception pointed by AbortVMOnException "      \
 66.1715 -          "has this message.")                                              \
 66.1716 +          "has this message")                                               \
 66.1717                                                                              \
 66.1718    develop(bool, DebugVtables, false,                                        \
 66.1719            "add debugging code to vtable dispatch")                          \
 66.1720 @@ -2650,29 +2679,29 @@
 66.1721                                                                              \
 66.1722    /* statistics */                                                          \
 66.1723    develop(bool, CountCompiledCalls, false,                                  \
 66.1724 -          "counts method invocations")                                      \
 66.1725 +          "Count method invocations")                                       \
 66.1726                                                                              \
 66.1727    notproduct(bool, CountRuntimeCalls, false,                                \
 66.1728 -          "counts VM runtime calls")                                        \
 66.1729 +          "Count VM runtime calls")                                         \
 66.1730                                                                              \
 66.1731    develop(bool, CountJNICalls, false,                                       \
 66.1732 -          "counts jni method invocations")                                  \
 66.1733 +          "Count jni method invocations")                                   \
 66.1734                                                                              \
 66.1735    notproduct(bool, CountJVMCalls, false,                                    \
 66.1736 -          "counts jvm method invocations")                                  \
 66.1737 +          "Count jvm method invocations")                                   \
 66.1738                                                                              \
 66.1739    notproduct(bool, CountRemovableExceptions, false,                         \
 66.1740 -          "count exceptions that could be replaced by branches due to "     \
 66.1741 +          "Count exceptions that could be replaced by branches due to "     \
 66.1742            "inlining")                                                       \
 66.1743                                                                              \
 66.1744    notproduct(bool, ICMissHistogram, false,                                  \
 66.1745 -          "produce histogram of IC misses")                                 \
 66.1746 +          "Produce histogram of IC misses")                                 \
 66.1747                                                                              \
 66.1748    notproduct(bool, PrintClassStatistics, false,                             \
 66.1749 -          "prints class statistics at end of run")                          \
 66.1750 +          "Print class statistics at end of run")                           \
 66.1751                                                                              \
 66.1752    notproduct(bool, PrintMethodStatistics, false,                            \
 66.1753 -          "prints method statistics at end of run")                         \
 66.1754 +          "Print method statistics at end of run")                          \
 66.1755                                                                              \
 66.1756    /* interpreter */                                                         \
 66.1757    develop(bool, ClearInterpreterLocals, false,                              \
 66.1758 @@ -2686,7 +2715,7 @@
 66.1759            "Rewrite frequently used bytecode pairs into a single bytecode")  \
 66.1760                                                                              \
 66.1761    diagnostic(bool, PrintInterpreter, false,                                 \
 66.1762 -          "Prints the generated interpreter code")                          \
 66.1763 +          "Print the generated interpreter code")                           \
 66.1764                                                                              \
 66.1765    product(bool, UseInterpreter, true,                                       \
 66.1766            "Use interpreter for non-compiled methods")                       \
 66.1767 @@ -2704,8 +2733,8 @@
 66.1768            "Use fast method entry code for accessor methods")                \
 66.1769                                                                              \
 66.1770    product_pd(bool, UseOnStackReplacement,                                   \
 66.1771 -           "Use on stack replacement, calls runtime if invoc. counter "     \
 66.1772 -           "overflows in loop")                                             \
 66.1773 +          "Use on stack replacement, calls runtime if invoc. counter "      \
 66.1774 +          "overflows in loop")                                              \
 66.1775                                                                              \
 66.1776    notproduct(bool, TraceOnStackReplacement, false,                          \
 66.1777            "Trace on stack replacement")                                     \
 66.1778 @@ -2753,10 +2782,10 @@
 66.1779            "Trace frequency based inlining")                                 \
 66.1780                                                                              \
 66.1781    develop_pd(bool, InlineIntrinsics,                                        \
 66.1782 -           "Inline intrinsics that can be statically resolved")             \
 66.1783 +          "Inline intrinsics that can be statically resolved")              \
 66.1784                                                                              \
 66.1785    product_pd(bool, ProfileInterpreter,                                      \
 66.1786 -           "Profile at the bytecode level during interpretation")           \
 66.1787 +          "Profile at the bytecode level during interpretation")            \
 66.1788                                                                              \
 66.1789    develop_pd(bool, ProfileTraps,                                            \
 66.1790            "Profile deoptimization traps at the bytecode level")             \
 66.1791 @@ -2766,7 +2795,7 @@
 66.1792            "CompileThreshold) before using the method's profile")            \
 66.1793                                                                              \
 66.1794    develop(bool, PrintMethodData, false,                                     \
 66.1795 -           "Print the results of +ProfileInterpreter at end of run")        \
 66.1796 +          "Print the results of +ProfileInterpreter at end of run")         \
 66.1797                                                                              \
 66.1798    develop(bool, VerifyDataPointer, trueInDebug,                             \
 66.1799            "Verify the method data pointer during interpreter profiling")    \
 66.1800 @@ -2781,7 +2810,7 @@
 66.1801                                                                              \
 66.1802    /* compilation */                                                         \
 66.1803    product(bool, UseCompiler, true,                                          \
 66.1804 -          "use compilation")                                                \
 66.1805 +          "Use Just-In-Time compilation")                                   \
 66.1806                                                                              \
 66.1807    develop(bool, TraceCompilationPolicy, false,                              \
 66.1808            "Trace compilation policy")                                       \
 66.1809 @@ -2790,20 +2819,21 @@
 66.1810            "Time the compilation policy")                                    \
 66.1811                                                                              \
 66.1812    product(bool, UseCounterDecay, true,                                      \
 66.1813 -           "adjust recompilation counters")                                 \
 66.1814 +          "Adjust recompilation counters")                                  \
 66.1815                                                                              \
 66.1816    develop(intx, CounterHalfLifeTime,    30,                                 \
 66.1817 -          "half-life time of invocation counters (in secs)")                \
 66.1818 +          "Half-life time of invocation counters (in seconds)")             \
 66.1819                                                                              \
 66.1820    develop(intx, CounterDecayMinIntervalLength,   500,                       \
 66.1821 -          "Min. ms. between invocation of CounterDecay")                    \
 66.1822 +          "The minimum interval (in milliseconds) between invocation of "   \
 66.1823 +          "CounterDecay")                                                   \
 66.1824                                                                              \
 66.1825    product(bool, AlwaysCompileLoopMethods, false,                            \
 66.1826 -          "when using recompilation, never interpret methods "              \
 66.1827 +          "When using recompilation, never interpret methods "              \
 66.1828            "containing loops")                                               \
 66.1829                                                                              \
 66.1830    product(bool, DontCompileHugeMethods, true,                               \
 66.1831 -          "don't compile methods > HugeMethodLimit")                        \
 66.1832 +          "Do not compile methods > HugeMethodLimit")                       \
 66.1833                                                                              \
 66.1834    /* Bytecode escape analysis estimation. */                                \
 66.1835    product(bool, EstimateArgEscape, true,                                    \
 66.1836 @@ -2813,10 +2843,10 @@
 66.1837            "How much tracing to do of bytecode escape analysis estimates")   \
 66.1838                                                                              \
 66.1839    product(intx, MaxBCEAEstimateLevel, 5,                                    \
 66.1840 -          "Maximum number of nested calls that are analyzed by BC EA.")     \
 66.1841 +          "Maximum number of nested calls that are analyzed by BC EA")      \
 66.1842                                                                              \
 66.1843    product(intx, MaxBCEAEstimateSize, 150,                                   \
 66.1844 -          "Maximum bytecode size of a method to be analyzed by BC EA.")     \
 66.1845 +          "Maximum bytecode size of a method to be analyzed by BC EA")      \
 66.1846                                                                              \
 66.1847    product(intx,  AllocatePrefetchStyle, 1,                                  \
 66.1848            "0 = no prefetch, "                                               \
 66.1849 @@ -2831,7 +2861,8 @@
 66.1850            "Number of lines to prefetch ahead of array allocation pointer")  \
 66.1851                                                                              \
 66.1852    product(intx,  AllocateInstancePrefetchLines, 1,                          \
 66.1853 -          "Number of lines to prefetch ahead of instance allocation pointer") \
 66.1854 +          "Number of lines to prefetch ahead of instance allocation "       \
 66.1855 +          "pointer")                                                        \
 66.1856                                                                              \
 66.1857    product(intx,  AllocatePrefetchStepSize, 16,                              \
 66.1858            "Step size in bytes of sequential prefetch instructions")         \
 66.1859 @@ -2851,8 +2882,8 @@
 66.1860            "(0 means off)")                                                  \
 66.1861                                                                              \
 66.1862    product(intx, MaxJavaStackTraceDepth, 1024,                               \
 66.1863 -          "Max. no. of lines in the stack trace for Java exceptions "       \
 66.1864 -          "(0 means all)")                                                  \
 66.1865 +          "The maximum number of lines in the stack trace for Java "        \
 66.1866 +          "exceptions (0 means all)")                                       \
 66.1867                                                                              \
 66.1868    NOT_EMBEDDED(diagnostic(intx, GuaranteedSafepointInterval, 1000,          \
 66.1869            "Guarantee a safepoint (at least) every so many milliseconds "    \
 66.1870 @@ -2876,10 +2907,10 @@
 66.1871            "result in more aggressive sweeping")                             \
 66.1872                                                                              \
 66.1873    notproduct(bool, LogSweeper, false,                                       \
 66.1874 -            "Keep a ring buffer of sweeper activity")                       \
 66.1875 +          "Keep a ring buffer of sweeper activity")                         \
 66.1876                                                                              \
 66.1877    notproduct(intx, SweeperLogEntries, 1024,                                 \
 66.1878 -            "Number of records in the ring buffer of sweeper activity")     \
 66.1879 +          "Number of records in the ring buffer of sweeper activity")       \
 66.1880                                                                              \
 66.1881    notproduct(intx, MemProfilingInterval, 500,                               \
 66.1882            "Time between each invocation of the MemProfiler")                \
 66.1883 @@ -2922,34 +2953,35 @@
 66.1884            "less than this")                                                 \
 66.1885                                                                              \
 66.1886    product(intx, MaxInlineSize, 35,                                          \
 66.1887 -          "maximum bytecode size of a method to be inlined")                \
 66.1888 +          "The maximum bytecode size of a method to be inlined")            \
 66.1889                                                                              \
 66.1890    product_pd(intx, FreqInlineSize,                                          \
 66.1891 -          "maximum bytecode size of a frequent method to be inlined")       \
 66.1892 +          "The maximum bytecode size of a frequent method to be inlined")   \
 66.1893                                                                              \
 66.1894    product(intx, MaxTrivialSize, 6,                                          \
 66.1895 -          "maximum bytecode size of a trivial method to be inlined")        \
 66.1896 +          "The maximum bytecode size of a trivial method to be inlined")    \
 66.1897                                                                              \
 66.1898    product(intx, MinInliningThreshold, 250,                                  \
 66.1899 -          "min. invocation count a method needs to have to be inlined")     \
 66.1900 +          "The minimum invocation count a method needs to have to be "      \
 66.1901 +          "inlined")                                                        \
 66.1902                                                                              \
 66.1903    develop(intx, MethodHistogramCutoff, 100,                                 \
 66.1904 -          "cutoff value for method invoc. histogram (+CountCalls)")         \
 66.1905 +          "The cutoff value for method invocation histogram (+CountCalls)") \
 66.1906                                                                              \
 66.1907    develop(intx, ProfilerNumberOfInterpretedMethods, 25,                     \
 66.1908 -          "# of interpreted methods to show in profile")                    \
 66.1909 +          "Number of interpreted methods to show in profile")               \
 66.1910                                                                              \
 66.1911    develop(intx, ProfilerNumberOfCompiledMethods, 25,                        \
 66.1912 -          "# of compiled methods to show in profile")                       \
 66.1913 +          "Number of compiled methods to show in profile")                  \
 66.1914                                                                              \
 66.1915    develop(intx, ProfilerNumberOfStubMethods, 25,                            \
 66.1916 -          "# of stub methods to show in profile")                           \
 66.1917 +          "Number of stub methods to show in profile")                      \
 66.1918                                                                              \
 66.1919    develop(intx, ProfilerNumberOfRuntimeStubNodes, 25,                       \
 66.1920 -          "# of runtime stub nodes to show in profile")                     \
 66.1921 +          "Number of runtime stub nodes to show in profile")                \
 66.1922                                                                              \
 66.1923    product(intx, ProfileIntervalsTicks, 100,                                 \
 66.1924 -          "# of ticks between printing of interval profile "                \
 66.1925 +          "Number of ticks between printing of interval profile "           \
 66.1926            "(+ProfileIntervals)")                                            \
 66.1927                                                                              \
 66.1928    notproduct(intx, ScavengeALotInterval,     1,                             \
 66.1929 @@ -2970,7 +3002,7 @@
 66.1930                                                                              \
 66.1931    develop(intx, MinSleepInterval,     1,                                    \
 66.1932            "Minimum sleep() interval (milliseconds) when "                   \
 66.1933 -          "ConvertSleepToYield is off (used for SOLARIS)")                  \
 66.1934 +          "ConvertSleepToYield is off (used for Solaris)")                  \
 66.1935                                                                              \
 66.1936    develop(intx, ProfilerPCTickThreshold,    15,                             \
 66.1937            "Number of ticks in a PC buckets to be a hotspot")                \
 66.1938 @@ -2985,22 +3017,22 @@
 66.1939            "Mark nmethods non-entrant at registration")                      \
 66.1940                                                                              \
 66.1941    diagnostic(intx, MallocVerifyInterval,     0,                             \
 66.1942 -          "if non-zero, verify C heap after every N calls to "              \
 66.1943 +          "If non-zero, verify C heap after every N calls to "              \
 66.1944            "malloc/realloc/free")                                            \
 66.1945                                                                              \
 66.1946    diagnostic(intx, MallocVerifyStart,     0,                                \
 66.1947 -          "if non-zero, start verifying C heap after Nth call to "          \
 66.1948 +          "If non-zero, start verifying C heap after Nth call to "          \
 66.1949            "malloc/realloc/free")                                            \
 66.1950                                                                              \
 66.1951    diagnostic(uintx, MallocMaxTestWords,     0,                              \
 66.1952 -          "if non-zero, max # of Words that malloc/realloc can allocate "   \
 66.1953 -          "(for testing only)")                                             \
 66.1954 +          "If non-zero, maximum number of words that malloc/realloc can "   \
 66.1955 +          "allocate (for testing only)")                                    \
 66.1956                                                                              \
 66.1957    product(intx, TypeProfileWidth,     2,                                    \
 66.1958 -          "number of receiver types to record in call/cast profile")        \
 66.1959 +          "Number of receiver types to record in call/cast profile")        \
 66.1960                                                                              \
 66.1961    develop(intx, BciProfileWidth,      2,                                    \
 66.1962 -          "number of return bci's to record in ret profile")                \
 66.1963 +          "Number of return bci's to record in ret profile")                \
 66.1964                                                                              \
 66.1965    product(intx, PerMethodRecompilationCutoff, 400,                          \
 66.1966            "After recompiling N times, stay in the interpreter (-1=>'Inf')") \
 66.1967 @@ -3067,7 +3099,7 @@
 66.1968            "Percentage of Eden that can be wasted")                          \
 66.1969                                                                              \
 66.1970    product(uintx, TLABRefillWasteFraction,    64,                            \
 66.1971 -          "Max TLAB waste at a refill (internal fragmentation)")            \
 66.1972 +          "Maximum TLAB waste at a refill (internal fragmentation)")        \
 66.1973                                                                              \
 66.1974    product(uintx, TLABWasteIncrement,    4,                                  \
 66.1975            "Increment allowed waste at slow allocation")                     \
 66.1976 @@ -3076,7 +3108,7 @@
 66.1977            "Ratio of eden/survivor space size")                              \
 66.1978                                                                              \
 66.1979    product(uintx, NewRatio, 2,                                               \
 66.1980 -          "Ratio of new/old generation sizes")                              \
 66.1981 +          "Ratio of old/new generation sizes")                              \
 66.1982                                                                              \
 66.1983    product_pd(uintx, NewSizeThreadIncrease,                                  \
 66.1984            "Additional size added to desired new generation size per "       \
 66.1985 @@ -3093,28 +3125,30 @@
 66.1986            "class pointers are used")                                        \
 66.1987                                                                              \
 66.1988    product(uintx, MinHeapFreeRatio,    40,                                   \
 66.1989 -          "Min percentage of heap free after GC to avoid expansion")        \
 66.1990 +          "The minimum percentage of heap free after GC to avoid expansion")\
 66.1991                                                                              \
 66.1992    product(uintx, MaxHeapFreeRatio,    70,                                   \
 66.1993 -          "Max percentage of heap free after GC to avoid shrinking")        \
 66.1994 +          "The maximum percentage of heap free after GC to avoid shrinking")\
 66.1995                                                                              \
 66.1996    product(intx, SoftRefLRUPolicyMSPerMB, 1000,                              \
 66.1997            "Number of milliseconds per MB of free space in the heap")        \
 66.1998                                                                              \
 66.1999    product(uintx, MinHeapDeltaBytes, ScaleForWordSize(128*K),                \
 66.2000 -          "Min change in heap space due to GC (in bytes)")                  \
 66.2001 +          "The minimum change in heap space due to GC (in bytes)")          \
 66.2002                                                                              \
 66.2003    product(uintx, MinMetaspaceExpansion, ScaleForWordSize(256*K),            \
 66.2004 -          "Min expansion of Metaspace (in bytes)")                          \
 66.2005 +          "The minimum expansion of Metaspace (in bytes)")                  \
 66.2006                                                                              \
 66.2007    product(uintx, MinMetaspaceFreeRatio,    40,                              \
 66.2008 -          "Min percentage of Metaspace free after GC to avoid expansion")   \
 66.2009 +          "The minimum percentage of Metaspace free after GC to avoid "     \
 66.2010 +          "expansion")                                                      \
 66.2011                                                                              \
 66.2012    product(uintx, MaxMetaspaceFreeRatio,    70,                              \
 66.2013 -          "Max percentage of Metaspace free after GC to avoid shrinking")   \
 66.2014 +          "The maximum percentage of Metaspace free after GC to avoid "     \
 66.2015 +          "shrinking")                                                      \
 66.2016                                                                              \
 66.2017    product(uintx, MaxMetaspaceExpansion, ScaleForWordSize(4*M),              \
 66.2018 -          "Max expansion of Metaspace without full GC (in bytes)")          \
 66.2019 +          "The maximum expansion of Metaspace without full GC (in bytes)")  \
 66.2020                                                                              \
 66.2021    product(uintx, QueuedAllocationWarningCount, 0,                           \
 66.2022            "Number of times an allocation that queues behind a GC "          \
 66.2023 @@ -3136,13 +3170,14 @@
 66.2024            "Desired percentage of survivor space used after scavenge")       \
 66.2025                                                                              \
 66.2026    product(uintx, MarkSweepDeadRatio,     5,                                 \
 66.2027 -          "Percentage (0-100) of the old gen allowed as dead wood."         \
 66.2028 -          "Serial mark sweep treats this as both the min and max value."    \
 66.2029 -          "CMS uses this value only if it falls back to mark sweep."        \
 66.2030 -          "Par compact uses a variable scale based on the density of the"   \
 66.2031 -          "generation and treats this as the max value when the heap is"    \
 66.2032 -          "either completely full or completely empty.  Par compact also"   \
 66.2033 -          "has a smaller default value; see arguments.cpp.")                \
 66.2034 +          "Percentage (0-100) of the old gen allowed as dead wood. "        \
 66.2035 +          "Serial mark sweep treats this as both the minimum and maximum "  \
 66.2036 +          "value. "                                                         \
 66.2037 +          "CMS uses this value only if it falls back to mark sweep. "       \
 66.2038 +          "Par compact uses a variable scale based on the density of the "  \
 66.2039 +          "generation and treats this as the maximum value when the heap "  \
 66.2040 +          "is either completely full or completely empty.  Par compact "    \
 66.2041 +          "also has a smaller default value; see arguments.cpp.")           \
 66.2042                                                                              \
 66.2043    product(uintx, MarkSweepAlwaysCompactCount,     4,                        \
 66.2044            "How often should we fully compact the heap (ignoring the dead "  \
 66.2045 @@ -3161,27 +3196,27 @@
 66.2046            "Census for CMS' FreeListSpace")                                  \
 66.2047                                                                              \
 66.2048    develop(uintx, GCExpandToAllocateDelayMillis, 0,                          \
 66.2049 -          "Delay in ms between expansion and allocation")                   \
 66.2050 +          "Delay between expansion and allocation (in milliseconds)")       \
 66.2051                                                                              \
 66.2052    develop(uintx, GCWorkerDelayMillis, 0,                                    \
 66.2053 -          "Delay in ms in scheduling GC workers")                           \
 66.2054 +          "Delay in scheduling GC workers (in milliseconds)")               \
 66.2055                                                                              \
 66.2056    product(intx, DeferThrSuspendLoopCount,     4000,                         \
 66.2057            "(Unstable) Number of times to iterate in safepoint loop "        \
 66.2058 -          " before blocking VM threads ")                                   \
 66.2059 +          "before blocking VM threads ")                                    \
 66.2060                                                                              \
 66.2061    product(intx, DeferPollingPageLoopCount,     -1,                          \
 66.2062            "(Unsafe,Unstable) Number of iterations in safepoint loop "       \
 66.2063            "before changing safepoint polling page to RO ")                  \
 66.2064                                                                              \
 66.2065 -  product(intx, SafepointSpinBeforeYield, 2000,  "(Unstable)")              \
 66.2066 +  product(intx, SafepointSpinBeforeYield, 2000, "(Unstable)")               \
 66.2067                                                                              \
 66.2068    product(bool, PSChunkLargeArrays, true,                                   \
 66.2069 -          "true: process large arrays in chunks")                           \
 66.2070 +          "Process large arrays in chunks")                                 \
 66.2071                                                                              \
 66.2072    product(uintx, GCDrainStackTargetSize, 64,                                \
 66.2073 -          "how many entries we'll try to leave on the stack during "        \
 66.2074 -          "parallel GC")                                                    \
 66.2075 +          "Number of entries we will try to leave on the stack "            \
 66.2076 +          "during parallel gc")                                             \
 66.2077                                                                              \
 66.2078    /* stack parameters */                                                    \
 66.2079    product_pd(intx, StackYellowPages,                                        \
 66.2080 @@ -3191,8 +3226,8 @@
 66.2081            "Number of red zone (unrecoverable overflows) pages")             \
 66.2082                                                                              \
 66.2083    product_pd(intx, StackShadowPages,                                        \
 66.2084 -          "Number of shadow zone (for overflow checking) pages"             \
 66.2085 -          " this should exceed the depth of the VM and native call stack")  \
 66.2086 +          "Number of shadow zone (for overflow checking) pages "            \
 66.2087 +          "this should exceed the depth of the VM and native call stack")   \
 66.2088                                                                              \
 66.2089    product_pd(intx, ThreadStackSize,                                         \
 66.2090            "Thread Stack Size (in Kbytes)")                                  \
 66.2091 @@ -3232,16 +3267,16 @@
 66.2092            "Reserved code cache size (in bytes) - maximum code cache size")  \
 66.2093                                                                              \
 66.2094    product(uintx, CodeCacheMinimumFreeSpace, 500*K,                          \
 66.2095 -          "When less than X space left, we stop compiling.")                \
 66.2096 +          "When less than X space left, we stop compiling")                 \
 66.2097                                                                              \
 66.2098    product_pd(uintx, CodeCacheExpansionSize,                                 \
 66.2099            "Code cache expansion size (in bytes)")                           \
 66.2100                                                                              \
 66.2101    develop_pd(uintx, CodeCacheMinBlockLength,                                \
 66.2102 -          "Minimum number of segments in a code cache block.")              \
 66.2103 +          "Minimum number of segments in a code cache block")               \
 66.2104                                                                              \
 66.2105    notproduct(bool, ExitOnFullCodeCache, false,                              \
 66.2106 -          "Exit the VM if we fill the code cache.")                         \
 66.2107 +          "Exit the VM if we fill the code cache")                          \
 66.2108                                                                              \
 66.2109    product(bool, UseCodeCacheFlushing, true,                                 \
 66.2110            "Attempt to clean the code cache before shutting off compiler")   \
 66.2111 @@ -3252,31 +3287,31 @@
 66.2112            "switch")                                                         \
 66.2113                                                                              \
 66.2114    develop(intx, StopInterpreterAt, 0,                                       \
 66.2115 -          "Stops interpreter execution at specified bytecode number")       \
 66.2116 +          "Stop interpreter execution at specified bytecode number")        \
 66.2117                                                                              \
 66.2118    develop(intx, TraceBytecodesAt, 0,                                        \
 66.2119 -          "Traces bytecodes starting with specified bytecode number")       \
 66.2120 +          "Trace bytecodes starting with specified bytecode number")        \
 66.2121                                                                              \
 66.2122    /* compiler interface */                                                  \
 66.2123    develop(intx, CIStart, 0,                                                 \
 66.2124 -          "the id of the first compilation to permit")                      \
 66.2125 +          "The id of the first compilation to permit")                      \
 66.2126                                                                              \
 66.2127    develop(intx, CIStop,    -1,                                              \
 66.2128 -          "the id of the last compilation to permit")                       \
 66.2129 +          "The id of the last compilation to permit")                       \
 66.2130                                                                              \
 66.2131    develop(intx, CIStartOSR,     0,                                          \
 66.2132 -          "the id of the first osr compilation to permit "                  \
 66.2133 +          "The id of the first osr compilation to permit "                  \
 66.2134            "(CICountOSR must be on)")                                        \
 66.2135                                                                              \
 66.2136    develop(intx, CIStopOSR,    -1,                                           \
 66.2137 -          "the id of the last osr compilation to permit "                   \
 66.2138 +          "The id of the last osr compilation to permit "                   \
 66.2139            "(CICountOSR must be on)")                                        \
 66.2140                                                                              \
 66.2141    develop(intx, CIBreakAtOSR,    -1,                                        \
 66.2142 -          "id of osr compilation to break at")                              \
 66.2143 +          "The id of osr compilation to break at")                          \
 66.2144                                                                              \
 66.2145    develop(intx, CIBreakAt,    -1,                                           \
 66.2146 -          "id of compilation to break at")                                  \
 66.2147 +          "The id of compilation to break at")                              \
 66.2148                                                                              \
 66.2149    product(ccstrlist, CompileOnly, "",                                       \
 66.2150            "List of methods (pkg/class.name) to restrict compilation to")    \
 66.2151 @@ -3295,11 +3330,11 @@
 66.2152            "[default: ./replay_pid%p.log] (%p replaced with pid)")           \
 66.2153                                                                              \
 66.2154    develop(intx, ReplaySuppressInitializers, 2,                              \
 66.2155 -          "Controls handling of class initialization during replay"         \
 66.2156 -          "0 - don't do anything special"                                   \
 66.2157 -          "1 - treat all class initializers as empty"                       \
 66.2158 -          "2 - treat class initializers for application classes as empty"   \
 66.2159 -          "3 - allow all class initializers to run during bootstrap but"    \
 66.2160 +          "Control handling of class initialization during replay: "        \
 66.2161 +          "0 - don't do anything special; "                                 \
 66.2162 +          "1 - treat all class initializers as empty; "                     \
 66.2163 +          "2 - treat class initializers for application classes as empty; " \
 66.2164 +          "3 - allow all class initializers to run during bootstrap but "   \
 66.2165            "    pretend they are empty after starting replay")               \
 66.2166                                                                              \
 66.2167    develop(bool, ReplayIgnoreInitErrors, false,                              \
 66.2168 @@ -3328,14 +3363,15 @@
 66.2169            "0 : Normal.                                                     "\
 66.2170            "    VM chooses priorities that are appropriate for normal       "\
 66.2171            "    applications. On Solaris NORM_PRIORITY and above are mapped "\
 66.2172 -          "    to normal native priority. Java priorities below NORM_PRIORITY"\
 66.2173 -          "    map to lower native priority values. On Windows applications"\
 66.2174 -          "    are allowed to use higher native priorities. However, with  "\
 66.2175 -          "    ThreadPriorityPolicy=0, VM will not use the highest possible"\
 66.2176 -          "    native priority, THREAD_PRIORITY_TIME_CRITICAL, as it may   "\
 66.2177 -          "    interfere with system threads. On Linux thread priorities   "\
 66.2178 -          "    are ignored because the OS does not support static priority "\
 66.2179 -          "    in SCHED_OTHER scheduling class which is the only choice for"\
 66.2180 +          "    to normal native priority. Java priorities below "           \
 66.2181 +          "    NORM_PRIORITY map to lower native priority values. On       "\
 66.2182 +          "    Windows applications are allowed to use higher native       "\
 66.2183 +          "    priorities. However, with ThreadPriorityPolicy=0, VM will   "\
 66.2184 +          "    not use the highest possible native priority,               "\
 66.2185 +          "    THREAD_PRIORITY_TIME_CRITICAL, as it may interfere with     "\
 66.2186 +          "    system threads. On Linux thread priorities are ignored      "\
 66.2187 +          "    because the OS does not support static priority in          "\
 66.2188 +          "    SCHED_OTHER scheduling class which is the only choice for   "\
 66.2189            "    non-root, non-realtime applications.                        "\
 66.2190            "1 : Aggressive.                                                 "\
 66.2191            "    Java thread priorities map over to the entire range of      "\
 66.2192 @@ -3366,16 +3402,35 @@
 66.2193    product(bool, VMThreadHintNoPreempt, false,                               \
 66.2194            "(Solaris only) Give VM thread an extra quanta")                  \
 66.2195                                                                              \
 66.2196 -  product(intx, JavaPriority1_To_OSPriority, -1, "Map Java priorities to OS priorities") \
 66.2197 -  product(intx, JavaPriority2_To_OSPriority, -1, "Map Java priorities to OS priorities") \
 66.2198 -  product(intx, JavaPriority3_To_OSPriority, -1, "Map Java priorities to OS priorities") \
 66.2199 -  product(intx, JavaPriority4_To_OSPriority, -1, "Map Java priorities to OS priorities") \
 66.2200 -  product(intx, JavaPriority5_To_OSPriority, -1, "Map Java priorities to OS priorities") \
 66.2201 -  product(intx, JavaPriority6_To_OSPriority, -1, "Map Java priorities to OS priorities") \
 66.2202 -  product(intx, JavaPriority7_To_OSPriority, -1, "Map Java priorities to OS priorities") \
 66.2203 -  product(intx, JavaPriority8_To_OSPriority, -1, "Map Java priorities to OS priorities") \
 66.2204 -  product(intx, JavaPriority9_To_OSPriority, -1, "Map Java priorities to OS priorities") \
 66.2205 -  product(intx, JavaPriority10_To_OSPriority,-1, "Map Java priorities to OS priorities") \
 66.2206 +  product(intx, JavaPriority1_To_OSPriority, -1,                            \
 66.2207 +          "Map Java priorities to OS priorities")                           \
 66.2208 +                                                                            \
 66.2209 +  product(intx, JavaPriority2_To_OSPriority, -1,                            \
 66.2210 +          "Map Java priorities to OS priorities")                           \
 66.2211 +                                                                            \
 66.2212 +  product(intx, JavaPriority3_To_OSPriority, -1,                            \
 66.2213 +          "Map Java priorities to OS priorities")                           \
 66.2214 +                                                                            \
 66.2215 +  product(intx, JavaPriority4_To_OSPriority, -1,                            \
 66.2216 +          "Map Java priorities to OS priorities")                           \
 66.2217 +                                                                            \
 66.2218 +  product(intx, JavaPriority5_To_OSPriority, -1,                            \
 66.2219 +          "Map Java priorities to OS priorities")                           \
 66.2220 +                                                                            \
 66.2221 +  product(intx, JavaPriority6_To_OSPriority, -1,                            \
 66.2222 +          "Map Java priorities to OS priorities")                           \
 66.2223 +                                                                            \
 66.2224 +  product(intx, JavaPriority7_To_OSPriority, -1,                            \
 66.2225 +          "Map Java priorities to OS priorities")                           \
 66.2226 +                                                                            \
 66.2227 +  product(intx, JavaPriority8_To_OSPriority, -1,                            \
 66.2228 +          "Map Java priorities to OS priorities")                           \
 66.2229 +                                                                            \
 66.2230 +  product(intx, JavaPriority9_To_OSPriority, -1,                            \
 66.2231 +          "Map Java priorities to OS priorities")                           \
 66.2232 +                                                                            \
 66.2233 +  product(intx, JavaPriority10_To_OSPriority,-1,                            \
 66.2234 +          "Map Java priorities to OS priorities")                           \
 66.2235                                                                              \
 66.2236    experimental(bool, UseCriticalJavaThreadPriority, false,                  \
 66.2237            "Java thread priority 10 maps to critical scheduling priority")   \
 66.2238 @@ -3406,37 +3461,38 @@
 66.2239            "Used with +TraceLongCompiles")                                   \
 66.2240                                                                              \
 66.2241    product(intx, StarvationMonitorInterval,    200,                          \
 66.2242 -          "Pause between each check in ms")                                 \
 66.2243 +          "Pause between each check (in milliseconds)")                     \
 66.2244                                                                              \
 66.2245    /* recompilation */                                                       \
 66.2246    product_pd(intx, CompileThreshold,                                        \
 66.2247            "number of interpreted method invocations before (re-)compiling") \
 66.2248                                                                              \
 66.2249    product_pd(intx, BackEdgeThreshold,                                       \
 66.2250 -          "Interpreter Back edge threshold at which an OSR compilation is invoked")\
 66.2251 +          "Interpreter Back edge threshold at which an OSR compilation is " \
 66.2252 +          "invoked")                                                        \
 66.2253                                                                              \
 66.2254    product(intx, Tier0InvokeNotifyFreqLog, 7,                                \
 66.2255 -          "Interpreter (tier 0) invocation notification frequency.")        \
 66.2256 +          "Interpreter (tier 0) invocation notification frequency")         \
 66.2257                                                                              \
 66.2258    product(intx, Tier2InvokeNotifyFreqLog, 11,                               \
 66.2259 -          "C1 without MDO (tier 2) invocation notification frequency.")     \
 66.2260 +          "C1 without MDO (tier 2) invocation notification frequency")      \
 66.2261                                                                              \
 66.2262    product(intx, Tier3InvokeNotifyFreqLog, 10,                               \
 66.2263            "C1 with MDO profiling (tier 3) invocation notification "         \
 66.2264 -          "frequency.")                                                     \
 66.2265 +          "frequency")                                                      \
 66.2266                                                                              \
 66.2267    product(intx, Tier23InlineeNotifyFreqLog, 20,                             \
 66.2268            "Inlinee invocation (tiers 2 and 3) notification frequency")      \
 66.2269                                                                              \
 66.2270    product(intx, Tier0BackedgeNotifyFreqLog, 10,                             \
 66.2271 -          "Interpreter (tier 0) invocation notification frequency.")        \
 66.2272 +          "Interpreter (tier 0) invocation notification frequency")         \
 66.2273                                                                              \
 66.2274    product(intx, Tier2BackedgeNotifyFreqLog, 14,                             \
 66.2275 -          "C1 without MDO (tier 2) invocation notification frequency.")     \
 66.2276 +          "C1 without MDO (tier 2) invocation notification frequency")      \
 66.2277                                                                              \
 66.2278    product(intx, Tier3BackedgeNotifyFreqLog, 13,                             \
 66.2279            "C1 with MDO profiling (tier 3) invocation notification "         \
 66.2280 -          "frequency.")                                                     \
 66.2281 +          "frequency")                                                      \
 66.2282                                                                              \
 66.2283    product(intx, Tier2CompileThreshold, 0,                                   \
 66.2284            "threshold at which tier 2 compilation is invoked")               \
 66.2285 @@ -3453,7 +3509,7 @@
 66.2286                                                                              \
 66.2287    product(intx, Tier3CompileThreshold, 2000,                                \
 66.2288            "Threshold at which tier 3 compilation is invoked (invocation "   \
 66.2289 -          "minimum must be satisfied.")                                     \
 66.2290 +          "minimum must be satisfied")                                      \
 66.2291                                                                              \
 66.2292    product(intx, Tier3BackEdgeThreshold,  60000,                             \
 66.2293            "Back edge threshold at which tier 3 OSR compilation is invoked") \
 66.2294 @@ -3467,7 +3523,7 @@
 66.2295                                                                              \
 66.2296    product(intx, Tier4CompileThreshold, 15000,                               \
 66.2297            "Threshold at which tier 4 compilation is invoked (invocation "   \
 66.2298 -          "minimum must be satisfied.")                                     \
 66.2299 +          "minimum must be satisfied")                                      \
 66.2300                                                                              \
 66.2301    product(intx, Tier4BackEdgeThreshold, 40000,                              \
 66.2302            "Back edge threshold at which tier 4 OSR compilation is invoked") \
 66.2303 @@ -3496,12 +3552,12 @@
 66.2304            "Stop at given compilation level")                                \
 66.2305                                                                              \
 66.2306    product(intx, Tier0ProfilingStartPercentage, 200,                         \
 66.2307 -          "Start profiling in interpreter if the counters exceed tier 3"    \
 66.2308 +          "Start profiling in interpreter if the counters exceed tier 3 "   \
 66.2309            "thresholds by the specified percentage")                         \
 66.2310                                                                              \
 66.2311    product(uintx, IncreaseFirstTierCompileThresholdAt, 50,                   \
 66.2312 -          "Increase the compile threshold for C1 compilation if the code"   \
 66.2313 -          "cache is filled by the specified percentage.")                   \
 66.2314 +          "Increase the compile threshold for C1 compilation if the code "  \
 66.2315 +          "cache is filled by the specified percentage")                    \
 66.2316                                                                              \
 66.2317    product(intx, TieredRateUpdateMinTime, 1,                                 \
 66.2318            "Minimum rate sampling interval (in milliseconds)")               \
 66.2319 @@ -3516,24 +3572,26 @@
 66.2320            "Print tiered events notifications")                              \
 66.2321                                                                              \
 66.2322    product_pd(intx, OnStackReplacePercentage,                                \
 66.2323 -          "NON_TIERED number of method invocations/branches (expressed as %"\
 66.2324 -          "of CompileThreshold) before (re-)compiling OSR code")            \
 66.2325 +          "NON_TIERED number of method invocations/branches (expressed as " \
 66.2326 +          "% of CompileThreshold) before (re-)compiling OSR code")          \
 66.2327                                                                              \
 66.2328    product(intx, InterpreterProfilePercentage, 33,                           \
 66.2329 -          "NON_TIERED number of method invocations/branches (expressed as %"\
 66.2330 -          "of CompileThreshold) before profiling in the interpreter")       \
 66.2331 +          "NON_TIERED number of method invocations/branches (expressed as " \
 66.2332 +          "% of CompileThreshold) before profiling in the interpreter")     \
 66.2333                                                                              \
 66.2334    develop(intx, MaxRecompilationSearchLength,    10,                        \
 66.2335 -          "max. # frames to inspect searching for recompilee")              \
 66.2336 +          "The maximum number of frames to inspect when searching for "     \
 66.2337 +          "recompilee")                                                     \
 66.2338                                                                              \
 66.2339    develop(intx, MaxInterpretedSearchLength,     3,                          \
 66.2340 -          "max. # interp. frames to skip when searching for recompilee")    \
 66.2341 +          "The maximum number of interpreted frames to skip when searching "\
 66.2342 +          "for recompilee")                                                 \
 66.2343                                                                              \
 66.2344    develop(intx, DesiredMethodLimit,  8000,                                  \
 66.2345 -          "desired max. method size (in bytecodes) after inlining")         \
 66.2346 +          "The desired maximum method size (in bytecodes) after inlining")  \
 66.2347                                                                              \
 66.2348    develop(intx, HugeMethodLimit,  8000,                                     \
 66.2349 -          "don't compile methods larger than this if "                      \
 66.2350 +          "Don't compile methods larger than this if "                      \
 66.2351            "+DontCompileHugeMethods")                                        \
 66.2352                                                                              \
 66.2353    /* New JDK 1.4 reflection implementation */                               \
 66.2354 @@ -3555,7 +3613,7 @@
 66.2355            "in InvocationTargetException. See 6531596")                      \
 66.2356                                                                              \
 66.2357    develop(bool, VerifyLambdaBytecodes, false,                               \
 66.2358 -          "Force verification of jdk 8 lambda metafactory bytecodes.")      \
 66.2359 +          "Force verification of jdk 8 lambda metafactory bytecodes")       \
 66.2360                                                                              \
 66.2361    develop(intx, FastSuperclassLimit, 8,                                     \
 66.2362            "Depth of hardwired instanceof accelerator array")                \
 66.2363 @@ -3579,18 +3637,19 @@
 66.2364    /* flags for performance data collection */                               \
 66.2365                                                                              \
 66.2366    product(bool, UsePerfData, falseInEmbedded,                               \
 66.2367 -          "Flag to disable jvmstat instrumentation for performance testing" \
 66.2368 -          "and problem isolation purposes.")                                \
 66.2369 +          "Flag to disable jvmstat instrumentation for performance testing "\
 66.2370 +          "and problem isolation purposes")                                 \
 66.2371                                                                              \
 66.2372    product(bool, PerfDataSaveToFile, false,                                  \
 66.2373            "Save PerfData memory to hsperfdata_<pid> file on exit")          \
 66.2374                                                                              \
 66.2375    product(ccstr, PerfDataSaveFile, NULL,                                    \
 66.2376 -          "Save PerfData memory to the specified absolute pathname,"        \
 66.2377 -           "%p in the file name if present will be replaced by pid")        \
 66.2378 -                                                                            \
 66.2379 -  product(intx, PerfDataSamplingInterval, 50 /*ms*/,                        \
 66.2380 -          "Data sampling interval in milliseconds")                         \
 66.2381 +          "Save PerfData memory to the specified absolute pathname. "       \
 66.2382 +          "The string %p in the file name (if present) "                    \
 66.2383 +          "will be replaced by pid")                                        \
 66.2384 +                                                                            \
 66.2385 +  product(intx, PerfDataSamplingInterval, 50,                               \
 66.2386 +          "Data sampling interval (in milliseconds)")                       \
 66.2387                                                                              \
 66.2388    develop(bool, PerfTraceDataCreation, false,                               \
 66.2389            "Trace creation of Performance Data Entries")                     \
 66.2390 @@ -3615,7 +3674,7 @@
 66.2391            "Bypass Win32 file system criteria checks (Windows Only)")        \
 66.2392                                                                              \
 66.2393    product(intx, UnguardOnExecutionViolation, 0,                             \
 66.2394 -          "Unguard page and retry on no-execute fault (Win32 only)"         \
 66.2395 +          "Unguard page and retry on no-execute fault (Win32 only) "        \
 66.2396            "0=off, 1=conservative, 2=aggressive")                            \
 66.2397                                                                              \
 66.2398    /* Serviceability Support */                                              \
 66.2399 @@ -3624,7 +3683,7 @@
 66.2400            "Create JMX Management Server")                                   \
 66.2401                                                                              \
 66.2402    product(bool, DisableAttachMechanism, false,                              \
 66.2403 -         "Disable mechanism that allows tools to attach to this VM")        \
 66.2404 +          "Disable mechanism that allows tools to attach to this VM")       \
 66.2405                                                                              \
 66.2406    product(bool, StartAttachListener, false,                                 \
 66.2407            "Always start Attach Listener at VM startup")                     \
 66.2408 @@ -3647,9 +3706,9 @@
 66.2409            "Require shared spaces for metadata")                             \
 66.2410                                                                              \
 66.2411    product(bool, DumpSharedSpaces, false,                                    \
 66.2412 -           "Special mode: JVM reads a class list, loads classes, builds "   \
 66.2413 -            "shared spaces, and dumps the shared spaces to a file to be "   \
 66.2414 -            "used in future JVM runs.")                                     \
 66.2415 +          "Special mode: JVM reads a class list, loads classes, builds "    \
 66.2416 +          "shared spaces, and dumps the shared spaces to a file to be "     \
 66.2417 +          "used in future JVM runs")                                        \
 66.2418                                                                              \
 66.2419    product(bool, PrintSharedSpaces, false,                                   \
 66.2420            "Print usage of shared spaces")                                   \
 66.2421 @@ -3722,11 +3781,14 @@
 66.2422            "Relax the access control checks in the verifier")                \
 66.2423                                                                              \
 66.2424    diagnostic(bool, PrintDTraceDOF, false,                                   \
 66.2425 -             "Print the DTrace DOF passed to the system for JSDT probes")   \
 66.2426 +          "Print the DTrace DOF passed to the system for JSDT probes")      \
 66.2427                                                                              \
 66.2428    product(uintx, StringTableSize, defaultStringTableSize,                   \
 66.2429            "Number of buckets in the interned String table")                 \
 66.2430                                                                              \
 66.2431 +  experimental(uintx, SymbolTableSize, defaultSymbolTableSize,              \
 66.2432 +          "Number of buckets in the JVM internal Symbol table")             \
 66.2433 +                                                                            \
 66.2434    develop(bool, TraceDefaultMethods, false,                                 \
 66.2435            "Trace the default method processing steps")                      \
 66.2436                                                                              \
 66.2437 @@ -3735,8 +3797,8 @@
 66.2438                                                                              \
 66.2439    product(bool, UseVMInterruptibleIO, false,                                \
 66.2440            "(Unstable, Solaris-specific) Thread interrupt before or with "   \
 66.2441 -          "EINTR for I/O operations results in OS_INTRPT. The default value"\
 66.2442 -          " of this flag is true for JDK 6 and earlier")                    \
 66.2443 +          "EINTR for I/O operations results in OS_INTRPT. The default "     \
 66.2444 +          "value of this flag is true for JDK 6 and earlier")               \
 66.2445                                                                              \
 66.2446    diagnostic(bool, WhiteBoxAPI, false,                                      \
 66.2447            "Enable internal testing APIs")                                   \
 66.2448 @@ -3757,6 +3819,7 @@
 66.2449                                                                              \
 66.2450    product(bool, EnableTracing, false,                                       \
 66.2451            "Enable event-based tracing")                                     \
 66.2452 +                                                                            \
 66.2453    product(bool, UseLockedTracing, false,                                    \
 66.2454            "Use locked-tracing when doing event-based tracing")
 66.2455  
    67.1 --- a/src/share/vm/runtime/reflectionUtils.cpp	Sun Oct 13 21:14:04 2013 +0100
    67.2 +++ b/src/share/vm/runtime/reflectionUtils.cpp	Thu Oct 17 14:20:57 2013 -0700
    67.3 @@ -1,5 +1,5 @@
    67.4  /*
    67.5 - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
    67.6 + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
    67.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    67.8   *
    67.9   * This code is free software; you can redistribute it and/or modify it
   67.10 @@ -27,8 +27,11 @@
   67.11  #include "memory/universe.inline.hpp"
   67.12  #include "runtime/reflectionUtils.hpp"
   67.13  
   67.14 -KlassStream::KlassStream(instanceKlassHandle klass, bool local_only, bool classes_only) {
   67.15 -  _klass = klass;
   67.16 +KlassStream::KlassStream(instanceKlassHandle klass, bool local_only,
   67.17 +                         bool classes_only, bool walk_defaults) {
   67.18 +  _klass = _base_klass = klass;
   67.19 +  _base_class_search_defaults = false;
   67.20 +  _defaults_checked = false;
   67.21    if (classes_only) {
   67.22      _interfaces = Universe::the_empty_klass_array();
   67.23    } else {
   67.24 @@ -37,6 +40,7 @@
   67.25    _interface_index = _interfaces->length();
   67.26    _local_only = local_only;
   67.27    _classes_only = classes_only;
   67.28 +  _walk_defaults = walk_defaults;
   67.29  }
   67.30  
   67.31  bool KlassStream::eos() {
   67.32 @@ -45,7 +49,13 @@
   67.33    if (!_klass->is_interface() && _klass->super() != NULL) {
   67.34      // go up superclass chain (not for interfaces)
   67.35      _klass = _klass->super();
   67.36 +  // Next for method walks, walk default methods
   67.37 +  } else if (_walk_defaults && (_defaults_checked == false)  && (_base_klass->default_methods() != NULL)) {
   67.38 +      _base_class_search_defaults = true;
   67.39 +      _klass = _base_klass;
   67.40 +      _defaults_checked = true;
   67.41    } else {
   67.42 +    // Next walk transitive interfaces
   67.43      if (_interface_index > 0) {
   67.44        _klass = _interfaces->at(--_interface_index);
   67.45      } else {
    68.1 --- a/src/share/vm/runtime/reflectionUtils.hpp	Sun Oct 13 21:14:04 2013 +0100
    68.2 +++ b/src/share/vm/runtime/reflectionUtils.hpp	Thu Oct 17 14:20:57 2013 -0700
    68.3 @@ -38,7 +38,7 @@
    68.4  // and (super)interfaces. Streaming is done in reverse order (subclasses first,
    68.5  // interfaces last).
    68.6  //
    68.7 -//    for (KlassStream st(k, false, false); !st.eos(); st.next()) {
    68.8 +//    for (KlassStream st(k, false, false, false); !st.eos(); st.next()) {
    68.9  //      Klass* k = st.klass();
   68.10  //      ...
   68.11  //    }
   68.12 @@ -46,17 +46,21 @@
   68.13  class KlassStream VALUE_OBJ_CLASS_SPEC {
   68.14   protected:
   68.15    instanceKlassHandle _klass;           // current klass/interface iterated over
   68.16 -  Array<Klass*>*    _interfaces;      // transitive interfaces for initial class
   68.17 +  instanceKlassHandle _base_klass;      // initial klass/interface to iterate over
   68.18 +  Array<Klass*>*      _interfaces;      // transitive interfaces for initial class
   68.19    int                 _interface_index; // current interface being processed
   68.20    bool                _local_only;      // process initial class/interface only
   68.21    bool                _classes_only;    // process classes only (no interfaces)
   68.22 +  bool                _walk_defaults;   // process default methods
   68.23 +  bool                _base_class_search_defaults; // time to process default methods
   68.24 +  bool                _defaults_checked; // already checked for default methods
   68.25    int                 _index;
   68.26  
   68.27 -  virtual int length() const = 0;
   68.28 +  virtual int length() = 0;
   68.29  
   68.30   public:
   68.31    // constructor
   68.32 -  KlassStream(instanceKlassHandle klass, bool local_only, bool classes_only);
   68.33 +  KlassStream(instanceKlassHandle klass, bool local_only, bool classes_only, bool walk_defaults);
   68.34  
   68.35    // testing
   68.36    bool eos();
   68.37 @@ -67,6 +71,8 @@
   68.38    // accessors
   68.39    instanceKlassHandle klass() const { return _klass; }
   68.40    int index() const                 { return _index; }
   68.41 +  bool base_class_search_defaults() const { return _base_class_search_defaults; }
   68.42 +  void base_class_search_defaults(bool b) { _base_class_search_defaults = b; }
   68.43  };
   68.44  
   68.45  
   68.46 @@ -81,17 +87,24 @@
   68.47  
   68.48  class MethodStream : public KlassStream {
   68.49   private:
   68.50 -  int length() const          { return methods()->length(); }
   68.51 -  Array<Method*>* methods() const { return _klass->methods(); }
   68.52 +  int length()                    { return methods()->length(); }
   68.53 +  Array<Method*>* methods() {
   68.54 +    if (base_class_search_defaults()) {
   68.55 +      base_class_search_defaults(false);
   68.56 +      return _klass->default_methods();
   68.57 +    } else {
   68.58 +      return _klass->methods();
   68.59 +    }
   68.60 +  }
   68.61   public:
   68.62    MethodStream(instanceKlassHandle klass, bool local_only, bool classes_only)
   68.63 -    : KlassStream(klass, local_only, classes_only) {
   68.64 +    : KlassStream(klass, local_only, classes_only, true) {
   68.65      _index = length();
   68.66      next();
   68.67    }
   68.68  
   68.69    void next() { _index--; }
   68.70 -  Method* method() const { return methods()->at(index()); }
   68.71 +  Method* method() { return methods()->at(index()); }
   68.72  };
   68.73  
   68.74  
   68.75 @@ -107,13 +120,13 @@
   68.76  
   68.77  class FieldStream : public KlassStream {
   68.78   private:
   68.79 -  int length() const                { return _klass->java_fields_count(); }
   68.80 +  int length() { return _klass->java_fields_count(); }
   68.81  
   68.82    fieldDescriptor _fd_buf;
   68.83  
   68.84   public:
   68.85    FieldStream(instanceKlassHandle klass, bool local_only, bool classes_only)
   68.86 -    : KlassStream(klass, local_only, classes_only) {
   68.87 +    : KlassStream(klass, local_only, classes_only, false) {
   68.88      _index = length();
   68.89      next();
   68.90    }
    69.1 --- a/src/share/vm/runtime/virtualspace.cpp	Sun Oct 13 21:14:04 2013 +0100
    69.2 +++ b/src/share/vm/runtime/virtualspace.cpp	Thu Oct 17 14:20:57 2013 -0700
    69.3 @@ -368,8 +368,15 @@
    69.4  
    69.5  
    69.6  bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
    69.7 +  const size_t max_commit_granularity = os::page_size_for_region(rs.size(), rs.size(), 1);
    69.8 +  return initialize_with_granularity(rs, committed_size, max_commit_granularity);
    69.9 +}
   69.10 +
   69.11 +bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
   69.12    if(!rs.is_reserved()) return false;  // allocation failed.
   69.13    assert(_low_boundary == NULL, "VirtualSpace already initialized");
   69.14 +  assert(max_commit_granularity > 0, "Granularity must be non-zero.");
   69.15 +
   69.16    _low_boundary  = rs.base();
   69.17    _high_boundary = low_boundary() + rs.size();
   69.18  
   69.19 @@ -390,7 +397,7 @@
   69.20    // No attempt is made to force large page alignment at the very top and
   69.21    // bottom of the space if they are not aligned so already.
   69.22    _lower_alignment  = os::vm_page_size();
   69.23 -  _middle_alignment = os::page_size_for_region(rs.size(), rs.size(), 1);
   69.24 +  _middle_alignment = max_commit_granularity;
   69.25    _upper_alignment  = os::vm_page_size();
   69.26  
   69.27    // End of each region
   69.28 @@ -966,17 +973,52 @@
   69.29  
   69.30  
   69.31  class TestVirtualSpace : AllStatic {
   69.32 +  enum TestLargePages {
   69.33 +    Default,
   69.34 +    Disable,
   69.35 +    Reserve,
   69.36 +    Commit
   69.37 +  };
   69.38 +
   69.39 +  static ReservedSpace reserve_memory(size_t reserve_size_aligned, TestLargePages mode) {
   69.40 +    switch(mode) {
   69.41 +    default:
   69.42 +    case Default:
   69.43 +    case Reserve:
   69.44 +      return ReservedSpace(reserve_size_aligned);
   69.45 +    case Disable:
   69.46 +    case Commit:
   69.47 +      return ReservedSpace(reserve_size_aligned,
   69.48 +                           os::vm_allocation_granularity(),
   69.49 +                           /* large */ false, /* exec */ false);
   69.50 +    }
   69.51 +  }
   69.52 +
   69.53 +  static bool initialize_virtual_space(VirtualSpace& vs, ReservedSpace rs, TestLargePages mode) {
   69.54 +    switch(mode) {
   69.55 +    default:
   69.56 +    case Default:
   69.57 +    case Reserve:
   69.58 +      return vs.initialize(rs, 0);
   69.59 +    case Disable:
   69.60 +      return vs.initialize_with_granularity(rs, 0, os::vm_page_size());
   69.61 +    case Commit:
   69.62 +      return vs.initialize_with_granularity(rs, 0, os::page_size_for_region(rs.size(), rs.size(), 1));
   69.63 +    }
   69.64 +  }
   69.65 +
   69.66   public:
   69.67 -  static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size) {
   69.68 +  static void test_virtual_space_actual_committed_space(size_t reserve_size, size_t commit_size,
   69.69 +                                                        TestLargePages mode = Default) {
   69.70      size_t granularity = os::vm_allocation_granularity();
   69.71      size_t reserve_size_aligned = align_size_up(reserve_size, granularity);
   69.72  
   69.73 -    ReservedSpace reserved(reserve_size_aligned);
   69.74 +    ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
   69.75  
   69.76      assert(reserved.is_reserved(), "Must be");
   69.77  
   69.78      VirtualSpace vs;
   69.79 -    bool initialized = vs.initialize(reserved, 0);
   69.80 +    bool initialized = initialize_virtual_space(vs, reserved, mode);
   69.81      assert(initialized, "Failed to initialize VirtualSpace");
   69.82  
   69.83      vs.expand_by(commit_size, false);
   69.84 @@ -986,7 +1028,10 @@
   69.85      } else {
   69.86        assert_ge(vs.actual_committed_size(), commit_size);
   69.87        // Approximate the commit granularity.
   69.88 -      size_t commit_granularity = UseLargePages ? os::large_page_size() : os::vm_page_size();
   69.89 +      // Make sure that we don't commit using large pages
   69.90 +      // if large pages has been disabled for this VirtualSpace.
   69.91 +      size_t commit_granularity = (mode == Disable || !UseLargePages) ?
   69.92 +                                   os::vm_page_size() : os::large_page_size();
   69.93        assert_lt(vs.actual_committed_size(), commit_size + commit_granularity);
   69.94      }
   69.95  
   69.96 @@ -1042,9 +1087,40 @@
   69.97      test_virtual_space_actual_committed_space(10 * M, 10 * M);
   69.98    }
   69.99  
  69.100 +  static void test_virtual_space_disable_large_pages() {
  69.101 +    if (!UseLargePages) {
  69.102 +      return;
  69.103 +    }
  69.104 +    // These test cases verify that if we force VirtualSpace to disable large pages
  69.105 +    test_virtual_space_actual_committed_space(10 * M, 0, Disable);
  69.106 +    test_virtual_space_actual_committed_space(10 * M, 4 * K, Disable);
  69.107 +    test_virtual_space_actual_committed_space(10 * M, 8 * K, Disable);
  69.108 +    test_virtual_space_actual_committed_space(10 * M, 1 * M, Disable);
  69.109 +    test_virtual_space_actual_committed_space(10 * M, 2 * M, Disable);
  69.110 +    test_virtual_space_actual_committed_space(10 * M, 5 * M, Disable);
  69.111 +    test_virtual_space_actual_committed_space(10 * M, 10 * M, Disable);
  69.112 +
  69.113 +    test_virtual_space_actual_committed_space(10 * M, 0, Reserve);
  69.114 +    test_virtual_space_actual_committed_space(10 * M, 4 * K, Reserve);
  69.115 +    test_virtual_space_actual_committed_space(10 * M, 8 * K, Reserve);
  69.116 +    test_virtual_space_actual_committed_space(10 * M, 1 * M, Reserve);
  69.117 +    test_virtual_space_actual_committed_space(10 * M, 2 * M, Reserve);
  69.118 +    test_virtual_space_actual_committed_space(10 * M, 5 * M, Reserve);
  69.119 +    test_virtual_space_actual_committed_space(10 * M, 10 * M, Reserve);
  69.120 +
  69.121 +    test_virtual_space_actual_committed_space(10 * M, 0, Commit);
  69.122 +    test_virtual_space_actual_committed_space(10 * M, 4 * K, Commit);
  69.123 +    test_virtual_space_actual_committed_space(10 * M, 8 * K, Commit);
  69.124 +    test_virtual_space_actual_committed_space(10 * M, 1 * M, Commit);
  69.125 +    test_virtual_space_actual_committed_space(10 * M, 2 * M, Commit);
  69.126 +    test_virtual_space_actual_committed_space(10 * M, 5 * M, Commit);
  69.127 +    test_virtual_space_actual_committed_space(10 * M, 10 * M, Commit);
  69.128 +  }
  69.129 +
  69.130    static void test_virtual_space() {
  69.131      test_virtual_space_actual_committed_space();
  69.132      test_virtual_space_actual_committed_space_one_large_page();
  69.133 +    test_virtual_space_disable_large_pages();
  69.134    }
  69.135  };
  69.136  
    70.1 --- a/src/share/vm/runtime/virtualspace.hpp	Sun Oct 13 21:14:04 2013 +0100
    70.2 +++ b/src/share/vm/runtime/virtualspace.hpp	Thu Oct 17 14:20:57 2013 -0700
    70.3 @@ -178,6 +178,7 @@
    70.4   public:
    70.5    // Initialization
    70.6    VirtualSpace();
    70.7 +  bool initialize_with_granularity(ReservedSpace rs, size_t committed_byte_size, size_t max_commit_ganularity);
    70.8    bool initialize(ReservedSpace rs, size_t committed_byte_size);
    70.9  
   70.10    // Destruction
    71.1 --- a/src/share/vm/runtime/vmStructs.cpp	Sun Oct 13 21:14:04 2013 +0100
    71.2 +++ b/src/share/vm/runtime/vmStructs.cpp	Thu Oct 17 14:20:57 2013 -0700
    71.3 @@ -27,7 +27,6 @@
    71.4  #include "classfile/javaClasses.hpp"
    71.5  #include "classfile/loaderConstraints.hpp"
    71.6  #include "classfile/placeholders.hpp"
    71.7 -#include "classfile/symbolTable.hpp"
    71.8  #include "classfile/systemDictionary.hpp"
    71.9  #include "ci/ciField.hpp"
   71.10  #include "ci/ciInstance.hpp"
   71.11 @@ -289,6 +288,7 @@
   71.12    nonstatic_field(ConstantPoolCache,    _constant_pool,                                ConstantPool*)                         \
   71.13    nonstatic_field(InstanceKlass,               _array_klasses,                                Klass*)                                \
   71.14    nonstatic_field(InstanceKlass,               _methods,                                      Array<Method*>*)                       \
   71.15 +  nonstatic_field(InstanceKlass,               _default_methods,                              Array<Method*>*)                       \
   71.16    nonstatic_field(InstanceKlass,               _local_interfaces,                             Array<Klass*>*)                        \
   71.17    nonstatic_field(InstanceKlass,               _transitive_interfaces,                        Array<Klass*>*)                        \
   71.18    nonstatic_field(InstanceKlass,               _fields,                                       Array<u2>*)                            \
   71.19 @@ -323,6 +323,7 @@
   71.20    nonstatic_field(nmethodBucket,               _count,                                        int)                                   \
   71.21    nonstatic_field(nmethodBucket,               _next,                                         nmethodBucket*)                        \
   71.22    nonstatic_field(InstanceKlass,               _method_ordering,                              Array<int>*)                           \
   71.23 +  nonstatic_field(InstanceKlass,               _default_vtable_indices,                       Array<int>*)                           \
   71.24    nonstatic_field(Klass,                       _super_check_offset,                           juint)                                 \
   71.25    nonstatic_field(Klass,                       _secondary_super_cache,                        Klass*)                                \
   71.26    nonstatic_field(Klass,                       _secondary_supers,                             Array<Klass*>*)                        \
   71.27 @@ -715,11 +716,17 @@
   71.28    nonstatic_field(PlaceholderEntry,            _loader_data,                                  ClassLoaderData*)                      \
   71.29                                                                                                                                       \
   71.30    /**************************/                                                                                                       \
   71.31 -  /* ProctectionDomainEntry */                                                                                                       \
   71.32 +  /* ProtectionDomainEntry  */                                                                                                       \
   71.33    /**************************/                                                                                                       \
   71.34                                                                                                                                       \
   71.35    nonstatic_field(ProtectionDomainEntry,       _next,                                         ProtectionDomainEntry*)                \
   71.36 -  nonstatic_field(ProtectionDomainEntry,       _protection_domain,                            oop)                                   \
   71.37 +  nonstatic_field(ProtectionDomainEntry,       _pd_cache,                                     ProtectionDomainCacheEntry*)           \
   71.38 +                                                                                                                                     \
   71.39 +  /*******************************/                                                                                                  \
   71.40 +  /* ProtectionDomainCacheEntry  */                                                                                                  \
   71.41 +  /*******************************/                                                                                                  \
   71.42 +                                                                                                                                     \
   71.43 +  nonstatic_field(ProtectionDomainCacheEntry,  _literal,                                      oop)                                   \
   71.44                                                                                                                                       \
   71.45    /*************************/                                                                                                        \
   71.46    /* LoaderConstraintEntry */                                                                                                        \
   71.47 @@ -1562,6 +1569,7 @@
   71.48    declare_toplevel_type(SystemDictionary)                                 \
   71.49    declare_toplevel_type(vmSymbols)                                        \
   71.50    declare_toplevel_type(ProtectionDomainEntry)                            \
   71.51 +  declare_toplevel_type(ProtectionDomainCacheEntry)                       \
   71.52                                                                            \
   71.53    declare_toplevel_type(GenericGrowableArray)                             \
   71.54    declare_toplevel_type(GrowableArray<int>)                               \
   71.55 @@ -2247,12 +2255,6 @@
   71.56    declare_preprocessor_constant("PERFDATA_BIG_ENDIAN", PERFDATA_BIG_ENDIAN)       \
   71.57    declare_preprocessor_constant("PERFDATA_LITTLE_ENDIAN", PERFDATA_LITTLE_ENDIAN) \
   71.58                                                                            \
   71.59 -  /***************/                                                       \
   71.60 -  /* SymbolTable */                                                       \
   71.61 -  /***************/                                                       \
   71.62 -                                                                          \
   71.63 -  declare_constant(SymbolTable::symbol_table_size)                        \
   71.64 -                                                                          \
   71.65    /***********************************/                                   \
   71.66    /* LoaderConstraintTable constants */                                   \
   71.67    /***********************************/                                   \
    72.1 --- a/src/share/vm/services/memoryService.hpp	Sun Oct 13 21:14:04 2013 +0100
    72.2 +++ b/src/share/vm/services/memoryService.hpp	Thu Oct 17 14:20:57 2013 -0700
    72.3 @@ -148,6 +148,12 @@
    72.4    static void track_code_cache_memory_usage() {
    72.5      track_memory_pool_usage(_code_heap_pool);
    72.6    }
    72.7 +  static void track_metaspace_memory_usage() {
    72.8 +    track_memory_pool_usage(_metaspace_pool);
    72.9 +  }
   72.10 +  static void track_compressed_class_memory_usage() {
   72.11 +    track_memory_pool_usage(_compressed_class_pool);
   72.12 +  }
   72.13    static void track_memory_pool_usage(MemoryPool* pool);
   72.14  
   72.15    static void gc_begin(bool fullGC, bool recordGCBeginTime,
    73.1 --- a/src/share/vm/utilities/globalDefinitions.hpp	Sun Oct 13 21:14:04 2013 +0100
    73.2 +++ b/src/share/vm/utilities/globalDefinitions.hpp	Thu Oct 17 14:20:57 2013 -0700
    73.3 @@ -326,12 +326,18 @@
    73.4  
    73.5  const int max_method_code_size = 64*K - 1;  // JVM spec, 2nd ed. section 4.8.1 (p.134)
    73.6  
    73.7 +// Default ProtectionDomainCacheSize values
    73.8 +
    73.9 +const int defaultProtectionDomainCacheSize = NOT_LP64(137) LP64_ONLY(2017);
   73.10  
   73.11  //----------------------------------------------------------------------------------------------------
   73.12  // Default and minimum StringTableSize values
   73.13  
   73.14  const int defaultStringTableSize = NOT_LP64(1009) LP64_ONLY(60013);
   73.15 -const int minimumStringTableSize=1009;
   73.16 +const int minimumStringTableSize = 1009;
   73.17 +
   73.18 +const int defaultSymbolTableSize = 20011;
   73.19 +const int minimumSymbolTableSize = 1009;
   73.20  
   73.21  
   73.22  //----------------------------------------------------------------------------------------------------
    74.1 --- a/test/TEST.groups	Sun Oct 13 21:14:04 2013 +0100
    74.2 +++ b/test/TEST.groups	Thu Oct 17 14:20:57 2013 -0700
    74.3 @@ -65,7 +65,6 @@
    74.4    gc/metaspace/CompressedClassSpaceSizeInJmapHeap.java \
    74.5    gc/metaspace/TestMetaspacePerfCounters.java \
    74.6    runtime/6819213/TestBootNativeLibraryPath.java \
    74.7 -  runtime/6878713/Test6878713.sh \
    74.8    runtime/6925573/SortMethodsTest.java \
    74.9    runtime/7107135/Test7107135.sh \
   74.10    runtime/7158988/FieldMonitor.java \
   74.11 @@ -85,7 +84,9 @@
   74.12    runtime/NMT/VirtualAllocTestType.java \
   74.13    runtime/RedefineObject/TestRedefineObject.java \
   74.14    runtime/XCheckJniJsig/XCheckJSig.java \
   74.15 -  serviceability/attach/AttachWithStalePidFile.java
   74.16 +  serviceability/attach/AttachWithStalePidFile.java \
   74.17 +  serviceability/sa/jmap-hprof/JMapHProfLargeHeapTest.java
   74.18 +
   74.19  
   74.20  # JRE adds further tests to compact3
   74.21  #
    75.1 --- a/test/runtime/6888954/vmerrors.sh	Sun Oct 13 21:14:04 2013 +0100
    75.2 +++ b/test/runtime/6888954/vmerrors.sh	Thu Oct 17 14:20:57 2013 -0700
    75.3 @@ -1,3 +1,25 @@
    75.4 +# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
    75.5 +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    75.6 +#
    75.7 +# This code is free software; you can redistribute it and/or modify it
    75.8 +# under the terms of the GNU General Public License version 2 only, as
    75.9 +# published by the Free Software Foundation.
   75.10 +#
   75.11 +# This code is distributed in the hope that it will be useful, but WITHOUT
   75.12 +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   75.13 +# FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   75.14 +# version 2 for more details (a copy is included in the LICENSE file that
   75.15 +# accompanied this code).
   75.16 +#
   75.17 +# You should have received a copy of the GNU General Public License version
   75.18 +# 2 along with this work; if not, write to the Free Software Foundation,
   75.19 +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   75.20 +#
   75.21 +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   75.22 +# or visit www.oracle.com if you need additional information or have any
   75.23 +# questions.
   75.24 +#
   75.25 +
   75.26  # @test
   75.27  # @bug 6888954
   75.28  # @bug 8015884
   75.29 @@ -63,6 +85,7 @@
   75.30      [ $i -lt 10 ] && i2=0$i
   75.31  
   75.32      "$TESTJAVA/bin/java" $TESTVMOPTS -XX:+IgnoreUnrecognizedVMOptions \
   75.33 +        -XX:-TransmitErrorReport \
   75.34          -XX:ErrorHandlerTest=${i} -version > ${i2}.out 2>&1
   75.35  
   75.36      # If ErrorHandlerTest is ignored (product build), stop.
    76.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    76.2 +++ b/test/runtime/memory/LargePages/TestLargePagesFlags.java	Thu Oct 17 14:20:57 2013 -0700
    76.3 @@ -0,0 +1,389 @@
    76.4 +/*
    76.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
    76.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    76.7 + *
    76.8 + * This code is free software; you can redistribute it and/or modify it
    76.9 + * under the terms of the GNU General Public License version 2 only, as
   76.10 + * published by the Free Software Foundation.
   76.11 + *
   76.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   76.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   76.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   76.15 + * version 2 for more details (a copy is included in the LICENSE file that
   76.16 + * accompanied this code).
   76.17 + *
   76.18 + * You should have received a copy of the GNU General Public License version
   76.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   76.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   76.21 + *
   76.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   76.23 + * or visit www.oracle.com if you need additional information or have any
   76.24 + * questions.
   76.25 + */
   76.26 +
   76.27 +/* @test TestLargePagesFlags
   76.28 + * @summary Tests how large pages are choosen depending on the given large pages flag combinations.
   76.29 + * @library /testlibrary
   76.30 + * @run main TestLargePagesFlags
   76.31 + */
   76.32 +
   76.33 +import com.oracle.java.testlibrary.OutputAnalyzer;
   76.34 +import com.oracle.java.testlibrary.Platform;
   76.35 +import com.oracle.java.testlibrary.ProcessTools;
   76.36 +import java.util.ArrayList;
   76.37 +
   76.38 +public class TestLargePagesFlags {
   76.39 +
   76.40 +  public static void main(String [] args) throws Exception {
   76.41 +    if (!Platform.isLinux()) {
   76.42 +      System.out.println("Skipping. TestLargePagesFlags has only been implemented for Linux.");
   76.43 +      return;
   76.44 +    }
   76.45 +
   76.46 +    testUseTransparentHugePages();
   76.47 +    testUseHugeTLBFS();
   76.48 +    testUseSHM();
   76.49 +    testCombinations();
   76.50 +  }
   76.51 +
   76.52 +  public static void testUseTransparentHugePages() throws Exception {
   76.53 +    if (!canUse(UseTransparentHugePages(true))) {
   76.54 +      System.out.println("Skipping testUseTransparentHugePages");
   76.55 +      return;
   76.56 +    }
   76.57 +
   76.58 +    // -XX:-UseLargePages overrides all other flags.
   76.59 +    new FlagTester()
   76.60 +      .use(UseLargePages(false),
   76.61 +           UseTransparentHugePages(true))
   76.62 +      .expect(
   76.63 +           UseLargePages(false),
   76.64 +           UseTransparentHugePages(false),
   76.65 +           UseHugeTLBFS(false),
   76.66 +           UseSHM(false));
   76.67 +
   76.68 +    // Explicitly turn on UseTransparentHugePages.
   76.69 +    new FlagTester()
   76.70 +      .use(UseTransparentHugePages(true))
   76.71 +      .expect(
   76.72 +           UseLargePages(true),
   76.73 +           UseTransparentHugePages(true),
   76.74 +           UseHugeTLBFS(false),
   76.75 +           UseSHM(false));
   76.76 +
   76.77 +    new FlagTester()
   76.78 +      .use(UseLargePages(true),
   76.79 +           UseTransparentHugePages(true))
   76.80 +      .expect(
   76.81 +           UseLargePages(true),
   76.82 +           UseTransparentHugePages(true),
   76.83 +           UseHugeTLBFS(false),
   76.84 +           UseSHM(false));
   76.85 +
   76.86 +    // Setting a specific large pages flag will turn
   76.87 +    // off heuristics to choose large pages type.
   76.88 +    new FlagTester()
   76.89 +      .use(UseLargePages(true),
   76.90 +           UseTransparentHugePages(false))
   76.91 +      .expect(
   76.92 +           UseLargePages(false),
   76.93 +           UseTransparentHugePages(false),
   76.94 +           UseHugeTLBFS(false),
   76.95 +           UseSHM(false));
   76.96 +
   76.97 +    // Don't turn on UseTransparentHugePages
   76.98 +    // unless the user explicitly asks for them.
   76.99 +    new FlagTester()
  76.100 +      .use(UseLargePages(true))
  76.101 +      .expect(
  76.102 +           UseTransparentHugePages(false));
  76.103 +  }
  76.104 +
  76.105 +  public static void testUseHugeTLBFS() throws Exception {
  76.106 +    if (!canUse(UseHugeTLBFS(true))) {
  76.107 +      System.out.println("Skipping testUseHugeTLBFS");
  76.108 +      return;
  76.109 +    }
  76.110 +
  76.111 +    // -XX:-UseLargePages overrides all other flags.
  76.112 +    new FlagTester()
  76.113 +      .use(UseLargePages(false),
  76.114 +           UseHugeTLBFS(true))
  76.115 +      .expect(
  76.116 +           UseLargePages(false),
  76.117 +           UseTransparentHugePages(false),
  76.118 +           UseHugeTLBFS(false),
  76.119 +           UseSHM(false));
  76.120 +
  76.121 +    // Explicitly turn on UseHugeTLBFS.
  76.122 +    new FlagTester()
  76.123 +      .use(UseHugeTLBFS(true))
  76.124 +      .expect(
  76.125 +           UseLargePages(true),
  76.126 +           UseTransparentHugePages(false),
  76.127 +           UseHugeTLBFS(true),
  76.128 +           UseSHM(false));
  76.129 +
  76.130 +    new FlagTester()
  76.131 +      .use(UseLargePages(true),
  76.132 +           UseHugeTLBFS(true))
  76.133 +      .expect(
  76.134 +           UseLargePages(true),
  76.135 +           UseTransparentHugePages(false),
  76.136 +           UseHugeTLBFS(true),
  76.137 +           UseSHM(false));
  76.138 +
  76.139 +    // Setting a specific large pages flag will turn
  76.140 +    // off heuristics to choose large pages type.
  76.141 +    new FlagTester()
  76.142 +      .use(UseLargePages(true),
  76.143 +           UseHugeTLBFS(false))
  76.144 +      .expect(
  76.145 +           UseLargePages(false),
  76.146 +           UseTransparentHugePages(false),
  76.147 +           UseHugeTLBFS(false),
  76.148 +           UseSHM(false));
  76.149 +
  76.150 +    // Using UseLargePages will default to UseHugeTLBFS large pages.
  76.151 +    new FlagTester()
  76.152 +      .use(UseLargePages(true))
  76.153 +      .expect(
  76.154 +           UseLargePages(true),
  76.155 +           UseTransparentHugePages(false),
  76.156 +           UseHugeTLBFS(true),
  76.157 +           UseSHM(false));
  76.158 +  }
  76.159 +
  76.160 +  public static void testUseSHM() throws Exception {
  76.161 +    if (!canUse(UseSHM(true))) {
  76.162 +      System.out.println("Skipping testUseSHM");
  76.163 +      return;
  76.164 +    }
  76.165 +
  76.166 +    // -XX:-UseLargePages overrides all other flags.
  76.167 +    new FlagTester()
  76.168 +      .use(UseLargePages(false),
  76.169 +           UseSHM(true))
  76.170 +      .expect(
  76.171 +           UseLargePages(false),
  76.172 +           UseTransparentHugePages(false),
  76.173 +           UseHugeTLBFS(false),
  76.174 +           UseSHM(false));
  76.175 +
  76.176 +    // Explicitly turn on UseSHM.
  76.177 +    new FlagTester()
  76.178 +      .use(UseSHM(true))
  76.179 +      .expect(
  76.180 +           UseLargePages(true),
  76.181 +           UseTransparentHugePages(false),
  76.182 +           UseHugeTLBFS(false),
  76.183 +           UseSHM(true)) ;
  76.184 +
  76.185 +    new FlagTester()
  76.186 +      .use(UseLargePages(true),
  76.187 +           UseSHM(true))
  76.188 +      .expect(
  76.189 +           UseLargePages(true),
  76.190 +           UseTransparentHugePages(false),
  76.191 +           UseHugeTLBFS(false),
  76.192 +           UseSHM(true)) ;
  76.193 +
  76.194 +    // Setting a specific large pages flag will turn
  76.195 +    // off heuristics to choose large pages type.
  76.196 +    new FlagTester()
  76.197 +      .use(UseLargePages(true),
  76.198 +           UseSHM(false))
  76.199 +      .expect(
  76.200 +           UseLargePages(false),
  76.201 +           UseTransparentHugePages(false),
  76.202 +           UseHugeTLBFS(false),
  76.203 +           UseSHM(false));
  76.204 +
  76.205 +    // Setting UseLargePages can allow the system to choose
  76.206 +    // UseHugeTLBFS instead of UseSHM, but never UseTransparentHugePages.
  76.207 +    new FlagTester()
  76.208 +      .use(UseLargePages(true))
  76.209 +      .expect(
  76.210 +           UseLargePages(true),
  76.211 +           UseTransparentHugePages(false));
  76.212 +  }
  76.213 +
  76.214 +  public static void testCombinations() throws Exception {
  76.215 +    if (!canUse(UseSHM(true)) || !canUse(UseHugeTLBFS(true))) {
  76.216 +      System.out.println("Skipping testUseHugeTLBFSAndUseSHMCombination");
  76.217 +      return;
  76.218 +    }
  76.219 +
  76.220 +    // UseHugeTLBFS takes precedence over SHM.
  76.221 +
  76.222 +    new FlagTester()
  76.223 +      .use(UseLargePages(true),
  76.224 +           UseHugeTLBFS(true),
  76.225 +           UseSHM(true))
  76.226 +      .expect(
  76.227 +           UseLargePages(true),
  76.228 +           UseTransparentHugePages(false),
  76.229 +           UseHugeTLBFS(true),
  76.230 +           UseSHM(false));
  76.231 +
  76.232 +    new FlagTester()
  76.233 +      .use(UseLargePages(true),
  76.234 +           UseHugeTLBFS(false),
  76.235 +           UseSHM(true))
  76.236 +      .expect(
  76.237 +           UseLargePages(true),
  76.238 +           UseTransparentHugePages(false),
  76.239 +           UseHugeTLBFS(false),
  76.240 +           UseSHM(true));
  76.241 +
  76.242 +    new FlagTester()
  76.243 +      .use(UseLargePages(true),
  76.244 +           UseHugeTLBFS(true),
  76.245 +           UseSHM(false))
  76.246 +      .expect(
  76.247 +           UseLargePages(true),
  76.248 +           UseTransparentHugePages(false),
  76.249 +           UseHugeTLBFS(true),
  76.250 +           UseSHM(false));
  76.251 +
  76.252 +    new FlagTester()
  76.253 +      .use(UseLargePages(true),
  76.254 +           UseHugeTLBFS(false),
  76.255 +           UseSHM(false))
  76.256 +      .expect(
  76.257 +           UseLargePages(false),
  76.258 +           UseTransparentHugePages(false),
  76.259 +           UseHugeTLBFS(false),
  76.260 +           UseSHM(false));
  76.261 +
  76.262 +
  76.263 +    if (!canUse(UseTransparentHugePages(true))) {
  76.264 +      return;
  76.265 +    }
  76.266 +
  76.267 +    // UseTransparentHugePages takes precedence.
  76.268 +
  76.269 +    new FlagTester()
  76.270 +      .use(UseLargePages(true),
  76.271 +           UseTransparentHugePages(true),
  76.272 +           UseHugeTLBFS(true),
  76.273 +           UseSHM(true))
  76.274 +      .expect(
  76.275 +           UseLargePages(true),
  76.276 +           UseTransparentHugePages(true),
  76.277 +           UseHugeTLBFS(false),
  76.278 +           UseSHM(false));
  76.279 +
  76.280 +    new FlagTester()
  76.281 +      .use(UseTransparentHugePages(true),
  76.282 +           UseHugeTLBFS(true),
  76.283 +           UseSHM(true))
  76.284 +      .expect(
  76.285 +           UseLargePages(true),
  76.286 +           UseTransparentHugePages(true),
  76.287 +           UseHugeTLBFS(false),
  76.288 +           UseSHM(false));
  76.289 +  }
  76.290 +
  76.291 +  private static class FlagTester {
  76.292 +    private Flag [] useFlags;
  76.293 +
  76.294 +    public FlagTester use(Flag... useFlags) {
  76.295 +      this.useFlags = useFlags;
  76.296 +      return this;
  76.297 +    }
  76.298 +
  76.299 +    public void expect(Flag... expectedFlags) throws Exception {
  76.300 +      if (useFlags == null) {
  76.301 +        throw new IllegalStateException("Must run use() before expect()");
  76.302 +      }
  76.303 +
  76.304 +      OutputAnalyzer output = executeNewJVM(useFlags);
  76.305 +
  76.306 +      for (Flag flag : expectedFlags) {
  76.307 +        System.out.println("Looking for: " + flag.flagString());
  76.308 +        String strValue = output.firstMatch(".* " + flag.name() +  " .* :?= (\\S+).*", 1);
  76.309 +
  76.310 +        if (strValue == null) {
  76.311 +          throw new RuntimeException("Flag " + flag.name() + " couldn't be found");
  76.312 +        }
  76.313 +
  76.314 +        if (!flag.value().equals(strValue)) {
  76.315 +          throw new RuntimeException("Wrong value for: " + flag.name()
  76.316 +                                     + " expected: " + flag.value()
  76.317 +                                     + " got: " + strValue);
  76.318 +        }
  76.319 +      }
  76.320 +
  76.321 +      output.shouldHaveExitValue(0);
  76.322 +    }
  76.323 +  }
  76.324 +
  76.325 +  private static OutputAnalyzer executeNewJVM(Flag... flags) throws Exception {
  76.326 +    ArrayList<String> args = new ArrayList<>();
  76.327 +    for (Flag flag : flags) {
  76.328 +      args.add(flag.flagString());
  76.329 +    }
  76.330 +    args.add("-XX:+PrintFlagsFinal");
  76.331 +    args.add("-version");
  76.332 +
  76.333 +    ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(args.toArray(new String[args.size()]));
  76.334 +    OutputAnalyzer output = new OutputAnalyzer(pb.start());
  76.335 +
  76.336 +    return output;
  76.337 +  }
  76.338 +
  76.339 +  private static boolean canUse(Flag flag) {
  76.340 +    try {
  76.341 +      new FlagTester().use(flag).expect(flag);
  76.342 +    } catch (Exception e) {
  76.343 +      return false;
  76.344 +    }
  76.345 +
  76.346 +    return true;
  76.347 +  }
  76.348 +
  76.349 +  private static Flag UseLargePages(boolean value) {
  76.350 +    return new BooleanFlag("UseLargePages", value);
  76.351 +  }
  76.352 +
  76.353 +  private static Flag UseTransparentHugePages(boolean value) {
  76.354 +    return new BooleanFlag("UseTransparentHugePages", value);
  76.355 +  }
  76.356 +
  76.357 +  private static Flag UseHugeTLBFS(boolean value) {
  76.358 +    return new BooleanFlag("UseHugeTLBFS", value);
  76.359 +  }
  76.360 +
  76.361 +  private static Flag UseSHM(boolean value) {
  76.362 +    return new BooleanFlag("UseSHM", value);
  76.363 +  }
  76.364 +
  76.365 +  private static class BooleanFlag implements Flag {
  76.366 +    private String name;
  76.367 +    private boolean value;
  76.368 +
  76.369 +    BooleanFlag(String name, boolean value) {
  76.370 +      this.name = name;
  76.371 +      this.value = value;
  76.372 +    }
  76.373 +
  76.374 +    public String flagString() {
  76.375 +      return "-XX:" + (value ? "+" : "-") + name;
  76.376 +    }
  76.377 +
  76.378 +    public String name() {
  76.379 +      return name;
  76.380 +    }
  76.381 +
  76.382 +    public String value() {
  76.383 +      return Boolean.toString(value);
  76.384 +    }
  76.385 +  }
  76.386 +
  76.387 +  private static interface Flag {
  76.388 +    public String flagString();
  76.389 +    public String name();
  76.390 +    public String value();
  76.391 +  }
  76.392 +}
    77.1 --- a/test/runtime/memory/ReserveMemory.java	Sun Oct 13 21:14:04 2013 +0100
    77.2 +++ b/test/runtime/memory/ReserveMemory.java	Thu Oct 17 14:20:57 2013 -0700
    77.3 @@ -56,6 +56,7 @@
    77.4            "-Xbootclasspath/a:.",
    77.5            "-XX:+UnlockDiagnosticVMOptions",
    77.6            "-XX:+WhiteBoxAPI",
    77.7 +          "-XX:-TransmitErrorReport",
    77.8            "ReserveMemory",
    77.9            "test");
   77.10  
    78.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    78.2 +++ b/test/serviceability/sa/jmap-hprof/JMapHProfLargeHeapProc.java	Thu Oct 17 14:20:57 2013 -0700
    78.3 @@ -0,0 +1,71 @@
    78.4 +/*
    78.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
    78.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    78.7 + *
    78.8 + * This code is free software; you can redistribute it and/or modify it
    78.9 + * under the terms of the GNU General Public License version 2 only, as
   78.10 + * published by the Free Software Foundation.
   78.11 + *
   78.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   78.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   78.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   78.15 + * version 2 for more details (a copy is included in the LICENSE file that
   78.16 + * accompanied this code).
   78.17 + *
   78.18 + * You should have received a copy of the GNU General Public License version
   78.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   78.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   78.21 + *
   78.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   78.23 + * or visit www.oracle.com if you need additional information or have any
   78.24 + * questions.
   78.25 + */
   78.26 +
   78.27 +import java.lang.management.ManagementFactory;
   78.28 +import java.lang.management.RuntimeMXBean;
   78.29 +import java.lang.reflect.Field;
   78.30 +import java.lang.reflect.Method;
   78.31 +import java.util.ArrayList;
   78.32 +import java.util.List;
   78.33 +
   78.34 +import sun.management.VMManagement;
   78.35 +
   78.36 +public class JMapHProfLargeHeapProc {
   78.37 +    private static final List<byte[]> heapGarbage = new ArrayList<>();
   78.38 +
   78.39 +    public static void main(String[] args) throws Exception {
   78.40 +
   78.41 +        buildLargeHeap(args);
   78.42 +
   78.43 +        // Print our pid on stdout
   78.44 +        System.out.println("PID[" + getProcessId() + "]");
   78.45 +
   78.46 +        // Wait for input before termination
   78.47 +        System.in.read();
   78.48 +    }
   78.49 +
   78.50 +    private static void buildLargeHeap(String[] args) {
   78.51 +        for (long i = 0; i < Integer.parseInt(args[0]); i++) {
   78.52 +            heapGarbage.add(new byte[1024]);
   78.53 +        }
   78.54 +    }
   78.55 +
   78.56 +    public static int getProcessId() throws Exception {
   78.57 +
   78.58 +        // Get the current process id using a reflection hack
   78.59 +        RuntimeMXBean runtime = ManagementFactory.getRuntimeMXBean();
   78.60 +        Field jvm = runtime.getClass().getDeclaredField("jvm");
   78.61 +
   78.62 +        jvm.setAccessible(true);
   78.63 +        VMManagement mgmt = (sun.management.VMManagement) jvm.get(runtime);
   78.64 +
   78.65 +        Method pid_method = mgmt.getClass().getDeclaredMethod("getProcessId");
   78.66 +
   78.67 +        pid_method.setAccessible(true);
   78.68 +
   78.69 +        int pid = (Integer) pid_method.invoke(mgmt);
   78.70 +
   78.71 +        return pid;
   78.72 +    }
   78.73 +
   78.74 +}
    79.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    79.2 +++ b/test/serviceability/sa/jmap-hprof/JMapHProfLargeHeapTest.java	Thu Oct 17 14:20:57 2013 -0700
    79.3 @@ -0,0 +1,146 @@
    79.4 +/*
    79.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
    79.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    79.7 + *
    79.8 + * This code is free software; you can redistribute it and/or modify it
    79.9 + * under the terms of the GNU General Public License version 2 only, as
   79.10 + * published by the Free Software Foundation.
   79.11 + *
   79.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   79.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   79.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   79.15 + * version 2 for more details (a copy is included in the LICENSE file that
   79.16 + * accompanied this code).
   79.17 + *
   79.18 + * You should have received a copy of the GNU General Public License version
   79.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   79.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   79.21 + *
   79.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   79.23 + * or visit www.oracle.com if you need additional information or have any
   79.24 + * questions.
   79.25 + */
   79.26 +
   79.27 +import java.io.BufferedReader;
   79.28 +import java.io.File;
   79.29 +import java.io.FileNotFoundException;
   79.30 +import java.io.FileReader;
   79.31 +import java.io.IOException;
   79.32 +import java.io.Reader;
   79.33 +import java.nio.CharBuffer;
   79.34 +import java.util.Arrays;
   79.35 +import java.util.Scanner;
   79.36 +
   79.37 +import com.oracle.java.testlibrary.Asserts;
   79.38 +import com.oracle.java.testlibrary.JDKToolFinder;
   79.39 +import com.oracle.java.testlibrary.JDKToolLauncher;
   79.40 +import com.oracle.java.testlibrary.OutputAnalyzer;
   79.41 +import com.oracle.java.testlibrary.Platform;
   79.42 +import com.oracle.java.testlibrary.ProcessTools;
   79.43 +
   79.44 +/*
   79.45 + * @test
   79.46 + * @bug 6313383
   79.47 + * @key regression
   79.48 + * @summary Regression test for hprof export issue due to large heaps (>2G)
   79.49 + * @library /testlibrary
   79.50 + * @compile JMapHProfLargeHeapProc.java
   79.51 + * @run main JMapHProfLargeHeapTest
   79.52 + */
   79.53 +
   79.54 +public class JMapHProfLargeHeapTest {
   79.55 +    private static final String HEAP_DUMP_FILE_NAME = "heap.hprof";
   79.56 +    private static final String HPROF_HEADER_1_0_1 = "JAVA PROFILE 1.0.1";
   79.57 +    private static final String HPROF_HEADER_1_0_2 = "JAVA PROFILE 1.0.2";
   79.58 +    private static final long M = 1024L;
   79.59 +    private static final long G = 1024L * M;
   79.60 +
   79.61 +    public static void main(String[] args) throws Exception {
   79.62 +        // If we are on MacOSX, test if JMap tool is signed, otherwise return
   79.63 +        // since test will fail with privilege error.
   79.64 +        if (Platform.isOSX()) {
   79.65 +            String jmapToolPath = JDKToolFinder.getCurrentJDKTool("jmap");
   79.66 +            ProcessBuilder codesignProcessBuilder = new ProcessBuilder(
   79.67 +                    "codesign", "-v", jmapToolPath);
   79.68 +            Process codesignProcess = codesignProcessBuilder.start();
   79.69 +            OutputAnalyzer analyser = new OutputAnalyzer(codesignProcess);
   79.70 +            try {
   79.71 +                analyser.shouldNotContain("code object is not signed at all");
   79.72 +                System.out.println("Signed jmap found at: " + jmapToolPath);
   79.73 +            } catch (Exception e) {
   79.74 +                // Abort since we can't know if the test will work
   79.75 +                System.out
   79.76 +                        .println("Test aborted since we are on MacOSX and the jmap tool is not signed.");
   79.77 +                return;
   79.78 +            }
   79.79 +        }
   79.80 +
   79.81 +        // Small heap 22 megabytes, should create 1.0.1 file format
   79.82 +        testHProfFileFormat("-Xmx1g", 22 * M, HPROF_HEADER_1_0_1);
   79.83 +
   79.84 +        /**
   79.85 +         * This test was deliberately commented out since the test system lacks
   79.86 +         * support to handle the requirements for this kind of heap size in a
   79.87 +         * good way. If or when it becomes possible to run this kind of tests in
   79.88 +         * the test environment the test should be enabled again.
   79.89 +         * */
   79.90 +        // Large heap 2,2 gigabytes, should create 1.0.2 file format
   79.91 +        // testHProfFileFormat("-Xmx4g", 2 * G + 2 * M, HPROF_HEADER_1_0_2);
   79.92 +    }
   79.93 +
   79.94 +    private static void testHProfFileFormat(String vmArgs, long heapSize,
   79.95 +            String expectedFormat) throws Exception, IOException,
   79.96 +            InterruptedException, FileNotFoundException {
   79.97 +        ProcessBuilder procBuilder = ProcessTools.createJavaProcessBuilder(
   79.98 +                vmArgs, "JMapHProfLargeHeapProc", String.valueOf(heapSize));
   79.99 +        procBuilder.redirectError(ProcessBuilder.Redirect.INHERIT);
  79.100 +        Process largeHeapProc = procBuilder.start();
  79.101 +
  79.102 +        try (Scanner largeHeapScanner = new Scanner(
  79.103 +                largeHeapProc.getInputStream());) {
  79.104 +            String pidstring = null;
  79.105 +            while ((pidstring = largeHeapScanner.findInLine("PID\\[[0-9].*\\]")) == null) {
  79.106 +                Thread.sleep(500);
  79.107 +            }
  79.108 +            int pid = Integer.parseInt(pidstring.substring(4,
  79.109 +                    pidstring.length() - 1));
  79.110 +            System.out.println("Extracted pid: " + pid);
  79.111 +
  79.112 +            JDKToolLauncher jMapLauncher = JDKToolLauncher
  79.113 +                    .create("jmap", false);
  79.114 +            jMapLauncher.addToolArg("-dump:format=b,file=" + pid + "-"
  79.115 +                    + HEAP_DUMP_FILE_NAME);
  79.116 +            jMapLauncher.addToolArg(String.valueOf(pid));
  79.117 +
  79.118 +            ProcessBuilder jMapProcessBuilder = new ProcessBuilder(
  79.119 +                    jMapLauncher.getCommand());
  79.120 +            System.out.println("jmap command: "
  79.121 +                    + Arrays.toString(jMapLauncher.getCommand()));
  79.122 +
  79.123 +            Process jMapProcess = jMapProcessBuilder.start();
  79.124 +            OutputAnalyzer analyzer = new OutputAnalyzer(jMapProcess);
  79.125 +            analyzer.shouldHaveExitValue(0);
  79.126 +            analyzer.shouldContain(pid + "-" + HEAP_DUMP_FILE_NAME);
  79.127 +            analyzer.shouldContain("Heap dump file created");
  79.128 +
  79.129 +            largeHeapProc.getOutputStream().write('\n');
  79.130 +
  79.131 +            File dumpFile = new File(pid + "-" + HEAP_DUMP_FILE_NAME);
  79.132 +            Asserts.assertTrue(dumpFile.exists(), "Heap dump file not found.");
  79.133 +
  79.134 +            try (Reader reader = new BufferedReader(new FileReader(dumpFile))) {
  79.135 +                CharBuffer buf = CharBuffer.allocate(expectedFormat.length());
  79.136 +                reader.read(buf);
  79.137 +                buf.clear();
  79.138 +                Asserts.assertEQ(buf.toString(), expectedFormat,
  79.139 +                        "Wrong file format. Expected '" + expectedFormat
  79.140 +                                + "', but found '" + buf.toString() + "'");
  79.141 +            }
  79.142 +
  79.143 +            System.out.println("Success!");
  79.144 +
  79.145 +        } finally {
  79.146 +            largeHeapProc.destroyForcibly();
  79.147 +        }
  79.148 +    }
  79.149 +}
    80.1 --- a/test/testlibrary/com/oracle/java/testlibrary/JDKToolLauncher.java	Sun Oct 13 21:14:04 2013 +0100
    80.2 +++ b/test/testlibrary/com/oracle/java/testlibrary/JDKToolLauncher.java	Thu Oct 17 14:20:57 2013 -0700
    80.3 @@ -23,20 +23,17 @@
    80.4  
    80.5  package com.oracle.java.testlibrary;
    80.6  
    80.7 -import java.util.List;
    80.8  import java.util.ArrayList;
    80.9  import java.util.Arrays;
   80.10 -
   80.11 -import com.oracle.java.testlibrary.JDKToolFinder;
   80.12 -import com.oracle.java.testlibrary.ProcessTools;
   80.13 +import java.util.List;
   80.14  
   80.15  /**
   80.16   * A utility for constructing command lines for starting JDK tool processes.
   80.17   *
   80.18   * The JDKToolLauncher can in particular be combined with a
   80.19 - * java.lang.ProcessBuilder to easily run a JDK tool. For example, the
   80.20 - * following code run {@code jmap -heap} against a process with GC logging
   80.21 - * turned on for the {@code jmap} process:
   80.22 + * java.lang.ProcessBuilder to easily run a JDK tool. For example, the following
   80.23 + * code run {@code jmap -heap} against a process with GC logging turned on for
   80.24 + * the {@code jmap} process:
   80.25   *
   80.26   * <pre>
   80.27   * {@code
   80.28 @@ -55,19 +52,39 @@
   80.29      private final List<String> vmArgs = new ArrayList<String>();
   80.30      private final List<String> toolArgs = new ArrayList<String>();
   80.31  
   80.32 -    private JDKToolLauncher(String tool) {
   80.33 -        executable = JDKToolFinder.getJDKTool(tool);
   80.34 +    private JDKToolLauncher(String tool, boolean useCompilerJDK) {
   80.35 +        if (useCompilerJDK) {
   80.36 +            executable = JDKToolFinder.getJDKTool(tool);
   80.37 +        } else {
   80.38 +            executable = JDKToolFinder.getCurrentJDKTool(tool);
   80.39 +        }
   80.40          vmArgs.addAll(Arrays.asList(ProcessTools.getPlatformSpecificVMArgs()));
   80.41      }
   80.42  
   80.43      /**
   80.44 +     * Creates a new JDKToolLauncher for the specified tool. Using tools path
   80.45 +     * from the compiler JDK.
   80.46 +     *
   80.47 +     * @param tool
   80.48 +     *            The name of the tool
   80.49 +     * @return A new JDKToolLauncher
   80.50 +     */
   80.51 +    public static JDKToolLauncher create(String tool) {
   80.52 +        return new JDKToolLauncher(tool, true);
   80.53 +    }
   80.54 +
   80.55 +    /**
   80.56       * Creates a new JDKToolLauncher for the specified tool.
   80.57       *
   80.58 -     * @param tool The name of the tool
   80.59 +     * @param tool
   80.60 +     *            The name of the tool
   80.61 +     * @param useCompilerPath
   80.62 +     *            If true use the compiler JDK path, otherwise use the tested
   80.63 +     *            JDK path.
   80.64       * @return A new JDKToolLauncher
   80.65       */
   80.66 -    public static JDKToolLauncher create(String tool) {
   80.67 -        return new JDKToolLauncher(tool);
   80.68 +    public static JDKToolLauncher create(String tool, boolean useCompilerJDK) {
   80.69 +        return new JDKToolLauncher(tool, useCompilerJDK);
   80.70      }
   80.71  
   80.72      /**
   80.73 @@ -80,7 +97,8 @@
   80.74       * automatically added.
   80.75       *
   80.76       *
   80.77 -     * @param arg The argument to VM running the tool
   80.78 +     * @param arg
   80.79 +     *            The argument to VM running the tool
   80.80       * @return The JDKToolLauncher instance
   80.81       */
   80.82      public JDKToolLauncher addVMArg(String arg) {
   80.83 @@ -91,7 +109,8 @@
   80.84      /**
   80.85       * Adds an argument to the tool.
   80.86       *
   80.87 -     * @param arg The argument to the tool
   80.88 +     * @param arg
   80.89 +     *            The argument to the tool
   80.90       * @return The JDKToolLauncher instance
   80.91       */
   80.92      public JDKToolLauncher addToolArg(String arg) {

mercurial