Merge

Tue, 01 Oct 2013 09:21:43 -0400

author
zgu
date
Tue, 01 Oct 2013 09:21:43 -0400
changeset 5788
90b27e931639
parent 5787
de059a14e159
parent 5785
a5ac0873476c
child 5789
31f0118ea584

Merge

     1.1 --- a/.hgtags	Tue Oct 01 08:54:05 2013 -0400
     1.2 +++ b/.hgtags	Tue Oct 01 09:21:43 2013 -0400
     1.3 @@ -379,3 +379,5 @@
     1.4  a09fe9d1e016c285307507a5793bc4fa6215e9c9 hs25-b50
     1.5  85072013aad46050a362d10ab78e963121c8014c jdk8-b108
     1.6  566db1b0e6efca31f181456e54c8911d0192410d hs25-b51
     1.7 +c81dd5393a5e333df7cb1f6621f5897ada6522b5 jdk8-b109
     1.8 +58043478c26d4e8bf48700acea5f97aba8b417d4 hs25-b52
     2.1 --- a/make/bsd/makefiles/fastdebug.make	Tue Oct 01 08:54:05 2013 -0400
     2.2 +++ b/make/bsd/makefiles/fastdebug.make	Tue Oct 01 09:21:43 2013 -0400
     2.3 @@ -1,5 +1,5 @@
     2.4  #
     2.5 -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
     2.6 +# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
     2.7  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     2.8  #
     2.9  # This code is free software; you can redistribute it and/or modify it
    2.10 @@ -59,5 +59,5 @@
    2.11  MAPFILE = $(GAMMADIR)/make/bsd/makefiles/mapfile-vers-debug
    2.12  
    2.13  VERSION = fastdebug
    2.14 -SYSDEFS += -DASSERT
    2.15 +SYSDEFS += -DASSERT -DCHECK_UNHANDLED_OOPS
    2.16  PICFLAGS = DEFAULT
     3.1 --- a/make/excludeSrc.make	Tue Oct 01 08:54:05 2013 -0400
     3.2 +++ b/make/excludeSrc.make	Tue Oct 01 09:21:43 2013 -0400
     3.3 @@ -88,7 +88,7 @@
     3.4  	g1ErgoVerbose.cpp g1GCPhaseTimes.cpp g1HRPrinter.cpp g1HotCardCache.cpp g1Log.cpp \
     3.5  	g1MMUTracker.cpp g1MarkSweep.cpp g1MemoryPool.cpp g1MonitoringSupport.cpp \
     3.6  	g1RemSet.cpp g1RemSetSummary.cpp g1SATBCardTableModRefBS.cpp g1_globals.cpp heapRegion.cpp \
     3.7 -	heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \
     3.8 +	g1BiasedArray.cpp heapRegionRemSet.cpp heapRegionSeq.cpp heapRegionSet.cpp heapRegionSets.cpp \
     3.9  	ptrQueue.cpp satbQueue.cpp sparsePRT.cpp survRateGroup.cpp vm_operations_g1.cpp \
    3.10  	adjoiningGenerations.cpp adjoiningVirtualSpaces.cpp asPSOldGen.cpp asPSYoungGen.cpp \
    3.11  	cardTableExtension.cpp gcTaskManager.cpp gcTaskThread.cpp objectStartArray.cpp \
     4.1 --- a/make/hotspot_version	Tue Oct 01 08:54:05 2013 -0400
     4.2 +++ b/make/hotspot_version	Tue Oct 01 09:21:43 2013 -0400
     4.3 @@ -35,7 +35,7 @@
     4.4  
     4.5  HS_MAJOR_VER=25
     4.6  HS_MINOR_VER=0
     4.7 -HS_BUILD_NUMBER=51
     4.8 +HS_BUILD_NUMBER=53
     4.9  
    4.10  JDK_MAJOR_VER=1
    4.11  JDK_MINOR_VER=8
     5.1 --- a/make/jprt.properties	Tue Oct 01 08:54:05 2013 -0400
     5.2 +++ b/make/jprt.properties	Tue Oct 01 09:21:43 2013 -0400
     5.3 @@ -120,13 +120,13 @@
     5.4  jprt.my.macosx.x64.jdk7u8=${jprt.my.macosx.x64.jdk7}
     5.5  jprt.my.macosx.x64=${jprt.my.macosx.x64.${jprt.tools.default.release}}
     5.6  
     5.7 -jprt.my.windows.i586.jdk8=windows_i586_5.1
     5.8 -jprt.my.windows.i586.jdk7=windows_i586_5.1
     5.9 +jprt.my.windows.i586.jdk8=windows_i586_6.1
    5.10 +jprt.my.windows.i586.jdk7=windows_i586_6.1
    5.11  jprt.my.windows.i586.jdk7u8=${jprt.my.windows.i586.jdk7}
    5.12  jprt.my.windows.i586=${jprt.my.windows.i586.${jprt.tools.default.release}}
    5.13  
    5.14 -jprt.my.windows.x64.jdk8=windows_x64_5.2
    5.15 -jprt.my.windows.x64.jdk7=windows_x64_5.2
    5.16 +jprt.my.windows.x64.jdk8=windows_x64_6.1
    5.17 +jprt.my.windows.x64.jdk7=windows_x64_6.1
    5.18  jprt.my.windows.x64.jdk7u8=${jprt.my.windows.x64.jdk7}
    5.19  jprt.my.windows.x64=${jprt.my.windows.x64.${jprt.tools.default.release}}
    5.20  
     6.1 --- a/make/linux/makefiles/fastdebug.make	Tue Oct 01 08:54:05 2013 -0400
     6.2 +++ b/make/linux/makefiles/fastdebug.make	Tue Oct 01 09:21:43 2013 -0400
     6.3 @@ -59,5 +59,5 @@
     6.4  MAPFILE = $(GAMMADIR)/make/linux/makefiles/mapfile-vers-debug
     6.5  
     6.6  VERSION = optimized
     6.7 -SYSDEFS += -DASSERT
     6.8 +SYSDEFS += -DASSERT -DCHECK_UNHANDLED_OOPS
     6.9  PICFLAGS = DEFAULT
     7.1 --- a/make/windows/makefiles/fastdebug.make	Tue Oct 01 08:54:05 2013 -0400
     7.2 +++ b/make/windows/makefiles/fastdebug.make	Tue Oct 01 09:21:43 2013 -0400
     7.3 @@ -1,5 +1,5 @@
     7.4  #
     7.5 -# Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
     7.6 +# Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
     7.7  # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     7.8  #
     7.9  # This code is free software; you can redistribute it and/or modify it
    7.10 @@ -38,7 +38,7 @@
    7.11  !include ../local.make
    7.12  !include compile.make
    7.13  
    7.14 -CXX_FLAGS=$(CXX_FLAGS) $(FASTDEBUG_OPT_OPTION)
    7.15 +CXX_FLAGS=$(CXX_FLAGS) $(FASTDEBUG_OPT_OPTION) /D "CHECK_UNHANDLED_OOPS"
    7.16  
    7.17  !include $(WorkSpace)/make/windows/makefiles/vm.make
    7.18  !include local.make
     8.1 --- a/src/cpu/sparc/vm/frame_sparc.cpp	Tue Oct 01 08:54:05 2013 -0400
     8.2 +++ b/src/cpu/sparc/vm/frame_sparc.cpp	Tue Oct 01 09:21:43 2013 -0400
     8.3 @@ -764,7 +764,7 @@
     8.4  #ifdef CC_INTERP
     8.5          *oop_result = istate->_oop_temp;
     8.6  #else
     8.7 -        oop obj = (oop) at(interpreter_frame_oop_temp_offset);
     8.8 +        oop obj = cast_to_oop(at(interpreter_frame_oop_temp_offset));
     8.9          assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
    8.10          *oop_result = obj;
    8.11  #endif // CC_INTERP
    8.12 @@ -788,7 +788,7 @@
    8.13      switch(type) {
    8.14        case T_OBJECT:
    8.15        case T_ARRAY: {
    8.16 -        oop obj = (oop)*tos_addr;
    8.17 +        oop obj = cast_to_oop(*tos_addr);
    8.18          assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
    8.19          *oop_result = obj;
    8.20          break;
     9.1 --- a/src/cpu/sparc/vm/nativeInst_sparc.cpp	Tue Oct 01 08:54:05 2013 -0400
     9.2 +++ b/src/cpu/sparc/vm/nativeInst_sparc.cpp	Tue Oct 01 09:21:43 2013 -0400
     9.3 @@ -1,5 +1,5 @@
     9.4  /*
     9.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
     9.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
     9.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     9.8   *
     9.9   * This code is free software; you can redistribute it and/or modify it
    9.10 @@ -358,7 +358,7 @@
    9.11          oop_Relocation *r = iter.oop_reloc();
    9.12          if (oop_addr == NULL) {
    9.13            oop_addr = r->oop_addr();
    9.14 -          *oop_addr = (oop)x;
    9.15 +          *oop_addr = cast_to_oop(x);
    9.16          } else {
    9.17            assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
    9.18          }
    9.19 @@ -478,7 +478,7 @@
    9.20          oop_Relocation *r = iter.oop_reloc();
    9.21          if (oop_addr == NULL) {
    9.22            oop_addr = r->oop_addr();
    9.23 -          *oop_addr = (oop)x;
    9.24 +          *oop_addr = cast_to_oop(x);
    9.25          } else {
    9.26            assert(oop_addr == r->oop_addr(), "must be only one set-oop here");
    9.27          }
    10.1 --- a/src/cpu/sparc/vm/vtableStubs_sparc.cpp	Tue Oct 01 08:54:05 2013 -0400
    10.2 +++ b/src/cpu/sparc/vm/vtableStubs_sparc.cpp	Tue Oct 01 09:21:43 2013 -0400
    10.3 @@ -52,6 +52,11 @@
    10.4  VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
    10.5    const int sparc_code_length = VtableStub::pd_code_size_limit(true);
    10.6    VtableStub* s = new(sparc_code_length) VtableStub(true, vtable_index);
    10.7 +  // Can be NULL if there is no free space in the code cache.
    10.8 +  if (s == NULL) {
    10.9 +    return NULL;
   10.10 +  }
   10.11 +
   10.12    ResourceMark rm;
   10.13    CodeBuffer cb(s->entry_point(), sparc_code_length);
   10.14    MacroAssembler* masm = new MacroAssembler(&cb);
   10.15 @@ -125,6 +130,11 @@
   10.16  VtableStub* VtableStubs::create_itable_stub(int itable_index) {
   10.17    const int sparc_code_length = VtableStub::pd_code_size_limit(false);
   10.18    VtableStub* s = new(sparc_code_length) VtableStub(false, itable_index);
   10.19 +  // Can be NULL if there is no free space in the code cache.
   10.20 +  if (s == NULL) {
   10.21 +    return NULL;
   10.22 +  }
   10.23 +
   10.24    ResourceMark rm;
   10.25    CodeBuffer cb(s->entry_point(), sparc_code_length);
   10.26    MacroAssembler* masm = new MacroAssembler(&cb);
    11.1 --- a/src/cpu/x86/vm/frame_x86.cpp	Tue Oct 01 08:54:05 2013 -0400
    11.2 +++ b/src/cpu/x86/vm/frame_x86.cpp	Tue Oct 01 09:21:43 2013 -0400
    11.3 @@ -639,7 +639,7 @@
    11.4  #ifdef CC_INTERP
    11.5          obj = istate->_oop_temp;
    11.6  #else
    11.7 -        obj = (oop) at(interpreter_frame_oop_temp_offset);
    11.8 +        obj = cast_to_oop(at(interpreter_frame_oop_temp_offset));
    11.9  #endif // CC_INTERP
   11.10        } else {
   11.11          oop* obj_p = (oop*)tos_addr;
    12.1 --- a/src/cpu/x86/vm/methodHandles_x86.cpp	Tue Oct 01 08:54:05 2013 -0400
    12.2 +++ b/src/cpu/x86/vm/methodHandles_x86.cpp	Tue Oct 01 09:21:43 2013 -0400
    12.3 @@ -1,5 +1,5 @@
    12.4  /*
    12.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    12.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    12.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    12.8   *
    12.9   * This code is free software; you can redistribute it and/or modify it
   12.10 @@ -475,7 +475,7 @@
   12.11    const char* mh_reg_name = has_mh ? "rcx_mh" : "rcx";
   12.12    tty->print_cr("MH %s %s="PTR_FORMAT" sp="PTR_FORMAT,
   12.13                  adaptername, mh_reg_name,
   12.14 -                mh, entry_sp);
   12.15 +                (void *)mh, entry_sp);
   12.16  
   12.17    if (Verbose) {
   12.18      tty->print_cr("Registers:");
    13.1 --- a/src/cpu/x86/vm/vtableStubs_x86_32.cpp	Tue Oct 01 08:54:05 2013 -0400
    13.2 +++ b/src/cpu/x86/vm/vtableStubs_x86_32.cpp	Tue Oct 01 09:21:43 2013 -0400
    13.3 @@ -58,6 +58,11 @@
    13.4  VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
    13.5    const int i486_code_length = VtableStub::pd_code_size_limit(true);
    13.6    VtableStub* s = new(i486_code_length) VtableStub(true, vtable_index);
    13.7 +  // Can be NULL if there is no free space in the code cache.
    13.8 +  if (s == NULL) {
    13.9 +    return NULL;
   13.10 +  }
   13.11 +
   13.12    ResourceMark rm;
   13.13    CodeBuffer cb(s->entry_point(), i486_code_length);
   13.14    MacroAssembler* masm = new MacroAssembler(&cb);
   13.15 @@ -132,6 +137,11 @@
   13.16    //            add code here, bump the code stub size returned by pd_code_size_limit!
   13.17    const int i486_code_length = VtableStub::pd_code_size_limit(false);
   13.18    VtableStub* s = new(i486_code_length) VtableStub(false, itable_index);
   13.19 +  // Can be NULL if there is no free space in the code cache.
   13.20 +  if (s == NULL) {
   13.21 +    return NULL;
   13.22 +  }
   13.23 +
   13.24    ResourceMark rm;
   13.25    CodeBuffer cb(s->entry_point(), i486_code_length);
   13.26    MacroAssembler* masm = new MacroAssembler(&cb);
    14.1 --- a/src/cpu/x86/vm/vtableStubs_x86_64.cpp	Tue Oct 01 08:54:05 2013 -0400
    14.2 +++ b/src/cpu/x86/vm/vtableStubs_x86_64.cpp	Tue Oct 01 09:21:43 2013 -0400
    14.3 @@ -49,6 +49,11 @@
    14.4  VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
    14.5    const int amd64_code_length = VtableStub::pd_code_size_limit(true);
    14.6    VtableStub* s = new(amd64_code_length) VtableStub(true, vtable_index);
    14.7 +  // Can be NULL if there is no free space in the code cache.
    14.8 +  if (s == NULL) {
    14.9 +    return NULL;
   14.10 +  }
   14.11 +
   14.12    ResourceMark rm;
   14.13    CodeBuffer cb(s->entry_point(), amd64_code_length);
   14.14    MacroAssembler* masm = new MacroAssembler(&cb);
   14.15 @@ -126,6 +131,11 @@
   14.16    // returned by pd_code_size_limit!
   14.17    const int amd64_code_length = VtableStub::pd_code_size_limit(false);
   14.18    VtableStub* s = new(amd64_code_length) VtableStub(false, itable_index);
   14.19 +  // Can be NULL if there is no free space in the code cache.
   14.20 +  if (s == NULL) {
   14.21 +    return NULL;
   14.22 +  }
   14.23 +
   14.24    ResourceMark rm;
   14.25    CodeBuffer cb(s->entry_point(), amd64_code_length);
   14.26    MacroAssembler* masm = new MacroAssembler(&cb);
    15.1 --- a/src/cpu/x86/vm/x86_32.ad	Tue Oct 01 08:54:05 2013 -0400
    15.2 +++ b/src/cpu/x86/vm/x86_32.ad	Tue Oct 01 09:21:43 2013 -0400
    15.3 @@ -351,7 +351,7 @@
    15.4          int format) {
    15.5  #ifdef ASSERT
    15.6    if (rspec.reloc()->type() == relocInfo::oop_type && d32 != 0 && d32 != (int)Universe::non_oop_word()) {
    15.7 -    assert(oop(d32)->is_oop() && (ScavengeRootsInCode || !oop(d32)->is_scavengable()), "cannot embed scavengable oops in code");
    15.8 +    assert(cast_to_oop(d32)->is_oop() && (ScavengeRootsInCode || !cast_to_oop(d32)->is_scavengable()), "cannot embed scavengable oops in code");
    15.9    }
   15.10  #endif
   15.11    cbuf.relocate(cbuf.insts_mark(), rspec, format);
    16.1 --- a/src/cpu/x86/vm/x86_64.ad	Tue Oct 01 08:54:05 2013 -0400
    16.2 +++ b/src/cpu/x86/vm/x86_64.ad	Tue Oct 01 09:21:43 2013 -0400
    16.3 @@ -529,7 +529,7 @@
    16.4    if (rspec.reloc()->type() == relocInfo::oop_type &&
    16.5        d32 != 0 && d32 != (intptr_t) Universe::non_oop_word()) {
    16.6      assert(Universe::heap()->is_in_reserved((address)(intptr_t)d32), "should be real oop");
    16.7 -    assert(oop((intptr_t)d32)->is_oop() && (ScavengeRootsInCode || !oop((intptr_t)d32)->is_scavengable()), "cannot embed scavengable oops in code");
    16.8 +    assert(cast_to_oop((intptr_t)d32)->is_oop() && (ScavengeRootsInCode || !cast_to_oop((intptr_t)d32)->is_scavengable()), "cannot embed scavengable oops in code");
    16.9    }
   16.10  #endif
   16.11    cbuf.relocate(cbuf.insts_mark(), rspec, format);
   16.12 @@ -556,7 +556,7 @@
   16.13    if (rspec.reloc()->type() == relocInfo::oop_type &&
   16.14        d64 != 0 && d64 != (int64_t) Universe::non_oop_word()) {
   16.15      assert(Universe::heap()->is_in_reserved((address)d64), "should be real oop");
   16.16 -    assert(oop(d64)->is_oop() && (ScavengeRootsInCode || !oop(d64)->is_scavengable()),
   16.17 +    assert(cast_to_oop(d64)->is_oop() && (ScavengeRootsInCode || !cast_to_oop(d64)->is_scavengable()),
   16.18             "cannot embed scavengable oops in code");
   16.19    }
   16.20  #endif
    17.1 --- a/src/share/vm/c1/c1_GraphBuilder.cpp	Tue Oct 01 08:54:05 2013 -0400
    17.2 +++ b/src/share/vm/c1/c1_GraphBuilder.cpp	Tue Oct 01 09:21:43 2013 -0400
    17.3 @@ -4219,7 +4219,9 @@
    17.4      }
    17.5    }
    17.6  
    17.7 -  if (!PrintInlining)  return;
    17.8 +  if (!PrintInlining && !compilation()->method()->has_option("PrintInlining")) {
    17.9 +    return;
   17.10 +  }
   17.11    CompileTask::print_inlining(callee, scope()->level(), bci(), msg);
   17.12    if (success && CIPrintMethodCodes) {
   17.13      callee->print_codes();
    18.1 --- a/src/share/vm/c1/c1_Runtime1.cpp	Tue Oct 01 08:54:05 2013 -0400
    18.2 +++ b/src/share/vm/c1/c1_Runtime1.cpp	Tue Oct 01 09:21:43 2013 -0400
    18.3 @@ -1019,7 +1019,7 @@
    18.4                n_copy->set_data((intx) (load_klass()));
    18.5              } else {
    18.6                assert(mirror() != NULL, "klass not set");
    18.7 -              n_copy->set_data((intx) (mirror()));
    18.8 +              n_copy->set_data(cast_from_oop<intx>(mirror()));
    18.9              }
   18.10  
   18.11              if (TracePatching) {
   18.12 @@ -1031,7 +1031,7 @@
   18.13            assert(n_copy->data() == 0 ||
   18.14                   n_copy->data() == (intptr_t)Universe::non_oop_word(),
   18.15                   "illegal init value");
   18.16 -          n_copy->set_data((intx) (appendix()));
   18.17 +          n_copy->set_data(cast_from_oop<intx>(appendix()));
   18.18  
   18.19            if (TracePatching) {
   18.20              Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
    19.1 --- a/src/share/vm/classfile/classLoaderData.cpp	Tue Oct 01 08:54:05 2013 -0400
    19.2 +++ b/src/share/vm/classfile/classLoaderData.cpp	Tue Oct 01 09:21:43 2013 -0400
    19.3 @@ -261,7 +261,7 @@
    19.4                    k,
    19.5                    k->external_name(),
    19.6                    k->class_loader_data(),
    19.7 -                  k->class_loader(),
    19.8 +                  (void *)k->class_loader(),
    19.9                    loader_name());
   19.10    }
   19.11  }
   19.12 @@ -297,7 +297,7 @@
   19.13    if (TraceClassLoaderData) {
   19.14      ResourceMark rm;
   19.15      tty->print("[ClassLoaderData: unload loader data "PTR_FORMAT, this);
   19.16 -    tty->print(" for instance "PTR_FORMAT" of %s", class_loader(),
   19.17 +    tty->print(" for instance "PTR_FORMAT" of %s", (void *)class_loader(),
   19.18                 loader_name());
   19.19      if (is_anonymous()) {
   19.20        tty->print(" for anonymous class  "PTR_FORMAT " ", _klasses);
   19.21 @@ -458,7 +458,7 @@
   19.22  void ClassLoaderData::dump(outputStream * const out) {
   19.23    ResourceMark rm;
   19.24    out->print("ClassLoaderData CLD: "PTR_FORMAT", loader: "PTR_FORMAT", loader_klass: "PTR_FORMAT" %s {",
   19.25 -      this, class_loader(),
   19.26 +      this, (void *)class_loader(),
   19.27        class_loader() != NULL ? class_loader()->klass() : NULL, loader_name());
   19.28    if (claimed()) out->print(" claimed ");
   19.29    if (is_unloading()) out->print(" unloading ");
   19.30 @@ -553,7 +553,7 @@
   19.31          ResourceMark rm;
   19.32          tty->print("[ClassLoaderData: ");
   19.33          tty->print("create class loader data "PTR_FORMAT, cld);
   19.34 -        tty->print(" for instance "PTR_FORMAT" of %s", cld->class_loader(),
   19.35 +        tty->print(" for instance "PTR_FORMAT" of %s", (void *)cld->class_loader(),
   19.36                     cld->loader_name());
   19.37          tty->print_cr("]");
   19.38        }
    20.1 --- a/src/share/vm/classfile/dictionary.hpp	Tue Oct 01 08:54:05 2013 -0400
    20.2 +++ b/src/share/vm/classfile/dictionary.hpp	Tue Oct 01 09:21:43 2013 -0400
    20.3 @@ -1,5 +1,5 @@
    20.4  /*
    20.5 - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
    20.6 + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
    20.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    20.8   *
    20.9   * This code is free software; you can redistribute it and/or modify it
   20.10 @@ -264,7 +264,7 @@
   20.11      }
   20.12      if (method_type() != NULL) {
   20.13        if (printed)  st->print(" and ");
   20.14 -      st->print(INTPTR_FORMAT, method_type());
   20.15 +      st->print(INTPTR_FORMAT, (void *)method_type());
   20.16        printed = true;
   20.17      }
   20.18      st->print_cr(printed ? "" : "(empty)");
    21.1 --- a/src/share/vm/classfile/symbolTable.cpp	Tue Oct 01 08:54:05 2013 -0400
    21.2 +++ b/src/share/vm/classfile/symbolTable.cpp	Tue Oct 01 09:21:43 2013 -0400
    21.3 @@ -341,7 +341,7 @@
    21.4  
    21.5  Symbol* SymbolTable::basic_add(int index_arg, u1 *name, int len,
    21.6                                 unsigned int hashValue_arg, bool c_heap, TRAPS) {
    21.7 -  assert(!Universe::heap()->is_in_reserved(name) || GC_locker::is_active(),
    21.8 +  assert(!Universe::heap()->is_in_reserved(name),
    21.9           "proposed name of symbol must be stable");
   21.10  
   21.11    // Don't allow symbols to be created which cannot fit in a Symbol*.
   21.12 @@ -685,7 +685,7 @@
   21.13    if (found_string != NULL) return found_string;
   21.14  
   21.15    debug_only(StableMemoryChecker smc(name, len * sizeof(name[0])));
   21.16 -  assert(!Universe::heap()->is_in_reserved(name) || GC_locker::is_active(),
   21.17 +  assert(!Universe::heap()->is_in_reserved(name),
   21.18           "proposed name of symbol must be stable");
   21.19  
   21.20    Handle string;
   21.21 @@ -840,7 +840,7 @@
   21.22    if (str1 == str2) {
   21.23      tty->print_cr("ERROR: identical oop values (0x" PTR_FORMAT ") "
   21.24                    "in entry @ bucket[%d][%d] and entry @ bucket[%d][%d]",
   21.25 -                  str1, bkt1, e_cnt1, bkt2, e_cnt2);
   21.26 +                  (void *)str1, bkt1, e_cnt1, bkt2, e_cnt2);
   21.27      return _verify_fail_continue;
   21.28    }
   21.29  
    22.1 --- a/src/share/vm/code/compiledIC.cpp	Tue Oct 01 08:54:05 2013 -0400
    22.2 +++ b/src/share/vm/code/compiledIC.cpp	Tue Oct 01 09:21:43 2013 -0400
    22.3 @@ -160,7 +160,7 @@
    22.4  // High-level access to an inline cache. Guaranteed to be MT-safe.
    22.5  
    22.6  
    22.7 -void CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
    22.8 +bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS) {
    22.9    assert(CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "");
   22.10    assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
   22.11    assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");
   22.12 @@ -170,8 +170,10 @@
   22.13      assert(bytecode == Bytecodes::_invokeinterface, "");
   22.14      int itable_index = call_info->itable_index();
   22.15      entry = VtableStubs::find_itable_stub(itable_index);
    22.16 +    if (entry == NULL) {
   22.17 +      return false;
   22.18 +    }
   22.19  #ifdef ASSERT
   22.20 -    assert(entry != NULL, "entry not computed");
   22.21      int index = call_info->resolved_method()->itable_index();
   22.22      assert(index == itable_index, "CallInfo pre-computes this");
   22.23  #endif //ASSERT
   22.24 @@ -184,6 +186,9 @@
   22.25      int vtable_index = call_info->vtable_index();
   22.26      assert(call_info->resolved_klass()->verify_vtable_index(vtable_index), "sanity check");
   22.27      entry = VtableStubs::find_vtable_stub(vtable_index);
   22.28 +    if (entry == NULL) {
   22.29 +      return false;
   22.30 +    }
   22.31      InlineCacheBuffer::create_transition_stub(this, NULL, entry);
   22.32    }
   22.33  
   22.34 @@ -200,6 +205,7 @@
   22.35    // race because the IC entry was complete when we safepointed so
   22.36    // cleaning it immediately is harmless.
   22.37    // assert(is_megamorphic(), "sanity check");
   22.38 +  return true;
   22.39  }
   22.40  
   22.41  
    23.1 --- a/src/share/vm/code/compiledIC.hpp	Tue Oct 01 08:54:05 2013 -0400
    23.2 +++ b/src/share/vm/code/compiledIC.hpp	Tue Oct 01 09:21:43 2013 -0400
    23.3 @@ -226,7 +226,10 @@
    23.4    //
    23.5    void set_to_clean();  // Can only be called during a safepoint operation
    23.6    void set_to_monomorphic(CompiledICInfo& info);
    23.7 -  void set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);
    23.8 +
    23.9 +  // Returns true if successful and false otherwise. The call can fail if memory
   23.10 +  // allocation in the code cache fails.
   23.11 +  bool set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS);
   23.12  
   23.13    static void compute_monomorphic_entry(methodHandle method, KlassHandle receiver_klass,
   23.14                                          bool is_optimized, bool static_bound, CompiledICInfo& info, TRAPS);
    24.1 --- a/src/share/vm/code/nmethod.cpp	Tue Oct 01 08:54:05 2013 -0400
    24.2 +++ b/src/share/vm/code/nmethod.cpp	Tue Oct 01 09:21:43 2013 -0400
    24.3 @@ -1965,7 +1965,7 @@
    24.4      if (!_detected_scavenge_root)  _print_nm->print_on(tty, "new scavenge root");
    24.5      tty->print_cr(""PTR_FORMAT"[offset=%d] detected scavengable oop "PTR_FORMAT" (found at "PTR_FORMAT")",
    24.6                    _print_nm, (int)((intptr_t)p - (intptr_t)_print_nm),
    24.7 -                  (intptr_t)(*p), (intptr_t)p);
    24.8 +                  (void *)(*p), (intptr_t)p);
    24.9      (*p)->print();
   24.10    }
   24.11  #endif //PRODUCT
   24.12 @@ -2345,7 +2345,7 @@
   24.13        _ok = false;
   24.14      }
   24.15      tty->print_cr("*** non-oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
   24.16 -                  (intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
   24.17 +                  (void *)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
   24.18    }
   24.19    virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
   24.20  };
   24.21 @@ -2466,7 +2466,7 @@
   24.22        _ok = false;
   24.23      }
   24.24      tty->print_cr("*** scavengable oop "PTR_FORMAT" found at "PTR_FORMAT" (offset %d)",
   24.25 -                  (intptr_t)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
   24.26 +                  (void *)(*p), (intptr_t)p, (int)((intptr_t)p - (intptr_t)_nm));
   24.27      (*p)->print();
   24.28    }
   24.29    virtual void do_oop(narrowOop* p) { ShouldNotReachHere(); }
    25.1 --- a/src/share/vm/code/vtableStubs.cpp	Tue Oct 01 08:54:05 2013 -0400
    25.2 +++ b/src/share/vm/code/vtableStubs.cpp	Tue Oct 01 09:21:43 2013 -0400
    25.3 @@ -46,12 +46,9 @@
    25.4  address VtableStub::_chunk_end         = NULL;
    25.5  VMReg   VtableStub::_receiver_location = VMRegImpl::Bad();
    25.6  
    25.7 -static int num_vtable_chunks = 0;
    25.8 -
    25.9  
   25.10  void* VtableStub::operator new(size_t size, int code_size) throw() {
   25.11    assert(size == sizeof(VtableStub), "mismatched size");
   25.12 -  num_vtable_chunks++;
   25.13    // compute real VtableStub size (rounded to nearest word)
   25.14    const int real_size = round_to(code_size + sizeof(VtableStub), wordSize);
   25.15    // malloc them in chunks to minimize header overhead
   25.16 @@ -60,7 +57,7 @@
   25.17      const int bytes = chunk_factor * real_size + pd_code_alignment();
   25.18      BufferBlob* blob = BufferBlob::create("vtable chunks", bytes);
   25.19      if (blob == NULL) {
   25.20 -      vm_exit_out_of_memory(bytes, OOM_MALLOC_ERROR, "CodeCache: no room for vtable chunks");
   25.21 +      return NULL;
   25.22      }
   25.23      _chunk = blob->content_begin();
   25.24      _chunk_end = _chunk + bytes;
   25.25 @@ -121,6 +118,12 @@
   25.26      } else {
   25.27        s = create_itable_stub(vtable_index);
   25.28      }
   25.29 +
   25.30 +    // Creation of vtable or itable can fail if there is not enough free space in the code cache.
   25.31 +    if (s == NULL) {
   25.32 +      return NULL;
   25.33 +    }
   25.34 +
   25.35      enter(is_vtable_stub, vtable_index, s);
   25.36      if (PrintAdapterHandlers) {
   25.37        tty->print_cr("Decoding VtableStub %s[%d]@%d",
    26.1 --- a/src/share/vm/compiler/oopMap.cpp	Tue Oct 01 08:54:05 2013 -0400
    26.2 +++ b/src/share/vm/compiler/oopMap.cpp	Tue Oct 01 09:21:43 2013 -0400
    26.3 @@ -1,5 +1,5 @@
    26.4  /*
    26.5 - * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
    26.6 + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
    26.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    26.8   *
    26.9   * This code is free software; you can redistribute it and/or modify it
   26.10 @@ -628,7 +628,7 @@
   26.11  
   26.12  
   26.13  // Returns value of location as an int
   26.14 -intptr_t value_of_loc(oop *pointer) { return (intptr_t)(*pointer); }
   26.15 +intptr_t value_of_loc(oop *pointer) { return cast_from_oop<intptr_t>((*pointer)); }
   26.16  
   26.17  
   26.18  void DerivedPointerTable::add(oop *derived_loc, oop *base_loc) {
    27.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Oct 01 08:54:05 2013 -0400
    27.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	Tue Oct 01 09:21:43 2013 -0400
    27.3 @@ -9065,7 +9065,7 @@
    27.4    return !stack->isEmpty();
    27.5  }
    27.6  
    27.7 -#define BUSY  (oop(0x1aff1aff))
    27.8 +#define BUSY  (cast_to_oop<intptr_t>(0x1aff1aff))
    27.9  // (MT-safe) Get a prefix of at most "num" from the list.
   27.10  // The overflow list is chained through the mark word of
   27.11  // each object in the list. We fetch the entire list,
   27.12 @@ -9098,7 +9098,7 @@
   27.13      return false;
   27.14    }
   27.15    // Grab the entire list; we'll put back a suffix
   27.16 -  oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
   27.17 +  oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
   27.18    Thread* tid = Thread::current();
   27.19    // Before "no_of_gc_threads" was introduced CMSOverflowSpinCount was
   27.20    // set to ParallelGCThreads.
   27.21 @@ -9113,7 +9113,7 @@
   27.22        return false;
   27.23      } else if (_overflow_list != BUSY) {
   27.24        // Try and grab the prefix
   27.25 -      prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
   27.26 +      prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
   27.27      }
   27.28    }
   27.29    // If the list was found to be empty, or we spun long
    28.1 --- a/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue Oct 01 08:54:05 2013 -0400
    28.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMark.cpp	Tue Oct 01 09:21:43 2013 -0400
    28.3 @@ -2694,7 +2694,7 @@
    28.4  
    28.5      if (print_it) {
    28.6        _out->print_cr(" "PTR_FORMAT"%s",
    28.7 -                     o, (over_tams) ? " >" : (marked) ? " M" : "");
    28.8 +                     (void *)o, (over_tams) ? " >" : (marked) ? " M" : "");
    28.9        PrintReachableOopClosure oopCl(_out, _vo, _all);
   28.10        o->oop_iterate_no_header(&oopCl);
   28.11      }
    29.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    29.2 +++ b/src/share/vm/gc_implementation/g1/g1BiasedArray.cpp	Tue Oct 01 09:21:43 2013 -0400
    29.3 @@ -0,0 +1,141 @@
    29.4 +/*
    29.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
    29.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    29.7 + *
    29.8 + * This code is free software; you can redistribute it and/or modify it
    29.9 + * under the terms of the GNU General Public License version 2 only, as
   29.10 + * published by the Free Software Foundation.
   29.11 + *
   29.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   29.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   29.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   29.15 + * version 2 for more details (a copy is included in the LICENSE file that
   29.16 + * accompanied this code).
   29.17 + *
   29.18 + * You should have received a copy of the GNU General Public License version
   29.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   29.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   29.21 + *
   29.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   29.23 + * or visit www.oracle.com if you need additional information or have any
   29.24 + * questions.
   29.25 + *
   29.26 + */
   29.27 +
   29.28 +#include "precompiled.hpp"
   29.29 +#include "gc_implementation/g1/g1BiasedArray.hpp"
   29.30 +
   29.31 +#ifndef PRODUCT
   29.32 +void G1BiasedMappedArrayBase::verify_index(idx_t index) const {
   29.33 +  guarantee(_base != NULL, "Array not initialized");
   29.34 +  guarantee(index < length(), err_msg("Index out of bounds index: "SIZE_FORMAT" length: "SIZE_FORMAT, index, length()));
   29.35 +}
   29.36 +
   29.37 +void G1BiasedMappedArrayBase::verify_biased_index(idx_t biased_index) const {
   29.38 +  guarantee(_biased_base != NULL, "Array not initialized");
   29.39 +  guarantee(biased_index >= bias() && biased_index < (bias() + length()),
   29.40 +    err_msg("Biased index out of bounds, index: "SIZE_FORMAT" bias: "SIZE_FORMAT" length: "SIZE_FORMAT, biased_index, bias(), length()));
   29.41 +}
   29.42 +
   29.43 +void G1BiasedMappedArrayBase::verify_biased_index_inclusive_end(idx_t biased_index) const {
   29.44 +  guarantee(_biased_base != NULL, "Array not initialized");
   29.45 +  guarantee(biased_index >= bias() && biased_index <= (bias() + length()),
   29.46 +    err_msg("Biased index out of inclusive bounds, index: "SIZE_FORMAT" bias: "SIZE_FORMAT" length: "SIZE_FORMAT, biased_index, bias(), length()));
   29.47 +}
   29.48 +
   29.49 +class TestMappedArray : public G1BiasedMappedArray<int> {
   29.50 +protected:
   29.51 +  virtual int default_value() const { return 0xBAADBABE; }
   29.52 +public:
   29.53 +  static void test_biasedarray() {
   29.54 +    const size_t REGION_SIZE_IN_WORDS = 512;
   29.55 +    const size_t NUM_REGIONS = 20;
   29.56 +    HeapWord* fake_heap = (HeapWord*)LP64_ONLY(0xBAAA00000) NOT_LP64(0xBA000000); // Any value that is non-zero
   29.57 +
   29.58 +    TestMappedArray array;
   29.59 +    array.initialize(fake_heap, fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS,
   29.60 +            REGION_SIZE_IN_WORDS * HeapWordSize);
   29.61 +    // Check address calculation (bounds)
   29.62 +    assert(array.bottom_address_mapped() == fake_heap,
   29.63 +      err_msg("bottom mapped address should be "PTR_FORMAT", but is "PTR_FORMAT, fake_heap, array.bottom_address_mapped()));
   29.64 +    assert(array.end_address_mapped() == (fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS), "must be");
   29.65 +
   29.66 +    int* bottom = array.address_mapped_to(fake_heap);
   29.67 +    assert((void*)bottom == (void*) array.base(), "must be");
   29.68 +    int* end = array.address_mapped_to(fake_heap + REGION_SIZE_IN_WORDS * NUM_REGIONS);
   29.69 +    assert((void*)end == (void*)(array.base() + array.length()), "must be");
   29.70 +    // The entire array should contain default value elements
   29.71 +    for (int* current = bottom; current < end; current++) {
   29.72 +      assert(*current == array.default_value(), "must be");
   29.73 +    }
   29.74 +
   29.75 +    // Test setting values in the table
   29.76 +
   29.77 +    HeapWord* region_start_address = fake_heap + REGION_SIZE_IN_WORDS * (NUM_REGIONS / 2);
   29.78 +    HeapWord* region_end_address = fake_heap + (REGION_SIZE_IN_WORDS * (NUM_REGIONS / 2) + REGION_SIZE_IN_WORDS - 1);
   29.79 +
   29.80 +    // Set/get by address tests: invert some value; first retrieve one
   29.81 +    int actual_value = array.get_by_index(NUM_REGIONS / 2);
   29.82 +    array.set_by_index(NUM_REGIONS / 2, ~actual_value);
   29.83 +    // Get the same value by address, should correspond to the start of the "region"
   29.84 +    int value = array.get_by_address(region_start_address);
   29.85 +    assert(value == ~actual_value, "must be");
   29.86 +    // Get the same value by address, at one HeapWord before the start
   29.87 +    value = array.get_by_address(region_start_address - 1);
   29.88 +    assert(value == array.default_value(), "must be");
   29.89 +    // Get the same value by address, at the end of the "region"
   29.90 +    value = array.get_by_address(region_end_address);
   29.91 +    assert(value == ~actual_value, "must be");
   29.92 +    // Make sure the next value maps to another index
   29.93 +    value = array.get_by_address(region_end_address + 1);
   29.94 +    assert(value == array.default_value(), "must be");
   29.95 +
   29.96 +    // Reset the value in the array
   29.97 +    array.set_by_address(region_start_address + (region_end_address - region_start_address) / 2, actual_value);
   29.98 +
   29.99 +    // The entire array should have the default value again
  29.100 +    for (int* current = bottom; current < end; current++) {
  29.101 +      assert(*current == array.default_value(), "must be");
  29.102 +    }
  29.103 +
  29.104 +    // Set/get by index tests: invert some value
  29.105 +    idx_t index = NUM_REGIONS / 2;
  29.106 +    actual_value = array.get_by_index(index);
  29.107 +    array.set_by_index(index, ~actual_value);
  29.108 +
  29.109 +    value = array.get_by_index(index);
  29.110 +    assert(value == ~actual_value, "must be");
  29.111 +
  29.112 +    value = array.get_by_index(index - 1);
  29.113 +    assert(value == array.default_value(), "must be");
  29.114 +
  29.115 +    value = array.get_by_index(index + 1);
  29.116 +    assert(value == array.default_value(), "must be");
  29.117 +
  29.118 +    array.set_by_index(0, 0);
  29.119 +    value = array.get_by_index(0);
  29.120 +    assert(value == 0, "must be");
  29.121 +
  29.122 +    array.set_by_index(array.length() - 1, 0);
  29.123 +    value = array.get_by_index(array.length() - 1);
  29.124 +    assert(value == 0, "must be");
  29.125 +
  29.126 +    array.set_by_index(index, 0);
  29.127 +
  29.128 +    // The array should have three zeros, and default values otherwise
  29.129 +    size_t num_zeros = 0;
  29.130 +    for (int* current = bottom; current < end; current++) {
  29.131 +      assert(*current == array.default_value() || *current == 0, "must be");
  29.132 +      if (*current == 0) {
  29.133 +        num_zeros++;
  29.134 +      }
  29.135 +    }
  29.136 +    assert(num_zeros == 3, "must be");
  29.137 +  }
  29.138 +};
  29.139 +
  29.140 +void TestG1BiasedArray_test() {
  29.141 +  TestMappedArray::test_biasedarray();
  29.142 +}
  29.143 +
  29.144 +#endif
    30.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    30.2 +++ b/src/share/vm/gc_implementation/g1/g1BiasedArray.hpp	Tue Oct 01 09:21:43 2013 -0400
    30.3 @@ -0,0 +1,181 @@
    30.4 +/*
    30.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
    30.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    30.7 + *
    30.8 + * This code is free software; you can redistribute it and/or modify it
    30.9 + * under the terms of the GNU General Public License version 2 only, as
   30.10 + * published by the Free Software Foundation.
   30.11 + *
   30.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   30.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   30.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   30.15 + * version 2 for more details (a copy is included in the LICENSE file that
   30.16 + * accompanied this code).
   30.17 + *
   30.18 + * You should have received a copy of the GNU General Public License version
   30.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   30.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   30.21 + *
   30.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   30.23 + * or visit www.oracle.com if you need additional information or have any
   30.24 + * questions.
   30.25 + *
   30.26 + */
   30.27 +
   30.28 +#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
   30.29 +#define SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
   30.30 +
   30.31 +#include "utilities/debug.hpp"
   30.32 +#include "memory/allocation.inline.hpp"
   30.33 +
   30.34 +// Implements the common base functionality for arrays that contain provisions
   30.35 +// for accessing its elements using a biased index.
    30.36 +// The element type is defined by instantiating the template.
   30.37 +class G1BiasedMappedArrayBase VALUE_OBJ_CLASS_SPEC {
   30.38 +  friend class VMStructs;
   30.39 +public:
   30.40 +  typedef size_t idx_t;
   30.41 +protected:
   30.42 +  address _base;          // the real base address
   30.43 +  size_t _length;         // the length of the array
   30.44 +  address _biased_base;   // base address biased by "bias" elements
   30.45 +  size_t _bias;           // the bias, i.e. the offset biased_base is located to the right in elements
   30.46 +  uint _shift_by;         // the amount of bits to shift right when mapping to an index of the array.
   30.47 +
   30.48 +protected:
   30.49 +
   30.50 +  G1BiasedMappedArrayBase() : _base(NULL), _length(0), _biased_base(NULL),
   30.51 +    _bias(0), _shift_by(0) { }
   30.52 +
   30.53 +  // Allocate a new array, generic version.
   30.54 +  static address create_new_base_array(size_t length, size_t elem_size) {
   30.55 +    assert(length > 0, "just checking");
   30.56 +    assert(elem_size > 0, "just checking");
   30.57 +    return NEW_C_HEAP_ARRAY(u_char, length * elem_size, mtGC);
   30.58 +  }
   30.59 +
   30.60 +  // Initialize the members of this class. The biased start address of this array
   30.61 +  // is the bias (in elements) multiplied by the element size.
   30.62 +  void initialize_base(address base, size_t length, size_t bias, size_t elem_size, uint shift_by) {
   30.63 +    assert(base != NULL, "just checking");
   30.64 +    assert(length > 0, "just checking");
   30.65 +    assert(shift_by < sizeof(uintptr_t) * 8, err_msg("Shifting by %zd, larger than word size?", shift_by));
   30.66 +    _base = base;
   30.67 +    _length = length;
   30.68 +    _biased_base = base - (bias * elem_size);
   30.69 +    _bias = bias;
   30.70 +    _shift_by = shift_by;
   30.71 +  }
   30.72 +
   30.73 +  // Allocate and initialize this array to cover the heap addresses in the range
   30.74 +  // of [bottom, end).
   30.75 +  void initialize(HeapWord* bottom, HeapWord* end, size_t target_elem_size_in_bytes, size_t mapping_granularity_in_bytes) {
   30.76 +    assert(mapping_granularity_in_bytes > 0, "just checking");
   30.77 +    assert(is_power_of_2(mapping_granularity_in_bytes),
   30.78 +      err_msg("mapping granularity must be power of 2, is %zd", mapping_granularity_in_bytes));
   30.79 +    assert((uintptr_t)bottom % mapping_granularity_in_bytes == 0,
   30.80 +      err_msg("bottom mapping area address must be a multiple of mapping granularity %zd, is "PTR_FORMAT,
   30.81 +        mapping_granularity_in_bytes, bottom));
   30.82 +    assert((uintptr_t)end % mapping_granularity_in_bytes == 0,
   30.83 +      err_msg("end mapping area address must be a multiple of mapping granularity %zd, is "PTR_FORMAT,
   30.84 +        mapping_granularity_in_bytes, end));
   30.85 +    size_t num_target_elems = (end - bottom) / (mapping_granularity_in_bytes / HeapWordSize);
   30.86 +    idx_t bias = (uintptr_t)bottom / mapping_granularity_in_bytes;
   30.87 +    address base = create_new_base_array(num_target_elems, target_elem_size_in_bytes);
   30.88 +    initialize_base(base, num_target_elems, bias, target_elem_size_in_bytes, log2_intptr(mapping_granularity_in_bytes));
   30.89 +  }
   30.90 +
   30.91 +  size_t bias() const { return _bias; }
   30.92 +  uint shift_by() const { return _shift_by; }
   30.93 +
   30.94 +  void verify_index(idx_t index) const PRODUCT_RETURN;
   30.95 +  void verify_biased_index(idx_t biased_index) const PRODUCT_RETURN;
   30.96 +  void verify_biased_index_inclusive_end(idx_t biased_index) const PRODUCT_RETURN;
   30.97 +
   30.98 +public:
   30.99 +   // Return the length of the array in elements.
  30.100 +   size_t length() const { return _length; }
  30.101 +};
  30.102 +
  30.103 +// Array that provides biased access and mapping from (valid) addresses in the
  30.104 +// heap into this array.
  30.105 +template<class T>
  30.106 +class G1BiasedMappedArray : public G1BiasedMappedArrayBase {
  30.107 +public:
  30.108 +  typedef G1BiasedMappedArrayBase::idx_t idx_t;
  30.109 +
  30.110 +  T* base() const { return (T*)G1BiasedMappedArrayBase::_base; }
  30.111 +  // Return the element of the given array at the given index. Assume
  30.112 +  // the index is valid. This is a convenience method that does sanity
  30.113 +  // checking on the index.
  30.114 +  T get_by_index(idx_t index) const {
  30.115 +    verify_index(index);
  30.116 +    return this->base()[index];
  30.117 +  }
  30.118 +
  30.119 +  // Set the element of the given array at the given index to the
  30.120 +  // given value. Assume the index is valid. This is a convenience
  30.121 +  // method that does sanity checking on the index.
  30.122 +  void set_by_index(idx_t index, T value) {
  30.123 +    verify_index(index);
  30.124 +    this->base()[index] = value;
  30.125 +  }
  30.126 +
  30.127 +  // The raw biased base pointer.
  30.128 +  T* biased_base() const { return (T*)G1BiasedMappedArrayBase::_biased_base; }
  30.129 +
  30.130 +  // Return the element of the given array that covers the given word in the
  30.131 +  // heap. Assumes the index is valid.
  30.132 +  T get_by_address(HeapWord* value) const {
  30.133 +    idx_t biased_index = ((uintptr_t)value) >> this->shift_by();
  30.134 +    this->verify_biased_index(biased_index);
  30.135 +    return biased_base()[biased_index];
  30.136 +  }
  30.137 +
   30.138 +  // Set the value of the array entry that corresponds to the given address.
  30.139 +  void set_by_address(HeapWord * address, T value) {
  30.140 +    idx_t biased_index = ((uintptr_t)address) >> this->shift_by();
  30.141 +    this->verify_biased_index(biased_index);
  30.142 +    biased_base()[biased_index] = value;
  30.143 +  }
  30.144 +
  30.145 +protected:
  30.146 +  // Returns the address of the element the given address maps to
  30.147 +  T* address_mapped_to(HeapWord* address) {
  30.148 +    idx_t biased_index = ((uintptr_t)address) >> this->shift_by();
  30.149 +    this->verify_biased_index_inclusive_end(biased_index);
  30.150 +    return biased_base() + biased_index;
  30.151 +  }
  30.152 +
  30.153 +public:
  30.154 +  // Return the smallest address (inclusive) in the heap that this array covers.
  30.155 +  HeapWord* bottom_address_mapped() const {
  30.156 +    return (HeapWord*) ((uintptr_t)this->bias() << this->shift_by());
  30.157 +  }
  30.158 +
  30.159 +  // Return the highest address (exclusive) in the heap that this array covers.
  30.160 +  HeapWord* end_address_mapped() const {
  30.161 +    return (HeapWord*) ((uintptr_t)(this->bias() + this->length()) << this->shift_by());
  30.162 +  }
  30.163 +
  30.164 +protected:
  30.165 +  virtual T default_value() const = 0;
  30.166 +  // Set all elements of the given array to the given value.
  30.167 +  void clear() {
  30.168 +    T value = default_value();
  30.169 +    for (idx_t i = 0; i < length(); i++) {
  30.170 +      set_by_index(i, value);
  30.171 +    }
  30.172 +  }
  30.173 +public:
  30.174 +  G1BiasedMappedArray() {}
  30.175 +
  30.176 +  // Allocate and initialize this array to cover the heap addresses in the range
  30.177 +  // of [bottom, end).
  30.178 +  void initialize(HeapWord* bottom, HeapWord* end, size_t mapping_granularity) {
  30.179 +    G1BiasedMappedArrayBase::initialize(bottom, end, sizeof(T), mapping_granularity);
  30.180 +    this->clear();
  30.181 +  }
  30.182 +};
  30.183 +
  30.184 +#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1BIASEDARRAY_HPP
    31.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Oct 01 08:54:05 2013 -0400
    31.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	Tue Oct 01 09:21:43 2013 -0400
    31.3 @@ -2069,8 +2069,10 @@
    31.4    _g1_storage.initialize(g1_rs, 0);
    31.5    _g1_committed = MemRegion((HeapWord*)_g1_storage.low(), (size_t) 0);
    31.6    _hrs.initialize((HeapWord*) _g1_reserved.start(),
    31.7 -                  (HeapWord*) _g1_reserved.end(),
    31.8 -                  _expansion_regions);
    31.9 +                  (HeapWord*) _g1_reserved.end());
   31.10 +  assert(_hrs.max_length() == _expansion_regions,
   31.11 +         err_msg("max length: %u expansion regions: %u",
   31.12 +                 _hrs.max_length(), _expansion_regions));
   31.13  
   31.14    // Do later initialization work for concurrent refinement.
   31.15    _cg1r->init();
   31.16 @@ -4615,7 +4617,7 @@
   31.17    assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, ref));
   31.18    oop p = oopDesc::load_decode_heap_oop(ref);
   31.19    assert(_g1h->is_in_g1_reserved(p),
   31.20 -         err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
   31.21 +         err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
   31.22    return true;
   31.23  }
   31.24  
   31.25 @@ -4625,11 +4627,11 @@
   31.26      // Must be in the collection set--it's already been copied.
   31.27      oop p = clear_partial_array_mask(ref);
   31.28      assert(_g1h->obj_in_cs(p),
   31.29 -           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
   31.30 +           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
   31.31    } else {
   31.32      oop p = oopDesc::load_decode_heap_oop(ref);
   31.33      assert(_g1h->is_in_g1_reserved(p),
   31.34 -           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
   31.35 +           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, (void *)p));
   31.36    }
   31.37    return true;
   31.38  }
    32.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Tue Oct 01 08:54:05 2013 -0400
    32.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp	Tue Oct 01 09:21:43 2013 -0400
    32.3 @@ -703,7 +703,7 @@
    32.4      if (_g1_committed.contains((HeapWord*) obj)) {
    32.5        // no need to subtract the bottom of the heap from obj,
    32.6        // _in_cset_fast_test is biased
    32.7 -      uintx index = (uintx) obj >> HeapRegion::LogOfHRGrainBytes;
    32.8 +      uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes;
    32.9        bool ret = _in_cset_fast_test[index];
   32.10        // let's make sure the result is consistent with what the slower
   32.11        // test returns
    33.1 --- a/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Tue Oct 01 08:54:05 2013 -0400
    33.2 +++ b/src/share/vm/gc_implementation/g1/g1OopClosures.hpp	Tue Oct 01 09:21:43 2013 -0400
    33.3 @@ -1,5 +1,5 @@
    33.4  /*
    33.5 - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
    33.6 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
    33.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    33.8   *
    33.9   * This code is free software; you can redistribute it and/or modify it
   33.10 @@ -91,12 +91,12 @@
   33.11  }
   33.12  
   33.13  template <class T> inline T* set_partial_array_mask(T obj) {
   33.14 -  assert(((uintptr_t)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
   33.15 -  return (T*) ((uintptr_t)obj | G1_PARTIAL_ARRAY_MASK);
   33.16 +  assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
   33.17 +  return (T*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
   33.18  }
   33.19  
   33.20  template <class T> inline oop clear_partial_array_mask(T* ref) {
   33.21 -  return oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
   33.22 +  return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
   33.23  }
   33.24  
   33.25  class G1ParScanPartialArrayClosure : public G1ParClosureSuper {
    34.1 --- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Tue Oct 01 08:54:05 2013 -0400
    34.2 +++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp	Tue Oct 01 09:21:43 2013 -0400
    34.3 @@ -1,5 +1,5 @@
    34.4  /*
    34.5 - * Copyright (c) 2001, 2011, Oracle and/or its affiliates. All rights reserved.
    34.6 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
    34.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    34.8   *
    34.9   * This code is free software; you can redistribute it and/or modify it
   34.10 @@ -95,7 +95,7 @@
   34.11  G1SATBCardTableLoggingModRefBS::write_ref_field_static(void* field,
   34.12                                                         oop new_val) {
   34.13    uintptr_t field_uint = (uintptr_t)field;
   34.14 -  uintptr_t new_val_uint = (uintptr_t)new_val;
   34.15 +  uintptr_t new_val_uint = cast_from_oop<uintptr_t>(new_val);
   34.16    uintptr_t comb = field_uint ^ new_val_uint;
   34.17    comb = comb >> HeapRegion::LogOfHRGrainBytes;
   34.18    if (comb == 0) return;
    35.1 --- a/src/share/vm/gc_implementation/g1/heapRegion.cpp	Tue Oct 01 08:54:05 2013 -0400
    35.2 +++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp	Tue Oct 01 09:21:43 2013 -0400
    35.3 @@ -637,7 +637,7 @@
    35.4            gclog_or_tty->print_cr("Object "PTR_FORMAT" in region "
    35.5                                   "["PTR_FORMAT", "PTR_FORMAT") is above "
    35.6                                   "top "PTR_FORMAT,
    35.7 -                                 obj, _hr->bottom(), _hr->end(), _hr->top());
    35.8 +                                 (void *)obj, _hr->bottom(), _hr->end(), _hr->top());
    35.9            _failures = true;
   35.10            return;
   35.11          }
   35.12 @@ -951,12 +951,12 @@
   35.13          Klass* klass = obj->klass();
   35.14          if (!klass->is_metaspace_object()) {
   35.15            gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
   35.16 -                                 "not metadata", klass, obj);
   35.17 +                                 "not metadata", klass, (void *)obj);
   35.18            *failures = true;
   35.19            return;
   35.20          } else if (!klass->is_klass()) {
   35.21            gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" "
   35.22 -                                 "not a klass", klass, obj);
   35.23 +                                 "not a klass", klass, (void *)obj);
   35.24            *failures = true;
   35.25            return;
   35.26          } else {
   35.27 @@ -971,7 +971,7 @@
   35.28            }
   35.29          }
   35.30        } else {
   35.31 -        gclog_or_tty->print_cr(PTR_FORMAT" no an oop", obj);
   35.32 +        gclog_or_tty->print_cr(PTR_FORMAT" no an oop", (void *)obj);
   35.33          *failures = true;
   35.34          return;
   35.35        }
    36.1 --- a/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Tue Oct 01 08:54:05 2013 -0400
    36.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionRemSet.cpp	Tue Oct 01 09:21:43 2013 -0400
    36.3 @@ -91,8 +91,8 @@
    36.4        gclog_or_tty->print_cr("    PRT::Add_reference_work(" PTR_FORMAT "->" PTR_FORMAT").",
    36.5                               from,
    36.6                               UseCompressedOops
    36.7 -                             ? oopDesc::load_decode_heap_oop((narrowOop*)from)
    36.8 -                             : oopDesc::load_decode_heap_oop((oop*)from));
    36.9 +                             ? (void *)oopDesc::load_decode_heap_oop((narrowOop*)from)
   36.10 +                             : (void *)oopDesc::load_decode_heap_oop((oop*)from));
   36.11      }
   36.12  
   36.13      HeapRegion* loc_hr = hr();
   36.14 @@ -403,8 +403,8 @@
   36.15      gclog_or_tty->print_cr("ORT::add_reference_work(" PTR_FORMAT "->" PTR_FORMAT ").",
   36.16                                                      from,
   36.17                                                      UseCompressedOops
   36.18 -                                                    ? oopDesc::load_decode_heap_oop((narrowOop*)from)
   36.19 -                                                    : oopDesc::load_decode_heap_oop((oop*)from));
   36.20 +                                                    ? (void *)oopDesc::load_decode_heap_oop((narrowOop*)from)
   36.21 +                                                    : (void *)oopDesc::load_decode_heap_oop((oop*)from));
   36.22    }
   36.23  
   36.24    int from_card = (int)(uintptr_t(from) >> CardTableModRefBS::card_shift);
    37.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Tue Oct 01 08:54:05 2013 -0400
    37.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.cpp	Tue Oct 01 09:21:43 2013 -0400
    37.3 @@ -71,27 +71,16 @@
    37.4  
    37.5  // Public
    37.6  
    37.7 -void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end,
    37.8 -                               uint max_length) {
    37.9 +void HeapRegionSeq::initialize(HeapWord* bottom, HeapWord* end) {
   37.10    assert((uintptr_t) bottom % HeapRegion::GrainBytes == 0,
   37.11           "bottom should be heap region aligned");
   37.12    assert((uintptr_t) end % HeapRegion::GrainBytes == 0,
   37.13           "end should be heap region aligned");
   37.14  
   37.15 -  _length = 0;
   37.16 -  _heap_bottom = bottom;
   37.17 -  _heap_end = end;
   37.18 -  _region_shift = HeapRegion::LogOfHRGrainBytes;
   37.19    _next_search_index = 0;
   37.20    _allocated_length = 0;
   37.21 -  _max_length = max_length;
   37.22  
   37.23 -  _regions = NEW_C_HEAP_ARRAY(HeapRegion*, max_length, mtGC);
   37.24 -  memset(_regions, 0, (size_t) max_length * sizeof(HeapRegion*));
   37.25 -  _regions_biased = _regions - ((uintx) bottom >> _region_shift);
   37.26 -
   37.27 -  assert(&_regions[0] == &_regions_biased[addr_to_index_biased(bottom)],
   37.28 -         "bottom should be included in the region with index 0");
   37.29 +  _regions.initialize(bottom, end, HeapRegion::GrainBytes);
   37.30  }
   37.31  
   37.32  MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
   37.33 @@ -101,15 +90,15 @@
   37.34    G1CollectedHeap* g1h = G1CollectedHeap::heap();
   37.35  
   37.36    HeapWord* next_bottom = old_end;
   37.37 -  assert(_heap_bottom <= next_bottom, "invariant");
   37.38 +  assert(heap_bottom() <= next_bottom, "invariant");
   37.39    while (next_bottom < new_end) {
   37.40 -    assert(next_bottom < _heap_end, "invariant");
   37.41 +    assert(next_bottom < heap_end(), "invariant");
   37.42      uint index = length();
   37.43  
   37.44 -    assert(index < _max_length, "otherwise we cannot expand further");
   37.45 +    assert(index < max_length(), "otherwise we cannot expand further");
   37.46      if (index == 0) {
   37.47        // We have not allocated any regions so far
   37.48 -      assert(next_bottom == _heap_bottom, "invariant");
   37.49 +      assert(next_bottom == heap_bottom(), "invariant");
   37.50      } else {
   37.51        // next_bottom should match the end of the last/previous region
   37.52        assert(next_bottom == at(index - 1)->end(), "invariant");
   37.53 @@ -122,8 +111,8 @@
   37.54          // allocation failed, we bail out and return what we have done so far
   37.55          return MemRegion(old_end, next_bottom);
   37.56        }
   37.57 -      assert(_regions[index] == NULL, "invariant");
   37.58 -      _regions[index] = new_hr;
   37.59 +      assert(_regions.get_by_index(index) == NULL, "invariant");
   37.60 +      _regions.set_by_index(index, new_hr);
   37.61        increment_allocated_length();
   37.62      }
   37.63      // Have to increment the length first, otherwise we will get an
   37.64 @@ -228,26 +217,26 @@
   37.65  
   37.66  #ifndef PRODUCT
   37.67  void HeapRegionSeq::verify_optional() {
   37.68 -  guarantee(_length <= _allocated_length,
   37.69 +  guarantee(length() <= _allocated_length,
   37.70              err_msg("invariant: _length: %u _allocated_length: %u",
   37.71 -                    _length, _allocated_length));
   37.72 -  guarantee(_allocated_length <= _max_length,
   37.73 +                    length(), _allocated_length));
   37.74 +  guarantee(_allocated_length <= max_length(),
   37.75              err_msg("invariant: _allocated_length: %u _max_length: %u",
   37.76 -                    _allocated_length, _max_length));
   37.77 -  guarantee(_next_search_index <= _length,
   37.78 +                    _allocated_length, max_length()));
   37.79 +  guarantee(_next_search_index <= length(),
   37.80              err_msg("invariant: _next_search_index: %u _length: %u",
   37.81 -                    _next_search_index, _length));
   37.82 +                    _next_search_index, length()));
   37.83  
   37.84 -  HeapWord* prev_end = _heap_bottom;
   37.85 +  HeapWord* prev_end = heap_bottom();
   37.86    for (uint i = 0; i < _allocated_length; i += 1) {
   37.87 -    HeapRegion* hr = _regions[i];
   37.88 +    HeapRegion* hr = _regions.get_by_index(i);
   37.89      guarantee(hr != NULL, err_msg("invariant: i: %u", i));
   37.90      guarantee(hr->bottom() == prev_end,
   37.91                err_msg("invariant i: %u "HR_FORMAT" prev_end: "PTR_FORMAT,
   37.92                        i, HR_FORMAT_PARAMS(hr), prev_end));
   37.93      guarantee(hr->hrs_index() == i,
   37.94                err_msg("invariant: i: %u hrs_index(): %u", i, hr->hrs_index()));
   37.95 -    if (i < _length) {
   37.96 +    if (i < length()) {
   37.97        // Asserts will fire if i is >= _length
   37.98        HeapWord* addr = hr->bottom();
   37.99        guarantee(addr_to_region(addr) == hr, "sanity");
  37.100 @@ -265,8 +254,8 @@
  37.101        prev_end = hr->end();
  37.102      }
  37.103    }
  37.104 -  for (uint i = _allocated_length; i < _max_length; i += 1) {
  37.105 -    guarantee(_regions[i] == NULL, err_msg("invariant i: %u", i));
  37.106 +  for (uint i = _allocated_length; i < max_length(); i += 1) {
  37.107 +    guarantee(_regions.get_by_index(i) == NULL, err_msg("invariant i: %u", i));
  37.108    }
  37.109  }
  37.110  #endif // PRODUCT
    38.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Tue Oct 01 08:54:05 2013 -0400
    38.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.hpp	Tue Oct 01 09:21:43 2013 -0400
    38.3 @@ -25,10 +25,17 @@
    38.4  #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
    38.5  #define SHARE_VM_GC_IMPLEMENTATION_G1_HEAPREGIONSEQ_HPP
    38.6  
    38.7 +#include "gc_implementation/g1/g1BiasedArray.hpp"
    38.8 +
    38.9  class HeapRegion;
   38.10  class HeapRegionClosure;
   38.11  class FreeRegionList;
   38.12  
   38.13 +class G1HeapRegionTable : public G1BiasedMappedArray<HeapRegion*> {
   38.14 + protected:
   38.15 +   virtual HeapRegion* default_value() const { return NULL; }
   38.16 +};
   38.17 +
   38.18  // This class keeps track of the region metadata (i.e., HeapRegion
   38.19  // instances). They are kept in the _regions array in address
   38.20  // order. A region's index in the array corresponds to its index in
   38.21 @@ -44,35 +51,21 @@
   38.22  //
   38.23  // We keep track of three lengths:
   38.24  //
   38.25 -// * _length (returned by length()) is the number of currently
   38.26 +// * _committed_length (returned by length()) is the number of currently
   38.27  //   committed regions.
   38.28  // * _allocated_length (not exposed outside this class) is the
   38.29  //   number of regions for which we have HeapRegions.
   38.30 -// * _max_length (returned by max_length()) is the maximum number of
   38.31 -//   regions the heap can have.
   38.32 +// * max_length() returns the maximum number of regions the heap can have.
   38.33  //
   38.34 -// and maintain that: _length <= _allocated_length <= _max_length
   38.35 +// and maintain that: _committed_length <= _allocated_length <= max_length()
   38.36  
   38.37  class HeapRegionSeq: public CHeapObj<mtGC> {
   38.38    friend class VMStructs;
   38.39  
   38.40 -  // The array that holds the HeapRegions.
   38.41 -  HeapRegion** _regions;
   38.42 -
   38.43 -  // Version of _regions biased to address 0
   38.44 -  HeapRegion** _regions_biased;
   38.45 +  G1HeapRegionTable _regions;
   38.46  
   38.47    // The number of regions committed in the heap.
   38.48 -  uint _length;
   38.49 -
   38.50 -  // The address of the first reserved word in the heap.
   38.51 -  HeapWord* _heap_bottom;
   38.52 -
   38.53 -  // The address of the last reserved word in the heap - 1.
   38.54 -  HeapWord* _heap_end;
   38.55 -
   38.56 -  // The log of the region byte size.
   38.57 -  uint _region_shift;
   38.58 +  uint _committed_length;
   38.59  
   38.60    // A hint for which index to start searching from for humongous
   38.61    // allocations.
   38.62 @@ -81,37 +74,33 @@
   38.63    // The number of regions for which we have allocated HeapRegions for.
   38.64    uint _allocated_length;
   38.65  
   38.66 -  // The maximum number of regions in the heap.
   38.67 -  uint _max_length;
   38.68 -
   38.69    // Find a contiguous set of empty regions of length num, starting
   38.70    // from the given index.
   38.71    uint find_contiguous_from(uint from, uint num);
   38.72  
   38.73 -  // Map a heap address to a biased region index. Assume that the
   38.74 -  // address is valid.
   38.75 -  inline uintx addr_to_index_biased(HeapWord* addr) const;
   38.76 -
   38.77    void increment_allocated_length() {
   38.78 -    assert(_allocated_length < _max_length, "pre-condition");
   38.79 +    assert(_allocated_length < max_length(), "pre-condition");
   38.80      _allocated_length++;
   38.81    }
   38.82  
   38.83    void increment_length() {
   38.84 -    assert(_length < _max_length, "pre-condition");
   38.85 -    _length++;
   38.86 +    assert(length() < max_length(), "pre-condition");
   38.87 +    _committed_length++;
   38.88    }
   38.89  
   38.90    void decrement_length() {
   38.91 -    assert(_length > 0, "pre-condition");
   38.92 -    _length--;
   38.93 +    assert(length() > 0, "pre-condition");
   38.94 +    _committed_length--;
   38.95    }
   38.96  
   38.97 +  HeapWord* heap_bottom() const { return _regions.bottom_address_mapped(); }
   38.98 +  HeapWord* heap_end() const {return _regions.end_address_mapped(); }
   38.99 +
  38.100   public:
  38.101    // Empty contructor, we'll initialize it with the initialize() method.
  38.102 -  HeapRegionSeq() { }
  38.103 +  HeapRegionSeq() : _regions(), _committed_length(0), _next_search_index(0), _allocated_length(0) { }
  38.104  
  38.105 -  void initialize(HeapWord* bottom, HeapWord* end, uint max_length);
  38.106 +  void initialize(HeapWord* bottom, HeapWord* end);
  38.107  
  38.108    // Return the HeapRegion at the given index. Assume that the index
  38.109    // is valid.
  38.110 @@ -126,10 +115,10 @@
  38.111    inline HeapRegion* addr_to_region_unsafe(HeapWord* addr) const;
  38.112  
  38.113    // Return the number of regions that have been committed in the heap.
  38.114 -  uint length() const { return _length; }
  38.115 +  uint length() const { return _committed_length; }
  38.116  
  38.117    // Return the maximum number of regions in the heap.
  38.118 -  uint max_length() const { return _max_length; }
  38.119 +  uint max_length() const { return (uint)_regions.length(); }
  38.120  
  38.121    // Expand the sequence to reflect that the heap has grown from
  38.122    // old_end to new_end. Either create new HeapRegions, or re-use
    39.1 --- a/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp	Tue Oct 01 08:54:05 2013 -0400
    39.2 +++ b/src/share/vm/gc_implementation/g1/heapRegionSeq.inline.hpp	Tue Oct 01 09:21:43 2013 -0400
    39.3 @@ -28,28 +28,16 @@
    39.4  #include "gc_implementation/g1/heapRegion.hpp"
    39.5  #include "gc_implementation/g1/heapRegionSeq.hpp"
    39.6  
    39.7 -inline uintx HeapRegionSeq::addr_to_index_biased(HeapWord* addr) const {
    39.8 -  assert(_heap_bottom <= addr && addr < _heap_end,
    39.9 -         err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
   39.10 -                 addr, _heap_bottom, _heap_end));
   39.11 -  uintx index = (uintx) addr >> _region_shift;
   39.12 -  return index;
   39.13 -}
   39.14 -
   39.15  inline HeapRegion* HeapRegionSeq::addr_to_region_unsafe(HeapWord* addr) const {
   39.16 -  assert(_heap_bottom <= addr && addr < _heap_end,
   39.17 -         err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT" end: "PTR_FORMAT,
   39.18 -                 addr, _heap_bottom, _heap_end));
   39.19 -  uintx index_biased = addr_to_index_biased(addr);
   39.20 -  HeapRegion* hr = _regions_biased[index_biased];
   39.21 +  HeapRegion* hr = _regions.get_by_address(addr);
   39.22    assert(hr != NULL, "invariant");
   39.23    return hr;
   39.24  }
   39.25  
   39.26  inline HeapRegion* HeapRegionSeq::addr_to_region(HeapWord* addr) const {
   39.27 -  if (addr != NULL && addr < _heap_end) {
   39.28 -    assert(addr >= _heap_bottom,
   39.29 -          err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, addr, _heap_bottom));
   39.30 +  if (addr != NULL && addr < heap_end()) {
   39.31 +    assert(addr >= heap_bottom(),
   39.32 +          err_msg("addr: "PTR_FORMAT" bottom: "PTR_FORMAT, addr, heap_bottom()));
   39.33      return addr_to_region_unsafe(addr);
   39.34    }
   39.35    return NULL;
   39.36 @@ -57,7 +45,7 @@
   39.37  
   39.38  inline HeapRegion* HeapRegionSeq::at(uint index) const {
   39.39    assert(index < length(), "pre-condition");
   39.40 -  HeapRegion* hr = _regions[index];
   39.41 +  HeapRegion* hr = _regions.get_by_index(index);
   39.42    assert(hr != NULL, "sanity");
   39.43    assert(hr->hrs_index() == index, "sanity");
   39.44    return hr;
    40.1 --- a/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Tue Oct 01 08:54:05 2013 -0400
    40.2 +++ b/src/share/vm/gc_implementation/g1/vmStructs_g1.hpp	Tue Oct 01 09:21:43 2013 -0400
    40.3 @@ -34,8 +34,14 @@
    40.4    static_field(HeapRegion, GrainBytes,        size_t)                         \
    40.5    static_field(HeapRegion, LogOfHRGrainBytes, int)                            \
    40.6                                                                                \
    40.7 -  nonstatic_field(HeapRegionSeq,   _regions, HeapRegion**)                    \
    40.8 -  nonstatic_field(HeapRegionSeq,   _length,  uint)                            \
    40.9 +  nonstatic_field(G1HeapRegionTable, _base,             address)              \
   40.10 +  nonstatic_field(G1HeapRegionTable, _length,           size_t)               \
   40.11 +  nonstatic_field(G1HeapRegionTable, _biased_base,      address)              \
   40.12 +  nonstatic_field(G1HeapRegionTable, _bias,             size_t)               \
   40.13 +  nonstatic_field(G1HeapRegionTable, _shift_by,         uint)                 \
   40.14 +                                                                              \
   40.15 +  nonstatic_field(HeapRegionSeq,   _regions,            G1HeapRegionTable)    \
   40.16 +  nonstatic_field(HeapRegionSeq,   _committed_length,   uint)                 \
   40.17                                                                                \
   40.18    nonstatic_field(G1CollectedHeap, _hrs,                HeapRegionSeq)        \
   40.19    nonstatic_field(G1CollectedHeap, _g1_committed,       MemRegion)            \
   40.20 @@ -58,6 +64,8 @@
   40.21  
   40.22  #define VM_TYPES_G1(declare_type, declare_toplevel_type)                      \
   40.23                                                                                \
   40.24 +  declare_toplevel_type(G1HeapRegionTable)                                    \
   40.25 +                                                                              \
   40.26    declare_type(G1CollectedHeap, SharedHeap)                                   \
   40.27                                                                                \
   40.28    declare_type(HeapRegion, ContiguousSpace)                                   \
    41.1 --- a/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Tue Oct 01 08:54:05 2013 -0400
    41.2 +++ b/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	Tue Oct 01 09:21:43 2013 -0400
    41.3 @@ -1103,7 +1103,7 @@
    41.4    }
    41.5  }
    41.6  
    41.7 -static const oop ClaimedForwardPtr = oop(0x4);
    41.8 +static const oop ClaimedForwardPtr = cast_to_oop<intptr_t>(0x4);
    41.9  
   41.10  // Because of concurrency, there are times where an object for which
   41.11  // "is_forwarded()" is true contains an "interim" forwarding pointer
   41.12 @@ -1226,7 +1226,7 @@
   41.13    if (TraceScavenge) {
   41.14      gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
   41.15         is_in_reserved(new_obj) ? "copying" : "tenuring",
   41.16 -       new_obj->klass()->internal_name(), old, new_obj, new_obj->size());
   41.17 +       new_obj->klass()->internal_name(), (void *)old, (void *)new_obj, new_obj->size());
   41.18    }
   41.19  #endif
   41.20  
   41.21 @@ -1347,7 +1347,7 @@
   41.22    if (TraceScavenge) {
   41.23      gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
   41.24         is_in_reserved(new_obj) ? "copying" : "tenuring",
   41.25 -       new_obj->klass()->internal_name(), old, new_obj, new_obj->size());
   41.26 +       new_obj->klass()->internal_name(), (void *)old, (void *)new_obj, new_obj->size());
   41.27    }
   41.28  #endif
   41.29  
   41.30 @@ -1436,7 +1436,7 @@
   41.31  // (although some performance comparisons would be useful since
   41.32  // single global lists have their own performance disadvantages
   41.33  // as we were made painfully aware not long ago, see 6786503).
   41.34 -#define BUSY (oop(0x1aff1aff))
   41.35 +#define BUSY (cast_to_oop<intptr_t>(0x1aff1aff))
   41.36  void ParNewGeneration::push_on_overflow_list(oop from_space_obj, ParScanThreadState* par_scan_state) {
   41.37    assert(is_in_reserved(from_space_obj), "Should be from this generation");
   41.38    if (ParGCUseLocalOverflow) {
   41.39 @@ -1512,7 +1512,7 @@
   41.40    if (_overflow_list == NULL) return false;
   41.41  
   41.42    // Otherwise, there was something there; try claiming the list.
   41.43 -  oop prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
   41.44 +  oop prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
   41.45    // Trim off a prefix of at most objsFromOverflow items
   41.46    Thread* tid = Thread::current();
   41.47    size_t spin_count = (size_t)ParallelGCThreads;
   41.48 @@ -1526,7 +1526,7 @@
   41.49        return false;
   41.50      } else if (_overflow_list != BUSY) {
   41.51       // try and grab the prefix
   41.52 -     prefix = (oop)Atomic::xchg_ptr(BUSY, &_overflow_list);
   41.53 +     prefix = cast_to_oop(Atomic::xchg_ptr(BUSY, &_overflow_list));
   41.54      }
   41.55    }
   41.56    if (prefix == NULL || prefix == BUSY) {
    42.1 --- a/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp	Tue Oct 01 08:54:05 2013 -0400
    42.2 +++ b/src/share/vm/gc_implementation/parNew/parOopClosures.inline.hpp	Tue Oct 01 09:21:43 2013 -0400
    42.3 @@ -1,5 +1,5 @@
    42.4  /*
    42.5 - * Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
    42.6 + * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
    42.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    42.8   *
    42.9   * This code is free software; you can redistribute it and/or modify it
   42.10 @@ -84,7 +84,7 @@
   42.11          Space* sp = gch->space_containing(p);
   42.12          oop obj = oop(sp->block_start(p));
   42.13          assert((HeapWord*)obj < (HeapWord*)p, "Error");
   42.14 -        tty->print_cr("Object: " PTR_FORMAT, obj);
   42.15 +        tty->print_cr("Object: " PTR_FORMAT, (void *)obj);
   42.16          tty->print_cr("-------");
   42.17          obj->print();
   42.18          tty->print_cr("-----");
   42.19 @@ -110,7 +110,7 @@
   42.20          if (TraceScavenge) {
   42.21            gclog_or_tty->print_cr("{%s %s ( " PTR_FORMAT " ) " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
   42.22               "forwarded ",
   42.23 -             new_obj->klass()->internal_name(), p, obj, new_obj, new_obj->size());
   42.24 +             new_obj->klass()->internal_name(), p, (void *)obj, (void *)new_obj, new_obj->size());
   42.25          }
   42.26  #endif
   42.27  
    43.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Tue Oct 01 08:54:05 2013 -0400
    43.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp	Tue Oct 01 09:21:43 2013 -0400
    43.3 @@ -333,7 +333,7 @@
    43.4      gclog_or_tty->print_cr("{%s %s 0x%x (%d)}",
    43.5                             "promotion-failure",
    43.6                             obj->klass()->internal_name(),
    43.7 -                           obj, obj->size());
    43.8 +                           (void *)obj, obj->size());
    43.9  
   43.10    }
   43.11  #endif
    44.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Tue Oct 01 08:54:05 2013 -0400
    44.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	Tue Oct 01 09:21:43 2013 -0400
    44.3 @@ -126,7 +126,7 @@
    44.4  
    44.5    oop* mask_chunked_array_oop(oop obj) {
    44.6      assert(!is_oop_masked((oop*) obj), "invariant");
    44.7 -    oop* ret = (oop*) ((uintptr_t)obj | PS_CHUNKED_ARRAY_OOP_MASK);
    44.8 +    oop* ret = (oop*) (cast_from_oop<uintptr_t>(obj) | PS_CHUNKED_ARRAY_OOP_MASK);
    44.9      assert(is_oop_masked(ret), "invariant");
   44.10      return ret;
   44.11    }
    45.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Tue Oct 01 08:54:05 2013 -0400
    45.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	Tue Oct 01 09:21:43 2013 -0400
    45.3 @@ -225,7 +225,7 @@
    45.4    if (TraceScavenge) {
    45.5      gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
    45.6         PSScavenge::should_scavenge(&new_obj) ? "copying" : "tenuring",
    45.7 -       new_obj->klass()->internal_name(), o, new_obj, new_obj->size());
    45.8 +       new_obj->klass()->internal_name(), (void *)o, (void *)new_obj, new_obj->size());
    45.9    }
   45.10  #endif
   45.11  
    46.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp	Tue Oct 01 08:54:05 2013 -0400
    46.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psScavenge.inline.hpp	Tue Oct 01 09:21:43 2013 -0400
    46.3 @@ -1,5 +1,5 @@
    46.4  /*
    46.5 - * Copyright (c) 2002, 2012, Oracle and/or its affiliates. All rights reserved.
    46.6 + * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
    46.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    46.8   *
    46.9   * This code is free software; you can redistribute it and/or modify it
   46.10 @@ -81,7 +81,7 @@
   46.11    if (TraceScavenge &&  o->is_forwarded()) {
   46.12      gclog_or_tty->print_cr("{%s %s " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
   46.13         "forwarding",
   46.14 -       new_obj->klass()->internal_name(), o, new_obj, new_obj->size());
   46.15 +       new_obj->klass()->internal_name(), (void *)o, (void *)new_obj, new_obj->size());
   46.16    }
   46.17  #endif
   46.18  
    47.1 --- a/src/share/vm/interpreter/bytecodeTracer.cpp	Tue Oct 01 08:54:05 2013 -0400
    47.2 +++ b/src/share/vm/interpreter/bytecodeTracer.cpp	Tue Oct 01 09:21:43 2013 -0400
    47.3 @@ -215,7 +215,7 @@
    47.4        st->print_cr(" %s", buf);
    47.5      }
    47.6    } else {
    47.7 -    st->print_cr(" " PTR_FORMAT, (intptr_t) value);
    47.8 +    st->print_cr(" " PTR_FORMAT, (void *)value);
    47.9    }
   47.10  }
   47.11  
    48.1 --- a/src/share/vm/interpreter/linkResolver.cpp	Tue Oct 01 08:54:05 2013 -0400
    48.2 +++ b/src/share/vm/interpreter/linkResolver.cpp	Tue Oct 01 09:21:43 2013 -0400
    48.3 @@ -1384,7 +1384,7 @@
    48.4                                                       THREAD);
    48.5    if (HAS_PENDING_EXCEPTION) {
    48.6      if (TraceMethodHandles) {
    48.7 -      tty->print_cr("invokedynamic throws BSME for "INTPTR_FORMAT, PENDING_EXCEPTION);
    48.8 +      tty->print_cr("invokedynamic throws BSME for "INTPTR_FORMAT, (void *)PENDING_EXCEPTION);
    48.9        PENDING_EXCEPTION->print();
   48.10      }
   48.11      if (PENDING_EXCEPTION->is_a(SystemDictionary::BootstrapMethodError_klass())) {
    49.1 --- a/src/share/vm/memory/gcLocker.cpp	Tue Oct 01 08:54:05 2013 -0400
    49.2 +++ b/src/share/vm/memory/gcLocker.cpp	Tue Oct 01 09:21:43 2013 -0400
    49.3 @@ -122,7 +122,7 @@
    49.4      // strictly needed. It's added here to make it clear that
    49.5      // the GC will NOT be performed if any other caller
    49.6      // of GC_locker::lock() still needs GC locked.
    49.7 -    if (!is_active()) {
    49.8 +    if (!is_active_internal()) {
    49.9        _doing_gc = true;
   49.10        {
   49.11          // Must give up the lock while at a safepoint
    50.1 --- a/src/share/vm/memory/gcLocker.hpp	Tue Oct 01 08:54:05 2013 -0400
    50.2 +++ b/src/share/vm/memory/gcLocker.hpp	Tue Oct 01 09:21:43 2013 -0400
    50.3 @@ -88,7 +88,7 @@
    50.4   public:
    50.5    // Accessors
    50.6    static bool is_active() {
    50.7 -    assert(_needs_gc || SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
    50.8 +    assert(SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
    50.9      return is_active_internal();
   50.10    }
   50.11    static bool needs_gc()       { return _needs_gc;                        }
    51.1 --- a/src/share/vm/memory/heapInspection.hpp	Tue Oct 01 08:54:05 2013 -0400
    51.2 +++ b/src/share/vm/memory/heapInspection.hpp	Tue Oct 01 09:21:43 2013 -0400
    51.3 @@ -150,11 +150,11 @@
    51.4    HEAP_INSPECTION_COLUMNS_DO(DECLARE_KLASS_SIZE_STATS_FIELD)
    51.5  
    51.6    static int count(oop x) {
    51.7 -    return (HeapWordSize * ((x) ? (x)->size() : 0));
    51.8 +    return (HeapWordSize * (((x) != NULL) ? (x)->size() : 0));
    51.9    }
   51.10  
   51.11    static int count_array(objArrayOop x) {
   51.12 -    return (HeapWordSize * ((x) ? (x)->size() : 0));
   51.13 +    return (HeapWordSize * (((x) != NULL) ? (x)->size() : 0));
   51.14    }
   51.15  
   51.16    template <class T> static int count(T* x) {
    52.1 --- a/src/share/vm/memory/metaspace.cpp	Tue Oct 01 08:54:05 2013 -0400
    52.2 +++ b/src/share/vm/memory/metaspace.cpp	Tue Oct 01 09:21:43 2013 -0400
    52.3 @@ -23,6 +23,7 @@
    52.4   */
    52.5  #include "precompiled.hpp"
    52.6  #include "gc_interface/collectedHeap.hpp"
    52.7 +#include "memory/allocation.hpp"
    52.8  #include "memory/binaryTreeDictionary.hpp"
    52.9  #include "memory/freeList.hpp"
   52.10  #include "memory/collectorPolicy.hpp"
   52.11 @@ -111,7 +112,7 @@
   52.12  // Has three lists of free chunks, and a total size and
   52.13  // count that includes all three
   52.14  
   52.15 -class ChunkManager VALUE_OBJ_CLASS_SPEC {
   52.16 +class ChunkManager : public CHeapObj<mtInternal> {
   52.17  
   52.18    // Free list of chunks of different sizes.
   52.19    //   SpecializedChunk
   52.20 @@ -158,7 +159,12 @@
   52.21  
   52.22   public:
   52.23  
   52.24 -  ChunkManager() : _free_chunks_total(0), _free_chunks_count(0) {}
   52.25 +  ChunkManager(size_t specialized_size, size_t small_size, size_t medium_size)
   52.26 +      : _free_chunks_total(0), _free_chunks_count(0) {
   52.27 +    _free_chunks[SpecializedIndex].set_size(specialized_size);
   52.28 +    _free_chunks[SmallIndex].set_size(small_size);
   52.29 +    _free_chunks[MediumIndex].set_size(medium_size);
   52.30 +  }
   52.31  
   52.32    // add or delete (return) a chunk to the global freelist.
   52.33    Metachunk* chunk_freelist_allocate(size_t word_size);
   52.34 @@ -219,7 +225,7 @@
   52.35    void locked_print_free_chunks(outputStream* st);
   52.36    void locked_print_sum_free_chunks(outputStream* st);
   52.37  
   52.38 -  void print_on(outputStream* st);
   52.39 +  void print_on(outputStream* st) const;
   52.40  };
   52.41  
   52.42  // Used to manage the free list of Metablocks (a block corresponds
   52.43 @@ -276,11 +282,6 @@
   52.44    // VirtualSpace
   52.45    Metachunk* first_chunk() { return (Metachunk*) bottom(); }
   52.46  
   52.47 -  void inc_container_count();
   52.48 -#ifdef ASSERT
   52.49 -  uint container_count_slow();
   52.50 -#endif
   52.51 -
   52.52   public:
   52.53  
   52.54    VirtualSpaceNode(size_t byte_size);
   52.55 @@ -314,8 +315,10 @@
   52.56    void inc_top(size_t word_size) { _top += word_size; }
   52.57  
   52.58    uintx container_count() { return _container_count; }
   52.59 +  void inc_container_count();
   52.60    void dec_container_count();
   52.61  #ifdef ASSERT
   52.62 +  uint container_count_slow();
   52.63    void verify_container_count();
   52.64  #endif
   52.65  
   52.66 @@ -421,8 +424,6 @@
   52.67    VirtualSpaceNode* _virtual_space_list;
   52.68    // virtual space currently being used for allocations
   52.69    VirtualSpaceNode* _current_virtual_space;
   52.70 -  // Free chunk list for all other metadata
   52.71 -  ChunkManager      _chunk_manager;
   52.72  
   52.73    // Can this virtual list allocate >1 spaces?  Also, used to determine
   52.74    // whether to allocate unlimited small chunks in this virtual space
   52.75 @@ -475,7 +476,6 @@
   52.76      return _current_virtual_space;
   52.77    }
   52.78  
   52.79 -  ChunkManager* chunk_manager() { return &_chunk_manager; }
   52.80    bool is_class() const { return _is_class; }
   52.81  
   52.82    // Allocate the first virtualspace.
   52.83 @@ -494,14 +494,7 @@
   52.84    void dec_virtual_space_count();
   52.85  
   52.86    // Unlink empty VirtualSpaceNodes and free it.
   52.87 -  void purge();
   52.88 -
   52.89 -  // Used and capacity in the entire list of virtual spaces.
   52.90 -  // These are global values shared by all Metaspaces
   52.91 -  size_t capacity_words_sum();
   52.92 -  size_t capacity_bytes_sum() { return capacity_words_sum() * BytesPerWord; }
   52.93 -  size_t used_words_sum();
   52.94 -  size_t used_bytes_sum() { return used_words_sum() * BytesPerWord; }
   52.95 +  void purge(ChunkManager* chunk_manager);
   52.96  
   52.97    bool contains(const void *ptr);
   52.98  
   52.99 @@ -582,18 +575,12 @@
  52.100    // Type of metadata allocated.
  52.101    Metaspace::MetadataType _mdtype;
  52.102  
  52.103 -  // Chunk related size
  52.104 -  size_t _medium_chunk_bunch;
  52.105 -
  52.106    // List of chunks in use by this SpaceManager.  Allocations
  52.107    // are done from the current chunk.  The list is used for deallocating
  52.108    // chunks when the SpaceManager is freed.
  52.109    Metachunk* _chunks_in_use[NumberOfInUseLists];
  52.110    Metachunk* _current_chunk;
  52.111  
  52.112 -  // Virtual space where allocation comes from.
  52.113 -  VirtualSpaceList* _vs_list;
  52.114 -
  52.115    // Number of small chunks to allocate to a manager
  52.116    // If class space manager, small chunks are unlimited
  52.117    static uint const _small_chunk_limit;
  52.118 @@ -626,7 +613,9 @@
  52.119    }
  52.120  
  52.121    Metaspace::MetadataType mdtype() { return _mdtype; }
  52.122 -  VirtualSpaceList* vs_list() const    { return _vs_list; }
  52.123 +
  52.124 +  VirtualSpaceList* vs_list()   const { return Metaspace::get_space_list(_mdtype); }
  52.125 +  ChunkManager* chunk_manager() const { return Metaspace::get_chunk_manager(_mdtype); }
  52.126  
  52.127    Metachunk* current_chunk() const { return _current_chunk; }
  52.128    void set_current_chunk(Metachunk* v) {
  52.129 @@ -648,18 +637,19 @@
  52.130  
  52.131   public:
  52.132    SpaceManager(Metaspace::MetadataType mdtype,
  52.133 -               Mutex* lock,
  52.134 -               VirtualSpaceList* vs_list);
  52.135 +               Mutex* lock);
  52.136    ~SpaceManager();
  52.137  
  52.138    enum ChunkMultiples {
  52.139      MediumChunkMultiple = 4
  52.140    };
  52.141  
  52.142 +  bool is_class() { return _mdtype == Metaspace::ClassType; }
  52.143 +
  52.144    // Accessors
  52.145    size_t specialized_chunk_size() { return SpecializedChunk; }
  52.146 -  size_t small_chunk_size() { return (size_t) vs_list()->is_class() ? ClassSmallChunk : SmallChunk; }
  52.147 -  size_t medium_chunk_size() { return (size_t) vs_list()->is_class() ? ClassMediumChunk : MediumChunk; }
  52.148 +  size_t small_chunk_size() { return (size_t) is_class() ? ClassSmallChunk : SmallChunk; }
  52.149 +  size_t medium_chunk_size() { return (size_t) is_class() ? ClassMediumChunk : MediumChunk; }
  52.150    size_t medium_chunk_bunch() { return medium_chunk_size() * MediumChunkMultiple; }
  52.151  
  52.152    size_t allocated_blocks_words() const { return _allocated_blocks_words; }
  52.153 @@ -762,7 +752,7 @@
  52.154    _container_count++;
  52.155    assert(_container_count == container_count_slow(),
  52.156           err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT
  52.157 -                 "container_count_slow() " SIZE_FORMAT,
  52.158 +                 " container_count_slow() " SIZE_FORMAT,
  52.159                   _container_count, container_count_slow()));
  52.160  }
  52.161  
  52.162 @@ -775,7 +765,7 @@
  52.163  void VirtualSpaceNode::verify_container_count() {
  52.164    assert(_container_count == container_count_slow(),
  52.165      err_msg("Inconsistency in countainer_count _container_count " SIZE_FORMAT
  52.166 -            "container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
  52.167 +            " container_count_slow() " SIZE_FORMAT, _container_count, container_count_slow()));
  52.168  }
  52.169  #endif
  52.170  
  52.171 @@ -1020,7 +1010,7 @@
  52.172  // Walk the list of VirtualSpaceNodes and delete
  52.173  // nodes with a 0 container_count.  Remove Metachunks in
  52.174  // the node from their respective freelists.
  52.175 -void VirtualSpaceList::purge() {
  52.176 +void VirtualSpaceList::purge(ChunkManager* chunk_manager) {
  52.177    assert_lock_strong(SpaceManager::expand_lock());
  52.178    // Don't use a VirtualSpaceListIterator because this
  52.179    // list is being changed and a straightforward use of an iterator is not safe.
  52.180 @@ -1042,7 +1032,7 @@
  52.181          prev_vsl->set_next(vsl->next());
  52.182        }
  52.183  
  52.184 -      vsl->purge(chunk_manager());
  52.185 +      vsl->purge(chunk_manager);
  52.186        dec_reserved_words(vsl->reserved_words());
  52.187        dec_committed_words(vsl->committed_words());
  52.188        dec_virtual_space_count();
  52.189 @@ -1064,36 +1054,6 @@
  52.190  #endif
  52.191  }
  52.192  
  52.193 -size_t VirtualSpaceList::used_words_sum() {
  52.194 -  size_t allocated_by_vs = 0;
  52.195 -  VirtualSpaceListIterator iter(virtual_space_list());
  52.196 -  while (iter.repeat()) {
  52.197 -    VirtualSpaceNode* vsl = iter.get_next();
  52.198 -    // Sum used region [bottom, top) in each virtualspace
  52.199 -    allocated_by_vs += vsl->used_words_in_vs();
  52.200 -  }
  52.201 -  assert(allocated_by_vs >= chunk_manager()->free_chunks_total_words(),
  52.202 -    err_msg("Total in free chunks " SIZE_FORMAT
  52.203 -            " greater than total from virtual_spaces " SIZE_FORMAT,
  52.204 -            allocated_by_vs, chunk_manager()->free_chunks_total_words()));
  52.205 -  size_t used =
  52.206 -    allocated_by_vs - chunk_manager()->free_chunks_total_words();
  52.207 -  return used;
  52.208 -}
  52.209 -
  52.210 -// Space available in all MetadataVirtualspaces allocated
  52.211 -// for metadata.  This is the upper limit on the capacity
  52.212 -// of chunks allocated out of all the MetadataVirtualspaces.
  52.213 -size_t VirtualSpaceList::capacity_words_sum() {
  52.214 -  size_t capacity = 0;
  52.215 -  VirtualSpaceListIterator iter(virtual_space_list());
  52.216 -  while (iter.repeat()) {
  52.217 -    VirtualSpaceNode* vsl = iter.get_next();
  52.218 -    capacity += vsl->capacity_words_in_vs();
  52.219 -  }
  52.220 -  return capacity;
  52.221 -}
  52.222 -
  52.223  VirtualSpaceList::VirtualSpaceList(size_t word_size ) :
  52.224                                     _is_class(false),
  52.225                                     _virtual_space_list(NULL),
  52.226 @@ -1104,10 +1064,6 @@
  52.227    MutexLockerEx cl(SpaceManager::expand_lock(),
  52.228                     Mutex::_no_safepoint_check_flag);
  52.229    bool initialization_succeeded = grow_vs(word_size);
  52.230 -
  52.231 -  _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk);
  52.232 -  _chunk_manager.free_chunks(SmallIndex)->set_size(SmallChunk);
  52.233 -  _chunk_manager.free_chunks(MediumIndex)->set_size(MediumChunk);
  52.234    assert(initialization_succeeded,
  52.235      " VirtualSpaceList initialization should not fail");
  52.236  }
  52.237 @@ -1123,9 +1079,6 @@
  52.238                     Mutex::_no_safepoint_check_flag);
  52.239    VirtualSpaceNode* class_entry = new VirtualSpaceNode(rs);
  52.240    bool succeeded = class_entry->initialize();
  52.241 -  _chunk_manager.free_chunks(SpecializedIndex)->set_size(SpecializedChunk);
  52.242 -  _chunk_manager.free_chunks(SmallIndex)->set_size(ClassSmallChunk);
  52.243 -  _chunk_manager.free_chunks(MediumIndex)->set_size(ClassMediumChunk);
  52.244    assert(succeeded, " VirtualSpaceList initialization should not fail");
  52.245    link_vs(class_entry);
  52.246  }
  52.247 @@ -1142,7 +1095,7 @@
  52.248    }
  52.249    // Reserve the space
  52.250    size_t vs_byte_size = vs_word_size * BytesPerWord;
  52.251 -  assert(vs_byte_size % os::vm_page_size() == 0, "Not aligned");
  52.252 +  assert(vs_byte_size % os::vm_allocation_granularity() == 0, "Not aligned");
  52.253  
  52.254    // Allocate the meta virtual space and initialize it.
  52.255    VirtualSpaceNode* new_entry = new VirtualSpaceNode(vs_byte_size);
  52.256 @@ -1195,15 +1148,8 @@
  52.257                                             size_t grow_chunks_by_words,
  52.258                                             size_t medium_chunk_bunch) {
  52.259  
  52.260 -  // Get a chunk from the chunk freelist
  52.261 -  Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
  52.262 -
  52.263 -  if (next != NULL) {
  52.264 -    next->container()->inc_container_count();
  52.265 -  } else {
  52.266 -    // Allocate a chunk out of the current virtual space.
  52.267 -    next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
  52.268 -  }
  52.269 +  // Allocate a chunk out of the current virtual space.
  52.270 +  Metachunk* next = current_virtual_space()->get_chunk_vs(grow_chunks_by_words);
  52.271  
  52.272    if (next == NULL) {
  52.273      // Not enough room in current virtual space.  Try to commit
  52.274 @@ -1221,12 +1167,14 @@
  52.275        // being used for CompressedHeaders, don't allocate a new virtualspace.
  52.276        if (can_grow() && MetaspaceGC::should_expand(this, word_size)) {
  52.277          // Get another virtual space.
  52.278 -          size_t grow_vs_words =
  52.279 -            MAX2((size_t)VirtualSpaceSize, aligned_expand_vs_by_words);
  52.280 +        size_t allocation_aligned_expand_words =
  52.281 +            align_size_up(aligned_expand_vs_by_words, os::vm_allocation_granularity() / BytesPerWord);
  52.282 +        size_t grow_vs_words =
  52.283 +            MAX2((size_t)VirtualSpaceSize, allocation_aligned_expand_words);
  52.284          if (grow_vs(grow_vs_words)) {
  52.285            // Got it.  It's on the list now.  Get a chunk from it.
  52.286            assert(current_virtual_space()->expanded_words() == 0,
  52.287 -              "New virtuals space nodes should not have expanded");
  52.288 +              "New virtual space nodes should not have expanded");
  52.289  
  52.290            size_t grow_chunks_by_words_aligned = align_size_up(grow_chunks_by_words,
  52.291                                                                page_size_words);
  52.292 @@ -1342,8 +1290,9 @@
  52.293    // reserved space, because this is a larger space prereserved for compressed
  52.294    // class pointers.
  52.295    if (!FLAG_IS_DEFAULT(MaxMetaspaceSize)) {
  52.296 -    size_t real_allocated = Metaspace::space_list()->reserved_words() +
  52.297 -              MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
  52.298 +    size_t nonclass_allocated = MetaspaceAux::reserved_bytes(Metaspace::NonClassType);
  52.299 +    size_t class_allocated    = MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType);
  52.300 +    size_t real_allocated     = nonclass_allocated + class_allocated;
  52.301      if (real_allocated >= MaxMetaspaceSize) {
  52.302        return false;
  52.303      }
  52.304 @@ -1536,15 +1485,15 @@
  52.305        if (dummy_chunk == NULL) {
  52.306          break;
  52.307        }
  52.308 -      vsl->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);
  52.309 +      sm->chunk_manager()->chunk_freelist_deallocate(dummy_chunk);
  52.310  
  52.311        if (TraceMetadataChunkAllocation && Verbose) {
  52.312          gclog_or_tty->print("Metadebug::deallocate_chunk_a_lot: %d) ",
  52.313                                 sm->sum_count_in_chunks_in_use());
  52.314          dummy_chunk->print_on(gclog_or_tty);
  52.315          gclog_or_tty->print_cr("  Free chunks total %d  count %d",
  52.316 -                               vsl->chunk_manager()->free_chunks_total_words(),
  52.317 -                               vsl->chunk_manager()->free_chunks_count());
  52.318 +                               sm->chunk_manager()->free_chunks_total_words(),
  52.319 +                               sm->chunk_manager()->free_chunks_count());
  52.320        }
  52.321      }
  52.322    } else {
  52.323 @@ -1796,6 +1745,8 @@
  52.324    // work.
  52.325    chunk->set_is_free(false);
  52.326  #endif
  52.327 +  chunk->container()->inc_container_count();
  52.328 +
  52.329    slow_locked_verify();
  52.330    return chunk;
  52.331  }
  52.332 @@ -1830,9 +1781,9 @@
  52.333    return chunk;
  52.334  }
  52.335  
  52.336 -void ChunkManager::print_on(outputStream* out) {
  52.337 +void ChunkManager::print_on(outputStream* out) const {
  52.338    if (PrintFLSStatistics != 0) {
  52.339 -    humongous_dictionary()->report_statistics();
  52.340 +    const_cast<ChunkManager *>(this)->humongous_dictionary()->report_statistics();
  52.341    }
  52.342  }
  52.343  
  52.344 @@ -1979,8 +1930,8 @@
  52.345      }
  52.346    }
  52.347  
  52.348 -  vs_list()->chunk_manager()->locked_print_free_chunks(st);
  52.349 -  vs_list()->chunk_manager()->locked_print_sum_free_chunks(st);
  52.350 +  chunk_manager()->locked_print_free_chunks(st);
  52.351 +  chunk_manager()->locked_print_sum_free_chunks(st);
  52.352  }
  52.353  
  52.354  size_t SpaceManager::calc_chunk_size(size_t word_size) {
  52.355 @@ -2084,9 +2035,7 @@
  52.356  }
  52.357  
  52.358  SpaceManager::SpaceManager(Metaspace::MetadataType mdtype,
  52.359 -                           Mutex* lock,
  52.360 -                           VirtualSpaceList* vs_list) :
  52.361 -  _vs_list(vs_list),
  52.362 +                           Mutex* lock) :
  52.363    _mdtype(mdtype),
  52.364    _allocated_blocks_words(0),
  52.365    _allocated_chunks_words(0),
  52.366 @@ -2172,9 +2121,7 @@
  52.367    MutexLockerEx fcl(SpaceManager::expand_lock(),
  52.368                      Mutex::_no_safepoint_check_flag);
  52.369  
  52.370 -  ChunkManager* chunk_manager = vs_list()->chunk_manager();
  52.371 -
  52.372 -  chunk_manager->slow_locked_verify();
  52.373 +  chunk_manager()->slow_locked_verify();
  52.374  
  52.375    dec_total_from_size_metrics();
  52.376  
  52.377 @@ -2188,8 +2135,8 @@
  52.378  
  52.379    // Have to update before the chunks_in_use lists are emptied
  52.380    // below.
  52.381 -  chunk_manager->inc_free_chunks_total(allocated_chunks_words(),
  52.382 -                                       sum_count_in_chunks_in_use());
  52.383 +  chunk_manager()->inc_free_chunks_total(allocated_chunks_words(),
  52.384 +                                         sum_count_in_chunks_in_use());
  52.385  
  52.386    // Add all the chunks in use by this space manager
  52.387    // to the global list of free chunks.
  52.388 @@ -2204,11 +2151,11 @@
  52.389                               chunk_size_name(i));
  52.390      }
  52.391      Metachunk* chunks = chunks_in_use(i);
  52.392 -    chunk_manager->return_chunks(i, chunks);
  52.393 +    chunk_manager()->return_chunks(i, chunks);
  52.394      set_chunks_in_use(i, NULL);
  52.395      if (TraceMetadataChunkAllocation && Verbose) {
  52.396        gclog_or_tty->print_cr("updated freelist count %d %s",
  52.397 -                             chunk_manager->free_chunks(i)->count(),
  52.398 +                             chunk_manager()->free_chunks(i)->count(),
  52.399                               chunk_size_name(i));
  52.400      }
  52.401      assert(i != HumongousIndex, "Humongous chunks are handled explicitly later");
  52.402 @@ -2245,16 +2192,16 @@
  52.403                     humongous_chunks->word_size(), HumongousChunkGranularity));
  52.404      Metachunk* next_humongous_chunks = humongous_chunks->next();
  52.405      humongous_chunks->container()->dec_container_count();
  52.406 -    chunk_manager->humongous_dictionary()->return_chunk(humongous_chunks);
  52.407 +    chunk_manager()->humongous_dictionary()->return_chunk(humongous_chunks);
  52.408      humongous_chunks = next_humongous_chunks;
  52.409    }
  52.410    if (TraceMetadataChunkAllocation && Verbose) {
  52.411      gclog_or_tty->print_cr("");
  52.412      gclog_or_tty->print_cr("updated dictionary count %d %s",
  52.413 -                     chunk_manager->humongous_dictionary()->total_count(),
  52.414 +                     chunk_manager()->humongous_dictionary()->total_count(),
  52.415                       chunk_size_name(HumongousIndex));
  52.416    }
  52.417 -  chunk_manager->slow_locked_verify();
  52.418 +  chunk_manager()->slow_locked_verify();
  52.419  }
  52.420  
  52.421  const char* SpaceManager::chunk_size_name(ChunkIndex index) const {
  52.422 @@ -2343,9 +2290,7 @@
  52.423      gclog_or_tty->print("SpaceManager::add_chunk: %d) ",
  52.424                          sum_count_in_chunks_in_use());
  52.425      new_chunk->print_on(gclog_or_tty);
  52.426 -    if (vs_list() != NULL) {
  52.427 -      vs_list()->chunk_manager()->locked_print_free_chunks(gclog_or_tty);
  52.428 -    }
  52.429 +    chunk_manager()->locked_print_free_chunks(gclog_or_tty);
  52.430    }
  52.431  }
  52.432  
  52.433 @@ -2361,10 +2306,14 @@
  52.434  
  52.435  Metachunk* SpaceManager::get_new_chunk(size_t word_size,
  52.436                                         size_t grow_chunks_by_words) {
  52.437 -
  52.438 -  Metachunk* next = vs_list()->get_new_chunk(word_size,
  52.439 -                                             grow_chunks_by_words,
  52.440 -                                             medium_chunk_bunch());
  52.441 +  // Get a chunk from the chunk freelist
  52.442 +  Metachunk* next = chunk_manager()->chunk_freelist_allocate(grow_chunks_by_words);
  52.443 +
  52.444 +  if (next == NULL) {
  52.445 +    next = vs_list()->get_new_chunk(word_size,
  52.446 +                                    grow_chunks_by_words,
  52.447 +                                    medium_chunk_bunch());
  52.448 +  }
  52.449  
  52.450    if (TraceMetadataHumongousAllocation && next != NULL &&
  52.451        SpaceManager::is_humongous(next->word_size())) {
  52.452 @@ -2644,13 +2593,12 @@
  52.453  size_t MetaspaceAux::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }
  52.454  
  52.455  size_t MetaspaceAux::free_chunks_total_words(Metaspace::MetadataType mdtype) {
  52.456 -  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  52.457 -  if (list == NULL) {
  52.458 +  ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
  52.459 +  if (chunk_manager == NULL) {
  52.460      return 0;
  52.461    }
  52.462 -  ChunkManager* chunk = list->chunk_manager();
  52.463 -  chunk->slow_verify();
  52.464 -  return chunk->free_chunks_total_words();
  52.465 +  chunk_manager->slow_verify();
  52.466 +  return chunk_manager->free_chunks_total_words();
  52.467  }
  52.468  
  52.469  size_t MetaspaceAux::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
  52.470 @@ -2801,9 +2749,9 @@
  52.471  }
  52.472  
  52.473  void MetaspaceAux::verify_free_chunks() {
  52.474 -  Metaspace::space_list()->chunk_manager()->verify();
  52.475 +  Metaspace::chunk_manager_metadata()->verify();
  52.476    if (Metaspace::using_class_space()) {
  52.477 -    Metaspace::class_space_list()->chunk_manager()->verify();
  52.478 +    Metaspace::chunk_manager_class()->verify();
  52.479    }
  52.480  }
  52.481  
  52.482 @@ -2874,6 +2822,9 @@
  52.483  VirtualSpaceList* Metaspace::_space_list = NULL;
  52.484  VirtualSpaceList* Metaspace::_class_space_list = NULL;
  52.485  
  52.486 +ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
  52.487 +ChunkManager* Metaspace::_chunk_manager_class = NULL;
  52.488 +
  52.489  #define VIRTUALSPACEMULTIPLIER 2
  52.490  
  52.491  #ifdef _LP64
  52.492 @@ -2981,6 +2932,7 @@
  52.493           err_msg(SIZE_FORMAT " != " UINTX_FORMAT, rs.size(), CompressedClassSpaceSize));
  52.494    assert(using_class_space(), "Must be using class space");
  52.495    _class_space_list = new VirtualSpaceList(rs);
  52.496 +  _chunk_manager_class = new ChunkManager(SpecializedChunk, ClassSmallChunk, ClassMediumChunk);
  52.497  }
  52.498  
  52.499  #endif
  52.500 @@ -3006,6 +2958,7 @@
  52.501      // remainder is the misc code and data chunks.
  52.502      cds_total = FileMapInfo::shared_spaces_size();
  52.503      _space_list = new VirtualSpaceList(cds_total/wordSize);
  52.504 +    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
  52.505  
  52.506  #ifdef _LP64
  52.507      // Set the compressed klass pointer base so that decoding of these pointers works
  52.508 @@ -3073,15 +3026,30 @@
  52.509      size_t word_size = VIRTUALSPACEMULTIPLIER * first_chunk_word_size();
  52.510      // Initialize the list of virtual spaces.
  52.511      _space_list = new VirtualSpaceList(word_size);
  52.512 +    _chunk_manager_metadata = new ChunkManager(SpecializedChunk, SmallChunk, MediumChunk);
  52.513    }
  52.514  }
  52.515  
  52.516 +Metachunk* Metaspace::get_initialization_chunk(MetadataType mdtype,
  52.517 +                                               size_t chunk_word_size,
  52.518 +                                               size_t chunk_bunch) {
  52.519 +  // Get a chunk from the chunk freelist
  52.520 +  Metachunk* chunk = get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);
  52.521 +  if (chunk != NULL) {
  52.522 +    return chunk;
  52.523 +  }
  52.524 +
  52.525 +  return get_space_list(mdtype)->get_initialization_chunk(chunk_word_size, chunk_bunch);
  52.526 +}
  52.527 +
  52.528  void Metaspace::initialize(Mutex* lock, MetaspaceType type) {
  52.529  
  52.530    assert(space_list() != NULL,
  52.531      "Metadata VirtualSpaceList has not been initialized");
  52.532 -
  52.533 -  _vsm = new SpaceManager(NonClassType, lock, space_list());
  52.534 +  assert(chunk_manager_metadata() != NULL,
  52.535 +    "Metadata ChunkManager has not been initialized");
  52.536 +
  52.537 +  _vsm = new SpaceManager(NonClassType, lock);
  52.538    if (_vsm == NULL) {
  52.539      return;
  52.540    }
  52.541 @@ -3090,11 +3058,13 @@
  52.542    vsm()->get_initial_chunk_sizes(type, &word_size, &class_word_size);
  52.543  
  52.544    if (using_class_space()) {
  52.545 -    assert(class_space_list() != NULL,
  52.546 -      "Class VirtualSpaceList has not been initialized");
   52.547 +    assert(class_space_list() != NULL,
   52.548 +      "Class VirtualSpaceList has not been initialized");
   52.549 +    assert(chunk_manager_class() != NULL,
   52.550 +      "Class ChunkManager has not been initialized");
  52.551  
  52.552      // Allocate SpaceManager for classes.
  52.553 -    _class_vsm = new SpaceManager(ClassType, lock, class_space_list());
  52.554 +    _class_vsm = new SpaceManager(ClassType, lock);
  52.555      if (_class_vsm == NULL) {
  52.556        return;
  52.557      }
  52.558 @@ -3103,9 +3073,9 @@
  52.559    MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
  52.560  
  52.561    // Allocate chunk for metadata objects
  52.562 -  Metachunk* new_chunk =
  52.563 -     space_list()->get_initialization_chunk(word_size,
  52.564 -                                            vsm()->medium_chunk_bunch());
  52.565 +  Metachunk* new_chunk = get_initialization_chunk(NonClassType,
  52.566 +                                                  word_size,
  52.567 +                                                  vsm()->medium_chunk_bunch());
  52.568    assert(!DumpSharedSpaces || new_chunk != NULL, "should have enough space for both chunks");
  52.569    if (new_chunk != NULL) {
  52.570      // Add to this manager's list of chunks in use and current_chunk().
  52.571 @@ -3114,9 +3084,9 @@
  52.572  
  52.573    // Allocate chunk for class metadata objects
  52.574    if (using_class_space()) {
  52.575 -    Metachunk* class_chunk =
  52.576 -       class_space_list()->get_initialization_chunk(class_word_size,
  52.577 -                                                    class_vsm()->medium_chunk_bunch());
  52.578 +    Metachunk* class_chunk = get_initialization_chunk(ClassType,
  52.579 +                                                      class_word_size,
  52.580 +                                                      class_vsm()->medium_chunk_bunch());
  52.581      if (class_chunk != NULL) {
  52.582        class_vsm()->add_chunk(class_chunk, true);
  52.583      }
  52.584 @@ -3333,12 +3303,16 @@
  52.585    }
  52.586  }
  52.587  
  52.588 +void Metaspace::purge(MetadataType mdtype) {
  52.589 +  get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
  52.590 +}
  52.591 +
  52.592  void Metaspace::purge() {
  52.593    MutexLockerEx cl(SpaceManager::expand_lock(),
  52.594                     Mutex::_no_safepoint_check_flag);
  52.595 -  space_list()->purge();
  52.596 +  purge(NonClassType);
  52.597    if (using_class_space()) {
  52.598 -    class_space_list()->purge();
  52.599 +    purge(ClassType);
  52.600    }
  52.601  }
  52.602  
  52.603 @@ -3385,7 +3359,7 @@
  52.604  
  52.605  #ifndef PRODUCT
  52.606  
  52.607 -class MetaspaceAuxTest : AllStatic {
  52.608 +class TestMetaspaceAuxTest : AllStatic {
  52.609   public:
  52.610    static void test_reserved() {
  52.611      size_t reserved = MetaspaceAux::reserved_bytes();
  52.612 @@ -3425,14 +3399,25 @@
  52.613      }
  52.614    }
  52.615  
  52.616 +  static void test_virtual_space_list_large_chunk() {
  52.617 +    VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
  52.618 +    MutexLockerEx cl(SpaceManager::expand_lock(), Mutex::_no_safepoint_check_flag);
  52.619 +    // A size larger than VirtualSpaceSize (256k) and add one page to make it _not_ be
  52.620 +    // vm_allocation_granularity aligned on Windows.
  52.621 +    size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
  52.622 +    large_size += (os::vm_page_size()/BytesPerWord);
  52.623 +    vs_list->get_new_chunk(large_size, large_size, 0);
  52.624 +  }
  52.625 +
  52.626    static void test() {
  52.627      test_reserved();
  52.628      test_committed();
  52.629 +    test_virtual_space_list_large_chunk();
  52.630    }
  52.631  };
  52.632  
  52.633 -void MetaspaceAux_test() {
  52.634 -  MetaspaceAuxTest::test();
  52.635 +void TestMetaspaceAux_test() {
  52.636 +  TestMetaspaceAuxTest::test();
  52.637  }
  52.638  
  52.639  #endif
    53.1 --- a/src/share/vm/memory/metaspace.hpp	Tue Oct 01 08:54:05 2013 -0400
    53.2 +++ b/src/share/vm/memory/metaspace.hpp	Tue Oct 01 09:21:43 2013 -0400
    53.3 @@ -56,12 +56,15 @@
    53.4  //                       +-------------------+
    53.5  //
    53.6  
    53.7 +class ChunkManager;
    53.8  class ClassLoaderData;
    53.9  class Metablock;
   53.10 +class Metachunk;
   53.11  class MetaWord;
   53.12  class Mutex;
   53.13  class outputStream;
   53.14  class SpaceManager;
   53.15 +class VirtualSpaceList;
   53.16  
   53.17  // Metaspaces each have a  SpaceManager and allocations
   53.18  // are done by the SpaceManager.  Allocations are done
   53.19 @@ -76,8 +79,6 @@
   53.20  // allocate() method returns a block for use as a
   53.21  // quantum of metadata.
   53.22  
   53.23 -class VirtualSpaceList;
   53.24 -
   53.25  class Metaspace : public CHeapObj<mtClass> {
   53.26    friend class VMStructs;
   53.27    friend class SpaceManager;
   53.28 @@ -102,6 +103,10 @@
   53.29   private:
   53.30    void initialize(Mutex* lock, MetaspaceType type);
   53.31  
   53.32 +  Metachunk* get_initialization_chunk(MetadataType mdtype,
   53.33 +                                      size_t chunk_word_size,
   53.34 +                                      size_t chunk_bunch);
   53.35 +
   53.36    // Align up the word size to the allocation word size
   53.37    static size_t align_word_size_up(size_t);
   53.38  
   53.39 @@ -134,6 +139,10 @@
   53.40    static VirtualSpaceList* _space_list;
   53.41    static VirtualSpaceList* _class_space_list;
   53.42  
   53.43 +  static ChunkManager* _chunk_manager_metadata;
   53.44 +  static ChunkManager* _chunk_manager_class;
   53.45 +
   53.46 + public:
   53.47    static VirtualSpaceList* space_list()       { return _space_list; }
   53.48    static VirtualSpaceList* class_space_list() { return _class_space_list; }
   53.49    static VirtualSpaceList* get_space_list(MetadataType mdtype) {
   53.50 @@ -141,6 +150,14 @@
   53.51      return mdtype == ClassType ? class_space_list() : space_list();
   53.52    }
   53.53  
   53.54 +  static ChunkManager* chunk_manager_metadata() { return _chunk_manager_metadata; }
   53.55 +  static ChunkManager* chunk_manager_class()    { return _chunk_manager_class; }
   53.56 +  static ChunkManager* get_chunk_manager(MetadataType mdtype) {
    53.57 +    assert(mdtype != MetadataTypeCount, "MetadataTypeCount can't be used as mdtype");
   53.58 +    return mdtype == ClassType ? chunk_manager_class() : chunk_manager_metadata();
   53.59 +  }
   53.60 +
   53.61 + private:
   53.62    // This is used by DumpSharedSpaces only, where only _vsm is used. So we will
   53.63    // maintain a single list for now.
   53.64    void record_allocation(void* ptr, MetaspaceObj::Type type, size_t word_size);
   53.65 @@ -199,6 +216,7 @@
   53.66    void dump(outputStream* const out) const;
   53.67  
   53.68    // Free empty virtualspaces
   53.69 +  static void purge(MetadataType mdtype);
   53.70    static void purge();
   53.71  
   53.72    void print_on(outputStream* st) const;
    54.1 --- a/src/share/vm/memory/referenceProcessor.cpp	Tue Oct 01 08:54:05 2013 -0400
    54.2 +++ b/src/share/vm/memory/referenceProcessor.cpp	Tue Oct 01 09:21:43 2013 -0400
    54.3 @@ -367,7 +367,7 @@
    54.4        next_d = java_lang_ref_Reference::discovered(obj);
    54.5        if (TraceReferenceGC && PrintGCDetails) {
    54.6          gclog_or_tty->print_cr("        obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT,
    54.7 -                               obj, next_d);
    54.8 +                               (void *)obj, (void *)next_d);
    54.9        }
   54.10        assert(java_lang_ref_Reference::next(obj) == NULL,
   54.11               "Reference not active; should not be discovered");
   54.12 @@ -392,7 +392,7 @@
   54.13        next_d = java_lang_ref_Reference::discovered(obj);
   54.14        if (TraceReferenceGC && PrintGCDetails) {
   54.15          gclog_or_tty->print_cr("        obj " INTPTR_FORMAT "/next_d " INTPTR_FORMAT,
   54.16 -                               obj, next_d);
   54.17 +                               (void *)obj, (void *)next_d);
   54.18        }
   54.19        assert(java_lang_ref_Reference::next(obj) == NULL,
   54.20               "The reference should not be enqueued");
   54.21 @@ -562,7 +562,7 @@
   54.22          !policy->should_clear_reference(iter.obj(), _soft_ref_timestamp_clock)) {
   54.23        if (TraceReferenceGC) {
   54.24          gclog_or_tty->print_cr("Dropping reference (" INTPTR_FORMAT ": %s"  ") by policy",
   54.25 -                               iter.obj(), iter.obj()->klass()->internal_name());
   54.26 +                               (void *)iter.obj(), iter.obj()->klass()->internal_name());
   54.27        }
   54.28        // Remove Reference object from list
   54.29        iter.remove();
   54.30 @@ -601,7 +601,7 @@
   54.31      if (iter.is_referent_alive()) {
   54.32        if (TraceReferenceGC) {
   54.33          gclog_or_tty->print_cr("Dropping strongly reachable reference (" INTPTR_FORMAT ": %s)",
   54.34 -                               iter.obj(), iter.obj()->klass()->internal_name());
   54.35 +                               (void *)iter.obj(), iter.obj()->klass()->internal_name());
   54.36        }
   54.37        // The referent is reachable after all.
   54.38        // Remove Reference object from list.
   54.39 @@ -687,7 +687,7 @@
   54.40      if (TraceReferenceGC) {
   54.41        gclog_or_tty->print_cr("Adding %sreference (" INTPTR_FORMAT ": %s) as pending",
   54.42                               clear_referent ? "cleared " : "",
   54.43 -                             iter.obj(), iter.obj()->klass()->internal_name());
   54.44 +                             (void *)iter.obj(), iter.obj()->klass()->internal_name());
   54.45      }
   54.46      assert(iter.obj()->is_oop(UseConcMarkSweepGC), "Adding a bad reference");
   54.47      iter.next();
   54.48 @@ -1003,7 +1003,7 @@
   54.49            gclog_or_tty->print_cr("clean_up_discovered_list: Dropping Reference: "
   54.50              INTPTR_FORMAT " with next field: " INTPTR_FORMAT
   54.51              " and referent: " INTPTR_FORMAT,
   54.52 -            iter.obj(), next, iter.referent());
   54.53 +            (void *)iter.obj(), (void *)next, (void *)iter.referent());
   54.54          }
   54.55        )
   54.56        // Remove Reference object from list
   54.57 @@ -1103,14 +1103,14 @@
   54.58  
   54.59      if (TraceReferenceGC) {
   54.60        gclog_or_tty->print_cr("Discovered reference (mt) (" INTPTR_FORMAT ": %s)",
   54.61 -                             obj, obj->klass()->internal_name());
   54.62 +                             (void *)obj, obj->klass()->internal_name());
   54.63      }
   54.64    } else {
   54.65      // If retest was non NULL, another thread beat us to it:
   54.66      // The reference has already been discovered...
   54.67      if (TraceReferenceGC) {
   54.68        gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)",
   54.69 -                             obj, obj->klass()->internal_name());
   54.70 +                             (void *)obj, obj->klass()->internal_name());
   54.71      }
   54.72    }
   54.73  }
   54.74 @@ -1125,7 +1125,7 @@
   54.75    assert(da ? referent->is_oop() : referent->is_oop_or_null(),
   54.76           err_msg("Bad referent " INTPTR_FORMAT " found in Reference "
   54.77                   INTPTR_FORMAT " during %satomic discovery ",
   54.78 -                 (intptr_t)referent, (intptr_t)obj, da ? "" : "non-"));
   54.79 +                 (void *)referent, (void *)obj, da ? "" : "non-"));
   54.80  }
   54.81  #endif
   54.82  
   54.83 @@ -1205,7 +1205,7 @@
   54.84      // The reference has already been discovered...
   54.85      if (TraceReferenceGC) {
   54.86        gclog_or_tty->print_cr("Already discovered reference (" INTPTR_FORMAT ": %s)",
   54.87 -                             obj, obj->klass()->internal_name());
   54.88 +                             (void *)obj, obj->klass()->internal_name());
   54.89      }
   54.90      if (RefDiscoveryPolicy == ReferentBasedDiscovery) {
   54.91        // assumes that an object is not processed twice;
   54.92 @@ -1273,7 +1273,7 @@
   54.93  
   54.94      if (TraceReferenceGC) {
   54.95        gclog_or_tty->print_cr("Discovered reference (" INTPTR_FORMAT ": %s)",
   54.96 -                                obj, obj->klass()->internal_name());
   54.97 +                                (void *)obj, obj->klass()->internal_name());
   54.98      }
   54.99    }
  54.100    assert(obj->is_oop(), "Discovered a bad reference");
  54.101 @@ -1372,7 +1372,7 @@
  54.102        // active; we need to trace and mark its cohort.
  54.103        if (TraceReferenceGC) {
  54.104          gclog_or_tty->print_cr("Precleaning Reference (" INTPTR_FORMAT ": %s)",
  54.105 -                               iter.obj(), iter.obj()->klass()->internal_name());
  54.106 +                               (void *)iter.obj(), iter.obj()->klass()->internal_name());
  54.107        }
  54.108        // Remove Reference object from list
  54.109        iter.remove();
    55.1 --- a/src/share/vm/oops/constantPool.cpp	Tue Oct 01 08:54:05 2013 -0400
    55.2 +++ b/src/share/vm/oops/constantPool.cpp	Tue Oct 01 09:21:43 2013 -0400
    55.3 @@ -1918,7 +1918,7 @@
    55.4      st->print_cr(" - holder: " INTPTR_FORMAT, pool_holder());
    55.5    }
    55.6    st->print_cr(" - cache: " INTPTR_FORMAT, cache());
    55.7 -  st->print_cr(" - resolved_references: " INTPTR_FORMAT, resolved_references());
    55.8 +  st->print_cr(" - resolved_references: " INTPTR_FORMAT, (void *)resolved_references());
    55.9    st->print_cr(" - reference_map: " INTPTR_FORMAT, reference_map());
   55.10  
   55.11    for (int index = 1; index < length(); index++) {      // Index 0 is unused
    56.1 --- a/src/share/vm/oops/cpCache.cpp	Tue Oct 01 08:54:05 2013 -0400
    56.2 +++ b/src/share/vm/oops/cpCache.cpp	Tue Oct 01 09:21:43 2013 -0400
    56.3 @@ -306,8 +306,8 @@
    56.4    if (TraceInvokeDynamic) {
    56.5      tty->print_cr("set_method_handle bc=%d appendix="PTR_FORMAT"%s method_type="PTR_FORMAT"%s method="PTR_FORMAT" ",
    56.6                    invoke_code,
    56.7 -                  (intptr_t)appendix(),    (has_appendix    ? "" : " (unused)"),
    56.8 -                  (intptr_t)method_type(), (has_method_type ? "" : " (unused)"),
    56.9 +                  (void *)appendix(),    (has_appendix    ? "" : " (unused)"),
   56.10 +                  (void *)method_type(), (has_method_type ? "" : " (unused)"),
   56.11                    (intptr_t)adapter());
   56.12      adapter->print();
   56.13      if (has_appendix)  appendix()->print();
    57.1 --- a/src/share/vm/oops/instanceKlass.cpp	Tue Oct 01 08:54:05 2013 -0400
    57.2 +++ b/src/share/vm/oops/instanceKlass.cpp	Tue Oct 01 09:21:43 2013 -0400
    57.3 @@ -106,7 +106,7 @@
    57.4        len = name->utf8_length();                                 \
    57.5      }                                                            \
    57.6      HS_DTRACE_PROBE4(hotspot, class__initialization__##type,     \
    57.7 -      data, len, (clss)->class_loader(), thread_type);           \
    57.8 +      data, len, SOLARIS_ONLY((void *))(clss)->class_loader(), thread_type);           \
    57.9    }
   57.10  
   57.11  #define DTRACE_CLASSINIT_PROBE_WAIT(type, clss, thread_type, wait) \
   57.12 @@ -119,7 +119,7 @@
   57.13        len = name->utf8_length();                                 \
   57.14      }                                                            \
   57.15      HS_DTRACE_PROBE5(hotspot, class__initialization__##type,     \
   57.16 -      data, len, (clss)->class_loader(), thread_type, wait);     \
   57.17 +      data, len, SOLARIS_ONLY((void *))(clss)->class_loader(), thread_type, wait);     \
   57.18    }
   57.19  #else /* USDT2 */
   57.20  
   57.21 @@ -2303,7 +2303,7 @@
   57.22  }
   57.23  
   57.24  address InstanceKlass::static_field_addr(int offset) {
   57.25 -  return (address)(offset + InstanceMirrorKlass::offset_of_static_fields() + (intptr_t)java_mirror());
   57.26 +  return (address)(offset + InstanceMirrorKlass::offset_of_static_fields() + cast_from_oop<intptr_t>(java_mirror()));
   57.27  }
   57.28  
   57.29  
    58.1 --- a/src/share/vm/oops/instanceMirrorKlass.hpp	Tue Oct 01 08:54:05 2013 -0400
    58.2 +++ b/src/share/vm/oops/instanceMirrorKlass.hpp	Tue Oct 01 09:21:43 2013 -0400
    58.3 @@ -1,5 +1,5 @@
    58.4  /*
    58.5 - * Copyright (c) 2011, 2012, Oracle and/or its affiliates. All rights reserved.
    58.6 + * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved.
    58.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    58.8   *
    58.9   * This code is free software; you can redistribute it and/or modify it
   58.10 @@ -66,7 +66,7 @@
   58.11    // Static field offset is an offset into the Heap, should be converted by
   58.12    // based on UseCompressedOop for traversal
   58.13    static HeapWord* start_of_static_fields(oop obj) {
   58.14 -    return (HeapWord*)((intptr_t)obj + offset_of_static_fields());
   58.15 +    return (HeapWord*)(cast_from_oop<intptr_t>(obj) + offset_of_static_fields());
   58.16    }
   58.17  
   58.18    static void init_offset_of_static_fields() {
    59.1 --- a/src/share/vm/oops/instanceRefKlass.cpp	Tue Oct 01 08:54:05 2013 -0400
    59.2 +++ b/src/share/vm/oops/instanceRefKlass.cpp	Tue Oct 01 09:21:43 2013 -0400
    59.3 @@ -1,5 +1,5 @@
    59.4  /*
    59.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    59.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    59.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    59.8   *
    59.9   * This code is free software; you can redistribute it and/or modify it
   59.10 @@ -51,7 +51,7 @@
   59.11    T heap_oop = oopDesc::load_heap_oop(referent_addr);
   59.12    debug_only(
   59.13      if(TraceReferenceGC && PrintGCDetails) {
   59.14 -      gclog_or_tty->print_cr("InstanceRefKlass::oop_follow_contents " INTPTR_FORMAT, obj);
   59.15 +      gclog_or_tty->print_cr("InstanceRefKlass::oop_follow_contents " INTPTR_FORMAT, (void *)obj);
   59.16      }
   59.17    )
   59.18    if (!oopDesc::is_null(heap_oop)) {
   59.19 @@ -62,7 +62,7 @@
   59.20        ref->InstanceKlass::oop_follow_contents(obj);
   59.21        debug_only(
   59.22          if(TraceReferenceGC && PrintGCDetails) {
   59.23 -          gclog_or_tty->print_cr("       Non NULL enqueued " INTPTR_FORMAT, obj);
   59.24 +          gclog_or_tty->print_cr("       Non NULL enqueued " INTPTR_FORMAT, (void *)obj);
   59.25          }
   59.26        )
   59.27        return;
   59.28 @@ -70,7 +70,7 @@
   59.29        // treat referent as normal oop
   59.30        debug_only(
   59.31          if(TraceReferenceGC && PrintGCDetails) {
   59.32 -          gclog_or_tty->print_cr("       Non NULL normal " INTPTR_FORMAT, obj);
   59.33 +          gclog_or_tty->print_cr("       Non NULL normal " INTPTR_FORMAT, (void *)obj);
   59.34          }
   59.35        )
   59.36        MarkSweep::mark_and_push(referent_addr);
   59.37 @@ -130,7 +130,7 @@
   59.38    T heap_oop = oopDesc::load_heap_oop(referent_addr);
   59.39    debug_only(
   59.40      if(TraceReferenceGC && PrintGCDetails) {
   59.41 -      gclog_or_tty->print_cr("InstanceRefKlass::oop_follow_contents " INTPTR_FORMAT, obj);
   59.42 +      gclog_or_tty->print_cr("InstanceRefKlass::oop_follow_contents " INTPTR_FORMAT, (void *)obj);
   59.43      }
   59.44    )
   59.45    if (!oopDesc::is_null(heap_oop)) {
   59.46 @@ -142,7 +142,7 @@
   59.47        ref->InstanceKlass::oop_follow_contents(cm, obj);
   59.48        debug_only(
   59.49          if(TraceReferenceGC && PrintGCDetails) {
   59.50 -          gclog_or_tty->print_cr("       Non NULL enqueued " INTPTR_FORMAT, obj);
   59.51 +          gclog_or_tty->print_cr("       Non NULL enqueued " INTPTR_FORMAT, (void *)obj);
   59.52          }
   59.53        )
   59.54        return;
   59.55 @@ -150,7 +150,7 @@
   59.56        // treat referent as normal oop
   59.57        debug_only(
   59.58          if(TraceReferenceGC && PrintGCDetails) {
   59.59 -          gclog_or_tty->print_cr("       Non NULL normal " INTPTR_FORMAT, obj);
   59.60 +          gclog_or_tty->print_cr("       Non NULL normal " INTPTR_FORMAT, (void *)obj);
   59.61          }
   59.62        )
   59.63        PSParallelCompact::mark_and_push(cm, referent_addr);
    60.1 --- a/src/share/vm/oops/methodData.hpp	Tue Oct 01 08:54:05 2013 -0400
    60.2 +++ b/src/share/vm/oops/methodData.hpp	Tue Oct 01 09:21:43 2013 -0400
    60.3 @@ -333,10 +333,10 @@
    60.4      return (int)data()->cell_at(index);
    60.5    }
    60.6    void set_oop_at(int index, oop value) {
    60.7 -    set_intptr_at(index, (intptr_t) value);
    60.8 +    set_intptr_at(index, cast_from_oop<intptr_t>(value));
    60.9    }
   60.10    oop oop_at(int index) {
   60.11 -    return (oop)intptr_at(index);
   60.12 +    return cast_to_oop(intptr_at(index));
   60.13    }
   60.14  
   60.15    void set_flag_at(int flag_number) {
    61.1 --- a/src/share/vm/oops/oop.inline.hpp	Tue Oct 01 08:54:05 2013 -0400
    61.2 +++ b/src/share/vm/oops/oop.inline.hpp	Tue Oct 01 09:21:43 2013 -0400
    61.3 @@ -183,7 +183,7 @@
    61.4  // in inner GC loops so these are separated.
    61.5  
    61.6  inline bool check_obj_alignment(oop obj) {
    61.7 -  return (intptr_t)obj % MinObjAlignmentInBytes == 0;
    61.8 +  return cast_from_oop<intptr_t>(obj) % MinObjAlignmentInBytes == 0;
    61.9  }
   61.10  
   61.11  inline narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
    62.1 --- a/src/share/vm/oops/oopsHierarchy.hpp	Tue Oct 01 08:54:05 2013 -0400
    62.2 +++ b/src/share/vm/oops/oopsHierarchy.hpp	Tue Oct 01 09:21:43 2013 -0400
    62.3 @@ -55,11 +55,16 @@
    62.4  // to and from the underlying oopDesc pointer type.
    62.5  //
    62.6  // Because oop and its subclasses <type>Oop are class types, arbitrary
    62.7 -// conversions are not accepted by the compiler, and you may get a message
    62.8 -// about overloading ambiguity (between long and int is common when converting
    62.9 -// from a constant in 64 bit mode), or unable to convert from type to 'oop'.
   62.10 -// Applying a cast to one of these conversion operators first will get to the
   62.11 -// underlying oopDesc* type if appropriate.
   62.12 +// conversions are not accepted by the compiler.  Applying a cast to
   62.13 +// an oop will cause the best matched conversion operator to be
   62.14 +// invoked returning the underlying oopDesc* type if appropriate.
   62.15 +// No copy constructors, explicit user conversions or operators of
   62.16 +// numerical type should be defined within the oop class. Most C++
   62.17 +// compilers will issue a compile time error concerning the overloading
   62.18 +// ambiguity between operators of numerical and pointer types. If
   62.19 +// a conversion to or from an oop to a numerical type is needed,
   62.20 +// use the inline template methods, cast_*_oop, defined below.
   62.21 +//
   62.22  // Converting NULL to oop to Handle implicit is no longer accepted by the
   62.23  // compiler because there are too many steps in the conversion.  Use Handle()
   62.24  // instead, which generates less code anyway.
   62.25 @@ -83,12 +88,9 @@
   62.26    void raw_set_obj(const void* p)     { _o = (oopDesc*)p; }
   62.27  
   62.28    oop()                               { set_obj(NULL); }
   62.29 +  oop(const oop& o)                   { set_obj(o.obj()); }
   62.30    oop(const volatile oop& o)          { set_obj(o.obj()); }
   62.31    oop(const void* p)                  { set_obj(p); }
   62.32 -  oop(intptr_t i)                     { set_obj((void *)i); }
   62.33 -#ifdef _LP64
   62.34 -  oop(int i)                          { set_obj((void *)i); }
   62.35 -#endif
   62.36    ~oop()                              {
   62.37      if (CheckUnhandledOops) unregister_oop();
   62.38    }
   62.39 @@ -101,8 +103,6 @@
   62.40    bool operator==(void *p) const      { return obj() == p; }
   62.41    bool operator!=(const volatile oop o) const  { return obj() != o.obj(); }
   62.42    bool operator!=(void *p) const      { return obj() != p; }
   62.43 -  bool operator==(intptr_t p) const   { return obj() == (oopDesc*)p; }
   62.44 -  bool operator!=(intptr_t p) const   { return obj() != (oopDesc*)p; }
   62.45  
   62.46    bool operator<(oop o) const         { return obj() < o.obj(); }
   62.47    bool operator>(oop o) const         { return obj() > o.obj(); }
   62.48 @@ -110,8 +110,18 @@
   62.49    bool operator>=(oop o) const        { return obj() >= o.obj(); }
   62.50    bool operator!() const              { return !obj(); }
   62.51  
   62.52 -  // Cast
   62.53 +  // Assignment
   62.54 +  oop& operator=(const oop& o)                            { _o = o.obj(); return *this; }
   62.55 +#ifndef SOLARIS
   62.56 +  volatile oop& operator=(const oop& o) volatile          { _o = o.obj(); return *this; }
   62.57 +#endif
   62.58 +  volatile oop& operator=(const volatile oop& o) volatile { _o = o.obj(); return *this; }
   62.59 +
   62.60 +  // Explict user conversions
   62.61    operator void* () const             { return (void *)obj(); }
   62.62 +#ifndef SOLARIS
   62.63 +  operator void* () const volatile    { return (void *)obj(); }
   62.64 +#endif
   62.65    operator HeapWord* () const         { return (HeapWord*)obj(); }
   62.66    operator oopDesc* () const          { return obj(); }
   62.67    operator intptr_t* () const         { return (intptr_t*)obj(); }
   62.68 @@ -119,7 +129,6 @@
   62.69    operator markOop () const           { return markOop(obj()); }
   62.70  
   62.71    operator address   () const         { return (address)obj(); }
   62.72 -  operator intptr_t () const volatile { return (intptr_t)obj(); }
   62.73  
   62.74    // from javaCalls.cpp
   62.75    operator jobject () const           { return (jobject)obj(); }
   62.76 @@ -141,12 +150,26 @@
   62.77     class type##Oop : public oop {                                          \
   62.78       public:                                                               \
   62.79         type##Oop() : oop() {}                                              \
   62.80 +       type##Oop(const oop& o) : oop(o) {}                                 \
   62.81         type##Oop(const volatile oop& o) : oop(o) {}                        \
   62.82         type##Oop(const void* p) : oop(p) {}                                \
   62.83         operator type##OopDesc* () const { return (type##OopDesc*)obj(); }  \
   62.84         type##OopDesc* operator->() const {                                 \
   62.85              return (type##OopDesc*)obj();                                  \
   62.86         }                                                                   \
   62.87 +       type##Oop& operator=(const type##Oop& o) {                          \
   62.88 +            oop::operator=(o);                                             \
   62.89 +            return *this;                                                  \
   62.90 +       }                                                                   \
   62.91 +       NOT_SOLARIS(                                                        \
   62.92 +       volatile type##Oop& operator=(const type##Oop& o) volatile {        \
   62.93 +            (void)const_cast<oop&>(oop::operator=(o));                     \
   62.94 +            return *this;                                                  \
   62.95 +       })                                                                  \
   62.96 +       volatile type##Oop& operator=(const volatile type##Oop& o) volatile {\
   62.97 +            (void)const_cast<oop&>(oop::operator=(o));                     \
   62.98 +            return *this;                                                  \
   62.99 +       }                                                                   \
  62.100     };
  62.101  
  62.102  DEF_OOP(instance);
  62.103 @@ -156,6 +179,16 @@
  62.104  
  62.105  #endif // CHECK_UNHANDLED_OOPS
  62.106  
  62.107 +// For CHECK_UNHANDLED_OOPS, it is ambiguous C++ behavior to have the oop
  62.108 +// structure contain explicit user defined conversions of both numerical
  62.109 +// and pointer type. Define inline methods to provide the numerical conversions.
  62.110 +template <class T> inline oop cast_to_oop(T value) {
  62.111 +  return (oop)(CHECK_UNHANDLED_OOPS_ONLY((void *))(value));
  62.112 +}
  62.113 +template <class T> inline T cast_from_oop(oop o) {
  62.114 +  return (T)(CHECK_UNHANDLED_OOPS_ONLY((void*))o);
  62.115 +}
  62.116 +
  62.117  // The metadata hierarchy is separate from the oop hierarchy
  62.118  
  62.119  //      class MetaspaceObj
    63.1 --- a/src/share/vm/opto/bytecodeInfo.cpp	Tue Oct 01 08:54:05 2013 -0400
    63.2 +++ b/src/share/vm/opto/bytecodeInfo.cpp	Tue Oct 01 09:21:43 2013 -0400
    63.3 @@ -123,7 +123,7 @@
    63.4    // Allows targeted inlining
    63.5    if(callee_method->should_inline()) {
    63.6      *wci_result = *(WarmCallInfo::always_hot());
    63.7 -    if (PrintInlining && Verbose) {
    63.8 +    if (C->print_inlining() && Verbose) {
    63.9        CompileTask::print_inline_indent(inline_level());
   63.10        tty->print_cr("Inlined method is hot: ");
   63.11      }
   63.12 @@ -137,7 +137,7 @@
   63.13    if(callee_method->interpreter_throwout_count() > InlineThrowCount &&
   63.14       size < InlineThrowMaxSize ) {
   63.15      wci_result->set_profit(wci_result->profit() * 100);
   63.16 -    if (PrintInlining && Verbose) {
   63.17 +    if (C->print_inlining() && Verbose) {
   63.18        CompileTask::print_inline_indent(inline_level());
   63.19        tty->print_cr("Inlined method with many throws (throws=%d):", callee_method->interpreter_throwout_count());
   63.20      }
   63.21 @@ -491,7 +491,7 @@
   63.22        C->log()->inline_fail(inline_msg);
   63.23      }
   63.24    }
   63.25 -  if (PrintInlining) {
   63.26 +  if (C->print_inlining()) {
   63.27      C->print_inlining(callee_method, inline_level(), caller_bci, inline_msg);
   63.28      if (callee_method == NULL) tty->print(" callee not monotonic or profiled");
   63.29      if (Verbose && callee_method) {
   63.30 @@ -540,7 +540,7 @@
   63.31  
   63.32  #ifndef PRODUCT
   63.33    if (UseOldInlining && InlineWarmCalls
   63.34 -      && (PrintOpto || PrintOptoInlining || PrintInlining)) {
   63.35 +      && (PrintOpto || C->print_inlining())) {
   63.36      bool cold = wci.is_cold();
   63.37      bool hot  = !cold && wci.is_hot();
   63.38      bool old_cold = !success;
   63.39 @@ -617,7 +617,7 @@
   63.40               callee_method->is_compiled_lambda_form()) {
   63.41        max_inline_level_adjust += 1;  // don't count method handle calls from java.lang.invoke implem
   63.42      }
   63.43 -    if (max_inline_level_adjust != 0 && PrintInlining && (Verbose || WizardMode)) {
   63.44 +    if (max_inline_level_adjust != 0 && C->print_inlining() && (Verbose || WizardMode)) {
   63.45        CompileTask::print_inline_indent(inline_level());
   63.46        tty->print_cr(" \\-> discounting inline depth");
   63.47      }
    64.1 --- a/src/share/vm/opto/callGenerator.hpp	Tue Oct 01 08:54:05 2013 -0400
    64.2 +++ b/src/share/vm/opto/callGenerator.hpp	Tue Oct 01 09:21:43 2013 -0400
    64.3 @@ -159,8 +159,9 @@
    64.4    virtual void print_inlining_late(const char* msg) { ShouldNotReachHere(); }
    64.5  
    64.6    static void print_inlining(Compile* C, ciMethod* callee, int inline_level, int bci, const char* msg) {
    64.7 -    if (PrintInlining)
    64.8 +    if (C->print_inlining()) {
    64.9        C->print_inlining(callee, inline_level, bci, msg);
   64.10 +    }
   64.11    }
   64.12  };
   64.13  
    65.1 --- a/src/share/vm/opto/compile.cpp	Tue Oct 01 08:54:05 2013 -0400
    65.2 +++ b/src/share/vm/opto/compile.cpp	Tue Oct 01 09:21:43 2013 -0400
    65.3 @@ -654,7 +654,7 @@
    65.4                    _inlining_progress(false),
    65.5                    _inlining_incrementally(false),
    65.6                    _print_inlining_list(NULL),
    65.7 -                  _print_inlining(0) {
    65.8 +                  _print_inlining_idx(0) {
    65.9    C = this;
   65.10  
   65.11    CompileWrapper cw(this);
   65.12 @@ -679,6 +679,8 @@
   65.13    set_print_assembly(print_opto_assembly);
   65.14    set_parsed_irreducible_loop(false);
   65.15  #endif
   65.16 +  set_print_inlining(PrintInlining || method()->has_option("PrintInlining") NOT_PRODUCT( || PrintOptoInlining));
   65.17 +  set_print_intrinsics(PrintIntrinsics || method()->has_option("PrintIntrinsics"));
   65.18  
   65.19    if (ProfileTraps) {
   65.20      // Make sure the method being compiled gets its own MDO,
   65.21 @@ -710,7 +712,7 @@
   65.22    PhaseGVN gvn(node_arena(), estimated_size);
   65.23    set_initial_gvn(&gvn);
   65.24  
   65.25 -  if (PrintInlining  || PrintIntrinsics NOT_PRODUCT( || PrintOptoInlining)) {
   65.26 +  if (print_inlining() || print_intrinsics()) {
   65.27      _print_inlining_list = new (comp_arena())GrowableArray<PrintInliningBuffer>(comp_arena(), 1, 1, PrintInliningBuffer());
   65.28    }
   65.29    { // Scope for timing the parser
   65.30 @@ -937,7 +939,7 @@
   65.31      _inlining_progress(false),
   65.32      _inlining_incrementally(false),
   65.33      _print_inlining_list(NULL),
   65.34 -    _print_inlining(0) {
   65.35 +    _print_inlining_idx(0) {
   65.36    C = this;
   65.37  
   65.38  #ifndef PRODUCT
   65.39 @@ -3611,7 +3613,7 @@
   65.40  }
   65.41  
   65.42  void Compile::dump_inlining() {
   65.43 -  if (PrintInlining || PrintIntrinsics NOT_PRODUCT( || PrintOptoInlining)) {
   65.44 +  if (print_inlining() || print_intrinsics()) {
   65.45      // Print inlining message for candidates that we couldn't inline
   65.46      // for lack of space or non constant receiver
   65.47      for (int i = 0; i < _late_inlines.length(); i++) {
   65.48 @@ -3635,7 +3637,7 @@
   65.49        }
   65.50      }
   65.51      for (int i = 0; i < _print_inlining_list->length(); i++) {
   65.52 -      tty->print(_print_inlining_list->at(i).ss()->as_string());
   65.53 +      tty->print(_print_inlining_list->adr_at(i)->ss()->as_string());
   65.54      }
   65.55    }
   65.56  }
    66.1 --- a/src/share/vm/opto/compile.hpp	Tue Oct 01 08:54:05 2013 -0400
    66.2 +++ b/src/share/vm/opto/compile.hpp	Tue Oct 01 09:21:43 2013 -0400
    66.3 @@ -312,6 +312,8 @@
    66.4    bool                  _do_method_data_update; // True if we generate code to update MethodData*s
    66.5    int                   _AliasLevel;            // Locally-adjusted version of AliasLevel flag.
    66.6    bool                  _print_assembly;        // True if we should dump assembly code for this compilation
    66.7 +  bool                  _print_inlining;        // True if we should print inlining for this compilation
    66.8 +  bool                  _print_intrinsics;      // True if we should print intrinsics for this compilation
    66.9  #ifndef PRODUCT
   66.10    bool                  _trace_opto_output;
   66.11    bool                  _parsed_irreducible_loop; // True if ciTypeFlow detected irreducible loops during parsing
   66.12 @@ -414,7 +416,7 @@
   66.13    };
   66.14  
   66.15    GrowableArray<PrintInliningBuffer>* _print_inlining_list;
   66.16 -  int _print_inlining;
   66.17 +  int _print_inlining_idx;
   66.18  
   66.19    // Only keep nodes in the expensive node list that need to be optimized
   66.20    void cleanup_expensive_nodes(PhaseIterGVN &igvn);
   66.21 @@ -426,24 +428,24 @@
   66.22   public:
   66.23  
   66.24    outputStream* print_inlining_stream() const {
   66.25 -    return _print_inlining_list->at(_print_inlining).ss();
   66.26 +    return _print_inlining_list->adr_at(_print_inlining_idx)->ss();
   66.27    }
   66.28  
   66.29    void print_inlining_skip(CallGenerator* cg) {
   66.30 -    if (PrintInlining) {
   66.31 -      _print_inlining_list->at(_print_inlining).set_cg(cg);
   66.32 -      _print_inlining++;
   66.33 -      _print_inlining_list->insert_before(_print_inlining, PrintInliningBuffer());
   66.34 +    if (_print_inlining) {
   66.35 +      _print_inlining_list->adr_at(_print_inlining_idx)->set_cg(cg);
   66.36 +      _print_inlining_idx++;
   66.37 +      _print_inlining_list->insert_before(_print_inlining_idx, PrintInliningBuffer());
   66.38      }
   66.39    }
   66.40  
   66.41    void print_inlining_insert(CallGenerator* cg) {
   66.42 -    if (PrintInlining) {
   66.43 +    if (_print_inlining) {
   66.44        for (int i = 0; i < _print_inlining_list->length(); i++) {
   66.45 -        if (_print_inlining_list->at(i).cg() == cg) {
   66.46 +        if (_print_inlining_list->adr_at(i)->cg() == cg) {
   66.47            _print_inlining_list->insert_before(i+1, PrintInliningBuffer());
   66.48 -          _print_inlining = i+1;
   66.49 -          _print_inlining_list->at(i).set_cg(NULL);
   66.50 +          _print_inlining_idx = i+1;
   66.51 +          _print_inlining_list->adr_at(i)->set_cg(NULL);
   66.52            return;
   66.53          }
   66.54        }
   66.55 @@ -572,6 +574,10 @@
   66.56    int               AliasLevel() const          { return _AliasLevel; }
   66.57    bool              print_assembly() const       { return _print_assembly; }
   66.58    void          set_print_assembly(bool z)       { _print_assembly = z; }
   66.59 +  bool              print_inlining() const       { return _print_inlining; }
   66.60 +  void          set_print_inlining(bool z)       { _print_inlining = z; }
   66.61 +  bool              print_intrinsics() const     { return _print_intrinsics; }
   66.62 +  void          set_print_intrinsics(bool z)     { _print_intrinsics = z; }
   66.63    // check the CompilerOracle for special behaviours for this compile
   66.64    bool          method_has_option(const char * option) {
   66.65      return method() != NULL && method()->has_option(option);
    67.1 --- a/src/share/vm/opto/doCall.cpp	Tue Oct 01 08:54:05 2013 -0400
    67.2 +++ b/src/share/vm/opto/doCall.cpp	Tue Oct 01 09:21:43 2013 -0400
    67.3 @@ -41,9 +41,9 @@
    67.4  #include "runtime/sharedRuntime.hpp"
    67.5  
    67.6  void trace_type_profile(Compile* C, ciMethod *method, int depth, int bci, ciMethod *prof_method, ciKlass *prof_klass, int site_count, int receiver_count) {
    67.7 -  if (TraceTypeProfile || PrintInlining NOT_PRODUCT(|| PrintOptoInlining)) {
    67.8 +  if (TraceTypeProfile || C->print_inlining()) {
    67.9      outputStream* out = tty;
   67.10 -    if (!PrintInlining) {
   67.11 +    if (!C->print_inlining()) {
   67.12        if (NOT_PRODUCT(!PrintOpto &&) !PrintCompilation) {
   67.13          method->print_short_name();
   67.14          tty->cr();
    68.1 --- a/src/share/vm/opto/library_call.cpp	Tue Oct 01 08:54:05 2013 -0400
    68.2 +++ b/src/share/vm/opto/library_call.cpp	Tue Oct 01 09:21:43 2013 -0400
    68.3 @@ -543,7 +543,7 @@
    68.4    Compile* C = kit.C;
    68.5    int nodes = C->unique();
    68.6  #ifndef PRODUCT
    68.7 -  if ((PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) && Verbose) {
    68.8 +  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
    68.9      char buf[1000];
   68.10      const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
   68.11      tty->print_cr("Intrinsic %s", str);
   68.12 @@ -554,7 +554,7 @@
   68.13  
   68.14    // Try to inline the intrinsic.
   68.15    if (kit.try_to_inline()) {
   68.16 -    if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
   68.17 +    if (C->print_intrinsics() || C->print_inlining()) {
   68.18        C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
   68.19      }
   68.20      C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
   68.21 @@ -570,7 +570,7 @@
   68.22    }
   68.23  
   68.24    // The intrinsic bailed out
   68.25 -  if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
   68.26 +  if (C->print_intrinsics() || C->print_inlining()) {
   68.27      if (jvms->has_method()) {
   68.28        // Not a root compile.
   68.29        const char* msg = is_virtual() ? "failed to inline (intrinsic, virtual)" : "failed to inline (intrinsic)";
   68.30 @@ -592,7 +592,7 @@
   68.31    int nodes = C->unique();
   68.32  #ifndef PRODUCT
   68.33    assert(is_predicted(), "sanity");
   68.34 -  if ((PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) && Verbose) {
   68.35 +  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
   68.36      char buf[1000];
   68.37      const char* str = vmIntrinsics::short_name_as_C_string(intrinsic_id(), buf, sizeof(buf));
   68.38      tty->print_cr("Predicate for intrinsic %s", str);
   68.39 @@ -603,7 +603,7 @@
   68.40  
   68.41    Node* slow_ctl = kit.try_to_predicate();
   68.42    if (!kit.failing()) {
   68.43 -    if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
   68.44 +    if (C->print_intrinsics() || C->print_inlining()) {
   68.45        C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
   68.46      }
   68.47      C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
   68.48 @@ -617,7 +617,7 @@
   68.49    }
   68.50  
   68.51    // The intrinsic bailed out
   68.52 -  if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
   68.53 +  if (C->print_intrinsics() || C->print_inlining()) {
   68.54      if (jvms->has_method()) {
   68.55        // Not a root compile.
   68.56        const char* msg = "failed to generate predicate for intrinsic";
   68.57 @@ -2299,7 +2299,7 @@
   68.58      const TypeOopPtr* tjp = TypeOopPtr::make_from_klass(sharpened_klass);
   68.59  
   68.60  #ifndef PRODUCT
   68.61 -    if (PrintIntrinsics || PrintInlining || PrintOptoInlining) {
   68.62 +    if (C->print_intrinsics() || C->print_inlining()) {
   68.63        tty->print("  from base type: ");  adr_type->dump();
   68.64        tty->print("  sharpened value: ");  tjp->dump();
   68.65      }
   68.66 @@ -3260,7 +3260,7 @@
   68.67    if (mirror_con == NULL)  return false;  // cannot happen?
   68.68  
   68.69  #ifndef PRODUCT
   68.70 -  if (PrintIntrinsics || PrintInlining || PrintOptoInlining) {
   68.71 +  if (C->print_intrinsics() || C->print_inlining()) {
   68.72      ciType* k = mirror_con->java_mirror_type();
   68.73      if (k) {
   68.74        tty->print("Inlining %s on constant Class ", vmIntrinsics::name_at(intrinsic_id()));
   68.75 @@ -3952,14 +3952,14 @@
   68.76  // caller sensitive methods.
   68.77  bool LibraryCallKit::inline_native_Reflection_getCallerClass() {
   68.78  #ifndef PRODUCT
   68.79 -  if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
   68.80 +  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
   68.81      tty->print_cr("Attempting to inline sun.reflect.Reflection.getCallerClass");
   68.82    }
   68.83  #endif
   68.84  
   68.85    if (!jvms()->has_method()) {
   68.86  #ifndef PRODUCT
   68.87 -    if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
   68.88 +    if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
   68.89        tty->print_cr("  Bailing out because intrinsic was inlined at top level");
   68.90      }
   68.91  #endif
   68.92 @@ -3983,7 +3983,7 @@
   68.93        // Frame 0 and 1 must be caller sensitive (see JVM_GetCallerClass).
   68.94        if (!m->caller_sensitive()) {
   68.95  #ifndef PRODUCT
   68.96 -        if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
   68.97 +        if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
   68.98            tty->print_cr("  Bailing out: CallerSensitive annotation expected at frame %d", n);
   68.99          }
  68.100  #endif
  68.101 @@ -3999,7 +3999,7 @@
  68.102          set_result(makecon(TypeInstPtr::make(caller_mirror)));
  68.103  
  68.104  #ifndef PRODUCT
  68.105 -        if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
  68.106 +        if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
  68.107            tty->print_cr("  Succeeded: caller = %d) %s.%s, JVMS depth = %d", n, caller_klass->name()->as_utf8(), caller_jvms->method()->name()->as_utf8(), jvms()->depth());
  68.108            tty->print_cr("  JVM state at this point:");
  68.109            for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
  68.110 @@ -4015,7 +4015,7 @@
  68.111    }
  68.112  
  68.113  #ifndef PRODUCT
  68.114 -  if ((PrintIntrinsics || PrintInlining || PrintOptoInlining) && Verbose) {
  68.115 +  if ((C->print_intrinsics() || C->print_inlining()) && Verbose) {
  68.116      tty->print_cr("  Bailing out because caller depth exceeded inlining depth = %d", jvms()->depth());
  68.117      tty->print_cr("  JVM state at this point:");
  68.118      for (int i = jvms()->depth(), n = 1; i >= 1; i--, n++) {
    69.1 --- a/src/share/vm/opto/machnode.cpp	Tue Oct 01 08:54:05 2013 -0400
    69.2 +++ b/src/share/vm/opto/machnode.cpp	Tue Oct 01 09:21:43 2013 -0400
    69.3 @@ -1,5 +1,5 @@
    69.4  /*
    69.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    69.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    69.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    69.8   *
    69.9   * This code is free software; you can redistribute it and/or modify it
   69.10 @@ -341,7 +341,7 @@
   69.11        return TypePtr::BOTTOM;
   69.12      }
   69.13      // %%% make offset be intptr_t
   69.14 -    assert(!Universe::heap()->is_in_reserved((oop)offset), "must be a raw ptr");
   69.15 +    assert(!Universe::heap()->is_in_reserved(cast_to_oop(offset)), "must be a raw ptr");
   69.16      return TypeRawPtr::BOTTOM;
   69.17    }
   69.18  
    70.1 --- a/src/share/vm/prims/jni.cpp	Tue Oct 01 08:54:05 2013 -0400
    70.2 +++ b/src/share/vm/prims/jni.cpp	Tue Oct 01 09:21:43 2013 -0400
    70.3 @@ -5046,7 +5046,10 @@
    70.4  void TestReservedSpace_test();
    70.5  void TestReserveMemorySpecial_test();
    70.6  void TestVirtualSpace_test();
    70.7 -void MetaspaceAux_test();
    70.8 +void TestMetaspaceAux_test();
    70.9 +#if INCLUDE_ALL_GCS
   70.10 +void TestG1BiasedArray_test();
   70.11 +#endif
   70.12  
   70.13  void execute_internal_vm_tests() {
   70.14    if (ExecuteInternalVMTests) {
   70.15 @@ -5054,7 +5057,7 @@
   70.16      run_unit_test(TestReservedSpace_test());
   70.17      run_unit_test(TestReserveMemorySpecial_test());
   70.18      run_unit_test(TestVirtualSpace_test());
   70.19 -    run_unit_test(MetaspaceAux_test());
   70.20 +    run_unit_test(TestMetaspaceAux_test());
   70.21      run_unit_test(GlobalDefinitions::test_globals());
   70.22      run_unit_test(GCTimerAllTest::all());
   70.23      run_unit_test(arrayOopDesc::test_max_array_length());
   70.24 @@ -5066,6 +5069,7 @@
   70.25      run_unit_test(VMStructs::test());
   70.26  #endif
   70.27  #if INCLUDE_ALL_GCS
   70.28 +    run_unit_test(TestG1BiasedArray_test());
   70.29      run_unit_test(HeapRegionRemSet::test_prt());
   70.30  #endif
   70.31      tty->print_cr("All internal VM tests passed");
    71.1 --- a/src/share/vm/prims/jvmtiTagMap.cpp	Tue Oct 01 08:54:05 2013 -0400
    71.2 +++ b/src/share/vm/prims/jvmtiTagMap.cpp	Tue Oct 01 09:21:43 2013 -0400
    71.3 @@ -165,7 +165,7 @@
    71.4    static unsigned int hash(oop key, int size) {
    71.5      // shift right to get better distribution (as these bits will be zero
    71.6      // with aligned addresses)
    71.7 -    unsigned int addr = (unsigned int)((intptr_t)key);
    71.8 +    unsigned int addr = (unsigned int)(cast_from_oop<intptr_t>(key));
    71.9  #ifdef _LP64
   71.10      return (addr >> 3) % size;
   71.11  #else
    72.1 --- a/src/share/vm/prims/unsafe.cpp	Tue Oct 01 08:54:05 2013 -0400
    72.2 +++ b/src/share/vm/prims/unsafe.cpp	Tue Oct 01 09:21:43 2013 -0400
    72.3 @@ -292,9 +292,9 @@
    72.4    volatile oop v;
    72.5    if (UseCompressedOops) {
    72.6      volatile narrowOop n = *(volatile narrowOop*) addr;
    72.7 -    v = oopDesc::decode_heap_oop(n);
    72.8 +    (void)const_cast<oop&>(v = oopDesc::decode_heap_oop(n));
    72.9    } else {
   72.10 -    v = *(volatile oop*) addr;
   72.11 +    (void)const_cast<oop&>(v = *(volatile oop*) addr);
   72.12    }
   72.13    OrderAccess::acquire();
   72.14    return JNIHandles::make_local(env, v);
   72.15 @@ -1222,9 +1222,9 @@
   72.16  #endif /* USDT2 */
   72.17    if (event.should_commit()) {
   72.18      oop obj = thread->current_park_blocker();
   72.19 -    event.set_klass(obj ? obj->klass() : NULL);
   72.20 +    event.set_klass((obj != NULL) ? obj->klass() : NULL);
   72.21      event.set_timeout(time);
   72.22 -    event.set_address(obj ? (TYPE_ADDRESS) (uintptr_t) obj : 0);
   72.23 +    event.set_address((obj != NULL) ? (TYPE_ADDRESS) cast_from_oop<uintptr_t>(obj) : 0);
   72.24      event.commit();
   72.25    }
   72.26  UNSAFE_END
    73.1 --- a/src/share/vm/runtime/biasedLocking.cpp	Tue Oct 01 08:54:05 2013 -0400
    73.2 +++ b/src/share/vm/runtime/biasedLocking.cpp	Tue Oct 01 09:21:43 2013 -0400
    73.3 @@ -1,5 +1,5 @@
    73.4  /*
    73.5 - * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved.
    73.6 + * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
    73.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    73.8   *
    73.9   * This code is free software; you can redistribute it and/or modify it
   73.10 @@ -161,7 +161,7 @@
   73.11    if (TraceBiasedLocking && (Verbose || !is_bulk)) {
   73.12      ResourceMark rm;
   73.13      tty->print_cr("Revoking bias of object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s , prototype header " INTPTR_FORMAT " , allow rebias %d , requesting thread " INTPTR_FORMAT,
   73.14 -                  (intptr_t) obj, (intptr_t) mark, obj->klass()->external_name(), (intptr_t) obj->klass()->prototype_header(), (allow_rebias ? 1 : 0), (intptr_t) requesting_thread);
   73.15 +                  (void *)obj, (intptr_t) mark, obj->klass()->external_name(), (intptr_t) obj->klass()->prototype_header(), (allow_rebias ? 1 : 0), (intptr_t) requesting_thread);
   73.16    }
   73.17  
   73.18    JavaThread* biased_thread = mark->biased_locker();
   73.19 @@ -214,8 +214,8 @@
   73.20      if (mon_info->owner() == obj) {
   73.21        if (TraceBiasedLocking && Verbose) {
   73.22          tty->print_cr("   mon_info->owner (" PTR_FORMAT ") == obj (" PTR_FORMAT ")",
   73.23 -                      (intptr_t) mon_info->owner(),
   73.24 -                      (intptr_t) obj);
   73.25 +                      (void *) mon_info->owner(),
   73.26 +                      (void *) obj);
   73.27        }
   73.28        // Assume recursive case and fix up highest lock later
   73.29        markOop mark = markOopDesc::encode((BasicLock*) NULL);
   73.30 @@ -224,8 +224,8 @@
   73.31      } else {
   73.32        if (TraceBiasedLocking && Verbose) {
   73.33          tty->print_cr("   mon_info->owner (" PTR_FORMAT ") != obj (" PTR_FORMAT ")",
   73.34 -                      (intptr_t) mon_info->owner(),
   73.35 -                      (intptr_t) obj);
   73.36 +                      (void *) mon_info->owner(),
   73.37 +                      (void *) obj);
   73.38        }
   73.39      }
   73.40    }
   73.41 @@ -326,7 +326,7 @@
   73.42      tty->print_cr("* Beginning bulk revocation (kind == %s) because of object "
   73.43                    INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
   73.44                    (bulk_rebias ? "rebias" : "revoke"),
   73.45 -                  (intptr_t) o, (intptr_t) o->mark(), o->klass()->external_name());
   73.46 +                  (void *) o, (intptr_t) o->mark(), o->klass()->external_name());
   73.47    }
   73.48  
   73.49    jlong cur_time = os::javaTimeMillis();
    74.1 --- a/src/share/vm/runtime/deoptimization.cpp	Tue Oct 01 08:54:05 2013 -0400
    74.2 +++ b/src/share/vm/runtime/deoptimization.cpp	Tue Oct 01 09:21:43 2013 -0400
    74.3 @@ -1,5 +1,5 @@
    74.4  /*
    74.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    74.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    74.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    74.8   *
    74.9   * This code is free software; you can redistribute it and/or modify it
   74.10 @@ -234,7 +234,7 @@
   74.11          assert(Universe::heap()->is_in_or_null(result), "must be heap pointer");
   74.12          if (TraceDeoptimization) {
   74.13            ttyLocker ttyl;
   74.14 -          tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, result, thread);
   74.15 +          tty->print_cr("SAVED OOP RESULT " INTPTR_FORMAT " in thread " INTPTR_FORMAT, (void *)result, thread);
   74.16          }
   74.17        }
   74.18        bool reallocated = false;
   74.19 @@ -278,7 +278,7 @@
   74.20                    first = false;
   74.21                    tty->print_cr("RELOCK OBJECTS in thread " INTPTR_FORMAT, thread);
   74.22                  }
   74.23 -                tty->print_cr("     object <" INTPTR_FORMAT "> locked", mi->owner());
   74.24 +                tty->print_cr("     object <" INTPTR_FORMAT "> locked", (void *)mi->owner());
   74.25                }
   74.26              }
   74.27            }
   74.28 @@ -977,7 +977,7 @@
   74.29      KlassHandle k(java_lang_Class::as_Klass(sv->klass()->as_ConstantOopReadValue()->value()()));
   74.30      Handle obj = sv->value();
   74.31  
   74.32 -    tty->print("     object <" INTPTR_FORMAT "> of type ", sv->value()());
   74.33 +    tty->print("     object <" INTPTR_FORMAT "> of type ", (void *)sv->value()());
   74.34      k->print_value();
   74.35      tty->print(" allocated (%d bytes)", obj->size() * HeapWordSize);
   74.36      tty->cr();
    75.1 --- a/src/share/vm/runtime/frame.cpp	Tue Oct 01 08:54:05 2013 -0400
    75.2 +++ b/src/share/vm/runtime/frame.cpp	Tue Oct 01 09:21:43 2013 -0400
    75.3 @@ -1,5 +1,5 @@
    75.4  /*
    75.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    75.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    75.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    75.8   *
    75.9   * This code is free software; you can redistribute it and/or modify it
   75.10 @@ -1097,7 +1097,7 @@
   75.11      return NULL;
   75.12    }
   75.13    oop r = *oop_adr;
   75.14 -  assert(Universe::heap()->is_in_or_null(r), err_msg("bad receiver: " INTPTR_FORMAT " (" INTX_FORMAT ")", (intptr_t) r, (intptr_t) r));
   75.15 +  assert(Universe::heap()->is_in_or_null(r), err_msg("bad receiver: " INTPTR_FORMAT " (" INTX_FORMAT ")", (void *) r, (void *) r));
   75.16    return r;
   75.17  }
   75.18  
   75.19 @@ -1228,9 +1228,7 @@
   75.20  
   75.21  void frame::ZapDeadClosure::do_oop(oop* p) {
   75.22    if (TraceZapDeadLocals) tty->print_cr("zapping @ " INTPTR_FORMAT " containing " INTPTR_FORMAT, p, (address)*p);
   75.23 -  // Need cast because on _LP64 the conversion to oop is ambiguous.  Constant
   75.24 -  // can be either long or int.
   75.25 -  *p = (oop)(int)0xbabebabe;
   75.26 +  *p = cast_to_oop<intptr_t>(0xbabebabe);
   75.27  }
   75.28  frame::ZapDeadClosure frame::_zap_dead;
   75.29  
    76.1 --- a/src/share/vm/runtime/javaCalls.cpp	Tue Oct 01 08:54:05 2013 -0400
    76.2 +++ b/src/share/vm/runtime/javaCalls.cpp	Tue Oct 01 09:21:43 2013 -0400
    76.3 @@ -1,5 +1,5 @@
    76.4  /*
    76.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    76.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    76.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    76.8   *
    76.9   * This code is free software; you can redistribute it and/or modify it
   76.10 @@ -430,7 +430,7 @@
   76.11    for(int i = 0; i < _size; i++) {
   76.12      if (_is_oop[i]) {
   76.13        // Handle conversion
   76.14 -      _value[i] = (intptr_t)Handle::raw_resolve((oop *)_value[i]);
   76.15 +      _value[i] = cast_from_oop<intptr_t>(Handle::raw_resolve((oop *)_value[i]));
   76.16      }
   76.17    }
   76.18    // Return argument vector
    77.1 --- a/src/share/vm/runtime/safepoint.cpp	Tue Oct 01 08:54:05 2013 -0400
    77.2 +++ b/src/share/vm/runtime/safepoint.cpp	Tue Oct 01 09:21:43 2013 -0400
    77.3 @@ -1,5 +1,5 @@
    77.4  /*
    77.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    77.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    77.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    77.8   *
    77.9   * This code is free software; you can redistribute it and/or modify it
   77.10 @@ -745,14 +745,14 @@
   77.11  #endif
   77.12  
   77.13  static void print_ptrs(intptr_t oldptr, intptr_t newptr, bool wasoop) {
   77.14 -  bool is_oop = newptr ? ((oop)newptr)->is_oop() : false;
   77.15 +  bool is_oop = newptr ? (cast_to_oop(newptr))->is_oop() : false;
   77.16    tty->print_cr(PTR_FORMAT PTR_PAD " %s %c " PTR_FORMAT PTR_PAD " %s %s",
   77.17                  oldptr, wasoop?"oop":"   ", oldptr == newptr ? ' ' : '!',
   77.18                  newptr, is_oop?"oop":"   ", (wasoop && !is_oop) ? "STALE" : ((wasoop==false&&is_oop==false&&oldptr !=newptr)?"STOMP":"     "));
   77.19  }
   77.20  
   77.21  static void print_longs(jlong oldptr, jlong newptr, bool wasoop) {
   77.22 -  bool is_oop = newptr ? ((oop)(intptr_t)newptr)->is_oop() : false;
   77.23 +  bool is_oop = newptr ? (cast_to_oop(newptr))->is_oop() : false;
   77.24    tty->print_cr(PTR64_FORMAT " %s %c " PTR64_FORMAT " %s %s",
   77.25                  oldptr, wasoop?"oop":"   ", oldptr == newptr ? ' ' : '!',
   77.26                  newptr, is_oop?"oop":"   ", (wasoop && !is_oop) ? "STALE" : ((wasoop==false&&is_oop==false&&oldptr !=newptr)?"STOMP":"     "));
    78.1 --- a/src/share/vm/runtime/sharedRuntime.cpp	Tue Oct 01 08:54:05 2013 -0400
    78.2 +++ b/src/share/vm/runtime/sharedRuntime.cpp	Tue Oct 01 09:21:43 2013 -0400
    78.3 @@ -1,5 +1,5 @@
    78.4  /*
    78.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    78.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    78.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    78.8   *
    78.9   * This code is free software; you can redistribute it and/or modify it
   78.10 @@ -577,7 +577,7 @@
   78.11    assert(caller.is_interpreted_frame(), "");
   78.12    int args_size = ArgumentSizeComputer(sig).size() + 1;
   78.13    assert(args_size <= caller.interpreter_frame_expression_stack_size(), "receiver must be on interpreter stack");
   78.14 -  oop result = (oop) *caller.interpreter_frame_tos_at(args_size - 1);
   78.15 +  oop result = cast_to_oop(*caller.interpreter_frame_tos_at(args_size - 1));
   78.16    assert(Universe::heap()->is_in(result) && result->is_oop(), "receiver must be an oop");
   78.17    return result;
   78.18  }
   78.19 @@ -1506,8 +1506,11 @@
   78.20                                                  info, CHECK_(methodHandle()));
   78.21          inline_cache->set_to_monomorphic(info);
   78.22        } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) {
   78.23 -        // Change to megamorphic
   78.24 -        inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
   78.25 +        // Potential change to megamorphic
   78.26 +        bool successful = inline_cache->set_to_megamorphic(&call_info, bc, CHECK_(methodHandle()));
   78.27 +        if (!successful) {
   78.28 +          inline_cache->set_to_clean();
   78.29 +        }
   78.30        } else {
   78.31          // Either clean or megamorphic
   78.32        }
   78.33 @@ -2872,7 +2875,7 @@
   78.34          ObjectSynchronizer::inflate_helper(kptr2->obj());
   78.35        // Now the displaced header is free to move
   78.36        buf[i++] = (intptr_t)lock->displaced_header();
   78.37 -      buf[i++] = (intptr_t)kptr2->obj();
   78.38 +      buf[i++] = cast_from_oop<intptr_t>(kptr2->obj());
   78.39      }
   78.40    }
   78.41    assert( i - max_locals == active_monitor_count*2, "found the expected number of monitors" );
    79.1 --- a/src/share/vm/runtime/synchronizer.cpp	Tue Oct 01 08:54:05 2013 -0400
    79.2 +++ b/src/share/vm/runtime/synchronizer.cpp	Tue Oct 01 09:21:43 2013 -0400
    79.3 @@ -154,7 +154,7 @@
    79.4  static volatile intptr_t ListLock = 0 ;      // protects global monitor free-list cache
    79.5  static volatile int MonitorFreeCount  = 0 ;      // # on gFreeList
    79.6  static volatile int MonitorPopulation = 0 ;      // # Extant -- in circulation
    79.7 -#define CHAINMARKER ((oop)-1)
    79.8 +#define CHAINMARKER (cast_to_oop<intptr_t>(-1))
    79.9  
   79.10  // -----------------------------------------------------------------------------
   79.11  //  Fast Monitor Enter/Exit
   79.12 @@ -510,7 +510,7 @@
   79.13           // then for each thread on the list, set the flag and unpark() the thread.
   79.14           // This is conceptually similar to muxAcquire-muxRelease, except that muxRelease
   79.15           // wakes at most one thread whereas we need to wake the entire list.
   79.16 -         int ix = (intptr_t(obj) >> 5) & (NINFLATIONLOCKS-1) ;
   79.17 +         int ix = (cast_from_oop<intptr_t>(obj) >> 5) & (NINFLATIONLOCKS-1) ;
   79.18           int YieldThenBlock = 0 ;
   79.19           assert (ix >= 0 && ix < NINFLATIONLOCKS, "invariant") ;
   79.20           assert ((NINFLATIONLOCKS & (NINFLATIONLOCKS-1)) == 0, "invariant") ;
   79.21 @@ -565,7 +565,7 @@
   79.22       // This variation has the property of being stable (idempotent)
   79.23       // between STW operations.  This can be useful in some of the 1-0
   79.24       // synchronization schemes.
   79.25 -     intptr_t addrBits = intptr_t(obj) >> 3 ;
   79.26 +     intptr_t addrBits = cast_from_oop<intptr_t>(obj) >> 3 ;
   79.27       value = addrBits ^ (addrBits >> 5) ^ GVars.stwRandom ;
   79.28    } else
   79.29    if (hashCode == 2) {
   79.30 @@ -575,7 +575,7 @@
   79.31       value = ++GVars.hcSequence ;
   79.32    } else
   79.33    if (hashCode == 4) {
   79.34 -     value = intptr_t(obj) ;
   79.35 +     value = cast_from_oop<intptr_t>(obj) ;
   79.36    } else {
   79.37       // Marsaglia's xor-shift scheme with thread-specific state
   79.38       // This is probably the best overall implementation -- we'll
   79.39 @@ -1321,7 +1321,7 @@
   79.40              if (object->is_instance()) {
   79.41                ResourceMark rm;
   79.42                tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
   79.43 -                (intptr_t) object, (intptr_t) object->mark(),
   79.44 +                (void *) object, (intptr_t) object->mark(),
   79.45                  object->klass()->external_name());
   79.46              }
   79.47            }
   79.48 @@ -1371,7 +1371,7 @@
   79.49          if (object->is_instance()) {
   79.50            ResourceMark rm;
   79.51            tty->print_cr("Inflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
   79.52 -            (intptr_t) object, (intptr_t) object->mark(),
   79.53 +            (void *) object, (intptr_t) object->mark(),
   79.54              object->klass()->external_name());
   79.55          }
   79.56        }
   79.57 @@ -1439,7 +1439,7 @@
   79.58         if (obj->is_instance()) {
   79.59           ResourceMark rm;
   79.60             tty->print_cr("Deflating object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
   79.61 -                (intptr_t) obj, (intptr_t) obj->mark(), obj->klass()->external_name());
   79.62 +                (void *) obj, (intptr_t) obj->mark(), obj->klass()->external_name());
   79.63         }
   79.64       }
   79.65  
    80.1 --- a/src/share/vm/runtime/thread.cpp	Tue Oct 01 08:54:05 2013 -0400
    80.2 +++ b/src/share/vm/runtime/thread.cpp	Tue Oct 01 09:21:43 2013 -0400
    80.3 @@ -1444,7 +1444,7 @@
    80.4    _in_deopt_handler = 0;
    80.5    _doing_unsafe_access = false;
    80.6    _stack_guard_state = stack_guard_unused;
    80.7 -  _exception_oop = NULL;
    80.8 +  (void)const_cast<oop&>(_exception_oop = NULL);
    80.9    _exception_pc  = 0;
   80.10    _exception_handler_pc = 0;
   80.11    _is_method_handle_return = 0;
    81.1 --- a/src/share/vm/runtime/thread.hpp	Tue Oct 01 08:54:05 2013 -0400
    81.2 +++ b/src/share/vm/runtime/thread.hpp	Tue Oct 01 09:21:43 2013 -0400
    81.3 @@ -1278,7 +1278,7 @@
    81.4    address  exception_handler_pc() const          { return _exception_handler_pc; }
    81.5    bool     is_method_handle_return() const       { return _is_method_handle_return == 1; }
    81.6  
    81.7 -  void set_exception_oop(oop o)                  { _exception_oop = o; }
    81.8 +  void set_exception_oop(oop o)                  { (void)const_cast<oop&>(_exception_oop = o); }
    81.9    void set_exception_pc(address a)               { _exception_pc = a; }
   81.10    void set_exception_handler_pc(address a)       { _exception_handler_pc = a; }
   81.11    void set_is_method_handle_return(bool value)   { _is_method_handle_return = value ? 1 : 0; }
    82.1 --- a/src/share/vm/runtime/vframeArray.cpp	Tue Oct 01 08:54:05 2013 -0400
    82.2 +++ b/src/share/vm/runtime/vframeArray.cpp	Tue Oct 01 09:21:43 2013 -0400
    82.3 @@ -1,5 +1,5 @@
    82.4  /*
    82.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
    82.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
    82.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    82.8   *
    82.9   * This code is free software; you can redistribute it and/or modify it
   82.10 @@ -111,7 +111,7 @@
   82.11        case T_OBJECT:
   82.12          assert(!value->obj_is_scalar_replaced(), "object should be reallocated already");
   82.13          // preserve object type
   82.14 -        _locals->add( new StackValue((intptr_t) (value->get_obj()()), T_OBJECT ));
   82.15 +        _locals->add( new StackValue(cast_from_oop<intptr_t>((value->get_obj()())), T_OBJECT ));
   82.16          break;
   82.17        case T_CONFLICT:
   82.18          // A dead local.  Will be initialized to null/zero.
   82.19 @@ -136,7 +136,7 @@
   82.20        case T_OBJECT:
   82.21          assert(!value->obj_is_scalar_replaced(), "object should be reallocated already");
   82.22          // preserve object type
   82.23 -        _expressions->add( new StackValue((intptr_t) (value->get_obj()()), T_OBJECT ));
   82.24 +        _expressions->add( new StackValue(cast_from_oop<intptr_t>((value->get_obj()())), T_OBJECT ));
   82.25          break;
   82.26        case T_CONFLICT:
   82.27          // A dead stack element.  Will be initialized to null/zero.
    83.1 --- a/src/share/vm/runtime/vm_version.hpp	Tue Oct 01 08:54:05 2013 -0400
    83.2 +++ b/src/share/vm/runtime/vm_version.hpp	Tue Oct 01 09:21:43 2013 -0400
    83.3 @@ -78,7 +78,13 @@
    83.4    static const char* jre_release_version();
    83.5  
    83.6    // does HW support an 8-byte compare-exchange operation?
    83.7 -  static bool supports_cx8()  {return _supports_cx8;}
    83.8 +  static bool supports_cx8()  {
    83.9 +#ifdef SUPPORTS_NATIVE_CX8
   83.10 +    return true;
   83.11 +#else
   83.12 +    return _supports_cx8;
   83.13 +#endif
   83.14 +  }
   83.15    // does HW support atomic get-and-set or atomic get-and-add?  Used
   83.16    // to guide intrinsification decisions for Unsafe atomic ops
   83.17    static bool supports_atomic_getset4()  {return _supports_atomic_getset4;}
    84.1 --- a/src/share/vm/services/classLoadingService.cpp	Tue Oct 01 08:54:05 2013 -0400
    84.2 +++ b/src/share/vm/services/classLoadingService.cpp	Tue Oct 01 09:21:43 2013 -0400
    84.3 @@ -1,5 +1,5 @@
    84.4  /*
    84.5 - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
    84.6 + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
    84.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    84.8   *
    84.9   * This code is free software; you can redistribute it and/or modify it
   84.10 @@ -52,7 +52,7 @@
   84.11        len = name->utf8_length();                    \
   84.12      }                                               \
   84.13      HS_DTRACE_PROBE4(hotspot, class__##type,        \
   84.14 -      data, len, (clss)->class_loader(), (shared)); \
   84.15 +      data, len, SOLARIS_ONLY((void *))(clss)->class_loader(), (shared)); \
   84.16    }
   84.17  
   84.18  #else /* USDT2 */
    85.1 --- a/src/share/vm/services/heapDumper.cpp	Tue Oct 01 08:54:05 2013 -0400
    85.2 +++ b/src/share/vm/services/heapDumper.cpp	Tue Oct 01 09:21:43 2013 -0400
    85.3 @@ -563,7 +563,7 @@
    85.4  }
    85.5  
    85.6  void DumpWriter::write_objectID(oop o) {
    85.7 -  address a = (address)((uintptr_t)o);
    85.8 +  address a = (address)o;
    85.9  #ifdef _LP64
   85.10    write_u8((u8)a);
   85.11  #else
    86.1 --- a/src/share/vm/services/memoryManager.cpp	Tue Oct 01 08:54:05 2013 -0400
    86.2 +++ b/src/share/vm/services/memoryManager.cpp	Tue Oct 01 09:21:43 2013 -0400
    86.3 @@ -1,5 +1,5 @@
    86.4  /*
    86.5 - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
    86.6 + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
    86.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    86.8   *
    86.9   * This code is free software; you can redistribute it and/or modify it
   86.10 @@ -45,7 +45,7 @@
   86.11  
   86.12  MemoryManager::MemoryManager() {
   86.13    _num_pools = 0;
   86.14 -  _memory_mgr_obj = NULL;
   86.15 +  (void)const_cast<instanceOop&>(_memory_mgr_obj = NULL);
   86.16  }
   86.17  
   86.18  void MemoryManager::add_pool(MemoryPool* pool) {
    87.1 --- a/src/share/vm/services/memoryPool.cpp	Tue Oct 01 08:54:05 2013 -0400
    87.2 +++ b/src/share/vm/services/memoryPool.cpp	Tue Oct 01 09:21:43 2013 -0400
    87.3 @@ -1,5 +1,5 @@
    87.4  /*
    87.5 - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
    87.6 + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
    87.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    87.8   *
    87.9   * This code is free software; you can redistribute it and/or modify it
   87.10 @@ -45,7 +45,7 @@
   87.11    _name = name;
   87.12    _initial_size = init_size;
   87.13    _max_size = max_size;
   87.14 -  _memory_pool_obj = NULL;
   87.15 +  (void)const_cast<instanceOop&>(_memory_pool_obj = NULL);
   87.16    _available_for_allocation = true;
   87.17    _num_managers = 0;
   87.18    _type = type;
    88.1 --- a/src/share/vm/utilities/globalDefinitions.hpp	Tue Oct 01 08:54:05 2013 -0400
    88.2 +++ b/src/share/vm/utilities/globalDefinitions.hpp	Tue Oct 01 09:21:43 2013 -0400
    88.3 @@ -967,9 +967,9 @@
    88.4  // (These must be implemented as #defines because C++ compilers are
    88.5  // not obligated to inline non-integral constants!)
    88.6  #define       badAddress        ((address)::badAddressVal)
    88.7 -#define       badOop            ((oop)::badOopVal)
    88.8 +#define       badOop            (cast_to_oop(::badOopVal))
    88.9  #define       badHeapWord       (::badHeapWordVal)
   88.10 -#define       badJNIHandle      ((oop)::badJNIHandleVal)
   88.11 +#define       badJNIHandle      (cast_to_oop(::badJNIHandleVal))
   88.12  
   88.13  // Default TaskQueue size is 16K (32-bit) or 128K (64-bit)
   88.14  #define TASKQUEUE_SIZE (NOT_LP64(1<<14) LP64_ONLY(1<<17))
    89.1 --- a/src/share/vm/utilities/globalDefinitions_visCPP.hpp	Tue Oct 01 08:54:05 2013 -0400
    89.2 +++ b/src/share/vm/utilities/globalDefinitions_visCPP.hpp	Tue Oct 01 09:21:43 2013 -0400
    89.3 @@ -189,6 +189,10 @@
    89.4  #pragma warning( disable : 4201 ) // nonstandard extension used : nameless struct/union (needed in windows.h)
    89.5  #pragma warning( disable : 4511 ) // copy constructor could not be generated
    89.6  #pragma warning( disable : 4291 ) // no matching operator delete found; memory will not be freed if initialization thows an exception
    89.7 +#ifdef CHECK_UNHANDLED_OOPS
    89.8 +#pragma warning( disable : 4521 ) // class has multiple copy ctors of a single type
    89.9 +#pragma warning( disable : 4522 ) // class has multiple assignment operators of a single type
   89.10 +#endif // CHECK_UNHANDLED_OOPS
   89.11  #if _MSC_VER >= 1400
   89.12  #pragma warning( disable : 4996 ) // unsafe string functions. Same as define _CRT_SECURE_NO_WARNINGS/_CRT_SECURE_NO_DEPRICATE
   89.13  #endif
    90.1 --- a/src/share/vm/utilities/hashtable.cpp	Tue Oct 01 08:54:05 2013 -0400
    90.2 +++ b/src/share/vm/utilities/hashtable.cpp	Tue Oct 01 09:21:43 2013 -0400
    90.3 @@ -1,5 +1,5 @@
    90.4  /*
    90.5 - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved.
    90.6 + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved.
    90.7   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    90.8   *
    90.9   * This code is free software; you can redistribute it and/or modify it
   90.10 @@ -356,9 +356,9 @@
   90.11  template class Hashtable<Symbol*, mtSymbol>;
   90.12  template class Hashtable<Klass*, mtClass>;
   90.13  template class Hashtable<oop, mtClass>;
   90.14 -#ifdef SOLARIS
   90.15 +#if defined(SOLARIS) || defined(CHECK_UNHANDLED_OOPS)
   90.16  template class Hashtable<oop, mtSymbol>;
   90.17 -#endif
   90.18 +#endif // SOLARIS || CHECK_UNHANDLED_OOPS
   90.19  template class Hashtable<oopDesc*, mtSymbol>;
   90.20  template class Hashtable<Symbol*, mtClass>;
   90.21  template class HashtableEntry<Symbol*, mtSymbol>;
    91.1 --- a/src/share/vm/utilities/taskqueue.hpp	Tue Oct 01 08:54:05 2013 -0400
    91.2 +++ b/src/share/vm/utilities/taskqueue.hpp	Tue Oct 01 09:21:43 2013 -0400
    91.3 @@ -322,11 +322,11 @@
    91.4    // Attempts to claim a task from the "local" end of the queue (the most
    91.5    // recently pushed).  If successful, returns true and sets t to the task;
    91.6    // otherwise, returns false (the queue is empty).
    91.7 -  inline bool pop_local(E& t);
    91.8 +  inline bool pop_local(volatile E& t);
    91.9  
   91.10    // Like pop_local(), but uses the "global" end of the queue (the least
   91.11    // recently pushed).
   91.12 -  bool pop_global(E& t);
   91.13 +  bool pop_global(volatile E& t);
   91.14  
   91.15    // Delete any resource associated with the queue.
   91.16    ~GenericTaskQueue();
   91.17 @@ -424,7 +424,7 @@
   91.18  }
   91.19  
   91.20  template<class E, MEMFLAGS F, unsigned int N>
   91.21 -bool GenericTaskQueue<E, F, N>::pop_global(E& t) {
   91.22 +bool GenericTaskQueue<E, F, N>::pop_global(volatile E& t) {
   91.23    Age oldAge = _age.get();
   91.24    // Architectures with weak memory model require a barrier here
   91.25    // to guarantee that bottom is not older than age,
   91.26 @@ -701,7 +701,7 @@
   91.27  }
   91.28  
   91.29  template<class E, MEMFLAGS F, unsigned int N> inline bool
   91.30 -GenericTaskQueue<E, F, N>::pop_local(E& t) {
   91.31 +GenericTaskQueue<E, F, N>::pop_local(volatile E& t) {
   91.32    uint localBot = _bottom;
   91.33    // This value cannot be N-1.  That can only occur as a result of
   91.34    // the assignment to bottom in this method.  If it does, this method
   91.35 @@ -799,7 +799,7 @@
   91.36    }
   91.37    volatile ObjArrayTask&
   91.38    operator =(const volatile ObjArrayTask& t) volatile {
   91.39 -    _obj = t._obj;
   91.40 +    (void)const_cast<oop&>(_obj = t._obj);
   91.41      _index = t._index;
   91.42      return *this;
   91.43    }
    92.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
    92.2 +++ b/test/compiler/print/PrintInlining.java	Tue Oct 01 09:21:43 2013 -0400
    92.3 @@ -0,0 +1,36 @@
    92.4 +/*
    92.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
    92.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
    92.7 + *
    92.8 + * This code is free software; you can redistribute it and/or modify it
    92.9 + * under the terms of the GNU General Public License version 2 only, as
   92.10 + * published by the Free Software Foundation.
   92.11 + *
   92.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
   92.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   92.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   92.15 + * version 2 for more details (a copy is included in the LICENSE file that
   92.16 + * accompanied this code).
   92.17 + *
   92.18 + * You should have received a copy of the GNU General Public License version
   92.19 + * 2 along with this work; if not, write to the Free Software Foundation,
   92.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
   92.21 + *
   92.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
   92.23 + * or visit www.oracle.com if you need additional information or have any
   92.24 + * questions.
   92.25 + */
   92.26 +
   92.27 +/*
   92.28 + * @test
   92.29 + * @bug 8022585
   92.30 + * @summary VM crashes when ran with -XX:+PrintInlining
   92.31 + * @run main/othervm -Xcomp -XX:+PrintInlining PrintInlining
   92.32 + *
   92.33 + */
   92.34 +
   92.35 +public class PrintInlining {
   92.36 +  public static void main(String[] args) {
   92.37 +    System.out.println("Passed");
   92.38 +  }
   92.39 +}

mercurial