Fri, 04 Mar 2011 14:06:16 -0800
Merge
1.1 --- a/make/windows/makefiles/compile.make Thu Mar 03 15:13:18 2011 -0800 1.2 +++ b/make/windows/makefiles/compile.make Fri Mar 04 14:06:16 2011 -0800 1.3 @@ -207,6 +207,9 @@ 1.4 # Manifest Tool - used in VS2005 and later to adjust manifests stored 1.5 # as resources inside build artifacts. 1.6 MT=mt.exe 1.7 +!if "$(BUILDARCH)" == "i486" 1.8 +LINK_FLAGS = /SAFESEH $(LINK_FLAGS) 1.9 +!endif 1.10 !endif 1.11 1.12 # Compile for space above time.
2.1 --- a/make/windows/makefiles/launcher.make Thu Mar 03 15:13:18 2011 -0800 2.2 +++ b/make/windows/makefiles/launcher.make Fri Mar 04 14:06:16 2011 -0800 2.3 @@ -1,71 +1,73 @@ 2.4 -# 2.5 -# Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. 2.6 -# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 2.7 -# 2.8 -# This code is free software; you can redistribute it and/or modify it 2.9 -# under the terms of the GNU General Public License version 2 only, as 2.10 -# published by the Free Software Foundation. 2.11 -# 2.12 -# This code is distributed in the hope that it will be useful, but WITHOUT 2.13 -# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 2.14 -# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 2.15 -# version 2 for more details (a copy is included in the LICENSE file that 2.16 -# accompanied this code). 2.17 -# 2.18 -# You should have received a copy of the GNU General Public License version 2.19 -# 2 along with this work; if not, write to the Free Software Foundation, 2.20 -# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 2.21 -# 2.22 -# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 2.23 -# or visit www.oracle.com if you need additional information or have any 2.24 -# questions. 
2.25 -# 2.26 -# 2.27 - 2.28 - 2.29 -LAUNCHER_FLAGS=$(CPP_FLAGS) $(ARCHFLAG) \ 2.30 - /D FULL_VERSION=\"$(HOTSPOT_RELEASE_VERSION)\" \ 2.31 - /D JDK_MAJOR_VERSION=\"$(JDK_MAJOR_VERSION)\" \ 2.32 - /D JDK_MINOR_VERSION=\"$(JDK_MINOR_VERSION)\" \ 2.33 - /D GAMMA \ 2.34 - /D LAUNCHER_TYPE=\"gamma\" \ 2.35 - /D _CRT_SECURE_NO_WARNINGS \ 2.36 - /D _CRT_SECURE_NO_DEPRECATE \ 2.37 - /D LINK_INTO_LIBJVM \ 2.38 - /I $(WorkSpace)\src\os\windows\launcher \ 2.39 - /I $(WorkSpace)\src\share\tools\launcher \ 2.40 - /I $(WorkSpace)\src\share\vm\prims \ 2.41 - /I $(WorkSpace)\src\share\vm \ 2.42 - /I $(WorkSpace)\src\cpu\$(Platform_arch)\vm \ 2.43 - /I $(WorkSpace)\src\os\windows\vm 2.44 - 2.45 -LINK_FLAGS=/manifest $(HS_INTERNAL_NAME).lib kernel32.lib user32.lib /nologo /machine:$(MACHINE) /map /debug /subsystem:console 2.46 - 2.47 -!if "$(COMPILER_NAME)" == "VS2005" 2.48 -# This VS2005 compiler has /GS as a default and requires bufferoverflowU.lib 2.49 -# on the link command line, otherwise we get missing __security_check_cookie 2.50 -# externals at link time. Even with /GS-, you need bufferoverflowU.lib. 
2.51 -BUFFEROVERFLOWLIB = bufferoverflowU.lib 2.52 -LINK_FLAGS = $(LINK_FLAGS) $(BUFFEROVERFLOWLIB) 2.53 -!endif 2.54 - 2.55 -LAUNCHERDIR = $(WorkSpace)/src/os/windows/launcher 2.56 -LAUNCHERDIR_SHARE = $(WorkSpace)/src/share/tools/launcher 2.57 - 2.58 -OUTDIR = launcher 2.59 - 2.60 -{$(LAUNCHERDIR)}.c{$(OUTDIR)}.obj: 2.61 - -mkdir $(OUTDIR) 2>NUL >NUL 2.62 - $(CPP) $(LAUNCHER_FLAGS) /c /Fo$@ $< 2.63 - 2.64 -{$(LAUNCHERDIR_SHARE)}.c{$(OUTDIR)}.obj: 2.65 - -mkdir $(OUTDIR) 2>NUL >NUL 2.66 - $(CPP) $(LAUNCHER_FLAGS) /c /Fo$@ $< 2.67 - 2.68 -$(OUTDIR)\*.obj: $(LAUNCHERDIR)\*.c $(LAUNCHERDIR)\*.h $(LAUNCHERDIR_SHARE)\*.c $(LAUNCHERDIR_SHARE)\*.h 2.69 - 2.70 -launcher: $(OUTDIR)\java.obj $(OUTDIR)\java_md.obj $(OUTDIR)\jli_util.obj 2.71 - echo $(JAVA_HOME) > jdkpath.txt 2.72 - $(LINK) $(LINK_FLAGS) /out:hotspot.exe $** 2.73 - 2.74 - 2.75 +# 2.76 +# Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. 2.77 +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 2.78 +# 2.79 +# This code is free software; you can redistribute it and/or modify it 2.80 +# under the terms of the GNU General Public License version 2 only, as 2.81 +# published by the Free Software Foundation. 2.82 +# 2.83 +# This code is distributed in the hope that it will be useful, but WITHOUT 2.84 +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 2.85 +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 2.86 +# version 2 for more details (a copy is included in the LICENSE file that 2.87 +# accompanied this code). 2.88 +# 2.89 +# You should have received a copy of the GNU General Public License version 2.90 +# 2 along with this work; if not, write to the Free Software Foundation, 2.91 +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 2.92 +# 2.93 +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 2.94 +# or visit www.oracle.com if you need additional information or have any 2.95 +# questions. 
2.96 +# 2.97 +# 2.98 + 2.99 + 2.100 +LAUNCHER_FLAGS=$(CPP_FLAGS) $(ARCHFLAG) \ 2.101 + /D FULL_VERSION=\"$(HOTSPOT_RELEASE_VERSION)\" \ 2.102 + /D JDK_MAJOR_VERSION=\"$(JDK_MAJOR_VERSION)\" \ 2.103 + /D JDK_MINOR_VERSION=\"$(JDK_MINOR_VERSION)\" \ 2.104 + /D GAMMA \ 2.105 + /D LAUNCHER_TYPE=\"gamma\" \ 2.106 + /D _CRT_SECURE_NO_WARNINGS \ 2.107 + /D _CRT_SECURE_NO_DEPRECATE \ 2.108 + /D LINK_INTO_LIBJVM \ 2.109 + /I $(WorkSpace)\src\os\windows\launcher \ 2.110 + /I $(WorkSpace)\src\share\tools\launcher \ 2.111 + /I $(WorkSpace)\src\share\vm\prims \ 2.112 + /I $(WorkSpace)\src\share\vm \ 2.113 + /I $(WorkSpace)\src\cpu\$(Platform_arch)\vm \ 2.114 + /I $(WorkSpace)\src\os\windows\vm 2.115 + 2.116 +LINK_FLAGS=/manifest $(HS_INTERNAL_NAME).lib kernel32.lib user32.lib /nologo /machine:$(MACHINE) /map /debug /subsystem:console 2.117 + 2.118 +!if "$(COMPILER_NAME)" == "VS2005" 2.119 +# This VS2005 compiler has /GS as a default and requires bufferoverflowU.lib 2.120 +# on the link command line, otherwise we get missing __security_check_cookie 2.121 +# externals at link time. Even with /GS-, you need bufferoverflowU.lib. 
2.122 +BUFFEROVERFLOWLIB = bufferoverflowU.lib 2.123 +LINK_FLAGS = $(LINK_FLAGS) $(BUFFEROVERFLOWLIB) 2.124 +!endif 2.125 + 2.126 +!if "$(COMPILER_NAME)" == "VS2010" && "$(BUILDARCH)" == "i486" 2.127 +LINK_FLAGS = /SAFESEH $(LINK_FLAGS) 2.128 +!endif 2.129 + 2.130 +LAUNCHERDIR = $(WorkSpace)/src/os/windows/launcher 2.131 +LAUNCHERDIR_SHARE = $(WorkSpace)/src/share/tools/launcher 2.132 + 2.133 +OUTDIR = launcher 2.134 + 2.135 +{$(LAUNCHERDIR)}.c{$(OUTDIR)}.obj: 2.136 + -mkdir $(OUTDIR) 2>NUL >NUL 2.137 + $(CPP) $(LAUNCHER_FLAGS) /c /Fo$@ $< 2.138 + 2.139 +{$(LAUNCHERDIR_SHARE)}.c{$(OUTDIR)}.obj: 2.140 + -mkdir $(OUTDIR) 2>NUL >NUL 2.141 + $(CPP) $(LAUNCHER_FLAGS) /c /Fo$@ $< 2.142 + 2.143 +$(OUTDIR)\*.obj: $(LAUNCHERDIR)\*.c $(LAUNCHERDIR)\*.h $(LAUNCHERDIR_SHARE)\*.c $(LAUNCHERDIR_SHARE)\*.h 2.144 + 2.145 +launcher: $(OUTDIR)\java.obj $(OUTDIR)\java_md.obj $(OUTDIR)\jli_util.obj 2.146 + echo $(JAVA_HOME) > jdkpath.txt 2.147 + $(LINK) $(LINK_FLAGS) /out:hotspot.exe $**
3.1 --- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp Thu Mar 03 15:13:18 2011 -0800 3.2 +++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp Fri Mar 04 14:06:16 2011 -0800 3.3 @@ -395,9 +395,9 @@ 3.4 3.5 int offset = code_offset(); 3.6 3.7 - __ call(Runtime1::entry_for(Runtime1::handle_exception_id), relocInfo::runtime_call_type); 3.8 + __ call(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id), relocInfo::runtime_call_type); 3.9 __ delayed()->nop(); 3.10 - debug_only(__ stop("should have gone to the caller");) 3.11 + __ should_not_reach_here(); 3.12 assert(code_offset() - offset <= exception_handler_size, "overflow"); 3.13 __ end_a_stub(); 3.14
4.1 --- a/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp Thu Mar 03 15:13:18 2011 -0800 4.2 +++ b/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp Fri Mar 04 14:06:16 2011 -0800 4.3 @@ -148,7 +148,7 @@ 4.4 4.5 static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) { 4.6 assert(frame_size_in_bytes == __ total_frame_size_in_bytes(reg_save_size_in_words), 4.7 - " mismatch in calculation"); 4.8 + "mismatch in calculation"); 4.9 sasm->set_frame_size(frame_size_in_bytes / BytesPerWord); 4.10 int frame_size_in_slots = frame_size_in_bytes / sizeof(jint); 4.11 OopMap* oop_map = new OopMap(frame_size_in_slots, 0); 4.12 @@ -176,9 +176,8 @@ 4.13 4.14 static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = true) { 4.15 assert(frame_size_in_bytes == __ total_frame_size_in_bytes(reg_save_size_in_words), 4.16 - " mismatch in calculation"); 4.17 + "mismatch in calculation"); 4.18 __ save_frame_c1(frame_size_in_bytes); 4.19 - sasm->set_frame_size(frame_size_in_bytes / BytesPerWord); 4.20 4.21 // Record volatile registers as callee-save values in an OopMap so their save locations will be 4.22 // propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for 4.23 @@ -367,23 +366,7 @@ 4.24 switch (id) { 4.25 case forward_exception_id: 4.26 { 4.27 - // we're handling an exception in the context of a compiled 4.28 - // frame. The registers have been saved in the standard 4.29 - // places. Perform an exception lookup in the caller and 4.30 - // dispatch to the handler if found. Otherwise unwind and 4.31 - // dispatch to the callers exception handler. 
4.32 - 4.33 - oop_maps = new OopMapSet(); 4.34 - OopMap* oop_map = generate_oop_map(sasm, true); 4.35 - 4.36 - // transfer the pending exception to the exception_oop 4.37 - __ ld_ptr(G2_thread, in_bytes(JavaThread::pending_exception_offset()), Oexception); 4.38 - __ ld_ptr(Oexception, 0, G0); 4.39 - __ st_ptr(G0, G2_thread, in_bytes(JavaThread::pending_exception_offset())); 4.40 - __ add(I7, frame::pc_return_offset, Oissuing_pc); 4.41 - 4.42 - generate_handle_exception(sasm, oop_maps, oop_map); 4.43 - __ should_not_reach_here(); 4.44 + oop_maps = generate_handle_exception(id, sasm); 4.45 } 4.46 break; 4.47 4.48 @@ -671,15 +654,14 @@ 4.49 break; 4.50 4.51 case handle_exception_id: 4.52 - { 4.53 - __ set_info("handle_exception", dont_gc_arguments); 4.54 - // make a frame and preserve the caller's caller-save registers 4.55 + { __ set_info("handle_exception", dont_gc_arguments); 4.56 + oop_maps = generate_handle_exception(id, sasm); 4.57 + } 4.58 + break; 4.59 4.60 - oop_maps = new OopMapSet(); 4.61 - OopMap* oop_map = save_live_registers(sasm); 4.62 - __ mov(Oexception->after_save(), Oexception); 4.63 - __ mov(Oissuing_pc->after_save(), Oissuing_pc); 4.64 - generate_handle_exception(sasm, oop_maps, oop_map); 4.65 + case handle_exception_from_callee_id: 4.66 + { __ set_info("handle_exception_from_callee", dont_gc_arguments); 4.67 + oop_maps = generate_handle_exception(id, sasm); 4.68 } 4.69 break; 4.70 4.71 @@ -696,7 +678,7 @@ 4.72 G2_thread, Oissuing_pc->after_save()); 4.73 __ verify_not_null_oop(Oexception->after_save()); 4.74 4.75 - // Restore SP from L7 if the exception PC is a MethodHandle call site. 4.76 + // Restore SP from L7 if the exception PC is a method handle call site. 4.77 __ mov(O0, G5); // Save the target address. 4.78 __ lduw(Address(G2_thread, JavaThread::is_method_handle_return_offset()), L0); 4.79 __ tst(L0); // Condition codes are preserved over the restore. 
4.80 @@ -1006,48 +988,89 @@ 4.81 } 4.82 4.83 4.84 -void Runtime1::generate_handle_exception(StubAssembler* sasm, OopMapSet* oop_maps, OopMap* oop_map, bool) { 4.85 - Label no_deopt; 4.86 +OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler* sasm) { 4.87 + __ block_comment("generate_handle_exception"); 4.88 + 4.89 + // Save registers, if required. 4.90 + OopMapSet* oop_maps = new OopMapSet(); 4.91 + OopMap* oop_map = NULL; 4.92 + switch (id) { 4.93 + case forward_exception_id: 4.94 + // We're handling an exception in the context of a compiled frame. 4.95 + // The registers have been saved in the standard places. Perform 4.96 + // an exception lookup in the caller and dispatch to the handler 4.97 + // if found. Otherwise unwind and dispatch to the callers 4.98 + // exception handler. 4.99 + oop_map = generate_oop_map(sasm, true); 4.100 + 4.101 + // transfer the pending exception to the exception_oop 4.102 + __ ld_ptr(G2_thread, in_bytes(JavaThread::pending_exception_offset()), Oexception); 4.103 + __ ld_ptr(Oexception, 0, G0); 4.104 + __ st_ptr(G0, G2_thread, in_bytes(JavaThread::pending_exception_offset())); 4.105 + __ add(I7, frame::pc_return_offset, Oissuing_pc); 4.106 + break; 4.107 + case handle_exception_id: 4.108 + // At this point all registers MAY be live. 4.109 + oop_map = save_live_registers(sasm); 4.110 + __ mov(Oexception->after_save(), Oexception); 4.111 + __ mov(Oissuing_pc->after_save(), Oissuing_pc); 4.112 + break; 4.113 + case handle_exception_from_callee_id: 4.114 + // At this point all registers except exception oop (Oexception) 4.115 + // and exception pc (Oissuing_pc) are dead. 
4.116 + oop_map = new OopMap(frame_size_in_bytes / sizeof(jint), 0); 4.117 + sasm->set_frame_size(frame_size_in_bytes / BytesPerWord); 4.118 + __ save_frame_c1(frame_size_in_bytes); 4.119 + __ mov(Oexception->after_save(), Oexception); 4.120 + __ mov(Oissuing_pc->after_save(), Oissuing_pc); 4.121 + break; 4.122 + default: ShouldNotReachHere(); 4.123 + } 4.124 4.125 __ verify_not_null_oop(Oexception); 4.126 4.127 // save the exception and issuing pc in the thread 4.128 - __ st_ptr(Oexception, G2_thread, in_bytes(JavaThread::exception_oop_offset())); 4.129 + __ st_ptr(Oexception, G2_thread, in_bytes(JavaThread::exception_oop_offset())); 4.130 __ st_ptr(Oissuing_pc, G2_thread, in_bytes(JavaThread::exception_pc_offset())); 4.131 4.132 - // save the real return address and use the throwing pc as the return address to lookup (has bci & oop map) 4.133 - __ mov(I7, L0); 4.134 + // use the throwing pc as the return address to lookup (has bci & oop map) 4.135 __ mov(Oissuing_pc, I7); 4.136 __ sub(I7, frame::pc_return_offset, I7); 4.137 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc)); 4.138 + oop_maps->add_gc_map(call_offset, oop_map); 4.139 4.140 // Note: if nmethod has been deoptimized then regardless of 4.141 // whether it had a handler or not we will deoptimize 4.142 // by entering the deopt blob with a pending exception. 4.143 4.144 -#ifdef ASSERT 4.145 - Label done; 4.146 - __ tst(O0); 4.147 - __ br(Assembler::notZero, false, Assembler::pn, done); 4.148 - __ delayed()->nop(); 4.149 - __ stop("should have found address"); 4.150 - __ bind(done); 4.151 -#endif 4.152 + // Restore the registers that were saved at the beginning, remove 4.153 + // the frame and jump to the exception handler. 
4.154 + switch (id) { 4.155 + case forward_exception_id: 4.156 + case handle_exception_id: 4.157 + restore_live_registers(sasm); 4.158 + __ jmp(O0, 0); 4.159 + __ delayed()->restore(); 4.160 + break; 4.161 + case handle_exception_from_callee_id: 4.162 + // Restore SP from L7 if the exception PC is a method handle call site. 4.163 + __ mov(O0, G5); // Save the target address. 4.164 + __ lduw(Address(G2_thread, JavaThread::is_method_handle_return_offset()), L0); 4.165 + __ tst(L0); // Condition codes are preserved over the restore. 4.166 + __ restore(); 4.167 4.168 - // restore the registers that were saved at the beginning and jump to the exception handler. 4.169 - restore_live_registers(sasm); 4.170 + __ jmp(G5, 0); // jump to the exception handler 4.171 + __ delayed()->movcc(Assembler::notZero, false, Assembler::icc, L7_mh_SP_save, SP); // Restore SP if required. 4.172 + break; 4.173 + default: ShouldNotReachHere(); 4.174 + } 4.175 4.176 - __ jmp(O0, 0); 4.177 - __ delayed()->restore(); 4.178 - 4.179 - oop_maps->add_gc_map(call_offset, oop_map); 4.180 + return oop_maps; 4.181 } 4.182 4.183 4.184 #undef __ 4.185 4.186 -#define __ masm-> 4.187 - 4.188 const char *Runtime1::pd_name_for_address(address entry) { 4.189 return "<unknown function>"; 4.190 }
5.1 --- a/src/cpu/sparc/vm/methodHandles_sparc.cpp Thu Mar 03 15:13:18 2011 -0800 5.2 +++ b/src/cpu/sparc/vm/methodHandles_sparc.cpp Fri Mar 04 14:06:16 2011 -0800 5.3 @@ -417,6 +417,7 @@ 5.4 5.5 // Some handy addresses: 5.6 Address G5_method_fie( G5_method, in_bytes(methodOopDesc::from_interpreted_offset())); 5.7 + Address G5_method_fce( G5_method, in_bytes(methodOopDesc::from_compiled_offset())); 5.8 5.9 Address G3_mh_vmtarget( G3_method_handle, java_dyn_MethodHandle::vmtarget_offset_in_bytes()); 5.10 5.11 @@ -444,12 +445,10 @@ 5.12 case _raise_exception: 5.13 { 5.14 // Not a real MH entry, but rather shared code for raising an 5.15 - // exception. Since we use a C2I adapter to set up the 5.16 - // interpreter state, arguments are expected in compiler 5.17 - // argument registers. 5.18 + // exception. Since we use the compiled entry, arguments are 5.19 + // expected in compiler argument registers. 5.20 assert(raise_exception_method(), "must be set"); 5.21 - address c2i_entry = raise_exception_method()->get_c2i_entry(); 5.22 - assert(c2i_entry, "method must be linked"); 5.23 + assert(raise_exception_method()->from_compiled_entry(), "method must be linked"); 5.24 5.25 __ mov(O5_savedSP, SP); // Cut the stack back to where the caller started. 5.26 5.27 @@ -468,10 +467,9 @@ 5.28 __ delayed()->nop(); 5.29 5.30 __ verify_oop(G5_method); 5.31 - __ jump_to(AddressLiteral(c2i_entry), O3_scratch); 5.32 + __ jump_indirect_to(G5_method_fce, O3_scratch); // jump to compiled entry 5.33 __ delayed()->nop(); 5.34 5.35 - // If we get here, the Java runtime did not do its job of creating the exception. 5.36 // Do something that is at least causes a valid throw from the interpreter. 5.37 __ bind(L_no_method); 5.38 __ unimplemented("call throw_WrongMethodType_entry");
6.1 --- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp Thu Mar 03 15:13:18 2011 -0800 6.2 +++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp Fri Mar 04 14:06:16 2011 -0800 6.3 @@ -1,5 +1,5 @@ 6.4 /* 6.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 6.6 + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. 6.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 6.8 * 6.9 * This code is free software; you can redistribute it and/or modify it 6.10 @@ -968,19 +968,6 @@ 6.11 return start; 6.12 } 6.13 6.14 - static address disjoint_byte_copy_entry; 6.15 - static address disjoint_short_copy_entry; 6.16 - static address disjoint_int_copy_entry; 6.17 - static address disjoint_long_copy_entry; 6.18 - static address disjoint_oop_copy_entry; 6.19 - 6.20 - static address byte_copy_entry; 6.21 - static address short_copy_entry; 6.22 - static address int_copy_entry; 6.23 - static address long_copy_entry; 6.24 - static address oop_copy_entry; 6.25 - 6.26 - static address checkcast_copy_entry; 6.27 6.28 // 6.29 // Verify that a register contains clean 32-bits positive value 6.30 @@ -1046,31 +1033,40 @@ 6.31 // 6.32 // The input registers are overwritten. 6.33 // 6.34 - void gen_write_ref_array_pre_barrier(Register addr, Register count) { 6.35 + void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) { 6.36 BarrierSet* bs = Universe::heap()->barrier_set(); 6.37 - if (bs->has_write_ref_pre_barrier()) { 6.38 - assert(bs->has_write_ref_array_pre_opt(), 6.39 - "Else unsupported barrier set."); 6.40 - 6.41 - __ save_frame(0); 6.42 - // Save the necessary global regs... will be used after. 
6.43 - if (addr->is_global()) { 6.44 - __ mov(addr, L0); 6.45 - } 6.46 - if (count->is_global()) { 6.47 - __ mov(count, L1); 6.48 - } 6.49 - __ mov(addr->after_save(), O0); 6.50 - // Get the count into O1 6.51 - __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre)); 6.52 - __ delayed()->mov(count->after_save(), O1); 6.53 - if (addr->is_global()) { 6.54 - __ mov(L0, addr); 6.55 - } 6.56 - if (count->is_global()) { 6.57 - __ mov(L1, count); 6.58 - } 6.59 - __ restore(); 6.60 + switch (bs->kind()) { 6.61 + case BarrierSet::G1SATBCT: 6.62 + case BarrierSet::G1SATBCTLogging: 6.63 + // With G1, don't generate the call if we statically know that the target in uninitialized 6.64 + if (!dest_uninitialized) { 6.65 + __ save_frame(0); 6.66 + // Save the necessary global regs... will be used after. 6.67 + if (addr->is_global()) { 6.68 + __ mov(addr, L0); 6.69 + } 6.70 + if (count->is_global()) { 6.71 + __ mov(count, L1); 6.72 + } 6.73 + __ mov(addr->after_save(), O0); 6.74 + // Get the count into O1 6.75 + __ call(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre)); 6.76 + __ delayed()->mov(count->after_save(), O1); 6.77 + if (addr->is_global()) { 6.78 + __ mov(L0, addr); 6.79 + } 6.80 + if (count->is_global()) { 6.81 + __ mov(L1, count); 6.82 + } 6.83 + __ restore(); 6.84 + } 6.85 + break; 6.86 + case BarrierSet::CardTableModRef: 6.87 + case BarrierSet::CardTableExtension: 6.88 + case BarrierSet::ModRef: 6.89 + break; 6.90 + default: 6.91 + ShouldNotReachHere(); 6.92 } 6.93 } 6.94 // 6.95 @@ -1084,7 +1080,7 @@ 6.96 // The input registers are overwritten. 
6.97 // 6.98 void gen_write_ref_array_post_barrier(Register addr, Register count, 6.99 - Register tmp) { 6.100 + Register tmp) { 6.101 BarrierSet* bs = Universe::heap()->barrier_set(); 6.102 6.103 switch (bs->kind()) { 6.104 @@ -1283,7 +1279,7 @@ 6.105 // to: O1 6.106 // count: O2 treated as signed 6.107 // 6.108 - address generate_disjoint_byte_copy(bool aligned, const char * name) { 6.109 + address generate_disjoint_byte_copy(bool aligned, address *entry, const char *name) { 6.110 __ align(CodeEntryAlignment); 6.111 StubCodeMark mark(this, "StubRoutines", name); 6.112 address start = __ pc(); 6.113 @@ -1299,9 +1295,11 @@ 6.114 6.115 assert_clean_int(count, O3); // Make sure 'count' is clean int. 6.116 6.117 - if (!aligned) disjoint_byte_copy_entry = __ pc(); 6.118 - // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 6.119 - if (!aligned) BLOCK_COMMENT("Entry:"); 6.120 + if (entry != NULL) { 6.121 + *entry = __ pc(); 6.122 + // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 6.123 + BLOCK_COMMENT("Entry:"); 6.124 + } 6.125 6.126 // for short arrays, just do single element copy 6.127 __ cmp(count, 23); // 16 + 7 6.128 @@ -1391,15 +1389,13 @@ 6.129 // to: O1 6.130 // count: O2 treated as signed 6.131 // 6.132 - address generate_conjoint_byte_copy(bool aligned, const char * name) { 6.133 + address generate_conjoint_byte_copy(bool aligned, address nooverlap_target, 6.134 + address *entry, const char *name) { 6.135 // Do reverse copy. 6.136 6.137 __ align(CodeEntryAlignment); 6.138 StubCodeMark mark(this, "StubRoutines", name); 6.139 address start = __ pc(); 6.140 - address nooverlap_target = aligned ? 6.141 - StubRoutines::arrayof_jbyte_disjoint_arraycopy() : 6.142 - disjoint_byte_copy_entry; 6.143 6.144 Label L_skip_alignment, L_align, L_aligned_copy; 6.145 Label L_copy_byte, L_copy_byte_loop, L_exit; 6.146 @@ -1412,9 +1408,11 @@ 6.147 6.148 assert_clean_int(count, O3); // Make sure 'count' is clean int. 
6.149 6.150 - if (!aligned) byte_copy_entry = __ pc(); 6.151 - // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 6.152 - if (!aligned) BLOCK_COMMENT("Entry:"); 6.153 + if (entry != NULL) { 6.154 + *entry = __ pc(); 6.155 + // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 6.156 + BLOCK_COMMENT("Entry:"); 6.157 + } 6.158 6.159 array_overlap_test(nooverlap_target, 0); 6.160 6.161 @@ -1504,7 +1502,7 @@ 6.162 // to: O1 6.163 // count: O2 treated as signed 6.164 // 6.165 - address generate_disjoint_short_copy(bool aligned, const char * name) { 6.166 + address generate_disjoint_short_copy(bool aligned, address *entry, const char * name) { 6.167 __ align(CodeEntryAlignment); 6.168 StubCodeMark mark(this, "StubRoutines", name); 6.169 address start = __ pc(); 6.170 @@ -1520,9 +1518,11 @@ 6.171 6.172 assert_clean_int(count, O3); // Make sure 'count' is clean int. 6.173 6.174 - if (!aligned) disjoint_short_copy_entry = __ pc(); 6.175 - // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 6.176 - if (!aligned) BLOCK_COMMENT("Entry:"); 6.177 + if (entry != NULL) { 6.178 + *entry = __ pc(); 6.179 + // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 6.180 + BLOCK_COMMENT("Entry:"); 6.181 + } 6.182 6.183 // for short arrays, just do single element copy 6.184 __ cmp(count, 11); // 8 + 3 (22 bytes) 6.185 @@ -1842,15 +1842,13 @@ 6.186 // to: O1 6.187 // count: O2 treated as signed 6.188 // 6.189 - address generate_conjoint_short_copy(bool aligned, const char * name) { 6.190 + address generate_conjoint_short_copy(bool aligned, address nooverlap_target, 6.191 + address *entry, const char *name) { 6.192 // Do reverse copy. 6.193 6.194 __ align(CodeEntryAlignment); 6.195 StubCodeMark mark(this, "StubRoutines", name); 6.196 address start = __ pc(); 6.197 - address nooverlap_target = aligned ? 
6.198 - StubRoutines::arrayof_jshort_disjoint_arraycopy() : 6.199 - disjoint_short_copy_entry; 6.200 6.201 Label L_skip_alignment, L_skip_alignment2, L_aligned_copy; 6.202 Label L_copy_2_bytes, L_copy_2_bytes_loop, L_exit; 6.203 @@ -1865,9 +1863,11 @@ 6.204 6.205 assert_clean_int(count, O3); // Make sure 'count' is clean int. 6.206 6.207 - if (!aligned) short_copy_entry = __ pc(); 6.208 - // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 6.209 - if (!aligned) BLOCK_COMMENT("Entry:"); 6.210 + if (entry != NULL) { 6.211 + *entry = __ pc(); 6.212 + // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 6.213 + BLOCK_COMMENT("Entry:"); 6.214 + } 6.215 6.216 array_overlap_test(nooverlap_target, 1); 6.217 6.218 @@ -2072,7 +2072,7 @@ 6.219 // to: O1 6.220 // count: O2 treated as signed 6.221 // 6.222 - address generate_disjoint_int_copy(bool aligned, const char * name) { 6.223 + address generate_disjoint_int_copy(bool aligned, address *entry, const char *name) { 6.224 __ align(CodeEntryAlignment); 6.225 StubCodeMark mark(this, "StubRoutines", name); 6.226 address start = __ pc(); 6.227 @@ -2080,9 +2080,11 @@ 6.228 const Register count = O2; 6.229 assert_clean_int(count, O3); // Make sure 'count' is clean int. 
6.230 6.231 - if (!aligned) disjoint_int_copy_entry = __ pc(); 6.232 - // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 6.233 - if (!aligned) BLOCK_COMMENT("Entry:"); 6.234 + if (entry != NULL) { 6.235 + *entry = __ pc(); 6.236 + // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 6.237 + BLOCK_COMMENT("Entry:"); 6.238 + } 6.239 6.240 generate_disjoint_int_copy_core(aligned); 6.241 6.242 @@ -2204,20 +2206,19 @@ 6.243 // to: O1 6.244 // count: O2 treated as signed 6.245 // 6.246 - address generate_conjoint_int_copy(bool aligned, const char * name) { 6.247 + address generate_conjoint_int_copy(bool aligned, address nooverlap_target, 6.248 + address *entry, const char *name) { 6.249 __ align(CodeEntryAlignment); 6.250 StubCodeMark mark(this, "StubRoutines", name); 6.251 address start = __ pc(); 6.252 6.253 - address nooverlap_target = aligned ? 6.254 - StubRoutines::arrayof_jint_disjoint_arraycopy() : 6.255 - disjoint_int_copy_entry; 6.256 - 6.257 assert_clean_int(O2, O3); // Make sure 'count' is clean int. 6.258 6.259 - if (!aligned) int_copy_entry = __ pc(); 6.260 - // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 6.261 - if (!aligned) BLOCK_COMMENT("Entry:"); 6.262 + if (entry != NULL) { 6.263 + *entry = __ pc(); 6.264 + // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 6.265 + BLOCK_COMMENT("Entry:"); 6.266 + } 6.267 6.268 array_overlap_test(nooverlap_target, 2); 6.269 6.270 @@ -2336,16 +2337,18 @@ 6.271 // to: O1 6.272 // count: O2 treated as signed 6.273 // 6.274 - address generate_disjoint_long_copy(bool aligned, const char * name) { 6.275 + address generate_disjoint_long_copy(bool aligned, address *entry, const char *name) { 6.276 __ align(CodeEntryAlignment); 6.277 StubCodeMark mark(this, "StubRoutines", name); 6.278 address start = __ pc(); 6.279 6.280 assert_clean_int(O2, O3); // Make sure 'count' is clean int. 
6.281 6.282 - if (!aligned) disjoint_long_copy_entry = __ pc(); 6.283 - // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 6.284 - if (!aligned) BLOCK_COMMENT("Entry:"); 6.285 + if (entry != NULL) { 6.286 + *entry = __ pc(); 6.287 + // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 6.288 + BLOCK_COMMENT("Entry:"); 6.289 + } 6.290 6.291 generate_disjoint_long_copy_core(aligned); 6.292 6.293 @@ -2406,19 +2409,21 @@ 6.294 // to: O1 6.295 // count: O2 treated as signed 6.296 // 6.297 - address generate_conjoint_long_copy(bool aligned, const char * name) { 6.298 + address generate_conjoint_long_copy(bool aligned, address nooverlap_target, 6.299 + address *entry, const char *name) { 6.300 __ align(CodeEntryAlignment); 6.301 StubCodeMark mark(this, "StubRoutines", name); 6.302 address start = __ pc(); 6.303 6.304 - assert(!aligned, "usage"); 6.305 - address nooverlap_target = disjoint_long_copy_entry; 6.306 + assert(aligned, "Should always be aligned"); 6.307 6.308 assert_clean_int(O2, O3); // Make sure 'count' is clean int. 
6.309 6.310 - if (!aligned) long_copy_entry = __ pc(); 6.311 - // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 6.312 - if (!aligned) BLOCK_COMMENT("Entry:"); 6.313 + if (entry != NULL) { 6.314 + *entry = __ pc(); 6.315 + // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 6.316 + BLOCK_COMMENT("Entry:"); 6.317 + } 6.318 6.319 array_overlap_test(nooverlap_target, 3); 6.320 6.321 @@ -2439,7 +2444,8 @@ 6.322 // to: O1 6.323 // count: O2 treated as signed 6.324 // 6.325 - address generate_disjoint_oop_copy(bool aligned, const char * name) { 6.326 + address generate_disjoint_oop_copy(bool aligned, address *entry, const char *name, 6.327 + bool dest_uninitialized = false) { 6.328 6.329 const Register from = O0; // source array address 6.330 const Register to = O1; // destination array address 6.331 @@ -2451,14 +2457,16 @@ 6.332 6.333 assert_clean_int(count, O3); // Make sure 'count' is clean int. 6.334 6.335 - if (!aligned) disjoint_oop_copy_entry = __ pc(); 6.336 - // caller can pass a 64-bit byte count here 6.337 - if (!aligned) BLOCK_COMMENT("Entry:"); 6.338 + if (entry != NULL) { 6.339 + *entry = __ pc(); 6.340 + // caller can pass a 64-bit byte count here 6.341 + BLOCK_COMMENT("Entry:"); 6.342 + } 6.343 6.344 // save arguments for barrier generation 6.345 __ mov(to, G1); 6.346 __ mov(count, G5); 6.347 - gen_write_ref_array_pre_barrier(G1, G5); 6.348 + gen_write_ref_array_pre_barrier(G1, G5, dest_uninitialized); 6.349 #ifdef _LP64 6.350 assert_clean_int(count, O3); // Make sure 'count' is clean int. 
6.351 if (UseCompressedOops) { 6.352 @@ -2487,7 +2495,9 @@ 6.353 // to: O1 6.354 // count: O2 treated as signed 6.355 // 6.356 - address generate_conjoint_oop_copy(bool aligned, const char * name) { 6.357 + address generate_conjoint_oop_copy(bool aligned, address nooverlap_target, 6.358 + address *entry, const char *name, 6.359 + bool dest_uninitialized = false) { 6.360 6.361 const Register from = O0; // source array address 6.362 const Register to = O1; // destination array address 6.363 @@ -2499,21 +2509,18 @@ 6.364 6.365 assert_clean_int(count, O3); // Make sure 'count' is clean int. 6.366 6.367 - if (!aligned) oop_copy_entry = __ pc(); 6.368 - // caller can pass a 64-bit byte count here 6.369 - if (!aligned) BLOCK_COMMENT("Entry:"); 6.370 + if (entry != NULL) { 6.371 + *entry = __ pc(); 6.372 + // caller can pass a 64-bit byte count here 6.373 + BLOCK_COMMENT("Entry:"); 6.374 + } 6.375 + 6.376 + array_overlap_test(nooverlap_target, LogBytesPerHeapOop); 6.377 6.378 // save arguments for barrier generation 6.379 __ mov(to, G1); 6.380 __ mov(count, G5); 6.381 - 6.382 - gen_write_ref_array_pre_barrier(G1, G5); 6.383 - 6.384 - address nooverlap_target = aligned ? 
6.385 - StubRoutines::arrayof_oop_disjoint_arraycopy() : 6.386 - disjoint_oop_copy_entry; 6.387 - 6.388 - array_overlap_test(nooverlap_target, LogBytesPerHeapOop); 6.389 + gen_write_ref_array_pre_barrier(G1, G5, dest_uninitialized); 6.390 6.391 #ifdef _LP64 6.392 if (UseCompressedOops) { 6.393 @@ -2582,7 +2589,7 @@ 6.394 // ckval: O4 (super_klass) 6.395 // ret: O0 zero for success; (-1^K) where K is partial transfer count 6.396 // 6.397 - address generate_checkcast_copy(const char* name) { 6.398 + address generate_checkcast_copy(const char *name, address *entry, bool dest_uninitialized = false) { 6.399 6.400 const Register O0_from = O0; // source array address 6.401 const Register O1_to = O1; // destination array address 6.402 @@ -2600,8 +2607,6 @@ 6.403 StubCodeMark mark(this, "StubRoutines", name); 6.404 address start = __ pc(); 6.405 6.406 - gen_write_ref_array_pre_barrier(O1, O2); 6.407 - 6.408 #ifdef ASSERT 6.409 // We sometimes save a frame (see generate_type_check below). 6.410 // If this will cause trouble, let's fail now instead of later. 6.411 @@ -2625,9 +2630,12 @@ 6.412 } 6.413 #endif //ASSERT 6.414 6.415 - checkcast_copy_entry = __ pc(); 6.416 - // caller can pass a 64-bit byte count here (from generic stub) 6.417 - BLOCK_COMMENT("Entry:"); 6.418 + if (entry != NULL) { 6.419 + *entry = __ pc(); 6.420 + // caller can pass a 64-bit byte count here (from generic stub) 6.421 + BLOCK_COMMENT("Entry:"); 6.422 + } 6.423 + gen_write_ref_array_pre_barrier(O1_to, O2_count, dest_uninitialized); 6.424 6.425 Label load_element, store_element, do_card_marks, fail, done; 6.426 __ addcc(O2_count, 0, G1_remain); // initialize loop index, and test it 6.427 @@ -2700,7 +2708,11 @@ 6.428 // Examines the alignment of the operands and dispatches 6.429 // to a long, int, short, or byte copy loop. 
6.430 // 6.431 - address generate_unsafe_copy(const char* name) { 6.432 + address generate_unsafe_copy(const char* name, 6.433 + address byte_copy_entry, 6.434 + address short_copy_entry, 6.435 + address int_copy_entry, 6.436 + address long_copy_entry) { 6.437 6.438 const Register O0_from = O0; // source array address 6.439 const Register O1_to = O1; // destination array address 6.440 @@ -2796,8 +2808,13 @@ 6.441 // O0 == 0 - success 6.442 // O0 == -1 - need to call System.arraycopy 6.443 // 6.444 - address generate_generic_copy(const char *name) { 6.445 - 6.446 + address generate_generic_copy(const char *name, 6.447 + address entry_jbyte_arraycopy, 6.448 + address entry_jshort_arraycopy, 6.449 + address entry_jint_arraycopy, 6.450 + address entry_oop_arraycopy, 6.451 + address entry_jlong_arraycopy, 6.452 + address entry_checkcast_arraycopy) { 6.453 Label L_failed, L_objArray; 6.454 6.455 // Input registers 6.456 @@ -2970,15 +2987,15 @@ 6.457 6.458 BLOCK_COMMENT("choose copy loop based on element size"); 6.459 __ cmp(G3_elsize, 0); 6.460 - __ br(Assembler::equal,true,Assembler::pt,StubRoutines::_jbyte_arraycopy); 6.461 + __ br(Assembler::equal, true, Assembler::pt, entry_jbyte_arraycopy); 6.462 __ delayed()->signx(length, count); // length 6.463 6.464 __ cmp(G3_elsize, LogBytesPerShort); 6.465 - __ br(Assembler::equal,true,Assembler::pt,StubRoutines::_jshort_arraycopy); 6.466 + __ br(Assembler::equal, true, Assembler::pt, entry_jshort_arraycopy); 6.467 __ delayed()->signx(length, count); // length 6.468 6.469 __ cmp(G3_elsize, LogBytesPerInt); 6.470 - __ br(Assembler::equal,true,Assembler::pt,StubRoutines::_jint_arraycopy); 6.471 + __ br(Assembler::equal, true, Assembler::pt, entry_jint_arraycopy); 6.472 __ delayed()->signx(length, count); // length 6.473 #ifdef ASSERT 6.474 { Label L; 6.475 @@ -2989,7 +3006,7 @@ 6.476 __ bind(L); 6.477 } 6.478 #endif 6.479 - __ br(Assembler::always,false,Assembler::pt,StubRoutines::_jlong_arraycopy); 6.480 + __ 
br(Assembler::always, false, Assembler::pt, entry_jlong_arraycopy); 6.481 __ delayed()->signx(length, count); // length 6.482 6.483 // objArrayKlass 6.484 @@ -3013,7 +3030,7 @@ 6.485 __ add(src, src_pos, from); // src_addr 6.486 __ add(dst, dst_pos, to); // dst_addr 6.487 __ BIND(L_plain_copy); 6.488 - __ br(Assembler::always, false, Assembler::pt,StubRoutines::_oop_arraycopy); 6.489 + __ br(Assembler::always, false, Assembler::pt, entry_oop_arraycopy); 6.490 __ delayed()->signx(length, count); // length 6.491 6.492 __ BIND(L_checkcast_copy); 6.493 @@ -3057,7 +3074,7 @@ 6.494 __ ld_ptr(G4_dst_klass, ek_offset, O4); // dest elem klass 6.495 // lduw(O4, sco_offset, O3); // sco of elem klass 6.496 6.497 - __ br(Assembler::always, false, Assembler::pt, checkcast_copy_entry); 6.498 + __ br(Assembler::always, false, Assembler::pt, entry_checkcast_arraycopy); 6.499 __ delayed()->lduw(O4, sco_offset, O3); 6.500 } 6.501 6.502 @@ -3068,39 +3085,124 @@ 6.503 } 6.504 6.505 void generate_arraycopy_stubs() { 6.506 - 6.507 - // Note: the disjoint stubs must be generated first, some of 6.508 - // the conjoint stubs use them. 
6.509 - StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, "jbyte_disjoint_arraycopy"); 6.510 - StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy"); 6.511 - StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy(false, "jint_disjoint_arraycopy"); 6.512 - StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_copy(false, "jlong_disjoint_arraycopy"); 6.513 - StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_oop_copy(false, "oop_disjoint_arraycopy"); 6.514 - StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(true, "arrayof_jbyte_disjoint_arraycopy"); 6.515 - StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, "arrayof_jshort_disjoint_arraycopy"); 6.516 - StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy(true, "arrayof_jint_disjoint_arraycopy"); 6.517 - StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_long_copy(true, "arrayof_jlong_disjoint_arraycopy"); 6.518 - StubRoutines::_arrayof_oop_disjoint_arraycopy = generate_disjoint_oop_copy(true, "arrayof_oop_disjoint_arraycopy"); 6.519 - 6.520 - StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, "jbyte_arraycopy"); 6.521 - StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, "jshort_arraycopy"); 6.522 - StubRoutines::_jint_arraycopy = generate_conjoint_int_copy(false, "jint_arraycopy"); 6.523 - StubRoutines::_jlong_arraycopy = generate_conjoint_long_copy(false, "jlong_arraycopy"); 6.524 - StubRoutines::_oop_arraycopy = generate_conjoint_oop_copy(false, "oop_arraycopy"); 6.525 - StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_byte_copy(true, "arrayof_jbyte_arraycopy"); 6.526 - StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_short_copy(true, "arrayof_jshort_arraycopy"); 6.527 + address entry; 6.528 + address entry_jbyte_arraycopy; 6.529 + address 
entry_jshort_arraycopy; 6.530 + address entry_jint_arraycopy; 6.531 + address entry_oop_arraycopy; 6.532 + address entry_jlong_arraycopy; 6.533 + address entry_checkcast_arraycopy; 6.534 + 6.535 + //*** jbyte 6.536 + // Always need aligned and unaligned versions 6.537 + StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry, 6.538 + "jbyte_disjoint_arraycopy"); 6.539 + StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, entry, 6.540 + &entry_jbyte_arraycopy, 6.541 + "jbyte_arraycopy"); 6.542 + StubRoutines::_arrayof_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(true, &entry, 6.543 + "arrayof_jbyte_disjoint_arraycopy"); 6.544 + StubRoutines::_arrayof_jbyte_arraycopy = generate_conjoint_byte_copy(true, entry, NULL, 6.545 + "arrayof_jbyte_arraycopy"); 6.546 + 6.547 + //*** jshort 6.548 + // Always need aligned and unaligned versions 6.549 + StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry, 6.550 + "jshort_disjoint_arraycopy"); 6.551 + StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, entry, 6.552 + &entry_jshort_arraycopy, 6.553 + "jshort_arraycopy"); 6.554 + StubRoutines::_arrayof_jshort_disjoint_arraycopy = generate_disjoint_short_copy(true, &entry, 6.555 + "arrayof_jshort_disjoint_arraycopy"); 6.556 + StubRoutines::_arrayof_jshort_arraycopy = generate_conjoint_short_copy(true, entry, NULL, 6.557 + "arrayof_jshort_arraycopy"); 6.558 + 6.559 + //*** jint 6.560 + // Aligned versions 6.561 + StubRoutines::_arrayof_jint_disjoint_arraycopy = generate_disjoint_int_copy(true, &entry, 6.562 + "arrayof_jint_disjoint_arraycopy"); 6.563 + StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_int_copy(true, entry, &entry_jint_arraycopy, 6.564 + "arrayof_jint_arraycopy"); 6.565 #ifdef _LP64 6.566 - // since sizeof(jint) < sizeof(HeapWord), there's a different flavor: 6.567 - StubRoutines::_arrayof_jint_arraycopy = generate_conjoint_int_copy(true, 
"arrayof_jint_arraycopy"); 6.568 - #else 6.569 - StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy; 6.570 + // In 64 bit we need both aligned and unaligned versions of jint arraycopy. 6.571 + // entry_jint_arraycopy always points to the unaligned version (notice that we overwrite it). 6.572 + StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_copy(false, &entry, 6.573 + "jint_disjoint_arraycopy"); 6.574 + StubRoutines::_jint_arraycopy = generate_conjoint_int_copy(false, entry, 6.575 + &entry_jint_arraycopy, 6.576 + "jint_arraycopy"); 6.577 +#else 6.578 + // In 32 bit jints are always HeapWordSize aligned, so always use the aligned version 6.579 + // (in fact in 32bit we always have a pre-loop part even in the aligned version, 6.580 + // because it uses 64-bit loads/stores, so the aligned flag is actually ignored). 6.581 + StubRoutines::_jint_disjoint_arraycopy = StubRoutines::_arrayof_jint_disjoint_arraycopy; 6.582 + StubRoutines::_jint_arraycopy = StubRoutines::_arrayof_jint_arraycopy; 6.583 #endif 6.584 - StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy; 6.585 - StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy; 6.586 - 6.587 - StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy"); 6.588 - StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy"); 6.589 - StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy"); 6.590 + 6.591 + 6.592 + //*** jlong 6.593 + // It is always aligned 6.594 + StubRoutines::_arrayof_jlong_disjoint_arraycopy = generate_disjoint_long_copy(true, &entry, 6.595 + "arrayof_jlong_disjoint_arraycopy"); 6.596 + StubRoutines::_arrayof_jlong_arraycopy = generate_conjoint_long_copy(true, entry, &entry_jlong_arraycopy, 6.597 + "arrayof_jlong_arraycopy"); 6.598 + StubRoutines::_jlong_disjoint_arraycopy = StubRoutines::_arrayof_jlong_disjoint_arraycopy; 6.599 + StubRoutines::_jlong_arraycopy = 
StubRoutines::_arrayof_jlong_arraycopy; 6.600 + 6.601 + 6.602 + //*** oops 6.603 + // Aligned versions 6.604 + StubRoutines::_arrayof_oop_disjoint_arraycopy = generate_disjoint_oop_copy(true, &entry, 6.605 + "arrayof_oop_disjoint_arraycopy"); 6.606 + StubRoutines::_arrayof_oop_arraycopy = generate_conjoint_oop_copy(true, entry, &entry_oop_arraycopy, 6.607 + "arrayof_oop_arraycopy"); 6.608 + // Aligned versions without pre-barriers 6.609 + StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(true, &entry, 6.610 + "arrayof_oop_disjoint_arraycopy_uninit", 6.611 + /*dest_uninitialized*/true); 6.612 + StubRoutines::_arrayof_oop_arraycopy_uninit = generate_conjoint_oop_copy(true, entry, NULL, 6.613 + "arrayof_oop_arraycopy_uninit", 6.614 + /*dest_uninitialized*/true); 6.615 +#ifdef _LP64 6.616 + if (UseCompressedOops) { 6.617 + // With compressed oops we need unaligned versions, notice that we overwrite entry_oop_arraycopy. 6.618 + StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_oop_copy(false, &entry, 6.619 + "oop_disjoint_arraycopy"); 6.620 + StubRoutines::_oop_arraycopy = generate_conjoint_oop_copy(false, entry, &entry_oop_arraycopy, 6.621 + "oop_arraycopy"); 6.622 + // Unaligned versions without pre-barriers 6.623 + StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_oop_copy(false, &entry, 6.624 + "oop_disjoint_arraycopy_uninit", 6.625 + /*dest_uninitialized*/true); 6.626 + StubRoutines::_oop_arraycopy_uninit = generate_conjoint_oop_copy(false, entry, NULL, 6.627 + "oop_arraycopy_uninit", 6.628 + /*dest_uninitialized*/true); 6.629 + } else 6.630 +#endif 6.631 + { 6.632 + // oop arraycopy is always aligned on 32bit and 64bit without compressed oops 6.633 + StubRoutines::_oop_disjoint_arraycopy = StubRoutines::_arrayof_oop_disjoint_arraycopy; 6.634 + StubRoutines::_oop_arraycopy = StubRoutines::_arrayof_oop_arraycopy; 6.635 + StubRoutines::_oop_disjoint_arraycopy_uninit = 
StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit; 6.636 + StubRoutines::_oop_arraycopy_uninit = StubRoutines::_arrayof_oop_arraycopy_uninit; 6.637 + } 6.638 + 6.639 + StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy); 6.640 + StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL, 6.641 + /*dest_uninitialized*/true); 6.642 + 6.643 + StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy", 6.644 + entry_jbyte_arraycopy, 6.645 + entry_jshort_arraycopy, 6.646 + entry_jint_arraycopy, 6.647 + entry_jlong_arraycopy); 6.648 + StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy", 6.649 + entry_jbyte_arraycopy, 6.650 + entry_jshort_arraycopy, 6.651 + entry_jint_arraycopy, 6.652 + entry_oop_arraycopy, 6.653 + entry_jlong_arraycopy, 6.654 + entry_checkcast_arraycopy); 6.655 6.656 StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill"); 6.657 StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill"); 6.658 @@ -3224,21 +3326,6 @@ 6.659 6.660 }; // end class declaration 6.661 6.662 - 6.663 -address StubGenerator::disjoint_byte_copy_entry = NULL; 6.664 -address StubGenerator::disjoint_short_copy_entry = NULL; 6.665 -address StubGenerator::disjoint_int_copy_entry = NULL; 6.666 -address StubGenerator::disjoint_long_copy_entry = NULL; 6.667 -address StubGenerator::disjoint_oop_copy_entry = NULL; 6.668 - 6.669 -address StubGenerator::byte_copy_entry = NULL; 6.670 -address StubGenerator::short_copy_entry = NULL; 6.671 -address StubGenerator::int_copy_entry = NULL; 6.672 -address StubGenerator::long_copy_entry = NULL; 6.673 -address StubGenerator::oop_copy_entry = NULL; 6.674 - 6.675 -address StubGenerator::checkcast_copy_entry = NULL; 6.676 - 6.677 void StubGenerator_generate(CodeBuffer* code, bool all) { 6.678 StubGenerator g(code, all); 6.679 }
7.1 --- a/src/cpu/x86/vm/assembler_x86.cpp Thu Mar 03 15:13:18 2011 -0800 7.2 +++ b/src/cpu/x86/vm/assembler_x86.cpp Fri Mar 04 14:06:16 2011 -0800 7.3 @@ -1601,6 +1601,17 @@ 7.4 emit_byte(0xC0 | encode); 7.5 } 7.6 7.7 +void Assembler::movdl(XMMRegister dst, Address src) { 7.8 + NOT_LP64(assert(VM_Version::supports_sse2(), "")); 7.9 + InstructionMark im(this); 7.10 + emit_byte(0x66); 7.11 + prefix(src, dst); 7.12 + emit_byte(0x0F); 7.13 + emit_byte(0x6E); 7.14 + emit_operand(dst, src); 7.15 +} 7.16 + 7.17 + 7.18 void Assembler::movdqa(XMMRegister dst, Address src) { 7.19 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 7.20 InstructionMark im(this); 7.21 @@ -2412,7 +2423,10 @@ 7.22 } 7.23 7.24 void Assembler::psrlq(XMMRegister dst, int shift) { 7.25 - // HMM Table D-1 says sse2 or mmx 7.26 + // Shift 64 bit value logically right by specified number of bits. 7.27 + // HMM Table D-1 says sse2 or mmx. 7.28 + // Do not confuse it with psrldq SSE2 instruction which 7.29 + // shifts 128 bit value in xmm register by number of bytes. 7.30 NOT_LP64(assert(VM_Version::supports_sse(), "")); 7.31 7.32 int encode = prefixq_and_encode(xmm2->encoding(), dst->encoding()); 7.33 @@ -2423,6 +2437,18 @@ 7.34 emit_byte(shift); 7.35 } 7.36 7.37 +void Assembler::psrldq(XMMRegister dst, int shift) { 7.38 + // Shift 128 bit value in xmm register by number of bytes. 7.39 + NOT_LP64(assert(VM_Version::supports_sse2(), "")); 7.40 + 7.41 + int encode = prefixq_and_encode(xmm3->encoding(), dst->encoding()); 7.42 + emit_byte(0x66); 7.43 + emit_byte(0x0F); 7.44 + emit_byte(0x73); 7.45 + emit_byte(0xC0 | encode); 7.46 + emit_byte(shift); 7.47 +} 7.48 + 7.49 void Assembler::ptest(XMMRegister dst, Address src) { 7.50 assert(VM_Version::supports_sse4_1(), ""); 7.51 7.52 @@ -8567,101 +8593,418 @@ 7.53 } 7.54 #endif // _LP64 7.55 7.56 -// IndexOf substring. 
7.57 -void MacroAssembler::string_indexof(Register str1, Register str2, 7.58 - Register cnt1, Register cnt2, Register result, 7.59 - XMMRegister vec, Register tmp) { 7.60 +// IndexOf for constant substrings with size >= 8 chars 7.61 +// which don't need to be loaded through stack. 7.62 +void MacroAssembler::string_indexofC8(Register str1, Register str2, 7.63 + Register cnt1, Register cnt2, 7.64 + int int_cnt2, Register result, 7.65 + XMMRegister vec, Register tmp) { 7.66 assert(UseSSE42Intrinsics, "SSE4.2 is required"); 7.67 7.68 - Label RELOAD_SUBSTR, PREP_FOR_SCAN, SCAN_TO_SUBSTR, 7.69 - SCAN_SUBSTR, RET_NOT_FOUND, CLEANUP; 7.70 - 7.71 - push(str1); // string addr 7.72 - push(str2); // substr addr 7.73 - push(cnt2); // substr count 7.74 - jmpb(PREP_FOR_SCAN); 7.75 - 7.76 - // Substr count saved at sp 7.77 - // Substr saved at sp+1*wordSize 7.78 - // String saved at sp+2*wordSize 7.79 - 7.80 - // Reload substr for rescan 7.81 - bind(RELOAD_SUBSTR); 7.82 - movl(cnt2, Address(rsp, 0)); 7.83 - movptr(str2, Address(rsp, wordSize)); 7.84 - // We came here after the beginninig of the substring was 7.85 - // matched but the rest of it was not so we need to search 7.86 - // again. Start from the next element after the previous match. 
7.87 - subptr(str1, result); // Restore counter 7.88 - shrl(str1, 1); 7.89 - addl(cnt1, str1); 7.90 - decrementl(cnt1); 7.91 - lea(str1, Address(result, 2)); // Reload string 7.92 - 7.93 - // Load substr 7.94 - bind(PREP_FOR_SCAN); 7.95 - movdqu(vec, Address(str2, 0)); 7.96 - addl(cnt1, 8); // prime the loop 7.97 - subptr(str1, 16); 7.98 - 7.99 - // Scan string for substr in 16-byte vectors 7.100 - bind(SCAN_TO_SUBSTR); 7.101 - subl(cnt1, 8); 7.102 - addptr(str1, 16); 7.103 - 7.104 - // pcmpestri 7.105 + // This method uses pcmpestri instruction with bound registers 7.106 // inputs: 7.107 // xmm - substring 7.108 // rax - substring length (elements count) 7.109 - // mem - scaned string 7.110 + // mem - scanned string 7.111 // rdx - string length (elements count) 7.112 // 0xd - mode: 1100 (substring search) + 01 (unsigned shorts) 7.113 // outputs: 7.114 // rcx - matched index in string 7.115 assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri"); 7.116 7.117 - pcmpestri(vec, Address(str1, 0), 0x0d); 7.118 - jcc(Assembler::above, SCAN_TO_SUBSTR); // CF == 0 && ZF == 0 7.119 - jccb(Assembler::aboveEqual, RET_NOT_FOUND); // CF == 0 7.120 - 7.121 - // Fallthrough: found a potential substr 7.122 + Label RELOAD_SUBSTR, SCAN_TO_SUBSTR, SCAN_SUBSTR, 7.123 + RET_FOUND, RET_NOT_FOUND, EXIT, FOUND_SUBSTR, 7.124 + MATCH_SUBSTR_HEAD, RELOAD_STR, FOUND_CANDIDATE; 7.125 + 7.126 + // Note, inline_string_indexOf() generates checks: 7.127 + // if (substr.count > string.count) return -1; 7.128 + // if (substr.count == 0) return 0; 7.129 + assert(int_cnt2 >= 8, "this code isused only for cnt2 >= 8 chars"); 7.130 + 7.131 + // Load substring. 
7.132 + movdqu(vec, Address(str2, 0)); 7.133 + movl(cnt2, int_cnt2); 7.134 + movptr(result, str1); // string addr 7.135 + 7.136 + if (int_cnt2 > 8) { 7.137 + jmpb(SCAN_TO_SUBSTR); 7.138 + 7.139 + // Reload substr for rescan, this code 7.140 + // is executed only for large substrings (> 8 chars) 7.141 + bind(RELOAD_SUBSTR); 7.142 + movdqu(vec, Address(str2, 0)); 7.143 + negptr(cnt2); // Jumped here with negative cnt2, convert to positive 7.144 + 7.145 + bind(RELOAD_STR); 7.146 + // We came here after the beginning of the substring was 7.147 + // matched but the rest of it was not so we need to search 7.148 + // again. Start from the next element after the previous match. 7.149 + 7.150 + // cnt2 is number of substring remaining elements and 7.151 + // cnt1 is number of string remaining elements when cmp failed. 7.152 + // Restored cnt1 = cnt1 - cnt2 + int_cnt2 7.153 + subl(cnt1, cnt2); 7.154 + addl(cnt1, int_cnt2); 7.155 + movl(cnt2, int_cnt2); // Now restore cnt2 7.156 + 7.157 + decrementl(cnt1); // Shift to next element 7.158 + cmpl(cnt1, cnt2); 7.159 + jccb(Assembler::negative, RET_NOT_FOUND); // Left less than substring 7.160 + 7.161 + addptr(result, 2); 7.162 + 7.163 + } // (int_cnt2 > 8) 7.164 + 7.165 + // Scan string for start of substr in 16-byte vectors 7.166 + bind(SCAN_TO_SUBSTR); 7.167 + pcmpestri(vec, Address(result, 0), 0x0d); 7.168 + jccb(Assembler::below, FOUND_CANDIDATE); // CF == 1 7.169 + subl(cnt1, 8); 7.170 + jccb(Assembler::lessEqual, RET_NOT_FOUND); // Scanned full string 7.171 + cmpl(cnt1, cnt2); 7.172 + jccb(Assembler::negative, RET_NOT_FOUND); // Left less than substring 7.173 + addptr(result, 16); 7.174 + jmpb(SCAN_TO_SUBSTR); 7.175 + 7.176 + // Found a potential substr 7.177 + bind(FOUND_CANDIDATE); 7.178 + // Matched whole vector if first element matched (tmp(rcx) == 0). 
7.179 + if (int_cnt2 == 8) { 7.180 + jccb(Assembler::overflow, RET_FOUND); // OF == 1 7.181 + } else { // int_cnt2 > 8 7.182 + jccb(Assembler::overflow, FOUND_SUBSTR); 7.183 + } 7.184 + // After pcmpestri tmp(rcx) contains matched element index 7.185 + // Compute start addr of substr 7.186 + lea(result, Address(result, tmp, Address::times_2)); 7.187 7.188 // Make sure string is still long enough 7.189 subl(cnt1, tmp); 7.190 cmpl(cnt1, cnt2); 7.191 - jccb(Assembler::negative, RET_NOT_FOUND); 7.192 - // Compute start addr of substr 7.193 - lea(str1, Address(str1, tmp, Address::times_2)); 7.194 - movptr(result, str1); // save 7.195 - 7.196 - // Compare potential substr 7.197 - addl(cnt1, 8); // prime the loop 7.198 - addl(cnt2, 8); 7.199 - subptr(str1, 16); 7.200 - subptr(str2, 16); 7.201 - 7.202 - // Scan 16-byte vectors of string and substr 7.203 - bind(SCAN_SUBSTR); 7.204 - subl(cnt1, 8); 7.205 - subl(cnt2, 8); 7.206 - addptr(str1, 16); 7.207 - addptr(str2, 16); 7.208 - movdqu(vec, Address(str2, 0)); 7.209 - pcmpestri(vec, Address(str1, 0), 0x0d); 7.210 - jcc(Assembler::noOverflow, RELOAD_SUBSTR); // OF == 0 7.211 - jcc(Assembler::positive, SCAN_SUBSTR); // SF == 0 7.212 - 7.213 - // Compute substr offset 7.214 - subptr(result, Address(rsp, 2*wordSize)); 7.215 - shrl(result, 1); // index 7.216 - jmpb(CLEANUP); 7.217 + if (int_cnt2 == 8) { 7.218 + jccb(Assembler::greaterEqual, SCAN_TO_SUBSTR); 7.219 + } else { // int_cnt2 > 8 7.220 + jccb(Assembler::greaterEqual, MATCH_SUBSTR_HEAD); 7.221 + } 7.222 + // Left less than substring. 7.223 7.224 bind(RET_NOT_FOUND); 7.225 movl(result, -1); 7.226 + jmpb(EXIT); 7.227 + 7.228 + if (int_cnt2 > 8) { 7.229 + // This code is optimized for the case when whole substring 7.230 + // is matched if its head is matched. 
7.231 + bind(MATCH_SUBSTR_HEAD); 7.232 + pcmpestri(vec, Address(result, 0), 0x0d); 7.233 + // Reload only string if does not match 7.234 + jccb(Assembler::noOverflow, RELOAD_STR); // OF == 0 7.235 + 7.236 + Label CONT_SCAN_SUBSTR; 7.237 + // Compare the rest of substring (> 8 chars). 7.238 + bind(FOUND_SUBSTR); 7.239 + // First 8 chars are already matched. 7.240 + negptr(cnt2); 7.241 + addptr(cnt2, 8); 7.242 + 7.243 + bind(SCAN_SUBSTR); 7.244 + subl(cnt1, 8); 7.245 + cmpl(cnt2, -8); // Do not read beyond substring 7.246 + jccb(Assembler::lessEqual, CONT_SCAN_SUBSTR); 7.247 + // Back-up strings to avoid reading beyond substring: 7.248 + // cnt1 = cnt1 - cnt2 + 8 7.249 + addl(cnt1, cnt2); // cnt2 is negative 7.250 + addl(cnt1, 8); 7.251 + movl(cnt2, 8); negptr(cnt2); 7.252 + bind(CONT_SCAN_SUBSTR); 7.253 + if (int_cnt2 < (int)G) { 7.254 + movdqu(vec, Address(str2, cnt2, Address::times_2, int_cnt2*2)); 7.255 + pcmpestri(vec, Address(result, cnt2, Address::times_2, int_cnt2*2), 0x0d); 7.256 + } else { 7.257 + // calculate index in register to avoid integer overflow (int_cnt2*2) 7.258 + movl(tmp, int_cnt2); 7.259 + addptr(tmp, cnt2); 7.260 + movdqu(vec, Address(str2, tmp, Address::times_2, 0)); 7.261 + pcmpestri(vec, Address(result, tmp, Address::times_2, 0), 0x0d); 7.262 + } 7.263 + // Need to reload strings pointers if not matched whole vector 7.264 + jccb(Assembler::noOverflow, RELOAD_SUBSTR); // OF == 0 7.265 + addptr(cnt2, 8); 7.266 + jccb(Assembler::negative, SCAN_SUBSTR); 7.267 + // Fall through if found full substring 7.268 + 7.269 + } // (int_cnt2 > 8) 7.270 + 7.271 + bind(RET_FOUND); 7.272 + // Found result if we matched full small substring. 7.273 + // Compute substr offset 7.274 + subptr(result, str1); 7.275 + shrl(result, 1); // index 7.276 + bind(EXIT); 7.277 + 7.278 +} // string_indexofC8 7.279 + 7.280 +// Small strings are loaded through stack if they cross page boundary. 
7.281 +void MacroAssembler::string_indexof(Register str1, Register str2, 7.282 + Register cnt1, Register cnt2, 7.283 + int int_cnt2, Register result, 7.284 + XMMRegister vec, Register tmp) { 7.285 + assert(UseSSE42Intrinsics, "SSE4.2 is required"); 7.286 + // 7.287 + // int_cnt2 is length of small (< 8 chars) constant substring 7.288 + // or (-1) for non constant substring in which case its length 7.289 + // is in cnt2 register. 7.290 + // 7.291 + // Note, inline_string_indexOf() generates checks: 7.292 + // if (substr.count > string.count) return -1; 7.293 + // if (substr.count == 0) return 0; 7.294 + // 7.295 + assert(int_cnt2 == -1 || (0 < int_cnt2 && int_cnt2 < 8), "should be != 0"); 7.296 + 7.297 + // This method uses pcmpestri instruction with bound registers 7.298 + // inputs: 7.299 + // xmm - substring 7.300 + // rax - substring length (elements count) 7.301 + // mem - scanned string 7.302 + // rdx - string length (elements count) 7.303 + // 0xd - mode: 1100 (substring search) + 01 (unsigned shorts) 7.304 + // outputs: 7.305 + // rcx - matched index in string 7.306 + assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri"); 7.307 + 7.308 + Label RELOAD_SUBSTR, SCAN_TO_SUBSTR, SCAN_SUBSTR, ADJUST_STR, 7.309 + RET_FOUND, RET_NOT_FOUND, CLEANUP, FOUND_SUBSTR, 7.310 + FOUND_CANDIDATE; 7.311 + 7.312 + { //======================================================== 7.313 + // We don't know where these strings are located 7.314 + // and we can't read beyond them. Load them through stack. 
7.315 + Label BIG_STRINGS, CHECK_STR, COPY_SUBSTR, COPY_STR; 7.316 + 7.317 + movptr(tmp, rsp); // save old SP 7.318 + 7.319 + if (int_cnt2 > 0) { // small (< 8 chars) constant substring 7.320 + if (int_cnt2 == 1) { // One char 7.321 + load_unsigned_short(result, Address(str2, 0)); 7.322 + movdl(vec, result); // move 32 bits 7.323 + } else if (int_cnt2 == 2) { // Two chars 7.324 + movdl(vec, Address(str2, 0)); // move 32 bits 7.325 + } else if (int_cnt2 == 4) { // Four chars 7.326 + movq(vec, Address(str2, 0)); // move 64 bits 7.327 + } else { // cnt2 = { 3, 5, 6, 7 } 7.328 + // Array header size is 12 bytes in 32-bit VM 7.329 + // + 6 bytes for 3 chars == 18 bytes, 7.330 + // enough space to load vec and shift. 7.331 + assert(HeapWordSize*typeArrayKlass::header_size() >= 12,"sanity"); 7.332 + movdqu(vec, Address(str2, (int_cnt2*2)-16)); 7.333 + psrldq(vec, 16-(int_cnt2*2)); 7.334 + } 7.335 + } else { // not constant substring 7.336 + cmpl(cnt2, 8); 7.337 + jccb(Assembler::aboveEqual, BIG_STRINGS); // Both strings are big enough 7.338 + 7.339 + // We can read beyond string if str+16 does not cross page boundary 7.340 + // since heaps are aligned and mapped by pages. 7.341 + assert(os::vm_page_size() < (int)G, "default page should be small"); 7.342 + movl(result, str2); // We need only low 32 bits 7.343 + andl(result, (os::vm_page_size()-1)); 7.344 + cmpl(result, (os::vm_page_size()-16)); 7.345 + jccb(Assembler::belowEqual, CHECK_STR); 7.346 + 7.347 + // Move small strings to stack to allow load 16 bytes into vec. 
7.348 + subptr(rsp, 16); 7.349 + int stk_offset = wordSize-2; 7.350 + push(cnt2); 7.351 + 7.352 + bind(COPY_SUBSTR); 7.353 + load_unsigned_short(result, Address(str2, cnt2, Address::times_2, -2)); 7.354 + movw(Address(rsp, cnt2, Address::times_2, stk_offset), result); 7.355 + decrement(cnt2); 7.356 + jccb(Assembler::notZero, COPY_SUBSTR); 7.357 + 7.358 + pop(cnt2); 7.359 + movptr(str2, rsp); // New substring address 7.360 + } // non constant 7.361 + 7.362 + bind(CHECK_STR); 7.363 + cmpl(cnt1, 8); 7.364 + jccb(Assembler::aboveEqual, BIG_STRINGS); 7.365 + 7.366 + // Check cross page boundary. 7.367 + movl(result, str1); // We need only low 32 bits 7.368 + andl(result, (os::vm_page_size()-1)); 7.369 + cmpl(result, (os::vm_page_size()-16)); 7.370 + jccb(Assembler::belowEqual, BIG_STRINGS); 7.371 + 7.372 + subptr(rsp, 16); 7.373 + int stk_offset = -2; 7.374 + if (int_cnt2 < 0) { // not constant 7.375 + push(cnt2); 7.376 + stk_offset += wordSize; 7.377 + } 7.378 + movl(cnt2, cnt1); 7.379 + 7.380 + bind(COPY_STR); 7.381 + load_unsigned_short(result, Address(str1, cnt2, Address::times_2, -2)); 7.382 + movw(Address(rsp, cnt2, Address::times_2, stk_offset), result); 7.383 + decrement(cnt2); 7.384 + jccb(Assembler::notZero, COPY_STR); 7.385 + 7.386 + if (int_cnt2 < 0) { // not constant 7.387 + pop(cnt2); 7.388 + } 7.389 + movptr(str1, rsp); // New string address 7.390 + 7.391 + bind(BIG_STRINGS); 7.392 + // Load substring. 7.393 + if (int_cnt2 < 0) { // -1 7.394 + movdqu(vec, Address(str2, 0)); 7.395 + push(cnt2); // substr count 7.396 + push(str2); // substr addr 7.397 + push(str1); // string addr 7.398 + } else { 7.399 + // Small (< 8 chars) constant substrings are loaded already. 
7.400 + movl(cnt2, int_cnt2); 7.401 + } 7.402 + push(tmp); // original SP 7.403 + 7.404 + } // Finished loading 7.405 + 7.406 + //======================================================== 7.407 + // Start search 7.408 + // 7.409 + 7.410 + movptr(result, str1); // string addr 7.411 + 7.412 + if (int_cnt2 < 0) { // Only for non constant substring 7.413 + jmpb(SCAN_TO_SUBSTR); 7.414 + 7.415 + // SP saved at sp+0 7.416 + // String saved at sp+1*wordSize 7.417 + // Substr saved at sp+2*wordSize 7.418 + // Substr count saved at sp+3*wordSize 7.419 + 7.420 + // Reload substr for rescan, this code 7.421 + // is executed only for large substrings (> 8 chars) 7.422 + bind(RELOAD_SUBSTR); 7.423 + movptr(str2, Address(rsp, 2*wordSize)); 7.424 + movl(cnt2, Address(rsp, 3*wordSize)); 7.425 + movdqu(vec, Address(str2, 0)); 7.426 + // We came here after the beginning of the substring was 7.427 + // matched but the rest of it was not so we need to search 7.428 + // again. Start from the next element after the previous match. 
7.429 + subptr(str1, result); // Restore counter 7.430 + shrl(str1, 1); 7.431 + addl(cnt1, str1); 7.432 + decrementl(cnt1); // Shift to next element 7.433 + cmpl(cnt1, cnt2); 7.434 + jccb(Assembler::negative, RET_NOT_FOUND); // Left less than substring 7.435 + 7.436 + addptr(result, 2); 7.437 + } // non constant 7.438 + 7.439 + // Scan string for start of substr in 16-byte vectors 7.440 + bind(SCAN_TO_SUBSTR); 7.441 + assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri"); 7.442 + pcmpestri(vec, Address(result, 0), 0x0d); 7.443 + jccb(Assembler::below, FOUND_CANDIDATE); // CF == 1 7.444 + subl(cnt1, 8); 7.445 + jccb(Assembler::lessEqual, RET_NOT_FOUND); // Scanned full string 7.446 + cmpl(cnt1, cnt2); 7.447 + jccb(Assembler::negative, RET_NOT_FOUND); // Left less than substring 7.448 + addptr(result, 16); 7.449 + 7.450 + bind(ADJUST_STR); 7.451 + cmpl(cnt1, 8); // Do not read beyond string 7.452 + jccb(Assembler::greaterEqual, SCAN_TO_SUBSTR); 7.453 + // Back-up string to avoid reading beyond string. 7.454 + lea(result, Address(result, cnt1, Address::times_2, -16)); 7.455 + movl(cnt1, 8); 7.456 + jmpb(SCAN_TO_SUBSTR); 7.457 + 7.458 + // Found a potential substr 7.459 + bind(FOUND_CANDIDATE); 7.460 + // After pcmpestri tmp(rcx) contains matched element index 7.461 + 7.462 + // Make sure string is still long enough 7.463 + subl(cnt1, tmp); 7.464 + cmpl(cnt1, cnt2); 7.465 + jccb(Assembler::greaterEqual, FOUND_SUBSTR); 7.466 + // Left less than substring. 7.467 + 7.468 + bind(RET_NOT_FOUND); 7.469 + movl(result, -1); 7.470 + jmpb(CLEANUP); 7.471 + 7.472 + bind(FOUND_SUBSTR); 7.473 + // Compute start addr of substr 7.474 + lea(result, Address(result, tmp, Address::times_2)); 7.475 + 7.476 + if (int_cnt2 > 0) { // Constant substring 7.477 + // Repeat search for small substring (< 8 chars) 7.478 + // from new point without reloading substring. 7.479 + // Have to check that we don't read beyond string. 
7.480 + cmpl(tmp, 8-int_cnt2); 7.481 + jccb(Assembler::greater, ADJUST_STR); 7.482 + // Fall through if matched whole substring. 7.483 + } else { // non constant 7.484 + assert(int_cnt2 == -1, "should be != 0"); 7.485 + 7.486 + addl(tmp, cnt2); 7.487 + // Found result if we matched whole substring. 7.488 + cmpl(tmp, 8); 7.489 + jccb(Assembler::lessEqual, RET_FOUND); 7.490 + 7.491 + // Repeat search for small substring (<= 8 chars) 7.492 + // from new point 'str1' without reloading substring. 7.493 + cmpl(cnt2, 8); 7.494 + // Have to check that we don't read beyond string. 7.495 + jccb(Assembler::lessEqual, ADJUST_STR); 7.496 + 7.497 + Label CHECK_NEXT, CONT_SCAN_SUBSTR, RET_FOUND_LONG; 7.498 + // Compare the rest of substring (> 8 chars). 7.499 + movptr(str1, result); 7.500 + 7.501 + cmpl(tmp, cnt2); 7.502 + // First 8 chars are already matched. 7.503 + jccb(Assembler::equal, CHECK_NEXT); 7.504 + 7.505 + bind(SCAN_SUBSTR); 7.506 + pcmpestri(vec, Address(str1, 0), 0x0d); 7.507 + // Need to reload strings pointers if not matched whole vector 7.508 + jcc(Assembler::noOverflow, RELOAD_SUBSTR); // OF == 0 7.509 + 7.510 + bind(CHECK_NEXT); 7.511 + subl(cnt2, 8); 7.512 + jccb(Assembler::lessEqual, RET_FOUND_LONG); // Found full substring 7.513 + addptr(str1, 16); 7.514 + addptr(str2, 16); 7.515 + subl(cnt1, 8); 7.516 + cmpl(cnt2, 8); // Do not read beyond substring 7.517 + jccb(Assembler::greaterEqual, CONT_SCAN_SUBSTR); 7.518 + // Back-up strings to avoid reading beyond substring. 
7.519 + lea(str2, Address(str2, cnt2, Address::times_2, -16)); 7.520 + lea(str1, Address(str1, cnt2, Address::times_2, -16)); 7.521 + subl(cnt1, cnt2); 7.522 + movl(cnt2, 8); 7.523 + addl(cnt1, 8); 7.524 + bind(CONT_SCAN_SUBSTR); 7.525 + movdqu(vec, Address(str2, 0)); 7.526 + jmpb(SCAN_SUBSTR); 7.527 + 7.528 + bind(RET_FOUND_LONG); 7.529 + movptr(str1, Address(rsp, wordSize)); 7.530 + } // non constant 7.531 + 7.532 + bind(RET_FOUND); 7.533 + // Compute substr offset 7.534 + subptr(result, str1); 7.535 + shrl(result, 1); // index 7.536 7.537 bind(CLEANUP); 7.538 - addptr(rsp, 3*wordSize); 7.539 -} 7.540 + pop(rsp); // restore SP 7.541 + 7.542 +} // string_indexof 7.543 7.544 // Compare strings. 7.545 void MacroAssembler::string_compare(Register str1, Register str2,
8.1 --- a/src/cpu/x86/vm/assembler_x86.hpp Thu Mar 03 15:13:18 2011 -0800 8.2 +++ b/src/cpu/x86/vm/assembler_x86.hpp Fri Mar 04 14:06:16 2011 -0800 8.3 @@ -1121,6 +1121,7 @@ 8.4 8.5 void movdl(XMMRegister dst, Register src); 8.6 void movdl(Register dst, XMMRegister src); 8.7 + void movdl(XMMRegister dst, Address src); 8.8 8.9 // Move Double Quadword 8.10 void movdq(XMMRegister dst, Register src); 8.11 @@ -1288,9 +1289,12 @@ 8.12 void pshuflw(XMMRegister dst, XMMRegister src, int mode); 8.13 void pshuflw(XMMRegister dst, Address src, int mode); 8.14 8.15 - // Shift Right Logical Quadword Immediate 8.16 + // Shift Right by bits Logical Quadword Immediate 8.17 void psrlq(XMMRegister dst, int shift); 8.18 8.19 + // Shift Right by bytes Logical DoubleQuadword Immediate 8.20 + void psrldq(XMMRegister dst, int shift); 8.21 + 8.22 // Logical Compare Double Quadword 8.23 void ptest(XMMRegister dst, XMMRegister src); 8.24 void ptest(XMMRegister dst, Address src); 8.25 @@ -2290,10 +2294,22 @@ 8.26 void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); } 8.27 8.28 // IndexOf strings. 8.29 + // Small strings are loaded through stack if they cross page boundary. 8.30 void string_indexof(Register str1, Register str2, 8.31 - Register cnt1, Register cnt2, Register result, 8.32 + Register cnt1, Register cnt2, 8.33 + int int_cnt2, Register result, 8.34 XMMRegister vec, Register tmp); 8.35 8.36 + // IndexOf for constant substrings with size >= 8 elements 8.37 + // which don't need to be loaded through stack. 8.38 + void string_indexofC8(Register str1, Register str2, 8.39 + Register cnt1, Register cnt2, 8.40 + int int_cnt2, Register result, 8.41 + XMMRegister vec, Register tmp); 8.42 + 8.43 + // Smallest code: we don't need to load through stack, 8.44 + // check string tail. 8.45 + 8.46 // Compare strings. 8.47 void string_compare(Register str1, Register str2, 8.48 Register cnt1, Register cnt2, Register result,
9.1 --- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Thu Mar 03 15:13:18 2011 -0800 9.2 +++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Fri Mar 04 14:06:16 2011 -0800 9.3 @@ -456,10 +456,8 @@ 9.4 __ verify_not_null_oop(rax); 9.5 9.6 // search an exception handler (rax: exception oop, rdx: throwing pc) 9.7 - __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_nofpu_id))); 9.8 - 9.9 - __ stop("should not reach here"); 9.10 - 9.11 + __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id))); 9.12 + __ should_not_reach_here(); 9.13 assert(code_offset() - offset <= exception_handler_size, "overflow"); 9.14 __ end_a_stub(); 9.15
10.1 --- a/src/cpu/x86/vm/c1_Runtime1_x86.cpp Thu Mar 03 15:13:18 2011 -0800 10.2 +++ b/src/cpu/x86/vm/c1_Runtime1_x86.cpp Fri Mar 04 14:06:16 2011 -0800 10.3 @@ -248,11 +248,14 @@ 10.4 #ifdef _LP64 10.5 align_dummy_0, align_dummy_1, 10.6 #endif // _LP64 10.7 - dummy1, SLOT2(dummy1H) // 0, 4 10.8 - dummy2, SLOT2(dummy2H) // 8, 12 10.9 - // Two temps to be used as needed by users of save/restore callee registers 10.10 - temp_2_off, SLOT2(temp_2H_off) // 16, 20 10.11 - temp_1_off, SLOT2(temp_1H_off) // 24, 28 10.12 +#ifdef _WIN64 10.13 + // Windows always allocates space for it's argument registers (see 10.14 + // frame::arg_reg_save_area_bytes). 10.15 + arg_reg_save_1, arg_reg_save_1H, // 0, 4 10.16 + arg_reg_save_2, arg_reg_save_2H, // 8, 12 10.17 + arg_reg_save_3, arg_reg_save_3H, // 16, 20 10.18 + arg_reg_save_4, arg_reg_save_4H, // 24, 28 10.19 +#endif // _WIN64 10.20 xmm_regs_as_doubles_off, // 32 10.21 float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_slots, // 160 10.22 fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_slots, // 224 10.23 @@ -282,24 +285,7 @@ 10.24 rax_off, SLOT2(raxH_off) // 480, 484 10.25 saved_rbp_off, SLOT2(saved_rbpH_off) // 488, 492 10.26 return_off, SLOT2(returnH_off) // 496, 500 10.27 - reg_save_frame_size, // As noted: neglects any parameters to runtime // 504 10.28 - 10.29 -#ifdef _WIN64 10.30 - c_rarg0_off = rcx_off, 10.31 -#else 10.32 - c_rarg0_off = rdi_off, 10.33 -#endif // WIN64 10.34 - 10.35 - // equates 10.36 - 10.37 - // illegal instruction handler 10.38 - continue_dest_off = temp_1_off, 10.39 - 10.40 - // deoptimization equates 10.41 - fp0_off = float_regs_as_doubles_off, // slot for java float/double return value 10.42 - xmm0_off = xmm_regs_as_doubles_off, // slot for java float/double return value 10.43 - deopt_type = temp_2_off, // slot for type of deopt in progress 10.44 - ret_type = temp_1_off // slot for return type 10.45 + reg_save_frame_size // As noted: 
neglects any parameters to runtime // 504 10.46 }; 10.47 10.48 10.49 @@ -405,11 +391,6 @@ 10.50 bool save_fpu_registers = true) { 10.51 __ block_comment("save_live_registers"); 10.52 10.53 - // 64bit passes the args in regs to the c++ runtime 10.54 - int frame_size_in_slots = reg_save_frame_size NOT_LP64(+ num_rt_args); // args + thread 10.55 - // frame_size = round_to(frame_size, 4); 10.56 - sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word ); 10.57 - 10.58 __ pusha(); // integer registers 10.59 10.60 // assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset"); 10.61 @@ -642,19 +623,58 @@ 10.62 } 10.63 10.64 10.65 -void Runtime1::generate_handle_exception(StubAssembler *sasm, OopMapSet* oop_maps, OopMap* oop_map, bool save_fpu_registers) { 10.66 +OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) { 10.67 + __ block_comment("generate_handle_exception"); 10.68 + 10.69 // incoming parameters 10.70 const Register exception_oop = rax; 10.71 - const Register exception_pc = rdx; 10.72 + const Register exception_pc = rdx; 10.73 // other registers used in this stub 10.74 - const Register real_return_addr = rbx; 10.75 const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); 10.76 10.77 - __ block_comment("generate_handle_exception"); 10.78 + // Save registers, if required. 10.79 + OopMapSet* oop_maps = new OopMapSet(); 10.80 + OopMap* oop_map = NULL; 10.81 + switch (id) { 10.82 + case forward_exception_id: 10.83 + // We're handling an exception in the context of a compiled frame. 10.84 + // The registers have been saved in the standard places. Perform 10.85 + // an exception lookup in the caller and dispatch to the handler 10.86 + // if found. Otherwise unwind and dispatch to the callers 10.87 + // exception handler. 
10.88 + oop_map = generate_oop_map(sasm, 1 /*thread*/); 10.89 + 10.90 + // load and clear pending exception oop into RAX 10.91 + __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset())); 10.92 + __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD); 10.93 + 10.94 + // load issuing PC (the return address for this stub) into rdx 10.95 + __ movptr(exception_pc, Address(rbp, 1*BytesPerWord)); 10.96 + 10.97 + // make sure that the vm_results are cleared (may be unnecessary) 10.98 + __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD); 10.99 + __ movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD); 10.100 + break; 10.101 + case handle_exception_nofpu_id: 10.102 + case handle_exception_id: 10.103 + // At this point all registers MAY be live. 10.104 + oop_map = save_live_registers(sasm, 1 /*thread*/, id == handle_exception_nofpu_id); 10.105 + break; 10.106 + case handle_exception_from_callee_id: { 10.107 + // At this point all registers except exception oop (RAX) and 10.108 + // exception pc (RDX) are dead. 
10.109 + const int frame_size = 2 /*BP, return address*/ NOT_LP64(+ 1 /*thread*/) WIN64_ONLY(+ frame::arg_reg_save_area_bytes / BytesPerWord); 10.110 + oop_map = new OopMap(frame_size * VMRegImpl::slots_per_word, 0); 10.111 + sasm->set_frame_size(frame_size); 10.112 + WIN64_ONLY(__ subq(rsp, frame::arg_reg_save_area_bytes)); 10.113 + break; 10.114 + } 10.115 + default: ShouldNotReachHere(); 10.116 + } 10.117 10.118 #ifdef TIERED 10.119 // C2 can leave the fpu stack dirty 10.120 - if (UseSSE < 2 ) { 10.121 + if (UseSSE < 2) { 10.122 __ empty_FPU_stack(); 10.123 } 10.124 #endif // TIERED 10.125 @@ -686,11 +706,7 @@ 10.126 // save exception oop and issuing pc into JavaThread 10.127 // (exception handler will load it from here) 10.128 __ movptr(Address(thread, JavaThread::exception_oop_offset()), exception_oop); 10.129 - __ movptr(Address(thread, JavaThread::exception_pc_offset()), exception_pc); 10.130 - 10.131 - // save real return address (pc that called this stub) 10.132 - __ movptr(real_return_addr, Address(rbp, 1*BytesPerWord)); 10.133 - __ movptr(Address(rsp, temp_1_off * VMRegImpl::stack_slot_size), real_return_addr); 10.134 + __ movptr(Address(thread, JavaThread::exception_pc_offset()), exception_pc); 10.135 10.136 // patch throwing pc into return address (has bci & oop map) 10.137 __ movptr(Address(rbp, 1*BytesPerWord), exception_pc); 10.138 @@ -700,33 +716,41 @@ 10.139 int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, exception_handler_for_pc)); 10.140 oop_maps->add_gc_map(call_offset, oop_map); 10.141 10.142 - // rax,: handler address 10.143 + // rax: handler address 10.144 // will be the deopt blob if nmethod was deoptimized while we looked up 10.145 // handler regardless of whether handler existed in the nmethod. 
10.146 10.147 // only rax, is valid at this time, all other registers have been destroyed by the runtime call 10.148 __ invalidate_registers(false, true, true, true, true, true); 10.149 10.150 -#ifdef ASSERT 10.151 - // Do we have an exception handler in the nmethod? 10.152 - Label done; 10.153 - __ testptr(rax, rax); 10.154 - __ jcc(Assembler::notZero, done); 10.155 - __ stop("no handler found"); 10.156 - __ bind(done); 10.157 -#endif 10.158 - 10.159 - // exception handler found 10.160 - // patch the return address -> the stub will directly return to the exception handler 10.161 + // patch the return address, this stub will directly return to the exception handler 10.162 __ movptr(Address(rbp, 1*BytesPerWord), rax); 10.163 10.164 - // restore registers 10.165 - restore_live_registers(sasm, save_fpu_registers); 10.166 + switch (id) { 10.167 + case forward_exception_id: 10.168 + case handle_exception_nofpu_id: 10.169 + case handle_exception_id: 10.170 + // Restore the registers that were saved at the beginning. 10.171 + restore_live_registers(sasm, id == handle_exception_nofpu_id); 10.172 + break; 10.173 + case handle_exception_from_callee_id: 10.174 + // WIN64_ONLY: No need to add frame::arg_reg_save_area_bytes to SP 10.175 + // since we do a leave anyway. 10.176 10.177 - // return to exception handler 10.178 - __ leave(); 10.179 - __ ret(0); 10.180 + // Pop the return address since we are possibly changing SP (restoring from BP). 10.181 + __ leave(); 10.182 + __ pop(rcx); 10.183 10.184 + // Restore SP from BP if the exception PC is a method handle call site. 
10.185 + NOT_LP64(__ get_thread(thread);) 10.186 + __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0); 10.187 + __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save); 10.188 + __ jmp(rcx); // jump to exception handler 10.189 + break; 10.190 + default: ShouldNotReachHere(); 10.191 + } 10.192 + 10.193 + return oop_maps; 10.194 } 10.195 10.196 10.197 @@ -791,7 +815,7 @@ 10.198 // the pop is also necessary to simulate the effect of a ret(0) 10.199 __ pop(exception_pc); 10.200 10.201 - // Restore SP from BP if the exception PC is a MethodHandle call site. 10.202 + // Restore SP from BP if the exception PC is a method handle call site. 10.203 NOT_LP64(__ get_thread(thread);) 10.204 __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0); 10.205 __ cmovptr(Assembler::notEqual, rsp, rbp_mh_SP_save); 10.206 @@ -934,7 +958,6 @@ 10.207 __ ret(0); 10.208 10.209 return oop_maps; 10.210 - 10.211 } 10.212 10.213 10.214 @@ -952,35 +975,9 @@ 10.215 switch (id) { 10.216 case forward_exception_id: 10.217 { 10.218 - // we're handling an exception in the context of a compiled 10.219 - // frame. The registers have been saved in the standard 10.220 - // places. Perform an exception lookup in the caller and 10.221 - // dispatch to the handler if found. Otherwise unwind and 10.222 - // dispatch to the callers exception handler. 
10.223 - 10.224 - const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); 10.225 - const Register exception_oop = rax; 10.226 - const Register exception_pc = rdx; 10.227 - 10.228 - // load pending exception oop into rax, 10.229 - __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset())); 10.230 - // clear pending exception 10.231 - __ movptr(Address(thread, Thread::pending_exception_offset()), NULL_WORD); 10.232 - 10.233 - // load issuing PC (the return address for this stub) into rdx 10.234 - __ movptr(exception_pc, Address(rbp, 1*BytesPerWord)); 10.235 - 10.236 - // make sure that the vm_results are cleared (may be unnecessary) 10.237 - __ movptr(Address(thread, JavaThread::vm_result_offset()), NULL_WORD); 10.238 - __ movptr(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD); 10.239 - 10.240 - // verify that that there is really a valid exception in rax, 10.241 - __ verify_not_null_oop(exception_oop); 10.242 - 10.243 - oop_maps = new OopMapSet(); 10.244 - OopMap* oop_map = generate_oop_map(sasm, 1); 10.245 - generate_handle_exception(sasm, oop_maps, oop_map); 10.246 - __ stop("should not reach here"); 10.247 + oop_maps = generate_handle_exception(id, sasm); 10.248 + __ leave(); 10.249 + __ ret(0); 10.250 } 10.251 break; 10.252 10.253 @@ -1315,13 +1312,15 @@ 10.254 break; 10.255 10.256 case handle_exception_nofpu_id: 10.257 - save_fpu_registers = false; 10.258 - // fall through 10.259 case handle_exception_id: 10.260 { StubFrame f(sasm, "handle_exception", dont_gc_arguments); 10.261 - oop_maps = new OopMapSet(); 10.262 - OopMap* oop_map = save_live_registers(sasm, 1, save_fpu_registers); 10.263 - generate_handle_exception(sasm, oop_maps, oop_map, save_fpu_registers); 10.264 + oop_maps = generate_handle_exception(id, sasm); 10.265 + } 10.266 + break; 10.267 + 10.268 + case handle_exception_from_callee_id: 10.269 + { StubFrame f(sasm, "handle_exception_from_callee", dont_gc_arguments); 10.270 + oop_maps = 
generate_handle_exception(id, sasm); 10.271 } 10.272 break; 10.273
11.1 --- a/src/cpu/x86/vm/methodHandles_x86.cpp Thu Mar 03 15:13:18 2011 -0800 11.2 +++ b/src/cpu/x86/vm/methodHandles_x86.cpp Fri Mar 04 14:06:16 2011 -0800 11.3 @@ -419,6 +419,7 @@ 11.4 11.5 // some handy addresses 11.6 Address rbx_method_fie( rbx, methodOopDesc::from_interpreted_offset() ); 11.7 + Address rbx_method_fce( rbx, methodOopDesc::from_compiled_offset() ); 11.8 11.9 Address rcx_mh_vmtarget( rcx_recv, java_dyn_MethodHandle::vmtarget_offset_in_bytes() ); 11.10 Address rcx_dmh_vmindex( rcx_recv, sun_dyn_DirectMethodHandle::vmindex_offset_in_bytes() ); 11.11 @@ -448,12 +449,10 @@ 11.12 case _raise_exception: 11.13 { 11.14 // Not a real MH entry, but rather shared code for raising an 11.15 - // exception. Since we use a C2I adapter to set up the 11.16 - // interpreter state, arguments are expected in compiler 11.17 - // argument registers. 11.18 + // exception. Since we use the compiled entry, arguments are 11.19 + // expected in compiler argument registers. 11.20 assert(raise_exception_method(), "must be set"); 11.21 - address c2i_entry = raise_exception_method()->get_c2i_entry(); 11.22 - assert(c2i_entry, "method must be linked"); 11.23 + assert(raise_exception_method()->from_compiled_entry(), "method must be linked"); 11.24 11.25 const Register rdi_pc = rax; 11.26 __ pop(rdi_pc); // caller PC 11.27 @@ -472,13 +471,10 @@ 11.28 __ jccb(Assembler::zero, L_no_method); 11.29 __ verify_oop(rbx_method); 11.30 11.31 - // 32-bit: push remaining arguments as if coming from the compiler. 11.32 NOT_LP64(__ push(rarg2_required)); 11.33 + __ push(rdi_pc); // restore caller PC 11.34 + __ jmp(rbx_method_fce); // jump to compiled entry 11.35 11.36 - __ push(rdi_pc); // restore caller PC 11.37 - __ jump(ExternalAddress(c2i_entry)); // do C2I transition 11.38 - 11.39 - // If we get here, the Java runtime did not do its job of creating the exception. 11.40 // Do something that is at least causes a valid throw from the interpreter. 
11.41 __ bind(L_no_method); 11.42 __ push(rarg2_required);
12.1 --- a/src/cpu/x86/vm/stubGenerator_x86_32.cpp Thu Mar 03 15:13:18 2011 -0800 12.2 +++ b/src/cpu/x86/vm/stubGenerator_x86_32.cpp Fri Mar 04 14:06:16 2011 -0800 12.3 @@ -439,10 +439,6 @@ 12.4 // Verify that there is really a valid exception in RAX. 12.5 __ verify_oop(exception_oop); 12.6 12.7 - // Restore SP from BP if the exception PC is a MethodHandle call site. 12.8 - __ cmpl(Address(thread, JavaThread::is_method_handle_return_offset()), 0); 12.9 - __ cmovptr(Assembler::notEqual, rsp, rbp); 12.10 - 12.11 // continue at exception handler (return address removed) 12.12 // rax: exception 12.13 // rbx: exception handler 12.14 @@ -733,18 +729,19 @@ 12.15 // Input: 12.16 // start - starting address 12.17 // count - element count 12.18 - void gen_write_ref_array_pre_barrier(Register start, Register count) { 12.19 + void gen_write_ref_array_pre_barrier(Register start, Register count, bool uninitialized_target) { 12.20 assert_different_registers(start, count); 12.21 BarrierSet* bs = Universe::heap()->barrier_set(); 12.22 switch (bs->kind()) { 12.23 case BarrierSet::G1SATBCT: 12.24 case BarrierSet::G1SATBCTLogging: 12.25 - { 12.26 - __ pusha(); // push registers 12.27 - __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 12.28 - start, count); 12.29 - __ popa(); 12.30 - } 12.31 + // With G1, don't generate the call if we statically know that the target in uninitialized 12.32 + if (!uninitialized_target) { 12.33 + __ pusha(); // push registers 12.34 + __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 12.35 + start, count); 12.36 + __ popa(); 12.37 + } 12.38 break; 12.39 case BarrierSet::CardTableModRef: 12.40 case BarrierSet::CardTableExtension: 12.41 @@ -923,7 +920,8 @@ 12.42 12.43 address generate_disjoint_copy(BasicType t, bool aligned, 12.44 Address::ScaleFactor sf, 12.45 - address* entry, const char *name) { 12.46 + address* entry, const char *name, 12.47 + bool dest_uninitialized = false) { 12.48 __ 
align(CodeEntryAlignment); 12.49 StubCodeMark mark(this, "StubRoutines", name); 12.50 address start = __ pc(); 12.51 @@ -945,16 +943,19 @@ 12.52 __ movptr(from , Address(rsp, 12+ 4)); 12.53 __ movptr(to , Address(rsp, 12+ 8)); 12.54 __ movl(count, Address(rsp, 12+ 12)); 12.55 + 12.56 + if (entry != NULL) { 12.57 + *entry = __ pc(); // Entry point from conjoint arraycopy stub. 12.58 + BLOCK_COMMENT("Entry:"); 12.59 + } 12.60 + 12.61 if (t == T_OBJECT) { 12.62 __ testl(count, count); 12.63 __ jcc(Assembler::zero, L_0_count); 12.64 - gen_write_ref_array_pre_barrier(to, count); 12.65 + gen_write_ref_array_pre_barrier(to, count, dest_uninitialized); 12.66 __ mov(saved_to, to); // save 'to' 12.67 } 12.68 12.69 - *entry = __ pc(); // Entry point from conjoint arraycopy stub. 12.70 - BLOCK_COMMENT("Entry:"); 12.71 - 12.72 __ subptr(to, from); // to --> to_from 12.73 __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element 12.74 __ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp 12.75 @@ -1085,7 +1086,8 @@ 12.76 address generate_conjoint_copy(BasicType t, bool aligned, 12.77 Address::ScaleFactor sf, 12.78 address nooverlap_target, 12.79 - address* entry, const char *name) { 12.80 + address* entry, const char *name, 12.81 + bool dest_uninitialized = false) { 12.82 __ align(CodeEntryAlignment); 12.83 StubCodeMark mark(this, "StubRoutines", name); 12.84 address start = __ pc(); 12.85 @@ -1108,23 +1110,17 @@ 12.86 __ movptr(src , Address(rsp, 12+ 4)); // from 12.87 __ movptr(dst , Address(rsp, 12+ 8)); // to 12.88 __ movl2ptr(count, Address(rsp, 12+12)); // count 12.89 - if (t == T_OBJECT) { 12.90 - gen_write_ref_array_pre_barrier(dst, count); 12.91 - } 12.92 12.93 if (entry != NULL) { 12.94 *entry = __ pc(); // Entry point from generic arraycopy stub. 
12.95 BLOCK_COMMENT("Entry:"); 12.96 } 12.97 12.98 - if (t == T_OBJECT) { 12.99 - __ testl(count, count); 12.100 - __ jcc(Assembler::zero, L_0_count); 12.101 - } 12.102 + // nooverlap_target expects arguments in rsi and rdi. 12.103 __ mov(from, src); 12.104 __ mov(to , dst); 12.105 12.106 - // arrays overlap test 12.107 + // arrays overlap test: dispatch to disjoint stub if necessary. 12.108 RuntimeAddress nooverlap(nooverlap_target); 12.109 __ cmpptr(dst, src); 12.110 __ lea(end, Address(src, count, sf, 0)); // src + count * elem_size 12.111 @@ -1132,6 +1128,12 @@ 12.112 __ cmpptr(dst, end); 12.113 __ jump_cc(Assembler::aboveEqual, nooverlap); 12.114 12.115 + if (t == T_OBJECT) { 12.116 + __ testl(count, count); 12.117 + __ jcc(Assembler::zero, L_0_count); 12.118 + gen_write_ref_array_pre_barrier(dst, count, dest_uninitialized); 12.119 + } 12.120 + 12.121 // copy from high to low 12.122 __ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element 12.123 __ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp 12.124 @@ -1416,7 +1418,7 @@ 12.125 // rax, == 0 - success 12.126 // rax, == -1^K - failure, where K is partial transfer count 12.127 // 12.128 - address generate_checkcast_copy(const char *name, address* entry) { 12.129 + address generate_checkcast_copy(const char *name, address* entry, bool dest_uninitialized = false) { 12.130 __ align(CodeEntryAlignment); 12.131 StubCodeMark mark(this, "StubRoutines", name); 12.132 address start = __ pc(); 12.133 @@ -1451,8 +1453,10 @@ 12.134 __ movptr(to, to_arg); 12.135 __ movl2ptr(length, length_arg); 12.136 12.137 - *entry = __ pc(); // Entry point from generic arraycopy stub. 12.138 - BLOCK_COMMENT("Entry:"); 12.139 + if (entry != NULL) { 12.140 + *entry = __ pc(); // Entry point from generic arraycopy stub. 
12.141 + BLOCK_COMMENT("Entry:"); 12.142 + } 12.143 12.144 //--------------------------------------------------------------- 12.145 // Assembler stub will be used for this call to arraycopy 12.146 @@ -1475,7 +1479,7 @@ 12.147 Address elem_klass_addr(elem, oopDesc::klass_offset_in_bytes()); 12.148 12.149 // Copy from low to high addresses, indexed from the end of each array. 12.150 - gen_write_ref_array_pre_barrier(to, count); 12.151 + gen_write_ref_array_pre_barrier(to, count, dest_uninitialized); 12.152 __ lea(end_from, end_from_addr); 12.153 __ lea(end_to, end_to_addr); 12.154 assert(length == count, ""); // else fix next line: 12.155 @@ -2038,6 +2042,15 @@ 12.156 generate_conjoint_copy(T_OBJECT, true, Address::times_ptr, entry, 12.157 &entry_oop_arraycopy, "oop_arraycopy"); 12.158 12.159 + StubRoutines::_oop_disjoint_arraycopy_uninit = 12.160 + generate_disjoint_copy(T_OBJECT, true, Address::times_ptr, &entry, 12.161 + "oop_disjoint_arraycopy_uninit", 12.162 + /*dest_uninitialized*/true); 12.163 + StubRoutines::_oop_arraycopy_uninit = 12.164 + generate_conjoint_copy(T_OBJECT, true, Address::times_ptr, entry, 12.165 + NULL, "oop_arraycopy_uninit", 12.166 + /*dest_uninitialized*/true); 12.167 + 12.168 StubRoutines::_jlong_disjoint_arraycopy = 12.169 generate_disjoint_long_copy(&entry, "jlong_disjoint_arraycopy"); 12.170 StubRoutines::_jlong_arraycopy = 12.171 @@ -2051,20 +2064,20 @@ 12.172 StubRoutines::_arrayof_jshort_fill = generate_fill(T_SHORT, true, "arrayof_jshort_fill"); 12.173 StubRoutines::_arrayof_jint_fill = generate_fill(T_INT, true, "arrayof_jint_fill"); 12.174 12.175 - StubRoutines::_arrayof_jint_disjoint_arraycopy = 12.176 - StubRoutines::_jint_disjoint_arraycopy; 12.177 - StubRoutines::_arrayof_oop_disjoint_arraycopy = 12.178 - StubRoutines::_oop_disjoint_arraycopy; 12.179 - StubRoutines::_arrayof_jlong_disjoint_arraycopy = 12.180 - StubRoutines::_jlong_disjoint_arraycopy; 12.181 + StubRoutines::_arrayof_jint_disjoint_arraycopy = 
StubRoutines::_jint_disjoint_arraycopy; 12.182 + StubRoutines::_arrayof_oop_disjoint_arraycopy = StubRoutines::_oop_disjoint_arraycopy; 12.183 + StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit; 12.184 + StubRoutines::_arrayof_jlong_disjoint_arraycopy = StubRoutines::_jlong_disjoint_arraycopy; 12.185 12.186 - StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy; 12.187 - StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy; 12.188 - StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy; 12.189 + StubRoutines::_arrayof_jint_arraycopy = StubRoutines::_jint_arraycopy; 12.190 + StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy; 12.191 + StubRoutines::_arrayof_oop_arraycopy_uninit = StubRoutines::_oop_arraycopy_uninit; 12.192 + StubRoutines::_arrayof_jlong_arraycopy = StubRoutines::_jlong_arraycopy; 12.193 12.194 StubRoutines::_checkcast_arraycopy = 12.195 - generate_checkcast_copy("checkcast_arraycopy", 12.196 - &entry_checkcast_arraycopy); 12.197 + generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy); 12.198 + StubRoutines::_checkcast_arraycopy_uninit = 12.199 + generate_checkcast_copy("checkcast_arraycopy_uninit", NULL, /*dest_uninitialized*/true); 12.200 12.201 StubRoutines::_unsafe_arraycopy = 12.202 generate_unsafe_copy("unsafe_arraycopy",
13.1 --- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp Thu Mar 03 15:13:18 2011 -0800 13.2 +++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp Fri Mar 04 14:06:16 2011 -0800 13.3 @@ -1,5 +1,5 @@ 13.4 /* 13.5 - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. 13.6 + * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. 13.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 13.8 * 13.9 * This code is free software; you can redistribute it and/or modify it 13.10 @@ -1057,20 +1057,6 @@ 13.11 return start; 13.12 } 13.13 13.14 - static address disjoint_byte_copy_entry; 13.15 - static address disjoint_short_copy_entry; 13.16 - static address disjoint_int_copy_entry; 13.17 - static address disjoint_long_copy_entry; 13.18 - static address disjoint_oop_copy_entry; 13.19 - 13.20 - static address byte_copy_entry; 13.21 - static address short_copy_entry; 13.22 - static address int_copy_entry; 13.23 - static address long_copy_entry; 13.24 - static address oop_copy_entry; 13.25 - 13.26 - static address checkcast_copy_entry; 13.27 - 13.28 // 13.29 // Verify that a register contains clean 32-bits positive value 13.30 // (high 32-bits are 0) so it could be used in 64-bits shifts. 13.31 @@ -1173,34 +1159,35 @@ 13.32 // Generate code for an array write pre barrier 13.33 // 13.34 // addr - starting address 13.35 - // count - element count 13.36 + // count - element count 13.37 + // tmp - scratch register 13.38 // 13.39 // Destroy no registers! 13.40 // 13.41 - void gen_write_ref_array_pre_barrier(Register addr, Register count) { 13.42 + void gen_write_ref_array_pre_barrier(Register addr, Register count, bool dest_uninitialized) { 13.43 BarrierSet* bs = Universe::heap()->barrier_set(); 13.44 switch (bs->kind()) { 13.45 case BarrierSet::G1SATBCT: 13.46 case BarrierSet::G1SATBCTLogging: 13.47 - { 13.48 - __ pusha(); // push registers 13.49 - if (count == c_rarg0) { 13.50 - if (addr == c_rarg1) { 13.51 - // exactly backwards!! 
13.52 - __ xchgptr(c_rarg1, c_rarg0); 13.53 - } else { 13.54 - __ movptr(c_rarg1, count); 13.55 - __ movptr(c_rarg0, addr); 13.56 - } 13.57 - 13.58 - } else { 13.59 - __ movptr(c_rarg0, addr); 13.60 - __ movptr(c_rarg1, count); 13.61 - } 13.62 - __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2); 13.63 - __ popa(); 13.64 + // With G1, don't generate the call if we statically know that the target in uninitialized 13.65 + if (!dest_uninitialized) { 13.66 + __ pusha(); // push registers 13.67 + if (count == c_rarg0) { 13.68 + if (addr == c_rarg1) { 13.69 + // exactly backwards!! 13.70 + __ xchgptr(c_rarg1, c_rarg0); 13.71 + } else { 13.72 + __ movptr(c_rarg1, count); 13.73 + __ movptr(c_rarg0, addr); 13.74 + } 13.75 + } else { 13.76 + __ movptr(c_rarg0, addr); 13.77 + __ movptr(c_rarg1, count); 13.78 + } 13.79 + __ call_VM_leaf(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre), 2); 13.80 + __ popa(); 13.81 } 13.82 - break; 13.83 + break; 13.84 case BarrierSet::CardTableModRef: 13.85 case BarrierSet::CardTableExtension: 13.86 case BarrierSet::ModRef: 13.87 @@ -1379,7 +1366,7 @@ 13.88 // disjoint_byte_copy_entry is set to the no-overlap entry point 13.89 // used by generate_conjoint_byte_copy(). 13.90 // 13.91 - address generate_disjoint_byte_copy(bool aligned, const char *name) { 13.92 + address generate_disjoint_byte_copy(bool aligned, address* entry, const char *name) { 13.93 __ align(CodeEntryAlignment); 13.94 StubCodeMark mark(this, "StubRoutines", name); 13.95 address start = __ pc(); 13.96 @@ -1399,9 +1386,11 @@ 13.97 __ enter(); // required for proper stackwalking of RuntimeStub frame 13.98 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 
13.99 13.100 - disjoint_byte_copy_entry = __ pc(); 13.101 - BLOCK_COMMENT("Entry:"); 13.102 - // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 13.103 + if (entry != NULL) { 13.104 + *entry = __ pc(); 13.105 + // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 13.106 + BLOCK_COMMENT("Entry:"); 13.107 + } 13.108 13.109 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 13.110 // r9 and r10 may be used to save non-volatile registers 13.111 @@ -1479,7 +1468,8 @@ 13.112 // dwords or qwords that span cache line boundaries will still be loaded 13.113 // and stored atomically. 13.114 // 13.115 - address generate_conjoint_byte_copy(bool aligned, const char *name) { 13.116 + address generate_conjoint_byte_copy(bool aligned, address nooverlap_target, 13.117 + address* entry, const char *name) { 13.118 __ align(CodeEntryAlignment); 13.119 StubCodeMark mark(this, "StubRoutines", name); 13.120 address start = __ pc(); 13.121 @@ -1494,11 +1484,13 @@ 13.122 __ enter(); // required for proper stackwalking of RuntimeStub frame 13.123 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 13.124 13.125 - byte_copy_entry = __ pc(); 13.126 - BLOCK_COMMENT("Entry:"); 13.127 - // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 13.128 - 13.129 - array_overlap_test(disjoint_byte_copy_entry, Address::times_1); 13.130 + if (entry != NULL) { 13.131 + *entry = __ pc(); 13.132 + // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 13.133 + BLOCK_COMMENT("Entry:"); 13.134 + } 13.135 + 13.136 + array_overlap_test(nooverlap_target, Address::times_1); 13.137 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 13.138 // r9 and r10 may be used to save non-volatile registers 13.139 13.140 @@ -1574,7 +1566,7 @@ 13.141 // disjoint_short_copy_entry is set to the no-overlap entry point 13.142 // used by generate_conjoint_short_copy(). 
13.143 // 13.144 - address generate_disjoint_short_copy(bool aligned, const char *name) { 13.145 + address generate_disjoint_short_copy(bool aligned, address *entry, const char *name) { 13.146 __ align(CodeEntryAlignment); 13.147 StubCodeMark mark(this, "StubRoutines", name); 13.148 address start = __ pc(); 13.149 @@ -1593,9 +1585,11 @@ 13.150 __ enter(); // required for proper stackwalking of RuntimeStub frame 13.151 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 13.152 13.153 - disjoint_short_copy_entry = __ pc(); 13.154 - BLOCK_COMMENT("Entry:"); 13.155 - // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 13.156 + if (entry != NULL) { 13.157 + *entry = __ pc(); 13.158 + // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 13.159 + BLOCK_COMMENT("Entry:"); 13.160 + } 13.161 13.162 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 13.163 // r9 and r10 may be used to save non-volatile registers 13.164 @@ -1686,7 +1680,8 @@ 13.165 // or qwords that span cache line boundaries will still be loaded 13.166 // and stored atomically. 13.167 // 13.168 - address generate_conjoint_short_copy(bool aligned, const char *name) { 13.169 + address generate_conjoint_short_copy(bool aligned, address nooverlap_target, 13.170 + address *entry, const char *name) { 13.171 __ align(CodeEntryAlignment); 13.172 StubCodeMark mark(this, "StubRoutines", name); 13.173 address start = __ pc(); 13.174 @@ -1701,11 +1696,13 @@ 13.175 __ enter(); // required for proper stackwalking of RuntimeStub frame 13.176 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 
13.177 13.178 - short_copy_entry = __ pc(); 13.179 - BLOCK_COMMENT("Entry:"); 13.180 - // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 13.181 - 13.182 - array_overlap_test(disjoint_short_copy_entry, Address::times_2); 13.183 + if (entry != NULL) { 13.184 + *entry = __ pc(); 13.185 + // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 13.186 + BLOCK_COMMENT("Entry:"); 13.187 + } 13.188 + 13.189 + array_overlap_test(nooverlap_target, Address::times_2); 13.190 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 13.191 // r9 and r10 may be used to save non-volatile registers 13.192 13.193 @@ -1773,7 +1770,8 @@ 13.194 // disjoint_int_copy_entry is set to the no-overlap entry point 13.195 // used by generate_conjoint_int_oop_copy(). 13.196 // 13.197 - address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, const char *name) { 13.198 + address generate_disjoint_int_oop_copy(bool aligned, bool is_oop, address* entry, 13.199 + const char *name, bool dest_uninitialized = false) { 13.200 __ align(CodeEntryAlignment); 13.201 StubCodeMark mark(this, "StubRoutines", name); 13.202 address start = __ pc(); 13.203 @@ -1793,21 +1791,17 @@ 13.204 __ enter(); // required for proper stackwalking of RuntimeStub frame 13.205 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 13.206 13.207 - (is_oop ? 
disjoint_oop_copy_entry : disjoint_int_copy_entry) = __ pc(); 13.208 - 13.209 - if (is_oop) { 13.210 - // no registers are destroyed by this call 13.211 - gen_write_ref_array_pre_barrier(/* dest */ c_rarg1, /* count */ c_rarg2); 13.212 + if (entry != NULL) { 13.213 + *entry = __ pc(); 13.214 + // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 13.215 + BLOCK_COMMENT("Entry:"); 13.216 } 13.217 13.218 - BLOCK_COMMENT("Entry:"); 13.219 - // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 13.220 - 13.221 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 13.222 // r9 and r10 may be used to save non-volatile registers 13.223 - 13.224 if (is_oop) { 13.225 __ movq(saved_to, to); 13.226 + gen_write_ref_array_pre_barrier(to, count, dest_uninitialized); 13.227 } 13.228 13.229 // 'from', 'to' and 'count' are now valid 13.230 @@ -1867,7 +1861,9 @@ 13.231 // the hardware handle it. The two dwords within qwords that span 13.232 // cache line boundaries will still be loaded and stored atomicly. 13.233 // 13.234 - address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, const char *name) { 13.235 + address generate_conjoint_int_oop_copy(bool aligned, bool is_oop, address nooverlap_target, 13.236 + address *entry, const char *name, 13.237 + bool dest_uninitialized = false) { 13.238 __ align(CodeEntryAlignment); 13.239 StubCodeMark mark(this, "StubRoutines", name); 13.240 address start = __ pc(); 13.241 @@ -1882,20 +1878,21 @@ 13.242 __ enter(); // required for proper stackwalking of RuntimeStub frame 13.243 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 
13.244 13.245 + if (entry != NULL) { 13.246 + *entry = __ pc(); 13.247 + // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 13.248 + BLOCK_COMMENT("Entry:"); 13.249 + } 13.250 + 13.251 + array_overlap_test(nooverlap_target, Address::times_4); 13.252 + setup_arg_regs(); // from => rdi, to => rsi, count => rdx 13.253 + // r9 and r10 may be used to save non-volatile registers 13.254 + 13.255 if (is_oop) { 13.256 // no registers are destroyed by this call 13.257 - gen_write_ref_array_pre_barrier(/* dest */ c_rarg1, /* count */ c_rarg2); 13.258 + gen_write_ref_array_pre_barrier(to, count, dest_uninitialized); 13.259 } 13.260 13.261 - (is_oop ? oop_copy_entry : int_copy_entry) = __ pc(); 13.262 - BLOCK_COMMENT("Entry:"); 13.263 - // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 13.264 - 13.265 - array_overlap_test(is_oop ? disjoint_oop_copy_entry : disjoint_int_copy_entry, 13.266 - Address::times_4); 13.267 - setup_arg_regs(); // from => rdi, to => rsi, count => rdx 13.268 - // r9 and r10 may be used to save non-volatile registers 13.269 - 13.270 assert_clean_int(count, rax); // Make sure 'count' is clean int. 13.271 // 'from', 'to' and 'count' are now valid 13.272 __ movptr(dword_count, count); 13.273 @@ -1959,7 +1956,8 @@ 13.274 // disjoint_oop_copy_entry or disjoint_long_copy_entry is set to the 13.275 // no-overlap entry point used by generate_conjoint_long_oop_copy(). 13.276 // 13.277 - address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, const char *name) { 13.278 + address generate_disjoint_long_oop_copy(bool aligned, bool is_oop, address *entry, 13.279 + const char *name, bool dest_uninitialized = false) { 13.280 __ align(CodeEntryAlignment); 13.281 StubCodeMark mark(this, "StubRoutines", name); 13.282 address start = __ pc(); 13.283 @@ -1978,20 +1976,19 @@ 13.284 // Save no-overlap entry point for generate_conjoint_long_oop_copy() 13.285 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 
13.286 13.287 - if (is_oop) { 13.288 - disjoint_oop_copy_entry = __ pc(); 13.289 - // no registers are destroyed by this call 13.290 - gen_write_ref_array_pre_barrier(/* dest */ c_rarg1, /* count */ c_rarg2); 13.291 - } else { 13.292 - disjoint_long_copy_entry = __ pc(); 13.293 + if (entry != NULL) { 13.294 + *entry = __ pc(); 13.295 + // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 13.296 + BLOCK_COMMENT("Entry:"); 13.297 } 13.298 - BLOCK_COMMENT("Entry:"); 13.299 - // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 13.300 13.301 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 13.302 // r9 and r10 may be used to save non-volatile registers 13.303 - 13.304 // 'from', 'to' and 'qword_count' are now valid 13.305 + if (is_oop) { 13.306 + // no registers are destroyed by this call 13.307 + gen_write_ref_array_pre_barrier(to, qword_count, dest_uninitialized); 13.308 + } 13.309 13.310 // Copy from low to high addresses. Use 'to' as scratch. 13.311 __ lea(end_from, Address(from, qword_count, Address::times_8, -8)); 13.312 @@ -2045,7 +2042,9 @@ 13.313 // c_rarg1 - destination array address 13.314 // c_rarg2 - element count, treated as ssize_t, can be zero 13.315 // 13.316 - address generate_conjoint_long_oop_copy(bool aligned, bool is_oop, const char *name) { 13.317 + address generate_conjoint_long_oop_copy(bool aligned, bool is_oop, 13.318 + address nooverlap_target, address *entry, 13.319 + const char *name, bool dest_uninitialized = false) { 13.320 __ align(CodeEntryAlignment); 13.321 StubCodeMark mark(this, "StubRoutines", name); 13.322 address start = __ pc(); 13.323 @@ -2059,31 +2058,21 @@ 13.324 __ enter(); // required for proper stackwalking of RuntimeStub frame 13.325 assert_clean_int(c_rarg2, rax); // Make sure 'count' is clean int. 
13.326 13.327 - address disjoint_copy_entry = NULL; 13.328 - if (is_oop) { 13.329 - assert(!UseCompressedOops, "shouldn't be called for compressed oops"); 13.330 - disjoint_copy_entry = disjoint_oop_copy_entry; 13.331 - oop_copy_entry = __ pc(); 13.332 - array_overlap_test(disjoint_oop_copy_entry, Address::times_8); 13.333 - } else { 13.334 - disjoint_copy_entry = disjoint_long_copy_entry; 13.335 - long_copy_entry = __ pc(); 13.336 - array_overlap_test(disjoint_long_copy_entry, Address::times_8); 13.337 + if (entry != NULL) { 13.338 + *entry = __ pc(); 13.339 + // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 13.340 + BLOCK_COMMENT("Entry:"); 13.341 } 13.342 - BLOCK_COMMENT("Entry:"); 13.343 - // caller can pass a 64-bit byte count here (from Unsafe.copyMemory) 13.344 - 13.345 - array_overlap_test(disjoint_copy_entry, Address::times_8); 13.346 + 13.347 + array_overlap_test(nooverlap_target, Address::times_8); 13.348 setup_arg_regs(); // from => rdi, to => rsi, count => rdx 13.349 // r9 and r10 may be used to save non-volatile registers 13.350 - 13.351 // 'from', 'to' and 'qword_count' are now valid 13.352 - 13.353 if (is_oop) { 13.354 // Save to and count for store barrier 13.355 __ movptr(saved_count, qword_count); 13.356 // No registers are destroyed by this call 13.357 - gen_write_ref_array_pre_barrier(to, saved_count); 13.358 + gen_write_ref_array_pre_barrier(to, saved_count, dest_uninitialized); 13.359 } 13.360 13.361 __ jmp(L_copy_32_bytes); 13.362 @@ -2162,7 +2151,8 @@ 13.363 // rax == 0 - success 13.364 // rax == -1^K - failure, where K is partial transfer count 13.365 // 13.366 - address generate_checkcast_copy(const char *name) { 13.367 + address generate_checkcast_copy(const char *name, address *entry, 13.368 + bool dest_uninitialized = false) { 13.369 13.370 Label L_load_element, L_store_element, L_do_card_marks, L_done; 13.371 13.372 @@ -2216,8 +2206,10 @@ 13.373 #endif 13.374 13.375 // Caller of this entry point must set up the 
argument registers. 13.376 - checkcast_copy_entry = __ pc(); 13.377 - BLOCK_COMMENT("Entry:"); 13.378 + if (entry != NULL) { 13.379 + *entry = __ pc(); 13.380 + BLOCK_COMMENT("Entry:"); 13.381 + } 13.382 13.383 // allocate spill slots for r13, r14 13.384 enum { 13.385 @@ -2254,7 +2246,7 @@ 13.386 Address from_element_addr(end_from, count, TIMES_OOP, 0); 13.387 Address to_element_addr(end_to, count, TIMES_OOP, 0); 13.388 13.389 - gen_write_ref_array_pre_barrier(to, count); 13.390 + gen_write_ref_array_pre_barrier(to, count, dest_uninitialized); 13.391 13.392 // Copy from low to high addresses, indexed from the end of each array. 13.393 __ lea(end_from, end_from_addr); 13.394 @@ -2334,7 +2326,9 @@ 13.395 // Examines the alignment of the operands and dispatches 13.396 // to a long, int, short, or byte copy loop. 13.397 // 13.398 - address generate_unsafe_copy(const char *name) { 13.399 + address generate_unsafe_copy(const char *name, 13.400 + address byte_copy_entry, address short_copy_entry, 13.401 + address int_copy_entry, address long_copy_entry) { 13.402 13.403 Label L_long_aligned, L_int_aligned, L_short_aligned; 13.404 13.405 @@ -2432,7 +2426,10 @@ 13.406 // rax == 0 - success 13.407 // rax == -1^K - failure, where K is partial transfer count 13.408 // 13.409 - address generate_generic_copy(const char *name) { 13.410 + address generate_generic_copy(const char *name, 13.411 + address byte_copy_entry, address short_copy_entry, 13.412 + address int_copy_entry, address long_copy_entry, 13.413 + address oop_copy_entry, address checkcast_copy_entry) { 13.414 13.415 Label L_failed, L_failed_0, L_objArray; 13.416 Label L_copy_bytes, L_copy_shorts, L_copy_ints, L_copy_longs; 13.417 @@ -2725,33 +2722,75 @@ 13.418 } 13.419 13.420 void generate_arraycopy_stubs() { 13.421 - // Call the conjoint generation methods immediately after 13.422 - // the disjoint ones so that short branches from the former 13.423 - // to the latter can be generated. 
13.424 - StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, "jbyte_disjoint_arraycopy"); 13.425 - StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, "jbyte_arraycopy"); 13.426 - 13.427 - StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, "jshort_disjoint_arraycopy"); 13.428 - StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, "jshort_arraycopy"); 13.429 - 13.430 - StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, "jint_disjoint_arraycopy"); 13.431 - StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, "jint_arraycopy"); 13.432 - 13.433 - StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, "jlong_disjoint_arraycopy"); 13.434 - StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, "jlong_arraycopy"); 13.435 + address entry; 13.436 + address entry_jbyte_arraycopy; 13.437 + address entry_jshort_arraycopy; 13.438 + address entry_jint_arraycopy; 13.439 + address entry_oop_arraycopy; 13.440 + address entry_jlong_arraycopy; 13.441 + address entry_checkcast_arraycopy; 13.442 + 13.443 + StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry, 13.444 + "jbyte_disjoint_arraycopy"); 13.445 + StubRoutines::_jbyte_arraycopy = generate_conjoint_byte_copy(false, entry, &entry_jbyte_arraycopy, 13.446 + "jbyte_arraycopy"); 13.447 + 13.448 + StubRoutines::_jshort_disjoint_arraycopy = generate_disjoint_short_copy(false, &entry, 13.449 + "jshort_disjoint_arraycopy"); 13.450 + StubRoutines::_jshort_arraycopy = generate_conjoint_short_copy(false, entry, &entry_jshort_arraycopy, 13.451 + "jshort_arraycopy"); 13.452 + 13.453 + StubRoutines::_jint_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, false, &entry, 13.454 + "jint_disjoint_arraycopy"); 13.455 + StubRoutines::_jint_arraycopy = generate_conjoint_int_oop_copy(false, false, entry, 13.456 + 
&entry_jint_arraycopy, "jint_arraycopy"); 13.457 + 13.458 + StubRoutines::_jlong_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, false, &entry, 13.459 + "jlong_disjoint_arraycopy"); 13.460 + StubRoutines::_jlong_arraycopy = generate_conjoint_long_oop_copy(false, false, entry, 13.461 + &entry_jlong_arraycopy, "jlong_arraycopy"); 13.462 13.463 13.464 if (UseCompressedOops) { 13.465 - StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, "oop_disjoint_arraycopy"); 13.466 - StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, "oop_arraycopy"); 13.467 + StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_int_oop_copy(false, true, &entry, 13.468 + "oop_disjoint_arraycopy"); 13.469 + StubRoutines::_oop_arraycopy = generate_conjoint_int_oop_copy(false, true, entry, 13.470 + &entry_oop_arraycopy, "oop_arraycopy"); 13.471 + StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_int_oop_copy(false, true, &entry, 13.472 + "oop_disjoint_arraycopy_uninit", 13.473 + /*dest_uninitialized*/true); 13.474 + StubRoutines::_oop_arraycopy_uninit = generate_conjoint_int_oop_copy(false, true, entry, 13.475 + NULL, "oop_arraycopy_uninit", 13.476 + /*dest_uninitialized*/true); 13.477 } else { 13.478 - StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, "oop_disjoint_arraycopy"); 13.479 - StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, "oop_arraycopy"); 13.480 + StubRoutines::_oop_disjoint_arraycopy = generate_disjoint_long_oop_copy(false, true, &entry, 13.481 + "oop_disjoint_arraycopy"); 13.482 + StubRoutines::_oop_arraycopy = generate_conjoint_long_oop_copy(false, true, entry, 13.483 + &entry_oop_arraycopy, "oop_arraycopy"); 13.484 + StubRoutines::_oop_disjoint_arraycopy_uninit = generate_disjoint_long_oop_copy(false, true, &entry, 13.485 + "oop_disjoint_arraycopy_uninit", 13.486 + /*dest_uninitialized*/true); 13.487 + 
StubRoutines::_oop_arraycopy_uninit = generate_conjoint_long_oop_copy(false, true, entry, 13.488 + NULL, "oop_arraycopy_uninit", 13.489 + /*dest_uninitialized*/true); 13.490 } 13.491 13.492 - StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy"); 13.493 - StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy"); 13.494 - StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy"); 13.495 + StubRoutines::_checkcast_arraycopy = generate_checkcast_copy("checkcast_arraycopy", &entry_checkcast_arraycopy); 13.496 + StubRoutines::_checkcast_arraycopy_uninit = generate_checkcast_copy("checkcast_arraycopy_uninit", NULL, 13.497 + /*dest_uninitialized*/true); 13.498 + 13.499 + StubRoutines::_unsafe_arraycopy = generate_unsafe_copy("unsafe_arraycopy", 13.500 + entry_jbyte_arraycopy, 13.501 + entry_jshort_arraycopy, 13.502 + entry_jint_arraycopy, 13.503 + entry_jlong_arraycopy); 13.504 + StubRoutines::_generic_arraycopy = generate_generic_copy("generic_arraycopy", 13.505 + entry_jbyte_arraycopy, 13.506 + entry_jshort_arraycopy, 13.507 + entry_jint_arraycopy, 13.508 + entry_oop_arraycopy, 13.509 + entry_jlong_arraycopy, 13.510 + entry_checkcast_arraycopy); 13.511 13.512 StubRoutines::_jbyte_fill = generate_fill(T_BYTE, false, "jbyte_fill"); 13.513 StubRoutines::_jshort_fill = generate_fill(T_SHORT, false, "jshort_fill"); 13.514 @@ -2776,6 +2815,9 @@ 13.515 13.516 StubRoutines::_arrayof_oop_disjoint_arraycopy = StubRoutines::_oop_disjoint_arraycopy; 13.517 StubRoutines::_arrayof_oop_arraycopy = StubRoutines::_oop_arraycopy; 13.518 + 13.519 + StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = StubRoutines::_oop_disjoint_arraycopy_uninit; 13.520 + StubRoutines::_arrayof_oop_arraycopy_uninit = StubRoutines::_oop_arraycopy_uninit; 13.521 } 13.522 13.523 void generate_math_stubs() { 13.524 @@ -3069,20 +3111,6 @@ 13.525 } 13.526 }; // end class declaration 13.527 13.528 -address 
StubGenerator::disjoint_byte_copy_entry = NULL; 13.529 -address StubGenerator::disjoint_short_copy_entry = NULL; 13.530 -address StubGenerator::disjoint_int_copy_entry = NULL; 13.531 -address StubGenerator::disjoint_long_copy_entry = NULL; 13.532 -address StubGenerator::disjoint_oop_copy_entry = NULL; 13.533 - 13.534 -address StubGenerator::byte_copy_entry = NULL; 13.535 -address StubGenerator::short_copy_entry = NULL; 13.536 -address StubGenerator::int_copy_entry = NULL; 13.537 -address StubGenerator::long_copy_entry = NULL; 13.538 -address StubGenerator::oop_copy_entry = NULL; 13.539 - 13.540 -address StubGenerator::checkcast_copy_entry = NULL; 13.541 - 13.542 void StubGenerator_generate(CodeBuffer* code, bool all) { 13.543 StubGenerator g(code, all); 13.544 }
14.1 --- a/src/cpu/x86/vm/x86_32.ad Thu Mar 03 15:13:18 2011 -0800 14.2 +++ b/src/cpu/x86/vm/x86_32.ad Fri Mar 04 14:06:16 2011 -0800 14.3 @@ -1,5 +1,5 @@ 14.4 // 14.5 -// Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 14.6 +// Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. 14.7 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 14.8 // 14.9 // This code is free software; you can redistribute it and/or modify it 14.10 @@ -12658,17 +12658,46 @@ 14.11 ins_pipe( pipe_slow ); 14.12 %} 14.13 14.14 +// fast search of substring with known size. 14.15 +instruct string_indexof_con(eDIRegP str1, eDXRegI cnt1, eSIRegP str2, immI int_cnt2, 14.16 + eBXRegI result, regXD vec, eAXRegI cnt2, eCXRegI tmp, eFlagsReg cr) %{ 14.17 + predicate(UseSSE42Intrinsics); 14.18 + match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2))); 14.19 + effect(TEMP vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, KILL cnt2, KILL tmp, KILL cr); 14.20 + 14.21 + format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result // KILL $vec, $cnt1, $cnt2, $tmp" %} 14.22 + ins_encode %{ 14.23 + int icnt2 = (int)$int_cnt2$$constant; 14.24 + if (icnt2 >= 8) { 14.25 + // IndexOf for constant substrings with size >= 8 elements 14.26 + // which don't need to be loaded through stack. 14.27 + __ string_indexofC8($str1$$Register, $str2$$Register, 14.28 + $cnt1$$Register, $cnt2$$Register, 14.29 + icnt2, $result$$Register, 14.30 + $vec$$XMMRegister, $tmp$$Register); 14.31 + } else { 14.32 + // Small strings are loaded through stack if they cross page boundary. 
14.33 + __ string_indexof($str1$$Register, $str2$$Register, 14.34 + $cnt1$$Register, $cnt2$$Register, 14.35 + icnt2, $result$$Register, 14.36 + $vec$$XMMRegister, $tmp$$Register); 14.37 + } 14.38 + %} 14.39 + ins_pipe( pipe_slow ); 14.40 +%} 14.41 + 14.42 instruct string_indexof(eDIRegP str1, eDXRegI cnt1, eSIRegP str2, eAXRegI cnt2, 14.43 - eBXRegI result, regXD tmp1, eCXRegI tmp2, eFlagsReg cr) %{ 14.44 + eBXRegI result, regXD vec, eCXRegI tmp, eFlagsReg cr) %{ 14.45 predicate(UseSSE42Intrinsics); 14.46 match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2))); 14.47 - effect(TEMP tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL tmp2, KILL cr); 14.48 - 14.49 - format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp2, $tmp1" %} 14.50 + effect(TEMP vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL tmp, KILL cr); 14.51 + 14.52 + format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result // KILL all" %} 14.53 ins_encode %{ 14.54 __ string_indexof($str1$$Register, $str2$$Register, 14.55 - $cnt1$$Register, $cnt2$$Register, $result$$Register, 14.56 - $tmp1$$XMMRegister, $tmp2$$Register); 14.57 + $cnt1$$Register, $cnt2$$Register, 14.58 + (-1), $result$$Register, 14.59 + $vec$$XMMRegister, $tmp$$Register); 14.60 %} 14.61 ins_pipe( pipe_slow ); 14.62 %}
15.1 --- a/src/cpu/x86/vm/x86_64.ad Thu Mar 03 15:13:18 2011 -0800 15.2 +++ b/src/cpu/x86/vm/x86_64.ad Fri Mar 04 14:06:16 2011 -0800 15.3 @@ -1,5 +1,5 @@ 15.4 // 15.5 -// Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. 15.6 +// Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved. 15.7 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 15.8 // 15.9 // This code is free software; you can redistribute it and/or modify it 15.10 @@ -11598,18 +11598,48 @@ 15.11 ins_pipe( pipe_slow ); 15.12 %} 15.13 15.14 +// fast search of substring with known size. 15.15 +instruct string_indexof_con(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, immI int_cnt2, 15.16 + rbx_RegI result, regD vec, rax_RegI cnt2, rcx_RegI tmp, rFlagsReg cr) 15.17 +%{ 15.18 + predicate(UseSSE42Intrinsics); 15.19 + match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2))); 15.20 + effect(TEMP vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, KILL cnt2, KILL tmp, KILL cr); 15.21 + 15.22 + format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result // KILL $vec, $cnt1, $cnt2, $tmp" %} 15.23 + ins_encode %{ 15.24 + int icnt2 = (int)$int_cnt2$$constant; 15.25 + if (icnt2 >= 8) { 15.26 + // IndexOf for constant substrings with size >= 8 elements 15.27 + // which don't need to be loaded through stack. 15.28 + __ string_indexofC8($str1$$Register, $str2$$Register, 15.29 + $cnt1$$Register, $cnt2$$Register, 15.30 + icnt2, $result$$Register, 15.31 + $vec$$XMMRegister, $tmp$$Register); 15.32 + } else { 15.33 + // Small strings are loaded through stack if they cross page boundary. 
15.34 + __ string_indexof($str1$$Register, $str2$$Register, 15.35 + $cnt1$$Register, $cnt2$$Register, 15.36 + icnt2, $result$$Register, 15.37 + $vec$$XMMRegister, $tmp$$Register); 15.38 + } 15.39 + %} 15.40 + ins_pipe( pipe_slow ); 15.41 +%} 15.42 + 15.43 instruct string_indexof(rdi_RegP str1, rdx_RegI cnt1, rsi_RegP str2, rax_RegI cnt2, 15.44 - rbx_RegI result, regD tmp1, rcx_RegI tmp2, rFlagsReg cr) 15.45 + rbx_RegI result, regD vec, rcx_RegI tmp, rFlagsReg cr) 15.46 %{ 15.47 predicate(UseSSE42Intrinsics); 15.48 match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2))); 15.49 - effect(TEMP tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL tmp2, KILL cr); 15.50 - 15.51 - format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result // KILL $tmp1, $tmp2" %} 15.52 + effect(TEMP vec, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL tmp, KILL cr); 15.53 + 15.54 + format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result // KILL all" %} 15.55 ins_encode %{ 15.56 __ string_indexof($str1$$Register, $str2$$Register, 15.57 - $cnt1$$Register, $cnt2$$Register, $result$$Register, 15.58 - $tmp1$$XMMRegister, $tmp2$$Register); 15.59 + $cnt1$$Register, $cnt2$$Register, 15.60 + (-1), $result$$Register, 15.61 + $vec$$XMMRegister, $tmp$$Register); 15.62 %} 15.63 ins_pipe( pipe_slow ); 15.64 %}
16.1 --- a/src/os/linux/vm/os_linux.cpp Thu Mar 03 15:13:18 2011 -0800 16.2 +++ b/src/os/linux/vm/os_linux.cpp Fri Mar 04 14:06:16 2011 -0800 16.3 @@ -2213,7 +2213,7 @@ 16.4 if (rp == NULL) 16.5 return; 16.6 16.7 - if (strcmp(Arguments::sun_java_launcher(), "gamma") == 0) { 16.8 + if (Arguments::created_by_gamma_launcher()) { 16.9 // Support for the gamma launcher. Typical value for buf is 16.10 // "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so". If "/jre/lib/" appears at 16.11 // the right place in the string, then assume we are installed in a JDK and
17.1 --- a/src/os/posix/vm/os_posix.cpp Thu Mar 03 15:13:18 2011 -0800 17.2 +++ b/src/os/posix/vm/os_posix.cpp Fri Mar 04 14:06:16 2011 -0800 17.3 @@ -59,3 +59,12 @@ 17.4 VMError::report_coredump_status(buffer, success); 17.5 } 17.6 17.7 +bool os::is_debugger_attached() { 17.8 + // not implemented 17.9 + return false; 17.10 +} 17.11 + 17.12 +void os::wait_for_keypress_at_exit(void) { 17.13 + // don't do anything on posix platforms 17.14 + return; 17.15 +}
18.1 --- a/src/os/solaris/vm/os_solaris.cpp Thu Mar 03 15:13:18 2011 -0800 18.2 +++ b/src/os/solaris/vm/os_solaris.cpp Fri Mar 04 14:06:16 2011 -0800 18.3 @@ -2511,7 +2511,7 @@ 18.4 assert(ret != 0, "cannot locate libjvm"); 18.5 realpath((char *)dlinfo.dli_fname, buf); 18.6 18.7 - if (strcmp(Arguments::sun_java_launcher(), "gamma") == 0) { 18.8 + if (Arguments::created_by_gamma_launcher()) { 18.9 // Support for the gamma launcher. Typical value for buf is 18.10 // "<JAVA_HOME>/jre/lib/<arch>/<vmtype>/libjvm.so". If "/jre/lib/" appears at 18.11 // the right place in the string, then assume we are installed in a JDK and
19.1 --- a/src/os/windows/vm/os_windows.cpp Thu Mar 03 15:13:18 2011 -0800 19.2 +++ b/src/os/windows/vm/os_windows.cpp Fri Mar 04 14:06:16 2011 -0800 19.3 @@ -22,10 +22,8 @@ 19.4 * 19.5 */ 19.6 19.7 -#ifdef _WIN64 19.8 -// Must be at least Windows 2000 or XP to use VectoredExceptions 19.9 +// Must be at least Windows 2000 or XP to use VectoredExceptions and IsDebuggerPresent 19.10 #define _WIN32_WINNT 0x500 19.11 -#endif 19.12 19.13 // no precompiled headers 19.14 #include "classfile/classLoader.hpp" 19.15 @@ -1788,7 +1786,7 @@ 19.16 } 19.17 19.18 buf[0] = '\0'; 19.19 - if (strcmp(Arguments::sun_java_launcher(), "gamma") == 0) { 19.20 + if (Arguments::created_by_gamma_launcher()) { 19.21 // Support for the gamma launcher. Check for an 19.22 // JAVA_HOME environment variable 19.23 // and fix up the path so it looks like 19.24 @@ -3418,6 +3416,19 @@ 19.25 } 19.26 19.27 19.28 +bool os::is_debugger_attached() { 19.29 + return IsDebuggerPresent() ? true : false; 19.30 +} 19.31 + 19.32 + 19.33 +void os::wait_for_keypress_at_exit(void) { 19.34 + if (PauseAtExit) { 19.35 + fprintf(stderr, "Press any key to continue...\n"); 19.36 + fgetc(stdin); 19.37 + } 19.38 +} 19.39 + 19.40 + 19.41 int os::message_box(const char* title, const char* message) { 19.42 int result = MessageBox(NULL, message, title, 19.43 MB_YESNO | MB_ICONERROR | MB_SYSTEMMODAL | MB_DEFAULT_DESKTOP_ONLY);
20.1 --- a/src/share/tools/hsdis/hsdis-demo.c Thu Mar 03 15:13:18 2011 -0800 20.2 +++ b/src/share/tools/hsdis/hsdis-demo.c Fri Mar 04 14:06:16 2011 -0800 20.3 @@ -22,8 +22,6 @@ 20.4 * 20.5 */ 20.6 20.7 -#include "precompiled.hpp" 20.8 - 20.9 /* hsdis-demo.c -- dump a range of addresses as native instructions 20.10 This demonstrates the protocol required by the HotSpot PrintAssembly option. 20.11 */
21.1 --- a/src/share/tools/hsdis/hsdis.c Thu Mar 03 15:13:18 2011 -0800 21.2 +++ b/src/share/tools/hsdis/hsdis.c Fri Mar 04 14:06:16 2011 -0800 21.3 @@ -22,8 +22,6 @@ 21.4 * 21.5 */ 21.6 21.7 -#include "precompiled.hpp" 21.8 - 21.9 /* hsdis.c -- dump a range of addresses as native instructions 21.10 This implements the plugin protocol required by the 21.11 HotSpot PrintAssembly option.
22.1 --- a/src/share/vm/c1/c1_GraphBuilder.cpp Thu Mar 03 15:13:18 2011 -0800 22.2 +++ b/src/share/vm/c1/c1_GraphBuilder.cpp Fri Mar 04 14:06:16 2011 -0800 22.3 @@ -3308,22 +3308,23 @@ 22.4 Value exception = append_with_bci(new ExceptionObject(), SynchronizationEntryBCI); 22.5 assert(exception->is_pinned(), "must be"); 22.6 22.7 + int bci = SynchronizationEntryBCI; 22.8 if (compilation()->env()->dtrace_method_probes()) { 22.9 - // Report exit from inline methods 22.10 + // Report exit from inline methods. We don't have a stream here 22.11 + // so pass an explicit bci of SynchronizationEntryBCI. 22.12 Values* args = new Values(1); 22.13 - args->push(append(new Constant(new ObjectConstant(method())))); 22.14 - append(new RuntimeCall(voidType, "dtrace_method_exit", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), args)); 22.15 + args->push(append_with_bci(new Constant(new ObjectConstant(method())), bci)); 22.16 + append_with_bci(new RuntimeCall(voidType, "dtrace_method_exit", CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit), args), bci); 22.17 } 22.18 22.19 - int bci = SynchronizationEntryBCI; 22.20 if (lock) { 22.21 assert(state()->locks_size() > 0 && state()->lock_at(state()->locks_size() - 1) == lock, "lock is missing"); 22.22 if (!lock->is_linked()) { 22.23 - lock = append_with_bci(lock, -1); 22.24 + lock = append_with_bci(lock, bci); 22.25 } 22.26 22.27 // exit the monitor in the context of the synchronized method 22.28 - monitorexit(lock, SynchronizationEntryBCI); 22.29 + monitorexit(lock, bci); 22.30 22.31 // exit the context of the synchronized method 22.32 if (!default_handler) {
23.1 --- a/src/share/vm/c1/c1_Runtime1.cpp Thu Mar 03 15:13:18 2011 -0800 23.2 +++ b/src/share/vm/c1/c1_Runtime1.cpp Fri Mar 04 14:06:16 2011 -0800 23.3 @@ -426,10 +426,9 @@ 23.4 // been deoptimized. If that is the case we return the deopt blob 23.5 // unpack_with_exception entry instead. This makes life for the exception blob easier 23.6 // because making that same check and diverting is painful from assembly language. 23.7 -// 23.8 - 23.9 - 23.10 JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* thread, oopDesc* ex, address pc, nmethod*& nm)) 23.11 + // Reset method handle flag. 23.12 + thread->set_is_method_handle_return(false); 23.13 23.14 Handle exception(thread, ex); 23.15 nm = CodeCache::find_nmethod(pc); 23.16 @@ -480,11 +479,12 @@ 23.17 return SharedRuntime::deopt_blob()->unpack_with_exception_in_tls(); 23.18 } 23.19 23.20 - // ExceptionCache is used only for exceptions at call and not for implicit exceptions 23.21 + // ExceptionCache is used only for exceptions at call sites and not for implicit exceptions 23.22 if (guard_pages_enabled) { 23.23 address fast_continuation = nm->handler_for_exception_and_pc(exception, pc); 23.24 if (fast_continuation != NULL) { 23.25 - if (fast_continuation == ExceptionCache::unwind_handler()) fast_continuation = NULL; 23.26 + // Set flag if return address is a method handle call site. 
23.27 + thread->set_is_method_handle_return(nm->is_method_handle_return(pc)); 23.28 return fast_continuation; 23.29 } 23.30 } 23.31 @@ -522,14 +522,14 @@ 23.32 thread->set_exception_pc(pc); 23.33 23.34 // the exception cache is used only by non-implicit exceptions 23.35 - if (continuation == NULL) { 23.36 - nm->add_handler_for_exception_and_pc(exception, pc, ExceptionCache::unwind_handler()); 23.37 - } else { 23.38 + if (continuation != NULL) { 23.39 nm->add_handler_for_exception_and_pc(exception, pc, continuation); 23.40 } 23.41 } 23.42 23.43 thread->set_vm_result(exception()); 23.44 + // Set flag if return address is a method handle call site. 23.45 + thread->set_is_method_handle_return(nm->is_method_handle_return(pc)); 23.46 23.47 if (TraceExceptions) { 23.48 ttyLocker ttyl; 23.49 @@ -542,20 +542,19 @@ 23.50 JRT_END 23.51 23.52 // Enter this method from compiled code only if there is a Java exception handler 23.53 -// in the method handling the exception 23.54 +// in the method handling the exception. 23.55 // We are entering here from exception stub. We don't do a normal VM transition here. 23.56 // We do it in a helper. This is so we can check to see if the nmethod we have just 23.57 // searched for an exception handler has been deoptimized in the meantime. 23.58 -address Runtime1::exception_handler_for_pc(JavaThread* thread) { 23.59 +address Runtime1::exception_handler_for_pc(JavaThread* thread) { 23.60 oop exception = thread->exception_oop(); 23.61 address pc = thread->exception_pc(); 23.62 // Still in Java mode 23.63 - debug_only(ResetNoHandleMark rnhm); 23.64 + DEBUG_ONLY(ResetNoHandleMark rnhm); 23.65 nmethod* nm = NULL; 23.66 address continuation = NULL; 23.67 { 23.68 // Enter VM mode by calling the helper 23.69 - 23.70 ResetNoHandleMark rnhm; 23.71 continuation = exception_handler_for_pc_helper(thread, exception, pc, nm); 23.72 } 23.73 @@ -563,11 +562,11 @@ 23.74 23.75 // Now check to see if the nmethod we were called from is now deoptimized. 
23.76 // If so we must return to the deopt blob and deoptimize the nmethod 23.77 - 23.78 if (nm != NULL && caller_is_deopted()) { 23.79 continuation = SharedRuntime::deopt_blob()->unpack_with_exception_in_tls(); 23.80 } 23.81 23.82 + assert(continuation != NULL, "no handler found"); 23.83 return continuation; 23.84 } 23.85
24.1 --- a/src/share/vm/c1/c1_Runtime1.hpp Thu Mar 03 15:13:18 2011 -0800 24.2 +++ b/src/share/vm/c1/c1_Runtime1.hpp Fri Mar 04 14:06:16 2011 -0800 24.3 @@ -54,6 +54,7 @@ 24.4 stub(new_multi_array) \ 24.5 stub(handle_exception_nofpu) /* optimized version that does not preserve fpu registers */ \ 24.6 stub(handle_exception) \ 24.7 + stub(handle_exception_from_callee) \ 24.8 stub(throw_array_store_exception) \ 24.9 stub(throw_class_cast_exception) \ 24.10 stub(throw_incompatible_class_change_error) \ 24.11 @@ -116,11 +117,11 @@ 24.12 static const char* _blob_names[]; 24.13 24.14 // stub generation 24.15 - static void generate_blob_for(BufferBlob* blob, StubID id); 24.16 - static OopMapSet* generate_code_for(StubID id, StubAssembler* masm); 24.17 + static void generate_blob_for(BufferBlob* blob, StubID id); 24.18 + static OopMapSet* generate_code_for(StubID id, StubAssembler* sasm); 24.19 static OopMapSet* generate_exception_throw(StubAssembler* sasm, address target, bool has_argument); 24.20 - static void generate_handle_exception(StubAssembler *sasm, OopMapSet* oop_maps, OopMap* oop_map, bool ignore_fpu_registers = false); 24.21 - static void generate_unwind_exception(StubAssembler *sasm); 24.22 + static OopMapSet* generate_handle_exception(StubID id, StubAssembler* sasm); 24.23 + static void generate_unwind_exception(StubAssembler *sasm); 24.24 static OopMapSet* generate_patching(StubAssembler* sasm, address target); 24.25 24.26 static OopMapSet* generate_stub_call(StubAssembler* sasm, Register result, address entry,
25.1 --- a/src/share/vm/classfile/classLoader.cpp Thu Mar 03 15:13:18 2011 -0800 25.2 +++ b/src/share/vm/classfile/classLoader.cpp Fri Mar 04 14:06:16 2011 -0800 25.3 @@ -1332,7 +1332,7 @@ 25.4 } 25.5 25.6 if (_compile_the_world_counter >= CompileTheWorldStartAt) { 25.7 - if (k.is_null() || (exception_occurred && !CompileTheWorldIgnoreInitErrors)) { 25.8 + if (k.is_null() || exception_occurred) { 25.9 // If something went wrong (e.g. ExceptionInInitializerError) we skip this class 25.10 tty->print_cr("CompileTheWorld (%d) : Skipping %s", _compile_the_world_counter, buffer); 25.11 } else {
26.1 --- a/src/share/vm/classfile/stackMapFrame.cpp Thu Mar 03 15:13:18 2011 -0800 26.2 +++ b/src/share/vm/classfile/stackMapFrame.cpp Fri Mar 04 14:06:16 2011 -0800 26.3 @@ -170,6 +170,44 @@ 26.4 return true; 26.5 } 26.6 26.7 +bool StackMapFrame::has_flag_match_exception( 26.8 + const StackMapFrame* target) const { 26.9 + // We allow flags of {UninitThis} to assign to {} if-and-only-if the 26.10 + // target frame does not depend upon the current type. 26.11 + // This is slightly too strict, as we need only enforce that the 26.12 + // slots that were initialized by the <init> (the things that were 26.13 + // UninitializedThis before initialize_object() converted them) are unused. 26.14 + // However we didn't save that information so we'll enforce this upon 26.15 + // anything that might have been initialized. This is a rare situation 26.16 + // and javac never generates code that would end up here, but some profilers 26.17 + // (such as NetBeans) might, when adding exception handlers in <init> 26.18 + // methods to cover the invokespecial instruction. See 7020118. 
26.19 + 26.20 + assert(max_locals() == target->max_locals() && 26.21 + stack_size() == target->stack_size(), "StackMap sizes must match"); 26.22 + 26.23 + VerificationType top = VerificationType::top_type(); 26.24 + VerificationType this_type = verifier()->current_type(); 26.25 + 26.26 + if (!flag_this_uninit() || target->flags() != 0) { 26.27 + return false; 26.28 + } 26.29 + 26.30 + for (int i = 0; i < target->locals_size(); ++i) { 26.31 + if (locals()[i] == this_type && target->locals()[i] != top) { 26.32 + return false; 26.33 + } 26.34 + } 26.35 + 26.36 + for (int i = 0; i < target->stack_size(); ++i) { 26.37 + if (stack()[i] == this_type && target->stack()[i] != top) { 26.38 + return false; 26.39 + } 26.40 + } 26.41 + 26.42 + return true; 26.43 +} 26.44 + 26.45 bool StackMapFrame::is_assignable_to(const StackMapFrame* target, TRAPS) const { 26.46 if (_max_locals != target->max_locals() || _stack_size != target->stack_size()) { 26.47 return false; 26.48 @@ -182,7 +220,9 @@ 26.49 bool match_stack = is_assignable_to( 26.50 _stack, target->stack(), _stack_size, CHECK_false); 26.51 bool match_flags = (_flags | target->flags()) == target->flags(); 26.52 - return (match_locals && match_stack && match_flags); 26.53 + 26.54 + return match_locals && match_stack && 26.55 + (match_flags || has_flag_match_exception(target)); 26.56 } 26.57 26.58 VerificationType StackMapFrame::pop_stack_ex(VerificationType type, TRAPS) {
27.1 --- a/src/share/vm/classfile/stackMapFrame.hpp Thu Mar 03 15:13:18 2011 -0800 27.2 +++ b/src/share/vm/classfile/stackMapFrame.hpp Fri Mar 04 14:06:16 2011 -0800 27.3 @@ -228,6 +228,8 @@ 27.4 bool is_assignable_to( 27.5 VerificationType* src, VerificationType* target, int32_t len, TRAPS) const; 27.6 27.7 + bool has_flag_match_exception(const StackMapFrame* target) const; 27.8 + 27.9 // Debugging 27.10 void print() const PRODUCT_RETURN; 27.11 };
28.1 --- a/src/share/vm/classfile/verificationType.hpp Thu Mar 03 15:13:18 2011 -0800 28.2 +++ b/src/share/vm/classfile/verificationType.hpp Fri Mar 04 14:06:16 2011 -0800 28.3 @@ -128,6 +128,7 @@ 28.4 28.5 // Create verification types 28.6 static VerificationType bogus_type() { return VerificationType(Bogus); } 28.7 + static VerificationType top_type() { return bogus_type(); } // alias 28.8 static VerificationType null_type() { return VerificationType(Null); } 28.9 static VerificationType integer_type() { return VerificationType(Integer); } 28.10 static VerificationType float_type() { return VerificationType(Float); }
29.1 --- a/src/share/vm/code/nmethod.cpp Thu Mar 03 15:13:18 2011 -0800 29.2 +++ b/src/share/vm/code/nmethod.cpp Fri Mar 04 14:06:16 2011 -0800 29.3 @@ -190,15 +190,10 @@ 29.4 } nmethod_stats; 29.5 #endif //PRODUCT 29.6 29.7 + 29.8 //--------------------------------------------------------------------------------- 29.9 29.10 29.11 -// The _unwind_handler is a special marker address, which says that 29.12 -// for given exception oop and address, the frame should be removed 29.13 -// as the tuple cannot be caught in the nmethod 29.14 -address ExceptionCache::_unwind_handler = (address) -1; 29.15 - 29.16 - 29.17 ExceptionCache::ExceptionCache(Handle exception, address pc, address handler) { 29.18 assert(pc != NULL, "Must be non null"); 29.19 assert(exception.not_null(), "Must be non null");
30.1 --- a/src/share/vm/code/nmethod.hpp Thu Mar 03 15:13:18 2011 -0800 30.2 +++ b/src/share/vm/code/nmethod.hpp Fri Mar 04 14:06:16 2011 -0800 30.3 @@ -34,7 +34,6 @@ 30.4 class ExceptionCache : public CHeapObj { 30.5 friend class VMStructs; 30.6 private: 30.7 - static address _unwind_handler; 30.8 enum { cache_size = 16 }; 30.9 klassOop _exception_type; 30.10 address _pc[cache_size]; 30.11 @@ -62,8 +61,6 @@ 30.12 bool match_exception_with_space(Handle exception) ; 30.13 address test_address(address addr); 30.14 bool add_address_and_handler(address addr, address handler) ; 30.15 - 30.16 - static address unwind_handler() { return _unwind_handler; } 30.17 }; 30.18 30.19
31.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Thu Mar 03 15:13:18 2011 -0800 31.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Fri Mar 04 14:06:16 2011 -0800 31.3 @@ -5474,8 +5474,6 @@ 31.4 _refine_cte_cl->set_concurrent(concurrent); 31.5 } 31.6 31.7 -#ifdef ASSERT 31.8 - 31.9 bool G1CollectedHeap::is_in_closed_subset(const void* p) const { 31.10 HeapRegion* hr = heap_region_containing(p); 31.11 if (hr == NULL) { 31.12 @@ -5484,7 +5482,6 @@ 31.13 return hr->is_in(p); 31.14 } 31.15 } 31.16 -#endif // ASSERT 31.17 31.18 class VerifyRegionListsClosure : public HeapRegionClosure { 31.19 private:
32.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Thu Mar 03 15:13:18 2011 -0800 32.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Fri Mar 04 14:06:16 2011 -0800 32.3 @@ -1134,7 +1134,7 @@ 32.4 return _g1_committed; 32.5 } 32.6 32.7 - NOT_PRODUCT(bool is_in_closed_subset(const void* p) const;) 32.8 + virtual bool is_in_closed_subset(const void* p) const; 32.9 32.10 // Dirty card table entries covering a list of young regions. 32.11 void dirtyCardsForYoungRegions(CardTableModRefBS* ct_bs, HeapRegion* list);
33.1 --- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp Thu Mar 03 15:13:18 2011 -0800 33.2 +++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.hpp Fri Mar 04 14:06:16 2011 -0800 33.3 @@ -83,11 +83,15 @@ 33.4 } 33.5 33.6 template <class T> void write_ref_array_pre_work(T* dst, int count); 33.7 - virtual void write_ref_array_pre(oop* dst, int count) { 33.8 - write_ref_array_pre_work(dst, count); 33.9 + virtual void write_ref_array_pre(oop* dst, int count, bool dest_uninitialized) { 33.10 + if (!dest_uninitialized) { 33.11 + write_ref_array_pre_work(dst, count); 33.12 + } 33.13 } 33.14 - virtual void write_ref_array_pre(narrowOop* dst, int count) { 33.15 - write_ref_array_pre_work(dst, count); 33.16 + virtual void write_ref_array_pre(narrowOop* dst, int count, bool dest_uninitialized) { 33.17 + if (!dest_uninitialized) { 33.18 + write_ref_array_pre_work(dst, count); 33.19 + } 33.20 } 33.21 }; 33.22
34.1 --- a/src/share/vm/memory/allocation.cpp Thu Mar 03 15:13:18 2011 -0800 34.2 +++ b/src/share/vm/memory/allocation.cpp Fri Mar 04 14:06:16 2011 -0800 34.3 @@ -422,6 +422,9 @@ 34.4 return sum; // Return total consumed space. 34.5 } 34.6 34.7 +void Arena::signal_out_of_memory(size_t sz, const char* whence) const { 34.8 + vm_exit_out_of_memory(sz, whence); 34.9 +} 34.10 34.11 // Grow a new Chunk 34.12 void* Arena::grow( size_t x ) { 34.13 @@ -431,8 +434,9 @@ 34.14 Chunk *k = _chunk; // Get filled-up chunk address 34.15 _chunk = new (len) Chunk(len); 34.16 34.17 - if (_chunk == NULL) 34.18 - vm_exit_out_of_memory(len * Chunk::aligned_overhead_size(), "Arena::grow"); 34.19 + if (_chunk == NULL) { 34.20 + signal_out_of_memory(len * Chunk::aligned_overhead_size(), "Arena::grow"); 34.21 + } 34.22 34.23 if (k) k->set_next(_chunk); // Append new chunk to end of linked list 34.24 else _first = _chunk; 34.25 @@ -529,6 +533,7 @@ 34.26 // for debugging with UseMallocOnly 34.27 void* Arena::internal_malloc_4(size_t x) { 34.28 assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" ); 34.29 + check_for_overflow(x, "Arena::internal_malloc_4"); 34.30 if (_hwm + x > _max) { 34.31 return grow(x); 34.32 } else {
35.1 --- a/src/share/vm/memory/allocation.hpp Thu Mar 03 15:13:18 2011 -0800 35.2 +++ b/src/share/vm/memory/allocation.hpp Fri Mar 04 14:06:16 2011 -0800 35.3 @@ -207,6 +207,15 @@ 35.4 debug_only(void* malloc(size_t size);) 35.5 debug_only(void* internal_malloc_4(size_t x);) 35.6 NOT_PRODUCT(void inc_bytes_allocated(size_t x);) 35.7 + 35.8 + void signal_out_of_memory(size_t request, const char* whence) const; 35.9 + 35.10 + void check_for_overflow(size_t request, const char* whence) const { 35.11 + if (UINTPTR_MAX - request < (uintptr_t)_hwm) { 35.12 + signal_out_of_memory(request, whence); 35.13 + } 35.14 + } 35.15 + 35.16 public: 35.17 Arena(); 35.18 Arena(size_t init_size); 35.19 @@ -220,6 +229,7 @@ 35.20 assert(is_power_of_2(ARENA_AMALLOC_ALIGNMENT) , "should be a power of 2"); 35.21 x = ARENA_ALIGN(x); 35.22 debug_only(if (UseMallocOnly) return malloc(x);) 35.23 + check_for_overflow(x, "Arena::Amalloc"); 35.24 NOT_PRODUCT(inc_bytes_allocated(x);) 35.25 if (_hwm + x > _max) { 35.26 return grow(x); 35.27 @@ -233,6 +243,7 @@ 35.28 void *Amalloc_4(size_t x) { 35.29 assert( (x&(sizeof(char*)-1)) == 0, "misaligned size" ); 35.30 debug_only(if (UseMallocOnly) return malloc(x);) 35.31 + check_for_overflow(x, "Arena::Amalloc_4"); 35.32 NOT_PRODUCT(inc_bytes_allocated(x);) 35.33 if (_hwm + x > _max) { 35.34 return grow(x); 35.35 @@ -253,6 +264,7 @@ 35.36 size_t delta = (((size_t)_hwm + DALIGN_M1) & ~DALIGN_M1) - (size_t)_hwm; 35.37 x += delta; 35.38 #endif 35.39 + check_for_overflow(x, "Arena::Amalloc_D"); 35.40 NOT_PRODUCT(inc_bytes_allocated(x);) 35.41 if (_hwm + x > _max) { 35.42 return grow(x); // grow() returns a result aligned >= 8 bytes.
36.1 --- a/src/share/vm/memory/barrierSet.cpp Thu Mar 03 15:13:18 2011 -0800 36.2 +++ b/src/share/vm/memory/barrierSet.cpp Fri Mar 04 14:06:16 2011 -0800 36.3 @@ -35,9 +35,9 @@ 36.4 start, count); 36.5 #endif 36.6 if (UseCompressedOops) { 36.7 - Universe::heap()->barrier_set()->write_ref_array_pre((narrowOop*)start, (int)count); 36.8 + Universe::heap()->barrier_set()->write_ref_array_pre((narrowOop*)start, (int)count, false); 36.9 } else { 36.10 - Universe::heap()->barrier_set()->write_ref_array_pre( (oop*)start, (int)count); 36.11 + Universe::heap()->barrier_set()->write_ref_array_pre( (oop*)start, (int)count, false); 36.12 } 36.13 } 36.14
37.1 --- a/src/share/vm/memory/barrierSet.hpp Thu Mar 03 15:13:18 2011 -0800 37.2 +++ b/src/share/vm/memory/barrierSet.hpp Fri Mar 04 14:06:16 2011 -0800 37.3 @@ -1,5 +1,5 @@ 37.4 /* 37.5 - * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. 37.6 + * Copyright (c) 2000, 2011, Oracle and/or its affiliates. All rights reserved. 37.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 37.8 * 37.9 * This code is free software; you can redistribute it and/or modify it 37.10 @@ -44,6 +44,10 @@ 37.11 Uninit 37.12 }; 37.13 37.14 + enum Flags { 37.15 + None = 0, 37.16 + TargetUninitialized = 1 37.17 + }; 37.18 protected: 37.19 int _max_covered_regions; 37.20 Name _kind; 37.21 @@ -128,8 +132,10 @@ 37.22 virtual void read_prim_array(MemRegion mr) = 0; 37.23 37.24 // Below length is the # array elements being written 37.25 - virtual void write_ref_array_pre( oop* dst, int length) {} 37.26 - virtual void write_ref_array_pre(narrowOop* dst, int length) {} 37.27 + virtual void write_ref_array_pre(oop* dst, int length, 37.28 + bool dest_uninitialized = false) {} 37.29 + virtual void write_ref_array_pre(narrowOop* dst, int length, 37.30 + bool dest_uninitialized = false) {} 37.31 // Below count is the # array elements being written, starting 37.32 // at the address "start", which may not necessarily be HeapWord-aligned 37.33 inline void write_ref_array(HeapWord* start, size_t count);
38.1 --- a/src/share/vm/opto/library_call.cpp Thu Mar 03 15:13:18 2011 -0800 38.2 +++ b/src/share/vm/opto/library_call.cpp Fri Mar 04 14:06:16 2011 -0800 38.3 @@ -1,5 +1,5 @@ 38.4 /* 38.5 - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. 38.6 + * Copyright (c) 1999, 2011, Oracle and/or its affiliates. All rights reserved. 38.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 38.8 * 38.9 * This code is free software; you can redistribute it and/or modify it 38.10 @@ -97,7 +97,7 @@ 38.11 RegionNode* region); 38.12 Node* generate_current_thread(Node* &tls_output); 38.13 address basictype2arraycopy(BasicType t, Node *src_offset, Node *dest_offset, 38.14 - bool disjoint_bases, const char* &name); 38.15 + bool disjoint_bases, const char* &name, bool dest_uninitialized); 38.16 Node* load_mirror_from_klass(Node* klass); 38.17 Node* load_klass_from_mirror_common(Node* mirror, bool never_see_null, 38.18 int nargs, 38.19 @@ -212,26 +212,26 @@ 38.20 AllocateNode* alloc, 38.21 Node* src, Node* src_offset, 38.22 Node* dest, Node* dest_offset, 38.23 - Node* dest_size); 38.24 + Node* dest_size, bool dest_uninitialized); 38.25 void generate_slow_arraycopy(const TypePtr* adr_type, 38.26 Node* src, Node* src_offset, 38.27 Node* dest, Node* dest_offset, 38.28 - Node* copy_length); 38.29 + Node* copy_length, bool dest_uninitialized); 38.30 Node* generate_checkcast_arraycopy(const TypePtr* adr_type, 38.31 Node* dest_elem_klass, 38.32 Node* src, Node* src_offset, 38.33 Node* dest, Node* dest_offset, 38.34 - Node* copy_length); 38.35 + Node* copy_length, bool dest_uninitialized); 38.36 Node* generate_generic_arraycopy(const TypePtr* adr_type, 38.37 Node* src, Node* src_offset, 38.38 Node* dest, Node* dest_offset, 38.39 - Node* copy_length); 38.40 + Node* copy_length, bool dest_uninitialized); 38.41 void generate_unchecked_arraycopy(const TypePtr* adr_type, 38.42 BasicType basic_elem_type, 38.43 bool disjoint_bases, 38.44 Node* src, Node* 
src_offset, 38.45 Node* dest, Node* dest_offset, 38.46 - Node* copy_length); 38.47 + Node* copy_length, bool dest_uninitialized); 38.48 bool inline_unsafe_CAS(BasicType type); 38.49 bool inline_unsafe_ordered_store(BasicType type); 38.50 bool inline_fp_conversions(vmIntrinsics::ID id); 38.51 @@ -1193,7 +1193,7 @@ 38.52 Node* result; 38.53 // Disable the use of pcmpestri until it can be guaranteed that 38.54 // the load doesn't cross into the uncommited space. 38.55 - if (false && Matcher::has_match_rule(Op_StrIndexOf) && 38.56 + if (Matcher::has_match_rule(Op_StrIndexOf) && 38.57 UseSSE42Intrinsics) { 38.58 // Generate SSE4.2 version of indexOf 38.59 // We currently only have match rules that use SSE4.2 38.60 @@ -1211,14 +1211,14 @@ 38.61 return true; 38.62 } 38.63 38.64 + ciInstanceKlass* str_klass = env()->String_klass(); 38.65 + const TypeOopPtr* string_type = TypeOopPtr::make_from_klass(str_klass); 38.66 + 38.67 // Make the merge point 38.68 - RegionNode* result_rgn = new (C, 3) RegionNode(3); 38.69 - Node* result_phi = new (C, 3) PhiNode(result_rgn, TypeInt::INT); 38.70 + RegionNode* result_rgn = new (C, 4) RegionNode(4); 38.71 + Node* result_phi = new (C, 4) PhiNode(result_rgn, TypeInt::INT); 38.72 Node* no_ctrl = NULL; 38.73 38.74 - ciInstanceKlass* klass = env()->String_klass(); 38.75 - const TypeOopPtr* string_type = TypeOopPtr::make_from_klass(klass); 38.76 - 38.77 // Get counts for string and substr 38.78 Node* source_cnta = basic_plus_adr(receiver, receiver, count_offset); 38.79 Node* source_cnt = make_load(no_ctrl, source_cnta, TypeInt::INT, T_INT, string_type->add_offset(count_offset)); 38.80 @@ -1236,6 +1236,17 @@ 38.81 } 38.82 38.83 if (!stopped()) { 38.84 + // Check for substr count == 0 38.85 + cmp = _gvn.transform( new(C, 3) CmpINode(substr_cnt, intcon(0)) ); 38.86 + bol = _gvn.transform( new(C, 2) BoolNode(cmp, BoolTest::eq) ); 38.87 + Node* if_zero = generate_slow_guard(bol, NULL); 38.88 + if (if_zero != NULL) { 38.89 + result_phi->init_req(3, 
intcon(0)); 38.90 + result_rgn->init_req(3, if_zero); 38.91 + } 38.92 + } 38.93 + 38.94 + if (!stopped()) { 38.95 result = make_string_method_node(Op_StrIndexOf, receiver, source_cnt, argument, substr_cnt); 38.96 result_phi->init_req(1, result); 38.97 result_rgn->init_req(1, control()); 38.98 @@ -1244,8 +1255,8 @@ 38.99 record_for_igvn(result_rgn); 38.100 result = _gvn.transform(result_phi); 38.101 38.102 - } else { //Use LibraryCallKit::string_indexOf 38.103 - // don't intrinsify is argument isn't a constant string. 38.104 + } else { // Use LibraryCallKit::string_indexOf 38.105 + // don't intrinsify if argument isn't a constant string. 38.106 if (!argument->is_Con()) { 38.107 return false; 38.108 } 38.109 @@ -1281,7 +1292,7 @@ 38.110 // No null check on the argument is needed since it's a constant String oop. 38.111 _sp -= 2; 38.112 if (stopped()) { 38.113 - return true; 38.114 + return true; 38.115 } 38.116 38.117 // The null string as a pattern always returns 0 (match at beginning of string) 38.118 @@ -4081,7 +4092,8 @@ 38.119 const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM; 38.120 bool disjoint_bases = true; 38.121 generate_unchecked_arraycopy(raw_adr_type, T_LONG, disjoint_bases, 38.122 - src, NULL, dest, NULL, countx); 38.123 + src, NULL, dest, NULL, countx, 38.124 + /*dest_uninitialized*/true); 38.125 38.126 // If necessary, emit some card marks afterwards. (Non-arrays only.) 38.127 if (card_mark) { 38.128 @@ -4295,7 +4307,7 @@ 38.129 // Note: The condition "disjoint" applies also for overlapping copies 38.130 // where an descending copy is permitted (i.e., dest_offset <= src_offset). 38.131 static address 38.132 -select_arraycopy_function(BasicType t, bool aligned, bool disjoint, const char* &name) { 38.133 +select_arraycopy_function(BasicType t, bool aligned, bool disjoint, const char* &name, bool dest_uninitialized) { 38.134 int selector = 38.135 (aligned ? COPYFUNC_ALIGNED : COPYFUNC_UNALIGNED) + 38.136 (disjoint ? 
COPYFUNC_DISJOINT : COPYFUNC_CONJOINT); 38.137 @@ -4304,6 +4316,10 @@ 38.138 name = #xxx_arraycopy; \ 38.139 return StubRoutines::xxx_arraycopy(); } 38.140 38.141 +#define RETURN_STUB_PARM(xxx_arraycopy, parm) { \ 38.142 + name = #xxx_arraycopy; \ 38.143 + return StubRoutines::xxx_arraycopy(parm); } 38.144 + 38.145 switch (t) { 38.146 case T_BYTE: 38.147 case T_BOOLEAN: 38.148 @@ -4340,10 +4356,10 @@ 38.149 case T_ARRAY: 38.150 case T_OBJECT: 38.151 switch (selector) { 38.152 - case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(oop_arraycopy); 38.153 - case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_oop_arraycopy); 38.154 - case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED: RETURN_STUB(oop_disjoint_arraycopy); 38.155 - case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED: RETURN_STUB(arrayof_oop_disjoint_arraycopy); 38.156 + case COPYFUNC_CONJOINT | COPYFUNC_UNALIGNED: RETURN_STUB_PARM(oop_arraycopy, dest_uninitialized); 38.157 + case COPYFUNC_CONJOINT | COPYFUNC_ALIGNED: RETURN_STUB_PARM(arrayof_oop_arraycopy, dest_uninitialized); 38.158 + case COPYFUNC_DISJOINT | COPYFUNC_UNALIGNED: RETURN_STUB_PARM(oop_disjoint_arraycopy, dest_uninitialized); 38.159 + case COPYFUNC_DISJOINT | COPYFUNC_ALIGNED: RETURN_STUB_PARM(arrayof_oop_disjoint_arraycopy, dest_uninitialized); 38.160 } 38.161 default: 38.162 ShouldNotReachHere(); 38.163 @@ -4351,6 +4367,7 @@ 38.164 } 38.165 38.166 #undef RETURN_STUB 38.167 +#undef RETURN_STUB_PARM 38.168 } 38.169 38.170 //------------------------------basictype2arraycopy---------------------------- 38.171 @@ -4358,7 +4375,8 @@ 38.172 Node* src_offset, 38.173 Node* dest_offset, 38.174 bool disjoint_bases, 38.175 - const char* &name) { 38.176 + const char* &name, 38.177 + bool dest_uninitialized) { 38.178 const TypeInt* src_offset_inttype = gvn().find_int_type(src_offset);; 38.179 const TypeInt* dest_offset_inttype = gvn().find_int_type(dest_offset);; 38.180 38.181 @@ -4384,7 +4402,7 @@ 38.182 disjoint = true; 38.183 } 38.184 38.185 - return 
select_arraycopy_function(t, aligned, disjoint, name); 38.186 + return select_arraycopy_function(t, aligned, disjoint, name, dest_uninitialized); 38.187 } 38.188 38.189 38.190 @@ -4440,7 +4458,8 @@ 38.191 // The component types are not the same or are not recognized. Punt. 38.192 // (But, avoid the native method wrapper to JVM_ArrayCopy.) 38.193 generate_slow_arraycopy(TypePtr::BOTTOM, 38.194 - src, src_offset, dest, dest_offset, length); 38.195 + src, src_offset, dest, dest_offset, length, 38.196 + /*dest_uninitialized*/false); 38.197 return true; 38.198 } 38.199 38.200 @@ -4553,7 +4572,7 @@ 38.201 38.202 Node* original_dest = dest; 38.203 AllocateArrayNode* alloc = NULL; // used for zeroing, if needed 38.204 - bool must_clear_dest = false; 38.205 + bool dest_uninitialized = false; 38.206 38.207 // See if this is the initialization of a newly-allocated array. 38.208 // If so, we will take responsibility here for initializing it to zero. 38.209 @@ -4576,12 +4595,14 @@ 38.210 adr_type = TypeRawPtr::BOTTOM; // all initializations are into raw memory 38.211 // From this point on, every exit path is responsible for 38.212 // initializing any non-copied parts of the object to zero. 38.213 - must_clear_dest = true; 38.214 + // Also, if this flag is set we make sure that arraycopy interacts properly 38.215 + // with G1, eliding pre-barriers. See CR 6627983. 38.216 + dest_uninitialized = true; 38.217 } else { 38.218 // No zeroing elimination here. 
38.219 alloc = NULL; 38.220 //original_dest = dest; 38.221 - //must_clear_dest = false; 38.222 + //dest_uninitialized = false; 38.223 } 38.224 38.225 // Results are placed here: 38.226 @@ -4613,10 +4634,10 @@ 38.227 Node* checked_value = NULL; 38.228 38.229 if (basic_elem_type == T_CONFLICT) { 38.230 - assert(!must_clear_dest, ""); 38.231 + assert(!dest_uninitialized, ""); 38.232 Node* cv = generate_generic_arraycopy(adr_type, 38.233 src, src_offset, dest, dest_offset, 38.234 - copy_length); 38.235 + copy_length, dest_uninitialized); 38.236 if (cv == NULL) cv = intcon(-1); // failure (no stub available) 38.237 checked_control = control(); 38.238 checked_i_o = i_o(); 38.239 @@ -4636,7 +4657,7 @@ 38.240 } 38.241 38.242 // copy_length is 0. 38.243 - if (!stopped() && must_clear_dest) { 38.244 + if (!stopped() && dest_uninitialized) { 38.245 Node* dest_length = alloc->in(AllocateNode::ALength); 38.246 if (_gvn.eqv_uncast(copy_length, dest_length) 38.247 || _gvn.find_int_con(dest_length, 1) <= 0) { 38.248 @@ -4662,7 +4683,7 @@ 38.249 result_memory->init_req(zero_path, memory(adr_type)); 38.250 } 38.251 38.252 - if (!stopped() && must_clear_dest) { 38.253 + if (!stopped() && dest_uninitialized) { 38.254 // We have to initialize the *uncopied* part of the array to zero. 38.255 // The copy destination is the slice dest[off..off+len]. The other slices 38.256 // are dest_head = dest[0..off] and dest_tail = dest[off+len..dest.length]. 38.257 @@ -4698,7 +4719,7 @@ 38.258 { PreserveJVMState pjvms(this); 38.259 didit = generate_block_arraycopy(adr_type, basic_elem_type, alloc, 38.260 src, src_offset, dest, dest_offset, 38.261 - dest_size); 38.262 + dest_size, dest_uninitialized); 38.263 if (didit) { 38.264 // Present the results of the block-copying fast call. 
38.265 result_region->init_req(bcopy_path, control()); 38.266 @@ -4774,7 +4795,7 @@ 38.267 Node* cv = generate_checkcast_arraycopy(adr_type, 38.268 dest_elem_klass, 38.269 src, src_offset, dest, dest_offset, 38.270 - ConvI2X(copy_length)); 38.271 + ConvI2X(copy_length), dest_uninitialized); 38.272 if (cv == NULL) cv = intcon(-1); // failure (no stub available) 38.273 checked_control = control(); 38.274 checked_i_o = i_o(); 38.275 @@ -4797,7 +4818,7 @@ 38.276 PreserveJVMState pjvms(this); 38.277 generate_unchecked_arraycopy(adr_type, copy_type, disjoint_bases, 38.278 src, src_offset, dest, dest_offset, 38.279 - ConvI2X(copy_length)); 38.280 + ConvI2X(copy_length), dest_uninitialized); 38.281 38.282 // Present the results of the fast call. 38.283 result_region->init_req(fast_path, control()); 38.284 @@ -4876,7 +4897,7 @@ 38.285 set_memory(slow_mem, adr_type); 38.286 set_i_o(slow_i_o); 38.287 38.288 - if (must_clear_dest) { 38.289 + if (dest_uninitialized) { 38.290 generate_clear_array(adr_type, dest, basic_elem_type, 38.291 intcon(0), NULL, 38.292 alloc->in(AllocateNode::AllocSize)); 38.293 @@ -4884,7 +4905,7 @@ 38.294 38.295 generate_slow_arraycopy(adr_type, 38.296 src, src_offset, dest, dest_offset, 38.297 - copy_length); 38.298 + copy_length, /*dest_uninitialized*/false); 38.299 38.300 result_region->init_req(slow_call_path, control()); 38.301 result_i_o ->init_req(slow_call_path, i_o()); 38.302 @@ -5128,7 +5149,7 @@ 38.303 AllocateNode* alloc, 38.304 Node* src, Node* src_offset, 38.305 Node* dest, Node* dest_offset, 38.306 - Node* dest_size) { 38.307 + Node* dest_size, bool dest_uninitialized) { 38.308 // See if there is an advantage from block transfer. 
38.309 int scale = exact_log2(type2aelembytes(basic_elem_type)); 38.310 if (scale >= LogBytesPerLong) 38.311 @@ -5173,7 +5194,7 @@ 38.312 38.313 bool disjoint_bases = true; // since alloc != NULL 38.314 generate_unchecked_arraycopy(adr_type, T_LONG, disjoint_bases, 38.315 - sptr, NULL, dptr, NULL, countx); 38.316 + sptr, NULL, dptr, NULL, countx, dest_uninitialized); 38.317 38.318 return true; 38.319 } 38.320 @@ -5186,7 +5207,8 @@ 38.321 LibraryCallKit::generate_slow_arraycopy(const TypePtr* adr_type, 38.322 Node* src, Node* src_offset, 38.323 Node* dest, Node* dest_offset, 38.324 - Node* copy_length) { 38.325 + Node* copy_length, bool dest_uninitialized) { 38.326 + assert(!dest_uninitialized, "Invariant"); 38.327 Node* call = make_runtime_call(RC_NO_LEAF | RC_UNCOMMON, 38.328 OptoRuntime::slow_arraycopy_Type(), 38.329 OptoRuntime::slow_arraycopy_Java(), 38.330 @@ -5204,10 +5226,10 @@ 38.331 Node* dest_elem_klass, 38.332 Node* src, Node* src_offset, 38.333 Node* dest, Node* dest_offset, 38.334 - Node* copy_length) { 38.335 + Node* copy_length, bool dest_uninitialized) { 38.336 if (stopped()) return NULL; 38.337 38.338 - address copyfunc_addr = StubRoutines::checkcast_arraycopy(); 38.339 + address copyfunc_addr = StubRoutines::checkcast_arraycopy(dest_uninitialized); 38.340 if (copyfunc_addr == NULL) { // Stub was not generated, go slow path. 38.341 return NULL; 38.342 } 38.343 @@ -5245,9 +5267,9 @@ 38.344 LibraryCallKit::generate_generic_arraycopy(const TypePtr* adr_type, 38.345 Node* src, Node* src_offset, 38.346 Node* dest, Node* dest_offset, 38.347 - Node* copy_length) { 38.348 + Node* copy_length, bool dest_uninitialized) { 38.349 + assert(!dest_uninitialized, "Invariant"); 38.350 if (stopped()) return NULL; 38.351 - 38.352 address copyfunc_addr = StubRoutines::generic_arraycopy(); 38.353 if (copyfunc_addr == NULL) { // Stub was not generated, go slow path. 
38.354 return NULL; 38.355 @@ -5268,7 +5290,7 @@ 38.356 bool disjoint_bases, 38.357 Node* src, Node* src_offset, 38.358 Node* dest, Node* dest_offset, 38.359 - Node* copy_length) { 38.360 + Node* copy_length, bool dest_uninitialized) { 38.361 if (stopped()) return; // nothing to do 38.362 38.363 Node* src_start = src; 38.364 @@ -5283,7 +5305,7 @@ 38.365 const char* copyfunc_name = "arraycopy"; 38.366 address copyfunc_addr = 38.367 basictype2arraycopy(basic_elem_type, src_offset, dest_offset, 38.368 - disjoint_bases, copyfunc_name); 38.369 + disjoint_bases, copyfunc_name, dest_uninitialized); 38.370 38.371 // Call it. Note that the count_ix value is not scaled to a byte-size. 38.372 make_runtime_call(RC_LEAF|RC_NO_FP,
39.1 --- a/src/share/vm/opto/memnode.cpp Thu Mar 03 15:13:18 2011 -0800 39.2 +++ b/src/share/vm/opto/memnode.cpp Fri Mar 04 14:06:16 2011 -0800 39.3 @@ -1,5 +1,5 @@ 39.4 /* 39.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 39.6 + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. 39.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 39.8 * 39.9 * This code is free software; you can redistribute it and/or modify it 39.10 @@ -1559,21 +1559,24 @@ 39.11 phase->C->has_unsafe_access(), 39.12 "Field accesses must be precise" ); 39.13 // For oop loads, we expect the _type to be precise 39.14 - if (OptimizeStringConcat && klass == phase->C->env()->String_klass() && 39.15 + if (klass == phase->C->env()->String_klass() && 39.16 adr->is_AddP() && off != Type::OffsetBot) { 39.17 - // For constant Strings treat the fields as compile time constants. 39.18 + // For constant Strings treat the final fields as compile time constants. 
39.19 Node* base = adr->in(AddPNode::Base); 39.20 const TypeOopPtr* t = phase->type(base)->isa_oopptr(); 39.21 if (t != NULL && t->singleton()) { 39.22 - ciObject* string = t->const_oop(); 39.23 - ciConstant constant = string->as_instance()->field_value_by_offset(off); 39.24 - if (constant.basic_type() == T_INT) { 39.25 - return TypeInt::make(constant.as_int()); 39.26 - } else if (constant.basic_type() == T_ARRAY) { 39.27 - if (adr->bottom_type()->is_ptr_to_narrowoop()) { 39.28 - return TypeNarrowOop::make_from_constant(constant.as_object()); 39.29 - } else { 39.30 - return TypeOopPtr::make_from_constant(constant.as_object()); 39.31 + ciField* field = phase->C->env()->String_klass()->get_field_by_offset(off, false); 39.32 + if (field != NULL && field->is_final()) { 39.33 + ciObject* string = t->const_oop(); 39.34 + ciConstant constant = string->as_instance()->field_value(field); 39.35 + if (constant.basic_type() == T_INT) { 39.36 + return TypeInt::make(constant.as_int()); 39.37 + } else if (constant.basic_type() == T_ARRAY) { 39.38 + if (adr->bottom_type()->is_ptr_to_narrowoop()) { 39.39 + return TypeNarrowOop::make_from_constant(constant.as_object()); 39.40 + } else { 39.41 + return TypeOopPtr::make_from_constant(constant.as_object()); 39.42 + } 39.43 } 39.44 } 39.45 } 39.46 @@ -4077,6 +4080,7 @@ 39.47 n = base_memory(); 39.48 assert(Node::in_dump() 39.49 || n == NULL || n->bottom_type() == Type::TOP 39.50 + || n->adr_type() == NULL // address is TOP 39.51 || n->adr_type() == TypePtr::BOTTOM 39.52 || n->adr_type() == TypeRawPtr::BOTTOM 39.53 || Compile::current()->AliasLevel() == 0,
40.1 --- a/src/share/vm/prims/jvmtiExport.cpp Thu Mar 03 15:13:18 2011 -0800 40.2 +++ b/src/share/vm/prims/jvmtiExport.cpp Fri Mar 04 14:06:16 2011 -0800 40.3 @@ -1805,6 +1805,10 @@ 40.4 40.5 void JvmtiExport::post_dynamic_code_generated_internal(const char *name, const void *code_begin, const void *code_end) { 40.6 JavaThread* thread = JavaThread::current(); 40.7 + // In theory everyone coming thru here is in_vm but we need to be certain 40.8 + // because a callee will do a vm->native transition 40.9 + ThreadInVMfromUnknown __tiv; 40.10 + 40.11 EVT_TRIG_TRACE(JVMTI_EVENT_DYNAMIC_CODE_GENERATED, 40.12 ("JVMTI [%s] method dynamic code generated event triggered", 40.13 JvmtiTrace::safe_get_thread_name(thread))); 40.14 @@ -1826,19 +1830,18 @@ 40.15 } 40.16 40.17 void JvmtiExport::post_dynamic_code_generated(const char *name, const void *code_begin, const void *code_end) { 40.18 - // In theory everyone coming thru here is in_vm but we need to be certain 40.19 - // because a callee will do a vm->native transition 40.20 - ThreadInVMfromUnknown __tiv; 40.21 jvmtiPhase phase = JvmtiEnv::get_phase(); 40.22 if (phase == JVMTI_PHASE_PRIMORDIAL || phase == JVMTI_PHASE_START) { 40.23 post_dynamic_code_generated_internal(name, code_begin, code_end); 40.24 - return; 40.25 + } else { 40.26 + // It may not be safe to post the event from this thread. Defer all 40.27 + // postings to the service thread so that it can perform them in a safe 40.28 + // context and in-order. 40.29 + MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag); 40.30 + JvmtiDeferredEvent event = JvmtiDeferredEvent::dynamic_code_generated_event( 40.31 + name, code_begin, code_end); 40.32 + JvmtiDeferredEventQueue::enqueue(event); 40.33 } 40.34 - 40.35 - // Blocks until everything now in the queue has been posted 40.36 - JvmtiDeferredEventQueue::flush_queue(Thread::current()); 40.37 - 40.38 - post_dynamic_code_generated_internal(name, code_begin, code_end); 40.39 } 40.40 40.41
41.1 --- a/src/share/vm/prims/jvmtiExport.hpp Thu Mar 03 15:13:18 2011 -0800 41.2 +++ b/src/share/vm/prims/jvmtiExport.hpp Fri Mar 04 14:06:16 2011 -0800 41.3 @@ -140,12 +140,12 @@ 41.4 char sig_type, jvalue *value); 41.5 41.6 41.7 - private: 41.8 // posts a DynamicCodeGenerated event (internal/private implementation). 41.9 // The public post_dynamic_code_generated* functions make use of the 41.10 - // internal implementation. 41.11 + // internal implementation. Also called from JvmtiDeferredEvent::post() 41.12 static void post_dynamic_code_generated_internal(const char *name, const void *code_begin, const void *code_end) KERNEL_RETURN; 41.13 41.14 + private: 41.15 41.16 // GenerateEvents support to allow posting of CompiledMethodLoad and 41.17 // DynamicCodeGenerated events for a given environment.
42.1 --- a/src/share/vm/prims/jvmtiImpl.cpp Thu Mar 03 15:13:18 2011 -0800 42.2 +++ b/src/share/vm/prims/jvmtiImpl.cpp Fri Mar 04 14:06:16 2011 -0800 42.3 @@ -918,7 +918,7 @@ 42.4 JvmtiDeferredEvent JvmtiDeferredEvent::compiled_method_load_event( 42.5 nmethod* nm) { 42.6 JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_COMPILED_METHOD_LOAD); 42.7 - event.set_compiled_method_load(nm); 42.8 + event._event_data.compiled_method_load = nm; 42.9 nmethodLocker::lock_nmethod(nm); // will be unlocked when posted 42.10 return event; 42.11 } 42.12 @@ -926,23 +926,39 @@ 42.13 JvmtiDeferredEvent JvmtiDeferredEvent::compiled_method_unload_event( 42.14 jmethodID id, const void* code) { 42.15 JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_COMPILED_METHOD_UNLOAD); 42.16 - event.set_compiled_method_unload(id, code); 42.17 + event._event_data.compiled_method_unload.method_id = id; 42.18 + event._event_data.compiled_method_unload.code_begin = code; 42.19 + return event; 42.20 +} 42.21 +JvmtiDeferredEvent JvmtiDeferredEvent::dynamic_code_generated_event( 42.22 + const char* name, const void* code_begin, const void* code_end) { 42.23 + JvmtiDeferredEvent event = JvmtiDeferredEvent(TYPE_DYNAMIC_CODE_GENERATED); 42.24 + event._event_data.dynamic_code_generated.name = name; 42.25 + event._event_data.dynamic_code_generated.code_begin = code_begin; 42.26 + event._event_data.dynamic_code_generated.code_end = code_end; 42.27 return event; 42.28 } 42.29 42.30 void JvmtiDeferredEvent::post() { 42.31 + assert(ServiceThread::is_service_thread(Thread::current()), 42.32 + "Service thread must post enqueued events"); 42.33 switch(_type) { 42.34 - case TYPE_COMPILED_METHOD_LOAD: 42.35 - JvmtiExport::post_compiled_method_load(compiled_method_load()); 42.36 - nmethodLocker::unlock_nmethod(compiled_method_load()); 42.37 + case TYPE_COMPILED_METHOD_LOAD: { 42.38 + nmethod* nm = _event_data.compiled_method_load; 42.39 + JvmtiExport::post_compiled_method_load(nm); 42.40 + 
nmethodLocker::unlock_nmethod(nm); 42.41 break; 42.42 + } 42.43 case TYPE_COMPILED_METHOD_UNLOAD: 42.44 JvmtiExport::post_compiled_method_unload( 42.45 - compiled_method_unload_method_id(), 42.46 - compiled_method_unload_code_begin()); 42.47 + _event_data.compiled_method_unload.method_id, 42.48 + _event_data.compiled_method_unload.code_begin); 42.49 break; 42.50 - case TYPE_FLUSH: 42.51 - JvmtiDeferredEventQueue::flush_complete(flush_state_addr()); 42.52 + case TYPE_DYNAMIC_CODE_GENERATED: 42.53 + JvmtiExport::post_dynamic_code_generated_internal( 42.54 + _event_data.dynamic_code_generated.name, 42.55 + _event_data.dynamic_code_generated.code_begin, 42.56 + _event_data.dynamic_code_generated.code_end); 42.57 break; 42.58 default: 42.59 ShouldNotReachHere(); 42.60 @@ -1065,54 +1081,4 @@ 42.61 } 42.62 } 42.63 42.64 -enum { 42.65 - // Random - used for debugging 42.66 - FLUSHING = 0x50403020, 42.67 - FLUSHED = 0x09080706 42.68 -}; 42.69 - 42.70 -void JvmtiDeferredEventQueue::flush_queue(Thread* thread) { 42.71 - 42.72 - volatile int flush_state = FLUSHING; 42.73 - 42.74 - JvmtiDeferredEvent flush(JvmtiDeferredEvent::TYPE_FLUSH); 42.75 - flush.set_flush_state_addr((int*)&flush_state); 42.76 - 42.77 - if (ServiceThread::is_service_thread(thread)) { 42.78 - // If we are the service thread we have to post all preceding events 42.79 - // Use the flush event as a token to indicate when we can stop 42.80 - JvmtiDeferredEvent event; 42.81 - { 42.82 - MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag); 42.83 - enqueue(flush); 42.84 - event = dequeue(); 42.85 - } 42.86 - while (!event.is_flush_event() || 42.87 - event.flush_state_addr() != &flush_state) { 42.88 - event.post(); 42.89 - { 42.90 - MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag); 42.91 - event = dequeue(); 42.92 - } 42.93 - } 42.94 - } else { 42.95 - // Wake up the service thread so it will process events. 
When it gets 42.96 - // to the flush event it will set 'flush_complete' and notify us. 42.97 - MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag); 42.98 - enqueue(flush); 42.99 - while (flush_state != FLUSHED) { 42.100 - assert(flush_state == FLUSHING || flush_state == FLUSHED, 42.101 - "only valid values for this"); 42.102 - Service_lock->wait(Mutex::_no_safepoint_check_flag); 42.103 - } 42.104 - } 42.105 -} 42.106 - 42.107 -void JvmtiDeferredEventQueue::flush_complete(int* state_addr) { 42.108 - assert(state_addr != NULL && *state_addr == FLUSHING, "must be"); 42.109 - MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag); 42.110 - *state_addr = FLUSHED; 42.111 - Service_lock->notify_all(); 42.112 -} 42.113 - 42.114 #endif // ndef KERNEL
43.1 --- a/src/share/vm/prims/jvmtiImpl.hpp Thu Mar 03 15:13:18 2011 -0800 43.2 +++ b/src/share/vm/prims/jvmtiImpl.hpp Fri Mar 04 14:06:16 2011 -0800 43.3 @@ -451,7 +451,7 @@ 43.4 TYPE_NONE, 43.5 TYPE_COMPILED_METHOD_LOAD, 43.6 TYPE_COMPILED_METHOD_UNLOAD, 43.7 - TYPE_FLUSH // pseudo-event used to implement flush_queue() 43.8 + TYPE_DYNAMIC_CODE_GENERATED 43.9 } Type; 43.10 43.11 Type _type; 43.12 @@ -461,49 +461,15 @@ 43.13 jmethodID method_id; 43.14 const void* code_begin; 43.15 } compiled_method_unload; 43.16 - int* flush_state_addr; 43.17 + struct { 43.18 + const char* name; 43.19 + const void* code_begin; 43.20 + const void* code_end; 43.21 + } dynamic_code_generated; 43.22 } _event_data; 43.23 43.24 JvmtiDeferredEvent(Type t) : _type(t) {} 43.25 43.26 - void set_compiled_method_load(nmethod* nm) { 43.27 - assert(_type == TYPE_COMPILED_METHOD_LOAD, "must be"); 43.28 - _event_data.compiled_method_load = nm; 43.29 - } 43.30 - 43.31 - nmethod* compiled_method_load() const { 43.32 - assert(_type == TYPE_COMPILED_METHOD_LOAD, "must be"); 43.33 - return _event_data.compiled_method_load; 43.34 - } 43.35 - 43.36 - void set_compiled_method_unload(jmethodID id, const void* code) { 43.37 - assert(_type == TYPE_COMPILED_METHOD_UNLOAD, "must be"); 43.38 - _event_data.compiled_method_unload.method_id = id; 43.39 - _event_data.compiled_method_unload.code_begin = code; 43.40 - } 43.41 - 43.42 - jmethodID compiled_method_unload_method_id() const { 43.43 - assert(_type == TYPE_COMPILED_METHOD_UNLOAD, "must be"); 43.44 - return _event_data.compiled_method_unload.method_id; 43.45 - } 43.46 - 43.47 - const void* compiled_method_unload_code_begin() const { 43.48 - assert(_type == TYPE_COMPILED_METHOD_UNLOAD, "must be"); 43.49 - return _event_data.compiled_method_unload.code_begin; 43.50 - } 43.51 - 43.52 - bool is_flush_event() const { return _type == TYPE_FLUSH; } 43.53 - 43.54 - int* flush_state_addr() const { 43.55 - assert(is_flush_event(), "must be"); 43.56 - return 
_event_data.flush_state_addr; 43.57 - } 43.58 - 43.59 - void set_flush_state_addr(int* flag) { 43.60 - assert(is_flush_event(), "must be"); 43.61 - _event_data.flush_state_addr = flag; 43.62 - } 43.63 - 43.64 public: 43.65 43.66 JvmtiDeferredEvent() : _type(TYPE_NONE) {} 43.67 @@ -513,6 +479,9 @@ 43.68 KERNEL_RETURN_(JvmtiDeferredEvent()); 43.69 static JvmtiDeferredEvent compiled_method_unload_event( 43.70 jmethodID id, const void* code) KERNEL_RETURN_(JvmtiDeferredEvent()); 43.71 + static JvmtiDeferredEvent dynamic_code_generated_event( 43.72 + const char* name, const void* begin, const void* end) 43.73 + KERNEL_RETURN_(JvmtiDeferredEvent()); 43.74 43.75 // Actually posts the event. 43.76 void post() KERNEL_RETURN; 43.77 @@ -548,25 +517,12 @@ 43.78 // Transfers events from the _pending_list to the _queue. 43.79 static void process_pending_events() KERNEL_RETURN; 43.80 43.81 - static void flush_complete(int* flush_state) KERNEL_RETURN; 43.82 - 43.83 public: 43.84 // Must be holding Service_lock when calling these 43.85 static bool has_events() KERNEL_RETURN_(false); 43.86 static void enqueue(const JvmtiDeferredEvent& event) KERNEL_RETURN; 43.87 static JvmtiDeferredEvent dequeue() KERNEL_RETURN_(JvmtiDeferredEvent()); 43.88 43.89 - // This call blocks until all events enqueued prior to this call 43.90 - // have been posted. The Service_lock is acquired and waited upon. 43.91 - // 43.92 - // Implemented by creating a "flush" event and placing it in the queue. 43.93 - // When the flush event is "posted" it will call flush_complete(), which 43.94 - // will release the caller. 43.95 - // 43.96 - // Can be called by any thread (maybe even the service thread itself). 43.97 - // Not necessary for the caller to be a JavaThread. 43.98 - static void flush_queue(Thread* current) KERNEL_RETURN; 43.99 - 43.100 // Used to enqueue events without using a lock, for times (such as during 43.101 // safepoint) when we can't or don't want to lock the Service_lock. 43.102 //
44.1 --- a/src/share/vm/prims/unsafe.cpp Thu Mar 03 15:13:18 2011 -0800 44.2 +++ b/src/share/vm/prims/unsafe.cpp Fri Mar 04 14:06:16 2011 -0800 44.3 @@ -110,6 +110,8 @@ 44.4 44.5 inline void* index_oop_from_field_offset_long(oop p, jlong field_offset) { 44.6 jlong byte_offset = field_offset_to_byte_offset(field_offset); 44.7 + // Don't allow unsafe to be used to read or write the header word of oops 44.8 + assert(p == NULL || field_offset >= oopDesc::header_size(), "offset must be outside of header"); 44.9 #ifdef ASSERT 44.10 if (p != NULL) { 44.11 assert(byte_offset >= 0 && byte_offset <= (jlong)MAX_OBJECT_SIZE, "sane offset");
45.1 --- a/src/share/vm/runtime/arguments.cpp Thu Mar 03 15:13:18 2011 -0800 45.2 +++ b/src/share/vm/runtime/arguments.cpp Fri Mar 04 14:06:16 2011 -0800 45.3 @@ -78,6 +78,7 @@ 45.4 const char* Arguments::_java_vendor_url_bug = DEFAULT_VENDOR_URL_BUG; 45.5 const char* Arguments::_sun_java_launcher = DEFAULT_JAVA_LAUNCHER; 45.6 int Arguments::_sun_java_launcher_pid = -1; 45.7 +bool Arguments::_created_by_gamma_launcher = false; 45.8 45.9 // These parameters are reset in method parse_vm_init_args(JavaVMInitArgs*) 45.10 bool Arguments::_AlwaysCompileLoopMethods = AlwaysCompileLoopMethods; 45.11 @@ -1656,6 +1657,9 @@ 45.12 45.13 void Arguments::process_java_launcher_argument(const char* launcher, void* extra_info) { 45.14 _sun_java_launcher = strdup(launcher); 45.15 + if (strcmp("gamma", _sun_java_launcher) == 0) { 45.16 + _created_by_gamma_launcher = true; 45.17 + } 45.18 } 45.19 45.20 bool Arguments::created_by_java_launcher() { 45.21 @@ -1663,6 +1667,10 @@ 45.22 return strcmp(DEFAULT_JAVA_LAUNCHER, _sun_java_launcher) != 0; 45.23 } 45.24 45.25 +bool Arguments::created_by_gamma_launcher() { 45.26 + return _created_by_gamma_launcher; 45.27 +} 45.28 + 45.29 //=========================================================================================================== 45.30 // Parsing of main arguments 45.31 45.32 @@ -2790,10 +2798,6 @@ 45.33 if (!FLAG_IS_DEFAULT(OptoLoopAlignment) && FLAG_IS_DEFAULT(MaxLoopPad)) { 45.34 FLAG_SET_DEFAULT(MaxLoopPad, OptoLoopAlignment-1); 45.35 } 45.36 - // Temporary disable bulk zeroing reduction with G1. See CR 6627983. 
45.37 - if (UseG1GC) { 45.38 - FLAG_SET_DEFAULT(ReduceBulkZeroing, false); 45.39 - } 45.40 #endif 45.41 45.42 // If we are running in a headless jre, force java.awt.headless property 45.43 @@ -3155,6 +3159,16 @@ 45.44 } 45.45 } 45.46 45.47 + // set PauseAtExit if the gamma launcher was used and a debugger is attached 45.48 + // but only if not already set on the commandline 45.49 + if (Arguments::created_by_gamma_launcher() && os::is_debugger_attached()) { 45.50 + bool set = false; 45.51 + CommandLineFlags::wasSetOnCmdline("PauseAtExit", &set); 45.52 + if (!set) { 45.53 + FLAG_SET_DEFAULT(PauseAtExit, true); 45.54 + } 45.55 + } 45.56 + 45.57 return JNI_OK; 45.58 } 45.59
46.1 --- a/src/share/vm/runtime/arguments.hpp Thu Mar 03 15:13:18 2011 -0800 46.2 +++ b/src/share/vm/runtime/arguments.hpp Fri Mar 04 14:06:16 2011 -0800 46.3 @@ -1,5 +1,5 @@ 46.4 /* 46.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 46.6 + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. 46.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 46.8 * 46.9 * This code is free software; you can redistribute it and/or modify it 46.10 @@ -257,6 +257,9 @@ 46.11 // sun.java.launcher.pid, private property 46.12 static int _sun_java_launcher_pid; 46.13 46.14 + // was this VM created by the gamma launcher 46.15 + static bool _created_by_gamma_launcher; 46.16 + 46.17 // Option flags 46.18 static bool _has_profile; 46.19 static bool _has_alloc_profile; 46.20 @@ -444,6 +447,8 @@ 46.21 static const char* sun_java_launcher() { return _sun_java_launcher; } 46.22 // Was VM created by a Java launcher? 46.23 static bool created_by_java_launcher(); 46.24 + // Was VM created by the gamma Java launcher? 46.25 + static bool created_by_gamma_launcher(); 46.26 // -Dsun.java.launcher.pid 46.27 static int sun_java_launcher_pid() { return _sun_java_launcher_pid; } 46.28
47.1 --- a/src/share/vm/runtime/globals.hpp Thu Mar 03 15:13:18 2011 -0800 47.2 +++ b/src/share/vm/runtime/globals.hpp Fri Mar 04 14:06:16 2011 -0800 47.3 @@ -2610,9 +2610,6 @@ 47.4 develop(bool, CompileTheWorldPreloadClasses, true, \ 47.5 "Preload all classes used by a class before start loading") \ 47.6 \ 47.7 - notproduct(bool, CompileTheWorldIgnoreInitErrors, false, \ 47.8 - "Compile all methods although class initializer failed") \ 47.9 - \ 47.10 notproduct(intx, CompileTheWorldSafepointInterval, 100, \ 47.11 "Force a safepoint every n compiles so sweeper can keep up") \ 47.12 \ 47.13 @@ -3733,6 +3730,9 @@ 47.14 "The file to create and for whose removal to await when pausing " \ 47.15 "at startup. (default: ./vm.paused.<pid>)") \ 47.16 \ 47.17 + diagnostic(bool, PauseAtExit, false, \ 47.18 + "Pause and wait for keypress on exit if a debugger is attached") \ 47.19 + \ 47.20 product(bool, ExtendedDTraceProbes, false, \ 47.21 "Enable performance-impacting dtrace probes") \ 47.22 \
48.1 --- a/src/share/vm/runtime/java.cpp Thu Mar 03 15:13:18 2011 -0800 48.2 +++ b/src/share/vm/runtime/java.cpp Fri Mar 04 14:06:16 2011 -0800 48.3 @@ -551,6 +551,7 @@ 48.4 48.5 void vm_direct_exit(int code) { 48.6 notify_vm_shutdown(); 48.7 + os::wait_for_keypress_at_exit(); 48.8 ::exit(code); 48.9 } 48.10 48.11 @@ -577,11 +578,13 @@ 48.12 void vm_shutdown() 48.13 { 48.14 vm_perform_shutdown_actions(); 48.15 + os::wait_for_keypress_at_exit(); 48.16 os::shutdown(); 48.17 } 48.18 48.19 void vm_abort(bool dump_core) { 48.20 vm_perform_shutdown_actions(); 48.21 + os::wait_for_keypress_at_exit(); 48.22 os::abort(dump_core); 48.23 ShouldNotReachHere(); 48.24 }
49.1 --- a/src/share/vm/runtime/os.hpp Thu Mar 03 15:13:18 2011 -0800 49.2 +++ b/src/share/vm/runtime/os.hpp Fri Mar 04 14:06:16 2011 -0800 49.3 @@ -492,6 +492,12 @@ 49.4 static void print_location(outputStream* st, intptr_t x, bool verbose = false); 49.5 static size_t lasterror(char *buf, size_t len); 49.6 49.7 + // Determines whether the calling process is being debugged by a user-mode debugger. 49.8 + static bool is_debugger_attached(); 49.9 + 49.10 + // wait for a key press if PauseAtExit is set 49.11 + static void wait_for_keypress_at_exit(void); 49.12 + 49.13 // The following two functions are used by fatal error handler to trace 49.14 // native (C) frames. They are not part of frame.hpp/frame.cpp because 49.15 // frame.hpp/cpp assume thread is JavaThread, and also because different
50.1 --- a/src/share/vm/runtime/sharedRuntime.cpp Thu Mar 03 15:13:18 2011 -0800 50.2 +++ b/src/share/vm/runtime/sharedRuntime.cpp Fri Mar 04 14:06:16 2011 -0800 50.3 @@ -431,25 +431,24 @@ 50.4 // previous frame depending on the return address. 50.5 50.6 address SharedRuntime::raw_exception_handler_for_return_address(JavaThread* thread, address return_address) { 50.7 - assert(frame::verify_return_pc(return_address), "must be a return pc"); 50.8 - 50.9 - // Reset MethodHandle flag. 50.10 + assert(frame::verify_return_pc(return_address), err_msg("must be a return address: " INTPTR_FORMAT, return_address)); 50.11 + 50.12 + // Reset method handle flag. 50.13 thread->set_is_method_handle_return(false); 50.14 50.15 - // the fastest case first 50.16 + // The fastest case first 50.17 CodeBlob* blob = CodeCache::find_blob(return_address); 50.18 - if (blob != NULL && blob->is_nmethod()) { 50.19 - nmethod* code = (nmethod*)blob; 50.20 - assert(code != NULL, "nmethod must be present"); 50.21 - // Check if the return address is a MethodHandle call site. 50.22 - thread->set_is_method_handle_return(code->is_method_handle_return(return_address)); 50.23 + nmethod* nm = (blob != NULL) ? blob->as_nmethod_or_null() : NULL; 50.24 + if (nm != NULL) { 50.25 + // Set flag if return address is a method handle call site. 
50.26 + thread->set_is_method_handle_return(nm->is_method_handle_return(return_address)); 50.27 // native nmethods don't have exception handlers 50.28 - assert(!code->is_native_method(), "no exception handler"); 50.29 - assert(code->header_begin() != code->exception_begin(), "no exception handler"); 50.30 - if (code->is_deopt_pc(return_address)) { 50.31 + assert(!nm->is_native_method(), "no exception handler"); 50.32 + assert(nm->header_begin() != nm->exception_begin(), "no exception handler"); 50.33 + if (nm->is_deopt_pc(return_address)) { 50.34 return SharedRuntime::deopt_blob()->unpack_with_exception(); 50.35 } else { 50.36 - return code->exception_begin(); 50.37 + return nm->exception_begin(); 50.38 } 50.39 } 50.40 50.41 @@ -462,22 +461,9 @@ 50.42 return Interpreter::rethrow_exception_entry(); 50.43 } 50.44 50.45 - // Compiled code 50.46 - if (CodeCache::contains(return_address)) { 50.47 - CodeBlob* blob = CodeCache::find_blob(return_address); 50.48 - if (blob->is_nmethod()) { 50.49 - nmethod* code = (nmethod*)blob; 50.50 - assert(code != NULL, "nmethod must be present"); 50.51 - // Check if the return address is a MethodHandle call site. 
50.52 - thread->set_is_method_handle_return(code->is_method_handle_return(return_address)); 50.53 - assert(code->header_begin() != code->exception_begin(), "no exception handler"); 50.54 - return code->exception_begin(); 50.55 - } 50.56 - if (blob->is_runtime_stub()) { 50.57 - ShouldNotReachHere(); // callers are responsible for skipping runtime stub frames 50.58 - } 50.59 - } 50.60 + guarantee(blob == NULL || !blob->is_runtime_stub(), "caller should have skipped stub"); 50.61 guarantee(!VtableStubs::contains(return_address), "NULL exceptions in vtables should have been handled already!"); 50.62 + 50.63 #ifndef PRODUCT 50.64 { ResourceMark rm; 50.65 tty->print_cr("No exception handler found for exception at " INTPTR_FORMAT " - potential problems:", return_address); 50.66 @@ -485,6 +471,7 @@ 50.67 tty->print_cr("b) other problem"); 50.68 } 50.69 #endif // PRODUCT 50.70 + 50.71 ShouldNotReachHere(); 50.72 return NULL; 50.73 }
51.1 --- a/src/share/vm/runtime/stubRoutines.cpp Thu Mar 03 15:13:18 2011 -0800 51.2 +++ b/src/share/vm/runtime/stubRoutines.cpp Fri Mar 04 14:06:16 2011 -0800 51.3 @@ -1,5 +1,5 @@ 51.4 /* 51.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 51.6 + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. 51.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 51.8 * 51.9 * This code is free software; you can redistribute it and/or modify it 51.10 @@ -80,30 +80,36 @@ 51.11 jint StubRoutines::_fpu_subnormal_bias2[3] = { 0, 0, 0 }; 51.12 51.13 // Compiled code entry points default values 51.14 -// The dafault functions don't have separate disjoint versions. 51.15 +// The default functions don't have separate disjoint versions. 51.16 address StubRoutines::_jbyte_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::jbyte_copy); 51.17 address StubRoutines::_jshort_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::jshort_copy); 51.18 address StubRoutines::_jint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::jint_copy); 51.19 address StubRoutines::_jlong_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::jlong_copy); 51.20 address StubRoutines::_oop_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::oop_copy); 51.21 +address StubRoutines::_oop_arraycopy_uninit = CAST_FROM_FN_PTR(address, StubRoutines::oop_copy_uninit); 51.22 address StubRoutines::_jbyte_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::jbyte_copy); 51.23 address StubRoutines::_jshort_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::jshort_copy); 51.24 address StubRoutines::_jint_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::jint_copy); 51.25 address StubRoutines::_jlong_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::jlong_copy); 51.26 address StubRoutines::_oop_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::oop_copy); 51.27 +address 
StubRoutines::_oop_disjoint_arraycopy_uninit = CAST_FROM_FN_PTR(address, StubRoutines::oop_copy_uninit); 51.28 51.29 address StubRoutines::_arrayof_jbyte_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jbyte_copy); 51.30 address StubRoutines::_arrayof_jshort_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jshort_copy); 51.31 address StubRoutines::_arrayof_jint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jint_copy); 51.32 address StubRoutines::_arrayof_jlong_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jlong_copy); 51.33 address StubRoutines::_arrayof_oop_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_oop_copy); 51.34 +address StubRoutines::_arrayof_oop_arraycopy_uninit = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_oop_copy_uninit); 51.35 address StubRoutines::_arrayof_jbyte_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jbyte_copy); 51.36 address StubRoutines::_arrayof_jshort_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jshort_copy); 51.37 address StubRoutines::_arrayof_jint_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jint_copy); 51.38 address StubRoutines::_arrayof_jlong_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_jlong_copy); 51.39 -address StubRoutines::_arrayof_oop_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_oop_copy); 51.40 +address StubRoutines::_arrayof_oop_disjoint_arraycopy = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_oop_copy); 51.41 +address StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit = CAST_FROM_FN_PTR(address, StubRoutines::arrayof_oop_copy_uninit); 51.42 + 51.43 51.44 address StubRoutines::_checkcast_arraycopy = NULL; 51.45 +address StubRoutines::_checkcast_arraycopy_uninit = NULL; 51.46 address StubRoutines::_unsafe_arraycopy = NULL; 51.47 address StubRoutines::_generic_arraycopy = NULL; 51.48 51.49 @@ -282,12 +288,12 @@ 51.50 // Default versions 
of arraycopy functions 51.51 // 51.52 51.53 -static void gen_arraycopy_barrier_pre(oop* dest, size_t count) { 51.54 +static void gen_arraycopy_barrier_pre(oop* dest, size_t count, bool dest_uninitialized) { 51.55 assert(count != 0, "count should be non-zero"); 51.56 assert(count <= (size_t)max_intx, "count too large"); 51.57 BarrierSet* bs = Universe::heap()->barrier_set(); 51.58 assert(bs->has_write_ref_array_pre_opt(), "Must have pre-barrier opt"); 51.59 - bs->write_ref_array_pre(dest, (int)count); 51.60 + bs->write_ref_array_pre(dest, (int)count, dest_uninitialized); 51.61 } 51.62 51.63 static void gen_arraycopy_barrier(oop* dest, size_t count) { 51.64 @@ -330,7 +336,17 @@ 51.65 SharedRuntime::_oop_array_copy_ctr++; // Slow-path oop array copy 51.66 #endif // !PRODUCT 51.67 assert(count != 0, "count should be non-zero"); 51.68 - gen_arraycopy_barrier_pre(dest, count); 51.69 + gen_arraycopy_barrier_pre(dest, count, /*dest_uninitialized*/false); 51.70 + Copy::conjoint_oops_atomic(src, dest, count); 51.71 + gen_arraycopy_barrier(dest, count); 51.72 +JRT_END 51.73 + 51.74 +JRT_LEAF(void, StubRoutines::oop_copy_uninit(oop* src, oop* dest, size_t count)) 51.75 +#ifndef PRODUCT 51.76 + SharedRuntime::_oop_array_copy_ctr++; // Slow-path oop array copy 51.77 +#endif // !PRODUCT 51.78 + assert(count != 0, "count should be non-zero"); 51.79 + gen_arraycopy_barrier_pre(dest, count, /*dest_uninitialized*/true); 51.80 Copy::conjoint_oops_atomic(src, dest, count); 51.81 gen_arraycopy_barrier(dest, count); 51.82 JRT_END 51.83 @@ -368,11 +384,20 @@ 51.84 SharedRuntime::_oop_array_copy_ctr++; // Slow-path oop array copy 51.85 #endif // !PRODUCT 51.86 assert(count != 0, "count should be non-zero"); 51.87 - gen_arraycopy_barrier_pre((oop *) dest, count); 51.88 + gen_arraycopy_barrier_pre((oop *) dest, count, /*dest_uninitialized*/false); 51.89 Copy::arrayof_conjoint_oops(src, dest, count); 51.90 gen_arraycopy_barrier((oop *) dest, count); 51.91 JRT_END 51.92 51.93 +JRT_LEAF(void, 
StubRoutines::arrayof_oop_copy_uninit(HeapWord* src, HeapWord* dest, size_t count)) 51.94 +#ifndef PRODUCT 51.95 + SharedRuntime::_oop_array_copy_ctr++; // Slow-path oop array copy 51.96 +#endif // !PRODUCT 51.97 + assert(count != 0, "count should be non-zero"); 51.98 + gen_arraycopy_barrier_pre((oop *) dest, count, /*dest_uninitialized*/true); 51.99 + Copy::arrayof_conjoint_oops(src, dest, count); 51.100 + gen_arraycopy_barrier((oop *) dest, count); 51.101 +JRT_END 51.102 51.103 address StubRoutines::select_fill_function(BasicType t, bool aligned, const char* &name) { 51.104 #define RETURN_STUB(xxx_fill) { \
52.1 --- a/src/share/vm/runtime/stubRoutines.hpp Thu Mar 03 15:13:18 2011 -0800 52.2 +++ b/src/share/vm/runtime/stubRoutines.hpp Fri Mar 04 14:06:16 2011 -0800 52.3 @@ -1,5 +1,5 @@ 52.4 /* 52.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 52.6 + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. 52.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 52.8 * 52.9 * This code is free software; you can redistribute it and/or modify it 52.10 @@ -164,12 +164,12 @@ 52.11 static address _jshort_arraycopy; 52.12 static address _jint_arraycopy; 52.13 static address _jlong_arraycopy; 52.14 - static address _oop_arraycopy; 52.15 + static address _oop_arraycopy, _oop_arraycopy_uninit; 52.16 static address _jbyte_disjoint_arraycopy; 52.17 static address _jshort_disjoint_arraycopy; 52.18 static address _jint_disjoint_arraycopy; 52.19 static address _jlong_disjoint_arraycopy; 52.20 - static address _oop_disjoint_arraycopy; 52.21 + static address _oop_disjoint_arraycopy, _oop_disjoint_arraycopy_uninit; 52.22 52.23 // arraycopy operands aligned on zero'th element boundary 52.24 // These are identical to the ones aligned aligned on an 52.25 @@ -179,15 +179,15 @@ 52.26 static address _arrayof_jshort_arraycopy; 52.27 static address _arrayof_jint_arraycopy; 52.28 static address _arrayof_jlong_arraycopy; 52.29 - static address _arrayof_oop_arraycopy; 52.30 + static address _arrayof_oop_arraycopy, _arrayof_oop_arraycopy_uninit; 52.31 static address _arrayof_jbyte_disjoint_arraycopy; 52.32 static address _arrayof_jshort_disjoint_arraycopy; 52.33 static address _arrayof_jint_disjoint_arraycopy; 52.34 static address _arrayof_jlong_disjoint_arraycopy; 52.35 - static address _arrayof_oop_disjoint_arraycopy; 52.36 + static address _arrayof_oop_disjoint_arraycopy, _arrayof_oop_disjoint_arraycopy_uninit; 52.37 52.38 // these are recommended but optional: 52.39 - static address _checkcast_arraycopy; 52.40 + static 
address _checkcast_arraycopy, _checkcast_arraycopy_uninit; 52.41 static address _unsafe_arraycopy; 52.42 static address _generic_arraycopy; 52.43 52.44 @@ -286,26 +286,36 @@ 52.45 static address jshort_arraycopy() { return _jshort_arraycopy; } 52.46 static address jint_arraycopy() { return _jint_arraycopy; } 52.47 static address jlong_arraycopy() { return _jlong_arraycopy; } 52.48 - static address oop_arraycopy() { return _oop_arraycopy; } 52.49 + static address oop_arraycopy(bool dest_uninitialized = false) { 52.50 + return dest_uninitialized ? _oop_arraycopy_uninit : _oop_arraycopy; 52.51 + } 52.52 static address jbyte_disjoint_arraycopy() { return _jbyte_disjoint_arraycopy; } 52.53 static address jshort_disjoint_arraycopy() { return _jshort_disjoint_arraycopy; } 52.54 static address jint_disjoint_arraycopy() { return _jint_disjoint_arraycopy; } 52.55 static address jlong_disjoint_arraycopy() { return _jlong_disjoint_arraycopy; } 52.56 - static address oop_disjoint_arraycopy() { return _oop_disjoint_arraycopy; } 52.57 + static address oop_disjoint_arraycopy(bool dest_uninitialized = false) { 52.58 + return dest_uninitialized ? _oop_disjoint_arraycopy_uninit : _oop_disjoint_arraycopy; 52.59 + } 52.60 52.61 static address arrayof_jbyte_arraycopy() { return _arrayof_jbyte_arraycopy; } 52.62 static address arrayof_jshort_arraycopy() { return _arrayof_jshort_arraycopy; } 52.63 static address arrayof_jint_arraycopy() { return _arrayof_jint_arraycopy; } 52.64 static address arrayof_jlong_arraycopy() { return _arrayof_jlong_arraycopy; } 52.65 - static address arrayof_oop_arraycopy() { return _arrayof_oop_arraycopy; } 52.66 + static address arrayof_oop_arraycopy(bool dest_uninitialized = false) { 52.67 + return dest_uninitialized ? 
_arrayof_oop_arraycopy_uninit : _arrayof_oop_arraycopy; 52.68 + } 52.69 52.70 static address arrayof_jbyte_disjoint_arraycopy() { return _arrayof_jbyte_disjoint_arraycopy; } 52.71 static address arrayof_jshort_disjoint_arraycopy() { return _arrayof_jshort_disjoint_arraycopy; } 52.72 static address arrayof_jint_disjoint_arraycopy() { return _arrayof_jint_disjoint_arraycopy; } 52.73 static address arrayof_jlong_disjoint_arraycopy() { return _arrayof_jlong_disjoint_arraycopy; } 52.74 - static address arrayof_oop_disjoint_arraycopy() { return _arrayof_oop_disjoint_arraycopy; } 52.75 + static address arrayof_oop_disjoint_arraycopy(bool dest_uninitialized = false) { 52.76 + return dest_uninitialized ? _arrayof_oop_disjoint_arraycopy_uninit : _arrayof_oop_disjoint_arraycopy; 52.77 + } 52.78 52.79 - static address checkcast_arraycopy() { return _checkcast_arraycopy; } 52.80 + static address checkcast_arraycopy(bool dest_uninitialized = false) { 52.81 + return dest_uninitialized ? _checkcast_arraycopy_uninit : _checkcast_arraycopy; 52.82 + } 52.83 static address unsafe_arraycopy() { return _unsafe_arraycopy; } 52.84 static address generic_arraycopy() { return _generic_arraycopy; } 52.85 52.86 @@ -352,17 +362,19 @@ 52.87 // Default versions of the above arraycopy functions for platforms which do 52.88 // not have specialized versions 52.89 // 52.90 - static void jbyte_copy (jbyte* src, jbyte* dest, size_t count); 52.91 - static void jshort_copy(jshort* src, jshort* dest, size_t count); 52.92 - static void jint_copy (jint* src, jint* dest, size_t count); 52.93 - static void jlong_copy (jlong* src, jlong* dest, size_t count); 52.94 - static void oop_copy (oop* src, oop* dest, size_t count); 52.95 + static void jbyte_copy (jbyte* src, jbyte* dest, size_t count); 52.96 + static void jshort_copy (jshort* src, jshort* dest, size_t count); 52.97 + static void jint_copy (jint* src, jint* dest, size_t count); 52.98 + static void jlong_copy (jlong* src, jlong* dest, size_t count); 
52.99 + static void oop_copy (oop* src, oop* dest, size_t count); 52.100 + static void oop_copy_uninit(oop* src, oop* dest, size_t count); 52.101 52.102 - static void arrayof_jbyte_copy (HeapWord* src, HeapWord* dest, size_t count); 52.103 - static void arrayof_jshort_copy(HeapWord* src, HeapWord* dest, size_t count); 52.104 - static void arrayof_jint_copy (HeapWord* src, HeapWord* dest, size_t count); 52.105 - static void arrayof_jlong_copy (HeapWord* src, HeapWord* dest, size_t count); 52.106 - static void arrayof_oop_copy (HeapWord* src, HeapWord* dest, size_t count); 52.107 + static void arrayof_jbyte_copy (HeapWord* src, HeapWord* dest, size_t count); 52.108 + static void arrayof_jshort_copy (HeapWord* src, HeapWord* dest, size_t count); 52.109 + static void arrayof_jint_copy (HeapWord* src, HeapWord* dest, size_t count); 52.110 + static void arrayof_jlong_copy (HeapWord* src, HeapWord* dest, size_t count); 52.111 + static void arrayof_oop_copy (HeapWord* src, HeapWord* dest, size_t count); 52.112 + static void arrayof_oop_copy_uninit(HeapWord* src, HeapWord* dest, size_t count); 52.113 }; 52.114 52.115 #endif // SHARE_VM_RUNTIME_STUBROUTINES_HPP
53.1 --- a/src/share/vm/runtime/thread.cpp Thu Mar 03 15:13:18 2011 -0800 53.2 +++ b/src/share/vm/runtime/thread.cpp Fri Mar 04 14:06:16 2011 -0800 53.3 @@ -3644,6 +3644,7 @@ 53.4 if (ShowMessageBoxOnError && is_error_reported()) { 53.5 os::infinite_sleep(); 53.6 } 53.7 + os::wait_for_keypress_at_exit(); 53.8 53.9 if (JDK_Version::is_jdk12x_version()) { 53.10 // We are the last thread running, so check if finalizers should be run.
54.1 --- a/src/share/vm/utilities/globalDefinitions_gcc.hpp Thu Mar 03 15:13:18 2011 -0800 54.2 +++ b/src/share/vm/utilities/globalDefinitions_gcc.hpp Fri Mar 04 14:06:16 2011 -0800 54.3 @@ -77,6 +77,7 @@ 54.4 # endif 54.5 54.6 #ifdef LINUX 54.7 +#define __STDC_LIMIT_MACROS 54.8 #include <inttypes.h> 54.9 #include <signal.h> 54.10 #include <ucontext.h>
55.1 --- a/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp Thu Mar 03 15:13:18 2011 -0800 55.2 +++ b/src/share/vm/utilities/globalDefinitions_sparcWorks.hpp Fri Mar 04 14:06:16 2011 -0800 55.3 @@ -148,6 +148,17 @@ 55.4 #endif 55.5 #endif 55.6 55.7 +// On solaris 8, UINTPTR_MAX is defined as empty. 55.8 +// Everywhere else it's an actual value. 55.9 +#if UINTPTR_MAX - 1 == -1 55.10 +#undef UINTPTR_MAX 55.11 +#ifdef _LP64 55.12 +#define UINTPTR_MAX UINT64_MAX 55.13 +#else 55.14 +#define UINTPTR_MAX UINT32_MAX 55.15 +#endif /* ifdef _LP64 */ 55.16 +#endif 55.17 + 55.18 // Additional Java basic types 55.19 55.20 typedef unsigned char jubyte;
56.1 --- a/src/share/vm/utilities/globalDefinitions_visCPP.hpp Thu Mar 03 15:13:18 2011 -0800 56.2 +++ b/src/share/vm/utilities/globalDefinitions_visCPP.hpp Fri Mar 04 14:06:16 2011 -0800 56.3 @@ -41,6 +41,7 @@ 56.4 # include <stdio.h> // for va_list 56.5 # include <time.h> 56.6 # include <fcntl.h> 56.7 +# include <limits.h> 56.8 // Need this on windows to get the math constants (e.g., M_PI). 56.9 #define _USE_MATH_DEFINES 56.10 # include <math.h> 56.11 @@ -99,6 +100,14 @@ 56.12 typedef signed int ssize_t; 56.13 #endif 56.14 56.15 +#ifndef UINTPTR_MAX 56.16 +#ifdef _WIN64 56.17 +#define UINTPTR_MAX _UI64_MAX 56.18 +#else 56.19 +#define UINTPTR_MAX _UI32_MAX 56.20 +#endif 56.21 +#endif 56.22 + 56.23 //---------------------------------------------------------------------------------------------------- 56.24 // Additional Java basic types 56.25
57.1 --- a/src/share/vm/utilities/macros.hpp Thu Mar 03 15:13:18 2011 -0800 57.2 +++ b/src/share/vm/utilities/macros.hpp Fri Mar 04 14:06:16 2011 -0800 57.3 @@ -1,5 +1,5 @@ 57.4 /* 57.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 57.6 + * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. 57.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 57.8 * 57.9 * This code is free software; you can redistribute it and/or modify it 57.10 @@ -161,6 +161,14 @@ 57.11 #define NOT_WINDOWS(code) code 57.12 #endif 57.13 57.14 +#ifdef _WIN64 57.15 +#define WIN64_ONLY(code) code 57.16 +#define NOT_WIN64(code) 57.17 +#else 57.18 +#define WIN64_ONLY(code) 57.19 +#define NOT_WIN64(code) code 57.20 +#endif 57.21 + 57.22 #if defined(IA32) || defined(AMD64) 57.23 #define X86 57.24 #define X86_ONLY(code) code
58.1 --- a/src/share/vm/utilities/vmError.cpp Thu Mar 03 15:13:18 2011 -0800 58.2 +++ b/src/share/vm/utilities/vmError.cpp Fri Mar 04 14:06:16 2011 -0800 58.3 @@ -802,7 +802,7 @@ 58.4 first_error_tid = mytid; 58.5 set_error_reported(); 58.6 58.7 - if (ShowMessageBoxOnError) { 58.8 + if (ShowMessageBoxOnError || PauseAtExit) { 58.9 show_message_box(buffer, sizeof(buffer)); 58.10 58.11 // User has asked JVM to abort. Reset ShowMessageBoxOnError so the
59.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 59.2 +++ b/test/compiler/6942326/Test.java Fri Mar 04 14:06:16 2011 -0800 59.3 @@ -0,0 +1,409 @@ 59.4 +/* 59.5 + * Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved. 59.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 59.7 + * 59.8 + * This code is free software; you can redistribute it and/or modify it 59.9 + * under the terms of the GNU General Public License version 2 only, as 59.10 + * published by the Free Software Foundation. 59.11 + * 59.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 59.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 59.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 59.15 + * version 2 for more details (a copy is included in the LICENSE file that 59.16 + * accompanied this code). 59.17 + * 59.18 + * You should have received a copy of the GNU General Public License version 59.19 + * 2 along with this work; if not, write to the Free Software Foundation, 59.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 59.21 + * 59.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 59.23 + * or visit www.oracle.com if you need additional information or have any 59.24 + * questions. 
59.25 + * 59.26 + */ 59.27 + 59.28 +/** 59.29 + * @test 59.30 + * @bug 6942326 59.31 + * @summary x86 code in string_indexof() could read beyond reserved heap space 59.32 + * 59.33 + * @run main/othervm/timeout=300 -Xmx32m -Xbatch -XX:+IgnoreUnrecognizedVMOptions -XX:CompileCommand=exclude,Test,main -XX:CompileCommand=exclude,Test,test_varsub_indexof -XX:CompileCommand=exclude,Test,test_varstr_indexof -XX:CompileCommand=exclude,Test,test_missub_indexof -XX:CompileCommand=exclude,Test,test_consub_indexof -XX:CompileCommand=exclude,Test,test_conmis_indexof -XX:CompileCommand=exclude,Test,test_subcon Test 59.34 + * 59.35 + */ 59.36 + 59.37 +public class Test { 59.38 + 59.39 + static String[] strings = new String[1024]; 59.40 + private static final int ITERATIONS = 100000; 59.41 + 59.42 + public static void main(String[] args) { 59.43 + 59.44 + long start_total = System.currentTimeMillis(); 59.45 + 59.46 + // search variable size substring in string (33 chars). 59.47 + String a = " 1111111111111xx1111111111111xx11y"; // +1 to execute a.substring(1) first 59.48 + String b = "1111111111111xx1111111111111xx11y"; 59.49 + test_varsub_indexof(a, b); 59.50 + 59.51 + // search variable size substring in string (32 chars). 59.52 + a = " 1111111111111xx1111111111111xx1y"; 59.53 + b = "1111111111111xx1111111111111xx1y"; 59.54 + test_varsub_indexof(a, b); 59.55 + 59.56 + // search variable size substring in string (17 chars). 59.57 + a = " 1111111111111xx1y"; 59.58 + b = "1111111111111xx1y"; 59.59 + test_varsub_indexof(a, b); 59.60 + 59.61 + // search variable size substring in string (16 chars). 59.62 + a = " 111111111111xx1y"; 59.63 + b = "111111111111xx1y"; 59.64 + test_varsub_indexof(a, b); 59.65 + 59.66 + // search variable size substring in string (8 chars). 59.67 + a = " 1111xx1y"; 59.68 + b = "1111xx1y"; 59.69 + test_varsub_indexof(a, b); 59.70 + 59.71 + // search variable size substring in string (7 chars). 
59.72 + a = " 111xx1y"; 59.73 + b = "111xx1y"; 59.74 + test_varsub_indexof(a, b); 59.75 + 59.76 + 59.77 + 59.78 + // search substring (17 chars) in variable size string. 59.79 + a = "1111111111111xx1x"; 59.80 + b = " 1111111111111xx1111111111111xx1x"; // +1 to execute b.substring(1) first 59.81 + test_varstr_indexof(a, b); 59.82 + 59.83 + // search substring (16 chars) in variable size string. 59.84 + a = "111111111111xx1x"; 59.85 + b = " 1111111111111xx1111111111111xx1x"; 59.86 + test_varstr_indexof(a, b); 59.87 + 59.88 + // search substring (9 chars) in variable size string. 59.89 + a = "11111xx1x"; 59.90 + b = " 1111111111111xx1111111111111xx1x"; 59.91 + test_varstr_indexof(a, b); 59.92 + 59.93 + // search substring (8 chars) in variable size string. 59.94 + a = "1111xx1x"; 59.95 + b = " 1111111111111xx1111111111111xx1x"; 59.96 + test_varstr_indexof(a, b); 59.97 + 59.98 + // search substring (4 chars) in variable size string. 59.99 + a = "xx1x"; 59.100 + b = " 1111111111111xx1111111111111xx1x"; 59.101 + test_varstr_indexof(a, b); 59.102 + 59.103 + // search substring (3 chars) in variable size string. 59.104 + a = "x1x"; 59.105 + b = " 1111111111111xx1111111111111xx1x"; 59.106 + test_varstr_indexof(a, b); 59.107 + 59.108 + // search substring (2 chars) in variable size string. 59.109 + a = "1y"; 59.110 + b = " 1111111111111xx1111111111111xx1y"; 59.111 + test_varstr_indexof(a, b); 59.112 + 59.113 + 59.114 + 59.115 + // search non matching variable size substring in string (33 chars). 59.116 + a = " 1111111111111xx1111111111111xx11z"; // +1 to execute a.substring(1) first 59.117 + b = "1111111111111xx1111111111111xx11y"; 59.118 + test_missub_indexof(a, b); 59.119 + 59.120 + // search non matching variable size substring in string (32 chars). 59.121 + a = " 1111111111111xx1111111111111xx1z"; 59.122 + b = "1111111111111xx1111111111111xx1y"; 59.123 + test_missub_indexof(a, b); 59.124 + 59.125 + // search non matching variable size substring in string (17 chars). 
59.126 + a = " 1111111111111xx1z"; 59.127 + b = "1111111111111xx1y"; 59.128 + test_missub_indexof(a, b); 59.129 + 59.130 + // search non matching variable size substring in string (16 chars). 59.131 + a = " 111111111111xx1z"; 59.132 + b = "111111111111xx1y"; 59.133 + test_missub_indexof(a, b); 59.134 + 59.135 + // search non matching variable size substring in string (8 chars). 59.136 + a = " 1111xx1z"; 59.137 + b = "1111xx1y"; 59.138 + test_missub_indexof(a, b); 59.139 + 59.140 + // search non matching variable size substring in string (7 chars). 59.141 + a = " 111xx1z"; 59.142 + b = "111xx1y"; 59.143 + test_missub_indexof(a, b); 59.144 + 59.145 + 59.146 + 59.147 + // Testing constant substring search in variable size string. 59.148 + 59.149 + // search constant substring (17 chars). 59.150 + b = " 1111111111111xx1111111111111xx1x"; // +1 to execute b.substring(1) first 59.151 + TestCon tc = new TestCon17(); 59.152 + test_consub_indexof(tc, b); 59.153 + 59.154 + // search constant substring (16 chars). 59.155 + b = " 1111111111111xx1111111111111xx1x"; 59.156 + tc = new TestCon16(); 59.157 + test_consub_indexof(tc, b); 59.158 + 59.159 + // search constant substring (9 chars). 59.160 + b = " 1111111111111xx1111111111111xx1x"; 59.161 + tc = new TestCon9(); 59.162 + test_consub_indexof(tc, b); 59.163 + 59.164 + // search constant substring (8 chars). 59.165 + b = " 1111111111111xx1111111111111xx1x"; 59.166 + tc = new TestCon8(); 59.167 + test_consub_indexof(tc, b); 59.168 + 59.169 + // search constant substring (4 chars). 59.170 + b = " 1111111111111xx1111111111111xx1x"; 59.171 + tc = new TestCon4(); 59.172 + test_consub_indexof(tc, b); 59.173 + 59.174 + // search constant substring (3 chars). 59.175 + b = " 1111111111111xx1111111111111xx1x"; 59.176 + tc = new TestCon3(); 59.177 + test_consub_indexof(tc, b); 59.178 + 59.179 + // search constant substring (2 chars). 
59.180 + b = " 1111111111111xx1111111111111xx1y"; 59.181 + tc = new TestCon2(); 59.182 + test_consub_indexof(tc, b); 59.183 + 59.184 + // search constant substring (1 chars). 59.185 + b = " 1111111111111xx1111111111111xx1y"; 59.186 + tc = new TestCon1(); 59.187 + test_consub_indexof(tc, b); 59.188 + 59.189 + 59.190 + // search non matching constant substring (17 chars). 59.191 + b = " 1111111111111xx1111111111111xx1z"; // +1 to execute b.substring(1) first 59.192 + tc = new TestCon17(); 59.193 + test_conmis_indexof(tc, b); 59.194 + 59.195 + // search non matching constant substring (16 chars). 59.196 + b = " 1111111111111xx1111111111111xx1z"; 59.197 + tc = new TestCon16(); 59.198 + test_conmis_indexof(tc, b); 59.199 + 59.200 + // search non matching constant substring (9 chars). 59.201 + b = " 1111111111111xx1111111111111xx1z"; 59.202 + tc = new TestCon9(); 59.203 + test_conmis_indexof(tc, b); 59.204 + 59.205 + // search non matching constant substring (8 chars). 59.206 + b = " 1111111111111xx1111111111111xx1z"; 59.207 + tc = new TestCon8(); 59.208 + test_conmis_indexof(tc, b); 59.209 + 59.210 + // search non matching constant substring (4 chars). 59.211 + b = " 1111111111111xx1111111111111xx1z"; 59.212 + tc = new TestCon4(); 59.213 + test_conmis_indexof(tc, b); 59.214 + 59.215 + // search non matching constant substring (3 chars). 59.216 + b = " 1111111111111xx1111111111111xx1z"; 59.217 + tc = new TestCon3(); 59.218 + test_conmis_indexof(tc, b); 59.219 + 59.220 + // search non matching constant substring (2 chars). 59.221 + b = " 1111111111111xx1111111111111xx1z"; 59.222 + tc = new TestCon2(); 59.223 + test_conmis_indexof(tc, b); 59.224 + 59.225 + // search non matching constant substring (1 chars). 
59.226 + b = " 1111111111111xx1111111111111xx1z"; 59.227 + tc = new TestCon1(); 59.228 + test_conmis_indexof(tc, b); 59.229 + 59.230 + long end_total = System.currentTimeMillis(); 59.231 + System.out.println("End run time: " + (end_total - start_total)); 59.232 + 59.233 + } 59.234 + 59.235 + public static long test_init(String a, String b) { 59.236 + for (int i = 0; i < 512; i++) { 59.237 + strings[i * 2] = new String(b.toCharArray()); 59.238 + strings[i * 2 + 1] = new String(a.toCharArray()); 59.239 + } 59.240 + System.out.print(a.length() + " " + b.length() + " "); 59.241 + return System.currentTimeMillis(); 59.242 + } 59.243 + 59.244 + public static void test_end(String a, String b, int v, int expected, long start) { 59.245 + long end = System.currentTimeMillis(); 59.246 + int res = (v/ITERATIONS); 59.247 + System.out.print(" " + res); 59.248 + System.out.println(" time:" + (end - start)); 59.249 + if (res != expected) { 59.250 + System.out.println("wrong indexOf result: " + res + ", expected " + expected); 59.251 + System.out.println("\"" + b + "\".indexOf(\"" + a + "\")"); 59.252 + System.exit(97); 59.253 + } 59.254 + } 59.255 + 59.256 + public static int test_subvar() { 59.257 + int s = 0; 59.258 + int v = 0; 59.259 + for (int i = 0; i < ITERATIONS; i++) { 59.260 + v += strings[s].indexOf(strings[s + 1]); 59.261 + s += 2; 59.262 + if (s >= strings.length) s = 0; 59.263 + } 59.264 + return v; 59.265 + } 59.266 + 59.267 + public static void test_varsub_indexof(String a, String b) { 59.268 + System.out.println("Start search variable size substring in string (" + b.length() + " chars)"); 59.269 + long start_it = System.currentTimeMillis(); 59.270 + int limit = 1; // last a.length() == 1 59.271 + while (a.length() > limit) { 59.272 + a = a.substring(1); 59.273 + long start = test_init(a, b); 59.274 + int v = test_subvar(); 59.275 + test_end(a, b, v, (b.length() - a.length()), start); 59.276 + } 59.277 + long end_it = System.currentTimeMillis(); 59.278 + 
System.out.println("End search variable size substring in string (" + b.length() + " chars), time: " + (end_it - start_it)); 59.279 + } 59.280 + 59.281 + public static void test_varstr_indexof(String a, String b) { 59.282 + System.out.println("Start search substring (" + a.length() + " chars) in variable size string"); 59.283 + long start_it = System.currentTimeMillis(); 59.284 + int limit = a.length(); 59.285 + while (b.length() > limit) { 59.286 + b = b.substring(1); 59.287 + long start = test_init(a, b); 59.288 + int v = test_subvar(); 59.289 + test_end(a, b, v, (b.length() - a.length()), start); 59.290 + } 59.291 + long end_it = System.currentTimeMillis(); 59.292 + System.out.println("End search substring (" + a.length() + " chars) in variable size string, time: " + (end_it - start_it)); 59.293 + } 59.294 + 59.295 + public static void test_missub_indexof(String a, String b) { 59.296 + System.out.println("Start search non matching variable size substring in string (" + b.length() + " chars)"); 59.297 + long start_it = System.currentTimeMillis(); 59.298 + int limit = 1; // last a.length() == 1 59.299 + while (a.length() > limit) { 59.300 + a = a.substring(1); 59.301 + long start = test_init(a, b); 59.302 + int v = test_subvar(); 59.303 + test_end(a, b, v, (-1), start); 59.304 + } 59.305 + long end_it = System.currentTimeMillis(); 59.306 + System.out.println("End search non matching variable size substring in string (" + b.length() + " chars), time: " + (end_it - start_it)); 59.307 + } 59.308 + 59.309 + 59.310 + 59.311 + public static void test_consub_indexof(TestCon tc, String b) { 59.312 + System.out.println("Start search constant substring (" + tc.constr().length() + " chars)"); 59.313 + long start_it = System.currentTimeMillis(); 59.314 + int limit = tc.constr().length(); 59.315 + while (b.length() > limit) { 59.316 + b = b.substring(1); 59.317 + long start = test_init(tc.constr(), b); 59.318 + int v = test_subcon(tc); 59.319 + test_end(tc.constr(), b, v, 
(b.length() - tc.constr().length()), start); 59.320 + } 59.321 + long end_it = System.currentTimeMillis(); 59.322 + System.out.println("End search constant substring (" + tc.constr().length() + " chars), time: " + (end_it - start_it)); 59.323 + } 59.324 + 59.325 + public static void test_conmis_indexof(TestCon tc, String b) { 59.326 + System.out.println("Start search non matching constant substring (" + tc.constr().length() + " chars)"); 59.327 + long start_it = System.currentTimeMillis(); 59.328 + int limit = tc.constr().length(); 59.329 + while (b.length() > limit) { 59.330 + b = b.substring(1); 59.331 + long start = test_init(tc.constr(), b); 59.332 + int v = test_subcon(tc); 59.333 + test_end(tc.constr(), b, v, (-1), start); 59.334 + } 59.335 + long end_it = System.currentTimeMillis(); 59.336 + System.out.println("End search non matching constant substring (" + tc.constr().length() + " chars), time: " + (end_it - start_it)); 59.337 + } 59.338 + 59.339 + public static int test_subcon(TestCon tc) { 59.340 + int s = 0; 59.341 + int v = 0; 59.342 + for (int i = 0; i < ITERATIONS; i++) { 59.343 + v += tc.indexOf(strings[s]); 59.344 + s += 2; 59.345 + if (s >= strings.length) s = 0; 59.346 + } 59.347 + return v; 59.348 + } 59.349 + 59.350 + private interface TestCon { 59.351 + public String constr(); 59.352 + public int indexOf(String str); 59.353 + } 59.354 + 59.355 + // search constant substring (17 chars). 59.356 + private final static class TestCon17 implements TestCon { 59.357 + private static final String constr = "1111111111111xx1x"; 59.358 + public String constr() { return constr; } 59.359 + public int indexOf(String str) { return str.indexOf(constr); } 59.360 + } 59.361 + 59.362 + // search constant substring (16 chars). 
59.363 + private final static class TestCon16 implements TestCon { 59.364 + private static final String constr = "111111111111xx1x"; 59.365 + public String constr() { return constr; } 59.366 + public int indexOf(String str) { return str.indexOf(constr); } 59.367 + } 59.368 + 59.369 + // search constant substring (9 chars). 59.370 + private final static class TestCon9 implements TestCon { 59.371 + private static final String constr = "11111xx1x"; 59.372 + public String constr() { return constr; } 59.373 + public int indexOf(String str) { return str.indexOf(constr); } 59.374 + } 59.375 + 59.376 + // search constant substring (8 chars). 59.377 + private final static class TestCon8 implements TestCon { 59.378 + private static final String constr = "1111xx1x"; 59.379 + public String constr() { return constr; } 59.380 + public int indexOf(String str) { return str.indexOf(constr); } 59.381 + } 59.382 + 59.383 + // search constant substring (4 chars). 59.384 + private final static class TestCon4 implements TestCon { 59.385 + private static final String constr = "xx1x"; 59.386 + public String constr() { return constr; } 59.387 + public int indexOf(String str) { return str.indexOf(constr); } 59.388 + } 59.389 + 59.390 + // search constant substring (3 chars). 59.391 + private final static class TestCon3 implements TestCon { 59.392 + private static final String constr = "x1x"; 59.393 + public String constr() { return constr; } 59.394 + public int indexOf(String str) { return str.indexOf(constr); } 59.395 + } 59.396 + 59.397 + // search constant substring (2 chars). 59.398 + private final static class TestCon2 implements TestCon { 59.399 + private static final String constr = "1y"; 59.400 + public String constr() { return constr; } 59.401 + public int indexOf(String str) { return str.indexOf(constr); } 59.402 + } 59.403 + 59.404 + 59.405 + // search constant substring (1 chars). 
59.406 + private final static class TestCon1 implements TestCon { 59.407 + private static final String constr = "y"; 59.408 + public String constr() { return constr; } 59.409 + public int indexOf(String str) { return str.indexOf(constr); } 59.410 + } 59.411 +} 59.412 +
60.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 60.2 +++ b/test/runtime/6878713/Test6878713.sh Fri Mar 04 14:06:16 2011 -0800 60.3 @@ -0,0 +1,74 @@ 60.4 +#!/bin/sh 60.5 + 60.6 +## 60.7 +## @test 60.8 +## @bug 6878713 60.9 +## @summary Verifier heap corruption, relating to backward jsrs 60.10 +## @run shell/timeout=120 Test6878713.sh 60.11 +## 60.12 + 60.13 +if [ "${TESTSRC}" = "" ] 60.14 +then TESTSRC=. 60.15 +fi 60.16 + 60.17 +if [ "${TESTJAVA}" = "" ] 60.18 +then 60.19 + PARENT=`dirname \`which java\`` 60.20 + TESTJAVA=`dirname ${PARENT}` 60.21 + echo "TESTJAVA not set, selecting " ${TESTJAVA} 60.22 + echo "If this is incorrect, try setting the variable manually." 60.23 +fi 60.24 + 60.25 +if [ "${TESTCLASSES}" = "" ] 60.26 +then 60.27 + echo "TESTCLASSES not set. Test cannot execute. Failed." 60.28 + exit 1 60.29 +fi 60.30 + 60.31 +BIT_FLAG="" 60.32 + 60.33 +# set platform-dependent variables 60.34 +OS=`uname -s` 60.35 +case "$OS" in 60.36 + SunOS | Linux ) 60.37 + NULL=/dev/null 60.38 + PS=":" 60.39 + FS="/" 60.40 + ## for solaris, linux it's HOME 60.41 + FILE_LOCATION=$HOME 60.42 + if [ -f ${FILE_LOCATION}${FS}JDK64BIT -a ${OS} = "SunOS" ] 60.43 + then 60.44 + BIT_FLAG=`cat ${FILE_LOCATION}${FS}JDK64BIT | grep -v '^#'` 60.45 + fi 60.46 + ;; 60.47 + Windows_* ) 60.48 + NULL=NUL 60.49 + PS=";" 60.50 + FS="\\" 60.51 + ;; 60.52 + * ) 60.53 + echo "Unrecognized system!" 60.54 + exit 1; 60.55 + ;; 60.56 +esac 60.57 + 60.58 +JEMMYPATH=${CPAPPEND} 60.59 +CLASSPATH=.${PS}${TESTCLASSES}${PS}${JEMMYPATH} ; export CLASSPATH 60.60 + 60.61 +THIS_DIR=`pwd` 60.62 + 60.63 +${TESTJAVA}${FS}bin${FS}java ${BIT_FLAG} -version 60.64 + 60.65 +${TESTJAVA}${FS}bin${FS}jar xvf ${TESTSRC}${FS}testcase.jar 60.66 + 60.67 +${TESTJAVA}${FS}bin${FS}java ${BIT_FLAG} OOMCrashClass1960_2 > test.out 2>&1 60.68 + 60.69 +if [ -s core -o -s "hs_*.log" ] 60.70 +then 60.71 + cat hs*.log 60.72 + echo "Test Failed" 60.73 + exit 1 60.74 +else 60.75 + echo "Test Passed" 60.76 + exit 0 60.77 +fi
61.1 Binary file test/runtime/6878713/testcase.jar has changed