Tue, 08 Sep 2009 09:02:48 +0100
Merge
1.1 --- a/.hgtags Tue Sep 08 09:01:16 2009 +0100 1.2 +++ b/.hgtags Tue Sep 08 09:02:48 2009 +0100 1.3 @@ -41,3 +41,7 @@ 1.4 ba36394eb84b949b31212bdb32a518a8f92bab5b jdk7-b64 1.5 ba313800759b678979434d6da8ed3bf49eb8bea4 jdk7-b65 1.6 57c71ad0341b8b64ed20f81151eb7f06324f8894 jdk7-b66 1.7 +18f526145aea355a9320b724373386fc2170f183 jdk7-b67 1.8 +d07e68298d4e17ebf93d8299e43fcc3ded26472a jdk7-b68 1.9 +54fd4d9232969ea6cd3d236e5ad276183bb0d423 jdk7-b69 1.10 +0632c3e615a315ff11e2ab1d64f4d82ff9853461 jdk7-b70
2.1 --- a/THIRD_PARTY_README Tue Sep 08 09:01:16 2009 +0100 2.2 +++ b/THIRD_PARTY_README Tue Sep 08 09:02:48 2009 +0100 2.3 @@ -32,7 +32,7 @@ 2.4 2.5 --- end of LICENSE file --- 2.6 %% This notice is provided with respect to ASM, which may be included with this software: 2.7 -Copyright (c) 2000-2005 INRIA, France Telecom 2.8 +Copyright (c) 2000-2007 INRIA, France Telecom 2.9 All rights reserved. 2.10 2.11 Redistribution and use in source and binary forms, with or without
3.1 --- a/agent/src/share/classes/sun/jvm/hotspot/code/DebugInfoReadStream.java Tue Sep 08 09:01:16 2009 +0100 3.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/code/DebugInfoReadStream.java Tue Sep 08 09:02:48 2009 +0100 3.3 @@ -81,4 +81,8 @@ 3.4 Assert.that(false, "should not reach here"); 3.5 return null; 3.6 } 3.7 + 3.8 + public int readBCI() { 3.9 + return readInt() + InvocationEntryBCI; 3.10 + } 3.11 }
4.1 --- a/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java Tue Sep 08 09:01:16 2009 +0100 4.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java Tue Sep 08 09:02:48 2009 +0100 4.3 @@ -1,5 +1,5 @@ 4.4 /* 4.5 - * Copyright 2000-2006 Sun Microsystems, Inc. All Rights Reserved. 4.6 + * Copyright 2000-2009 Sun Microsystems, Inc. All Rights Reserved. 4.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4.8 * 4.9 * This code is free software; you can redistribute it and/or modify it 4.10 @@ -259,7 +259,7 @@ 4.11 if (Assert.ASSERTS_ENABLED) { 4.12 Assert.that(pd != null, "scope must be present"); 4.13 } 4.14 - return new ScopeDesc(this, pd.getScopeDecodeOffset()); 4.15 + return new ScopeDesc(this, pd.getScopeDecodeOffset(), pd.getReexecute()); 4.16 } 4.17 4.18 /** This is only for use by the debugging system, and is only 4.19 @@ -291,7 +291,7 @@ 4.20 public ScopeDesc getScopeDescNearDbg(Address pc) { 4.21 PCDesc pd = getPCDescNearDbg(pc); 4.22 if (pd == null) return null; 4.23 - return new ScopeDesc(this, pd.getScopeDecodeOffset()); 4.24 + return new ScopeDesc(this, pd.getScopeDecodeOffset(), pd.getReexecute()); 4.25 } 4.26 4.27 public Map/*<Address, PcDesc>*/ getSafepoints() {
5.1 --- a/agent/src/share/classes/sun/jvm/hotspot/code/PCDesc.java Tue Sep 08 09:01:16 2009 +0100 5.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/code/PCDesc.java Tue Sep 08 09:02:48 2009 +0100 5.3 @@ -1,5 +1,5 @@ 5.4 /* 5.5 - * Copyright 2000-2004 Sun Microsystems, Inc. All Rights Reserved. 5.6 + * Copyright 2000-2009 Sun Microsystems, Inc. All Rights Reserved. 5.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5.8 * 5.9 * This code is free software; you can redistribute it and/or modify it 5.10 @@ -36,6 +36,7 @@ 5.11 public class PCDesc extends VMObject { 5.12 private static CIntegerField pcOffsetField; 5.13 private static CIntegerField scopeDecodeOffsetField; 5.14 + private static CIntegerField pcFlagsField; 5.15 5.16 static { 5.17 VM.registerVMInitializedObserver(new Observer() { 5.18 @@ -50,6 +51,7 @@ 5.19 5.20 pcOffsetField = type.getCIntegerField("_pc_offset"); 5.21 scopeDecodeOffsetField = type.getCIntegerField("_scope_decode_offset"); 5.22 + pcFlagsField = type.getCIntegerField("_flags"); 5.23 } 5.24 5.25 public PCDesc(Address addr) { 5.26 @@ -70,6 +72,12 @@ 5.27 return code.instructionsBegin().addOffsetTo(getPCOffset()); 5.28 } 5.29 5.30 + 5.31 + public boolean getReexecute() { 5.32 + int flags = (int)pcFlagsField.getValue(addr); 5.33 + return ((flags & 0x1)== 1); //first is the reexecute bit 5.34 + } 5.35 + 5.36 public void print(NMethod code) { 5.37 printOn(System.out, code); 5.38 }
6.1 --- a/agent/src/share/classes/sun/jvm/hotspot/code/ScopeDesc.java Tue Sep 08 09:01:16 2009 +0100 6.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/code/ScopeDesc.java Tue Sep 08 09:02:48 2009 +0100 6.3 @@ -52,44 +52,46 @@ 6.4 private List objects; // ArrayList<ScopeValue> 6.5 6.6 6.7 - public ScopeDesc(NMethod code, int decodeOffset) { 6.8 + public ScopeDesc(NMethod code, int decodeOffset, boolean reexecute) { 6.9 this.code = code; 6.10 this.decodeOffset = decodeOffset; 6.11 this.objects = decodeObjectValues(DebugInformationRecorder.SERIALIZED_NULL); 6.12 + this.reexecute = reexecute; 6.13 6.14 // Decode header 6.15 DebugInfoReadStream stream = streamAt(decodeOffset); 6.16 6.17 senderDecodeOffset = stream.readInt(); 6.18 method = (Method) VM.getVM().getObjectHeap().newOop(stream.readOopHandle()); 6.19 - setBCIAndReexecute(stream.readInt()); 6.20 + bci = stream.readBCI(); 6.21 // Decode offsets for body and sender 6.22 localsDecodeOffset = stream.readInt(); 6.23 expressionsDecodeOffset = stream.readInt(); 6.24 monitorsDecodeOffset = stream.readInt(); 6.25 } 6.26 6.27 - public ScopeDesc(NMethod code, int decodeOffset, int objectDecodeOffset) { 6.28 + public ScopeDesc(NMethod code, int decodeOffset, int objectDecodeOffset, boolean reexecute) { 6.29 this.code = code; 6.30 this.decodeOffset = decodeOffset; 6.31 this.objects = decodeObjectValues(objectDecodeOffset); 6.32 + this.reexecute = reexecute; 6.33 6.34 // Decode header 6.35 DebugInfoReadStream stream = streamAt(decodeOffset); 6.36 6.37 senderDecodeOffset = stream.readInt(); 6.38 method = (Method) VM.getVM().getObjectHeap().newOop(stream.readOopHandle()); 6.39 - setBCIAndReexecute(stream.readInt()); 6.40 + bci = stream.readBCI(); 6.41 // Decode offsets for body and sender 6.42 localsDecodeOffset = stream.readInt(); 6.43 expressionsDecodeOffset = stream.readInt(); 6.44 monitorsDecodeOffset = stream.readInt(); 6.45 } 6.46 6.47 - public NMethod getNMethod() { return code; } 6.48 - public Method getMethod() { 
return method; } 6.49 - public int getBCI() { return bci; } 6.50 - public boolean getReexecute() {return reexecute;} 6.51 + public NMethod getNMethod() { return code; } 6.52 + public Method getMethod() { return method; } 6.53 + public int getBCI() { return bci; } 6.54 + public boolean getReexecute() { return reexecute;} 6.55 6.56 /** Returns a List<ScopeValue> */ 6.57 public List getLocals() { 6.58 @@ -117,7 +119,7 @@ 6.59 return null; 6.60 } 6.61 6.62 - return new ScopeDesc(code, senderDecodeOffset); 6.63 + return new ScopeDesc(code, senderDecodeOffset, false); 6.64 } 6.65 6.66 /** Returns where the scope was decoded */ 6.67 @@ -151,8 +153,8 @@ 6.68 public void printValueOn(PrintStream tty) { 6.69 tty.print("ScopeDesc for "); 6.70 method.printValueOn(tty); 6.71 - tty.println(" @bci " + bci); 6.72 - tty.println(" reexecute: " + reexecute); 6.73 + tty.print(" @bci " + bci); 6.74 + tty.println(" reexecute=" + reexecute); 6.75 } 6.76 6.77 // FIXME: add more accessors 6.78 @@ -160,12 +162,6 @@ 6.79 //-------------------------------------------------------------------------------- 6.80 // Internals only below this point 6.81 // 6.82 - private void setBCIAndReexecute(int combination) { 6.83 - int InvocationEntryBci = VM.getVM().getInvocationEntryBCI(); 6.84 - bci = (combination >> 1) + InvocationEntryBci; 6.85 - reexecute = (combination & 1)==1 ? true : false; 6.86 - } 6.87 - 6.88 private DebugInfoReadStream streamAt(int decodeOffset) { 6.89 return new DebugInfoReadStream(code, decodeOffset, objects); 6.90 }
7.1 --- a/make/hotspot_version Tue Sep 08 09:01:16 2009 +0100 7.2 +++ b/make/hotspot_version Tue Sep 08 09:02:48 2009 +0100 7.3 @@ -33,9 +33,9 @@ 7.4 # Don't put quotes (fail windows build). 7.5 HOTSPOT_VM_COPYRIGHT=Copyright 2009 7.6 7.7 -HS_MAJOR_VER=16 7.8 +HS_MAJOR_VER=17 7.9 HS_MINOR_VER=0 7.10 -HS_BUILD_NUMBER=07 7.11 +HS_BUILD_NUMBER=01 7.12 7.13 JDK_MAJOR_VER=1 7.14 JDK_MINOR_VER=7
8.1 --- a/src/cpu/sparc/vm/c1_Defs_sparc.hpp Tue Sep 08 09:01:16 2009 +0100 8.2 +++ b/src/cpu/sparc/vm/c1_Defs_sparc.hpp Tue Sep 08 09:02:48 2009 +0100 8.3 @@ -1,5 +1,5 @@ 8.4 /* 8.5 - * Copyright 2000-2005 Sun Microsystems, Inc. All Rights Reserved. 8.6 + * Copyright 2000-2009 Sun Microsystems, Inc. All Rights Reserved. 8.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 8.8 * 8.9 * This code is free software; you can redistribute it and/or modify it 8.10 @@ -38,7 +38,7 @@ 8.11 // registers 8.12 enum { 8.13 pd_nof_cpu_regs_frame_map = 32, // number of registers used during code emission 8.14 - pd_nof_caller_save_cpu_regs_frame_map = 6, // number of cpu registers killed by calls 8.15 + pd_nof_caller_save_cpu_regs_frame_map = 10, // number of cpu registers killed by calls 8.16 pd_nof_cpu_regs_reg_alloc = 20, // number of registers that are visible to register allocator 8.17 pd_nof_cpu_regs_linearscan = 32,// number of registers visible linear scan 8.18 pd_first_cpu_reg = 0,
9.1 --- a/src/cpu/sparc/vm/c1_FrameMap_sparc.cpp Tue Sep 08 09:01:16 2009 +0100 9.2 +++ b/src/cpu/sparc/vm/c1_FrameMap_sparc.cpp Tue Sep 08 09:02:48 2009 +0100 9.3 @@ -320,6 +320,10 @@ 9.4 _caller_save_cpu_regs[3] = FrameMap::O3_opr; 9.5 _caller_save_cpu_regs[4] = FrameMap::O4_opr; 9.6 _caller_save_cpu_regs[5] = FrameMap::O5_opr; 9.7 + _caller_save_cpu_regs[6] = FrameMap::G1_opr; 9.8 + _caller_save_cpu_regs[7] = FrameMap::G3_opr; 9.9 + _caller_save_cpu_regs[8] = FrameMap::G4_opr; 9.10 + _caller_save_cpu_regs[9] = FrameMap::G5_opr; 9.11 for (int i = 0; i < nof_caller_save_fpu_regs; i++) { 9.12 _caller_save_fpu_regs[i] = LIR_OprFact::single_fpu(i); 9.13 }
10.1 --- a/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp Tue Sep 08 09:01:16 2009 +0100 10.2 +++ b/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp Tue Sep 08 09:02:48 2009 +0100 10.3 @@ -749,6 +749,10 @@ 10.4 10.5 void LIRGenerator::do_ArrayCopy(Intrinsic* x) { 10.6 assert(x->number_of_arguments() == 5, "wrong type"); 10.7 + 10.8 + // Make all state_for calls early since they can emit code 10.9 + CodeEmitInfo* info = state_for(x, x->state()); 10.10 + 10.11 // Note: spill caller save before setting the item 10.12 LIRItem src (x->argument_at(0), this); 10.13 LIRItem src_pos (x->argument_at(1), this); 10.14 @@ -767,7 +771,6 @@ 10.15 ciArrayKlass* expected_type; 10.16 arraycopy_helper(x, &flags, &expected_type); 10.17 10.18 - CodeEmitInfo* info = state_for(x, x->state()); 10.19 __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), 10.20 length.result(), rlock_callee_saved(T_INT), 10.21 expected_type, flags, info); 10.22 @@ -878,6 +881,9 @@ 10.23 10.24 10.25 void LIRGenerator::do_NewTypeArray(NewTypeArray* x) { 10.26 + // Evaluate state_for early since it may emit code 10.27 + CodeEmitInfo* info = state_for(x, x->state()); 10.28 + 10.29 LIRItem length(x->length(), this); 10.30 length.load_item(); 10.31 10.32 @@ -892,7 +898,6 @@ 10.33 10.34 __ oop2reg(ciTypeArrayKlass::make(elem_type)->encoding(), klass_reg); 10.35 10.36 - CodeEmitInfo* info = state_for(x, x->state()); 10.37 CodeStub* slow_path = new NewTypeArrayStub(klass_reg, len, reg, info); 10.38 __ allocate_array(reg, len, tmp1, tmp2, tmp3, tmp4, elem_type, klass_reg, slow_path); 10.39 10.40 @@ -902,7 +907,8 @@ 10.41 10.42 10.43 void LIRGenerator::do_NewObjectArray(NewObjectArray* x) { 10.44 - LIRItem length(x->length(), this); 10.45 + // Evaluate state_for early since it may emit code. 
10.46 + CodeEmitInfo* info = state_for(x, x->state()); 10.47 // in case of patching (i.e., object class is not yet loaded), we need to reexecute the instruction 10.48 // and therefore provide the state before the parameters have been consumed 10.49 CodeEmitInfo* patching_info = NULL; 10.50 @@ -910,6 +916,7 @@ 10.51 patching_info = state_for(x, x->state_before()); 10.52 } 10.53 10.54 + LIRItem length(x->length(), this); 10.55 length.load_item(); 10.56 10.57 const LIR_Opr reg = result_register_for(x->type()); 10.58 @@ -919,7 +926,6 @@ 10.59 LIR_Opr tmp4 = FrameMap::O1_oop_opr; 10.60 LIR_Opr klass_reg = FrameMap::G5_oop_opr; 10.61 LIR_Opr len = length.result(); 10.62 - CodeEmitInfo* info = state_for(x, x->state()); 10.63 10.64 CodeStub* slow_path = new NewObjectArrayStub(klass_reg, len, reg, info); 10.65 ciObject* obj = (ciObject*) ciObjArrayKlass::make(x->klass()); 10.66 @@ -943,25 +949,22 @@ 10.67 items->at_put(i, size); 10.68 } 10.69 10.70 - // need to get the info before, as the items may become invalid through item_free 10.71 + // Evaluate state_for early since it may emit code. 10.72 CodeEmitInfo* patching_info = NULL; 10.73 if (!x->klass()->is_loaded() || PatchALot) { 10.74 patching_info = state_for(x, x->state_before()); 10.75 10.76 // cannot re-use same xhandlers for multiple CodeEmitInfos, so 10.77 - // clone all handlers 10.78 + // clone all handlers. This is handled transparently in other 10.79 + // places by the CodeEmitInfo cloning logic but is handled 10.80 + // specially here because a stub isn't being used. 10.81 x->set_exception_handlers(new XHandlers(x->exception_handlers())); 10.82 } 10.83 + CodeEmitInfo* info = state_for(x, x->state()); 10.84 10.85 i = dims->length(); 10.86 while (i-- > 0) { 10.87 LIRItem* size = items->at(i); 10.88 - // if a patching_info was generated above then debug information for the state before 10.89 - // the call is going to be emitted. 
The LIRGenerator calls above may have left some values 10.90 - // in registers and that's been recorded in the CodeEmitInfo. In that case the items 10.91 - // for those values can't simply be freed if they are registers because the values 10.92 - // might be destroyed by store_stack_parameter. So in the case of patching, delay the 10.93 - // freeing of the items that already were in registers 10.94 size->load_item(); 10.95 store_stack_parameter (size->result(), 10.96 in_ByteSize(STACK_BIAS + 10.97 @@ -972,8 +975,6 @@ 10.98 // This instruction can be deoptimized in the slow path : use 10.99 // O0 as result register. 10.100 const LIR_Opr reg = result_register_for(x->type()); 10.101 - CodeEmitInfo* info = state_for(x, x->state()); 10.102 - 10.103 jobject2reg_with_patching(reg, x->klass(), patching_info); 10.104 LIR_Opr rank = FrameMap::O1_opr; 10.105 __ move(LIR_OprFact::intConst(x->rank()), rank);
11.1 --- a/src/cpu/sparc/vm/interp_masm_sparc.cpp Tue Sep 08 09:01:16 2009 +0100 11.2 +++ b/src/cpu/sparc/vm/interp_masm_sparc.cpp Tue Sep 08 09:02:48 2009 +0100 11.3 @@ -1696,6 +1696,9 @@ 11.4 void InterpreterMacroAssembler::record_klass_in_profile_helper( 11.5 Register receiver, Register scratch, 11.6 int start_row, Label& done) { 11.7 + if (TypeProfileWidth == 0) 11.8 + return; 11.9 + 11.10 int last_row = VirtualCallData::row_limit() - 1; 11.11 assert(start_row <= last_row, "must be work left to do"); 11.12 // Test this row for both the receiver and for null.
12.1 --- a/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp Tue Sep 08 09:01:16 2009 +0100 12.2 +++ b/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp Tue Sep 08 09:02:48 2009 +0100 12.3 @@ -1047,16 +1047,17 @@ 12.4 items->at_put(i, size); 12.5 } 12.6 12.7 - // need to get the info before, as the items may become invalid through item_free 12.8 + // Evaluate state_for early since it may emit code. 12.9 CodeEmitInfo* patching_info = NULL; 12.10 if (!x->klass()->is_loaded() || PatchALot) { 12.11 patching_info = state_for(x, x->state_before()); 12.12 12.13 // cannot re-use same xhandlers for multiple CodeEmitInfos, so 12.14 - // clone all handlers. 12.15 + // clone all handlers. This is handled transparently in other 12.16 + // places by the CodeEmitInfo cloning logic but is handled 12.17 + // specially here because a stub isn't being used. 12.18 x->set_exception_handlers(new XHandlers(x->exception_handlers())); 12.19 } 12.20 - 12.21 CodeEmitInfo* info = state_for(x, x->state()); 12.22 12.23 i = dims->length();
13.1 --- a/src/cpu/x86/vm/interp_masm_x86_32.cpp Tue Sep 08 09:01:16 2009 +0100 13.2 +++ b/src/cpu/x86/vm/interp_masm_x86_32.cpp Tue Sep 08 09:02:48 2009 +0100 13.3 @@ -1262,6 +1262,9 @@ 13.4 Register receiver, Register mdp, 13.5 Register reg2, 13.6 int start_row, Label& done) { 13.7 + if (TypeProfileWidth == 0) 13.8 + return; 13.9 + 13.10 int last_row = VirtualCallData::row_limit() - 1; 13.11 assert(start_row <= last_row, "must be work left to do"); 13.12 // Test this row for both the receiver and for null.
14.1 --- a/src/cpu/x86/vm/interp_masm_x86_64.cpp Tue Sep 08 09:01:16 2009 +0100 14.2 +++ b/src/cpu/x86/vm/interp_masm_x86_64.cpp Tue Sep 08 09:02:48 2009 +0100 14.3 @@ -1272,6 +1272,9 @@ 14.4 Register receiver, Register mdp, 14.5 Register reg2, 14.6 int start_row, Label& done) { 14.7 + if (TypeProfileWidth == 0) 14.8 + return; 14.9 + 14.10 int last_row = VirtualCallData::row_limit() - 1; 14.11 assert(start_row <= last_row, "must be work left to do"); 14.12 // Test this row for both the receiver and for null.
15.1 --- a/src/cpu/x86/vm/sharedRuntime_x86_32.cpp Tue Sep 08 09:01:16 2009 +0100 15.2 +++ b/src/cpu/x86/vm/sharedRuntime_x86_32.cpp Tue Sep 08 09:02:48 2009 +0100 15.3 @@ -2381,7 +2381,7 @@ 15.4 15.5 // Save everything in sight. 15.6 15.7 - map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words); 15.8 + map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false); 15.9 // Normal deoptimization 15.10 __ push(Deoptimization::Unpack_deopt); 15.11 __ jmp(cont); 15.12 @@ -2392,7 +2392,7 @@ 15.13 // return address is the pc describes what bci to do re-execute at 15.14 15.15 // No need to update map as each call to save_live_registers will produce identical oopmap 15.16 - (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words); 15.17 + (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false); 15.18 15.19 __ push(Deoptimization::Unpack_reexecute); 15.20 __ jmp(cont); 15.21 @@ -2428,7 +2428,7 @@ 15.22 // Save everything in sight. 15.23 15.24 // No need to update map as each call to save_live_registers will produce identical oopmap 15.25 - (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words); 15.26 + (void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false); 15.27 15.28 // Now it is safe to overwrite any register 15.29 15.30 @@ -2515,6 +2515,11 @@ 15.31 15.32 RegisterSaver::restore_result_registers(masm); 15.33 15.34 + // Non standard control word may be leaked out through a safepoint blob, and we can 15.35 + // deopt at a poll point with the non standard control word. However, we should make 15.36 + // sure the control word is correct after restore_result_registers. 15.37 + __ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std())); 15.38 + 15.39 // All of the register save area has been popped of the stack. Only the 15.40 // return address remains. 15.41
16.1 --- a/src/os/windows/vm/os_windows.cpp Tue Sep 08 09:01:16 2009 +0100 16.2 +++ b/src/os/windows/vm/os_windows.cpp Tue Sep 08 09:02:48 2009 +0100 16.3 @@ -1526,7 +1526,8 @@ 16.4 case 5000: st->print(" Windows 2000"); break; 16.5 case 5001: st->print(" Windows XP"); break; 16.6 case 5002: 16.7 - case 6000: { 16.8 + case 6000: 16.9 + case 6001: { 16.10 // Retrieve SYSTEM_INFO from GetNativeSystemInfo call so that we could 16.11 // find out whether we are running on 64 bit processor or not. 16.12 SYSTEM_INFO si; 16.13 @@ -1549,13 +1550,27 @@ 16.14 st->print(" Windows XP x64 Edition"); 16.15 else 16.16 st->print(" Windows Server 2003 family"); 16.17 - } else { // os_vers == 6000 16.18 + } else if (os_vers == 6000) { 16.19 if (osvi.wProductType == VER_NT_WORKSTATION) 16.20 st->print(" Windows Vista"); 16.21 else 16.22 st->print(" Windows Server 2008"); 16.23 if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) 16.24 st->print(" , 64 bit"); 16.25 + } else if (os_vers == 6001) { 16.26 + if (osvi.wProductType == VER_NT_WORKSTATION) { 16.27 + st->print(" Windows 7"); 16.28 + } else { 16.29 + // Unrecognized windows, print out its major and minor versions 16.30 + st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion); 16.31 + } 16.32 + if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) 16.33 + st->print(" , 64 bit"); 16.34 + } else { // future os 16.35 + // Unrecognized windows, print out its major and minor versions 16.36 + st->print(" Windows NT %d.%d", osvi.dwMajorVersion, osvi.dwMinorVersion); 16.37 + if (si.wProcessorArchitecture == PROCESSOR_ARCHITECTURE_AMD64) 16.38 + st->print(" , 64 bit"); 16.39 } 16.40 break; 16.41 }
17.1 --- a/src/share/vm/classfile/javaClasses.cpp Tue Sep 08 09:01:16 2009 +0100 17.2 +++ b/src/share/vm/classfile/javaClasses.cpp Tue Sep 08 09:02:48 2009 +0100 17.3 @@ -1229,13 +1229,10 @@ 17.4 17.5 // Compiled java method case. 17.6 if (decode_offset != 0) { 17.7 - bool dummy_reexecute = false; 17.8 DebugInfoReadStream stream(nm, decode_offset); 17.9 decode_offset = stream.read_int(); 17.10 method = (methodOop)nm->oop_at(stream.read_int()); 17.11 - //fill_in_stack_trace does not need the reexecute information which is designed 17.12 - //for the deopt to reexecute 17.13 - bci = stream.read_bci_and_reexecute(dummy_reexecute); 17.14 + bci = stream.read_bci(); 17.15 } else { 17.16 if (fr.is_first_frame()) break; 17.17 address pc = fr.pc();
18.1 --- a/src/share/vm/code/debugInfo.hpp Tue Sep 08 09:01:16 2009 +0100 18.2 +++ b/src/share/vm/code/debugInfo.hpp Tue Sep 08 09:02:48 2009 +0100 18.3 @@ -1,5 +1,5 @@ 18.4 /* 18.5 - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. 18.6 + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. 18.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 18.8 * 18.9 * This code is free software; you can redistribute it and/or modify it 18.10 @@ -255,8 +255,7 @@ 18.11 ScopeValue* read_object_value(); 18.12 ScopeValue* get_cached_object(); 18.13 // BCI encoding is mostly unsigned, but -1 is a distinguished value 18.14 - // Decoding based on encoding: bci = InvocationEntryBci + read_int()/2; reexecute = read_int()%2 == 1 ? true : false; 18.15 - int read_bci_and_reexecute(bool& reexecute) { int i = read_int(); reexecute = (i & 1) ? true : false; return (i >> 1) + InvocationEntryBci; } 18.16 + int read_bci() { return read_int() + InvocationEntryBci; } 18.17 }; 18.18 18.19 // DebugInfoWriteStream specializes CompressedWriteStream for 18.20 @@ -269,6 +268,5 @@ 18.21 public: 18.22 DebugInfoWriteStream(DebugInformationRecorder* recorder, int initial_size); 18.23 void write_handle(jobject h); 18.24 - //Encoding bci and reexecute into one word as (bci - InvocationEntryBci)*2 + reexecute 18.25 - void write_bci_and_reexecute(int bci, bool reexecute) { write_int(((bci - InvocationEntryBci) << 1) + (reexecute ? 1 : 0)); } 18.26 + void write_bci(int bci) { write_int(bci - InvocationEntryBci); } 18.27 };
19.1 --- a/src/share/vm/code/debugInfoRec.cpp Tue Sep 08 09:01:16 2009 +0100 19.2 +++ b/src/share/vm/code/debugInfoRec.cpp Tue Sep 08 09:02:48 2009 +0100 19.3 @@ -1,5 +1,5 @@ 19.4 /* 19.5 - * Copyright 1998-2006 Sun Microsystems, Inc. All Rights Reserved. 19.6 + * Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved. 19.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 19.8 * 19.9 * This code is free software; you can redistribute it and/or modify it 19.10 @@ -292,13 +292,16 @@ 19.11 int stream_offset = stream()->position(); 19.12 last_pd->set_scope_decode_offset(stream_offset); 19.13 19.14 + // Record reexecute bit into pcDesc 19.15 + last_pd->set_should_reexecute(reexecute); 19.16 + 19.17 // serialize sender stream offest 19.18 stream()->write_int(sender_stream_offset); 19.19 19.20 // serialize scope 19.21 jobject method_enc = (method == NULL)? NULL: method->encoding(); 19.22 stream()->write_int(oop_recorder()->find_index(method_enc)); 19.23 - stream()->write_bci_and_reexecute(bci, reexecute); 19.24 + stream()->write_bci(bci); 19.25 assert(method == NULL || 19.26 (method->is_native() && bci == 0) || 19.27 (!method->is_native() && 0 <= bci && bci < method->code_size()) ||
20.1 --- a/src/share/vm/code/nmethod.cpp Tue Sep 08 09:01:16 2009 +0100 20.2 +++ b/src/share/vm/code/nmethod.cpp Tue Sep 08 09:02:48 2009 +0100 20.3 @@ -1,5 +1,5 @@ 20.4 /* 20.5 - * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. 20.6 + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. 20.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 20.8 * 20.9 * This code is free software; you can redistribute it and/or modify it 20.10 @@ -966,7 +966,7 @@ 20.11 PcDesc* pd = pc_desc_at(pc); 20.12 guarantee(pd != NULL, "scope must be present"); 20.13 return new ScopeDesc(this, pd->scope_decode_offset(), 20.14 - pd->obj_decode_offset()); 20.15 + pd->obj_decode_offset(), pd->should_reexecute()); 20.16 } 20.17 20.18 20.19 @@ -1932,7 +1932,7 @@ 20.20 PcDesc* pd = pc_desc_at(ic->end_of_call()); 20.21 assert(pd != NULL, "PcDesc must exist"); 20.22 for (ScopeDesc* sd = new ScopeDesc(this, pd->scope_decode_offset(), 20.23 - pd->obj_decode_offset()); 20.24 + pd->obj_decode_offset(), pd->should_reexecute()); 20.25 !sd->is_top(); sd = sd->sender()) { 20.26 sd->verify(); 20.27 } 20.28 @@ -2181,7 +2181,7 @@ 20.29 PcDesc* p = pc_desc_near(begin+1); 20.30 if (p != NULL && p->real_pc(this) <= end) { 20.31 return new ScopeDesc(this, p->scope_decode_offset(), 20.32 - p->obj_decode_offset()); 20.33 + p->obj_decode_offset(), p->should_reexecute()); 20.34 } 20.35 return NULL; 20.36 }
21.1 --- a/src/share/vm/code/pcDesc.cpp Tue Sep 08 09:01:16 2009 +0100 21.2 +++ b/src/share/vm/code/pcDesc.cpp Tue Sep 08 09:02:48 2009 +0100 21.3 @@ -1,5 +1,5 @@ 21.4 /* 21.5 - * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved. 21.6 + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. 21.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 21.8 * 21.9 * This code is free software; you can redistribute it and/or modify it 21.10 @@ -26,9 +26,11 @@ 21.11 # include "incls/_pcDesc.cpp.incl" 21.12 21.13 PcDesc::PcDesc(int pc_offset, int scope_decode_offset, int obj_decode_offset) { 21.14 + assert(sizeof(PcDescFlags) <= 4, "occupies more than a word"); 21.15 _pc_offset = pc_offset; 21.16 _scope_decode_offset = scope_decode_offset; 21.17 _obj_decode_offset = obj_decode_offset; 21.18 + _flags.word = 0; 21.19 } 21.20 21.21 address PcDesc::real_pc(const nmethod* code) const { 21.22 @@ -50,6 +52,7 @@ 21.23 tty->print(" "); 21.24 sd->method()->print_short_name(tty); 21.25 tty->print(" @%d", sd->bci()); 21.26 + tty->print(" reexecute=%s", sd->should_reexecute()?"true":"false"); 21.27 tty->cr(); 21.28 } 21.29 #endif
22.1 --- a/src/share/vm/code/pcDesc.hpp Tue Sep 08 09:01:16 2009 +0100 22.2 +++ b/src/share/vm/code/pcDesc.hpp Tue Sep 08 09:02:48 2009 +0100 22.3 @@ -1,5 +1,5 @@ 22.4 /* 22.5 - * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved. 22.6 + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. 22.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 22.8 * 22.9 * This code is free software; you can redistribute it and/or modify it 22.10 @@ -34,6 +34,13 @@ 22.11 int _scope_decode_offset; // offset for scope in nmethod 22.12 int _obj_decode_offset; 22.13 22.14 + union PcDescFlags { 22.15 + int word; 22.16 + struct { 22.17 + unsigned int reexecute: 1; 22.18 + } bits; 22.19 + } _flags; 22.20 + 22.21 public: 22.22 int pc_offset() const { return _pc_offset; } 22.23 int scope_decode_offset() const { return _scope_decode_offset; } 22.24 @@ -53,6 +60,10 @@ 22.25 upper_offset_limit = (unsigned int)-1 >> 1 22.26 }; 22.27 22.28 + // Flags 22.29 + bool should_reexecute() const { return _flags.bits.reexecute; } 22.30 + void set_should_reexecute(bool z) { _flags.bits.reexecute = z; } 22.31 + 22.32 // Returns the real pc 22.33 address real_pc(const nmethod* code) const; 22.34
23.1 --- a/src/share/vm/code/scopeDesc.cpp Tue Sep 08 09:01:16 2009 +0100 23.2 +++ b/src/share/vm/code/scopeDesc.cpp Tue Sep 08 09:02:48 2009 +0100 23.3 @@ -1,5 +1,5 @@ 23.4 /* 23.5 - * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. 23.6 + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. 23.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 23.8 * 23.9 * This code is free software; you can redistribute it and/or modify it 23.10 @@ -26,17 +26,19 @@ 23.11 # include "incls/_scopeDesc.cpp.incl" 23.12 23.13 23.14 -ScopeDesc::ScopeDesc(const nmethod* code, int decode_offset, int obj_decode_offset) { 23.15 +ScopeDesc::ScopeDesc(const nmethod* code, int decode_offset, int obj_decode_offset, bool reexecute) { 23.16 _code = code; 23.17 _decode_offset = decode_offset; 23.18 _objects = decode_object_values(obj_decode_offset); 23.19 + _reexecute = reexecute; 23.20 decode_body(); 23.21 } 23.22 23.23 -ScopeDesc::ScopeDesc(const nmethod* code, int decode_offset) { 23.24 +ScopeDesc::ScopeDesc(const nmethod* code, int decode_offset, bool reexecute) { 23.25 _code = code; 23.26 _decode_offset = decode_offset; 23.27 _objects = decode_object_values(DebugInformationRecorder::serialized_null); 23.28 + _reexecute = reexecute; 23.29 decode_body(); 23.30 } 23.31 23.32 @@ -45,8 +47,8 @@ 23.33 _code = parent->_code; 23.34 _decode_offset = parent->_sender_decode_offset; 23.35 _objects = parent->_objects; 23.36 + _reexecute = false; //reexecute only applies to the first scope 23.37 decode_body(); 23.38 - assert(_reexecute == false, "reexecute not allowed"); 23.39 } 23.40 23.41 23.42 @@ -57,7 +59,6 @@ 23.43 _sender_decode_offset = DebugInformationRecorder::serialized_null; 23.44 _method = methodHandle(_code->method()); 23.45 _bci = InvocationEntryBci; 23.46 - _reexecute = false; 23.47 _locals_decode_offset = DebugInformationRecorder::serialized_null; 23.48 _expressions_decode_offset = DebugInformationRecorder::serialized_null; 23.49 
_monitors_decode_offset = DebugInformationRecorder::serialized_null; 23.50 @@ -67,7 +68,7 @@ 23.51 23.52 _sender_decode_offset = stream->read_int(); 23.53 _method = methodHandle((methodOop) stream->read_oop()); 23.54 - _bci = stream->read_bci_and_reexecute(_reexecute); 23.55 + _bci = stream->read_bci(); 23.56 23.57 // decode offsets for body and sender 23.58 _locals_decode_offset = stream->read_int();
24.1 --- a/src/share/vm/code/scopeDesc.hpp Tue Sep 08 09:01:16 2009 +0100 24.2 +++ b/src/share/vm/code/scopeDesc.hpp Tue Sep 08 09:02:48 2009 +0100 24.3 @@ -1,5 +1,5 @@ 24.4 /* 24.5 - * Copyright 1997-2006 Sun Microsystems, Inc. All Rights Reserved. 24.6 + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. 24.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 24.8 * 24.9 * This code is free software; you can redistribute it and/or modify it 24.10 @@ -39,8 +39,7 @@ 24.11 DebugInfoReadStream buffer(code, pc_desc->scope_decode_offset()); 24.12 int ignore_sender = buffer.read_int(); 24.13 _method = methodOop(buffer.read_oop()); 24.14 - bool dummy_reexecute; //only methodOop and bci are needed! 24.15 - _bci = buffer.read_bci_and_reexecute(dummy_reexecute); 24.16 + _bci = buffer.read_bci(); 24.17 } 24.18 24.19 methodOop method() { return _method; } 24.20 @@ -53,12 +52,12 @@ 24.21 class ScopeDesc : public ResourceObj { 24.22 public: 24.23 // Constructor 24.24 - ScopeDesc(const nmethod* code, int decode_offset, int obj_decode_offset); 24.25 + ScopeDesc(const nmethod* code, int decode_offset, int obj_decode_offset, bool reexecute); 24.26 24.27 // Calls above, giving default value of "serialized_null" to the 24.28 // "obj_decode_offset" argument. (We don't use a default argument to 24.29 // avoid a .hpp-.hpp dependency.) 24.30 - ScopeDesc(const nmethod* code, int decode_offset); 24.31 + ScopeDesc(const nmethod* code, int decode_offset, bool reexecute); 24.32 24.33 // JVM state 24.34 methodHandle method() const { return _method; }
25.1 --- a/src/share/vm/includeDB_compiler1 Tue Sep 08 09:01:16 2009 +0100 25.2 +++ b/src/share/vm/includeDB_compiler1 Tue Sep 08 09:02:48 2009 +0100 25.3 @@ -409,8 +409,6 @@ 25.4 25.5 compileBroker.cpp c1_Compiler.hpp 25.6 25.7 -frame.hpp c1_Defs.hpp 25.8 - 25.9 frame_<arch>.cpp c1_Runtime1.hpp 25.10 25.11 globals.cpp c1_globals.hpp 25.12 @@ -433,8 +431,6 @@ 25.13 25.14 os_<os_arch>.cpp c1_Runtime1.hpp 25.15 25.16 -registerMap.hpp c1_Defs.hpp 25.17 - 25.18 safepoint.cpp c1_globals.hpp 25.19 25.20 sharedRuntime.cpp c1_Runtime1.hpp
26.1 --- a/src/share/vm/memory/universe.cpp Tue Sep 08 09:01:16 2009 +0100 26.2 +++ b/src/share/vm/memory/universe.cpp Tue Sep 08 09:02:48 2009 +0100 26.3 @@ -749,7 +749,10 @@ 26.4 assert(mode == UnscaledNarrowOop || 26.5 mode == ZeroBasedNarrowOop || 26.6 mode == HeapBasedNarrowOop, "mode is invalid"); 26.7 - 26.8 + // Return specified base for the first request. 26.9 + if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) { 26.10 + return (char*)HeapBaseMinAddress; 26.11 + } 26.12 const size_t total_size = heap_size + HeapBaseMinAddress; 26.13 if (total_size <= OopEncodingHeapMax && (mode != HeapBasedNarrowOop)) { 26.14 if (total_size <= NarrowOopHeapMax && (mode == UnscaledNarrowOop) && 26.15 @@ -857,7 +860,7 @@ 26.16 // Can't reserve heap below 4Gb. 26.17 Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes); 26.18 } else { 26.19 - assert(Universe::narrow_oop_shift() == 0, "use unscaled narrow oop"); 26.20 + Universe::set_narrow_oop_shift(0); 26.21 if (PrintCompressedOopsMode) { 26.22 tty->print(", 32-bits Oops"); 26.23 }
27.1 --- a/src/share/vm/oops/instanceKlass.cpp Tue Sep 08 09:01:16 2009 +0100 27.2 +++ b/src/share/vm/oops/instanceKlass.cpp Tue Sep 08 09:02:48 2009 +0100 27.3 @@ -1085,6 +1085,7 @@ 27.4 if (indices == NULL || (length = (size_t)indices[0]) <= idnum) { 27.5 size_t size = MAX2(idnum+1, (size_t)idnum_allocated_count()); 27.6 int* new_indices = NEW_C_HEAP_ARRAY(int, size+1); 27.7 + new_indices[0] =(int)size; // array size held in the first element 27.8 // Copy the existing entries, if any 27.9 size_t i; 27.10 for (i = 0; i < length; i++) {
28.1 --- a/src/share/vm/opto/c2_globals.hpp Tue Sep 08 09:01:16 2009 +0100 28.2 +++ b/src/share/vm/opto/c2_globals.hpp Tue Sep 08 09:02:48 2009 +0100 28.3 @@ -376,7 +376,7 @@ 28.4 product(intx, AutoBoxCacheMax, 128, \ 28.5 "Sets max value cached by the java.lang.Integer autobox cache") \ 28.6 \ 28.7 - product(bool, DoEscapeAnalysis, false, \ 28.8 + product(bool, DoEscapeAnalysis, true, \ 28.9 "Perform escape analysis") \ 28.10 \ 28.11 notproduct(bool, PrintEscapeAnalysis, false, \
29.1 --- a/src/share/vm/opto/callnode.cpp Tue Sep 08 09:01:16 2009 +0100 29.2 +++ b/src/share/vm/opto/callnode.cpp Tue Sep 08 09:02:48 2009 +0100 29.3 @@ -493,7 +493,8 @@ 29.4 if (!printed) 29.5 _method->print_short_name(st); 29.6 st->print(" @ bci:%d",_bci); 29.7 - st->print(" reexecute:%s", _reexecute==Reexecute_True?"true":"false"); 29.8 + if(_reexecute == Reexecute_True) 29.9 + st->print(" reexecute"); 29.10 } else { 29.11 st->print(" runtime stub"); 29.12 }
30.1 --- a/src/share/vm/opto/chaitin.hpp Tue Sep 08 09:01:16 2009 +0100 30.2 +++ b/src/share/vm/opto/chaitin.hpp Tue Sep 08 09:02:48 2009 +0100 30.3 @@ -458,6 +458,16 @@ 30.4 // Post-Allocation peephole copy removal 30.5 void post_allocate_copy_removal(); 30.6 Node *skip_copies( Node *c ); 30.7 + // Replace the old node with the current live version of that value 30.8 + // and yank the old value if it's dead. 30.9 + int replace_and_yank_if_dead( Node *old, OptoReg::Name nreg, 30.10 + Block *current_block, Node_List& value, Node_List& regnd ) { 30.11 + Node* v = regnd[nreg]; 30.12 + assert(v->outcnt() != 0, "no dead values"); 30.13 + old->replace_by(v); 30.14 + return yank_if_dead(old, current_block, &value, &regnd); 30.15 + } 30.16 + 30.17 int yank_if_dead( Node *old, Block *current_block, Node_List *value, Node_List *regnd ); 30.18 int elide_copy( Node *n, int k, Block *current_block, Node_List &value, Node_List &regnd, bool can_change_regs ); 30.19 int use_prior_register( Node *copy, uint idx, Node *def, Block *current_block, Node_List &value, Node_List &regnd );
31.1 --- a/src/share/vm/opto/compile.cpp Tue Sep 08 09:01:16 2009 +0100 31.2 +++ b/src/share/vm/opto/compile.cpp Tue Sep 08 09:02:48 2009 +0100 31.3 @@ -1545,7 +1545,7 @@ 31.4 if((loop_opts_cnt > 0) && (has_loops() || has_split_ifs())) { 31.5 { 31.6 TracePhase t2("idealLoop", &_t_idealLoop, true); 31.7 - PhaseIdealLoop ideal_loop( igvn, NULL, true ); 31.8 + PhaseIdealLoop ideal_loop( igvn, true ); 31.9 loop_opts_cnt--; 31.10 if (major_progress()) print_method("PhaseIdealLoop 1", 2); 31.11 if (failing()) return; 31.12 @@ -1553,7 +1553,7 @@ 31.13 // Loop opts pass if partial peeling occurred in previous pass 31.14 if(PartialPeelLoop && major_progress() && (loop_opts_cnt > 0)) { 31.15 TracePhase t3("idealLoop", &_t_idealLoop, true); 31.16 - PhaseIdealLoop ideal_loop( igvn, NULL, false ); 31.17 + PhaseIdealLoop ideal_loop( igvn, false ); 31.18 loop_opts_cnt--; 31.19 if (major_progress()) print_method("PhaseIdealLoop 2", 2); 31.20 if (failing()) return; 31.21 @@ -1561,10 +1561,15 @@ 31.22 // Loop opts pass for loop-unrolling before CCP 31.23 if(major_progress() && (loop_opts_cnt > 0)) { 31.24 TracePhase t4("idealLoop", &_t_idealLoop, true); 31.25 - PhaseIdealLoop ideal_loop( igvn, NULL, false ); 31.26 + PhaseIdealLoop ideal_loop( igvn, false ); 31.27 loop_opts_cnt--; 31.28 if (major_progress()) print_method("PhaseIdealLoop 3", 2); 31.29 } 31.30 + if (!failing()) { 31.31 + // Verify that last round of loop opts produced a valid graph 31.32 + NOT_PRODUCT( TracePhase t2("idealLoopVerify", &_t_idealLoopVerify, TimeCompiler); ) 31.33 + PhaseIdealLoop::verify(igvn); 31.34 + } 31.35 } 31.36 if (failing()) return; 31.37 31.38 @@ -1597,12 +1602,20 @@ 31.39 while(major_progress() && (loop_opts_cnt > 0)) { 31.40 TracePhase t2("idealLoop", &_t_idealLoop, true); 31.41 assert( cnt++ < 40, "infinite cycle in loop optimization" ); 31.42 - PhaseIdealLoop ideal_loop( igvn, NULL, true ); 31.43 + PhaseIdealLoop ideal_loop( igvn, true ); 31.44 loop_opts_cnt--; 31.45 if (major_progress()) 
print_method("PhaseIdealLoop iterations", 2); 31.46 if (failing()) return; 31.47 } 31.48 } 31.49 + 31.50 + { 31.51 + // Verify that all previous optimizations produced a valid graph 31.52 + // at least to this point, even if no loop optimizations were done. 31.53 + NOT_PRODUCT( TracePhase t2("idealLoopVerify", &_t_idealLoopVerify, TimeCompiler); ) 31.54 + PhaseIdealLoop::verify(igvn); 31.55 + } 31.56 + 31.57 { 31.58 NOT_PRODUCT( TracePhase t2("macroExpand", &_t_macroExpand, TimeCompiler); ) 31.59 PhaseMacroExpand mex(igvn); 31.60 @@ -2520,7 +2533,7 @@ 31.61 31.62 // If original bytecodes contained a mixture of floats and doubles 31.63 // check if the optimizer has made it homogenous, item (3). 31.64 - if( Use24BitFPMode && Use24BitFP && 31.65 + if( Use24BitFPMode && Use24BitFP && UseSSE == 0 && 31.66 frc.get_float_count() > 32 && 31.67 frc.get_double_count() == 0 && 31.68 (10 * frc.get_call_count() < frc.get_float_count()) ) {
32.1 --- a/src/share/vm/opto/domgraph.cpp Tue Sep 08 09:01:16 2009 +0100 32.2 +++ b/src/share/vm/opto/domgraph.cpp Tue Sep 08 09:02:48 2009 +0100 32.3 @@ -1,5 +1,5 @@ 32.4 /* 32.5 - * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved. 32.6 + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. 32.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 32.8 * 32.9 * This code is free software; you can redistribute it and/or modify it 32.10 @@ -396,7 +396,7 @@ 32.11 // nodes (using the is_CFG() call) and places them in a dominator tree. Thus, 32.12 // it needs a count of the CFG nodes for the mapping table. This is the 32.13 // Lengauer & Tarjan O(E-alpha(E,V)) algorithm. 32.14 -void PhaseIdealLoop::Dominators( ) { 32.15 +void PhaseIdealLoop::Dominators() { 32.16 ResourceMark rm; 32.17 // Setup mappings from my Graph to Tarjan's stuff and back 32.18 // Note: Tarjan uses 1-based arrays 32.19 @@ -454,7 +454,7 @@ 32.20 // flow into the main graph (and hence into ROOT) but are not reachable 32.21 // from above. Such code is dead, but requires a global pass to detect 32.22 // it; this global pass was the 'build_loop_tree' pass run just prior. 32.23 - if( whead->is_Region() ) { 32.24 + if( !_verify_only && whead->is_Region() ) { 32.25 for( uint i = 1; i < whead->req(); i++ ) { 32.26 if (!has_node(whead->in(i))) { 32.27 // Kill dead input path
33.1 --- a/src/share/vm/opto/loopnode.cpp Tue Sep 08 09:01:16 2009 +0100 33.2 +++ b/src/share/vm/opto/loopnode.cpp Tue Sep 08 09:02:48 2009 +0100 33.3 @@ -1420,13 +1420,12 @@ 33.4 } 33.5 33.6 //============================================================================= 33.7 -//------------------------------PhaseIdealLoop--------------------------------- 33.8 +//----------------------------build_and_optimize------------------------------- 33.9 // Create a PhaseLoop. Build the ideal Loop tree. Map each Ideal Node to 33.10 // its corresponding LoopNode. If 'optimize' is true, do some loop cleanups. 33.11 -PhaseIdealLoop::PhaseIdealLoop( PhaseIterGVN &igvn, const PhaseIdealLoop *verify_me, bool do_split_ifs ) 33.12 - : PhaseTransform(Ideal_Loop), 33.13 - _igvn(igvn), 33.14 - _dom_lca_tags(C->comp_arena()) { 33.15 +void PhaseIdealLoop::build_and_optimize(bool do_split_ifs) { 33.16 + int old_progress = C->major_progress(); 33.17 + 33.18 // Reset major-progress flag for the driver's heuristics 33.19 C->clear_major_progress(); 33.20 33.21 @@ -1465,18 +1464,20 @@ 33.22 } 33.23 33.24 // No loops after all 33.25 - if( !_ltree_root->_child ) C->set_has_loops(false); 33.26 + if( !_ltree_root->_child && !_verify_only ) C->set_has_loops(false); 33.27 33.28 // There should always be an outer loop containing the Root and Return nodes. 33.29 // If not, we have a degenerate empty program. Bail out in this case. 
33.30 if (!has_node(C->root())) { 33.31 - C->clear_major_progress(); 33.32 - C->record_method_not_compilable("empty program detected during loop optimization"); 33.33 + if (!_verify_only) { 33.34 + C->clear_major_progress(); 33.35 + C->record_method_not_compilable("empty program detected during loop optimization"); 33.36 + } 33.37 return; 33.38 } 33.39 33.40 // Nothing to do, so get out 33.41 - if( !C->has_loops() && !do_split_ifs && !verify_me) { 33.42 + if( !C->has_loops() && !do_split_ifs && !_verify_me && !_verify_only ) { 33.43 _igvn.optimize(); // Cleanup NeverBranches 33.44 return; 33.45 } 33.46 @@ -1486,7 +1487,7 @@ 33.47 33.48 // Split shared headers and insert loop landing pads. 33.49 // Do not bother doing this on the Root loop of course. 33.50 - if( !verify_me && _ltree_root->_child ) { 33.51 + if( !_verify_me && !_verify_only && _ltree_root->_child ) { 33.52 if( _ltree_root->_child->beautify_loops( this ) ) { 33.53 // Re-build loop tree! 33.54 _ltree_root->_child = NULL; 33.55 @@ -1515,24 +1516,26 @@ 33.56 33.57 Dominators(); 33.58 33.59 - // As a side effect, Dominators removed any unreachable CFG paths 33.60 - // into RegionNodes. It doesn't do this test against Root, so 33.61 - // we do it here. 33.62 - for( uint i = 1; i < C->root()->req(); i++ ) { 33.63 - if( !_nodes[C->root()->in(i)->_idx] ) { // Dead path into Root? 33.64 - _igvn.hash_delete(C->root()); 33.65 - C->root()->del_req(i); 33.66 - _igvn._worklist.push(C->root()); 33.67 - i--; // Rerun same iteration on compressed edges 33.68 + if (!_verify_only) { 33.69 + // As a side effect, Dominators removed any unreachable CFG paths 33.70 + // into RegionNodes. It doesn't do this test against Root, so 33.71 + // we do it here. 33.72 + for( uint i = 1; i < C->root()->req(); i++ ) { 33.73 + if( !_nodes[C->root()->in(i)->_idx] ) { // Dead path into Root? 
33.74 + _igvn.hash_delete(C->root()); 33.75 + C->root()->del_req(i); 33.76 + _igvn._worklist.push(C->root()); 33.77 + i--; // Rerun same iteration on compressed edges 33.78 + } 33.79 } 33.80 + 33.81 + // Given dominators, try to find inner loops with calls that must 33.82 + // always be executed (call dominates loop tail). These loops do 33.83 + // not need a separate safepoint. 33.84 + Node_List cisstack(a); 33.85 + _ltree_root->check_safepts(visited, cisstack); 33.86 } 33.87 33.88 - // Given dominators, try to find inner loops with calls that must 33.89 - // always be executed (call dominates loop tail). These loops do 33.90 - // not need a separate safepoint. 33.91 - Node_List cisstack(a); 33.92 - _ltree_root->check_safepts(visited, cisstack); 33.93 - 33.94 // Walk the DATA nodes and place into loops. Find earliest control 33.95 // node. For CFG nodes, the _nodes array starts out and remains 33.96 // holding the associated IdealLoopTree pointer. For DATA nodes, the 33.97 @@ -1548,11 +1551,11 @@ 33.98 // it will be processed among C->top() inputs 33.99 worklist.push( C->top() ); 33.100 visited.set( C->top()->_idx ); // Set C->top() as visited now 33.101 - build_loop_early( visited, worklist, nstack, verify_me ); 33.102 + build_loop_early( visited, worklist, nstack ); 33.103 33.104 // Given early legal placement, try finding counted loops. This placement 33.105 // is good enough to discover most loop invariants. 33.106 - if( !verify_me ) 33.107 + if( !_verify_me && !_verify_only ) 33.108 _ltree_root->counted_loop( this ); 33.109 33.110 // Find latest loop placement. Find ideal loop placement. 
33.111 @@ -1562,16 +1565,25 @@ 33.112 worklist.push( C->root() ); 33.113 NOT_PRODUCT( C->verify_graph_edges(); ) 33.114 worklist.push( C->top() ); 33.115 - build_loop_late( visited, worklist, nstack, verify_me ); 33.116 + build_loop_late( visited, worklist, nstack ); 33.117 + 33.118 + if (_verify_only) { 33.119 + // restore major progress flag 33.120 + for (int i = 0; i < old_progress; i++) 33.121 + C->set_major_progress(); 33.122 + assert(C->unique() == unique, "verification mode made Nodes? ? ?"); 33.123 + assert(_igvn._worklist.size() == 0, "shouldn't push anything"); 33.124 + return; 33.125 + } 33.126 33.127 // clear out the dead code 33.128 while(_deadlist.size()) { 33.129 - igvn.remove_globally_dead_node(_deadlist.pop()); 33.130 + _igvn.remove_globally_dead_node(_deadlist.pop()); 33.131 } 33.132 33.133 #ifndef PRODUCT 33.134 C->verify_graph_edges(); 33.135 - if( verify_me ) { // Nested verify pass? 33.136 + if( _verify_me ) { // Nested verify pass? 33.137 // Check to see if the verify mode is broken 33.138 assert(C->unique() == unique, "non-optimize mode made Nodes? ? ?"); 33.139 return; 33.140 @@ -1678,7 +1690,7 @@ 33.141 void PhaseIdealLoop::verify() const { 33.142 int old_progress = C->major_progress(); 33.143 ResourceMark rm; 33.144 - PhaseIdealLoop loop_verify( _igvn, this, false ); 33.145 + PhaseIdealLoop loop_verify( _igvn, this ); 33.146 VectorSet visited(Thread::current()->resource_area()); 33.147 33.148 fail = 0; 33.149 @@ -2138,54 +2150,58 @@ 33.150 // optimizing an infinite loop? 33.151 l = _ltree_root; // Oops, found infinite loop 33.152 33.153 - // Insert the NeverBranch between 'm' and it's control user. 
33.154 - NeverBranchNode *iff = new (C, 1) NeverBranchNode( m ); 33.155 - _igvn.register_new_node_with_optimizer(iff); 33.156 - set_loop(iff, l); 33.157 - Node *if_t = new (C, 1) CProjNode( iff, 0 ); 33.158 - _igvn.register_new_node_with_optimizer(if_t); 33.159 - set_loop(if_t, l); 33.160 + if (!_verify_only) { 33.161 + // Insert the NeverBranch between 'm' and it's control user. 33.162 + NeverBranchNode *iff = new (C, 1) NeverBranchNode( m ); 33.163 + _igvn.register_new_node_with_optimizer(iff); 33.164 + set_loop(iff, l); 33.165 + Node *if_t = new (C, 1) CProjNode( iff, 0 ); 33.166 + _igvn.register_new_node_with_optimizer(if_t); 33.167 + set_loop(if_t, l); 33.168 33.169 - Node* cfg = NULL; // Find the One True Control User of m 33.170 - for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) { 33.171 - Node* x = m->fast_out(j); 33.172 - if (x->is_CFG() && x != m && x != iff) 33.173 - { cfg = x; break; } 33.174 + Node* cfg = NULL; // Find the One True Control User of m 33.175 + for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) { 33.176 + Node* x = m->fast_out(j); 33.177 + if (x->is_CFG() && x != m && x != iff) 33.178 + { cfg = x; break; } 33.179 + } 33.180 + assert(cfg != NULL, "must find the control user of m"); 33.181 + uint k = 0; // Probably cfg->in(0) 33.182 + while( cfg->in(k) != m ) k++; // But check incase cfg is a Region 33.183 + cfg->set_req( k, if_t ); // Now point to NeverBranch 33.184 + 33.185 + // Now create the never-taken loop exit 33.186 + Node *if_f = new (C, 1) CProjNode( iff, 1 ); 33.187 + _igvn.register_new_node_with_optimizer(if_f); 33.188 + set_loop(if_f, l); 33.189 + // Find frame ptr for Halt. Relies on the optimizer 33.190 + // V-N'ing. Easier and quicker than searching through 33.191 + // the program structure. 
33.192 + Node *frame = new (C, 1) ParmNode( C->start(), TypeFunc::FramePtr ); 33.193 + _igvn.register_new_node_with_optimizer(frame); 33.194 + // Halt & Catch Fire 33.195 + Node *halt = new (C, TypeFunc::Parms) HaltNode( if_f, frame ); 33.196 + _igvn.register_new_node_with_optimizer(halt); 33.197 + set_loop(halt, l); 33.198 + C->root()->add_req(halt); 33.199 } 33.200 - assert(cfg != NULL, "must find the control user of m"); 33.201 - uint k = 0; // Probably cfg->in(0) 33.202 - while( cfg->in(k) != m ) k++; // But check incase cfg is a Region 33.203 - cfg->set_req( k, if_t ); // Now point to NeverBranch 33.204 - 33.205 - // Now create the never-taken loop exit 33.206 - Node *if_f = new (C, 1) CProjNode( iff, 1 ); 33.207 - _igvn.register_new_node_with_optimizer(if_f); 33.208 - set_loop(if_f, l); 33.209 - // Find frame ptr for Halt. Relies on the optimizer 33.210 - // V-N'ing. Easier and quicker than searching through 33.211 - // the program structure. 33.212 - Node *frame = new (C, 1) ParmNode( C->start(), TypeFunc::FramePtr ); 33.213 - _igvn.register_new_node_with_optimizer(frame); 33.214 - // Halt & Catch Fire 33.215 - Node *halt = new (C, TypeFunc::Parms) HaltNode( if_f, frame ); 33.216 - _igvn.register_new_node_with_optimizer(halt); 33.217 - set_loop(halt, l); 33.218 - C->root()->add_req(halt); 33.219 set_loop(C->root(), _ltree_root); 33.220 } 33.221 } 33.222 // Weeny check for irreducible. This child was already visited (this 33.223 // IS the post-work phase). Is this child's loop header post-visited 33.224 // as well? If so, then I found another entry into the loop. 
33.225 - while( is_postvisited(l->_head) ) { 33.226 - // found irreducible 33.227 - l->_irreducible = 1; // = true 33.228 - l = l->_parent; 33.229 - _has_irreducible_loops = true; 33.230 - // Check for bad CFG here to prevent crash, and bailout of compile 33.231 - if (l == NULL) { 33.232 - C->record_method_not_compilable("unhandled CFG detected during loop optimization"); 33.233 - return pre_order; 33.234 + if (!_verify_only) { 33.235 + while( is_postvisited(l->_head) ) { 33.236 + // found irreducible 33.237 + l->_irreducible = 1; // = true 33.238 + l = l->_parent; 33.239 + _has_irreducible_loops = true; 33.240 + // Check for bad CFG here to prevent crash, and bailout of compile 33.241 + if (l == NULL) { 33.242 + C->record_method_not_compilable("unhandled CFG detected during loop optimization"); 33.243 + return pre_order; 33.244 + } 33.245 } 33.246 } 33.247 33.248 @@ -2253,7 +2269,7 @@ 33.249 // Put Data nodes into some loop nest, by setting the _nodes[]->loop mapping. 33.250 // First pass computes the earliest controlling node possible. This is the 33.251 // controlling input with the deepest dominating depth. 33.252 -void PhaseIdealLoop::build_loop_early( VectorSet &visited, Node_List &worklist, Node_Stack &nstack, const PhaseIdealLoop *verify_me ) { 33.253 +void PhaseIdealLoop::build_loop_early( VectorSet &visited, Node_List &worklist, Node_Stack &nstack ) { 33.254 while (worklist.size() != 0) { 33.255 // Use local variables nstack_top_n & nstack_top_i to cache values 33.256 // on nstack's top. 
33.257 @@ -2285,7 +2301,7 @@ 33.258 // (the old code here would yank a 2nd safepoint after seeing a 33.259 // first one, even though the 1st did not dominate in the loop body 33.260 // and thus could be avoided indefinitely) 33.261 - if( !verify_me && ilt->_has_sfpt && n->Opcode() == Op_SafePoint && 33.262 + if( !_verify_only && !_verify_me && ilt->_has_sfpt && n->Opcode() == Op_SafePoint && 33.263 is_deleteable_safept(n)) { 33.264 Node *in = n->in(TypeFunc::Control); 33.265 lazy_replace(n,in); // Pull safepoint now 33.266 @@ -2408,12 +2424,31 @@ 33.267 return LCA; 33.268 } 33.269 33.270 -//------------------------------get_late_ctrl---------------------------------- 33.271 -// Compute latest legal control. 33.272 -Node *PhaseIdealLoop::get_late_ctrl( Node *n, Node *early ) { 33.273 - assert(early != NULL, "early control should not be NULL"); 33.274 +bool PhaseIdealLoop::verify_dominance(Node* n, Node* use, Node* LCA, Node* early) { 33.275 + bool had_error = false; 33.276 +#ifdef ASSERT 33.277 + if (early != C->root()) { 33.278 + // Make sure that there's a dominance path from use to LCA 33.279 + Node* d = use; 33.280 + while (d != LCA) { 33.281 + d = idom(d); 33.282 + if (d == C->root()) { 33.283 + tty->print_cr("*** Use %d isn't dominated by def %d", use->_idx, n->_idx); 33.284 + n->dump(); 33.285 + use->dump(); 33.286 + had_error = true; 33.287 + break; 33.288 + } 33.289 + } 33.290 + } 33.291 +#endif 33.292 + return had_error; 33.293 +} 33.294 33.295 + 33.296 +Node* PhaseIdealLoop::compute_lca_of_uses(Node* n, Node* early, bool verify) { 33.297 // Compute LCA over list of uses 33.298 + bool had_error = false; 33.299 Node *LCA = NULL; 33.300 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax && LCA != early; i++) { 33.301 Node* c = n->fast_out(i); 33.302 @@ -2423,15 +2458,34 @@ 33.303 for( uint j=1; j<c->req(); j++ ) {// For all inputs 33.304 if( c->in(j) == n ) { // Found matching input? 
33.305 Node *use = c->in(0)->in(j); 33.306 + if (_verify_only && use->is_top()) continue; 33.307 LCA = dom_lca_for_get_late_ctrl( LCA, use, n ); 33.308 + if (verify) had_error = verify_dominance(n, use, LCA, early) || had_error; 33.309 } 33.310 } 33.311 } else { 33.312 // For CFG data-users, use is in the block just prior 33.313 Node *use = has_ctrl(c) ? get_ctrl(c) : c->in(0); 33.314 LCA = dom_lca_for_get_late_ctrl( LCA, use, n ); 33.315 + if (verify) had_error = verify_dominance(n, use, LCA, early) || had_error; 33.316 } 33.317 } 33.318 + assert(!had_error, "bad dominance"); 33.319 + return LCA; 33.320 +} 33.321 + 33.322 +//------------------------------get_late_ctrl---------------------------------- 33.323 +// Compute latest legal control. 33.324 +Node *PhaseIdealLoop::get_late_ctrl( Node *n, Node *early ) { 33.325 + assert(early != NULL, "early control should not be NULL"); 33.326 + 33.327 + Node* LCA = compute_lca_of_uses(n, early); 33.328 +#ifdef ASSERT 33.329 + if (LCA == C->root() && LCA != early) { 33.330 + // def doesn't dominate uses so print some useful debugging output 33.331 + compute_lca_of_uses(n, early, true); 33.332 + } 33.333 +#endif 33.334 33.335 // if this is a load, check for anti-dependent stores 33.336 // We use a conservative algorithm to identify potential interfering 33.337 @@ -2576,7 +2630,7 @@ 33.338 //------------------------------build_loop_late-------------------------------- 33.339 // Put Data nodes into some loop nest, by setting the _nodes[]->loop mapping. 33.340 // Second pass finds latest legal placement, and ideal loop placement. 
33.341 -void PhaseIdealLoop::build_loop_late( VectorSet &visited, Node_List &worklist, Node_Stack &nstack, const PhaseIdealLoop *verify_me ) { 33.342 +void PhaseIdealLoop::build_loop_late( VectorSet &visited, Node_List &worklist, Node_Stack &nstack ) { 33.343 while (worklist.size() != 0) { 33.344 Node *n = worklist.pop(); 33.345 // Only visit once 33.346 @@ -2612,7 +2666,7 @@ 33.347 } 33.348 } else { 33.349 // All of n's children have been processed, complete post-processing. 33.350 - build_loop_late_post(n, verify_me); 33.351 + build_loop_late_post(n); 33.352 if (nstack.is_empty()) { 33.353 // Finished all nodes on stack. 33.354 // Process next node on the worklist. 33.355 @@ -2631,9 +2685,9 @@ 33.356 //------------------------------build_loop_late_post--------------------------- 33.357 // Put Data nodes into some loop nest, by setting the _nodes[]->loop mapping. 33.358 // Second pass finds latest legal placement, and ideal loop placement. 33.359 -void PhaseIdealLoop::build_loop_late_post( Node *n, const PhaseIdealLoop *verify_me ) { 33.360 +void PhaseIdealLoop::build_loop_late_post( Node *n ) { 33.361 33.362 - if (n->req() == 2 && n->Opcode() == Op_ConvI2L && !C->major_progress()) { 33.363 + if (n->req() == 2 && n->Opcode() == Op_ConvI2L && !C->major_progress() && !_verify_only) { 33.364 _igvn._worklist.push(n); // Maybe we'll normalize it, if no more loops. 33.365 } 33.366 33.367 @@ -2714,6 +2768,7 @@ 33.368 if( get_loop(legal)->_nest < get_loop(least)->_nest ) 33.369 least = legal; 33.370 } 33.371 + assert(early == legal || legal != C->root(), "bad dominance of inputs"); 33.372 33.373 // Try not to place code on a loop entry projection 33.374 // which can inhibit range check elimination. 33.375 @@ -2731,8 +2786,8 @@ 33.376 #ifdef ASSERT 33.377 // If verifying, verify that 'verify_me' has a legal location 33.378 // and choose it as our location. 
33.379 - if( verify_me ) { 33.380 - Node *v_ctrl = verify_me->get_ctrl_no_update(n); 33.381 + if( _verify_me ) { 33.382 + Node *v_ctrl = _verify_me->get_ctrl_no_update(n); 33.383 Node *legal = LCA; 33.384 while( early != legal ) { // While not at earliest legal 33.385 if( legal == v_ctrl ) break; // Check for prior good location
34.1 --- a/src/share/vm/opto/loopnode.hpp Tue Sep 08 09:01:16 2009 +0100 34.2 +++ b/src/share/vm/opto/loopnode.hpp Tue Sep 08 09:02:48 2009 +0100 34.3 @@ -1,5 +1,5 @@ 34.4 /* 34.5 - * Copyright 1998-2008 Sun Microsystems, Inc. All Rights Reserved. 34.6 + * Copyright 1998-2009 Sun Microsystems, Inc. All Rights Reserved. 34.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 34.8 * 34.9 * This code is free software; you can redistribute it and/or modify it 34.10 @@ -442,6 +442,9 @@ 34.11 uint *_preorders; 34.12 uint _max_preorder; 34.13 34.14 + const PhaseIdealLoop* _verify_me; 34.15 + bool _verify_only; 34.16 + 34.17 // Allocate _preorders[] array 34.18 void allocate_preorders() { 34.19 _max_preorder = C->unique()+8; 34.20 @@ -497,6 +500,12 @@ 34.21 Node_Array _dom_lca_tags; 34.22 void init_dom_lca_tags(); 34.23 void clear_dom_lca_tags(); 34.24 + 34.25 + // Helper for debugging bad dominance relationships 34.26 + bool verify_dominance(Node* n, Node* use, Node* LCA, Node* early); 34.27 + 34.28 + Node* compute_lca_of_uses(Node* n, Node* early, bool verify = false); 34.29 + 34.30 // Inline wrapper for frequent cases: 34.31 // 1) only one use 34.32 // 2) a use is the same as the current LCA passed as 'n1' 34.33 @@ -511,6 +520,7 @@ 34.34 return find_non_split_ctrl(n); 34.35 } 34.36 Node *dom_lca_for_get_late_ctrl_internal( Node *lca, Node *n, Node *tag ); 34.37 + 34.38 // true if CFG node d dominates CFG node n 34.39 bool is_dominator(Node *d, Node *n); 34.40 34.41 @@ -621,9 +631,9 @@ 34.42 IdealLoopTree *sort( IdealLoopTree *loop, IdealLoopTree *innermost ); 34.43 34.44 // Place Data nodes in some loop nest 34.45 - void build_loop_early( VectorSet &visited, Node_List &worklist, Node_Stack &nstack, const PhaseIdealLoop *verify_me ); 34.46 - void build_loop_late ( VectorSet &visited, Node_List &worklist, Node_Stack &nstack, const PhaseIdealLoop *verify_me ); 34.47 - void build_loop_late_post ( Node* n, const PhaseIdealLoop *verify_me ); 34.48 + void 
build_loop_early( VectorSet &visited, Node_List &worklist, Node_Stack &nstack ); 34.49 + void build_loop_late ( VectorSet &visited, Node_List &worklist, Node_Stack &nstack ); 34.50 + void build_loop_late_post ( Node* n ); 34.51 34.52 // Array of immediate dominance info for each CFG node indexed by node idx 34.53 private: 34.54 @@ -662,6 +672,19 @@ 34.55 // Is safept not required by an outer loop? 34.56 bool is_deleteable_safept(Node* sfpt); 34.57 34.58 + // Perform verification that the graph is valid. 34.59 + PhaseIdealLoop( PhaseIterGVN &igvn) : 34.60 + PhaseTransform(Ideal_Loop), 34.61 + _igvn(igvn), 34.62 + _dom_lca_tags(C->comp_arena()), 34.63 + _verify_me(NULL), 34.64 + _verify_only(true) { 34.65 + build_and_optimize(false); 34.66 + } 34.67 + 34.68 + // build the loop tree and perform any requested optimizations 34.69 + void build_and_optimize(bool do_split_if); 34.70 + 34.71 public: 34.72 // Dominators for the sea of nodes 34.73 void Dominators(); 34.74 @@ -671,7 +694,32 @@ 34.75 Node *dom_lca_internal( Node *n1, Node *n2 ) const; 34.76 34.77 // Compute the Ideal Node to Loop mapping 34.78 - PhaseIdealLoop( PhaseIterGVN &igvn, const PhaseIdealLoop *verify_me, bool do_split_ifs ); 34.79 + PhaseIdealLoop( PhaseIterGVN &igvn, bool do_split_ifs) : 34.80 + PhaseTransform(Ideal_Loop), 34.81 + _igvn(igvn), 34.82 + _dom_lca_tags(C->comp_arena()), 34.83 + _verify_me(NULL), 34.84 + _verify_only(false) { 34.85 + build_and_optimize(do_split_ifs); 34.86 + } 34.87 + 34.88 + // Verify that verify_me made the same decisions as a fresh run. 34.89 + PhaseIdealLoop( PhaseIterGVN &igvn, const PhaseIdealLoop *verify_me) : 34.90 + PhaseTransform(Ideal_Loop), 34.91 + _igvn(igvn), 34.92 + _dom_lca_tags(C->comp_arena()), 34.93 + _verify_me(verify_me), 34.94 + _verify_only(false) { 34.95 + build_and_optimize(false); 34.96 + } 34.97 + 34.98 + // Build and verify the loop tree without modifying the graph. 
This 34.99 + // is useful to verify that all inputs properly dominate their uses. 34.100 + static void verify(PhaseIterGVN& igvn) { 34.101 +#ifdef ASSERT 34.102 + PhaseIdealLoop v(igvn); 34.103 +#endif 34.104 + } 34.105 34.106 // True if the method has at least 1 irreducible loop 34.107 bool _has_irreducible_loops;
35.1 --- a/src/share/vm/opto/phase.cpp Tue Sep 08 09:01:16 2009 +0100 35.2 +++ b/src/share/vm/opto/phase.cpp Tue Sep 08 09:02:48 2009 +0100 35.3 @@ -1,5 +1,5 @@ 35.4 /* 35.5 - * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. 35.6 + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. 35.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 35.8 * 35.9 * This code is free software; you can redistribute it and/or modify it 35.10 @@ -53,6 +53,7 @@ 35.11 elapsedTimer Phase::_t_registerMethod; 35.12 elapsedTimer Phase::_t_temporaryTimer1; 35.13 elapsedTimer Phase::_t_temporaryTimer2; 35.14 +elapsedTimer Phase::_t_idealLoopVerify; 35.15 35.16 // Subtimers for _t_optimizer 35.17 elapsedTimer Phase::_t_iterGVN; 35.18 @@ -88,51 +89,52 @@ 35.19 tty->print_cr ("Accumulated compiler times:"); 35.20 tty->print_cr ("---------------------------"); 35.21 tty->print_cr (" Total compilation: %3.3f sec.", Phase::_t_totalCompilation.seconds()); 35.22 - tty->print (" method compilation : %3.3f sec", Phase::_t_methodCompilation.seconds()); 35.23 + tty->print (" method compilation : %3.3f sec", Phase::_t_methodCompilation.seconds()); 35.24 tty->print ("/%d bytes",_total_bytes_compiled); 35.25 tty->print_cr (" (%3.0f bytes per sec) ", Phase::_total_bytes_compiled / Phase::_t_methodCompilation.seconds()); 35.26 - tty->print_cr (" stub compilation : %3.3f sec.", Phase::_t_stubCompilation.seconds()); 35.27 + tty->print_cr (" stub compilation : %3.3f sec.", Phase::_t_stubCompilation.seconds()); 35.28 tty->print_cr (" Phases:"); 35.29 - tty->print_cr (" parse : %3.3f sec", Phase::_t_parser.seconds()); 35.30 + tty->print_cr (" parse : %3.3f sec", Phase::_t_parser.seconds()); 35.31 if (DoEscapeAnalysis) { 35.32 - tty->print_cr (" escape analysis : %3.3f sec", Phase::_t_escapeAnalysis.seconds()); 35.33 + tty->print_cr (" escape analysis : %3.3f sec", Phase::_t_escapeAnalysis.seconds()); 35.34 } 35.35 - tty->print_cr (" optimizer : %3.3f sec", 
Phase::_t_optimizer.seconds()); 35.36 + tty->print_cr (" optimizer : %3.3f sec", Phase::_t_optimizer.seconds()); 35.37 if( Verbose || WizardMode ) { 35.38 - tty->print_cr (" iterGVN : %3.3f sec", Phase::_t_iterGVN.seconds()); 35.39 - tty->print_cr (" idealLoop : %3.3f sec", Phase::_t_idealLoop.seconds()); 35.40 - tty->print_cr (" ccp : %3.3f sec", Phase::_t_ccp.seconds()); 35.41 - tty->print_cr (" iterGVN2 : %3.3f sec", Phase::_t_iterGVN2.seconds()); 35.42 - tty->print_cr (" graphReshape : %3.3f sec", Phase::_t_graphReshaping.seconds()); 35.43 + tty->print_cr (" iterGVN : %3.3f sec", Phase::_t_iterGVN.seconds()); 35.44 + tty->print_cr (" idealLoop : %3.3f sec", Phase::_t_idealLoop.seconds()); 35.45 + tty->print_cr (" idealLoopVerify: %3.3f sec", Phase::_t_idealLoopVerify.seconds()); 35.46 + tty->print_cr (" ccp : %3.3f sec", Phase::_t_ccp.seconds()); 35.47 + tty->print_cr (" iterGVN2 : %3.3f sec", Phase::_t_iterGVN2.seconds()); 35.48 + tty->print_cr (" graphReshape : %3.3f sec", Phase::_t_graphReshaping.seconds()); 35.49 double optimizer_subtotal = Phase::_t_iterGVN.seconds() + 35.50 Phase::_t_idealLoop.seconds() + Phase::_t_ccp.seconds() + 35.51 Phase::_t_graphReshaping.seconds(); 35.52 double percent_of_optimizer = ((optimizer_subtotal == 0.0) ? 
0.0 : (optimizer_subtotal / Phase::_t_optimizer.seconds() * 100.0)); 35.53 - tty->print_cr (" subtotal : %3.3f sec, %3.2f %%", optimizer_subtotal, percent_of_optimizer); 35.54 + tty->print_cr (" subtotal : %3.3f sec, %3.2f %%", optimizer_subtotal, percent_of_optimizer); 35.55 } 35.56 - tty->print_cr (" matcher : %3.3f sec", Phase::_t_matcher.seconds()); 35.57 - tty->print_cr (" scheduler : %3.3f sec", Phase::_t_scheduler.seconds()); 35.58 - tty->print_cr (" regalloc : %3.3f sec", Phase::_t_registerAllocation.seconds()); 35.59 + tty->print_cr (" matcher : %3.3f sec", Phase::_t_matcher.seconds()); 35.60 + tty->print_cr (" scheduler : %3.3f sec", Phase::_t_scheduler.seconds()); 35.61 + tty->print_cr (" regalloc : %3.3f sec", Phase::_t_registerAllocation.seconds()); 35.62 if( Verbose || WizardMode ) { 35.63 - tty->print_cr (" ctorChaitin : %3.3f sec", Phase::_t_ctorChaitin.seconds()); 35.64 - tty->print_cr (" buildIFG : %3.3f sec", Phase::_t_buildIFGphysical.seconds()); 35.65 - tty->print_cr (" computeLive : %3.3f sec", Phase::_t_computeLive.seconds()); 35.66 - tty->print_cr (" regAllocSplit: %3.3f sec", Phase::_t_regAllocSplit.seconds()); 35.67 + tty->print_cr (" ctorChaitin : %3.3f sec", Phase::_t_ctorChaitin.seconds()); 35.68 + tty->print_cr (" buildIFG : %3.3f sec", Phase::_t_buildIFGphysical.seconds()); 35.69 + tty->print_cr (" computeLive : %3.3f sec", Phase::_t_computeLive.seconds()); 35.70 + tty->print_cr (" regAllocSplit : %3.3f sec", Phase::_t_regAllocSplit.seconds()); 35.71 tty->print_cr (" postAllocCopyRemoval: %3.3f sec", Phase::_t_postAllocCopyRemoval.seconds()); 35.72 - tty->print_cr (" fixupSpills : %3.3f sec", Phase::_t_fixupSpills.seconds()); 35.73 + tty->print_cr (" fixupSpills : %3.3f sec", Phase::_t_fixupSpills.seconds()); 35.74 double regalloc_subtotal = Phase::_t_ctorChaitin.seconds() + 35.75 Phase::_t_buildIFGphysical.seconds() + Phase::_t_computeLive.seconds() + 35.76 Phase::_t_regAllocSplit.seconds() + Phase::_t_fixupSpills.seconds() + 35.77 
Phase::_t_postAllocCopyRemoval.seconds(); 35.78 double percent_of_regalloc = ((regalloc_subtotal == 0.0) ? 0.0 : (regalloc_subtotal / Phase::_t_registerAllocation.seconds() * 100.0)); 35.79 - tty->print_cr (" subtotal : %3.3f sec, %3.2f %%", regalloc_subtotal, percent_of_regalloc); 35.80 + tty->print_cr (" subtotal : %3.3f sec, %3.2f %%", regalloc_subtotal, percent_of_regalloc); 35.81 } 35.82 - tty->print_cr (" macroExpand : %3.3f sec", Phase::_t_macroExpand.seconds()); 35.83 - tty->print_cr (" blockOrdering: %3.3f sec", Phase::_t_blockOrdering.seconds()); 35.84 - tty->print_cr (" peephole : %3.3f sec", Phase::_t_peephole.seconds()); 35.85 - tty->print_cr (" codeGen : %3.3f sec", Phase::_t_codeGeneration.seconds()); 35.86 - tty->print_cr (" install_code : %3.3f sec", Phase::_t_registerMethod.seconds()); 35.87 - tty->print_cr (" ------------ : ----------"); 35.88 + tty->print_cr (" macroExpand : %3.3f sec", Phase::_t_macroExpand.seconds()); 35.89 + tty->print_cr (" blockOrdering : %3.3f sec", Phase::_t_blockOrdering.seconds()); 35.90 + tty->print_cr (" peephole : %3.3f sec", Phase::_t_peephole.seconds()); 35.91 + tty->print_cr (" codeGen : %3.3f sec", Phase::_t_codeGeneration.seconds()); 35.92 + tty->print_cr (" install_code : %3.3f sec", Phase::_t_registerMethod.seconds()); 35.93 + tty->print_cr (" -------------- : ----------"); 35.94 double phase_subtotal = Phase::_t_parser.seconds() + 35.95 (DoEscapeAnalysis ? Phase::_t_escapeAnalysis.seconds() : 0.0) + 35.96 Phase::_t_optimizer.seconds() + Phase::_t_graphReshaping.seconds() + 35.97 @@ -143,7 +145,7 @@ 35.98 double percent_of_method_compile = ((phase_subtotal == 0.0) ? 
0.0 : phase_subtotal / Phase::_t_methodCompilation.seconds()) * 100.0; 35.99 // counters inside Compile::CodeGen include time for adapters and stubs 35.100 // so phase-total can be greater than 100% 35.101 - tty->print_cr (" total : %3.3f sec, %3.2f %%", phase_subtotal, percent_of_method_compile); 35.102 + tty->print_cr (" total : %3.3f sec, %3.2f %%", phase_subtotal, percent_of_method_compile); 35.103 35.104 assert( percent_of_method_compile > expected_method_compile_coverage || 35.105 phase_subtotal < minimum_meaningful_method_compile, 35.106 @@ -157,8 +159,8 @@ 35.107 tty->cr(); 35.108 tty->print_cr (" temporaryTimer2: %3.3f sec", Phase::_t_temporaryTimer2.seconds()); 35.109 } 35.110 - tty->print_cr (" output : %3.3f sec", Phase::_t_output.seconds()); 35.111 - tty->print_cr (" isched : %3.3f sec", Phase::_t_instrSched.seconds()); 35.112 - tty->print_cr (" bldOopMaps: %3.3f sec", Phase::_t_buildOopMaps.seconds()); 35.113 + tty->print_cr (" output : %3.3f sec", Phase::_t_output.seconds()); 35.114 + tty->print_cr (" isched : %3.3f sec", Phase::_t_instrSched.seconds()); 35.115 + tty->print_cr (" bldOopMaps : %3.3f sec", Phase::_t_buildOopMaps.seconds()); 35.116 } 35.117 #endif
36.1 --- a/src/share/vm/opto/phase.hpp Tue Sep 08 09:01:16 2009 +0100 36.2 +++ b/src/share/vm/opto/phase.hpp Tue Sep 08 09:02:48 2009 +0100 36.3 @@ -1,5 +1,5 @@ 36.4 /* 36.5 - * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved. 36.6 + * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved. 36.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 36.8 * 36.9 * This code is free software; you can redistribute it and/or modify it 36.10 @@ -83,6 +83,7 @@ 36.11 static elapsedTimer _t_registerMethod; 36.12 static elapsedTimer _t_temporaryTimer1; 36.13 static elapsedTimer _t_temporaryTimer2; 36.14 + static elapsedTimer _t_idealLoopVerify; 36.15 36.16 // Subtimers for _t_optimizer 36.17 static elapsedTimer _t_iterGVN;
37.1 --- a/src/share/vm/opto/phaseX.cpp Tue Sep 08 09:01:16 2009 +0100 37.2 +++ b/src/share/vm/opto/phaseX.cpp Tue Sep 08 09:02:48 2009 +0100 37.3 @@ -1622,9 +1622,11 @@ 37.4 // old goes dead? 37.5 if( old ) { 37.6 switch (old->outcnt()) { 37.7 - case 0: // Kill all his inputs, and recursively kill other dead nodes. 37.8 + case 0: 37.9 + // Put into the worklist to kill later. We do not kill it now because the 37.10 + // recursive kill will delete the current node (this) if dead-loop exists 37.11 if (!old->is_top()) 37.12 - igvn->remove_dead_node( old ); 37.13 + igvn->_worklist.push( old ); 37.14 break; 37.15 case 1: 37.16 if( old->is_Store() || old->has_special_unique_user() )
38.1 --- a/src/share/vm/opto/postaloc.cpp Tue Sep 08 09:01:16 2009 +0100 38.2 +++ b/src/share/vm/opto/postaloc.cpp Tue Sep 08 09:02:48 2009 +0100 38.3 @@ -88,6 +88,7 @@ 38.4 value->map(old_reg,NULL); // Yank from value/regnd maps 38.5 regnd->map(old_reg,NULL); // This register's value is now unknown 38.6 } 38.7 + assert(old->req() <= 2, "can't handle more inputs"); 38.8 Node *tmp = old->req() > 1 ? old->in(1) : NULL; 38.9 old->disconnect_inputs(NULL); 38.10 if( !tmp ) break; 38.11 @@ -530,6 +531,16 @@ 38.12 // Do not change from int to pointer 38.13 Node *val = skip_copies(n); 38.14 38.15 + // Clear out a dead definition before starting so that the 38.16 + // elimination code doesn't have to guard against it. The 38.17 + // definition could in fact be a kill projection with a count of 38.18 + // 0 which is safe but since those are uninteresting for copy 38.19 + // elimination just delete them as well. 38.20 + if (regnd[nreg] != NULL && regnd[nreg]->outcnt() == 0) { 38.21 + regnd.map(nreg, NULL); 38.22 + value.map(nreg, NULL); 38.23 + } 38.24 + 38.25 uint n_ideal_reg = n->ideal_reg(); 38.26 if( is_single_register(n_ideal_reg) ) { 38.27 // If Node 'n' does not change the value mapped by the register, 38.28 @@ -537,8 +548,7 @@ 38.29 // mapping so 'n' will go dead. 38.30 if( value[nreg] != val ) { 38.31 if (eliminate_copy_of_constant(val, n, b, value, regnd, nreg, OptoReg::Bad)) { 38.32 - n->replace_by(regnd[nreg]); 38.33 - j -= yank_if_dead(n,b,&value,®nd); 38.34 + j -= replace_and_yank_if_dead(n, nreg, b, value, regnd); 38.35 } else { 38.36 // Update the mapping: record new Node defined by the register 38.37 regnd.map(nreg,n); 38.38 @@ -546,10 +556,9 @@ 38.39 // Node after skipping all copies. 
38.40 value.map(nreg,val); 38.41 } 38.42 - } else if( !may_be_copy_of_callee(n) && regnd[nreg]->outcnt() != 0 ) { 38.43 + } else if( !may_be_copy_of_callee(n) ) { 38.44 assert( n->is_Copy(), "" ); 38.45 - n->replace_by(regnd[nreg]); 38.46 - j -= yank_if_dead(n,b,&value,®nd); 38.47 + j -= replace_and_yank_if_dead(n, nreg, b, value, regnd); 38.48 } 38.49 } else { 38.50 // If the value occupies a register pair, record same info 38.51 @@ -565,18 +574,16 @@ 38.52 } 38.53 if( value[nreg] != val || value[nreg_lo] != val ) { 38.54 if (eliminate_copy_of_constant(val, n, b, value, regnd, nreg, nreg_lo)) { 38.55 - n->replace_by(regnd[nreg]); 38.56 - j -= yank_if_dead(n,b,&value,®nd); 38.57 + j -= replace_and_yank_if_dead(n, nreg, b, value, regnd); 38.58 } else { 38.59 regnd.map(nreg , n ); 38.60 regnd.map(nreg_lo, n ); 38.61 value.map(nreg ,val); 38.62 value.map(nreg_lo,val); 38.63 } 38.64 - } else if( !may_be_copy_of_callee(n) && regnd[nreg]->outcnt() != 0 ) { 38.65 + } else if( !may_be_copy_of_callee(n) ) { 38.66 assert( n->is_Copy(), "" ); 38.67 - n->replace_by(regnd[nreg]); 38.68 - j -= yank_if_dead(n,b,&value,®nd); 38.69 + j -= replace_and_yank_if_dead(n, nreg, b, value, regnd); 38.70 } 38.71 } 38.72
39.1 --- a/src/share/vm/prims/jvmtiCodeBlobEvents.cpp Tue Sep 08 09:01:16 2009 +0100 39.2 +++ b/src/share/vm/prims/jvmtiCodeBlobEvents.cpp Tue Sep 08 09:02:48 2009 +0100 39.3 @@ -1,5 +1,5 @@ 39.4 /* 39.5 - * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved. 39.6 + * Copyright 2003-2009 Sun Microsystems, Inc. All Rights Reserved. 39.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 39.8 * 39.9 * This code is free software; you can redistribute it and/or modify it 39.10 @@ -402,7 +402,7 @@ 39.11 39.12 address scopes_data = nm->scopes_data_begin(); 39.13 for( pcd = nm->scopes_pcs_begin(); pcd < nm->scopes_pcs_end(); ++pcd ) { 39.14 - ScopeDesc sc0(nm, pcd->scope_decode_offset()); 39.15 + ScopeDesc sc0(nm, pcd->scope_decode_offset(), pcd->should_reexecute()); 39.16 ScopeDesc *sd = &sc0; 39.17 while( !sd->is_top() ) { sd = sd->sender(); } 39.18 int bci = sd->bci();
40.1 --- a/src/share/vm/runtime/arguments.cpp Tue Sep 08 09:01:16 2009 +0100 40.2 +++ b/src/share/vm/runtime/arguments.cpp Tue Sep 08 09:02:48 2009 +0100 40.3 @@ -1233,10 +1233,8 @@ 40.4 // Check that UseCompressedOops can be set with the max heap size allocated 40.5 // by ergonomics. 40.6 if (MaxHeapSize <= max_heap_for_compressed_oops()) { 40.7 - if (FLAG_IS_DEFAULT(UseCompressedOops)) { 40.8 - // Turn off until bug is fixed. 40.9 - // the following line to return it to default status. 40.10 - // FLAG_SET_ERGO(bool, UseCompressedOops, true); 40.11 + if (FLAG_IS_DEFAULT(UseCompressedOops) && !UseG1GC) { 40.12 + FLAG_SET_ERGO(bool, UseCompressedOops, true); 40.13 } 40.14 #ifdef _WIN64 40.15 if (UseLargePages && UseCompressedOops) {
41.1 --- a/src/share/vm/runtime/vframe.hpp Tue Sep 08 09:01:16 2009 +0100 41.2 +++ b/src/share/vm/runtime/vframe.hpp Tue Sep 08 09:02:48 2009 +0100 41.3 @@ -402,12 +402,7 @@ 41.4 DebugInfoReadStream buffer(nm(), decode_offset); 41.5 _sender_decode_offset = buffer.read_int(); 41.6 _method = methodOop(buffer.read_oop()); 41.7 - // Deoptimization needs reexecute bit to determine whether to reexecute the bytecode 41.8 - // only at the time when it "unpack_frames", and the reexecute bit info could always 41.9 - // be obtained from the scopeDesc in the compiledVFrame. As a result, we don't keep 41.10 - // the reexecute bit here. 41.11 - bool dummy_reexecute; 41.12 - _bci = buffer.read_bci_and_reexecute(dummy_reexecute); 41.13 + _bci = buffer.read_bci(); 41.14 41.15 assert(_method->is_method(), "checking type of decoded method"); 41.16 }
42.1 --- a/src/share/vm/runtime/vmStructs.cpp Tue Sep 08 09:01:16 2009 +0100 42.2 +++ b/src/share/vm/runtime/vmStructs.cpp Tue Sep 08 09:02:48 2009 +0100 42.3 @@ -593,6 +593,7 @@ 42.4 \ 42.5 nonstatic_field(PcDesc, _pc_offset, int) \ 42.6 nonstatic_field(PcDesc, _scope_decode_offset, int) \ 42.7 + nonstatic_field(PcDesc, _flags, PcDesc::PcDescFlags) \ 42.8 \ 42.9 /***************************************************/ \ 42.10 /* CodeBlobs (NOTE: incomplete, but only a little) */ \ 42.11 @@ -1158,6 +1159,7 @@ 42.12 /***************************************/ \ 42.13 \ 42.14 declare_toplevel_type(PcDesc) \ 42.15 + declare_integer_type(PcDesc::PcDescFlags) \ 42.16 \ 42.17 /************************/ \ 42.18 /* OopMap and OopMapSet */ \
43.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 43.2 +++ b/test/compiler/6795465/Test6795465.java Tue Sep 08 09:02:48 2009 +0100 43.3 @@ -0,0 +1,47 @@ 43.4 +/* 43.5 + * Copyright 2009 Sun Microsystems, Inc. All Rights Reserved. 43.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 43.7 + * 43.8 + * This code is free software; you can redistribute it and/or modify it 43.9 + * under the terms of the GNU General Public License version 2 only, as 43.10 + * published by the Free Software Foundation. 43.11 + * 43.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 43.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 43.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 43.15 + * version 2 for more details (a copy is included in the LICENSE file that 43.16 + * accompanied this code). 43.17 + * 43.18 + * You should have received a copy of the GNU General Public License version 43.19 + * 2 along with this work; if not, write to the Free Software Foundation, 43.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 43.21 + * 43.22 + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, 43.23 + * CA 95054 USA or visit www.sun.com if you need additional information or 43.24 + * have any questions. 43.25 + * 43.26 + */ 43.27 + 43.28 +/** 43.29 + * @test 43.30 + * @bug 6795465 43.31 + * @summary Crash in assembler_sparc.cpp with client compiler on solaris-sparc 43.32 + * 43.33 + * @run main Test6795465 43.34 + */ 43.35 + 43.36 +public class Test6795465 { 43.37 + static long var_1 = -1; 43.38 + 43.39 + void test() { 43.40 + long var_2 = var_1 * 1; 43.41 + var_2 = var_2 + (new byte[1])[0]; 43.42 + } 43.43 + 43.44 + public static void main(String[] args) { 43.45 + Test6795465 t = new Test6795465(); 43.46 + for (int i = 0; i < 200000; i++) { 43.47 + t.test(); 43.48 + } 43.49 + } 43.50 +}
44.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 44.2 +++ b/test/compiler/6866651/Test.java Tue Sep 08 09:02:48 2009 +0100 44.3 @@ -0,0 +1,47 @@ 44.4 +/* 44.5 + * Copyright 2009 Sun Microsystems, Inc. All Rights Reserved. 44.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 44.7 + * 44.8 + * This code is free software; you can redistribute it and/or modify it 44.9 + * under the terms of the GNU General Public License version 2 only, as 44.10 + * published by the Free Software Foundation. 44.11 + * 44.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 44.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 44.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 44.15 + * version 2 for more details (a copy is included in the LICENSE file that 44.16 + * accompanied this code). 44.17 + * 44.18 + * You should have received a copy of the GNU General Public License version 44.19 + * 2 along with this work; if not, write to the Free Software Foundation, 44.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 44.21 + * 44.22 + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, 44.23 + * CA 95054 USA or visit www.sun.com if you need additional information or 44.24 + * have any questions. 44.25 + */ 44.26 + 44.27 +/** 44.28 + * @test 44.29 + * @bug 6866651 44.30 + * @summary delay dead node elimination in set_req_X to prevent killing the current node when it is in use 44.31 + * 44.32 + * @run main Test 44.33 + */ 44.34 + 44.35 +public class Test { 44.36 + 44.37 + static int sum() { 44.38 + int s = 0; 44.39 + for (int x = 1, y = 0; x != 0; x++, y--) { 44.40 + s ^= y; 44.41 + } 44.42 + return s; 44.43 + } 44.44 + 44.45 + public static void main(final String[] args) { 44.46 + for (int k = 0; k < 2; k++) { 44.47 + System.err.println(String.valueOf(sum())); 44.48 + } 44.49 + } 44.50 +}