Wed, 25 Aug 2010 10:31:45 -0700
Merge
src/share/vm/runtime/globals.hpp | file | annotate | diff | comparison | revisions |
1.1 --- a/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java Mon Aug 23 08:44:03 2010 -0700 1.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java Wed Aug 25 10:31:45 2010 -0700 1.3 @@ -1037,7 +1037,7 @@ 1.4 public void prologue(Address start, Address end) { 1.5 } 1.6 public void visit(CodeBlob blob) { 1.7 - fout.println(gen.genHTML(blob.instructionsBegin())); 1.8 + fout.println(gen.genHTML(blob.contentBegin())); 1.9 } 1.10 public void epilogue() { 1.11 }
2.1 --- a/agent/src/share/classes/sun/jvm/hotspot/c1/Runtime1.java Mon Aug 23 08:44:03 2010 -0700 2.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/c1/Runtime1.java Wed Aug 25 10:31:45 2010 -0700 2.3 @@ -1,5 +1,5 @@ 2.4 /* 2.5 - * Copyright (c) 2000, 2003, Oracle and/or its affiliates. All rights reserved. 2.6 + * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. 2.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 2.8 * 2.9 * This code is free software; you can redistribute it and/or modify it 2.10 @@ -54,7 +54,7 @@ 2.11 2.12 /** FIXME: consider making argument "type-safe" in Java port */ 2.13 public Address entryFor(int id) { 2.14 - return blobFor(id).instructionsBegin(); 2.15 + return blobFor(id).codeBegin(); 2.16 } 2.17 2.18 /** FIXME: consider making argument "type-safe" in Java port */
3.1 --- a/agent/src/share/classes/sun/jvm/hotspot/code/CodeBlob.java Mon Aug 23 08:44:03 2010 -0700 3.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/code/CodeBlob.java Wed Aug 25 10:31:45 2010 -0700 3.3 @@ -1,5 +1,5 @@ 3.4 /* 3.5 - * Copyright (c) 2000, 2005, Oracle and/or its affiliates. All rights reserved. 3.6 + * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. 3.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 3.8 * 3.9 * This code is free software; you can redistribute it and/or modify it 3.10 @@ -39,7 +39,8 @@ 3.11 private static CIntegerField sizeField; 3.12 private static CIntegerField headerSizeField; 3.13 private static CIntegerField relocationSizeField; 3.14 - private static CIntegerField instructionsOffsetField; 3.15 + private static CIntegerField contentOffsetField; 3.16 + private static CIntegerField codeOffsetField; 3.17 private static CIntegerField frameCompleteOffsetField; 3.18 private static CIntegerField dataOffsetField; 3.19 private static CIntegerField frameSizeField; 3.20 @@ -68,7 +69,8 @@ 3.21 headerSizeField = type.getCIntegerField("_header_size"); 3.22 relocationSizeField = type.getCIntegerField("_relocation_size"); 3.23 frameCompleteOffsetField = type.getCIntegerField("_frame_complete_offset"); 3.24 - instructionsOffsetField = type.getCIntegerField("_instructions_offset"); 3.25 + contentOffsetField = type.getCIntegerField("_content_offset"); 3.26 + codeOffsetField = type.getCIntegerField("_code_offset"); 3.27 dataOffsetField = type.getCIntegerField("_data_offset"); 3.28 frameSizeField = type.getCIntegerField("_frame_size"); 3.29 oopMapsField = type.getAddressField("_oop_maps"); 3.30 @@ -111,11 +113,19 @@ 3.31 // public RelocInfo relocationBegin(); 3.32 // public RelocInfo relocationEnd(); 3.33 3.34 - public Address instructionsBegin() { 3.35 - return headerBegin().addOffsetTo(instructionsOffsetField.getValue(addr)); 3.36 + public Address contentBegin() { 3.37 + return 
headerBegin().addOffsetTo(contentOffsetField.getValue(addr)); 3.38 } 3.39 3.40 - public Address instructionsEnd() { 3.41 + public Address contentEnd() { 3.42 + return headerBegin().addOffsetTo(dataOffsetField.getValue(addr)); 3.43 + } 3.44 + 3.45 + public Address codeBegin() { 3.46 + return headerBegin().addOffsetTo(contentOffsetField.getValue(addr)); 3.47 + } 3.48 + 3.49 + public Address codeEnd() { 3.50 return headerBegin().addOffsetTo(dataOffsetField.getValue(addr)); 3.51 } 3.52 3.53 @@ -128,24 +138,27 @@ 3.54 } 3.55 3.56 // Offsets 3.57 - public int getRelocationOffset() { return (int) headerSizeField.getValue(addr); } 3.58 - public int getInstructionsOffset() { return (int) instructionsOffsetField.getValue(addr); } 3.59 - public int getDataOffset() { return (int) dataOffsetField.getValue(addr); } 3.60 + public int getRelocationOffset() { return (int) headerSizeField .getValue(addr); } 3.61 + public int getContentOffset() { return (int) contentOffsetField.getValue(addr); } 3.62 + public int getCodeOffset() { return (int) codeOffsetField .getValue(addr); } 3.63 + public int getDataOffset() { return (int) dataOffsetField .getValue(addr); } 3.64 3.65 // Sizes 3.66 - public int getSize() { return (int) sizeField.getValue(addr); } 3.67 - public int getHeaderSize() { return (int) headerSizeField.getValue(addr); } 3.68 + public int getSize() { return (int) sizeField .getValue(addr); } 3.69 + public int getHeaderSize() { return (int) headerSizeField.getValue(addr); } 3.70 // FIXME: add getRelocationSize() 3.71 - public int getInstructionsSize() { return (int) instructionsEnd().minus(instructionsBegin()); } 3.72 - public int getDataSize() { return (int) dataEnd().minus(dataBegin()); } 3.73 + public int getContentSize() { return (int) contentEnd().minus(contentBegin()); } 3.74 + public int getCodeSize() { return (int) codeEnd() .minus(codeBegin()); } 3.75 + public int getDataSize() { return (int) dataEnd() .minus(dataBegin()); } 3.76 3.77 // Containment 3.78 - public 
boolean blobContains(Address addr) { return headerBegin().lessThanOrEqual(addr) && dataEnd().greaterThan(addr); } 3.79 + public boolean blobContains(Address addr) { return headerBegin() .lessThanOrEqual(addr) && dataEnd() .greaterThan(addr); } 3.80 // FIXME: add relocationContains 3.81 - public boolean instructionsContains(Address addr) { return instructionsBegin().lessThanOrEqual(addr) && instructionsEnd().greaterThan(addr); } 3.82 - public boolean dataContains(Address addr) { return dataBegin().lessThanOrEqual(addr) && dataEnd().greaterThan(addr); } 3.83 - public boolean contains(Address addr) { return instructionsContains(addr); } 3.84 - public boolean isFrameCompleteAt(Address a) { return instructionsContains(a) && a.minus(instructionsBegin()) >= frameCompleteOffsetField.getValue(addr); } 3.85 + public boolean contentContains(Address addr) { return contentBegin().lessThanOrEqual(addr) && contentEnd().greaterThan(addr); } 3.86 + public boolean codeContains(Address addr) { return codeBegin() .lessThanOrEqual(addr) && codeEnd() .greaterThan(addr); } 3.87 + public boolean dataContains(Address addr) { return dataBegin() .lessThanOrEqual(addr) && dataEnd() .greaterThan(addr); } 3.88 + public boolean contains(Address addr) { return contentContains(addr); } 3.89 + public boolean isFrameCompleteAt(Address a) { return codeContains(a) && a.minus(codeBegin()) >= frameCompleteOffsetField.getValue(addr); } 3.90 3.91 // Reclamation support (really only used by the nmethods, but in order to get asserts to work 3.92 // in the CodeCache they are defined virtual here) 3.93 @@ -168,7 +181,7 @@ 3.94 if (Assert.ASSERTS_ENABLED) { 3.95 Assert.that(getOopMaps() != null, "nope"); 3.96 } 3.97 - return getOopMaps().findMapAtOffset(pc.minus(instructionsBegin()), debugging); 3.98 + return getOopMaps().findMapAtOffset(pc.minus(codeBegin()), debugging); 3.99 } 3.100 3.101 // virtual void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, void f(oop*)) { ShouldNotReachHere(); 
} 3.102 @@ -200,7 +213,8 @@ 3.103 } 3.104 3.105 protected void printComponentsOn(PrintStream tty) { 3.106 - tty.println(" instructions: [" + instructionsBegin() + ", " + instructionsEnd() + "), " + 3.107 + tty.println(" content: [" + contentBegin() + ", " + contentEnd() + "), " + 3.108 + " code: [" + codeBegin() + ", " + codeEnd() + "), " + 3.109 " data: [" + dataBegin() + ", " + dataEnd() + "), " + 3.110 " frame size: " + getFrameSize()); 3.111 }
4.1 --- a/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java Mon Aug 23 08:44:03 2010 -0700 4.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/code/NMethod.java Wed Aug 25 10:31:45 2010 -0700 4.3 @@ -134,10 +134,10 @@ 4.4 public boolean isOSRMethod() { return getEntryBCI() != VM.getVM().getInvocationEntryBCI(); } 4.5 4.6 /** Boundaries for different parts */ 4.7 - public Address constantsBegin() { return instructionsBegin(); } 4.8 + public Address constantsBegin() { return contentBegin(); } 4.9 public Address constantsEnd() { return getEntryPoint(); } 4.10 - public Address codeBegin() { return getEntryPoint(); } 4.11 - public Address codeEnd() { return headerBegin().addOffsetTo(getStubOffset()); } 4.12 + public Address instsBegin() { return codeBegin(); } 4.13 + public Address instsEnd() { return headerBegin().addOffsetTo(getStubOffset()); } 4.14 public Address exceptionBegin() { return headerBegin().addOffsetTo(getExceptionOffset()); } 4.15 public Address deoptBegin() { return headerBegin().addOffsetTo(getDeoptOffset()); } 4.16 public Address stubBegin() { return headerBegin().addOffsetTo(getStubOffset()); } 4.17 @@ -156,7 +156,7 @@ 4.18 public Address nulChkTableEnd() { return headerBegin().addOffsetTo(getNMethodEndOffset()); } 4.19 4.20 public int constantsSize() { return (int) constantsEnd() .minus(constantsBegin()); } 4.21 - public int codeSize() { return (int) codeEnd() .minus(codeBegin()); } 4.22 + public int instsSize() { return (int) instsEnd() .minus(instsBegin()); } 4.23 public int stubSize() { return (int) stubEnd() .minus(stubBegin()); } 4.24 public int oopsSize() { return (int) oopsEnd() .minus(oopsBegin()); } 4.25 public int scopesDataSize() { return (int) scopesDataEnd() .minus(scopesDataBegin()); } 4.26 @@ -169,7 +169,7 @@ 4.27 public int totalSize() { 4.28 return 4.29 constantsSize() + 4.30 - codeSize() + 4.31 + instsSize() + 4.32 stubSize() + 4.33 scopesDataSize() + 4.34 scopesPCsSize() + 4.35 @@ -179,7 +179,7 @@ 4.36 } 4.37 4.38 public 
boolean constantsContains (Address addr) { return constantsBegin() .lessThanOrEqual(addr) && constantsEnd() .greaterThan(addr); } 4.39 - public boolean codeContains (Address addr) { return codeBegin() .lessThanOrEqual(addr) && codeEnd() .greaterThan(addr); } 4.40 + public boolean instsContains (Address addr) { return instsBegin() .lessThanOrEqual(addr) && instsEnd() .greaterThan(addr); } 4.41 public boolean stubContains (Address addr) { return stubBegin() .lessThanOrEqual(addr) && stubEnd() .greaterThan(addr); } 4.42 public boolean oopsContains (Address addr) { return oopsBegin() .lessThanOrEqual(addr) && oopsEnd() .greaterThan(addr); } 4.43 public boolean scopesDataContains (Address addr) { return scopesDataBegin() .lessThanOrEqual(addr) && scopesDataEnd() .greaterThan(addr); } 4.44 @@ -353,7 +353,8 @@ 4.45 4.46 protected void printComponentsOn(PrintStream tty) { 4.47 // FIXME: add relocation information 4.48 - tty.println(" instructions: [" + instructionsBegin() + ", " + instructionsEnd() + "), " + 4.49 + tty.println(" content: [" + contentBegin() + ", " + contentEnd() + "), " + 4.50 + " code: [" + codeBegin() + ", " + codeEnd() + "), " + 4.51 " data: [" + dataBegin() + ", " + dataEnd() + "), " + 4.52 " oops: [" + oopsBegin() + ", " + oopsEnd() + "), " + 4.53 " frame size: " + getFrameSize());
5.1 --- a/agent/src/share/classes/sun/jvm/hotspot/code/PCDesc.java Mon Aug 23 08:44:03 2010 -0700 5.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/code/PCDesc.java Wed Aug 25 10:31:45 2010 -0700 5.3 @@ -1,5 +1,5 @@ 5.4 /* 5.5 - * Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved. 5.6 + * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. 5.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5.8 * 5.9 * This code is free software; you can redistribute it and/or modify it 5.10 @@ -75,7 +75,7 @@ 5.11 } 5.12 5.13 public Address getRealPC(NMethod code) { 5.14 - return code.instructionsBegin().addOffsetTo(getPCOffset()); 5.15 + return code.codeBegin().addOffsetTo(getPCOffset()); 5.16 } 5.17 5.18
6.1 --- a/agent/src/share/classes/sun/jvm/hotspot/ui/FindInCodeCachePanel.java Mon Aug 23 08:44:03 2010 -0700 6.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/ui/FindInCodeCachePanel.java Wed Aug 25 10:31:45 2010 -0700 6.3 @@ -1,5 +1,5 @@ 6.4 /* 6.5 - * Copyright (c) 2004, Oracle and/or its affiliates. All rights reserved. 6.6 + * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved. 6.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 6.8 * 6.9 * This code is free software; you can redistribute it and/or modify it 6.10 @@ -190,11 +190,11 @@ 6.11 6.12 private void reportResult(StringBuffer result, CodeBlob blob) { 6.13 result.append("<a href='blob:"); 6.14 - result.append(blob.instructionsBegin().toString()); 6.15 + result.append(blob.contentBegin().toString()); 6.16 result.append("'>"); 6.17 result.append(blob.getName()); 6.18 result.append("@"); 6.19 - result.append(blob.instructionsBegin()); 6.20 + result.append(blob.contentBegin()); 6.21 result.append("</a><br>"); 6.22 } 6.23
7.1 --- a/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java Mon Aug 23 08:44:03 2010 -0700 7.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java Wed Aug 25 10:31:45 2010 -0700 7.3 @@ -1415,13 +1415,13 @@ 7.4 buf.append(genMethodAndKlassLink(nmethod.getMethod())); 7.5 7.6 buf.h3("Compiled Code"); 7.7 - sun.jvm.hotspot.debugger.Address codeBegin = nmethod.codeBegin(); 7.8 - sun.jvm.hotspot.debugger.Address codeEnd = nmethod.codeEnd(); 7.9 - final int codeSize = (int)codeEnd.minus(codeBegin); 7.10 - final long startPc = addressToLong(codeBegin); 7.11 - final byte[] code = new byte[codeSize]; 7.12 + sun.jvm.hotspot.debugger.Address instsBegin = nmethod.instsBegin(); 7.13 + sun.jvm.hotspot.debugger.Address instsEnd = nmethod.instsEnd(); 7.14 + final int instsSize = nmethod.instsSize(); 7.15 + final long startPc = addressToLong(instsBegin); 7.16 + final byte[] code = new byte[instsSize]; 7.17 for (int i=0; i < code.length; i++) 7.18 - code[i] = codeBegin.getJByteAt(i); 7.19 + code[i] = instsBegin.getJByteAt(i); 7.20 7.21 final long verifiedEntryPoint = addressToLong(nmethod.getVerifiedEntryPoint()); 7.22 final long entryPoint = addressToLong(nmethod.getEntryPoint()); 7.23 @@ -1499,8 +1499,8 @@ 7.24 buf.h3("CodeBlob"); 7.25 7.26 buf.h3("Compiled Code"); 7.27 - final sun.jvm.hotspot.debugger.Address codeBegin = blob.instructionsBegin(); 7.28 - final int codeSize = blob.getInstructionsSize(); 7.29 + final sun.jvm.hotspot.debugger.Address codeBegin = blob.codeBegin(); 7.30 + final int codeSize = blob.getCodeSize(); 7.31 final long startPc = addressToLong(codeBegin); 7.32 final byte[] code = new byte[codeSize]; 7.33 for (int i=0; i < code.length; i++)
8.1 --- a/agent/src/share/classes/sun/jvm/hotspot/utilities/PointerFinder.java Mon Aug 23 08:44:03 2010 -0700 8.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/PointerFinder.java Wed Aug 25 10:31:45 2010 -0700 8.3 @@ -96,15 +96,15 @@ 8.4 if (Assert.ASSERTS_ENABLED) { 8.5 Assert.that(loc.blob != null, "Should have found CodeBlob"); 8.6 } 8.7 - loc.inBlobInstructions = loc.blob.instructionsContains(a); 8.8 - loc.inBlobData = loc.blob.dataContains(a); 8.9 + loc.inBlobCode = loc.blob.codeContains(a); 8.10 + loc.inBlobData = loc.blob.dataContains(a); 8.11 8.12 if (loc.blob.isNMethod()) { 8.13 NMethod nm = (NMethod) loc.blob; 8.14 loc.inBlobOops = nm.oopsContains(a); 8.15 } 8.16 8.17 - loc.inBlobUnknownLocation = (!(loc.inBlobInstructions || 8.18 + loc.inBlobUnknownLocation = (!(loc.inBlobCode || 8.19 loc.inBlobData || 8.20 loc.inBlobOops)); 8.21 return loc;
9.1 --- a/agent/src/share/classes/sun/jvm/hotspot/utilities/PointerLocation.java Mon Aug 23 08:44:03 2010 -0700 9.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/PointerLocation.java Wed Aug 25 10:31:45 2010 -0700 9.3 @@ -1,5 +1,5 @@ 9.4 /* 9.5 - * Copyright (c) 2000, 2004, Oracle and/or its affiliates. All rights reserved. 9.6 + * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. 9.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 9.8 * 9.9 * This code is free software; you can redistribute it and/or modify it 9.10 @@ -65,7 +65,7 @@ 9.11 InterpreterCodelet interpreterCodelet; 9.12 CodeBlob blob; 9.13 // FIXME: add more detail about CodeBlob 9.14 - boolean inBlobInstructions; 9.15 + boolean inBlobCode; 9.16 boolean inBlobData; 9.17 boolean inBlobOops; 9.18 boolean inBlobUnknownLocation; 9.19 @@ -142,8 +142,8 @@ 9.20 return blob; 9.21 } 9.22 9.23 - public boolean isInBlobInstructions() { 9.24 - return inBlobInstructions; 9.25 + public boolean isInBlobCode() { 9.26 + return inBlobCode; 9.27 } 9.28 9.29 public boolean isInBlobData() { 9.30 @@ -233,8 +233,8 @@ 9.31 } else if (isInCodeCache()) { 9.32 CodeBlob b = getCodeBlob(); 9.33 tty.print("In "); 9.34 - if (isInBlobInstructions()) { 9.35 - tty.print("instructions"); 9.36 + if (isInBlobCode()) { 9.37 + tty.print("code"); 9.38 } else if (isInBlobData()) { 9.39 tty.print("data"); 9.40 } else if (isInBlobOops()) {
10.1 --- a/src/cpu/sparc/vm/assembler_sparc.cpp Mon Aug 23 08:44:03 2010 -0700 10.2 +++ b/src/cpu/sparc/vm/assembler_sparc.cpp Wed Aug 25 10:31:45 2010 -0700 10.3 @@ -4192,7 +4192,7 @@ 10.4 10.5 static void generate_satb_log_enqueue(bool with_frame) { 10.6 BufferBlob* bb = BufferBlob::create("enqueue_with_frame", EnqueueCodeSize); 10.7 - CodeBuffer buf(bb->instructions_begin(), bb->instructions_size()); 10.8 + CodeBuffer buf(bb); 10.9 MacroAssembler masm(&buf); 10.10 address start = masm.pc(); 10.11 Register pre_val; 10.12 @@ -4421,7 +4421,7 @@ 10.13 // This gets to assume that o0 contains the object address. 10.14 static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) { 10.15 BufferBlob* bb = BufferBlob::create("dirty_card_enqueue", EnqueueCodeSize*2); 10.16 - CodeBuffer buf(bb->instructions_begin(), bb->instructions_size()); 10.17 + CodeBuffer buf(bb); 10.18 MacroAssembler masm(&buf); 10.19 address start = masm.pc(); 10.20
11.1 --- a/src/cpu/sparc/vm/codeBuffer_sparc.hpp Mon Aug 23 08:44:03 2010 -0700 11.2 +++ b/src/cpu/sparc/vm/codeBuffer_sparc.hpp Wed Aug 25 10:31:45 2010 -0700 11.3 @@ -1,5 +1,5 @@ 11.4 /* 11.5 - * Copyright (c) 2002, 2005, Oracle and/or its affiliates. All rights reserved. 11.6 + * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. 11.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 11.8 * 11.9 * This code is free software; you can redistribute it and/or modify it 11.10 @@ -30,5 +30,5 @@ 11.11 11.12 // Heuristic for pre-packing the pt/pn bit of a predicted branch. 11.13 bool is_backward_branch(Label& L) { 11.14 - return L.is_bound() && code_end() <= locator_address(L.loc()); 11.15 + return L.is_bound() && insts_end() <= locator_address(L.loc()); 11.16 }
12.1 --- a/src/cpu/sparc/vm/frame_sparc.cpp Mon Aug 23 08:44:03 2010 -0700 12.2 +++ b/src/cpu/sparc/vm/frame_sparc.cpp Wed Aug 25 10:31:45 2010 -0700 12.3 @@ -253,11 +253,12 @@ 12.4 } 12.5 12.6 // Could just be some random pointer within the codeBlob 12.7 - if (!sender.cb()->instructions_contains(sender_pc)) return false; 12.8 + if (!sender.cb()->code_contains(sender_pc)) { 12.9 + return false; 12.10 + } 12.11 12.12 // We should never be able to see an adapter if the current frame is something from code cache 12.13 - 12.14 - if ( sender_blob->is_adapter_blob()) { 12.15 + if (sender_blob->is_adapter_blob()) { 12.16 return false; 12.17 } 12.18
13.1 --- a/src/cpu/sparc/vm/jniFastGetField_sparc.cpp Mon Aug 23 08:44:03 2010 -0700 13.2 +++ b/src/cpu/sparc/vm/jniFastGetField_sparc.cpp Wed Aug 25 10:31:45 2010 -0700 13.3 @@ -1,5 +1,5 @@ 13.4 /* 13.5 - * Copyright (c) 2004, 2009, Oracle and/or its affiliates. All rights reserved. 13.6 + * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved. 13.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 13.8 * 13.9 * This code is free software; you can redistribute it and/or modify it 13.10 @@ -50,10 +50,10 @@ 13.11 default: ShouldNotReachHere(); 13.12 } 13.13 ResourceMark rm; 13.14 - BufferBlob* b = BufferBlob::create(name, BUFFER_SIZE*wordSize); 13.15 - address fast_entry = b->instructions_begin(); 13.16 - CodeBuffer cbuf(fast_entry, b->instructions_size()); 13.17 + BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE*wordSize); 13.18 + CodeBuffer cbuf(blob); 13.19 MacroAssembler* masm = new MacroAssembler(&cbuf); 13.20 + address fast_entry = __ pc(); 13.21 13.22 Label label1, label2; 13.23 13.24 @@ -129,10 +129,10 @@ 13.25 address JNI_FastGetField::generate_fast_get_long_field() { 13.26 const char *name = "jni_fast_GetLongField"; 13.27 ResourceMark rm; 13.28 - BufferBlob* b = BufferBlob::create(name, BUFFER_SIZE*wordSize); 13.29 - address fast_entry = b->instructions_begin(); 13.30 - CodeBuffer cbuf(fast_entry, b->instructions_size()); 13.31 + BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE*wordSize); 13.32 + CodeBuffer cbuf(blob); 13.33 MacroAssembler* masm = new MacroAssembler(&cbuf); 13.34 + address fast_entry = __ pc(); 13.35 13.36 Label label1, label2; 13.37 13.38 @@ -201,10 +201,10 @@ 13.39 default: ShouldNotReachHere(); 13.40 } 13.41 ResourceMark rm; 13.42 - BufferBlob* b = BufferBlob::create(name, BUFFER_SIZE*wordSize); 13.43 - address fast_entry = b->instructions_begin(); 13.44 - CodeBuffer cbuf(fast_entry, b->instructions_size()); 13.45 + BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE*wordSize); 
13.46 + CodeBuffer cbuf(blob); 13.47 MacroAssembler* masm = new MacroAssembler(&cbuf); 13.48 + address fast_entry = __ pc(); 13.49 13.50 Label label1, label2; 13.51
14.1 --- a/src/cpu/sparc/vm/nativeInst_sparc.cpp Mon Aug 23 08:44:03 2010 -0700 14.2 +++ b/src/cpu/sparc/vm/nativeInst_sparc.cpp Wed Aug 25 10:31:45 2010 -0700 14.3 @@ -193,17 +193,17 @@ 14.4 14.5 a->call( a->pc(), relocInfo::none ); 14.6 a->delayed()->nop(); 14.7 - nc = nativeCall_at( cb.code_begin() ); 14.8 + nc = nativeCall_at( cb.insts_begin() ); 14.9 nc->print(); 14.10 14.11 nc = nativeCall_overwriting_at( nc->next_instruction_address() ); 14.12 for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) { 14.13 - nc->set_destination( cb.code_begin() + offsets[idx] ); 14.14 - assert(nc->destination() == (cb.code_begin() + offsets[idx]), "check unit test"); 14.15 + nc->set_destination( cb.insts_begin() + offsets[idx] ); 14.16 + assert(nc->destination() == (cb.insts_begin() + offsets[idx]), "check unit test"); 14.17 nc->print(); 14.18 } 14.19 14.20 - nc = nativeCall_before( cb.code_begin() + 8 ); 14.21 + nc = nativeCall_before( cb.insts_begin() + 8 ); 14.22 nc->print(); 14.23 14.24 VM_Version::revert(); 14.25 @@ -368,7 +368,7 @@ 14.26 a->sethi(al2, O2); 14.27 a->add(O2, al2.low10(), O2); 14.28 14.29 - nm = nativeMovConstReg_at( cb.code_begin() ); 14.30 + nm = nativeMovConstReg_at( cb.insts_begin() ); 14.31 nm->print(); 14.32 14.33 nm = nativeMovConstReg_at( nm->next_instruction_address() ); 14.34 @@ -480,7 +480,7 @@ 14.35 a->nop(); 14.36 a->add(O2, al2.low10(), O2); 14.37 14.38 - nm = nativeMovConstRegPatching_at( cb.code_begin() ); 14.39 + nm = nativeMovConstRegPatching_at( cb.insts_begin() ); 14.40 nm->print(); 14.41 14.42 nm = nativeMovConstRegPatching_at( nm->next_instruction_address() ); 14.43 @@ -616,7 +616,7 @@ 14.44 a->sethi(al2, I3); a->add(I3, al2.low10(), I3); 14.45 a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++; 14.46 14.47 - nm = nativeMovRegMem_at( cb.code_begin() ); 14.48 + nm = nativeMovRegMem_at( cb.insts_begin() ); 14.49 nm->print(); 14.50 nm->set_offset( low10(0) ); 14.51 nm->print(); 14.52 @@ -760,7 +760,7 @@ 14.53 a->sethi(al, I3); a->nop(); 
a->add(I3, al.low10(), I3); 14.54 a->stf( FloatRegisterImpl::S, F15, O0, I3 ); idx++; 14.55 14.56 - nm = nativeMovRegMemPatching_at( cb.code_begin() ); 14.57 + nm = nativeMovRegMemPatching_at( cb.insts_begin() ); 14.58 nm->print(); 14.59 nm->set_offset( low10(0) ); 14.60 nm->print(); 14.61 @@ -849,7 +849,7 @@ 14.62 a->jmpl(I3, al.low10(), L3, RelocationHolder::none); 14.63 a->delayed()->nop(); 14.64 14.65 - nj = nativeJump_at( cb.code_begin() ); 14.66 + nj = nativeJump_at( cb.insts_begin() ); 14.67 nj->print(); 14.68 14.69 nj = nativeJump_at( nj->next_instruction_address() );
15.1 --- a/src/cpu/sparc/vm/sparc.ad Mon Aug 23 08:44:03 2010 -0700 15.2 +++ b/src/cpu/sparc/vm/sparc.ad Wed Aug 25 10:31:45 2010 -0700 15.3 @@ -677,8 +677,7 @@ 15.4 (f20 << 20) | 15.5 (f19 << 19) | 15.6 (f0 << 0); 15.7 - *((int*)(cbuf.code_end())) = op; 15.8 - cbuf.set_code_end(cbuf.code_end() + BytesPerInstWord); 15.9 + cbuf.insts()->emit_int32(op); 15.10 } 15.11 15.12 // Standard Sparc opcode form2 field breakdown 15.13 @@ -689,8 +688,7 @@ 15.14 (f25 << 25) | 15.15 (f22 << 22) | 15.16 (f0 << 0); 15.17 - *((int*)(cbuf.code_end())) = op; 15.18 - cbuf.set_code_end(cbuf.code_end() + BytesPerInstWord); 15.19 + cbuf.insts()->emit_int32(op); 15.20 } 15.21 15.22 // Standard Sparc opcode form3 field breakdown 15.23 @@ -701,8 +699,7 @@ 15.24 (f14 << 14) | 15.25 (f5 << 5) | 15.26 (f0 << 0); 15.27 - *((int*)(cbuf.code_end())) = op; 15.28 - cbuf.set_code_end(cbuf.code_end() + BytesPerInstWord); 15.29 + cbuf.insts()->emit_int32(op); 15.30 } 15.31 15.32 // Standard Sparc opcode form3 field breakdown 15.33 @@ -714,8 +711,7 @@ 15.34 (f14 << 14) | 15.35 (1 << 13) | // bit to indicate immediate-mode 15.36 (simm13<<0); 15.37 - *((int*)(cbuf.code_end())) = op; 15.38 - cbuf.set_code_end(cbuf.code_end() + BytesPerInstWord); 15.39 + cbuf.insts()->emit_int32(op); 15.40 } 15.41 15.42 static inline void emit3_simm10(CodeBuffer &cbuf, int f30, int f25, int f19, int f14, int simm10 ) { 15.43 @@ -910,9 +906,7 @@ 15.44 instr |= disp & 0x1FFF; 15.45 } 15.46 15.47 - uint *code = (uint*)cbuf.code_end(); 15.48 - *code = instr; 15.49 - cbuf.set_code_end(cbuf.code_end() + BytesPerInstWord); 15.50 + cbuf.insts()->emit_int32(instr); 15.51 15.52 #ifdef ASSERT 15.53 { 15.54 @@ -1532,7 +1526,7 @@ 15.55 // set (empty), G5 15.56 // jmp -1 15.57 15.58 - address mark = cbuf.inst_mark(); // get mark within main instrs section 15.59 + address mark = cbuf.insts_mark(); // get mark within main instrs section 15.60 15.61 MacroAssembler _masm(&cbuf); 15.62 15.63 @@ -1632,7 +1626,7 @@ 15.64 // Emit exception 
handler code. 15.65 int emit_exception_handler(CodeBuffer& cbuf) { 15.66 Register temp_reg = G3; 15.67 - AddressLiteral exception_blob(OptoRuntime::exception_blob()->instructions_begin()); 15.68 + AddressLiteral exception_blob(OptoRuntime::exception_blob()->entry_point()); 15.69 MacroAssembler _masm(&cbuf); 15.70 15.71 address base = 15.72 @@ -2292,8 +2286,7 @@ 15.73 (0 << 13) | // select register move 15.74 ($pcc$$constant << 11) | // cc1, cc0 bits for 'icc' or 'xcc' 15.75 ($src$$reg << 0); 15.76 - *((int*)(cbuf.code_end())) = op; 15.77 - cbuf.set_code_end(cbuf.code_end() + BytesPerInstWord); 15.78 + cbuf.insts()->emit_int32(op); 15.79 %} 15.80 15.81 enc_class enc_cmov_imm( cmpOp cmp, iRegI dst, immI11 src, immI pcc ) %{ 15.82 @@ -2306,8 +2299,7 @@ 15.83 (1 << 13) | // select immediate move 15.84 ($pcc$$constant << 11) | // cc1, cc0 bits for 'icc' 15.85 (simm11 << 0); 15.86 - *((int*)(cbuf.code_end())) = op; 15.87 - cbuf.set_code_end(cbuf.code_end() + BytesPerInstWord); 15.88 + cbuf.insts()->emit_int32(op); 15.89 %} 15.90 15.91 enc_class enc_cmov_reg_f( cmpOpF cmp, iRegI dst, iRegI src, flagsRegF fcc ) %{ 15.92 @@ -2319,8 +2311,7 @@ 15.93 (0 << 13) | // select register move 15.94 ($fcc$$reg << 11) | // cc1, cc0 bits for fcc0-fcc3 15.95 ($src$$reg << 0); 15.96 - *((int*)(cbuf.code_end())) = op; 15.97 - cbuf.set_code_end(cbuf.code_end() + BytesPerInstWord); 15.98 + cbuf.insts()->emit_int32(op); 15.99 %} 15.100 15.101 enc_class enc_cmov_imm_f( cmpOp cmp, iRegI dst, immI11 src, flagsRegF fcc ) %{ 15.102 @@ -2333,8 +2324,7 @@ 15.103 (1 << 13) | // select immediate move 15.104 ($fcc$$reg << 11) | // cc1, cc0 bits for fcc0-fcc3 15.105 (simm11 << 0); 15.106 - *((int*)(cbuf.code_end())) = op; 15.107 - cbuf.set_code_end(cbuf.code_end() + BytesPerInstWord); 15.108 + cbuf.insts()->emit_int32(op); 15.109 %} 15.110 15.111 enc_class enc_cmovf_reg( cmpOp cmp, regD dst, regD src, immI pcc ) %{ 15.112 @@ -2347,8 +2337,7 @@ 15.113 ($pcc$$constant << 11) | // cc1-cc0 bits for 'icc' 
or 'xcc' 15.114 ($primary << 5) | // select single, double or quad 15.115 ($src$$reg << 0); 15.116 - *((int*)(cbuf.code_end())) = op; 15.117 - cbuf.set_code_end(cbuf.code_end() + BytesPerInstWord); 15.118 + cbuf.insts()->emit_int32(op); 15.119 %} 15.120 15.121 enc_class enc_cmovff_reg( cmpOpF cmp, flagsRegF fcc, regD dst, regD src ) %{ 15.122 @@ -2360,8 +2349,7 @@ 15.123 ($fcc$$reg << 11) | // cc2-cc0 bits for 'fccX' 15.124 ($primary << 5) | // select single, double or quad 15.125 ($src$$reg << 0); 15.126 - *((int*)(cbuf.code_end())) = op; 15.127 - cbuf.set_code_end(cbuf.code_end() + BytesPerInstWord); 15.128 + cbuf.insts()->emit_int32(op); 15.129 %} 15.130 15.131 // Used by the MIN/MAX encodings. Same as a CMOV, but 15.132 @@ -2375,8 +2363,7 @@ 15.133 (0 << 13) | // select register move 15.134 (0 << 11) | // cc1, cc0 bits for 'icc' 15.135 ($src$$reg << 0); 15.136 - *((int*)(cbuf.code_end())) = op; 15.137 - cbuf.set_code_end(cbuf.code_end() + BytesPerInstWord); 15.138 + cbuf.insts()->emit_int32(op); 15.139 %} 15.140 15.141 enc_class enc_cmov_reg_minmax_long( iRegL dst, iRegL src ) %{ 15.142 @@ -2388,8 +2375,7 @@ 15.143 (0 << 13) | // select register move 15.144 (0 << 11) | // cc1, cc0 bits for 'icc' 15.145 ($src$$reg << 0); 15.146 - *((int*)(cbuf.code_end())) = op; 15.147 - cbuf.set_code_end(cbuf.code_end() + BytesPerInstWord); 15.148 + cbuf.insts()->emit_int32(op); 15.149 %} 15.150 15.151 // Utility encoding for loading a 64 bit Pointer into a register 15.152 @@ -3055,7 +3041,7 @@ 15.153 %} 15.154 15.155 enc_class enc_rethrow() %{ 15.156 - cbuf.set_inst_mark(); 15.157 + cbuf.set_insts_mark(); 15.158 Register temp_reg = G3; 15.159 AddressLiteral rethrow_stub(OptoRuntime::rethrow_stub()); 15.160 assert(temp_reg != reg_to_register_object(R_I0_num), "temp must not break oop_reg"); 15.161 @@ -3076,23 +3062,17 @@ 15.162 15.163 enc_class emit_mem_nop() %{ 15.164 // Generates the instruction LDUXA [o6,g0],#0x82,g0 15.165 - unsigned int *code = (unsigned 
int*)cbuf.code_end(); 15.166 - *code = (unsigned int)0xc0839040; 15.167 - cbuf.set_code_end(cbuf.code_end() + BytesPerInstWord); 15.168 + cbuf.insts()->emit_int32((unsigned int) 0xc0839040); 15.169 %} 15.170 15.171 enc_class emit_fadd_nop() %{ 15.172 // Generates the instruction FMOVS f31,f31 15.173 - unsigned int *code = (unsigned int*)cbuf.code_end(); 15.174 - *code = (unsigned int)0xbfa0003f; 15.175 - cbuf.set_code_end(cbuf.code_end() + BytesPerInstWord); 15.176 + cbuf.insts()->emit_int32((unsigned int) 0xbfa0003f); 15.177 %} 15.178 15.179 enc_class emit_br_nop() %{ 15.180 // Generates the instruction BPN,PN . 15.181 - unsigned int *code = (unsigned int*)cbuf.code_end(); 15.182 - *code = (unsigned int)0x00400000; 15.183 - cbuf.set_code_end(cbuf.code_end() + BytesPerInstWord); 15.184 + cbuf.insts()->emit_int32((unsigned int) 0x00400000); 15.185 %} 15.186 15.187 enc_class enc_membar_acquire %{
16.1 --- a/src/cpu/x86/vm/frame_x86.cpp Mon Aug 23 08:44:03 2010 -0700 16.2 +++ b/src/cpu/x86/vm/frame_x86.cpp Wed Aug 25 10:31:45 2010 -0700 16.3 @@ -141,12 +141,12 @@ 16.4 } 16.5 16.6 // Could just be some random pointer within the codeBlob 16.7 - 16.8 - if (!sender_blob->instructions_contains(sender_pc)) return false; 16.9 + if (!sender_blob->code_contains(sender_pc)) { 16.10 + return false; 16.11 + } 16.12 16.13 // We should never be able to see an adapter if the current frame is something from code cache 16.14 - 16.15 - if ( sender_blob->is_adapter_blob()) { 16.16 + if (sender_blob->is_adapter_blob()) { 16.17 return false; 16.18 } 16.19 16.20 @@ -340,7 +340,7 @@ 16.21 fr._unextended_sp = unextended_sp; 16.22 16.23 address original_pc = nm->get_original_pc(&fr); 16.24 - assert(nm->code_contains(original_pc), "original PC must be in nmethod"); 16.25 + assert(nm->insts_contains(original_pc), "original PC must be in nmethod"); 16.26 assert(nm->is_method_handle_return(original_pc) == is_method_handle_return, "must be"); 16.27 } 16.28 #endif
17.1 --- a/src/cpu/x86/vm/frame_x86.inline.hpp Mon Aug 23 08:44:03 2010 -0700 17.2 +++ b/src/cpu/x86/vm/frame_x86.inline.hpp Wed Aug 25 10:31:45 2010 -0700 17.3 @@ -63,7 +63,7 @@ 17.4 address original_pc = nmethod::get_deopt_original_pc(this); 17.5 if (original_pc != NULL) { 17.6 _pc = original_pc; 17.7 - assert(((nmethod*)_cb)->code_contains(_pc), "original PC must be in nmethod"); 17.8 + assert(((nmethod*)_cb)->insts_contains(_pc), "original PC must be in nmethod"); 17.9 _deopt_state = is_deoptimized; 17.10 } else { 17.11 _deopt_state = not_deoptimized;
18.1 --- a/src/cpu/x86/vm/jniFastGetField_x86_32.cpp Mon Aug 23 08:44:03 2010 -0700 18.2 +++ b/src/cpu/x86/vm/jniFastGetField_x86_32.cpp Wed Aug 25 10:31:45 2010 -0700 18.3 @@ -1,5 +1,5 @@ 18.4 /* 18.5 - * Copyright (c) 2004, 2008, Oracle and/or its affiliates. All rights reserved. 18.6 + * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved. 18.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 18.8 * 18.9 * This code is free software; you can redistribute it and/or modify it 18.10 @@ -54,10 +54,10 @@ 18.11 default: ShouldNotReachHere(); 18.12 } 18.13 ResourceMark rm; 18.14 - BufferBlob* b = BufferBlob::create(name, BUFFER_SIZE*wordSize); 18.15 - address fast_entry = b->instructions_begin(); 18.16 - CodeBuffer cbuf(fast_entry, b->instructions_size()); 18.17 + BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE*wordSize); 18.18 + CodeBuffer cbuf(blob); 18.19 MacroAssembler* masm = new MacroAssembler(&cbuf); 18.20 + address fast_entry = __ pc(); 18.21 18.22 Label slow; 18.23 18.24 @@ -135,11 +135,11 @@ 18.25 return fast_entry; 18.26 #else 18.27 switch (type) { 18.28 - case T_BOOLEAN: jni_fast_GetBooleanField_fp = (GetBooleanField_t)fast_entry; break; 18.29 - case T_BYTE: jni_fast_GetByteField_fp = (GetByteField_t)fast_entry; break; 18.30 - case T_CHAR: jni_fast_GetCharField_fp = (GetCharField_t)fast_entry; break; 18.31 - case T_SHORT: jni_fast_GetShortField_fp = (GetShortField_t)fast_entry; break; 18.32 - case T_INT: jni_fast_GetIntField_fp = (GetIntField_t)fast_entry; 18.33 + case T_BOOLEAN: jni_fast_GetBooleanField_fp = (GetBooleanField_t) fast_entry; break; 18.34 + case T_BYTE: jni_fast_GetByteField_fp = (GetByteField_t) fast_entry; break; 18.35 + case T_CHAR: jni_fast_GetCharField_fp = (GetCharField_t) fast_entry; break; 18.36 + case T_SHORT: jni_fast_GetShortField_fp = (GetShortField_t) fast_entry; break; 18.37 + case T_INT: jni_fast_GetIntField_fp = (GetIntField_t) fast_entry; break; 18.38 } 18.39 return 
os::win32::fast_jni_accessor_wrapper(type); 18.40 #endif 18.41 @@ -168,10 +168,10 @@ 18.42 address JNI_FastGetField::generate_fast_get_long_field() { 18.43 const char *name = "jni_fast_GetLongField"; 18.44 ResourceMark rm; 18.45 - BufferBlob* b = BufferBlob::create(name, BUFFER_SIZE*wordSize); 18.46 - address fast_entry = b->instructions_begin(); 18.47 - CodeBuffer cbuf(fast_entry, b->instructions_size()); 18.48 + BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE*wordSize); 18.49 + CodeBuffer cbuf(blob); 18.50 MacroAssembler* masm = new MacroAssembler(&cbuf); 18.51 + address fast_entry = __ pc(); 18.52 18.53 Label slow; 18.54 18.55 @@ -246,7 +246,7 @@ 18.56 #ifndef _WINDOWS 18.57 return fast_entry; 18.58 #else 18.59 - jni_fast_GetLongField_fp = (GetLongField_t)fast_entry; 18.60 + jni_fast_GetLongField_fp = (GetLongField_t) fast_entry; 18.61 return os::win32::fast_jni_accessor_wrapper(T_LONG); 18.62 #endif 18.63 } 18.64 @@ -259,10 +259,10 @@ 18.65 default: ShouldNotReachHere(); 18.66 } 18.67 ResourceMark rm; 18.68 - BufferBlob* b = BufferBlob::create(name, BUFFER_SIZE*wordSize); 18.69 - address fast_entry = b->instructions_begin(); 18.70 - CodeBuffer cbuf(fast_entry, b->instructions_size()); 18.71 + BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE*wordSize); 18.72 + CodeBuffer cbuf(blob); 18.73 MacroAssembler* masm = new MacroAssembler(&cbuf); 18.74 + address fast_entry = __ pc(); 18.75 18.76 Label slow_with_pop, slow; 18.77 18.78 @@ -348,8 +348,8 @@ 18.79 return fast_entry; 18.80 #else 18.81 switch (type) { 18.82 - case T_FLOAT: jni_fast_GetFloatField_fp = (GetFloatField_t)fast_entry; break; 18.83 - case T_DOUBLE: jni_fast_GetDoubleField_fp = (GetDoubleField_t)fast_entry; 18.84 + case T_FLOAT: jni_fast_GetFloatField_fp = (GetFloatField_t) fast_entry; break; 18.85 + case T_DOUBLE: jni_fast_GetDoubleField_fp = (GetDoubleField_t) fast_entry; break; 18.86 } 18.87 return os::win32::fast_jni_accessor_wrapper(type); 18.88 #endif
19.1 --- a/src/cpu/x86/vm/jniFastGetField_x86_64.cpp Mon Aug 23 08:44:03 2010 -0700 19.2 +++ b/src/cpu/x86/vm/jniFastGetField_x86_64.cpp Wed Aug 25 10:31:45 2010 -0700 19.3 @@ -1,5 +1,5 @@ 19.4 /* 19.5 - * Copyright (c) 2004, 2008, Oracle and/or its affiliates. All rights reserved. 19.6 + * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved. 19.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 19.8 * 19.9 * This code is free software; you can redistribute it and/or modify it 19.10 @@ -58,10 +58,10 @@ 19.11 default: ShouldNotReachHere(); 19.12 } 19.13 ResourceMark rm; 19.14 - BufferBlob* b = BufferBlob::create(name, BUFFER_SIZE); 19.15 - address fast_entry = b->instructions_begin(); 19.16 - CodeBuffer cbuf(fast_entry, b->instructions_size()); 19.17 + BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE); 19.18 + CodeBuffer cbuf(blob); 19.19 MacroAssembler* masm = new MacroAssembler(&cbuf); 19.20 + address fast_entry = __ pc(); 19.21 19.22 Label slow; 19.23 19.24 @@ -156,10 +156,10 @@ 19.25 default: ShouldNotReachHere(); 19.26 } 19.27 ResourceMark rm; 19.28 - BufferBlob* b = BufferBlob::create(name, BUFFER_SIZE); 19.29 - address fast_entry = b->instructions_begin(); 19.30 - CodeBuffer cbuf(fast_entry, b->instructions_size()); 19.31 + BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE); 19.32 + CodeBuffer cbuf(blob); 19.33 MacroAssembler* masm = new MacroAssembler(&cbuf); 19.34 + address fast_entry = __ pc(); 19.35 19.36 Label slow; 19.37
20.1 --- a/src/cpu/x86/vm/vm_version_x86.cpp Mon Aug 23 08:44:03 2010 -0700 20.2 +++ b/src/cpu/x86/vm/vm_version_x86.cpp Wed Aug 25 10:31:45 2010 -0700 20.3 @@ -595,8 +595,7 @@ 20.4 if (stub_blob == NULL) { 20.5 vm_exit_during_initialization("Unable to allocate getPsrInfo_stub"); 20.6 } 20.7 - CodeBuffer c(stub_blob->instructions_begin(), 20.8 - stub_blob->instructions_size()); 20.9 + CodeBuffer c(stub_blob); 20.10 VM_Version_StubGenerator g(&c); 20.11 getPsrInfo_stub = CAST_TO_FN_PTR(getPsrInfo_stub_t, 20.12 g.generate_getPsrInfo());
21.1 --- a/src/cpu/x86/vm/x86_32.ad Mon Aug 23 08:44:03 2010 -0700 21.2 +++ b/src/cpu/x86/vm/x86_32.ad Wed Aug 25 10:31:45 2010 -0700 21.3 @@ -350,54 +350,46 @@ 21.4 // EMIT_RM() 21.5 void emit_rm(CodeBuffer &cbuf, int f1, int f2, int f3) { 21.6 unsigned char c = (unsigned char)((f1 << 6) | (f2 << 3) | f3); 21.7 - *(cbuf.code_end()) = c; 21.8 - cbuf.set_code_end(cbuf.code_end() + 1); 21.9 + cbuf.insts()->emit_int8(c); 21.10 } 21.11 21.12 // EMIT_CC() 21.13 void emit_cc(CodeBuffer &cbuf, int f1, int f2) { 21.14 unsigned char c = (unsigned char)( f1 | f2 ); 21.15 - *(cbuf.code_end()) = c; 21.16 - cbuf.set_code_end(cbuf.code_end() + 1); 21.17 + cbuf.insts()->emit_int8(c); 21.18 } 21.19 21.20 // EMIT_OPCODE() 21.21 void emit_opcode(CodeBuffer &cbuf, int code) { 21.22 - *(cbuf.code_end()) = (unsigned char)code; 21.23 - cbuf.set_code_end(cbuf.code_end() + 1); 21.24 + cbuf.insts()->emit_int8((unsigned char) code); 21.25 } 21.26 21.27 // EMIT_OPCODE() w/ relocation information 21.28 void emit_opcode(CodeBuffer &cbuf, int code, relocInfo::relocType reloc, int offset = 0) { 21.29 - cbuf.relocate(cbuf.inst_mark() + offset, reloc); 21.30 + cbuf.relocate(cbuf.insts_mark() + offset, reloc); 21.31 emit_opcode(cbuf, code); 21.32 } 21.33 21.34 // EMIT_D8() 21.35 void emit_d8(CodeBuffer &cbuf, int d8) { 21.36 - *(cbuf.code_end()) = (unsigned char)d8; 21.37 - cbuf.set_code_end(cbuf.code_end() + 1); 21.38 + cbuf.insts()->emit_int8((unsigned char) d8); 21.39 } 21.40 21.41 // EMIT_D16() 21.42 void emit_d16(CodeBuffer &cbuf, int d16) { 21.43 - *((short *)(cbuf.code_end())) = d16; 21.44 - cbuf.set_code_end(cbuf.code_end() + 2); 21.45 + cbuf.insts()->emit_int16(d16); 21.46 } 21.47 21.48 // EMIT_D32() 21.49 void emit_d32(CodeBuffer &cbuf, int d32) { 21.50 - *((int *)(cbuf.code_end())) = d32; 21.51 - cbuf.set_code_end(cbuf.code_end() + 4); 21.52 + cbuf.insts()->emit_int32(d32); 21.53 } 21.54 21.55 // emit 32 bit value and construct relocation entry from relocInfo::relocType 21.56 void 
emit_d32_reloc(CodeBuffer &cbuf, int d32, relocInfo::relocType reloc, 21.57 int format) { 21.58 - cbuf.relocate(cbuf.inst_mark(), reloc, format); 21.59 - 21.60 - *((int *)(cbuf.code_end())) = d32; 21.61 - cbuf.set_code_end(cbuf.code_end() + 4); 21.62 + cbuf.relocate(cbuf.insts_mark(), reloc, format); 21.63 + cbuf.insts()->emit_int32(d32); 21.64 } 21.65 21.66 // emit 32 bit value and construct relocation entry from RelocationHolder 21.67 @@ -408,10 +400,8 @@ 21.68 assert(oop(d32)->is_oop() && (ScavengeRootsInCode || !oop(d32)->is_scavengable()), "cannot embed scavengable oops in code"); 21.69 } 21.70 #endif 21.71 - cbuf.relocate(cbuf.inst_mark(), rspec, format); 21.72 - 21.73 - *((int *)(cbuf.code_end())) = d32; 21.74 - cbuf.set_code_end(cbuf.code_end() + 4); 21.75 + cbuf.relocate(cbuf.insts_mark(), rspec, format); 21.76 + cbuf.insts()->emit_int32(d32); 21.77 } 21.78 21.79 // Access stack slot for load or store 21.80 @@ -613,7 +603,7 @@ 21.81 emit_rm(cbuf, 0x3, 0x05, ESP_enc); 21.82 emit_d32(cbuf, framesize); 21.83 } 21.84 - C->set_frame_complete(cbuf.code_end() - cbuf.code_begin()); 21.85 + C->set_frame_complete(cbuf.insts_size()); 21.86 21.87 #ifdef ASSERT 21.88 if (VerifyStackAtCalls) { 21.89 @@ -695,7 +685,7 @@ 21.90 emit_opcode(cbuf, 0x58 | EBP_enc); 21.91 21.92 if( do_polling() && C->is_method_compilation() ) { 21.93 - cbuf.relocate(cbuf.code_end(), relocInfo::poll_return_type, 0); 21.94 + cbuf.relocate(cbuf.insts_end(), relocInfo::poll_return_type, 0); 21.95 emit_opcode(cbuf,0x85); 21.96 emit_rm(cbuf, 0x0, EAX_enc, 0x5); // EAX 21.97 emit_d32(cbuf, (intptr_t)os::get_polling_page()); 21.98 @@ -1211,9 +1201,9 @@ 21.99 // mov rbx,0 21.100 // jmp -1 21.101 21.102 - address mark = cbuf.inst_mark(); // get mark within main instrs section 21.103 - 21.104 - // Note that the code buffer's inst_mark is always relative to insts. 
21.105 + address mark = cbuf.insts_mark(); // get mark within main instrs section 21.106 + 21.107 + // Note that the code buffer's insts_mark is always relative to insts. 21.108 // That's why we must use the macroassembler to generate a stub. 21.109 MacroAssembler _masm(&cbuf); 21.110 21.111 @@ -1228,7 +1218,7 @@ 21.112 __ jump(RuntimeAddress(__ pc())); 21.113 21.114 __ end_a_stub(); 21.115 - // Update current stubs pointer and restore code_end. 21.116 + // Update current stubs pointer and restore insts_end. 21.117 } 21.118 // size of call stub, compiled java to interpretor 21.119 uint size_java_to_interp() { 21.120 @@ -1254,7 +1244,7 @@ 21.121 void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { 21.122 MacroAssembler masm(&cbuf); 21.123 #ifdef ASSERT 21.124 - uint code_size = cbuf.code_size(); 21.125 + uint insts_size = cbuf.insts_size(); 21.126 #endif 21.127 masm.cmpptr(rax, Address(rcx, oopDesc::klass_offset_in_bytes())); 21.128 masm.jump_cc(Assembler::notEqual, 21.129 @@ -1266,7 +1256,7 @@ 21.130 nops_cnt += 1; 21.131 masm.nop(nops_cnt); 21.132 21.133 - assert(cbuf.code_size() - code_size == size(ra_), "checking code size of inline cache node"); 21.134 + assert(cbuf.insts_size() - insts_size == size(ra_), "checking code size of inline cache node"); 21.135 } 21.136 21.137 uint MachUEPNode::size(PhaseRegAlloc *ra_) const { 21.138 @@ -1288,14 +1278,14 @@ 21.139 // and call a VM stub routine. 21.140 int emit_exception_handler(CodeBuffer& cbuf) { 21.141 21.142 - // Note that the code buffer's inst_mark is always relative to insts. 21.143 + // Note that the code buffer's insts_mark is always relative to insts. 21.144 // That's why we must use the macroassembler to generate a handler. 
21.145 MacroAssembler _masm(&cbuf); 21.146 address base = 21.147 __ start_a_stub(size_exception_handler()); 21.148 if (base == NULL) return 0; // CodeBuffer::expand failed 21.149 int offset = __ offset(); 21.150 - __ jump(RuntimeAddress(OptoRuntime::exception_blob()->instructions_begin())); 21.151 + __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point())); 21.152 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow"); 21.153 __ end_a_stub(); 21.154 return offset; 21.155 @@ -1313,7 +1303,7 @@ 21.156 // Emit deopt handler code. 21.157 int emit_deopt_handler(CodeBuffer& cbuf) { 21.158 21.159 - // Note that the code buffer's inst_mark is always relative to insts. 21.160 + // Note that the code buffer's insts_mark is always relative to insts. 21.161 // That's why we must use the macroassembler to generate a handler. 21.162 MacroAssembler _masm(&cbuf); 21.163 address base = 21.164 @@ -1728,12 +1718,12 @@ 21.165 21.166 enc_class Lbl (label labl) %{ // JMP, CALL 21.167 Label *l = $labl$$label; 21.168 - emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.code_size()+4)) : 0); 21.169 + emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.insts_size()+4)) : 0); 21.170 %} 21.171 21.172 enc_class LblShort (label labl) %{ // JMP, CALL 21.173 Label *l = $labl$$label; 21.174 - int disp = l ? (l->loc_pos() - (cbuf.code_size()+1)) : 0; 21.175 + int disp = l ? (l->loc_pos() - (cbuf.insts_size()+1)) : 0; 21.176 assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp"); 21.177 emit_d8(cbuf, disp); 21.178 %} 21.179 @@ -1764,13 +1754,13 @@ 21.180 Label *l = $labl$$label; 21.181 $$$emit8$primary; 21.182 emit_cc(cbuf, $secondary, $cop$$cmpcode); 21.183 - emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.code_size()+4)) : 0); 21.184 + emit_d32(cbuf, l ? 
(l->loc_pos() - (cbuf.insts_size()+4)) : 0); 21.185 %} 21.186 21.187 enc_class JccShort (cmpOp cop, label labl) %{ // JCC 21.188 Label *l = $labl$$label; 21.189 emit_cc(cbuf, $primary, $cop$$cmpcode); 21.190 - int disp = l ? (l->loc_pos() - (cbuf.code_size()+1)) : 0; 21.191 + int disp = l ? (l->loc_pos() - (cbuf.insts_size()+1)) : 0; 21.192 assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp"); 21.193 emit_d8(cbuf, disp); 21.194 %} 21.195 @@ -1838,10 +1828,10 @@ 21.196 21.197 enc_class Java_To_Runtime (method meth) %{ // CALL Java_To_Runtime, Java_To_Runtime_Leaf 21.198 // This is the instruction starting address for relocation info. 21.199 - cbuf.set_inst_mark(); 21.200 + cbuf.set_insts_mark(); 21.201 $$$emit8$primary; 21.202 // CALL directly to the runtime 21.203 - emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.code_end()) - 4), 21.204 + emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4), 21.205 runtime_call_Relocation::spec(), RELOC_IMM32 ); 21.206 21.207 if (UseSSE >= 2) { 21.208 @@ -1871,12 +1861,12 @@ 21.209 21.210 enc_class pre_call_FPU %{ 21.211 // If method sets FPU control word restore it here 21.212 - debug_only(int off0 = cbuf.code_size()); 21.213 + debug_only(int off0 = cbuf.insts_size()); 21.214 if( Compile::current()->in_24_bit_fp_mode() ) { 21.215 MacroAssembler masm(&cbuf); 21.216 masm.fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_std())); 21.217 } 21.218 - debug_only(int off1 = cbuf.code_size()); 21.219 + debug_only(int off1 = cbuf.insts_size()); 21.220 assert(off1 - off0 == pre_call_FPU_size(), "correct size prediction"); 21.221 %} 21.222 21.223 @@ -1889,12 +1879,12 @@ 21.224 %} 21.225 21.226 enc_class preserve_SP %{ 21.227 - debug_only(int off0 = cbuf.code_size()); 21.228 + debug_only(int off0 = cbuf.insts_size()); 21.229 MacroAssembler _masm(&cbuf); 21.230 // RBP is preserved across all calls, even compiled calls. 21.231 // Use it to preserve RSP in places where the callee might change the SP. 
21.232 __ movptr(rbp_mh_SP_save, rsp); 21.233 - debug_only(int off1 = cbuf.code_size()); 21.234 + debug_only(int off1 = cbuf.insts_size()); 21.235 assert(off1 - off0 == preserve_SP_size(), "correct size prediction"); 21.236 %} 21.237 21.238 @@ -1906,16 +1896,16 @@ 21.239 enc_class Java_Static_Call (method meth) %{ // JAVA STATIC CALL 21.240 // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine 21.241 // who we intended to call. 21.242 - cbuf.set_inst_mark(); 21.243 + cbuf.set_insts_mark(); 21.244 $$$emit8$primary; 21.245 if ( !_method ) { 21.246 - emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.code_end()) - 4), 21.247 + emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4), 21.248 runtime_call_Relocation::spec(), RELOC_IMM32 ); 21.249 } else if(_optimized_virtual) { 21.250 - emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.code_end()) - 4), 21.251 + emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4), 21.252 opt_virtual_call_Relocation::spec(), RELOC_IMM32 ); 21.253 } else { 21.254 - emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.code_end()) - 4), 21.255 + emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4), 21.256 static_call_Relocation::spec(), RELOC_IMM32 ); 21.257 } 21.258 if( _method ) { // Emit stub for static call 21.259 @@ -1927,15 +1917,15 @@ 21.260 // !!!!! 21.261 // Generate "Mov EAX,0x00", placeholder instruction to load oop-info 21.262 // emit_call_dynamic_prologue( cbuf ); 21.263 - cbuf.set_inst_mark(); 21.264 + cbuf.set_insts_mark(); 21.265 emit_opcode(cbuf, 0xB8 + EAX_enc); // mov EAX,-1 21.266 emit_d32_reloc(cbuf, (int)Universe::non_oop_word(), oop_Relocation::spec_for_immediate(), RELOC_IMM32); 21.267 - address virtual_call_oop_addr = cbuf.inst_mark(); 21.268 + address virtual_call_oop_addr = cbuf.insts_mark(); 21.269 // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine 21.270 // who we intended to call. 
21.271 - cbuf.set_inst_mark(); 21.272 + cbuf.set_insts_mark(); 21.273 $$$emit8$primary; 21.274 - emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.code_end()) - 4), 21.275 + emit_d32_reloc(cbuf, ($meth$$method - (int)(cbuf.insts_end()) - 4), 21.276 virtual_call_Relocation::spec(virtual_call_oop_addr), RELOC_IMM32 ); 21.277 %} 21.278 21.279 @@ -1944,7 +1934,7 @@ 21.280 assert( -128 <= disp && disp <= 127, "compiled_code_offset isn't small"); 21.281 21.282 // CALL *[EAX+in_bytes(methodOopDesc::from_compiled_code_entry_point_offset())] 21.283 - cbuf.set_inst_mark(); 21.284 + cbuf.set_insts_mark(); 21.285 $$$emit8$primary; 21.286 emit_rm(cbuf, 0x01, $secondary, EAX_enc ); // R/M byte 21.287 emit_d8(cbuf, disp); // Displacement 21.288 @@ -1976,9 +1966,9 @@ 21.289 // emit_rm(cbuf, 0x3, EBP_enc, EBP_enc); 21.290 // 21.291 // // CALL to interpreter. 21.292 -// cbuf.set_inst_mark(); 21.293 +// cbuf.set_insts_mark(); 21.294 // $$$emit8$primary; 21.295 -// emit_d32_reloc(cbuf, ($labl$$label - (int)(cbuf.code_end()) - 4), 21.296 +// emit_d32_reloc(cbuf, ($labl$$label - (int)(cbuf.insts_end()) - 4), 21.297 // runtime_call_Relocation::spec(), RELOC_IMM32 ); 21.298 // %} 21.299 21.300 @@ -2087,7 +2077,7 @@ 21.301 %} 21.302 21.303 enc_class Opc_MemImm_F(immF src) %{ 21.304 - cbuf.set_inst_mark(); 21.305 + cbuf.set_insts_mark(); 21.306 $$$emit8$primary; 21.307 emit_rm(cbuf, 0x0, $secondary, 0x5); 21.308 emit_float_constant(cbuf, $src$$constant); 21.309 @@ -2280,7 +2270,7 @@ 21.310 %} 21.311 21.312 enc_class set_instruction_start( ) %{ 21.313 - cbuf.set_inst_mark(); // Mark start of opcode for reloc info in mem operand 21.314 + cbuf.set_insts_mark(); // Mark start of opcode for reloc info in mem operand 21.315 %} 21.316 21.317 enc_class RegMem (eRegI ereg, memory mem) %{ // emit_reg_mem 21.318 @@ -2429,7 +2419,7 @@ 21.319 emit_opcode( cbuf, 0xD9 ); // FLD (i.e., push it) 21.320 emit_d8( cbuf, 0xC0-1+$src$$reg ); 21.321 } 21.322 - cbuf.set_inst_mark(); // Mark start of opcode for 
reloc info in mem operand 21.323 + cbuf.set_insts_mark(); // Mark start of opcode for reloc info in mem operand 21.324 emit_opcode(cbuf,$primary); 21.325 encode_RegMem(cbuf, reg_encoding, base, index, scale, displace, disp_is_oop); 21.326 %} 21.327 @@ -2474,7 +2464,7 @@ 21.328 emit_opcode(cbuf,0x1B); 21.329 emit_rm(cbuf, 0x3, tmpReg, tmpReg); 21.330 // AND $tmp,$y 21.331 - cbuf.set_inst_mark(); // Mark start of opcode for reloc info in mem operand 21.332 + cbuf.set_insts_mark(); // Mark start of opcode for reloc info in mem operand 21.333 emit_opcode(cbuf,0x23); 21.334 int reg_encoding = tmpReg; 21.335 int base = $mem$$base; 21.336 @@ -3157,9 +3147,9 @@ 21.337 // PUSH src2.lo 21.338 emit_opcode(cbuf, 0x50+$src2$$reg ); 21.339 // CALL directly to the runtime 21.340 - cbuf.set_inst_mark(); 21.341 + cbuf.set_insts_mark(); 21.342 emit_opcode(cbuf,0xE8); // Call into runtime 21.343 - emit_d32_reloc(cbuf, (CAST_FROM_FN_PTR(address, SharedRuntime::ldiv) - cbuf.code_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 ); 21.344 + emit_d32_reloc(cbuf, (CAST_FROM_FN_PTR(address, SharedRuntime::ldiv) - cbuf.insts_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 ); 21.345 // Restore stack 21.346 emit_opcode(cbuf, 0x83); // add SP, #framesize 21.347 emit_rm(cbuf, 0x3, 0x00, ESP_enc); 21.348 @@ -3176,9 +3166,9 @@ 21.349 // PUSH src2.lo 21.350 emit_opcode(cbuf, 0x50+$src2$$reg ); 21.351 // CALL directly to the runtime 21.352 - cbuf.set_inst_mark(); 21.353 + cbuf.set_insts_mark(); 21.354 emit_opcode(cbuf,0xE8); // Call into runtime 21.355 - emit_d32_reloc(cbuf, (CAST_FROM_FN_PTR(address, SharedRuntime::lrem ) - cbuf.code_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 ); 21.356 + emit_d32_reloc(cbuf, (CAST_FROM_FN_PTR(address, SharedRuntime::lrem ) - cbuf.insts_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 ); 21.357 // Restore stack 21.358 emit_opcode(cbuf, 0x83); // add SP, #framesize 21.359 emit_rm(cbuf, 0x3, 0x00, ESP_enc); 21.360 @@ -3824,9 
+3814,9 @@ 21.361 %} 21.362 21.363 enc_class enc_rethrow() %{ 21.364 - cbuf.set_inst_mark(); 21.365 + cbuf.set_insts_mark(); 21.366 emit_opcode(cbuf, 0xE9); // jmp entry 21.367 - emit_d32_reloc(cbuf, (int)OptoRuntime::rethrow_stub() - ((int)cbuf.code_end())-4, 21.368 + emit_d32_reloc(cbuf, (int)OptoRuntime::rethrow_stub() - ((int)cbuf.insts_end())-4, 21.369 runtime_call_Relocation::spec(), RELOC_IMM32 ); 21.370 %} 21.371 21.372 @@ -3873,9 +3863,9 @@ 21.373 emit_opcode(cbuf,0xD9 ); // FLD ST(i) 21.374 emit_d8 (cbuf,0xC0-1+$src$$reg ); 21.375 // CALL directly to the runtime 21.376 - cbuf.set_inst_mark(); 21.377 + cbuf.set_insts_mark(); 21.378 emit_opcode(cbuf,0xE8); // Call into runtime 21.379 - emit_d32_reloc(cbuf, (StubRoutines::d2i_wrapper() - cbuf.code_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 ); 21.380 + emit_d32_reloc(cbuf, (StubRoutines::d2i_wrapper() - cbuf.insts_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 ); 21.381 // Carry on here... 21.382 %} 21.383 21.384 @@ -3915,9 +3905,9 @@ 21.385 emit_opcode(cbuf,0xD9 ); // FLD ST(i) 21.386 emit_d8 (cbuf,0xC0-1+$src$$reg ); 21.387 // CALL directly to the runtime 21.388 - cbuf.set_inst_mark(); 21.389 + cbuf.set_insts_mark(); 21.390 emit_opcode(cbuf,0xE8); // Call into runtime 21.391 - emit_d32_reloc(cbuf, (StubRoutines::d2l_wrapper() - cbuf.code_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 ); 21.392 + emit_d32_reloc(cbuf, (StubRoutines::d2l_wrapper() - cbuf.insts_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 ); 21.393 // Carry on here... 
21.394 %} 21.395 21.396 @@ -3988,9 +3978,9 @@ 21.397 emit_d8(cbuf,0x04); 21.398 21.399 // CALL directly to the runtime 21.400 - cbuf.set_inst_mark(); 21.401 + cbuf.set_insts_mark(); 21.402 emit_opcode(cbuf,0xE8); // Call into runtime 21.403 - emit_d32_reloc(cbuf, (StubRoutines::d2l_wrapper() - cbuf.code_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 ); 21.404 + emit_d32_reloc(cbuf, (StubRoutines::d2l_wrapper() - cbuf.insts_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 ); 21.405 // Carry on here... 21.406 %} 21.407 21.408 @@ -4062,9 +4052,9 @@ 21.409 emit_d8(cbuf,0x08); 21.410 21.411 // CALL directly to the runtime 21.412 - cbuf.set_inst_mark(); 21.413 + cbuf.set_insts_mark(); 21.414 emit_opcode(cbuf,0xE8); // Call into runtime 21.415 - emit_d32_reloc(cbuf, (StubRoutines::d2l_wrapper() - cbuf.code_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 ); 21.416 + emit_d32_reloc(cbuf, (StubRoutines::d2l_wrapper() - cbuf.insts_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 ); 21.417 // Carry on here... 21.418 %} 21.419 21.420 @@ -4122,9 +4112,9 @@ 21.421 emit_d8(cbuf, $primary ? 0x8 : 0x4); 21.422 21.423 // CALL directly to the runtime 21.424 - cbuf.set_inst_mark(); 21.425 + cbuf.set_insts_mark(); 21.426 emit_opcode(cbuf,0xE8); // Call into runtime 21.427 - emit_d32_reloc(cbuf, (StubRoutines::d2i_wrapper() - cbuf.code_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 ); 21.428 + emit_d32_reloc(cbuf, (StubRoutines::d2i_wrapper() - cbuf.insts_end()) - 4, runtime_call_Relocation::spec(), RELOC_IMM32 ); 21.429 21.430 // Carry on here... 21.431 %} 21.432 @@ -4321,7 +4311,7 @@ 21.433 // so the memory operand is used twice in the encoding. 
21.434 enc_class enc_storeL_volatile( memory mem, stackSlotL src ) %{ 21.435 store_to_stackslot( cbuf, 0x0DF, 0x05, $src$$disp ); 21.436 - cbuf.set_inst_mark(); // Mark start of FIST in case $mem has an oop 21.437 + cbuf.set_insts_mark(); // Mark start of FIST in case $mem has an oop 21.438 emit_opcode(cbuf,0xDF); 21.439 int rm_byte_opcode = 0x07; 21.440 int base = $mem$$base; 21.441 @@ -4345,7 +4335,7 @@ 21.442 bool disp_is_oop = $src->disp_is_oop(); // disp-as-oop when working with static globals 21.443 encode_RegMem(cbuf, $tmp$$reg, base, index, scale, displace, disp_is_oop); 21.444 } 21.445 - cbuf.set_inst_mark(); // Mark start of MOVSD in case $mem has an oop 21.446 + cbuf.set_insts_mark(); // Mark start of MOVSD in case $mem has an oop 21.447 { // MOVSD $mem,$tmp ! atomic long store 21.448 emit_opcode(cbuf,0xF2); 21.449 emit_opcode(cbuf,0x0F); 21.450 @@ -4378,7 +4368,7 @@ 21.451 emit_opcode(cbuf,0x62); 21.452 emit_rm(cbuf, 0x3, $tmp$$reg, $tmp2$$reg); 21.453 } 21.454 - cbuf.set_inst_mark(); // Mark start of MOVSD in case $mem has an oop 21.455 + cbuf.set_insts_mark(); // Mark start of MOVSD in case $mem has an oop 21.456 { // MOVSD $mem,$tmp ! atomic long store 21.457 emit_opcode(cbuf,0xF2); 21.458 emit_opcode(cbuf,0x0F); 21.459 @@ -4399,7 +4389,7 @@ 21.460 // A better choice might be TESTB [spp + pagesize() - CacheLineSize()],0 21.461 21.462 enc_class Safepoint_Poll() %{ 21.463 - cbuf.relocate(cbuf.inst_mark(), relocInfo::poll_type, 0); 21.464 + cbuf.relocate(cbuf.insts_mark(), relocInfo::poll_type, 0); 21.465 emit_opcode(cbuf,0x85); 21.466 emit_rm (cbuf, 0x0, 0x7, 0x5); 21.467 emit_d32(cbuf, (intptr_t)os::get_polling_page()); 21.468 @@ -12932,7 +12922,7 @@ 21.469 bool ok = false; 21.470 if ($cop$$cmpcode == Assembler::notEqual) { 21.471 // the two jumps 6 bytes apart so the jump distances are too 21.472 - parity_disp = l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0; 21.473 + parity_disp = l ? 
(l->loc_pos() - (cbuf.insts_size() + 4)) : 0; 21.474 } else if ($cop$$cmpcode == Assembler::equal) { 21.475 parity_disp = 6; 21.476 ok = true; 21.477 @@ -12942,7 +12932,7 @@ 21.478 emit_d32(cbuf, parity_disp); 21.479 $$$emit8$primary; 21.480 emit_cc(cbuf, $secondary, $cop$$cmpcode); 21.481 - int disp = l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0; 21.482 + int disp = l ? (l->loc_pos() - (cbuf.insts_size() + 4)) : 0; 21.483 emit_d32(cbuf, disp); 21.484 %} 21.485 ins_pipe(pipe_jcc); 21.486 @@ -13128,7 +13118,7 @@ 21.487 emit_cc(cbuf, $primary, Assembler::parity); 21.488 int parity_disp = -1; 21.489 if ($cop$$cmpcode == Assembler::notEqual) { 21.490 - parity_disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0; 21.491 + parity_disp = l ? (l->loc_pos() - (cbuf.insts_size() + 1)) : 0; 21.492 } else if ($cop$$cmpcode == Assembler::equal) { 21.493 parity_disp = 2; 21.494 } else { 21.495 @@ -13136,7 +13126,7 @@ 21.496 } 21.497 emit_d8(cbuf, parity_disp); 21.498 emit_cc(cbuf, $primary, $cop$$cmpcode); 21.499 - int disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0; 21.500 + int disp = l ? (l->loc_pos() - (cbuf.insts_size() + 1)) : 0; 21.501 emit_d8(cbuf, disp); 21.502 assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp"); 21.503 assert(-128 <= parity_disp && parity_disp <= 127, "Displacement too large for short jmp");
22.1 --- a/src/cpu/x86/vm/x86_64.ad Mon Aug 23 08:44:03 2010 -0700 22.2 +++ b/src/cpu/x86/vm/x86_64.ad Wed Aug 25 10:31:45 2010 -0700 22.3 @@ -619,62 +619,48 @@ 22.4 #endif 22.5 22.6 // EMIT_RM() 22.7 -void emit_rm(CodeBuffer &cbuf, int f1, int f2, int f3) 22.8 -{ 22.9 +void emit_rm(CodeBuffer &cbuf, int f1, int f2, int f3) { 22.10 unsigned char c = (unsigned char) ((f1 << 6) | (f2 << 3) | f3); 22.11 - *(cbuf.code_end()) = c; 22.12 - cbuf.set_code_end(cbuf.code_end() + 1); 22.13 + cbuf.insts()->emit_int8(c); 22.14 } 22.15 22.16 // EMIT_CC() 22.17 -void emit_cc(CodeBuffer &cbuf, int f1, int f2) 22.18 -{ 22.19 +void emit_cc(CodeBuffer &cbuf, int f1, int f2) { 22.20 unsigned char c = (unsigned char) (f1 | f2); 22.21 - *(cbuf.code_end()) = c; 22.22 - cbuf.set_code_end(cbuf.code_end() + 1); 22.23 + cbuf.insts()->emit_int8(c); 22.24 } 22.25 22.26 // EMIT_OPCODE() 22.27 -void emit_opcode(CodeBuffer &cbuf, int code) 22.28 -{ 22.29 - *(cbuf.code_end()) = (unsigned char) code; 22.30 - cbuf.set_code_end(cbuf.code_end() + 1); 22.31 +void emit_opcode(CodeBuffer &cbuf, int code) { 22.32 + cbuf.insts()->emit_int8((unsigned char) code); 22.33 } 22.34 22.35 // EMIT_OPCODE() w/ relocation information 22.36 void emit_opcode(CodeBuffer &cbuf, 22.37 int code, relocInfo::relocType reloc, int offset, int format) 22.38 { 22.39 - cbuf.relocate(cbuf.inst_mark() + offset, reloc, format); 22.40 + cbuf.relocate(cbuf.insts_mark() + offset, reloc, format); 22.41 emit_opcode(cbuf, code); 22.42 } 22.43 22.44 // EMIT_D8() 22.45 -void emit_d8(CodeBuffer &cbuf, int d8) 22.46 -{ 22.47 - *(cbuf.code_end()) = (unsigned char) d8; 22.48 - cbuf.set_code_end(cbuf.code_end() + 1); 22.49 +void emit_d8(CodeBuffer &cbuf, int d8) { 22.50 + cbuf.insts()->emit_int8((unsigned char) d8); 22.51 } 22.52 22.53 // EMIT_D16() 22.54 -void emit_d16(CodeBuffer &cbuf, int d16) 22.55 -{ 22.56 - *((short *)(cbuf.code_end())) = d16; 22.57 - cbuf.set_code_end(cbuf.code_end() + 2); 22.58 +void emit_d16(CodeBuffer &cbuf, int d16) 
{ 22.59 + cbuf.insts()->emit_int16(d16); 22.60 } 22.61 22.62 // EMIT_D32() 22.63 -void emit_d32(CodeBuffer &cbuf, int d32) 22.64 -{ 22.65 - *((int *)(cbuf.code_end())) = d32; 22.66 - cbuf.set_code_end(cbuf.code_end() + 4); 22.67 +void emit_d32(CodeBuffer &cbuf, int d32) { 22.68 + cbuf.insts()->emit_int32(d32); 22.69 } 22.70 22.71 // EMIT_D64() 22.72 -void emit_d64(CodeBuffer &cbuf, int64_t d64) 22.73 -{ 22.74 - *((int64_t*) (cbuf.code_end())) = d64; 22.75 - cbuf.set_code_end(cbuf.code_end() + 8); 22.76 +void emit_d64(CodeBuffer &cbuf, int64_t d64) { 22.77 + cbuf.insts()->emit_int64(d64); 22.78 } 22.79 22.80 // emit 32 bit value and construct relocation entry from relocInfo::relocType 22.81 @@ -684,32 +670,24 @@ 22.82 int format) 22.83 { 22.84 assert(reloc != relocInfo::external_word_type, "use 2-arg emit_d32_reloc"); 22.85 - cbuf.relocate(cbuf.inst_mark(), reloc, format); 22.86 - 22.87 - *((int*) (cbuf.code_end())) = d32; 22.88 - cbuf.set_code_end(cbuf.code_end() + 4); 22.89 + cbuf.relocate(cbuf.insts_mark(), reloc, format); 22.90 + cbuf.insts()->emit_int32(d32); 22.91 } 22.92 22.93 // emit 32 bit value and construct relocation entry from RelocationHolder 22.94 -void emit_d32_reloc(CodeBuffer& cbuf, 22.95 - int d32, 22.96 - RelocationHolder const& rspec, 22.97 - int format) 22.98 -{ 22.99 +void emit_d32_reloc(CodeBuffer& cbuf, int d32, RelocationHolder const& rspec, int format) { 22.100 #ifdef ASSERT 22.101 if (rspec.reloc()->type() == relocInfo::oop_type && 22.102 d32 != 0 && d32 != (intptr_t) Universe::non_oop_word()) { 22.103 assert(oop((intptr_t)d32)->is_oop() && (ScavengeRootsInCode || !oop((intptr_t)d32)->is_scavengable()), "cannot embed scavengable oops in code"); 22.104 } 22.105 #endif 22.106 - cbuf.relocate(cbuf.inst_mark(), rspec, format); 22.107 - 22.108 - *((int* )(cbuf.code_end())) = d32; 22.109 - cbuf.set_code_end(cbuf.code_end() + 4); 22.110 + cbuf.relocate(cbuf.insts_mark(), rspec, format); 22.111 + cbuf.insts()->emit_int32(d32); 22.112 } 22.113 
22.114 void emit_d32_reloc(CodeBuffer& cbuf, address addr) { 22.115 - address next_ip = cbuf.code_end() + 4; 22.116 + address next_ip = cbuf.insts_end() + 4; 22.117 emit_d32_reloc(cbuf, (int) (addr - next_ip), 22.118 external_word_Relocation::spec(addr), 22.119 RELOC_DISP32); 22.120 @@ -717,23 +695,13 @@ 22.121 22.122 22.123 // emit 64 bit value and construct relocation entry from relocInfo::relocType 22.124 -void emit_d64_reloc(CodeBuffer& cbuf, 22.125 - int64_t d64, 22.126 - relocInfo::relocType reloc, 22.127 - int format) 22.128 -{ 22.129 - cbuf.relocate(cbuf.inst_mark(), reloc, format); 22.130 - 22.131 - *((int64_t*) (cbuf.code_end())) = d64; 22.132 - cbuf.set_code_end(cbuf.code_end() + 8); 22.133 +void emit_d64_reloc(CodeBuffer& cbuf, int64_t d64, relocInfo::relocType reloc, int format) { 22.134 + cbuf.relocate(cbuf.insts_mark(), reloc, format); 22.135 + cbuf.insts()->emit_int64(d64); 22.136 } 22.137 22.138 // emit 64 bit value and construct relocation entry from RelocationHolder 22.139 -void emit_d64_reloc(CodeBuffer& cbuf, 22.140 - int64_t d64, 22.141 - RelocationHolder const& rspec, 22.142 - int format) 22.143 -{ 22.144 +void emit_d64_reloc(CodeBuffer& cbuf, int64_t d64, RelocationHolder const& rspec, int format) { 22.145 #ifdef ASSERT 22.146 if (rspec.reloc()->type() == relocInfo::oop_type && 22.147 d64 != 0 && d64 != (int64_t) Universe::non_oop_word()) { 22.148 @@ -741,10 +709,8 @@ 22.149 "cannot embed scavengable oops in code"); 22.150 } 22.151 #endif 22.152 - cbuf.relocate(cbuf.inst_mark(), rspec, format); 22.153 - 22.154 - *((int64_t*) (cbuf.code_end())) = d64; 22.155 - cbuf.set_code_end(cbuf.code_end() + 8); 22.156 + cbuf.relocate(cbuf.insts_mark(), rspec, format); 22.157 + cbuf.insts()->emit_int64(d64); 22.158 } 22.159 22.160 // Access stack slot for load or store 22.161 @@ -966,7 +932,7 @@ 22.162 } 22.163 } 22.164 22.165 - C->set_frame_complete(cbuf.code_end() - cbuf.code_begin()); 22.166 + C->set_frame_complete(cbuf.insts_size()); 22.167 22.168 
#ifdef ASSERT 22.169 if (VerifyStackAtCalls) { 22.170 @@ -1050,11 +1016,11 @@ 22.171 if (do_polling() && C->is_method_compilation()) { 22.172 // testl %rax, off(%rip) // Opcode + ModRM + Disp32 == 6 bytes 22.173 // XXX reg_mem doesn't support RIP-relative addressing yet 22.174 - cbuf.set_inst_mark(); 22.175 - cbuf.relocate(cbuf.inst_mark(), relocInfo::poll_return_type, 0); // XXX 22.176 + cbuf.set_insts_mark(); 22.177 + cbuf.relocate(cbuf.insts_mark(), relocInfo::poll_return_type, 0); // XXX 22.178 emit_opcode(cbuf, 0x85); // testl 22.179 emit_rm(cbuf, 0x0, RAX_enc, 0x5); // 00 rax 101 == 0x5 22.180 - // cbuf.inst_mark() is beginning of instruction 22.181 + // cbuf.insts_mark() is beginning of instruction 22.182 emit_d32_reloc(cbuf, os::get_polling_page()); 22.183 // relocInfo::poll_return_type, 22.184 } 22.185 @@ -1814,9 +1780,9 @@ 22.186 // movq rbx, 0 22.187 // jmp -5 # to self 22.188 22.189 - address mark = cbuf.inst_mark(); // get mark within main instrs section 22.190 - 22.191 - // Note that the code buffer's inst_mark is always relative to insts. 22.192 + address mark = cbuf.insts_mark(); // get mark within main instrs section 22.193 + 22.194 + // Note that the code buffer's insts_mark is always relative to insts. 22.195 // That's why we must use the macroassembler to generate a stub. 22.196 MacroAssembler _masm(&cbuf); 22.197 22.198 @@ -1830,7 +1796,7 @@ 22.199 // This is recognized as unresolved by relocs/nativeinst/ic code 22.200 __ jump(RuntimeAddress(__ pc())); 22.201 22.202 - // Update current stubs pointer and restore code_end. 22.203 + // Update current stubs pointer and restore insts_end. 
22.204 __ end_a_stub(); 22.205 } 22.206 22.207 @@ -1868,7 +1834,7 @@ 22.208 void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const 22.209 { 22.210 MacroAssembler masm(&cbuf); 22.211 - uint code_size = cbuf.code_size(); 22.212 + uint insts_size = cbuf.insts_size(); 22.213 if (UseCompressedOops) { 22.214 masm.load_klass(rscratch1, j_rarg0); 22.215 masm.cmpptr(rax, rscratch1); 22.216 @@ -1880,7 +1846,7 @@ 22.217 22.218 /* WARNING these NOPs are critical so that verified entry point is properly 22.219 4 bytes aligned for patching by NativeJump::patch_verified_entry() */ 22.220 - int nops_cnt = 4 - ((cbuf.code_size() - code_size) & 0x3); 22.221 + int nops_cnt = 4 - ((cbuf.insts_size() - insts_size) & 0x3); 22.222 if (OptoBreakpoint) { 22.223 // Leave space for int3 22.224 nops_cnt -= 1; 22.225 @@ -1910,14 +1876,14 @@ 22.226 int emit_exception_handler(CodeBuffer& cbuf) 22.227 { 22.228 22.229 - // Note that the code buffer's inst_mark is always relative to insts. 22.230 + // Note that the code buffer's insts_mark is always relative to insts. 22.231 // That's why we must use the macroassembler to generate a handler. 22.232 MacroAssembler _masm(&cbuf); 22.233 address base = 22.234 __ start_a_stub(size_exception_handler()); 22.235 if (base == NULL) return 0; // CodeBuffer::expand failed 22.236 int offset = __ offset(); 22.237 - __ jump(RuntimeAddress(OptoRuntime::exception_blob()->instructions_begin())); 22.238 + __ jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point())); 22.239 assert(__ offset() - offset <= (int) size_exception_handler(), "overflow"); 22.240 __ end_a_stub(); 22.241 return offset; 22.242 @@ -1933,7 +1899,7 @@ 22.243 int emit_deopt_handler(CodeBuffer& cbuf) 22.244 { 22.245 22.246 - // Note that the code buffer's inst_mark is always relative to insts. 22.247 + // Note that the code buffer's insts_mark is always relative to insts. 22.248 // That's why we must use the macroassembler to generate a handler. 
22.249 MacroAssembler _masm(&cbuf); 22.250 address base = 22.251 @@ -1962,7 +1928,7 @@ 22.252 address double_address = __ double_constant(x); 22.253 cbuf.insts()->set_mark_off(mark); // preserve mark across masm shift 22.254 emit_d32_reloc(cbuf, 22.255 - (int) (double_address - cbuf.code_end() - 4), 22.256 + (int) (double_address - cbuf.insts_end() - 4), 22.257 internal_word_Relocation::spec(double_address), 22.258 RELOC_DISP32); 22.259 } 22.260 @@ -1973,7 +1939,7 @@ 22.261 address float_address = __ float_constant(x); 22.262 cbuf.insts()->set_mark_off(mark); // preserve mark across masm shift 22.263 emit_d32_reloc(cbuf, 22.264 - (int) (float_address - cbuf.code_end() - 4), 22.265 + (int) (float_address - cbuf.insts_end() - 4), 22.266 internal_word_Relocation::spec(float_address), 22.267 RELOC_DISP32); 22.268 } 22.269 @@ -2481,14 +2447,14 @@ 22.270 %{ 22.271 // JMP, CALL 22.272 Label* l = $labl$$label; 22.273 - emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0); 22.274 + emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.insts_size() + 4)) : 0); 22.275 %} 22.276 22.277 enc_class LblShort(label labl) 22.278 %{ 22.279 // JMP, CALL 22.280 Label* l = $labl$$label; 22.281 - int disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0; 22.282 + int disp = l ? (l->loc_pos() - (cbuf.insts_size() + 1)) : 0; 22.283 assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp"); 22.284 emit_d8(cbuf, disp); 22.285 %} 22.286 @@ -2517,7 +2483,7 @@ 22.287 Label* l = $labl$$label; 22.288 $$$emit8$primary; 22.289 emit_cc(cbuf, $secondary, $cop$$cmpcode); 22.290 - emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0); 22.291 + emit_d32(cbuf, l ? (l->loc_pos() - (cbuf.insts_size() + 4)) : 0); 22.292 %} 22.293 22.294 enc_class JccShort (cmpOp cop, label labl) 22.295 @@ -2525,7 +2491,7 @@ 22.296 // JCC 22.297 Label *l = $labl$$label; 22.298 emit_cc(cbuf, $primary, $cop$$cmpcode); 22.299 - int disp = l ? 
(l->loc_pos() - (cbuf.code_size() + 1)) : 0; 22.300 + int disp = l ? (l->loc_pos() - (cbuf.insts_size() + 1)) : 0; 22.301 assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp"); 22.302 emit_d8(cbuf, disp); 22.303 %} 22.304 @@ -2609,22 +2575,22 @@ 22.305 %{ 22.306 // CALL Java_To_Interpreter 22.307 // This is the instruction starting address for relocation info. 22.308 - cbuf.set_inst_mark(); 22.309 + cbuf.set_insts_mark(); 22.310 $$$emit8$primary; 22.311 // CALL directly to the runtime 22.312 emit_d32_reloc(cbuf, 22.313 - (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4), 22.314 + (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4), 22.315 runtime_call_Relocation::spec(), 22.316 RELOC_DISP32); 22.317 %} 22.318 22.319 enc_class preserve_SP %{ 22.320 - debug_only(int off0 = cbuf.code_size()); 22.321 + debug_only(int off0 = cbuf.insts_size()); 22.322 MacroAssembler _masm(&cbuf); 22.323 // RBP is preserved across all calls, even compiled calls. 22.324 // Use it to preserve RSP in places where the callee might change the SP. 22.325 __ movptr(rbp_mh_SP_save, rsp); 22.326 - debug_only(int off1 = cbuf.code_size()); 22.327 + debug_only(int off1 = cbuf.insts_size()); 22.328 assert(off1 - off0 == preserve_SP_size(), "correct size prediction"); 22.329 %} 22.330 22.331 @@ -2638,22 +2604,22 @@ 22.332 // JAVA STATIC CALL 22.333 // CALL to fixup routine. Fixup routine uses ScopeDesc info to 22.334 // determine who we intended to call. 
22.335 - cbuf.set_inst_mark(); 22.336 + cbuf.set_insts_mark(); 22.337 $$$emit8$primary; 22.338 22.339 if (!_method) { 22.340 emit_d32_reloc(cbuf, 22.341 - (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4), 22.342 + (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4), 22.343 runtime_call_Relocation::spec(), 22.344 RELOC_DISP32); 22.345 } else if (_optimized_virtual) { 22.346 emit_d32_reloc(cbuf, 22.347 - (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4), 22.348 + (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4), 22.349 opt_virtual_call_Relocation::spec(), 22.350 RELOC_DISP32); 22.351 } else { 22.352 emit_d32_reloc(cbuf, 22.353 - (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4), 22.354 + (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4), 22.355 static_call_Relocation::spec(), 22.356 RELOC_DISP32); 22.357 } 22.358 @@ -2669,7 +2635,7 @@ 22.359 // !!!!! 22.360 // Generate "movq rax, -1", placeholder instruction to load oop-info 22.361 // emit_call_dynamic_prologue( cbuf ); 22.362 - cbuf.set_inst_mark(); 22.363 + cbuf.set_insts_mark(); 22.364 22.365 // movq rax, -1 22.366 emit_opcode(cbuf, Assembler::REX_W); 22.367 @@ -2677,13 +2643,13 @@ 22.368 emit_d64_reloc(cbuf, 22.369 (int64_t) Universe::non_oop_word(), 22.370 oop_Relocation::spec_for_immediate(), RELOC_IMM64); 22.371 - address virtual_call_oop_addr = cbuf.inst_mark(); 22.372 + address virtual_call_oop_addr = cbuf.insts_mark(); 22.373 // CALL to fixup routine. Fixup routine uses ScopeDesc info to determine 22.374 // who we intended to call. 
22.375 - cbuf.set_inst_mark(); 22.376 + cbuf.set_insts_mark(); 22.377 $$$emit8$primary; 22.378 emit_d32_reloc(cbuf, 22.379 - (int) ($meth$$method - ((intptr_t) cbuf.code_end()) - 4), 22.380 + (int) ($meth$$method - ((intptr_t) cbuf.insts_end()) - 4), 22.381 virtual_call_Relocation::spec(virtual_call_oop_addr), 22.382 RELOC_DISP32); 22.383 %} 22.384 @@ -2697,7 +2663,7 @@ 22.385 // assert(-0x80 <= disp && disp < 0x80, "compiled_code_offset isn't small"); 22.386 22.387 // callq *disp(%rax) 22.388 - cbuf.set_inst_mark(); 22.389 + cbuf.set_insts_mark(); 22.390 $$$emit8$primary; 22.391 if (disp < 0x80) { 22.392 emit_rm(cbuf, 0x01, $secondary, RAX_enc); // R/M byte 22.393 @@ -3729,10 +3695,10 @@ 22.394 22.395 enc_class enc_rethrow() 22.396 %{ 22.397 - cbuf.set_inst_mark(); 22.398 + cbuf.set_insts_mark(); 22.399 emit_opcode(cbuf, 0xE9); // jmp entry 22.400 emit_d32_reloc(cbuf, 22.401 - (int) (OptoRuntime::rethrow_stub() - cbuf.code_end() - 4), 22.402 + (int) (OptoRuntime::rethrow_stub() - cbuf.insts_end() - 4), 22.403 runtime_call_Relocation::spec(), 22.404 RELOC_DISP32); 22.405 %} 22.406 @@ -3742,7 +3708,7 @@ 22.407 int dstenc = $dst$$reg; 22.408 address signmask_address = (address) StubRoutines::x86::float_sign_mask(); 22.409 22.410 - cbuf.set_inst_mark(); 22.411 + cbuf.set_insts_mark(); 22.412 if (dstenc >= 8) { 22.413 emit_opcode(cbuf, Assembler::REX_R); 22.414 dstenc -= 8; 22.415 @@ -3759,7 +3725,7 @@ 22.416 int dstenc = $dst$$reg; 22.417 address signmask_address = (address) StubRoutines::x86::double_sign_mask(); 22.418 22.419 - cbuf.set_inst_mark(); 22.420 + cbuf.set_insts_mark(); 22.421 emit_opcode(cbuf, 0x66); 22.422 if (dstenc >= 8) { 22.423 emit_opcode(cbuf, Assembler::REX_R); 22.424 @@ -3777,7 +3743,7 @@ 22.425 int dstenc = $dst$$reg; 22.426 address signflip_address = (address) StubRoutines::x86::float_sign_flip(); 22.427 22.428 - cbuf.set_inst_mark(); 22.429 + cbuf.set_insts_mark(); 22.430 if (dstenc >= 8) { 22.431 emit_opcode(cbuf, Assembler::REX_R); 22.432 
dstenc -= 8; 22.433 @@ -3794,7 +3760,7 @@ 22.434 int dstenc = $dst$$reg; 22.435 address signflip_address = (address) StubRoutines::x86::double_sign_flip(); 22.436 22.437 - cbuf.set_inst_mark(); 22.438 + cbuf.set_insts_mark(); 22.439 emit_opcode(cbuf, 0x66); 22.440 if (dstenc >= 8) { 22.441 emit_opcode(cbuf, Assembler::REX_R); 22.442 @@ -3846,11 +3812,11 @@ 22.443 encode_RegMem(cbuf, srcenc, RSP_enc, 0x4, 0, 0, false); // 2 bytes 22.444 22.445 // call f2i_fixup 22.446 - cbuf.set_inst_mark(); 22.447 + cbuf.set_insts_mark(); 22.448 emit_opcode(cbuf, 0xE8); 22.449 emit_d32_reloc(cbuf, 22.450 (int) 22.451 - (StubRoutines::x86::f2i_fixup() - cbuf.code_end() - 4), 22.452 + (StubRoutines::x86::f2i_fixup() - cbuf.insts_end() - 4), 22.453 runtime_call_Relocation::spec(), 22.454 RELOC_DISP32); 22.455 22.456 @@ -3870,7 +3836,7 @@ 22.457 address const_address = (address) StubRoutines::x86::double_sign_flip(); 22.458 22.459 // cmpq $dst, [0x8000000000000000] 22.460 - cbuf.set_inst_mark(); 22.461 + cbuf.set_insts_mark(); 22.462 emit_opcode(cbuf, dstenc < 8 ? 
Assembler::REX_W : Assembler::REX_WR); 22.463 emit_opcode(cbuf, 0x39); 22.464 // XXX reg_mem doesn't support RIP-relative addressing yet 22.465 @@ -3904,11 +3870,11 @@ 22.466 encode_RegMem(cbuf, srcenc, RSP_enc, 0x4, 0, 0, false); // 2 bytes 22.467 22.468 // call f2l_fixup 22.469 - cbuf.set_inst_mark(); 22.470 + cbuf.set_insts_mark(); 22.471 emit_opcode(cbuf, 0xE8); 22.472 emit_d32_reloc(cbuf, 22.473 (int) 22.474 - (StubRoutines::x86::f2l_fixup() - cbuf.code_end() - 4), 22.475 + (StubRoutines::x86::f2l_fixup() - cbuf.insts_end() - 4), 22.476 runtime_call_Relocation::spec(), 22.477 RELOC_DISP32); 22.478 22.479 @@ -3960,11 +3926,11 @@ 22.480 encode_RegMem(cbuf, srcenc, RSP_enc, 0x4, 0, 0, false); // 2 bytes 22.481 22.482 // call d2i_fixup 22.483 - cbuf.set_inst_mark(); 22.484 + cbuf.set_insts_mark(); 22.485 emit_opcode(cbuf, 0xE8); 22.486 emit_d32_reloc(cbuf, 22.487 (int) 22.488 - (StubRoutines::x86::d2i_fixup() - cbuf.code_end() - 4), 22.489 + (StubRoutines::x86::d2i_fixup() - cbuf.insts_end() - 4), 22.490 runtime_call_Relocation::spec(), 22.491 RELOC_DISP32); 22.492 22.493 @@ -3984,7 +3950,7 @@ 22.494 address const_address = (address) StubRoutines::x86::double_sign_flip(); 22.495 22.496 // cmpq $dst, [0x8000000000000000] 22.497 - cbuf.set_inst_mark(); 22.498 + cbuf.set_insts_mark(); 22.499 emit_opcode(cbuf, dstenc < 8 ? 
Assembler::REX_W : Assembler::REX_WR); 22.500 emit_opcode(cbuf, 0x39); 22.501 // XXX reg_mem doesn't support RIP-relative addressing yet 22.502 @@ -4018,11 +3984,11 @@ 22.503 encode_RegMem(cbuf, srcenc, RSP_enc, 0x4, 0, 0, false); // 2 bytes 22.504 22.505 // call d2l_fixup 22.506 - cbuf.set_inst_mark(); 22.507 + cbuf.set_insts_mark(); 22.508 emit_opcode(cbuf, 0xE8); 22.509 emit_d32_reloc(cbuf, 22.510 (int) 22.511 - (StubRoutines::x86::d2l_fixup() - cbuf.code_end() - 4), 22.512 + (StubRoutines::x86::d2l_fixup() - cbuf.insts_end() - 4), 22.513 runtime_call_Relocation::spec(), 22.514 RELOC_DISP32); 22.515 22.516 @@ -4042,11 +4008,11 @@ 22.517 %{ 22.518 // testl %rax, off(%rip) // Opcode + ModRM + Disp32 == 6 bytes 22.519 // XXX reg_mem doesn't support RIP-relative addressing yet 22.520 - cbuf.set_inst_mark(); 22.521 - cbuf.relocate(cbuf.inst_mark(), relocInfo::poll_type, 0); // XXX 22.522 + cbuf.set_insts_mark(); 22.523 + cbuf.relocate(cbuf.insts_mark(), relocInfo::poll_type, 0); // XXX 22.524 emit_opcode(cbuf, 0x85); // testl 22.525 emit_rm(cbuf, 0x0, RAX_enc, 0x5); // 00 rax 101 == 0x5 22.526 - // cbuf.inst_mark() is beginning of instruction 22.527 + // cbuf.insts_mark() is beginning of instruction 22.528 emit_d32_reloc(cbuf, os::get_polling_page()); 22.529 // relocInfo::poll_type, 22.530 %} 22.531 @@ -12304,7 +12270,7 @@ 22.532 int parity_disp = -1; 22.533 if ($cop$$cmpcode == Assembler::notEqual) { 22.534 // the two jumps 6 bytes apart so the jump distances are too 22.535 - parity_disp = l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0; 22.536 + parity_disp = l ? (l->loc_pos() - (cbuf.insts_size() + 4)) : 0; 22.537 } else if ($cop$$cmpcode == Assembler::equal) { 22.538 parity_disp = 6; 22.539 } else { 22.540 @@ -12313,7 +12279,7 @@ 22.541 emit_d32(cbuf, parity_disp); 22.542 $$$emit8$primary; 22.543 emit_cc(cbuf, $secondary, $cop$$cmpcode); 22.544 - int disp = l ? (l->loc_pos() - (cbuf.code_size() + 4)) : 0; 22.545 + int disp = l ? 
(l->loc_pos() - (cbuf.insts_size() + 4)) : 0; 22.546 emit_d32(cbuf, disp); 22.547 %} 22.548 ins_pipe(pipe_jcc); 22.549 @@ -12508,7 +12474,7 @@ 22.550 emit_cc(cbuf, $primary, Assembler::parity); 22.551 int parity_disp = -1; 22.552 if ($cop$$cmpcode == Assembler::notEqual) { 22.553 - parity_disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0; 22.554 + parity_disp = l ? (l->loc_pos() - (cbuf.insts_size() + 1)) : 0; 22.555 } else if ($cop$$cmpcode == Assembler::equal) { 22.556 parity_disp = 2; 22.557 } else { 22.558 @@ -12516,7 +12482,7 @@ 22.559 } 22.560 emit_d8(cbuf, parity_disp); 22.561 emit_cc(cbuf, $primary, $cop$$cmpcode); 22.562 - int disp = l ? (l->loc_pos() - (cbuf.code_size() + 1)) : 0; 22.563 + int disp = l ? (l->loc_pos() - (cbuf.insts_size() + 1)) : 0; 22.564 emit_d8(cbuf, disp); 22.565 assert(-128 <= disp && disp <= 127, "Displacement too large for short jmp"); 22.566 assert(-128 <= parity_disp && parity_disp <= 127, "Displacement too large for short jmp");
23.1 --- a/src/os/solaris/dtrace/generateJvmOffsets.cpp Mon Aug 23 08:44:03 2010 -0700 23.2 +++ b/src/os/solaris/dtrace/generateJvmOffsets.cpp Wed Aug 25 10:31:45 2010 -0700 23.3 @@ -230,7 +230,8 @@ 23.4 23.5 GEN_OFFS(CodeBlob, _name); 23.6 GEN_OFFS(CodeBlob, _header_size); 23.7 - GEN_OFFS(CodeBlob, _instructions_offset); 23.8 + GEN_OFFS(CodeBlob, _content_offset); 23.9 + GEN_OFFS(CodeBlob, _code_offset); 23.10 GEN_OFFS(CodeBlob, _data_offset); 23.11 GEN_OFFS(CodeBlob, _frame_size); 23.12 printf("\n");
24.1 --- a/src/os/solaris/dtrace/libjvm_db.c Mon Aug 23 08:44:03 2010 -0700 24.2 +++ b/src/os/solaris/dtrace/libjvm_db.c Wed Aug 25 10:31:45 2010 -0700 24.3 @@ -124,7 +124,7 @@ 24.4 uint64_t pc_desc; 24.5 24.6 int32_t orig_pc_offset; /* _orig_pc_offset */ 24.7 - int32_t instrs_beg; /* _instructions_offset */ 24.8 + int32_t instrs_beg; /* _code_offset */ 24.9 int32_t instrs_end; 24.10 int32_t deopt_beg; /* _deoptimize_offset */ 24.11 int32_t scopes_data_beg; /* _scopes_data_offset */ 24.12 @@ -587,7 +587,7 @@ 24.13 fprintf(stderr, "\t nmethod_info: BEGIN \n"); 24.14 24.15 /* Instructions */ 24.16 - err = ps_pread(J->P, nm + OFFSET_CodeBlob_instructions_offset, &N->instrs_beg, SZ32); 24.17 + err = ps_pread(J->P, nm + OFFSET_CodeBlob_code_offset, &N->instrs_beg, SZ32); 24.18 CHECK_FAIL(err); 24.19 err = ps_pread(J->P, nm + OFFSET_CodeBlob_data_offset, &N->instrs_end, SZ32); 24.20 CHECK_FAIL(err);
25.1 --- a/src/os_cpu/windows_x86/vm/os_windows_x86.cpp Mon Aug 23 08:44:03 2010 -0700 25.2 +++ b/src/os_cpu/windows_x86/vm/os_windows_x86.cpp Wed Aug 25 10:31:45 2010 -0700 25.3 @@ -1,5 +1,5 @@ 25.4 /* 25.5 - * Copyright (c) 1999, 2009, Oracle and/or its affiliates. All rights reserved. 25.6 + * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. 25.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 25.8 * 25.9 * This code is free software; you can redistribute it and/or modify it 25.10 @@ -149,8 +149,8 @@ 25.11 // If we are using Vectored Exceptions we don't need this registration 25.12 if (UseVectoredExceptions) return true; 25.13 25.14 - BufferBlob* b = BufferBlob::create("CodeCache Exception Handler", sizeof (DynamicCodeData)); 25.15 - CodeBuffer cb(b->instructions_begin(), b->instructions_size()); 25.16 + BufferBlob* blob = BufferBlob::create("CodeCache Exception Handler", sizeof(DynamicCodeData)); 25.17 + CodeBuffer cb(blob); 25.18 MacroAssembler* masm = new MacroAssembler(&cb); 25.19 pDCD = (pDynamicCodeData) masm->pc(); 25.20
26.1 --- a/src/os_cpu/windows_x86/vm/windows_x86_32.ad Mon Aug 23 08:44:03 2010 -0700 26.2 +++ b/src/os_cpu/windows_x86/vm/windows_x86_32.ad Wed Aug 25 10:31:45 2010 -0700 26.3 @@ -1,5 +1,5 @@ 26.4 // 26.5 -// Copyright (c) 1999, 2006, Oracle and/or its affiliates. All rights reserved. 26.6 +// Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. 26.7 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 26.8 // 26.9 // This code is free software; you can redistribute it and/or modify it 26.10 @@ -141,8 +141,7 @@ 26.11 26.12 // emit an interrupt that is caught by the debugger 26.13 void emit_break(CodeBuffer &cbuf) { 26.14 - *(cbuf.code_end()) = (unsigned char)(0xcc); 26.15 - cbuf.set_code_end(cbuf.code_end() + 1); 26.16 + cbuf.insts()->emit_int8((unsigned char) 0xcc); 26.17 } 26.18 26.19 void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
27.1 --- a/src/os_cpu/windows_x86/vm/windows_x86_64.ad Mon Aug 23 08:44:03 2010 -0700 27.2 +++ b/src/os_cpu/windows_x86/vm/windows_x86_64.ad Wed Aug 25 10:31:45 2010 -0700 27.3 @@ -1,5 +1,5 @@ 27.4 // 27.5 -// Copyright (c) 2003, 2006, Oracle and/or its affiliates. All rights reserved. 27.6 +// Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. 27.7 // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 27.8 // 27.9 // This code is free software; you can redistribute it and/or modify it 27.10 @@ -144,8 +144,7 @@ 27.11 27.12 // emit an interrupt that is caught by the debugger 27.13 void emit_break(CodeBuffer &cbuf) { 27.14 - *(cbuf.code_end()) = (unsigned char)(0xcc); 27.15 - cbuf.set_code_end(cbuf.code_end() + 1); 27.16 + cbuf.insts()->emit_int8((unsigned char) 0xcc); 27.17 } 27.18 27.19 void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
28.1 --- a/src/share/vm/adlc/output_c.cpp Mon Aug 23 08:44:03 2010 -0700 28.2 +++ b/src/share/vm/adlc/output_c.cpp Wed Aug 25 10:31:45 2010 -0700 28.3 @@ -2482,7 +2482,7 @@ 28.4 } 28.5 28.6 // Save current instruction's starting address (helps with relocation). 28.7 - fprintf( fp, " cbuf.set_inst_mark();\n"); 28.8 + fprintf(fp, " cbuf.set_insts_mark();\n"); 28.9 28.10 // // // idx0 is only needed for syntactic purposes and only by "storeSSI" 28.11 // fprintf( fp, " unsigned idx0 = 0;\n");
29.1 --- a/src/share/vm/asm/codeBuffer.cpp Mon Aug 23 08:44:03 2010 -0700 29.2 +++ b/src/share/vm/asm/codeBuffer.cpp Wed Aug 25 10:31:45 2010 -0700 29.3 @@ -1,5 +1,5 @@ 29.4 /* 29.5 - * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved. 29.6 + * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 29.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 29.8 * 29.9 * This code is free software; you can redistribute it and/or modify it 29.10 @@ -74,12 +74,11 @@ 29.11 29.12 typedef CodeBuffer::csize_t csize_t; // file-local definition 29.13 29.14 -// external buffer, in a predefined CodeBlob or other buffer area 29.15 +// External buffer, in a predefined CodeBlob. 29.16 // Important: The code_start must be taken exactly, and not realigned. 29.17 -CodeBuffer::CodeBuffer(address code_start, csize_t code_size) { 29.18 - assert(code_start != NULL, "sanity"); 29.19 +CodeBuffer::CodeBuffer(CodeBlob* blob) { 29.20 initialize_misc("static buffer"); 29.21 - initialize(code_start, code_size); 29.22 + initialize(blob->content_begin(), blob->content_size()); 29.23 assert(verify_section_allocation(), "initial use of buffer OK"); 29.24 } 29.25 29.26 @@ -99,7 +98,7 @@ 29.27 // Set up various pointers into the blob. 29.28 initialize(_total_start, _total_size); 29.29 29.30 - assert((uintptr_t)code_begin() % CodeEntryAlignment == 0, "instruction start not code entry aligned"); 29.31 + assert((uintptr_t)insts_begin() % CodeEntryAlignment == 0, "instruction start not code entry aligned"); 29.32 29.33 pd_initialize(); 29.34 29.35 @@ -192,8 +191,8 @@ 29.36 void CodeBuffer::set_blob(BufferBlob* blob) { 29.37 _blob = blob; 29.38 if (blob != NULL) { 29.39 - address start = blob->instructions_begin(); 29.40 - address end = blob->instructions_end(); 29.41 + address start = blob->content_begin(); 29.42 + address end = blob->content_end(); 29.43 // Round up the starting address. 
29.44 int align = _insts.alignment(); 29.45 start += (-(intptr_t)start) & (align-1); 29.46 @@ -422,21 +421,21 @@ 29.47 /// The pattern is the same for all functions. 29.48 /// We iterate over all the sections, padding each to alignment. 29.49 29.50 -csize_t CodeBuffer::total_code_size() const { 29.51 - csize_t code_size_so_far = 0; 29.52 +csize_t CodeBuffer::total_content_size() const { 29.53 + csize_t size_so_far = 0; 29.54 for (int n = 0; n < (int)SECT_LIMIT; n++) { 29.55 const CodeSection* cs = code_section(n); 29.56 if (cs->is_empty()) continue; // skip trivial section 29.57 - code_size_so_far = cs->align_at_start(code_size_so_far); 29.58 - code_size_so_far += cs->size(); 29.59 + size_so_far = cs->align_at_start(size_so_far); 29.60 + size_so_far += cs->size(); 29.61 } 29.62 - return code_size_so_far; 29.63 + return size_so_far; 29.64 } 29.65 29.66 void CodeBuffer::compute_final_layout(CodeBuffer* dest) const { 29.67 address buf = dest->_total_start; 29.68 csize_t buf_offset = 0; 29.69 - assert(dest->_total_size >= total_code_size(), "must be big enough"); 29.70 + assert(dest->_total_size >= total_content_size(), "must be big enough"); 29.71 29.72 { 29.73 // not sure why this is here, but why not... 29.74 @@ -489,7 +488,7 @@ 29.75 } 29.76 29.77 // Done calculating sections; did it come out to the right end? 
29.78 - assert(buf_offset == total_code_size(), "sanity"); 29.79 + assert(buf_offset == total_content_size(), "sanity"); 29.80 assert(dest->verify_section_allocation(), "final configuration works"); 29.81 } 29.82 29.83 @@ -515,7 +514,7 @@ 29.84 29.85 csize_t CodeBuffer::total_relocation_size() const { 29.86 csize_t lsize = copy_relocations_to(NULL); // dry run only 29.87 - csize_t csize = total_code_size(); 29.88 + csize_t csize = total_content_size(); 29.89 csize_t total = RelocIterator::locs_and_index_size(csize, lsize); 29.90 return (csize_t) align_size_up(total, HeapWordSize); 29.91 } 29.92 @@ -601,7 +600,7 @@ 29.93 buf_offset += sizeof(relocInfo); 29.94 } 29.95 29.96 - assert(code_end_so_far == total_code_size(), "sanity"); 29.97 + assert(code_end_so_far == total_content_size(), "sanity"); 29.98 29.99 // Account for index: 29.100 if (buf != NULL) { 29.101 @@ -621,9 +620,8 @@ 29.102 } 29.103 #endif //PRODUCT 29.104 29.105 - CodeBuffer dest(dest_blob->instructions_begin(), 29.106 - dest_blob->instructions_size()); 29.107 - assert(dest_blob->instructions_size() >= total_code_size(), "good sizing"); 29.108 + CodeBuffer dest(dest_blob); 29.109 + assert(dest_blob->content_size() >= total_content_size(), "good sizing"); 29.110 this->compute_final_layout(&dest); 29.111 relocate_code_to(&dest); 29.112 29.113 @@ -631,11 +629,10 @@ 29.114 dest_blob->set_comments(_comments); 29.115 29.116 // Done moving code bytes; were they the right size? 29.117 - assert(round_to(dest.total_code_size(), oopSize) == dest_blob->instructions_size(), "sanity"); 29.118 + assert(round_to(dest.total_content_size(), oopSize) == dest_blob->content_size(), "sanity"); 29.119 29.120 // Flush generated code 29.121 - ICache::invalidate_range(dest_blob->instructions_begin(), 29.122 - dest_blob->instructions_size()); 29.123 + ICache::invalidate_range(dest_blob->code_begin(), dest_blob->code_size()); 29.124 } 29.125 29.126 // Move all my code into another code buffer. 
29.127 @@ -844,8 +841,8 @@ 29.128 if (tstart == badAddress) return true; // smashed by set_blob(NULL) 29.129 address tend = tstart + _total_size; 29.130 if (_blob != NULL) { 29.131 - assert(tstart >= _blob->instructions_begin(), "sanity"); 29.132 - assert(tend <= _blob->instructions_end(), "sanity"); 29.133 + assert(tstart >= _blob->content_begin(), "sanity"); 29.134 + assert(tend <= _blob->content_end(), "sanity"); 29.135 } 29.136 address tcheck = tstart; // advancing pointer to verify disjointness 29.137 for (int n = 0; n < (int)SECT_LIMIT; n++) { 29.138 @@ -981,13 +978,13 @@ 29.139 29.140 29.141 void CodeBuffer::decode() { 29.142 - Disassembler::decode(decode_begin(), code_end()); 29.143 - _decode_begin = code_end(); 29.144 + Disassembler::decode(decode_begin(), insts_end()); 29.145 + _decode_begin = insts_end(); 29.146 } 29.147 29.148 29.149 void CodeBuffer::skip_decode() { 29.150 - _decode_begin = code_end(); 29.151 + _decode_begin = insts_end(); 29.152 } 29.153 29.154
30.1 --- a/src/share/vm/asm/codeBuffer.hpp Mon Aug 23 08:44:03 2010 -0700 30.2 +++ b/src/share/vm/asm/codeBuffer.hpp Wed Aug 25 10:31:45 2010 -0700 30.3 @@ -186,6 +186,12 @@ 30.4 _locs_point = pc; 30.5 } 30.6 30.7 + // Code emission 30.8 + void emit_int8 (int8_t x) { *((int8_t*) end()) = x; set_end(end() + 1); } 30.9 + void emit_int16(int16_t x) { *((int16_t*) end()) = x; set_end(end() + 2); } 30.10 + void emit_int32(int32_t x) { *((int32_t*) end()) = x; set_end(end() + 4); } 30.11 + void emit_int64(int64_t x) { *((int64_t*) end()) = x; set_end(end() + 8); } 30.12 + 30.13 // Share a scratch buffer for relocinfo. (Hacky; saves a resource allocation.) 30.14 void initialize_shared_locs(relocInfo* buf, int length); 30.15 30.16 @@ -374,9 +380,17 @@ 30.17 30.18 public: 30.19 // (1) code buffer referring to pre-allocated instruction memory 30.20 - CodeBuffer(address code_start, csize_t code_size); 30.21 + CodeBuffer(address code_start, csize_t code_size) { 30.22 + assert(code_start != NULL, "sanity"); 30.23 + initialize_misc("static buffer"); 30.24 + initialize(code_start, code_size); 30.25 + assert(verify_section_allocation(), "initial use of buffer OK"); 30.26 + } 30.27 30.28 - // (2) code buffer allocating codeBlob memory for code & relocation 30.29 + // (2) CodeBuffer referring to pre-allocated CodeBlob. 30.30 + CodeBuffer(CodeBlob* blob); 30.31 + 30.32 + // (3) code buffer allocating codeBlob memory for code & relocation 30.33 // info but with lazy initialization. The name must be something 30.34 // informative. 30.35 CodeBuffer(const char* name) { 30.36 @@ -384,7 +398,7 @@ 30.37 } 30.38 30.39 30.40 - // (3) code buffer allocating codeBlob memory for code & relocation 30.41 + // (4) code buffer allocating codeBlob memory for code & relocation 30.42 // info. The name must be something informative and code_size must 30.43 // include both code and stubs sizes. 
30.44 CodeBuffer(const char* name, csize_t code_size, csize_t locs_size) { 30.45 @@ -394,8 +408,8 @@ 30.46 30.47 ~CodeBuffer(); 30.48 30.49 - // Initialize a CodeBuffer constructed using constructor 2. Using 30.50 - // constructor 3 is equivalent to calling constructor 2 and then 30.51 + // Initialize a CodeBuffer constructed using constructor 3. Using 30.52 + // constructor 4 is equivalent to calling constructor 3 and then 30.53 // calling this method. It's been factored out for convenience of 30.54 // construction. 30.55 void initialize(csize_t code_size, csize_t locs_size); 30.56 @@ -438,36 +452,37 @@ 30.57 void free_blob(); // Free the blob, if we own one. 30.58 30.59 // Properties relative to the insts section: 30.60 - address code_begin() const { return _insts.start(); } 30.61 - address code_end() const { return _insts.end(); } 30.62 - void set_code_end(address end) { _insts.set_end(end); } 30.63 - address code_limit() const { return _insts.limit(); } 30.64 - address inst_mark() const { return _insts.mark(); } 30.65 - void set_inst_mark() { _insts.set_mark(); } 30.66 - void clear_inst_mark() { _insts.clear_mark(); } 30.67 + address insts_begin() const { return _insts.start(); } 30.68 + address insts_end() const { return _insts.end(); } 30.69 + void set_insts_end(address end) { _insts.set_end(end); } 30.70 + address insts_limit() const { return _insts.limit(); } 30.71 + address insts_mark() const { return _insts.mark(); } 30.72 + void set_insts_mark() { _insts.set_mark(); } 30.73 + void clear_insts_mark() { _insts.clear_mark(); } 30.74 30.75 // is there anything in the buffer other than the current section? 
30.76 - bool is_pure() const { return code_size() == total_code_size(); } 30.77 + bool is_pure() const { return insts_size() == total_content_size(); } 30.78 30.79 // size in bytes of output so far in the insts sections 30.80 - csize_t code_size() const { return _insts.size(); } 30.81 + csize_t insts_size() const { return _insts.size(); } 30.82 30.83 - // same as code_size(), except that it asserts there is no non-code here 30.84 - csize_t pure_code_size() const { assert(is_pure(), "no non-code"); 30.85 - return code_size(); } 30.86 + // same as insts_size(), except that it asserts there is no non-code here 30.87 + csize_t pure_insts_size() const { assert(is_pure(), "no non-code"); 30.88 + return insts_size(); } 30.89 // capacity in bytes of the insts sections 30.90 - csize_t code_capacity() const { return _insts.capacity(); } 30.91 + csize_t insts_capacity() const { return _insts.capacity(); } 30.92 30.93 // number of bytes remaining in the insts section 30.94 - csize_t code_remaining() const { return _insts.remaining(); } 30.95 + csize_t insts_remaining() const { return _insts.remaining(); } 30.96 30.97 // is a given address in the insts section? (2nd version is end-inclusive) 30.98 - bool code_contains(address pc) const { return _insts.contains(pc); } 30.99 - bool code_contains2(address pc) const { return _insts.contains2(pc); } 30.100 + bool insts_contains(address pc) const { return _insts.contains(pc); } 30.101 + bool insts_contains2(address pc) const { return _insts.contains2(pc); } 30.102 30.103 - // allocated size of code in all sections, when aligned and concatenated 30.104 - // (this is the eventual state of the code in its final CodeBlob) 30.105 - csize_t total_code_size() const; 30.106 + // Allocated size in all sections, when aligned and concatenated 30.107 + // (this is the eventual state of the content in its final 30.108 + // CodeBlob). 
30.109 + csize_t total_content_size() const; 30.110 30.111 // combined offset (relative to start of insts) of given address, 30.112 // as eventually found in the final CodeBlob
31.1 --- a/src/share/vm/c1/c1_Compilation.cpp Mon Aug 23 08:44:03 2010 -0700 31.2 +++ b/src/share/vm/c1/c1_Compilation.cpp Wed Aug 25 10:31:45 2010 -0700 31.3 @@ -454,8 +454,7 @@ 31.4 , _allocator(NULL) 31.5 , _next_id(0) 31.6 , _next_block_id(0) 31.7 -, _code(buffer_blob->instructions_begin(), 31.8 - buffer_blob->instructions_size()) 31.9 +, _code(buffer_blob) 31.10 , _current_instruction(NULL) 31.11 #ifndef PRODUCT 31.12 , _last_instruction_printed(NULL)
32.1 --- a/src/share/vm/c1/c1_Runtime1.cpp Mon Aug 23 08:44:03 2010 -0700 32.2 +++ b/src/share/vm/c1/c1_Runtime1.cpp Wed Aug 25 10:31:45 2010 -0700 32.3 @@ -118,8 +118,7 @@ 32.4 assert(0 <= id && id < number_of_ids, "illegal stub id"); 32.5 ResourceMark rm; 32.6 // create code buffer for code storage 32.7 - CodeBuffer code(buffer_blob->instructions_begin(), 32.8 - buffer_blob->instructions_size()); 32.9 + CodeBuffer code(buffer_blob); 32.10 32.11 Compilation::setup_code_buffer(&code, 0); 32.12
33.1 --- a/src/share/vm/c1/c1_Runtime1.hpp Mon Aug 23 08:44:03 2010 -0700 33.2 +++ b/src/share/vm/c1/c1_Runtime1.hpp Wed Aug 25 10:31:45 2010 -0700 33.3 @@ -1,5 +1,5 @@ 33.4 /* 33.5 - * Copyright (c) 1999, 2007, Oracle and/or its affiliates. All rights reserved. 33.6 + * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. 33.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 33.8 * 33.9 * This code is free software; you can redistribute it and/or modify it 33.10 @@ -155,7 +155,7 @@ 33.11 33.12 // stubs 33.13 static CodeBlob* blob_for (StubID id); 33.14 - static address entry_for(StubID id) { return blob_for(id)->instructions_begin(); } 33.15 + static address entry_for(StubID id) { return blob_for(id)->code_begin(); } 33.16 static const char* name_for (StubID id); 33.17 static const char* name_for_address(address entry); 33.18
34.1 --- a/src/share/vm/ci/bcEscapeAnalyzer.cpp Mon Aug 23 08:44:03 2010 -0700 34.2 +++ b/src/share/vm/ci/bcEscapeAnalyzer.cpp Wed Aug 25 10:31:45 2010 -0700 34.3 @@ -92,11 +92,11 @@ 34.4 empty_map.clear(); 34.5 } 34.6 34.7 - ArgumentMap raw_pop() { assert(_stack_height > 0, "stack underflow"); return _stack[--_stack_height]; } 34.8 + ArgumentMap raw_pop() { guarantee(_stack_height > 0, "stack underflow"); return _stack[--_stack_height]; } 34.9 ArgumentMap apop() { return raw_pop(); } 34.10 void spop() { raw_pop(); } 34.11 void lpop() { spop(); spop(); } 34.12 - void raw_push(ArgumentMap i) { assert(_stack_height < _max_stack, "stack overflow"); _stack[_stack_height++] = i; } 34.13 + void raw_push(ArgumentMap i) { guarantee(_stack_height < _max_stack, "stack overflow"); _stack[_stack_height++] = i; } 34.14 void apush(ArgumentMap i) { raw_push(i); } 34.15 void spush() { raw_push(empty_map); } 34.16 void lpush() { spush(); spush(); } 34.17 @@ -365,12 +365,19 @@ 34.18 case Bytecodes::_ldc: 34.19 case Bytecodes::_ldc_w: 34.20 case Bytecodes::_ldc2_w: 34.21 - if (type2size[s.get_constant().basic_type()] == 1) { 34.22 + { 34.23 + // Avoid calling get_constant() which will try to allocate 34.24 + // unloaded constant. We need only constant's type. 34.25 + int index = s.get_constant_pool_index(); 34.26 + constantTag tag = s.get_constant_pool_tag(index); 34.27 + if (tag.is_long() || tag.is_double()) { 34.28 + // Only longs and doubles use 2 stack slots. 34.29 + state.lpush(); 34.30 + } else { 34.31 state.spush(); 34.32 - } else { 34.33 - state.lpush(); 34.34 } 34.35 break; 34.36 + } 34.37 case Bytecodes::_aload: 34.38 state.apush(state._vars[s.get_index()]); 34.39 break;
35.1 --- a/src/share/vm/ci/ciMethod.cpp Mon Aug 23 08:44:03 2010 -0700 35.2 +++ b/src/share/vm/ci/ciMethod.cpp Wed Aug 25 10:31:45 2010 -0700 35.3 @@ -922,12 +922,12 @@ 35.4 35.5 // ------------------------------------------------------------------ 35.6 // ciMethod::instructions_size 35.7 -// This is a rough metric for "fat" methods, compared 35.8 -// before inlining with InlineSmallCode. 35.9 -// The CodeBlob::instructions_size accessor includes 35.10 -// junk like exception handler, stubs, and constant table, 35.11 -// which are not highly relevant to an inlined method. 35.12 -// So we use the more specific accessor nmethod::code_size. 35.13 +// 35.14 +// This is a rough metric for "fat" methods, compared before inlining 35.15 +// with InlineSmallCode. The CodeBlob::code_size accessor includes 35.16 +// junk like exception handler, stubs, and constant table, which are 35.17 +// not highly relevant to an inlined method. So we use the more 35.18 +// specific accessor nmethod::insts_size. 35.19 int ciMethod::instructions_size() { 35.20 GUARDED_VM_ENTRY( 35.21 nmethod* code = get_methodOop()->code(); 35.22 @@ -939,7 +939,7 @@ 35.23 (TieredCompilation && code->compiler() != NULL && code->compiler()->is_c1())) { 35.24 return 0; 35.25 } 35.26 - return code->code_end() - code->verified_entry_point(); 35.27 + return code->insts_end() - code->verified_entry_point(); 35.28 ) 35.29 } 35.30
36.1 --- a/src/share/vm/code/codeBlob.cpp Mon Aug 23 08:44:03 2010 -0700 36.2 +++ b/src/share/vm/code/codeBlob.cpp Wed Aug 25 10:31:45 2010 -0700 36.3 @@ -39,7 +39,7 @@ 36.4 size += round_to(cb->total_relocation_size(), oopSize); 36.5 // align the size to CodeEntryAlignment 36.6 size = align_code_offset(size); 36.7 - size += round_to(cb->total_code_size(), oopSize); 36.8 + size += round_to(cb->total_content_size(), oopSize); 36.9 size += round_to(cb->total_oop_size(), oopSize); 36.10 return size; 36.11 } 36.12 @@ -47,8 +47,8 @@ 36.13 36.14 // Creates a simple CodeBlob. Sets up the size of the different regions. 36.15 CodeBlob::CodeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size) { 36.16 - assert(size == round_to(size, oopSize), "unaligned size"); 36.17 - assert(locs_size == round_to(locs_size, oopSize), "unaligned size"); 36.18 + assert(size == round_to(size, oopSize), "unaligned size"); 36.19 + assert(locs_size == round_to(locs_size, oopSize), "unaligned size"); 36.20 assert(header_size == round_to(header_size, oopSize), "unaligned size"); 36.21 assert(!UseRelocIndex, "no space allocated for reloc index yet"); 36.22 36.23 @@ -64,7 +64,8 @@ 36.24 _frame_complete_offset = frame_complete; 36.25 _header_size = header_size; 36.26 _relocation_size = locs_size; 36.27 - _instructions_offset = align_code_offset(header_size + locs_size); 36.28 + _content_offset = align_code_offset(header_size + _relocation_size); 36.29 + _code_offset = _content_offset; 36.30 _data_offset = size; 36.31 _frame_size = 0; 36.32 set_oop_maps(NULL); 36.33 @@ -82,7 +83,7 @@ 36.34 int frame_size, 36.35 OopMapSet* oop_maps 36.36 ) { 36.37 - assert(size == round_to(size, oopSize), "unaligned size"); 36.38 + assert(size == round_to(size, oopSize), "unaligned size"); 36.39 assert(header_size == round_to(header_size, oopSize), "unaligned size"); 36.40 36.41 _name = name; 36.42 @@ -90,8 +91,9 @@ 36.43 _frame_complete_offset = frame_complete; 36.44 _header_size = 
header_size; 36.45 _relocation_size = round_to(cb->total_relocation_size(), oopSize); 36.46 - _instructions_offset = align_code_offset(header_size + _relocation_size); 36.47 - _data_offset = _instructions_offset + round_to(cb->total_code_size(), oopSize); 36.48 + _content_offset = align_code_offset(header_size + _relocation_size); 36.49 + _code_offset = _content_offset + cb->total_offset_of(cb->insts()->start()); 36.50 + _data_offset = _content_offset + round_to(cb->total_content_size(), oopSize); 36.51 assert(_data_offset <= size, "codeBlob is too small"); 36.52 36.53 cb->copy_code_and_locs_to(this); 36.54 @@ -127,9 +129,8 @@ 36.55 36.56 36.57 OopMap* CodeBlob::oop_map_for_return_address(address return_address) { 36.58 - address pc = return_address ; 36.59 - assert (oop_maps() != NULL, "nope"); 36.60 - return oop_maps()->find_map_at_offset ((intptr_t) pc - (intptr_t) instructions_begin()); 36.61 + assert(oop_maps() != NULL, "nope"); 36.62 + return oop_maps()->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin()); 36.63 } 36.64 36.65 36.66 @@ -284,12 +285,12 @@ 36.67 jio_snprintf(stub_id, sizeof(stub_id), "RuntimeStub - %s", stub_name); 36.68 if (PrintStubCode) { 36.69 tty->print_cr("Decoding %s " INTPTR_FORMAT, stub_id, stub); 36.70 - Disassembler::decode(stub->instructions_begin(), stub->instructions_end()); 36.71 + Disassembler::decode(stub->code_begin(), stub->code_end()); 36.72 } 36.73 - Forte::register_stub(stub_id, stub->instructions_begin(), stub->instructions_end()); 36.74 + Forte::register_stub(stub_id, stub->code_begin(), stub->code_end()); 36.75 36.76 if (JvmtiExport::should_post_dynamic_code_generated()) { 36.77 - JvmtiExport::post_dynamic_code_generated(stub_name, stub->instructions_begin(), stub->instructions_end()); 36.78 + JvmtiExport::post_dynamic_code_generated(stub_name, stub->code_begin(), stub->code_end()); 36.79 } 36.80 } 36.81 36.82 @@ -355,17 +356,15 @@ 36.83 // Do not hold the CodeCache lock during name formatting. 
36.84 if (blob != NULL) { 36.85 char blob_id[256]; 36.86 - jio_snprintf(blob_id, sizeof(blob_id), "DeoptimizationBlob@" PTR_FORMAT, blob->instructions_begin()); 36.87 + jio_snprintf(blob_id, sizeof(blob_id), "DeoptimizationBlob@" PTR_FORMAT, blob->code_begin()); 36.88 if (PrintStubCode) { 36.89 tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob); 36.90 - Disassembler::decode(blob->instructions_begin(), blob->instructions_end()); 36.91 + Disassembler::decode(blob->code_begin(), blob->code_end()); 36.92 } 36.93 - Forte::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end()); 36.94 + Forte::register_stub(blob_id, blob->code_begin(), blob->code_end()); 36.95 36.96 if (JvmtiExport::should_post_dynamic_code_generated()) { 36.97 - JvmtiExport::post_dynamic_code_generated("DeoptimizationBlob", 36.98 - blob->instructions_begin(), 36.99 - blob->instructions_end()); 36.100 + JvmtiExport::post_dynamic_code_generated("DeoptimizationBlob", blob->code_begin(), blob->code_end()); 36.101 } 36.102 } 36.103 36.104 @@ -412,17 +411,15 @@ 36.105 // Do not hold the CodeCache lock during name formatting. 
36.106 if (blob != NULL) { 36.107 char blob_id[256]; 36.108 - jio_snprintf(blob_id, sizeof(blob_id), "UncommonTrapBlob@" PTR_FORMAT, blob->instructions_begin()); 36.109 + jio_snprintf(blob_id, sizeof(blob_id), "UncommonTrapBlob@" PTR_FORMAT, blob->code_begin()); 36.110 if (PrintStubCode) { 36.111 tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob); 36.112 - Disassembler::decode(blob->instructions_begin(), blob->instructions_end()); 36.113 + Disassembler::decode(blob->code_begin(), blob->code_end()); 36.114 } 36.115 - Forte::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end()); 36.116 + Forte::register_stub(blob_id, blob->code_begin(), blob->code_end()); 36.117 36.118 if (JvmtiExport::should_post_dynamic_code_generated()) { 36.119 - JvmtiExport::post_dynamic_code_generated("UncommonTrapBlob", 36.120 - blob->instructions_begin(), 36.121 - blob->instructions_end()); 36.122 + JvmtiExport::post_dynamic_code_generated("UncommonTrapBlob", blob->code_begin(), blob->code_end()); 36.123 } 36.124 } 36.125 36.126 @@ -471,17 +468,15 @@ 36.127 // We do not need to hold the CodeCache lock during name formatting 36.128 if (blob != NULL) { 36.129 char blob_id[256]; 36.130 - jio_snprintf(blob_id, sizeof(blob_id), "ExceptionBlob@" PTR_FORMAT, blob->instructions_begin()); 36.131 + jio_snprintf(blob_id, sizeof(blob_id), "ExceptionBlob@" PTR_FORMAT, blob->code_begin()); 36.132 if (PrintStubCode) { 36.133 tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob); 36.134 - Disassembler::decode(blob->instructions_begin(), blob->instructions_end()); 36.135 + Disassembler::decode(blob->code_begin(), blob->code_end()); 36.136 } 36.137 - Forte::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end()); 36.138 + Forte::register_stub(blob_id, blob->code_begin(), blob->code_end()); 36.139 36.140 if (JvmtiExport::should_post_dynamic_code_generated()) { 36.141 - JvmtiExport::post_dynamic_code_generated("ExceptionBlob", 36.142 - 
blob->instructions_begin(), 36.143 - blob->instructions_end()); 36.144 + JvmtiExport::post_dynamic_code_generated("ExceptionBlob", blob->code_begin(), blob->code_end()); 36.145 } 36.146 } 36.147 36.148 @@ -529,17 +524,15 @@ 36.149 // We do not need to hold the CodeCache lock during name formatting. 36.150 if (blob != NULL) { 36.151 char blob_id[256]; 36.152 - jio_snprintf(blob_id, sizeof(blob_id), "SafepointBlob@" PTR_FORMAT, blob->instructions_begin()); 36.153 + jio_snprintf(blob_id, sizeof(blob_id), "SafepointBlob@" PTR_FORMAT, blob->code_begin()); 36.154 if (PrintStubCode) { 36.155 tty->print_cr("Decoding %s " INTPTR_FORMAT, blob_id, blob); 36.156 - Disassembler::decode(blob->instructions_begin(), blob->instructions_end()); 36.157 + Disassembler::decode(blob->code_begin(), blob->code_end()); 36.158 } 36.159 - Forte::register_stub(blob_id, blob->instructions_begin(), blob->instructions_end()); 36.160 + Forte::register_stub(blob_id, blob->code_begin(), blob->code_end()); 36.161 36.162 if (JvmtiExport::should_post_dynamic_code_generated()) { 36.163 - JvmtiExport::post_dynamic_code_generated("SafepointBlob", 36.164 - blob->instructions_begin(), 36.165 - blob->instructions_end()); 36.166 + JvmtiExport::post_dynamic_code_generated("SafepointBlob", blob->code_begin(), blob->code_end()); 36.167 } 36.168 } 36.169
37.1 --- a/src/share/vm/code/codeBlob.hpp Mon Aug 23 08:44:03 2010 -0700 37.2 +++ b/src/share/vm/code/codeBlob.hpp Wed Aug 25 10:31:45 2010 -0700 37.3 @@ -35,7 +35,8 @@ 37.4 // Layout: 37.5 // - header 37.6 // - relocation 37.7 -// - instruction space 37.8 +// - content space 37.9 +// - instruction space 37.10 // - data space 37.11 class DeoptimizationBlob; 37.12 37.13 @@ -48,7 +49,8 @@ 37.14 int _size; // total size of CodeBlob in bytes 37.15 int _header_size; // size of header (depends on subclass) 37.16 int _relocation_size; // size of relocation 37.17 - int _instructions_offset; // offset to where instructions region begins 37.18 + int _content_offset; // offset to where content region begins (this includes consts, insts, stubs) 37.19 + int _code_offset; // offset to where instructions region begins (this includes insts, stubs) 37.20 int _frame_complete_offset; // instruction offsets in [0.._frame_complete_offset) have 37.21 // not finished setting up their frame. Beware of pc's in 37.22 // that range. 
There is a similar range(s) on returns 37.23 @@ -106,31 +108,36 @@ 37.24 address header_end() const { return ((address) this) + _header_size; }; 37.25 relocInfo* relocation_begin() const { return (relocInfo*) header_end(); }; 37.26 relocInfo* relocation_end() const { return (relocInfo*)(header_end() + _relocation_size); } 37.27 - address instructions_begin() const { return (address) header_begin() + _instructions_offset; } 37.28 - address instructions_end() const { return (address) header_begin() + _data_offset; } 37.29 + address content_begin() const { return (address) header_begin() + _content_offset; } 37.30 + address content_end() const { return (address) header_begin() + _data_offset; } 37.31 + address code_begin() const { return (address) header_begin() + _code_offset; } 37.32 + address code_end() const { return (address) header_begin() + _data_offset; } 37.33 address data_begin() const { return (address) header_begin() + _data_offset; } 37.34 address data_end() const { return (address) header_begin() + _size; } 37.35 37.36 // Offsets 37.37 int relocation_offset() const { return _header_size; } 37.38 - int instructions_offset() const { return _instructions_offset; } 37.39 + int content_offset() const { return _content_offset; } 37.40 + int code_offset() const { return _code_offset; } 37.41 int data_offset() const { return _data_offset; } 37.42 37.43 // Sizes 37.44 int size() const { return _size; } 37.45 int header_size() const { return _header_size; } 37.46 int relocation_size() const { return (address) relocation_end() - (address) relocation_begin(); } 37.47 - int instructions_size() const { return instructions_end() - instructions_begin(); } 37.48 - int data_size() const { return data_end() - data_begin(); } 37.49 + int content_size() const { return content_end() - content_begin(); } 37.50 + int code_size() const { return code_end() - code_begin(); } 37.51 + int data_size() const { return data_end() - data_begin(); } 37.52 37.53 // Containment 37.54 - bool 
blob_contains(address addr) const { return header_begin() <= addr && addr < data_end(); } 37.55 + bool blob_contains(address addr) const { return header_begin() <= addr && addr < data_end(); } 37.56 bool relocation_contains(relocInfo* addr) const{ return relocation_begin() <= addr && addr < relocation_end(); } 37.57 - bool instructions_contains(address addr) const { return instructions_begin() <= addr && addr < instructions_end(); } 37.58 - bool data_contains(address addr) const { return data_begin() <= addr && addr < data_end(); } 37.59 - bool contains(address addr) const { return instructions_contains(addr); } 37.60 - bool is_frame_complete_at(address addr) const { return instructions_contains(addr) && 37.61 - addr >= instructions_begin() + _frame_complete_offset; } 37.62 + bool content_contains(address addr) const { return content_begin() <= addr && addr < content_end(); } 37.63 + bool code_contains(address addr) const { return code_begin() <= addr && addr < code_end(); } 37.64 + bool data_contains(address addr) const { return data_begin() <= addr && addr < data_end(); } 37.65 + bool contains(address addr) const { return content_contains(addr); } 37.66 + bool is_frame_complete_at(address addr) const { return code_contains(addr) && 37.67 + addr >= code_begin() + _frame_complete_offset; } 37.68 37.69 // CodeCache support: really only used by the nmethods, but in order to get 37.70 // asserts and certain bookkeeping to work in the CodeCache they are defined 37.71 @@ -169,7 +176,7 @@ 37.72 37.73 // Print the comment associated with offset on stream, if there is one 37.74 virtual void print_block_comment(outputStream* stream, address block_begin) { 37.75 - intptr_t offset = (intptr_t)(block_begin - instructions_begin()); 37.76 + intptr_t offset = (intptr_t)(block_begin - code_begin()); 37.77 _comments.print_block_comment(stream, offset); 37.78 } 37.79 37.80 @@ -286,7 +293,7 @@ 37.81 // GC support 37.82 bool caller_must_gc_arguments(JavaThread* thread) const { return 
_caller_must_gc_arguments; } 37.83 37.84 - address entry_point() { return instructions_begin(); } 37.85 + address entry_point() { return code_begin(); } 37.86 37.87 // GC/Verification support 37.88 void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* nothing to do */ } 37.89 @@ -313,13 +320,15 @@ 37.90 OopMapSet* oop_maps 37.91 ) 37.92 : CodeBlob(name, cb, header_size, size, CodeOffsets::frame_never_safe, frame_size, oop_maps) 37.93 - {}; 37.94 + {}; 37.95 37.96 - bool is_alive() const { return true; } 37.97 + address entry_point() { return code_begin(); } 37.98 37.99 - void verify(); // does nothing 37.100 - void print_on(outputStream* st) const; 37.101 - void print_value_on(outputStream* st) const; 37.102 + bool is_alive() const { return true; } 37.103 + 37.104 + void verify(); // does nothing 37.105 + void print_on(outputStream* st) const; 37.106 + void print_value_on(outputStream* st) const; 37.107 }; 37.108 37.109 37.110 @@ -376,9 +385,9 @@ 37.111 // Printing 37.112 void print_value_on(outputStream* st) const; 37.113 37.114 - address unpack() const { return instructions_begin() + _unpack_offset; } 37.115 - address unpack_with_exception() const { return instructions_begin() + _unpack_with_exception; } 37.116 - address unpack_with_reexecution() const { return instructions_begin() + _unpack_with_reexecution; } 37.117 + address unpack() const { return code_begin() + _unpack_offset; } 37.118 + address unpack_with_exception() const { return code_begin() + _unpack_with_exception; } 37.119 + address unpack_with_reexecution() const { return code_begin() + _unpack_with_reexecution; } 37.120 37.121 // Alternate entry point for C1 where the exception and issuing pc 37.122 // are in JavaThread::_exception_oop and JavaThread::_exception_pc 37.123 @@ -387,9 +396,9 @@ 37.124 // there may be live values in those registers during deopt. 
37.125 void set_unpack_with_exception_in_tls_offset(int offset) { 37.126 _unpack_with_exception_in_tls = offset; 37.127 - assert(contains(instructions_begin() + _unpack_with_exception_in_tls), "must be PC inside codeblob"); 37.128 + assert(code_contains(code_begin() + _unpack_with_exception_in_tls), "must be PC inside codeblob"); 37.129 } 37.130 - address unpack_with_exception_in_tls() const { return instructions_begin() + _unpack_with_exception_in_tls; } 37.131 + address unpack_with_exception_in_tls() const { return code_begin() + _unpack_with_exception_in_tls; } 37.132 }; 37.133 37.134
38.1 --- a/src/share/vm/code/codeCache.cpp Mon Aug 23 08:44:03 2010 -0700 38.2 +++ b/src/share/vm/code/codeCache.cpp Wed Aug 25 10:31:45 2010 -0700 38.3 @@ -76,14 +76,14 @@ 38.4 relocation_size += cb->relocation_size(); 38.5 if (cb->is_nmethod()) { 38.6 nmethod* nm = cb->as_nmethod_or_null(); 38.7 - code_size += nm->code_size(); 38.8 + code_size += nm->insts_size(); 38.9 stub_size += nm->stub_size(); 38.10 38.11 scopes_oop_size += nm->oops_size(); 38.12 scopes_data_size += nm->scopes_data_size(); 38.13 scopes_pcs_size += nm->scopes_pcs_size(); 38.14 } else { 38.15 - code_size += cb->instructions_size(); 38.16 + code_size += cb->code_size(); 38.17 } 38.18 } 38.19 }; 38.20 @@ -210,7 +210,7 @@ 38.21 } 38.22 38.23 // flush the hardware I-cache 38.24 - ICache::invalidate_range(cb->instructions_begin(), cb->instructions_size()); 38.25 + ICache::invalidate_range(cb->content_begin(), cb->content_size()); 38.26 } 38.27 38.28 38.29 @@ -804,8 +804,8 @@ 38.30 38.31 if(nm->method() != NULL && nm->is_java_method()) { 38.32 nmethodJava++; 38.33 - if(nm->code_size() > maxCodeSize) { 38.34 - maxCodeSize = nm->code_size(); 38.35 + if (nm->insts_size() > maxCodeSize) { 38.36 + maxCodeSize = nm->insts_size(); 38.37 } 38.38 } 38.39 } else if (cb->is_runtime_stub()) { 38.40 @@ -830,7 +830,7 @@ 38.41 if (cb->is_nmethod()) { 38.42 nmethod* nm = (nmethod*)cb; 38.43 if(nm->is_java_method()) { 38.44 - buckets[nm->code_size() / bucketSize]++; 38.45 + buckets[nm->insts_size() / bucketSize]++; 38.46 } 38.47 } 38.48 } 38.49 @@ -896,11 +896,11 @@ 38.50 FOR_ALL_BLOBS(p) { 38.51 if (p->is_alive()) { 38.52 number_of_blobs++; 38.53 - code_size += p->instructions_size(); 38.54 + code_size += p->code_size(); 38.55 OopMapSet* set = p->oop_maps(); 38.56 if (set != NULL) { 38.57 number_of_oop_maps += set->size(); 38.58 - map_size += set->heap_size(); 38.59 + map_size += set->heap_size(); 38.60 } 38.61 } 38.62 }
39.1 --- a/src/share/vm/code/exceptionHandlerTable.cpp Mon Aug 23 08:44:03 2010 -0700 39.2 +++ b/src/share/vm/code/exceptionHandlerTable.cpp Wed Aug 25 10:31:45 2010 -0700 39.3 @@ -1,5 +1,5 @@ 39.4 /* 39.5 - * Copyright (c) 1998, 2005, Oracle and/or its affiliates. All rights reserved. 39.6 + * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. 39.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 39.8 * 39.9 * This code is free software; you can redistribute it and/or modify it 39.10 @@ -219,8 +219,8 @@ 39.11 39.12 void ImplicitExceptionTable::verify(nmethod *nm) const { 39.13 for (uint i = 0; i < len(); i++) { 39.14 - if ((*adr(i) > (unsigned int)nm->code_size()) || 39.15 - (*(adr(i)+1) > (unsigned int)nm->code_size())) 39.16 + if ((*adr(i) > (unsigned int)nm->insts_size()) || 39.17 + (*(adr(i)+1) > (unsigned int)nm->insts_size())) 39.18 fatal(err_msg("Invalid offset in ImplicitExceptionTable at " PTR_FORMAT, _data)); 39.19 } 39.20 }
40.1 --- a/src/share/vm/code/nmethod.cpp Mon Aug 23 08:44:03 2010 -0700 40.2 +++ b/src/share/vm/code/nmethod.cpp Wed Aug 25 10:31:45 2010 -0700 40.3 @@ -87,7 +87,7 @@ 40.4 int nmethod_count; 40.5 int total_size; 40.6 int relocation_size; 40.7 - int code_size; 40.8 + int insts_size; 40.9 int stub_size; 40.10 int consts_size; 40.11 int scopes_data_size; 40.12 @@ -101,7 +101,7 @@ 40.13 nmethod_count += 1; 40.14 total_size += nm->size(); 40.15 relocation_size += nm->relocation_size(); 40.16 - code_size += nm->code_size(); 40.17 + insts_size += nm->insts_size(); 40.18 stub_size += nm->stub_size(); 40.19 consts_size += nm->consts_size(); 40.20 oops_size += nm->oops_size(); 40.21 @@ -116,7 +116,7 @@ 40.22 tty->print_cr("Statistics for %d bytecoded nmethods:", nmethod_count); 40.23 if (total_size != 0) tty->print_cr(" total in heap = %d", total_size); 40.24 if (relocation_size != 0) tty->print_cr(" relocation = %d", relocation_size); 40.25 - if (code_size != 0) tty->print_cr(" main code = %d", code_size); 40.26 + if (insts_size != 0) tty->print_cr(" main code = %d", insts_size); 40.27 if (stub_size != 0) tty->print_cr(" stub code = %d", stub_size); 40.28 if (consts_size != 0) tty->print_cr(" constants = %d", consts_size); 40.29 if (oops_size != 0) tty->print_cr(" oops = %d", oops_size); 40.30 @@ -130,13 +130,13 @@ 40.31 int native_nmethod_count; 40.32 int native_total_size; 40.33 int native_relocation_size; 40.34 - int native_code_size; 40.35 + int native_insts_size; 40.36 int native_oops_size; 40.37 void note_native_nmethod(nmethod* nm) { 40.38 native_nmethod_count += 1; 40.39 native_total_size += nm->size(); 40.40 native_relocation_size += nm->relocation_size(); 40.41 - native_code_size += nm->code_size(); 40.42 + native_insts_size += nm->insts_size(); 40.43 native_oops_size += nm->oops_size(); 40.44 } 40.45 void print_native_nmethod_stats() { 40.46 @@ -144,7 +144,7 @@ 40.47 tty->print_cr("Statistics for %d native nmethods:", native_nmethod_count); 40.48 if 
(native_total_size != 0) tty->print_cr(" N. total size = %d", native_total_size); 40.49 if (native_relocation_size != 0) tty->print_cr(" N. relocation = %d", native_relocation_size); 40.50 - if (native_code_size != 0) tty->print_cr(" N. main code = %d", native_code_size); 40.51 + if (native_insts_size != 0) tty->print_cr(" N. main code = %d", native_insts_size); 40.52 if (native_oops_size != 0) tty->print_cr(" N. oops = %d", native_oops_size); 40.53 } 40.54 40.55 @@ -404,7 +404,7 @@ 40.56 40.57 int nmethod::total_size() const { 40.58 return 40.59 - code_size() + 40.60 + insts_size() + 40.61 stub_size() + 40.62 consts_size() + 40.63 scopes_data_size() + 40.64 @@ -618,8 +618,8 @@ 40.65 _deoptimize_mh_offset = 0; 40.66 _orig_pc_offset = 0; 40.67 40.68 + _consts_offset = data_offset(); 40.69 _stub_offset = data_offset(); 40.70 - _consts_offset = data_offset(); 40.71 _oops_offset = data_offset(); 40.72 _scopes_data_offset = _oops_offset + round_to(code_buffer->total_oop_size(), oopSize); 40.73 _scopes_pcs_offset = _scopes_data_offset; 40.74 @@ -629,8 +629,8 @@ 40.75 _nmethod_end_offset = _nul_chk_table_offset; 40.76 _compile_id = 0; // default 40.77 _comp_level = CompLevel_none; 40.78 - _entry_point = instructions_begin(); 40.79 - _verified_entry_point = instructions_begin() + offsets->value(CodeOffsets::Verified_Entry); 40.80 + _entry_point = code_begin() + offsets->value(CodeOffsets::Entry); 40.81 + _verified_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Entry); 40.82 _osr_entry_point = NULL; 40.83 _exception_cache = NULL; 40.84 _pc_desc_cache.reset_to(NULL); 40.85 @@ -696,8 +696,8 @@ 40.86 _unwind_handler_offset = -1; 40.87 _trap_offset = offsets->value(CodeOffsets::Dtrace_trap); 40.88 _orig_pc_offset = 0; 40.89 + _consts_offset = data_offset(); 40.90 _stub_offset = data_offset(); 40.91 - _consts_offset = data_offset(); 40.92 _oops_offset = data_offset(); 40.93 _scopes_data_offset = _oops_offset + round_to(code_buffer->total_oop_size(), oopSize); 
40.94 _scopes_pcs_offset = _scopes_data_offset; 40.95 @@ -707,8 +707,8 @@ 40.96 _nmethod_end_offset = _nul_chk_table_offset; 40.97 _compile_id = 0; // default 40.98 _comp_level = CompLevel_none; 40.99 - _entry_point = instructions_begin(); 40.100 - _verified_entry_point = instructions_begin() + offsets->value(CodeOffsets::Verified_Entry); 40.101 + _entry_point = code_begin() + offsets->value(CodeOffsets::Entry); 40.102 + _verified_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Entry); 40.103 _osr_entry_point = NULL; 40.104 _exception_cache = NULL; 40.105 _pc_desc_cache.reset_to(NULL); 40.106 @@ -787,18 +787,21 @@ 40.107 _comp_level = comp_level; 40.108 _compiler = compiler; 40.109 _orig_pc_offset = orig_pc_offset; 40.110 - _stub_offset = instructions_offset() + code_buffer->total_offset_of(code_buffer->stubs()->start()); 40.111 + 40.112 + // Section offsets 40.113 + _consts_offset = content_offset() + code_buffer->total_offset_of(code_buffer->consts()->start()); 40.114 + _stub_offset = content_offset() + code_buffer->total_offset_of(code_buffer->stubs()->start()); 40.115 40.116 // Exception handler and deopt handler are in the stub section 40.117 - _exception_offset = _stub_offset + offsets->value(CodeOffsets::Exceptions); 40.118 - _deoptimize_offset = _stub_offset + offsets->value(CodeOffsets::Deopt); 40.119 - _deoptimize_mh_offset = _stub_offset + offsets->value(CodeOffsets::DeoptMH); 40.120 + _exception_offset = _stub_offset + offsets->value(CodeOffsets::Exceptions); 40.121 + _deoptimize_offset = _stub_offset + offsets->value(CodeOffsets::Deopt); 40.122 + _deoptimize_mh_offset = _stub_offset + offsets->value(CodeOffsets::DeoptMH); 40.123 if (offsets->value(CodeOffsets::UnwindHandler) != -1) { 40.124 - _unwind_handler_offset = instructions_offset() + offsets->value(CodeOffsets::UnwindHandler); 40.125 + _unwind_handler_offset = code_offset() + offsets->value(CodeOffsets::UnwindHandler); 40.126 } else { 40.127 - _unwind_handler_offset = -1; 
40.128 + _unwind_handler_offset = -1; 40.129 } 40.130 - _consts_offset = instructions_offset() + code_buffer->total_offset_of(code_buffer->consts()->start()); 40.131 + 40.132 _oops_offset = data_offset(); 40.133 _scopes_data_offset = _oops_offset + round_to(code_buffer->total_oop_size (), oopSize); 40.134 _scopes_pcs_offset = _scopes_data_offset + round_to(debug_info->data_size (), oopSize); 40.135 @@ -807,9 +810,9 @@ 40.136 _nul_chk_table_offset = _handler_table_offset + round_to(handler_table->size_in_bytes(), oopSize); 40.137 _nmethod_end_offset = _nul_chk_table_offset + round_to(nul_chk_table->size_in_bytes(), oopSize); 40.138 40.139 - _entry_point = instructions_begin(); 40.140 - _verified_entry_point = instructions_begin() + offsets->value(CodeOffsets::Verified_Entry); 40.141 - _osr_entry_point = instructions_begin() + offsets->value(CodeOffsets::OSR_Entry); 40.142 + _entry_point = code_begin() + offsets->value(CodeOffsets::Entry); 40.143 + _verified_entry_point = code_begin() + offsets->value(CodeOffsets::Verified_Entry); 40.144 + _osr_entry_point = code_begin() + offsets->value(CodeOffsets::OSR_Entry); 40.145 _exception_cache = NULL; 40.146 _pc_desc_cache.reset_to(scopes_pcs_begin()); 40.147 40.148 @@ -878,12 +881,11 @@ 40.149 HandleMark hm; 40.150 xtty->begin_elem("nmethod"); 40.151 log_identity(xtty); 40.152 - xtty->print(" entry='" INTPTR_FORMAT "' size='%d'", 40.153 - instructions_begin(), size()); 40.154 + xtty->print(" entry='" INTPTR_FORMAT "' size='%d'", code_begin(), size()); 40.155 xtty->print(" address='" INTPTR_FORMAT "'", (intptr_t) this); 40.156 40.157 LOG_OFFSET(xtty, relocation); 40.158 - LOG_OFFSET(xtty, code); 40.159 + LOG_OFFSET(xtty, insts); 40.160 LOG_OFFSET(xtty, stub); 40.161 LOG_OFFSET(xtty, consts); 40.162 LOG_OFFSET(xtty, scopes_data); 40.163 @@ -1460,7 +1462,7 @@ 40.164 moop->name()->utf8_length(), 40.165 moop->signature()->bytes(), 40.166 moop->signature()->utf8_length(), 40.167 - code_begin(), code_size()); 40.168 + 
insts_begin(), insts_size()); 40.169 40.170 if (JvmtiExport::should_post_compiled_method_load() || 40.171 JvmtiExport::should_post_compiled_method_unload()) { 40.172 @@ -1502,7 +1504,7 @@ 40.173 if (_jmethod_id != NULL && JvmtiExport::should_post_compiled_method_unload()) { 40.174 assert(!unload_reported(), "already unloaded"); 40.175 HandleMark hm; 40.176 - JvmtiExport::post_compiled_method_unload(_jmethod_id, code_begin()); 40.177 + JvmtiExport::post_compiled_method_unload(_jmethod_id, insts_begin()); 40.178 } 40.179 40.180 // The JVMTI CompiledMethodUnload event can be enabled or disabled at 40.181 @@ -1854,7 +1856,7 @@ 40.182 // Adjust the final sentinel downward. 40.183 PcDesc* last_pc = &scopes_pcs_begin()[count-1]; 40.184 assert(last_pc->pc_offset() == PcDesc::upper_offset_limit, "sanity"); 40.185 - last_pc->set_pc_offset(instructions_size() + 1); 40.186 + last_pc->set_pc_offset(content_size() + 1); 40.187 for (; last_pc + 1 < scopes_pcs_end(); last_pc += 1) { 40.188 // Fill any rounding gaps with copies of the last record. 
40.189 last_pc[1] = last_pc[0]; 40.190 @@ -1894,7 +1896,7 @@ 40.191 40.192 // Finds a PcDesc with real-pc equal to "pc" 40.193 PcDesc* nmethod::find_pc_desc_internal(address pc, bool approximate) { 40.194 - address base_address = instructions_begin(); 40.195 + address base_address = code_begin(); 40.196 if ((pc < base_address) || 40.197 (pc - base_address) >= (ptrdiff_t) PcDesc::upper_offset_limit) { 40.198 return NULL; // PC is wildly out of range 40.199 @@ -2042,7 +2044,7 @@ 40.200 40.201 40.202 bool nmethod::is_patchable_at(address instr_addr) { 40.203 - assert (code_contains(instr_addr), "wrong nmethod used"); 40.204 + assert(insts_contains(instr_addr), "wrong nmethod used"); 40.205 if (is_zombie()) { 40.206 // a zombie may never be patched 40.207 return false; 40.208 @@ -2054,7 +2056,7 @@ 40.209 address nmethod::continuation_for_implicit_exception(address pc) { 40.210 // Exception happened outside inline-cache check code => we are inside 40.211 // an active nmethod => use cpc to determine a return address 40.212 - int exception_offset = pc - instructions_begin(); 40.213 + int exception_offset = pc - code_begin(); 40.214 int cont_offset = ImplicitExceptionTable(this).at( exception_offset ); 40.215 #ifdef ASSERT 40.216 if (cont_offset == 0) { 40.217 @@ -2075,7 +2077,7 @@ 40.218 // Let the normal error handling report the exception 40.219 return NULL; 40.220 } 40.221 - return instructions_begin() + cont_offset; 40.222 + return code_begin() + cont_offset; 40.223 } 40.224 40.225 40.226 @@ -2334,10 +2336,10 @@ 40.227 relocation_begin(), 40.228 relocation_end(), 40.229 relocation_size()); 40.230 - if (code_size () > 0) tty->print_cr(" main code [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", 40.231 - code_begin(), 40.232 - code_end(), 40.233 - code_size()); 40.234 + if (insts_size () > 0) tty->print_cr(" main code [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", 40.235 + insts_begin(), 40.236 + insts_end(), 40.237 + insts_size()); 40.238 if (stub_size () > 0) 
tty->print_cr(" stub code [" INTPTR_FORMAT "," INTPTR_FORMAT "] = %d", 40.239 stub_begin(), 40.240 stub_end(), 40.241 @@ -2607,7 +2609,7 @@ 40.242 // First, find an oopmap in (begin, end]. 40.243 // We use the odd half-closed interval so that oop maps and scope descs 40.244 // which are tied to the byte after a call are printed with the call itself. 40.245 - address base = instructions_begin(); 40.246 + address base = code_begin(); 40.247 OopMapSet* oms = oop_maps(); 40.248 if (oms != NULL) { 40.249 for (int i = 0, imax = oms->size(); i < imax; i++) { 40.250 @@ -2695,10 +2697,10 @@ 40.251 st->move_to(column); 40.252 st->print("; {%s}", str); 40.253 } 40.254 - int cont_offset = ImplicitExceptionTable(this).at(begin - instructions_begin()); 40.255 + int cont_offset = ImplicitExceptionTable(this).at(begin - code_begin()); 40.256 if (cont_offset != 0) { 40.257 st->move_to(column); 40.258 - st->print("; implicit exception: dispatches to " INTPTR_FORMAT, instructions_begin() + cont_offset); 40.259 + st->print("; implicit exception: dispatches to " INTPTR_FORMAT, code_begin() + cont_offset); 40.260 } 40.261 40.262 } 40.263 @@ -2732,7 +2734,7 @@ 40.264 } 40.265 40.266 void nmethod::print_nul_chk_table() { 40.267 - ImplicitExceptionTable(this).print(instructions_begin()); 40.268 + ImplicitExceptionTable(this).print(code_begin()); 40.269 } 40.270 40.271 void nmethod::print_statistics() {
41.1 --- a/src/share/vm/code/nmethod.hpp Mon Aug 23 08:44:03 2010 -0700 41.2 +++ b/src/share/vm/code/nmethod.hpp Wed Aug 25 10:31:45 2010 -0700 41.3 @@ -312,7 +312,7 @@ 41.4 int frame_size); 41.5 41.6 int trap_offset() const { return _trap_offset; } 41.7 - address trap_address() const { return code_begin() + _trap_offset; } 41.8 + address trap_address() const { return insts_begin() + _trap_offset; } 41.9 41.10 #endif // def HAVE_DTRACE_H 41.11 41.12 @@ -336,8 +336,8 @@ 41.13 bool is_compiled_by_shark() const; 41.14 41.15 // boundaries for different parts 41.16 - address code_begin () const { return _entry_point; } 41.17 - address code_end () const { return header_begin() + _stub_offset ; } 41.18 + address insts_begin () const { return code_begin(); } 41.19 + address insts_end () const { return header_begin() + _stub_offset ; } 41.20 address exception_begin () const { return header_begin() + _exception_offset ; } 41.21 address deopt_handler_begin () const { return header_begin() + _deoptimize_offset ; } 41.22 address deopt_mh_handler_begin() const { return header_begin() + _deoptimize_mh_offset ; } 41.23 @@ -361,7 +361,7 @@ 41.24 address nul_chk_table_end () const { return header_begin() + _nmethod_end_offset ; } 41.25 41.26 // Sizes 41.27 - int code_size () const { return code_end () - code_begin (); } 41.28 + int insts_size () const { return insts_end () - insts_begin (); } 41.29 int stub_size () const { return stub_end () - stub_begin (); } 41.30 int consts_size () const { return consts_end () - consts_begin (); } 41.31 int oops_size () const { return (address) oops_end () - (address) oops_begin (); } 41.32 @@ -374,7 +374,7 @@ 41.33 int total_size () const; 41.34 41.35 // Containment 41.36 - bool code_contains (address addr) const { return code_begin () <= addr && addr < code_end (); } 41.37 + bool insts_contains (address addr) const { return insts_begin () <= addr && addr < insts_end (); } 41.38 bool stub_contains (address addr) const { return stub_begin () <= 
addr && addr < stub_end (); } 41.39 bool consts_contains (address addr) const { return consts_begin () <= addr && addr < consts_end (); } 41.40 bool oops_contains (oop* addr) const { return oops_begin () <= addr && addr < oops_end (); } 41.41 @@ -506,7 +506,7 @@ 41.42 void clear_inline_caches(); 41.43 void cleanup_inline_caches(); 41.44 bool inlinecache_check_contains(address addr) const { 41.45 - return (addr >= instructions_begin() && addr < verified_entry_point()); 41.46 + return (addr >= code_begin() && addr < verified_entry_point()); 41.47 } 41.48 41.49 // unlink and deallocate this nmethod 41.50 @@ -559,7 +559,7 @@ 41.51 41.52 PcDesc* find_pc_desc(address pc, bool approximate) { 41.53 PcDesc* desc = _pc_desc_cache.last_pc_desc(); 41.54 - if (desc != NULL && desc->pc_offset() == pc - instructions_begin()) { 41.55 + if (desc != NULL && desc->pc_offset() == pc - code_begin()) { 41.56 return desc; 41.57 } 41.58 return find_pc_desc_internal(pc, approximate);
42.1 --- a/src/share/vm/code/pcDesc.cpp Mon Aug 23 08:44:03 2010 -0700 42.2 +++ b/src/share/vm/code/pcDesc.cpp Wed Aug 25 10:31:45 2010 -0700 42.3 @@ -34,7 +34,7 @@ 42.4 } 42.5 42.6 address PcDesc::real_pc(const nmethod* code) const { 42.7 - return code->instructions_begin() + pc_offset(); 42.8 + return code->code_begin() + pc_offset(); 42.9 } 42.10 42.11 void PcDesc::print(nmethod* code) {
43.1 --- a/src/share/vm/code/relocInfo.cpp Mon Aug 23 08:44:03 2010 -0700 43.2 +++ b/src/share/vm/code/relocInfo.cpp Wed Aug 25 10:31:45 2010 -0700 43.3 @@ -128,13 +128,11 @@ 43.4 _code = nm; 43.5 _current = nm->relocation_begin() - 1; 43.6 _end = nm->relocation_end(); 43.7 - _addr = (address) nm->instructions_begin(); 43.8 + _addr = (address) nm->code_begin(); 43.9 43.10 assert(!has_current(), "just checking"); 43.11 - address code_end = nm->instructions_end(); 43.12 - 43.13 - assert(begin == NULL || begin >= nm->instructions_begin(), "in bounds"); 43.14 - // FIX THIS assert(limit == NULL || limit <= code_end, "in bounds"); 43.15 + assert(begin == NULL || begin >= nm->code_begin(), "in bounds"); 43.16 + assert(limit == NULL || limit <= nm->code_end(), "in bounds"); 43.17 set_limits(begin, limit); 43.18 } 43.19 43.20 @@ -267,7 +265,7 @@ 43.21 // skip ahead 43.22 RelocIndexEntry* index = (RelocIndexEntry*)_end; 43.23 RelocIndexEntry* index_limit = (RelocIndexEntry*)((address)index + index_size); 43.24 - assert(_addr == _code->instructions_begin(), "_addr must be unadjusted"); 43.25 + assert(_addr == _code->code_begin(), "_addr must be unadjusted"); 43.26 int card = (begin - _addr) / indexCardSize; 43.27 if (card > 0) { 43.28 if (index+card-1 < index_limit) index += card-1; 43.29 @@ -369,7 +367,7 @@ 43.30 CodeBlob* cb = code(); 43.31 guarantee(cb != NULL, "must have a code blob"); 43.32 if (n == CodeBuffer::SECT_INSTS) 43.33 - return CACHE = cb->instructions_begin(); 43.34 + return CACHE = cb->code_begin(); 43.35 assert(cb->is_nmethod(), "only nmethods have these sections"); 43.36 nmethod* nm = (nmethod*) cb; 43.37 address res = NULL; 43.38 @@ -383,7 +381,7 @@ 43.39 default: 43.40 ShouldNotReachHere(); 43.41 } 43.42 - assert(nm->contains(res) || res == nm->instructions_end(), "tame pointer"); 43.43 + assert(nm->contains(res) || res == nm->code_end(), "tame pointer"); 43.44 CACHE = res; 43.45 return res; 43.46 #undef CACHE
44.1 --- a/src/share/vm/code/scopeDesc.cpp Mon Aug 23 08:44:03 2010 -0700 44.2 +++ b/src/share/vm/code/scopeDesc.cpp Wed Aug 25 10:31:45 2010 -0700 44.3 @@ -1,5 +1,5 @@ 44.4 /* 44.5 - * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved. 44.6 + * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 44.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 44.8 * 44.9 * This code is free software; you can redistribute it and/or modify it 44.10 @@ -174,7 +174,7 @@ 44.11 print_value_on(st); 44.12 // decode offsets 44.13 if (WizardMode) { 44.14 - st->print("ScopeDesc[%d]@" PTR_FORMAT " ", _decode_offset, _code->instructions_begin()); 44.15 + st->print("ScopeDesc[%d]@" PTR_FORMAT " ", _decode_offset, _code->content_begin()); 44.16 st->print_cr(" offset: %d", _decode_offset); 44.17 st->print_cr(" bci: %d", bci()); 44.18 st->print_cr(" reexecute: %s", should_reexecute() ? "true" : "false");
45.1 --- a/src/share/vm/code/stubs.cpp Mon Aug 23 08:44:03 2010 -0700 45.2 +++ b/src/share/vm/code/stubs.cpp Wed Aug 25 10:31:45 2010 -0700 45.3 @@ -1,5 +1,5 @@ 45.4 /* 45.5 - * Copyright (c) 1997, 2005, Oracle and/or its affiliates. All rights reserved. 45.6 + * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 45.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 45.8 * 45.9 * This code is free software; you can redistribute it and/or modify it 45.10 @@ -66,9 +66,9 @@ 45.11 vm_exit_out_of_memory(size, err_msg("CodeCache: no room for %s", name)); 45.12 } 45.13 _stub_interface = stub_interface; 45.14 - _buffer_size = blob->instructions_size(); 45.15 - _buffer_limit = blob->instructions_size(); 45.16 - _stub_buffer = blob->instructions_begin(); 45.17 + _buffer_size = blob->content_size(); 45.18 + _buffer_limit = blob->content_size(); 45.19 + _stub_buffer = blob->content_begin(); 45.20 _queue_begin = 0; 45.21 _queue_end = 0; 45.22 _number_of_stubs = 0;
46.1 --- a/src/share/vm/code/vtableStubs.cpp Mon Aug 23 08:44:03 2010 -0700 46.2 +++ b/src/share/vm/code/vtableStubs.cpp Wed Aug 25 10:31:45 2010 -0700 46.3 @@ -48,7 +48,7 @@ 46.4 if (blob == NULL) { 46.5 vm_exit_out_of_memory(bytes, "CodeCache: no room for vtable chunks"); 46.6 } 46.7 - _chunk = blob->instructions_begin(); 46.8 + _chunk = blob->content_begin(); 46.9 _chunk_end = _chunk + bytes; 46.10 Forte::register_stub("vtable stub", _chunk, _chunk_end); 46.11 // Notify JVMTI about this stub. The event will be recorded by the enclosing
47.1 --- a/src/share/vm/compiler/compileBroker.cpp Mon Aug 23 08:44:03 2010 -0700 47.2 +++ b/src/share/vm/compiler/compileBroker.cpp Wed Aug 25 10:31:45 2010 -0700 47.3 @@ -399,7 +399,7 @@ 47.4 // <task_done ... stamp='1.234'> </task> 47.5 nmethod* nm = code(); 47.6 log->begin_elem("task_done success='%d' nmsize='%d' count='%d'", 47.7 - _is_success, nm == NULL ? 0 : nm->instructions_size(), 47.8 + _is_success, nm == NULL ? 0 : nm->content_size(), 47.9 method->invocation_count()); 47.10 int bec = method->backedge_count(); 47.11 if (bec != 0) log->print(" backedge_count='%d'", bec); 47.12 @@ -1847,13 +1847,13 @@ 47.13 } 47.14 47.15 // Collect counts of successful compilations 47.16 - _sum_nmethod_size += code->total_size(); 47.17 - _sum_nmethod_code_size += code->code_size(); 47.18 + _sum_nmethod_size += code->total_size(); 47.19 + _sum_nmethod_code_size += code->insts_size(); 47.20 _total_compile_count++; 47.21 47.22 if (UsePerfData) { 47.23 - _perf_sum_nmethod_size->inc(code->total_size()); 47.24 - _perf_sum_nmethod_code_size->inc(code->code_size()); 47.25 + _perf_sum_nmethod_size->inc( code->total_size()); 47.26 + _perf_sum_nmethod_code_size->inc(code->insts_size()); 47.27 _perf_total_compile_count->inc(); 47.28 } 47.29
48.1 --- a/src/share/vm/compiler/disassembler.cpp Mon Aug 23 08:44:03 2010 -0700 48.2 +++ b/src/share/vm/compiler/disassembler.cpp Wed Aug 25 10:31:45 2010 -0700 48.3 @@ -407,7 +407,7 @@ 48.4 if (!load_library()) return; 48.5 decode_env env(cb, st); 48.6 env.output()->print_cr("Decoding CodeBlob " INTPTR_FORMAT, cb); 48.7 - env.decode_instructions(cb->instructions_begin(), cb->instructions_end()); 48.8 + env.decode_instructions(cb->code_begin(), cb->code_end()); 48.9 } 48.10 48.11 48.12 @@ -424,12 +424,12 @@ 48.13 env.output()->print_cr("Code:"); 48.14 48.15 #ifdef SHARK 48.16 - SharkEntry* entry = (SharkEntry *) nm->instructions_begin(); 48.17 - unsigned char* p = entry->code_start(); 48.18 + SharkEntry* entry = (SharkEntry *) nm->code_begin(); 48.19 + unsigned char* p = entry->code_start(); 48.20 unsigned char* end = entry->code_limit(); 48.21 #else 48.22 - unsigned char* p = nm->instructions_begin(); 48.23 - unsigned char* end = nm->instructions_end(); 48.24 + unsigned char* p = nm->code_begin(); 48.25 + unsigned char* end = nm->code_end(); 48.26 #endif // SHARK 48.27 48.28 // If there has been profiling, print the buckets.
49.1 --- a/src/share/vm/includeDB_compiler2 Mon Aug 23 08:44:03 2010 -0700 49.2 +++ b/src/share/vm/includeDB_compiler2 Wed Aug 25 10:31:45 2010 -0700 49.3 @@ -504,6 +504,7 @@ 49.4 graphKit.hpp callnode.hpp 49.5 graphKit.hpp cfgnode.hpp 49.6 graphKit.hpp ciEnv.hpp 49.7 +graphKit.hpp ciMethodData.hpp 49.8 graphKit.hpp divnode.hpp 49.9 graphKit.hpp compile.hpp 49.10 graphKit.hpp deoptimization.hpp
50.1 --- a/src/share/vm/interpreter/interpreter.hpp Mon Aug 23 08:44:03 2010 -0700 50.2 +++ b/src/share/vm/interpreter/interpreter.hpp Wed Aug 25 10:31:45 2010 -0700 50.3 @@ -1,5 +1,5 @@ 50.4 /* 50.5 - * Copyright (c) 1997, 2007, Oracle and/or its affiliates. All rights reserved. 50.6 + * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 50.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 50.8 * 50.9 * This code is free software; you can redistribute it and/or modify it 50.10 @@ -117,7 +117,7 @@ 50.11 50.12 50.13 // commit Codelet 50.14 - AbstractInterpreter::code()->commit((*_masm)->code()->pure_code_size()); 50.15 + AbstractInterpreter::code()->commit((*_masm)->code()->pure_insts_size()); 50.16 // make sure nobody can use _masm outside a CodeletMark lifespan 50.17 *_masm = NULL; 50.18 }
51.1 --- a/src/share/vm/interpreter/interpreterRuntime.cpp Mon Aug 23 08:44:03 2010 -0700 51.2 +++ b/src/share/vm/interpreter/interpreterRuntime.cpp Wed Aug 25 10:31:45 2010 -0700 51.3 @@ -1124,7 +1124,7 @@ 51.4 if (handler_blob == NULL) { 51.5 return NULL; 51.6 } 51.7 - address handler = handler_blob->instructions_begin(); 51.8 + address handler = handler_blob->code_begin(); 51.9 _handler_blob = handler_blob; 51.10 _handler = handler; 51.11 return handler; 51.12 @@ -1140,7 +1140,7 @@ 51.13 51.14 BufferBlob* bb = BufferBlob::create("Signature Handler Temp Buffer", 51.15 SignatureHandlerLibrary::buffer_size); 51.16 - _buffer = bb->instructions_begin(); 51.17 + _buffer = bb->code_begin(); 51.18 51.19 _fingerprints = new(ResourceObj::C_HEAP)GrowableArray<uint64_t>(32, true); 51.20 _handlers = new(ResourceObj::C_HEAP)GrowableArray<address>(32, true); 51.21 @@ -1148,16 +1148,16 @@ 51.22 51.23 address SignatureHandlerLibrary::set_handler(CodeBuffer* buffer) { 51.24 address handler = _handler; 51.25 - int code_size = buffer->pure_code_size(); 51.26 - if (handler + code_size > _handler_blob->instructions_end()) { 51.27 + int insts_size = buffer->pure_insts_size(); 51.28 + if (handler + insts_size > _handler_blob->code_end()) { 51.29 // get a new handler blob 51.30 handler = set_handler_blob(); 51.31 } 51.32 if (handler != NULL) { 51.33 - memcpy(handler, buffer->code_begin(), code_size); 51.34 + memcpy(handler, buffer->insts_begin(), insts_size); 51.35 pd_set_handler(handler); 51.36 - ICache::invalidate_range(handler, code_size); 51.37 - _handler = handler + code_size; 51.38 + ICache::invalidate_range(handler, insts_size); 51.39 + _handler = handler + insts_size; 51.40 } 51.41 return handler; 51.42 } 51.43 @@ -1196,8 +1196,8 @@ 51.44 (method->is_static() ? 
"static" : "receiver"), 51.45 method->name_and_sig_as_C_string(), 51.46 fingerprint, 51.47 - buffer.code_size()); 51.48 - Disassembler::decode(handler, handler + buffer.code_size()); 51.49 + buffer.insts_size()); 51.50 + Disassembler::decode(handler, handler + buffer.insts_size()); 51.51 #ifndef PRODUCT 51.52 tty->print_cr(" --- associated result handler ---"); 51.53 address rh_begin = Interpreter::result_handler(method()->result_type());
52.1 --- a/src/share/vm/opto/compile.cpp Mon Aug 23 08:44:03 2010 -0700 52.2 +++ b/src/share/vm/opto/compile.cpp Wed Aug 25 10:31:45 2010 -0700 52.3 @@ -400,7 +400,7 @@ 52.4 } 52.5 52.6 // Initialize the relocation buffers 52.7 - relocInfo* locs_buf = (relocInfo*) blob->instructions_end() - MAX_locs_size; 52.8 + relocInfo* locs_buf = (relocInfo*) blob->content_end() - MAX_locs_size; 52.9 set_scratch_locs_memory(locs_buf); 52.10 } 52.11 52.12 @@ -422,9 +422,9 @@ 52.13 assert(blob != NULL, "Initialize BufferBlob at start"); 52.14 assert(blob->size() > MAX_inst_size, "sanity"); 52.15 relocInfo* locs_buf = scratch_locs_memory(); 52.16 - address blob_begin = blob->instructions_begin(); 52.17 + address blob_begin = blob->content_begin(); 52.18 address blob_end = (address)locs_buf; 52.19 - assert(blob->instructions_contains(blob_end), "sanity"); 52.20 + assert(blob->content_contains(blob_end), "sanity"); 52.21 CodeBuffer buf(blob_begin, blob_end - blob_begin); 52.22 buf.initialize_consts_size(MAX_const_size); 52.23 buf.initialize_stubs_size(MAX_stubs_size); 52.24 @@ -433,7 +433,7 @@ 52.25 buf.insts()->initialize_shared_locs(&locs_buf[0], lsize); 52.26 buf.stubs()->initialize_shared_locs(&locs_buf[lsize], lsize); 52.27 n->emit(buf, this->regalloc()); 52.28 - return buf.code_size(); 52.29 + return buf.insts_size(); 52.30 } 52.31 52.32
53.1 --- a/src/share/vm/opto/graphKit.cpp Mon Aug 23 08:44:03 2010 -0700 53.2 +++ b/src/share/vm/opto/graphKit.cpp Wed Aug 25 10:31:45 2010 -0700 53.3 @@ -1891,7 +1891,7 @@ 53.4 kill_dead_locals(); 53.5 53.6 // Now insert the uncommon trap subroutine call 53.7 - address call_addr = SharedRuntime::uncommon_trap_blob()->instructions_begin(); 53.8 + address call_addr = SharedRuntime::uncommon_trap_blob()->entry_point(); 53.9 const TypePtr* no_memory_effects = NULL; 53.10 // Pass the index of the class to be loaded 53.11 Node* call = make_runtime_call(RC_NO_LEAF | RC_UNCOMMON | 53.12 @@ -2451,11 +2451,79 @@ 53.13 } 53.14 53.15 53.16 +//------------------------------seems_never_null------------------------------- 53.17 +// Use null_seen information if it is available from the profile. 53.18 +// If we see an unexpected null at a type check we record it and force a 53.19 +// recompile; the offending check will be recompiled to handle NULLs. 53.20 +// If we see several offending BCIs, then all checks in the 53.21 +// method will be recompiled. 53.22 +bool GraphKit::seems_never_null(Node* obj, ciProfileData* data) { 53.23 + if (UncommonNullCast // Cutout for this technique 53.24 + && obj != null() // And not the -Xcomp stupid case? 53.25 + && !too_many_traps(Deoptimization::Reason_null_check) 53.26 + ) { 53.27 + if (data == NULL) 53.28 + // Edge case: no mature data. Be optimistic here. 53.29 + return true; 53.30 + // If the profile has not seen a null, assume it won't happen. 53.31 + assert(java_bc() == Bytecodes::_checkcast || 53.32 + java_bc() == Bytecodes::_instanceof || 53.33 + java_bc() == Bytecodes::_aastore, "MDO must collect null_seen bit here"); 53.34 + return !data->as_BitData()->null_seen(); 53.35 + } 53.36 + return false; 53.37 +} 53.38 + 53.39 +//------------------------maybe_cast_profiled_receiver------------------------- 53.40 +// If the profile has seen exactly one type, narrow to exactly that type. 53.41 +// Subsequent type checks will always fold up. 
53.42 +Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj, 53.43 + ciProfileData* data, 53.44 + ciKlass* require_klass) { 53.45 + if (!UseTypeProfile || !TypeProfileCasts) return NULL; 53.46 + if (data == NULL) return NULL; 53.47 + 53.48 + // Make sure we haven't already deoptimized from this tactic. 53.49 + if (too_many_traps(Deoptimization::Reason_class_check)) 53.50 + return NULL; 53.51 + 53.52 + // (No, this isn't a call, but it's enough like a virtual call 53.53 + // to use the same ciMethod accessor to get the profile info...) 53.54 + ciCallProfile profile = method()->call_profile_at_bci(bci()); 53.55 + if (profile.count() >= 0 && // no cast failures here 53.56 + profile.has_receiver(0) && 53.57 + profile.morphism() == 1) { 53.58 + ciKlass* exact_kls = profile.receiver(0); 53.59 + if (require_klass == NULL || 53.60 + static_subtype_check(require_klass, exact_kls) == SSC_always_true) { 53.61 + // If we narrow the type to match what the type profile sees, 53.62 + // we can then remove the rest of the cast. 53.63 + // This is a win, even if the exact_kls is very specific, 53.64 + // because downstream operations, such as method calls, 53.65 + // will often benefit from the sharper type. 53.66 + Node* exact_obj = not_null_obj; // will get updated in place... 53.67 + Node* slow_ctl = type_check_receiver(exact_obj, exact_kls, 1.0, 53.68 + &exact_obj); 53.69 + { PreserveJVMState pjvms(this); 53.70 + set_control(slow_ctl); 53.71 + uncommon_trap(Deoptimization::Reason_class_check, 53.72 + Deoptimization::Action_maybe_recompile); 53.73 + } 53.74 + replace_in_map(not_null_obj, exact_obj); 53.75 + return exact_obj; 53.76 + } 53.77 + // assert(ssc == SSC_always_true)... except maybe the profile lied to us. 53.78 + } 53.79 + 53.80 + return NULL; 53.81 +} 53.82 + 53.83 + 53.84 //-------------------------------gen_instanceof-------------------------------- 53.85 // Generate an instance-of idiom. 
Used by both the instance-of bytecode 53.86 // and the reflective instance-of call. 53.87 -Node* GraphKit::gen_instanceof( Node *subobj, Node* superklass ) { 53.88 - C->set_has_split_ifs(true); // Has chance for split-if optimization 53.89 +Node* GraphKit::gen_instanceof(Node* obj, Node* superklass) { 53.90 + kill_dead_locals(); // Benefit all the uncommon traps 53.91 assert( !stopped(), "dead parse path should be checked in callers" ); 53.92 assert(!TypePtr::NULL_PTR->higher_equal(_gvn.type(superklass)->is_klassptr()), 53.93 "must check for not-null not-dead klass in callers"); 53.94 @@ -2466,9 +2534,16 @@ 53.95 Node* phi = new(C, PATH_LIMIT) PhiNode(region, TypeInt::BOOL); 53.96 C->set_has_split_ifs(true); // Has chance for split-if optimization 53.97 53.98 + ciProfileData* data = NULL; 53.99 + if (java_bc() == Bytecodes::_instanceof) { // Only for the bytecode 53.100 + data = method()->method_data()->bci_to_data(bci()); 53.101 + } 53.102 + bool never_see_null = (ProfileDynamicTypes // aggressive use of profile 53.103 + && seems_never_null(obj, data)); 53.104 + 53.105 // Null check; get casted pointer; set region slot 3 53.106 Node* null_ctl = top(); 53.107 - Node* not_null_obj = null_check_oop(subobj, &null_ctl); 53.108 + Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null); 53.109 53.110 // If not_null_obj is dead, only null-path is taken 53.111 if (stopped()) { // Doing instance-of on a NULL? 53.112 @@ -2477,6 +2552,23 @@ 53.113 } 53.114 region->init_req(_null_path, null_ctl); 53.115 phi ->init_req(_null_path, intcon(0)); // Set null path value 53.116 + if (null_ctl == top()) { 53.117 + // Do this eagerly, so that pattern matches like is_diamond_phi 53.118 + // will work even during parsing. 
53.119 + assert(_null_path == PATH_LIMIT-1, "delete last"); 53.120 + region->del_req(_null_path); 53.121 + phi ->del_req(_null_path); 53.122 + } 53.123 + 53.124 + if (ProfileDynamicTypes && data != NULL) { 53.125 + Node* cast_obj = maybe_cast_profiled_receiver(not_null_obj, data, NULL); 53.126 + if (stopped()) { // Profile disagrees with this path. 53.127 + set_control(null_ctl); // Null is the only remaining possibility. 53.128 + return intcon(0); 53.129 + } 53.130 + if (cast_obj != NULL) 53.131 + not_null_obj = cast_obj; 53.132 + } 53.133 53.134 // Load the object's klass 53.135 Node* obj_klass = load_object_klass(not_null_obj); 53.136 @@ -2546,20 +2638,8 @@ 53.137 C->set_has_split_ifs(true); // Has chance for split-if optimization 53.138 53.139 // Use null-cast information if it is available 53.140 - bool never_see_null = false; 53.141 - // If we see an unexpected null at a check-cast we record it and force a 53.142 - // recompile; the offending check-cast will be compiled to handle NULLs. 53.143 - // If we see several offending BCIs, then all checkcasts in the 53.144 - // method will be compiled to handle NULLs. 53.145 - if (UncommonNullCast // Cutout for this technique 53.146 - && failure_control == NULL // regular case 53.147 - && obj != null() // And not the -Xcomp stupid case? 53.148 - && !too_many_traps(Deoptimization::Reason_null_check)) { 53.149 - // Finally, check the "null_seen" bit from the interpreter. 
53.150 - if (data == NULL || !data->as_BitData()->null_seen()) { 53.151 - never_see_null = true; 53.152 - } 53.153 - } 53.154 + bool never_see_null = ((failure_control == NULL) // regular case only 53.155 + && seems_never_null(obj, data)); 53.156 53.157 // Null check; get casted pointer; set region slot 3 53.158 Node* null_ctl = top(); 53.159 @@ -2572,47 +2652,26 @@ 53.160 } 53.161 region->init_req(_null_path, null_ctl); 53.162 phi ->init_req(_null_path, null()); // Set null path value 53.163 - 53.164 - Node* cast_obj = NULL; // the casted version of the object 53.165 - 53.166 - // If the profile has seen exactly one type, narrow to that type. 53.167 - // (The subsequent subtype check will always fold up.) 53.168 - if (UseTypeProfile && TypeProfileCasts && data != NULL && 53.169 + if (null_ctl == top()) { 53.170 + // Do this eagerly, so that pattern matches like is_diamond_phi 53.171 + // will work even during parsing. 53.172 + assert(_null_path == PATH_LIMIT-1, "delete last"); 53.173 + region->del_req(_null_path); 53.174 + phi ->del_req(_null_path); 53.175 + } 53.176 + 53.177 + Node* cast_obj = NULL; 53.178 + if (data != NULL && 53.179 // Counter has never been decremented (due to cast failure). 53.180 // ...This is a reasonable thing to expect. It is true of 53.181 // all casts inserted by javac to implement generic types. 53.182 - data->as_CounterData()->count() >= 0 && 53.183 - !too_many_traps(Deoptimization::Reason_class_check)) { 53.184 - // (No, this isn't a call, but it's enough like a virtual call 53.185 - // to use the same ciMethod accessor to get the profile info...) 
53.186 - ciCallProfile profile = method()->call_profile_at_bci(bci()); 53.187 - if (profile.count() >= 0 && // no cast failures here 53.188 - profile.has_receiver(0) && 53.189 - profile.morphism() == 1) { 53.190 - ciKlass* exact_kls = profile.receiver(0); 53.191 - int ssc = static_subtype_check(tk->klass(), exact_kls); 53.192 - if (ssc == SSC_always_true) { 53.193 - // If we narrow the type to match what the type profile sees, 53.194 - // we can then remove the rest of the cast. 53.195 - // This is a win, even if the exact_kls is very specific, 53.196 - // because downstream operations, such as method calls, 53.197 - // will often benefit from the sharper type. 53.198 - Node* exact_obj = not_null_obj; // will get updated in place... 53.199 - Node* slow_ctl = type_check_receiver(exact_obj, exact_kls, 1.0, 53.200 - &exact_obj); 53.201 - { PreserveJVMState pjvms(this); 53.202 - set_control(slow_ctl); 53.203 - uncommon_trap(Deoptimization::Reason_class_check, 53.204 - Deoptimization::Action_maybe_recompile); 53.205 - } 53.206 - if (failure_control != NULL) // failure is now impossible 53.207 - (*failure_control) = top(); 53.208 - replace_in_map(not_null_obj, exact_obj); 53.209 - // adjust the type of the phi to the exact klass: 53.210 - phi->raise_bottom_type(_gvn.type(exact_obj)->meet(TypePtr::NULL_PTR)); 53.211 - cast_obj = exact_obj; 53.212 - } 53.213 - // assert(cast_obj != NULL)... except maybe the profile lied to us. 53.214 + data->as_CounterData()->count() >= 0) { 53.215 + cast_obj = maybe_cast_profiled_receiver(not_null_obj, data, tk->klass()); 53.216 + if (cast_obj != NULL) { 53.217 + if (failure_control != NULL) // failure is now impossible 53.218 + (*failure_control) = top(); 53.219 + // adjust the type of the phi to the exact klass: 53.220 + phi->raise_bottom_type(_gvn.type(cast_obj)->meet(TypePtr::NULL_PTR)); 53.221 } 53.222 } 53.223
54.1 --- a/src/share/vm/opto/graphKit.hpp Mon Aug 23 08:44:03 2010 -0700 54.2 +++ b/src/share/vm/opto/graphKit.hpp Wed Aug 25 10:31:45 2010 -0700 54.3 @@ -341,6 +341,14 @@ 54.4 Node* null_check_oop(Node* value, Node* *null_control, 54.5 bool never_see_null = false); 54.6 54.7 + // Check the null_seen bit. 54.8 + bool seems_never_null(Node* obj, ciProfileData* data); 54.9 + 54.10 + // Use the type profile to narrow an object type. 54.11 + Node* maybe_cast_profiled_receiver(Node* not_null_obj, 54.12 + ciProfileData* data, 54.13 + ciKlass* require_klass); 54.14 + 54.15 // Cast obj to not-null on this path 54.16 Node* cast_not_null(Node* obj, bool do_replace_in_map = true); 54.17 // Replace all occurrences of one node by another.
55.1 --- a/src/share/vm/opto/lcm.cpp Mon Aug 23 08:44:03 2010 -0700 55.2 +++ b/src/share/vm/opto/lcm.cpp Wed Aug 25 10:31:45 2010 -0700 55.3 @@ -1,5 +1,5 @@ 55.4 /* 55.5 - * Copyright (c) 1998, 2009, Oracle and/or its affiliates. All rights reserved. 55.6 + * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. 55.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 55.8 * 55.9 * This code is free software; you can redistribute it and/or modify it 55.10 @@ -72,8 +72,7 @@ 55.11 for (uint i1 = 0; i1 < null_block->_nodes.size(); i1++) { 55.12 Node* nn = null_block->_nodes[i1]; 55.13 if (nn->is_MachCall() && 55.14 - nn->as_MachCall()->entry_point() == 55.15 - SharedRuntime::uncommon_trap_blob()->instructions_begin()) { 55.16 + nn->as_MachCall()->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point()) { 55.17 const Type* trtype = nn->in(TypeFunc::Parms)->bottom_type(); 55.18 if (trtype->isa_int() && trtype->is_int()->is_con()) { 55.19 jint tr_con = trtype->is_int()->get_con();
56.1 --- a/src/share/vm/opto/library_call.cpp Mon Aug 23 08:44:03 2010 -0700 56.2 +++ b/src/share/vm/opto/library_call.cpp Wed Aug 25 10:31:45 2010 -0700 56.3 @@ -906,7 +906,8 @@ 56.4 const int count_offset = java_lang_String::count_offset_in_bytes(); 56.5 const int offset_offset = java_lang_String::offset_offset_in_bytes(); 56.6 56.7 - _sp += 2; 56.8 + int nargs = 2; 56.9 + _sp += nargs; 56.10 Node* argument = pop(); // pop non-receiver first: it was pushed second 56.11 Node* receiver = pop(); 56.12 56.13 @@ -914,11 +915,11 @@ 56.14 // null check technically happens in the wrong place, which can lead to 56.15 // invalid stack traces when string compare is inlined into a method 56.16 // which handles NullPointerExceptions. 56.17 - _sp += 2; 56.18 + _sp += nargs; 56.19 receiver = do_null_check(receiver, T_OBJECT); 56.20 //should not do null check for argument for String.equals(), because spec 56.21 //allows to specify NULL as argument. 56.22 - _sp -= 2; 56.23 + _sp -= nargs; 56.24 56.25 if (stopped()) { 56.26 return true; 56.27 @@ -943,7 +944,9 @@ 56.28 ciInstanceKlass* klass = env()->String_klass(); 56.29 56.30 if (!stopped()) { 56.31 + _sp += nargs; // gen_instanceof might do an uncommon trap 56.32 Node* inst = gen_instanceof(argument, makecon(TypeKlassPtr::make(klass))); 56.33 + _sp -= nargs; 56.34 Node* cmp = _gvn.transform(new (C, 3) CmpINode(inst, intcon(1))); 56.35 Node* bol = _gvn.transform(new (C, 2) BoolNode(cmp, BoolTest::ne)); 56.36 56.37 @@ -2935,7 +2938,9 @@ 56.38 switch (id) { 56.39 case vmIntrinsics::_isInstance: 56.40 // nothing is an instance of a primitive type 56.41 + _sp += nargs; // gen_instanceof might do an uncommon trap 56.42 query_value = gen_instanceof(obj, kls); 56.43 + _sp -= nargs; 56.44 break; 56.45 56.46 case vmIntrinsics::_getModifiers: 56.47 @@ -4957,8 +4962,7 @@ 56.48 for (DUIterator_Fast jmax, j = not_ctl->fast_outs(jmax); j < jmax; j++) { 56.49 Node* obs = not_ctl->fast_out(j); 56.50 if (obs->in(0) == not_ctl && obs->is_Call() && 
56.51 - (obs->as_Call()->entry_point() == 56.52 - SharedRuntime::uncommon_trap_blob()->instructions_begin())) { 56.53 + (obs->as_Call()->entry_point() == SharedRuntime::uncommon_trap_blob()->entry_point())) { 56.54 found_trap = true; break; 56.55 } 56.56 }
57.1 --- a/src/share/vm/opto/output.cpp Mon Aug 23 08:44:03 2010 -0700 57.2 +++ b/src/share/vm/opto/output.cpp Wed Aug 25 10:31:45 2010 -0700 57.3 @@ -1184,7 +1184,7 @@ 57.4 MacroAssembler(cb).bind( blk_labels[b->_pre_order] ); 57.5 57.6 else 57.7 - assert( blk_labels[b->_pre_order].loc_pos() == cb->code_size(), 57.8 + assert( blk_labels[b->_pre_order].loc_pos() == cb->insts_size(), 57.9 "label position does not match code offset" ); 57.10 57.11 uint last_inst = b->_nodes.size(); 57.12 @@ -1225,7 +1225,7 @@ 57.13 // If this requires all previous instructions be flushed, then do so 57.14 if( is_sfn || is_mcall || mach->alignment_required() != 1) { 57.15 cb->flush_bundle(true); 57.16 - current_offset = cb->code_size(); 57.17 + current_offset = cb->insts_size(); 57.18 } 57.19 57.20 // align the instruction if necessary 57.21 @@ -1246,7 +1246,7 @@ 57.22 _cfg->_bbs.map( nop->_idx, b ); 57.23 nop->emit(*cb, _regalloc); 57.24 cb->flush_bundle(true); 57.25 - current_offset = cb->code_size(); 57.26 + current_offset = cb->insts_size(); 57.27 } 57.28 57.29 // Remember the start of the last call in a basic block 57.30 @@ -1348,12 +1348,12 @@ 57.31 // Save the offset for the listing 57.32 #ifndef PRODUCT 57.33 if( node_offsets && n->_idx < node_offset_limit ) 57.34 - node_offsets[n->_idx] = cb->code_size(); 57.35 + node_offsets[n->_idx] = cb->insts_size(); 57.36 #endif 57.37 57.38 // "Normal" instruction case 57.39 n->emit(*cb, _regalloc); 57.40 - current_offset = cb->code_size(); 57.41 + current_offset = cb->insts_size(); 57.42 non_safepoints.observe_instruction(n, current_offset); 57.43 57.44 // mcall is last "call" that can be a safepoint 57.45 @@ -1372,13 +1372,12 @@ 57.46 assert(delay_slot != NULL, "expecting delay slot node"); 57.47 57.48 // Back up 1 instruction 57.49 - cb->set_code_end( 57.50 - cb->code_end()-Pipeline::instr_unit_size()); 57.51 + cb->set_insts_end(cb->insts_end() - Pipeline::instr_unit_size()); 57.52 57.53 // Save the offset for the listing 57.54 
#ifndef PRODUCT 57.55 if( node_offsets && delay_slot->_idx < node_offset_limit ) 57.56 - node_offsets[delay_slot->_idx] = cb->code_size(); 57.57 + node_offsets[delay_slot->_idx] = cb->insts_size(); 57.58 #endif 57.59 57.60 // Support a SafePoint in the delay slot 57.61 @@ -1420,7 +1419,7 @@ 57.62 b->_nodes.insert( b->_nodes.size(), nop ); 57.63 _cfg->_bbs.map( nop->_idx, b ); 57.64 nop->emit(*cb, _regalloc); 57.65 - current_offset = cb->code_size(); 57.66 + current_offset = cb->insts_size(); 57.67 } 57.68 } 57.69 57.70 @@ -1437,13 +1436,13 @@ 57.71 // Compute the size of the first block 57.72 _first_block_size = blk_labels[1].loc_pos() - blk_labels[0].loc_pos(); 57.73 57.74 - assert(cb->code_size() < 500000, "method is unreasonably large"); 57.75 + assert(cb->insts_size() < 500000, "method is unreasonably large"); 57.76 57.77 // ------------------ 57.78 57.79 #ifndef PRODUCT 57.80 // Information on the size of the method, without the extraneous code 57.81 - Scheduling::increment_method_size(cb->code_size()); 57.82 + Scheduling::increment_method_size(cb->insts_size()); 57.83 #endif 57.84 57.85 // ------------------
58.1 --- a/src/share/vm/opto/parse.hpp Mon Aug 23 08:44:03 2010 -0700 58.2 +++ b/src/share/vm/opto/parse.hpp Wed Aug 25 10:31:45 2010 -0700 58.3 @@ -494,6 +494,7 @@ 58.4 float dynamic_branch_prediction(float &cnt); 58.5 float branch_prediction(float &cnt, BoolTest::mask btest, int target_bci); 58.6 bool seems_never_taken(float prob); 58.7 + bool seems_stable_comparison(BoolTest::mask btest, Node* c); 58.8 58.9 void do_ifnull(BoolTest::mask btest, Node* c); 58.10 void do_if(BoolTest::mask btest, Node* c);
59.1 --- a/src/share/vm/opto/parse2.cpp Mon Aug 23 08:44:03 2010 -0700 59.2 +++ b/src/share/vm/opto/parse2.cpp Wed Aug 25 10:31:45 2010 -0700 59.3 @@ -892,6 +892,62 @@ 59.4 return prob < PROB_MIN; 59.5 59.6 59.7 +// True if the comparison seems to be the kind that will not change its 59.8 +// statistics from true to false. See comments in adjust_map_after_if. 59.9 +// This question is only asked along paths which are already 59.10 +// classified as untaken (by seems_never_taken), so really, 59.11 +// if a path is never taken, its controlling comparison is 59.12 +// already acting in a stable fashion. If the comparison 59.13 +// seems stable, we will put an expensive uncommon trap 59.14 +// on the untaken path. To be conservative, and to allow 59.15 +// partially executed counted loops to be compiled fully, 59.16 +// we will plant uncommon traps only after pointer comparisons. 59.17 +bool Parse::seems_stable_comparison(BoolTest::mask btest, Node* cmp) { 59.18 + for (int depth = 4; depth > 0; depth--) { 59.19 + // The following switch can find CmpP here over half the time for 59.20 + // dynamic language code rich with type tests. 59.21 + // Code using counted loops or array manipulations (typical 59.22 + // of benchmarks) will have many (>80%) CmpI instructions. 59.23 + switch (cmp->Opcode()) { 59.24 + case Op_CmpP: 59.25 + // A never-taken null check looks like CmpP/BoolTest::eq. 59.26 + // These certainly should be closed off as uncommon traps. 59.27 + if (btest == BoolTest::eq) 59.28 + return true; 59.29 + // A never-failed type check looks like CmpP/BoolTest::ne. 59.30 + // Let's put traps on those, too, so that we don't have to compile 59.31 + // unused paths with indeterminate dynamic type information. 59.32 + if (ProfileDynamicTypes) 59.33 + return true; 59.34 + return false; 59.35 + 59.36 + case Op_CmpI: 59.37 + // A small minority (< 10%) of CmpP are masked as CmpI, 59.38 + // as if by boolean conversion ((p == q? 1: 0) != 0). 
59.39 + // Detect that here, even if it hasn't optimized away yet. 59.40 + // Specifically, this covers the 'instanceof' operator. 59.41 + if (btest == BoolTest::ne || btest == BoolTest::eq) { 59.42 + if (_gvn.type(cmp->in(2))->singleton() && 59.43 + cmp->in(1)->is_Phi()) { 59.44 + PhiNode* phi = cmp->in(1)->as_Phi(); 59.45 + int true_path = phi->is_diamond_phi(); 59.46 + if (true_path > 0 && 59.47 + _gvn.type(phi->in(1))->singleton() && 59.48 + _gvn.type(phi->in(2))->singleton()) { 59.49 + // phi->region->if_proj->ifnode->bool->cmp 59.50 + BoolNode* bol = phi->in(0)->in(1)->in(0)->in(1)->as_Bool(); 59.51 + btest = bol->_test._test; 59.52 + cmp = bol->in(1); 59.53 + continue; 59.54 + } 59.55 + } 59.56 + } 59.57 + return false; 59.58 + } 59.59 + } 59.60 + return false; 59.61 +} 59.62 + 59.63 //-------------------------------repush_if_args-------------------------------- 59.64 // Push arguments of an "if" bytecode back onto the stack by adjusting _sp. 59.65 inline int Parse::repush_if_args() { 59.66 @@ -1137,19 +1193,22 @@ 59.67 59.68 bool is_fallthrough = (path == successor_for_bci(iter().next_bci())); 59.69 59.70 - int cop = c->Opcode(); 59.71 - if (seems_never_taken(prob) && cop == Op_CmpP && btest == BoolTest::eq) { 59.72 - // (An earlier version of do_if omitted '&& btest == BoolTest::eq'.) 59.73 - // 59.74 + if (seems_never_taken(prob) && seems_stable_comparison(btest, c)) { 59.75 // If this might possibly turn into an implicit null check, 59.76 // and the null has never yet been seen, we need to generate 59.77 // an uncommon trap, so as to recompile instead of suffering 59.78 // with very slow branches. (We'll get the slow branches if 59.79 // the program ever changes phase and starts seeing nulls here.) 59.80 // 59.81 - // The tests we worry about are of the form (p == null). 59.82 - // We do not simply inspect for a null constant, since a node may 59.83 + // We do not inspect for a null constant, since a node may 59.84 // optimize to 'null' later on. 
59.85 + // 59.86 + // Null checks, and other tests which expect inequality, 59.87 + // show btest == BoolTest::eq along the non-taken branch. 59.88 + // On the other hand, type tests, must-be-null tests, 59.89 + // and other tests which expect pointer equality, 59.90 + // show btest == BoolTest::ne along the non-taken branch. 59.91 + // We prune both types of branches if they look unused. 59.92 repush_if_args(); 59.93 // We need to mark this branch as taken so that if we recompile we will 59.94 // see that it is possible. In the tiered system the interpreter doesn't
60.1 --- a/src/share/vm/opto/parseHelper.cpp Mon Aug 23 08:44:03 2010 -0700 60.2 +++ b/src/share/vm/opto/parseHelper.cpp Wed Aug 25 10:31:45 2010 -0700 60.3 @@ -119,7 +119,11 @@ 60.4 } 60.5 60.6 // Push the bool result back on stack 60.7 - push( gen_instanceof( pop(), makecon(TypeKlassPtr::make(klass)) ) ); 60.8 + Node* res = gen_instanceof(peek(), makecon(TypeKlassPtr::make(klass))); 60.9 + 60.10 + // Pop from stack AFTER gen_instanceof because it can uncommon trap. 60.11 + pop(); 60.12 + push(res); 60.13 } 60.14 60.15 //------------------------------array_store_check------------------------------
61.1 --- a/src/share/vm/opto/stringopts.cpp Mon Aug 23 08:44:03 2010 -0700 61.2 +++ b/src/share/vm/opto/stringopts.cpp Wed Aug 25 10:31:45 2010 -0700 61.3 @@ -157,7 +157,7 @@ 61.4 Node* uct = _uncommon_traps.at(u); 61.5 61.6 // Build a new call using the jvms state of the allocate 61.7 - address call_addr = SharedRuntime::uncommon_trap_blob()->instructions_begin(); 61.8 + address call_addr = SharedRuntime::uncommon_trap_blob()->entry_point(); 61.9 const TypeFunc* call_type = OptoRuntime::uncommon_trap_Type(); 61.10 int size = call_type->domain()->cnt(); 61.11 const TypePtr* no_memory_effects = NULL;
62.1 --- a/src/share/vm/prims/jvmtiCodeBlobEvents.cpp Mon Aug 23 08:44:03 2010 -0700 62.2 +++ b/src/share/vm/prims/jvmtiCodeBlobEvents.cpp Wed Aug 25 10:31:45 2010 -0700 62.3 @@ -114,7 +114,7 @@ 62.4 // check if this starting address has been seen already - the 62.5 // assumption is that stubs are inserted into the list before the 62.6 // enclosing BufferBlobs. 62.7 - address addr = cb->instructions_begin(); 62.8 + address addr = cb->code_begin(); 62.9 for (int i=0; i<_global_code_blobs->length(); i++) { 62.10 JvmtiCodeBlobDesc* scb = _global_code_blobs->at(i); 62.11 if (addr == scb->code_begin()) { 62.12 @@ -123,8 +123,7 @@ 62.13 } 62.14 62.15 // record the CodeBlob details as a JvmtiCodeBlobDesc 62.16 - JvmtiCodeBlobDesc* scb = new JvmtiCodeBlobDesc(cb->name(), cb->instructions_begin(), 62.17 - cb->instructions_end()); 62.18 + JvmtiCodeBlobDesc* scb = new JvmtiCodeBlobDesc(cb->name(), cb->code_begin(), cb->code_end()); 62.19 _global_code_blobs->append(scb); 62.20 } 62.21
63.1 --- a/src/share/vm/prims/jvmtiExport.cpp Mon Aug 23 08:44:03 2010 -0700 63.2 +++ b/src/share/vm/prims/jvmtiExport.cpp Wed Aug 25 10:31:45 2010 -0700 63.3 @@ -687,8 +687,8 @@ 63.4 public: 63.5 JvmtiCompiledMethodLoadEventMark(JavaThread *thread, nmethod *nm, void* compile_info_ptr = NULL) 63.6 : JvmtiMethodEventMark(thread,methodHandle(thread, nm->method())) { 63.7 - _code_data = nm->code_begin(); 63.8 - _code_size = nm->code_size(); 63.9 + _code_data = nm->insts_begin(); 63.10 + _code_size = nm->insts_size(); 63.11 _compile_info = compile_info_ptr; // Set void pointer of compiledMethodLoad Event. Default value is NULL. 63.12 JvmtiCodeBlobEvents::build_jvmti_addr_location_map(nm, &_map, &_map_length); 63.13 }
64.1 --- a/src/share/vm/prims/methodHandles.cpp Mon Aug 23 08:44:03 2010 -0700 64.2 +++ b/src/share/vm/prims/methodHandles.cpp Wed Aug 25 10:31:45 2010 -0700 64.3 @@ -113,8 +113,7 @@ 64.4 _adapter_code = MethodHandlesAdapterBlob::create(_adapter_code_size); 64.5 if (_adapter_code == NULL) 64.6 vm_exit_out_of_memory(_adapter_code_size, "CodeCache: no room for MethodHandles adapters"); 64.7 - CodeBuffer code(_adapter_code->instructions_begin(), _adapter_code->instructions_size()); 64.8 - 64.9 + CodeBuffer code(_adapter_code); 64.10 MethodHandlesAdapterGenerator g(&code); 64.11 g.generate(); 64.12 }
65.1 --- a/src/share/vm/runtime/compilationPolicy.cpp Mon Aug 23 08:44:03 2010 -0700 65.2 +++ b/src/share/vm/runtime/compilationPolicy.cpp Wed Aug 25 10:31:45 2010 -0700 65.3 @@ -439,7 +439,7 @@ 65.4 if (!instanceKlass::cast(m->method_holder())->is_initialized()) return (_msg = "method holder not initialized"); 65.5 if (m->is_native()) return (_msg = "native method"); 65.6 nmethod* m_code = m->code(); 65.7 - if( m_code != NULL && m_code->instructions_size() > InlineSmallCode ) 65.8 + if (m_code != NULL && m_code->code_size() > InlineSmallCode) 65.9 return (_msg = "already compiled into a big method"); 65.10 65.11 // use frequency-based objections only for non-trivial methods
66.1 --- a/src/share/vm/runtime/frame.cpp Mon Aug 23 08:44:03 2010 -0700 66.2 +++ b/src/share/vm/runtime/frame.cpp Wed Aug 25 10:31:45 2010 -0700 66.3 @@ -537,8 +537,8 @@ 66.4 st->cr(); 66.5 #ifndef PRODUCT 66.6 if (end == NULL) { 66.7 - begin = _cb->instructions_begin(); 66.8 - end = _cb->instructions_end(); 66.9 + begin = _cb->code_begin(); 66.10 + end = _cb->code_end(); 66.11 } 66.12 #endif 66.13 }
67.1 --- a/src/share/vm/runtime/globals.hpp Mon Aug 23 08:44:03 2010 -0700 67.2 +++ b/src/share/vm/runtime/globals.hpp Wed Aug 25 10:31:45 2010 -0700 67.3 @@ -2476,6 +2476,9 @@ 67.4 develop(bool, MonomorphicArrayCheck, true, \ 67.5 "Uncommon-trap array store checks that require full type check") \ 67.6 \ 67.7 + diagnostic(bool, ProfileDynamicTypes, true, \ 67.8 + "do extra type profiling and use it more aggressively") \ 67.9 + \ 67.10 develop(bool, DelayCompilationDuringStartup, true, \ 67.11 "Delay invoking the compiler until main application class is " \ 67.12 "loaded") \
68.1 --- a/src/share/vm/runtime/icache.cpp Mon Aug 23 08:44:03 2010 -0700 68.2 +++ b/src/share/vm/runtime/icache.cpp Wed Aug 25 10:31:45 2010 -0700 68.3 @@ -1,5 +1,5 @@ 68.4 /* 68.5 - * Copyright (c) 1997, 2006, Oracle and/or its affiliates. All rights reserved. 68.6 + * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 68.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 68.8 * 68.9 * This code is free software; you can redistribute it and/or modify it 68.10 @@ -33,7 +33,7 @@ 68.11 ResourceMark rm; 68.12 68.13 BufferBlob* b = BufferBlob::create("flush_icache_stub", ICache::stub_size); 68.14 - CodeBuffer c(b->instructions_begin(), b->instructions_size()); 68.15 + CodeBuffer c(b); 68.16 68.17 ICacheStubGenerator g(&c); 68.18 g.generate_icache_flush(&_flush_icache_stub);
69.1 --- a/src/share/vm/runtime/rframe.cpp Mon Aug 23 08:44:03 2010 -0700 69.2 +++ b/src/share/vm/runtime/rframe.cpp Wed Aug 25 10:31:45 2010 -0700 69.3 @@ -1,5 +1,5 @@ 69.4 /* 69.5 - * Copyright (c) 1997, 2007, Oracle and/or its affiliates. All rights reserved. 69.6 + * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 69.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 69.8 * 69.9 * This code is free software; you can redistribute it and/or modify it 69.10 @@ -120,7 +120,7 @@ 69.11 int CompiledRFrame::cost() const { 69.12 nmethod* nm = top_method()->code(); 69.13 if (nm != NULL) { 69.14 - return nm->code_size(); 69.15 + return nm->insts_size(); 69.16 } else { 69.17 return top_method()->code_size(); 69.18 }
70.1 --- a/src/share/vm/runtime/sharedRuntime.cpp Mon Aug 23 08:44:03 2010 -0700 70.2 +++ b/src/share/vm/runtime/sharedRuntime.cpp Wed Aug 25 10:31:45 2010 -0700 70.3 @@ -455,11 +455,11 @@ 70.4 if (at_poll_return) { 70.5 assert(SharedRuntime::polling_page_return_handler_blob() != NULL, 70.6 "polling page return stub not created yet"); 70.7 - stub = SharedRuntime::polling_page_return_handler_blob()->instructions_begin(); 70.8 + stub = SharedRuntime::polling_page_return_handler_blob()->entry_point(); 70.9 } else { 70.10 assert(SharedRuntime::polling_page_safepoint_handler_blob() != NULL, 70.11 "polling page safepoint stub not created yet"); 70.12 - stub = SharedRuntime::polling_page_safepoint_handler_blob()->instructions_begin(); 70.13 + stub = SharedRuntime::polling_page_safepoint_handler_blob()->entry_point(); 70.14 } 70.15 #ifndef PRODUCT 70.16 if( TraceSafepoint ) { 70.17 @@ -574,7 +574,7 @@ 70.18 } 70.19 70.20 // found handling method => lookup exception handler 70.21 - int catch_pco = ret_pc - nm->instructions_begin(); 70.22 + int catch_pco = ret_pc - nm->code_begin(); 70.23 70.24 ExceptionHandlerTable table(nm); 70.25 HandlerTableEntry *t = table.entry_for(catch_pco, handler_bci, scope_depth); 70.26 @@ -607,7 +607,7 @@ 70.27 return NULL; 70.28 } 70.29 70.30 - return nm->instructions_begin() + t->pco(); 70.31 + return nm->code_begin() + t->pco(); 70.32 } 70.33 70.34 JRT_ENTRY(void, SharedRuntime::throw_AbstractMethodError(JavaThread* thread)) 70.35 @@ -2252,7 +2252,7 @@ 70.36 70.37 ResourceMark rm; 70.38 70.39 - NOT_PRODUCT(int code_size); 70.40 + NOT_PRODUCT(int insts_size); 70.41 AdapterBlob* B = NULL; 70.42 AdapterHandlerEntry* entry = NULL; 70.43 AdapterFingerPrint* fingerprint = NULL; 70.44 @@ -2305,7 +2305,7 @@ 70.45 70.46 BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache 70.47 if (buf != NULL) { 70.48 - CodeBuffer buffer(buf->instructions_begin(), buf->instructions_size()); 70.49 + CodeBuffer buffer(buf); 70.50 short 
buffer_locs[20]; 70.51 buffer.insts()->initialize_shared_locs((relocInfo*)buffer_locs, 70.52 sizeof(buffer_locs)/sizeof(relocInfo)); 70.53 @@ -2321,19 +2321,19 @@ 70.54 #ifdef ASSERT 70.55 if (VerifyAdapterSharing) { 70.56 if (shared_entry != NULL) { 70.57 - assert(shared_entry->compare_code(buf->instructions_begin(), buffer.code_size(), total_args_passed, sig_bt), 70.58 + assert(shared_entry->compare_code(buf->code_begin(), buffer.insts_size(), total_args_passed, sig_bt), 70.59 "code must match"); 70.60 // Release the one just created and return the original 70.61 _adapters->free_entry(entry); 70.62 return shared_entry; 70.63 } else { 70.64 - entry->save_code(buf->instructions_begin(), buffer.code_size(), total_args_passed, sig_bt); 70.65 + entry->save_code(buf->code_begin(), buffer.insts_size(), total_args_passed, sig_bt); 70.66 } 70.67 } 70.68 #endif 70.69 70.70 B = AdapterBlob::create(&buffer); 70.71 - NOT_PRODUCT(code_size = buffer.code_size()); 70.72 + NOT_PRODUCT(insts_size = buffer.insts_size()); 70.73 } 70.74 if (B == NULL) { 70.75 // CodeCache is full, disable compilation 70.76 @@ -2343,16 +2343,16 @@ 70.77 CompileBroker::handle_full_code_cache(); 70.78 return NULL; // Out of CodeCache space 70.79 } 70.80 - entry->relocate(B->instructions_begin()); 70.81 + entry->relocate(B->content_begin()); 70.82 #ifndef PRODUCT 70.83 // debugging suppport 70.84 if (PrintAdapterHandlers) { 70.85 tty->cr(); 70.86 tty->print_cr("i2c argument handler #%d for: %s %s (fingerprint = %s, %d bytes generated)", 70.87 _adapters->number_of_entries(), (method->is_static() ? 
"static" : "receiver"), 70.88 - method->signature()->as_C_string(), fingerprint->as_string(), code_size ); 70.89 + method->signature()->as_C_string(), fingerprint->as_string(), insts_size ); 70.90 tty->print_cr("c2i argument handler starts at %p",entry->get_c2i_entry()); 70.91 - Disassembler::decode(entry->get_i2c_entry(), entry->get_i2c_entry() + code_size); 70.92 + Disassembler::decode(entry->get_i2c_entry(), entry->get_i2c_entry() + insts_size); 70.93 } 70.94 #endif 70.95 70.96 @@ -2366,13 +2366,11 @@ 70.97 "%s(%s)@" PTR_FORMAT, 70.98 B->name(), 70.99 fingerprint->as_string(), 70.100 - B->instructions_begin()); 70.101 - Forte::register_stub(blob_id, B->instructions_begin(), B->instructions_end()); 70.102 + B->content_begin()); 70.103 + Forte::register_stub(blob_id, B->content_begin(), B->content_end()); 70.104 70.105 if (JvmtiExport::should_post_dynamic_code_generated()) { 70.106 - JvmtiExport::post_dynamic_code_generated(blob_id, 70.107 - B->instructions_begin(), 70.108 - B->instructions_end()); 70.109 + JvmtiExport::post_dynamic_code_generated(blob_id, B->content_begin(), B->content_end()); 70.110 } 70.111 } 70.112 return entry; 70.113 @@ -2456,7 +2454,7 @@ 70.114 70.115 BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache 70.116 if (buf != NULL) { 70.117 - CodeBuffer buffer(buf->instructions_begin(), buf->instructions_size()); 70.118 + CodeBuffer buffer(buf); 70.119 double locs_buf[20]; 70.120 buffer.insts()->initialize_shared_locs((relocInfo*)locs_buf, sizeof(locs_buf) / sizeof(relocInfo)); 70.121 MacroAssembler _masm(&buffer); 70.122 @@ -2540,7 +2538,7 @@ 70.123 70.124 BufferBlob* buf = buffer_blob(); // the temporary code buffer in CodeCache 70.125 if (buf != NULL) { 70.126 - CodeBuffer buffer(buf->instructions_begin(), buf->instructions_size()); 70.127 + CodeBuffer buffer(buf); 70.128 // Need a few relocation entries 70.129 double locs_buf[20]; 70.130 buffer.insts()->initialize_shared_locs(
71.1 --- a/src/share/vm/runtime/sharedRuntime.hpp Mon Aug 23 08:44:03 2010 -0700 71.2 +++ b/src/share/vm/runtime/sharedRuntime.hpp Wed Aug 25 10:31:45 2010 -0700 71.3 @@ -173,12 +173,12 @@ 71.4 71.5 static address get_ic_miss_stub() { 71.6 assert(_ic_miss_blob!= NULL, "oops"); 71.7 - return _ic_miss_blob->instructions_begin(); 71.8 + return _ic_miss_blob->entry_point(); 71.9 } 71.10 71.11 static address get_handle_wrong_method_stub() { 71.12 assert(_wrong_method_blob!= NULL, "oops"); 71.13 - return _wrong_method_blob->instructions_begin(); 71.14 + return _wrong_method_blob->entry_point(); 71.15 } 71.16 71.17 #ifdef COMPILER2 71.18 @@ -188,15 +188,15 @@ 71.19 71.20 static address get_resolve_opt_virtual_call_stub(){ 71.21 assert(_resolve_opt_virtual_call_blob != NULL, "oops"); 71.22 - return _resolve_opt_virtual_call_blob->instructions_begin(); 71.23 + return _resolve_opt_virtual_call_blob->entry_point(); 71.24 } 71.25 static address get_resolve_virtual_call_stub() { 71.26 assert(_resolve_virtual_call_blob != NULL, "oops"); 71.27 - return _resolve_virtual_call_blob->instructions_begin(); 71.28 + return _resolve_virtual_call_blob->entry_point(); 71.29 } 71.30 static address get_resolve_static_call_stub() { 71.31 assert(_resolve_static_call_blob != NULL, "oops"); 71.32 - return _resolve_static_call_blob->instructions_begin(); 71.33 + return _resolve_static_call_blob->entry_point(); 71.34 } 71.35 71.36 static SafepointBlob* polling_page_return_handler_blob() { return _polling_page_return_handler_blob; } 71.37 @@ -548,16 +548,17 @@ 71.38 // This library manages argument marshaling adapters and native wrappers. 71.39 // There are 2 flavors of adapters: I2C and C2I. 71.40 // 71.41 -// The I2C flavor takes a stock interpreted call setup, marshals the arguments 71.42 -// for a Java-compiled call, and jumps to Rmethod-> code()-> 71.43 -// instructions_begin(). It is broken to call it without an nmethod assigned. 
71.44 -// The usual behavior is to lift any register arguments up out of the stack 71.45 -// and possibly re-pack the extra arguments to be contigious. I2C adapters 71.46 -// will save what the interpreter's stack pointer will be after arguments are 71.47 -// popped, then adjust the interpreter's frame size to force alignment and 71.48 -// possibly to repack the arguments. After re-packing, it jumps to the 71.49 -// compiled code start. There are no safepoints in this adapter code and a GC 71.50 -// cannot happen while marshaling is in progress. 71.51 +// The I2C flavor takes a stock interpreted call setup, marshals the 71.52 +// arguments for a Java-compiled call, and jumps to Rmethod-> code()-> 71.53 +// code_begin(). It is broken to call it without an nmethod assigned. 71.54 +// The usual behavior is to lift any register arguments up out of the 71.55 +// stack and possibly re-pack the extra arguments to be contiguous. 71.56 +// I2C adapters will save what the interpreter's stack pointer will be 71.57 +// after arguments are popped, then adjust the interpreter's frame 71.58 +// size to force alignment and possibly to repack the arguments. 71.59 +// After re-packing, it jumps to the compiled code start. There are 71.60 +// no safepoints in this adapter code and a GC cannot happen while 71.61 +// marshaling is in progress. 71.62 // 71.63 // The C2I flavor takes a stock compiled call setup plus the target method in 71.64 // Rmethod, marshals the arguments for an interpreted call and jumps to
72.1 --- a/src/share/vm/runtime/stubRoutines.cpp Mon Aug 23 08:44:03 2010 -0700 72.2 +++ b/src/share/vm/runtime/stubRoutines.cpp Wed Aug 25 10:31:45 2010 -0700 72.3 @@ -119,10 +119,9 @@ 72.4 TraceTime timer("StubRoutines generation 1", TraceStartupTime); 72.5 _code1 = BufferBlob::create("StubRoutines (1)", code_size1); 72.6 if (_code1 == NULL) { 72.7 - vm_exit_out_of_memory(code_size1, 72.8 - "CodeCache: no room for StubRoutines (1)"); 72.9 + vm_exit_out_of_memory(code_size1, "CodeCache: no room for StubRoutines (1)"); 72.10 } 72.11 - CodeBuffer buffer(_code1->instructions_begin(), _code1->instructions_size()); 72.12 + CodeBuffer buffer(_code1); 72.13 StubGenerator_generate(&buffer, false); 72.14 } 72.15 } 72.16 @@ -172,10 +171,9 @@ 72.17 TraceTime timer("StubRoutines generation 2", TraceStartupTime); 72.18 _code2 = BufferBlob::create("StubRoutines (2)", code_size2); 72.19 if (_code2 == NULL) { 72.20 - vm_exit_out_of_memory(code_size2, 72.21 - "CodeCache: no room for StubRoutines (2)"); 72.22 + vm_exit_out_of_memory(code_size2, "CodeCache: no room for StubRoutines (2)"); 72.23 } 72.24 - CodeBuffer buffer(_code2->instructions_begin(), _code2->instructions_size()); 72.25 + CodeBuffer buffer(_code2); 72.26 StubGenerator_generate(&buffer, true); 72.27 } 72.28
73.1 --- a/src/share/vm/runtime/vmStructs.cpp Mon Aug 23 08:44:03 2010 -0700 73.2 +++ b/src/share/vm/runtime/vmStructs.cpp Wed Aug 25 10:31:45 2010 -0700 73.3 @@ -604,7 +604,8 @@ 73.4 nonstatic_field(CodeBlob, _size, int) \ 73.5 nonstatic_field(CodeBlob, _header_size, int) \ 73.6 nonstatic_field(CodeBlob, _relocation_size, int) \ 73.7 - nonstatic_field(CodeBlob, _instructions_offset, int) \ 73.8 + nonstatic_field(CodeBlob, _content_offset, int) \ 73.9 + nonstatic_field(CodeBlob, _code_offset, int) \ 73.10 nonstatic_field(CodeBlob, _frame_complete_offset, int) \ 73.11 nonstatic_field(CodeBlob, _data_offset, int) \ 73.12 nonstatic_field(CodeBlob, _frame_size, int) \