Mon, 27 Dec 2010 09:56:29 -0500
Merge
make/windows/makefiles/compile.make | file | annotate | diff | comparison | revisions |
1.1 --- a/.hgtags Mon Dec 27 09:30:20 2010 -0500 1.2 +++ b/.hgtags Mon Dec 27 09:56:29 2010 -0500 1.3 @@ -134,4 +134,5 @@ 1.4 5484e7c53fa7da5e869902437ee08a9ae10c1c69 jdk7-b119 1.5 f5603a6e50422046ebc0d2f1671d55cb8f1bf1e9 jdk7-b120 1.6 3f3653ab7af8dc1ddb9fa75dad56bf94f89e81a8 jdk7-b121 1.7 +3a548dc9cb456110ca8fc1514441a8c3bda0014d jdk7-b122 1.8 5484e7c53fa7da5e869902437ee08a9ae10c1c69 hs20-b03
2.1 --- a/agent/src/share/classes/sun/jvm/hotspot/HotSpotTypeDataBase.java Mon Dec 27 09:30:20 2010 -0500 2.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/HotSpotTypeDataBase.java Mon Dec 27 09:56:29 2010 -0500 2.3 @@ -1,5 +1,5 @@ 2.4 /* 2.5 - * Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved. 2.6 + * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. 2.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 2.8 * 2.9 * This code is free software; you can redistribute it and/or modify it 2.10 @@ -99,15 +99,8 @@ 2.11 long typeEntrySizeOffset; 2.12 long typeEntryArrayStride; 2.13 2.14 - typeEntryTypeNameOffset = getLongValueFromProcess("gHotSpotVMTypeEntryTypeNameOffset"); 2.15 - typeEntrySuperclassNameOffset = getLongValueFromProcess("gHotSpotVMTypeEntrySuperclassNameOffset"); 2.16 - typeEntryIsOopTypeOffset = getLongValueFromProcess("gHotSpotVMTypeEntryIsOopTypeOffset"); 2.17 - typeEntryIsIntegerTypeOffset = getLongValueFromProcess("gHotSpotVMTypeEntryIsIntegerTypeOffset"); 2.18 - typeEntryIsUnsignedOffset = getLongValueFromProcess("gHotSpotVMTypeEntryIsUnsignedOffset"); 2.19 - typeEntrySizeOffset = getLongValueFromProcess("gHotSpotVMTypeEntrySizeOffset"); 2.20 - typeEntryArrayStride = getLongValueFromProcess("gHotSpotVMTypeEntryArrayStride"); 2.21 - 2.22 - // Fetch the address of the VMTypeEntry* 2.23 + // Fetch the address of the VMTypeEntry*. We get this symbol first 2.24 + // and try to use it to make sure that symbol lookup is working. 
2.25 Address entryAddr = lookupInProcess("gHotSpotVMTypes"); 2.26 // System.err.println("gHotSpotVMTypes address = " + entryAddr); 2.27 // Dereference this once to get the pointer to the first VMTypeEntry 2.28 @@ -118,6 +111,14 @@ 2.29 throw new RuntimeException("gHotSpotVMTypes was not initialized properly in the remote process; can not continue"); 2.30 } 2.31 2.32 + typeEntryTypeNameOffset = getLongValueFromProcess("gHotSpotVMTypeEntryTypeNameOffset"); 2.33 + typeEntrySuperclassNameOffset = getLongValueFromProcess("gHotSpotVMTypeEntrySuperclassNameOffset"); 2.34 + typeEntryIsOopTypeOffset = getLongValueFromProcess("gHotSpotVMTypeEntryIsOopTypeOffset"); 2.35 + typeEntryIsIntegerTypeOffset = getLongValueFromProcess("gHotSpotVMTypeEntryIsIntegerTypeOffset"); 2.36 + typeEntryIsUnsignedOffset = getLongValueFromProcess("gHotSpotVMTypeEntryIsUnsignedOffset"); 2.37 + typeEntrySizeOffset = getLongValueFromProcess("gHotSpotVMTypeEntrySizeOffset"); 2.38 + typeEntryArrayStride = getLongValueFromProcess("gHotSpotVMTypeEntryArrayStride"); 2.39 + 2.40 // Start iterating down it until we find an entry with no name 2.41 Address typeNameAddr = null; 2.42 do {
3.1 --- a/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/coff/COFFFileParser.java Mon Dec 27 09:30:20 2010 -0500 3.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/coff/COFFFileParser.java Mon Dec 27 09:56:29 2010 -0500 3.3 @@ -1,5 +1,5 @@ 3.4 /* 3.5 - * Copyright (c) 2000, 2004, Oracle and/or its affiliates. All rights reserved. 3.6 + * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. 3.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 3.8 * 3.9 * This code is free software; you can redistribute it and/or modify it 3.10 @@ -122,10 +122,14 @@ 3.11 private MemoizedObject[] sectionHeaders; 3.12 private MemoizedObject[] symbols; 3.13 3.14 + // Init stringTable at decl time since other fields init'ed in the 3.15 + // constructor need the String Table. 3.16 private MemoizedObject stringTable = new MemoizedObject() { 3.17 public Object computeValue() { 3.18 + // the String Table follows the Symbol Table 3.19 int ptr = getPointerToSymbolTable(); 3.20 if (ptr == 0) { 3.21 + // no Symbol Table so no String Table 3.22 return new StringTable(0); 3.23 } else { 3.24 return new StringTable(ptr + SYMBOL_SIZE * getNumberOfSymbols()); 3.25 @@ -140,6 +144,8 @@ 3.26 timeDateStamp = readInt(); 3.27 pointerToSymbolTable = readInt(); 3.28 numberOfSymbols = readInt(); 3.29 + // String Table can be accessed at this point because 3.30 + // pointerToSymbolTable and numberOfSymbols fields are set. 3.31 sizeOfOptionalHeader = readShort(); 3.32 characteristics = readShort(); 3.33 3.34 @@ -222,6 +228,8 @@ 3.35 private MemoizedObject windowsSpecificFields; 3.36 private MemoizedObject dataDirectories; 3.37 3.38 + // We use an offset of 2 because OptionalHeaderStandardFieldsImpl doesn't 3.39 + // include the 'magic' field. 
3.40 private static final int STANDARD_FIELDS_OFFSET = 2; 3.41 private static final int PE32_WINDOWS_SPECIFIC_FIELDS_OFFSET = 28; 3.42 private static final int PE32_DATA_DIRECTORIES_OFFSET = 96; 3.43 @@ -288,7 +296,7 @@ 3.44 private int sizeOfUninitializedData; 3.45 private int addressOfEntryPoint; 3.46 private int baseOfCode; 3.47 - private int baseOfData; 3.48 + private int baseOfData; // only set in PE32 3.49 3.50 OptionalHeaderStandardFieldsImpl(int offset, 3.51 boolean isPE32Plus) { 3.52 @@ -301,7 +309,8 @@ 3.53 sizeOfUninitializedData = readInt(); 3.54 addressOfEntryPoint = readInt(); 3.55 baseOfCode = readInt(); 3.56 - if (isPE32Plus) { 3.57 + if (!isPE32Plus) { 3.58 + // only available in PE32 3.59 baseOfData = readInt(); 3.60 } 3.61 } 3.62 @@ -433,7 +442,10 @@ 3.63 if (dir.getRVA() == 0 || dir.getSize() == 0) { 3.64 return null; 3.65 } 3.66 - return new ExportDirectoryTableImpl(rvaToFileOffset(dir.getRVA()), dir.getSize()); 3.67 + // ExportDirectoryTableImpl needs both the RVA and the 3.68 + // RVA converted to a file offset. 
3.69 + return new 3.70 + ExportDirectoryTableImpl(dir.getRVA(), dir.getSize()); 3.71 } 3.72 }; 3.73 3.74 @@ -526,6 +538,7 @@ 3.75 } 3.76 3.77 class ExportDirectoryTableImpl implements ExportDirectoryTable { 3.78 + private int exportDataDirRVA; 3.79 private int offset; 3.80 private int size; 3.81 3.82 @@ -548,8 +561,9 @@ 3.83 private MemoizedObject exportOrdinalTable; 3.84 private MemoizedObject exportAddressTable; 3.85 3.86 - ExportDirectoryTableImpl(int offset, int size) { 3.87 - this.offset = offset; 3.88 + ExportDirectoryTableImpl(int exportDataDirRVA, int size) { 3.89 + this.exportDataDirRVA = exportDataDirRVA; 3.90 + offset = rvaToFileOffset(exportDataDirRVA); 3.91 this.size = size; 3.92 seek(offset); 3.93 exportFlags = readInt(); 3.94 @@ -595,6 +609,7 @@ 3.95 3.96 exportOrdinalTable = new MemoizedObject() { 3.97 public Object computeValue() { 3.98 + // number of ordinals is same as the number of name pointers 3.99 short[] ordinals = new short[getNumberOfNamePointers()]; 3.100 seek(rvaToFileOffset(getOrdinalTableRVA())); 3.101 for (int i = 0; i < ordinals.length; i++) { 3.102 @@ -608,14 +623,18 @@ 3.103 public Object computeValue() { 3.104 int[] addresses = new int[getNumberOfAddressTableEntries()]; 3.105 seek(rvaToFileOffset(getExportAddressTableRVA())); 3.106 - // Must make two passes to avoid rvaToFileOffset 3.107 - // destroying seek() position 3.108 + // The Export Address Table values are a union of two 3.109 + // possible values: 3.110 + // Export RVA - The address of the exported symbol when 3.111 + // loaded into memory, relative to the image base. 3.112 + // This value doesn't get converted into a file offset. 3.113 + // Forwarder RVA - The pointer to a null-terminated ASCII 3.114 + // string in the export section. This value gets 3.115 + // converted into a file offset because we have to 3.116 + // fetch the string. 
3.117 for (int i = 0; i < addresses.length; i++) { 3.118 addresses[i] = readInt(); 3.119 } 3.120 - for (int i = 0; i < addresses.length; i++) { 3.121 - addresses[i] = rvaToFileOffset(addresses[i]); 3.122 - } 3.123 return addresses; 3.124 } 3.125 }; 3.126 @@ -648,11 +667,12 @@ 3.127 3.128 public boolean isExportAddressForwarder(short ordinal) { 3.129 int addr = getExportAddress(ordinal); 3.130 - return ((offset <= addr) && (addr < (offset + size))); 3.131 + return ((exportDataDirRVA <= addr) && 3.132 + (addr < (exportDataDirRVA + size))); 3.133 } 3.134 3.135 public String getExportAddressForwarder(short ordinal) { 3.136 - seek(getExportAddress(ordinal)); 3.137 + seek(rvaToFileOffset(getExportAddress(ordinal))); 3.138 return readCString(); 3.139 } 3.140 3.141 @@ -3371,10 +3391,17 @@ 3.142 throw new COFFException(e); 3.143 } 3.144 // Look up in string table 3.145 + // FIXME: this index value is assumed to be in the valid range 3.146 name = getStringTable().get(index); 3.147 } else { 3.148 try { 3.149 - name = new String(tmpName, US_ASCII); 3.150 + int length = 0; 3.151 + // find last non-NULL 3.152 + for (; length < tmpName.length && tmpName[length] != '\0';) { 3.153 + length++; 3.154 + } 3.155 + // don't include NULL chars in returned name String 3.156 + name = new String(tmpName, 0, length, US_ASCII); 3.157 } catch (UnsupportedEncodingException e) { 3.158 throw new COFFException(e); 3.159 } 3.160 @@ -3487,6 +3514,7 @@ 3.161 tmpName[5] << 16 | 3.162 tmpName[6] << 8 | 3.163 tmpName[7]); 3.164 + // FIXME: stringOffset is assumed to be in the valid range 3.165 name = getStringTable().getAtOffset(stringOffset); 3.166 } 3.167 3.168 @@ -3698,12 +3726,13 @@ 3.169 3.170 StringTable(int offset) { 3.171 if (offset == 0) { 3.172 + // no String Table 3.173 strings = new COFFString[0]; 3.174 return; 3.175 } 3.176 3.177 seek(offset); 3.178 - int length = readInt(); 3.179 + int length = readInt(); // length includes itself 3.180 byte[] data = new byte[length - 4]; 3.181 int 
numBytesRead = readBytes(data); 3.182 if (numBytesRead != data.length) {
4.1 --- a/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/coff/DumpExports.java Mon Dec 27 09:30:20 2010 -0500 4.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/coff/DumpExports.java Mon Dec 27 09:56:29 2010 -0500 4.3 @@ -1,5 +1,5 @@ 4.4 /* 4.5 - * Copyright (c) 2001, Oracle and/or its affiliates. All rights reserved. 4.6 + * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved. 4.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4.8 * 4.9 * This code is free software; you can redistribute it and/or modify it 4.10 @@ -37,35 +37,48 @@ 4.11 4.12 String filename = args[0]; 4.13 COFFFile file = COFFFileParser.getParser().parse(filename); 4.14 - ExportDirectoryTable exports = 4.15 - file.getHeader(). 4.16 - getOptionalHeader(). 4.17 - getDataDirectories(). 4.18 - getExportDirectoryTable(); 4.19 + 4.20 + // get common point for both things we want to dump 4.21 + OptionalHeaderDataDirectories dataDirs = file.getHeader().getOptionalHeader(). 
4.22 + getDataDirectories(); 4.23 + 4.24 + // dump the header data directory for the Export Table: 4.25 + DataDirectory dir = dataDirs.getExportTable(); 4.26 + System.out.println("Export table: RVA = " + dir.getRVA() + "/0x" + 4.27 + Integer.toHexString(dir.getRVA()) + ", size = " + dir.getSize() + "/0x" + 4.28 + Integer.toHexString(dir.getSize())); 4.29 + 4.30 + System.out.println(file.getHeader().getNumberOfSections() + " sections in file"); 4.31 + for (int i = 1; i <= file.getHeader().getNumberOfSections(); i++) { 4.32 + SectionHeader sec = file.getHeader().getSectionHeader(i); 4.33 + System.out.println(" Section " + i + ":"); 4.34 + System.out.println(" Name = '" + sec.getName() + "'"); 4.35 + System.out.println(" VirtualSize = " + sec.getSize() + "/0x" + 4.36 + Integer.toHexString(sec.getSize())); 4.37 + System.out.println(" VirtualAddress = " + sec.getVirtualAddress() + "/0x" + 4.38 + Integer.toHexString(sec.getVirtualAddress())); 4.39 + System.out.println(" SizeOfRawData = " + sec.getSizeOfRawData() + "/0x" + 4.40 + Integer.toHexString(sec.getSizeOfRawData())); 4.41 + System.out.println(" PointerToRawData = " + sec.getPointerToRawData() + "/0x" + 4.42 + Integer.toHexString(sec.getPointerToRawData())); 4.43 + } 4.44 + 4.45 + ExportDirectoryTable exports = dataDirs.getExportDirectoryTable(); 4.46 if (exports == null) { 4.47 System.out.println("No exports found."); 4.48 } else { 4.49 - System.out.println(file.getHeader().getNumberOfSections() + " sections in file"); 4.50 - for (int i = 0; i < file.getHeader().getNumberOfSections(); i++) { 4.51 - System.out.println(" Section " + i + ": " + file.getHeader().getSectionHeader(1 + i).getName()); 4.52 - } 4.53 - 4.54 - DataDirectory dir = file.getHeader().getOptionalHeader().getDataDirectories().getExportTable(); 4.55 - System.out.println("Export table: RVA = 0x" + Integer.toHexString(dir.getRVA()) + 4.56 - ", size = 0x" + Integer.toHexString(dir.getSize())); 4.57 - 4.58 System.out.println("DLL name: " + 
exports.getDLLName()); 4.59 System.out.println("Time/date stamp 0x" + Integer.toHexString(exports.getTimeDateStamp())); 4.60 System.out.println("Major version 0x" + Integer.toHexString(exports.getMajorVersion() & 0xFFFF)); 4.61 System.out.println("Minor version 0x" + Integer.toHexString(exports.getMinorVersion() & 0xFFFF)); 4.62 - System.out.println(exports.getNumberOfNamePointers() + " functions found"); 4.63 + System.out.println(exports.getNumberOfNamePointers() + " exports found"); 4.64 for (int i = 0; i < exports.getNumberOfNamePointers(); i++) { 4.65 - System.out.println(" 0x" + 4.66 - Integer.toHexString(exports.getExportAddress(exports.getExportOrdinal(i))) + 4.67 - " " + 4.68 - (exports.isExportAddressForwarder(exports.getExportOrdinal(i)) ? 4.69 - ("Forwarded to " + exports.getExportAddressForwarder(exports.getExportOrdinal(i))) : 4.70 - exports.getExportName(i))); 4.71 + short ordinal = exports.getExportOrdinal(i); 4.72 + System.out.print("[" + i + "] '" + exports.getExportName(i) + "': [" + 4.73 + ordinal + "] = 0x" + Integer.toHexString(exports.getExportAddress(ordinal))); 4.74 + System.out.println(exports.isExportAddressForwarder(ordinal) 4.75 + ? " Forwarded to '" + exports.getExportAddressForwarder(ordinal) + "'" 4.76 + : ""); 4.77 } 4.78 } 4.79 }
5.1 --- a/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/coff/TestParser.java Mon Dec 27 09:30:20 2010 -0500 5.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/win32/coff/TestParser.java Mon Dec 27 09:56:29 2010 -0500 5.3 @@ -1,5 +1,5 @@ 5.4 /* 5.5 - * Copyright (c) 2000, Oracle and/or its affiliates. All rights reserved. 5.6 + * Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. 5.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5.8 * 5.9 * This code is free software; you can redistribute it and/or modify it 5.10 @@ -42,8 +42,8 @@ 5.11 COFFHeader header = file.getHeader(); 5.12 int numSections = header.getNumberOfSections(); 5.13 System.out.println(numSections + " sections detected."); 5.14 - for (int i = 0; i < numSections; i++) { 5.15 - SectionHeader secHeader = header.getSectionHeader(1 + i); 5.16 + for (int i = 1; i <= numSections; i++) { 5.17 + SectionHeader secHeader = header.getSectionHeader(i); 5.18 System.out.println(secHeader.getName()); 5.19 } 5.20
6.1 --- a/agent/src/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgDebuggerLocal.java Mon Dec 27 09:30:20 2010 -0500 6.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/windbg/WindbgDebuggerLocal.java Mon Dec 27 09:56:29 2010 -0500 6.3 @@ -1,5 +1,5 @@ 6.4 /* 6.5 - * Copyright (c) 2002, 2008, Oracle and/or its affiliates. All rights reserved. 6.6 + * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. 6.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 6.8 * 6.9 * This code is free software; you can redistribute it and/or modify it 6.10 @@ -506,7 +506,6 @@ 6.11 throw new DebuggerException("Unimplemented"); 6.12 } 6.13 6.14 - private static String DTFWHome; 6.15 private static String imagePath; 6.16 private static String symbolPath; 6.17 private static boolean useNativeLookup; 6.18 @@ -514,81 +513,143 @@ 6.19 static { 6.20 6.21 /* 6.22 - * sawindbg.dll depends on dbgeng.dll which 6.23 - * itself depends on dbghelp.dll. dbgeng.dll and dbghelp.dll. 6.24 - * On systems newer than Windows 2000, these two .dlls are 6.25 - * in the standard system directory so we will find them there. 6.26 - * On Windows 2000 and earlier, these files do not exist. 6.27 - * The user must download Debugging Tools For Windows (DTFW) 6.28 - * and install it in order to use SA. 6.29 + * sawindbg.dll depends on dbgeng.dll which itself depends on 6.30 + * dbghelp.dll. We have to make sure that the dbgeng.dll and 6.31 + * dbghelp.dll that we load are compatible with each other. We 6.32 + * load both of those libraries from the same directory based 6.33 + * on the theory that co-located libraries are compatible. 6.34 * 6.35 - * We have to make sure we use the two files from the same directory 6.36 - * in case there are more than one copy on the system because 6.37 - * one version of dbgeng.dll might not be compatible with a 6.38 - * different version of dbghelp.dll. 6.39 - * We first look for them in the directory pointed at by 6.40 - * env. var. 
DEBUGGINGTOOLSFORWINDOWS, next in the default 6.41 - * installation dir for DTFW, and lastly in the standard 6.42 - * system directory. We expect that that we will find 6.43 - * them in the standard system directory on all systems 6.44 - * newer than Windows 2000. 6.45 + * On Windows 2000 and earlier, dbgeng.dll and dbghelp.dll were 6.46 + * not included as part of the standard system directory. On 6.47 + * systems newer than Windows 2000, dbgeng.dll and dbghelp.dll 6.48 + * are included in the standard system directory. However, the 6.49 + * versions included in the standard system directory may not 6.50 + * be able to handle symbol information for the newer compilers. 6.51 + * 6.52 + * We search for and explicitly load the libraries using the 6.53 + * following directory search order: 6.54 + * 6.55 + * - java.home/bin (same as $JAVA_HOME/jre/bin) 6.56 + * - dir named by DEBUGGINGTOOLSFORWINDOWS environment variable 6.57 + * - various "Debugging Tools For Windows" program directories 6.58 + * - the system directory ($SYSROOT/system32) 6.59 + * 6.60 + * If SA is invoked with -Dsun.jvm.hotspot.loadLibrary.DEBUG=1, 6.61 + * then debug messages about library loading are printed to 6.62 + * System.err. 6.63 */ 6.64 - String dirName = null; 6.65 - DTFWHome = System.getenv("DEBUGGINGTOOLSFORWINDOWS"); 6.66 6.67 - if (DTFWHome == null) { 6.68 - // See if we have the files in the default location. 
6.69 + String dbgengPath = null; 6.70 + String dbghelpPath = null; 6.71 + String sawindbgPath = null; 6.72 + List searchList = new ArrayList(); 6.73 + 6.74 + boolean loadLibraryDEBUG = 6.75 + System.getProperty("sun.jvm.hotspot.loadLibrary.DEBUG") != null; 6.76 + 6.77 + { 6.78 + // First place to search is co-located with sawindbg.dll in 6.79 + // $JAVA_HOME/jre/bin (java.home property is set to $JAVA_HOME/jre): 6.80 + searchList.add(System.getProperty("java.home") + File.separator + "bin"); 6.81 + sawindbgPath = (String) searchList.get(0) + File.separator + 6.82 + "sawindbg.dll"; 6.83 + 6.84 + // second place to search is specified by an environment variable: 6.85 + String DTFWHome = System.getenv("DEBUGGINGTOOLSFORWINDOWS"); 6.86 + if (DTFWHome != null) { 6.87 + searchList.add(DTFWHome); 6.88 + } 6.89 + 6.90 + // The third place to search is the install directory for the 6.91 + // "Debugging Tools For Windows" package; so far there are three 6.92 + // name variations that we know of: 6.93 String sysRoot = System.getenv("SYSTEMROOT"); 6.94 - DTFWHome = sysRoot + File.separator + 6.95 - ".." + File.separator + "Program Files" + 6.96 - File.separator + "Debugging Tools For Windows"; 6.97 + DTFWHome = sysRoot + File.separator + ".." + File.separator + 6.98 + "Program Files" + File.separator + "Debugging Tools For Windows"; 6.99 + searchList.add(DTFWHome); 6.100 + searchList.add(DTFWHome + " (x86)"); 6.101 + searchList.add(DTFWHome + " (x64)"); 6.102 + 6.103 + // The last place to search is the system directory: 6.104 + searchList.add(sysRoot + File.separator + "system32"); 6.105 } 6.106 6.107 - { 6.108 - String dbghelp = DTFWHome + File.separator + "dbghelp.dll"; 6.109 - String dbgeng = DTFWHome + File.separator + "dbgeng.dll"; 6.110 - File fhelp = new File(dbghelp); 6.111 - File feng = new File(dbgeng); 6.112 - if (fhelp.exists() && feng.exists()) { 6.113 - // found both, we are happy. 6.114 - // NOTE: The order of loads is important! 
If we load dbgeng.dll 6.115 - // first, then the dependency - dbghelp.dll - will be loaded 6.116 - // from usual DLL search thereby defeating the purpose! 6.117 - System.load(dbghelp); 6.118 - System.load(dbgeng); 6.119 - } else if (! fhelp.exists() && ! feng.exists()) { 6.120 - // neither exist. We will ignore this dir and assume 6.121 - // they are in the system dir. 6.122 - DTFWHome = null; 6.123 - } else { 6.124 - // one exists but not the other 6.125 - //System.err.println("Error: Both files dbghelp.dll and dbgeng.dll " 6.126 - // "must exist in directory " + DTFWHome); 6.127 - throw new UnsatisfiedLinkError("Both files dbghelp.dll and " + 6.128 - "dbgeng.dll must exist in " + 6.129 - "directory " + DTFWHome); 6.130 + for (int i = 0; i < searchList.size(); i++) { 6.131 + File dir = new File((String) searchList.get(i)); 6.132 + if (!dir.exists()) { 6.133 + if (loadLibraryDEBUG) { 6.134 + System.err.println("DEBUG: '" + searchList.get(i) + 6.135 + "': directory does not exist."); 6.136 + } 6.137 + // this search directory doesn't exist so skip it 6.138 + continue; 6.139 } 6.140 - } 6.141 - if (DTFWHome == null) { 6.142 - // The files better be in the system dir. 6.143 - String sysDir = System.getenv("SYSTEMROOT") + 6.144 - File.separator + "system32"; 6.145 6.146 - File feng = new File(sysDir + File.separator + "dbgeng.dll"); 6.147 - if (!feng.exists()) { 6.148 - throw new UnsatisfiedLinkError("File dbgeng.dll does not exist in " + 6.149 - sysDir + ". 
Please search microsoft.com " + 6.150 - "for Debugging Tools For Windows, and " + 6.151 - "either download it to the default " + 6.152 - "location, or download it to a custom " + 6.153 - "location and set environment variable " + 6.154 - " DEBUGGINGTOOLSFORWINDOWS " + 6.155 - "to the pathname of that location."); 6.156 + dbgengPath = (String) searchList.get(i) + File.separator + "dbgeng.dll"; 6.157 + dbghelpPath = (String) searchList.get(i) + File.separator + "dbghelp.dll"; 6.158 + 6.159 + File feng = new File(dbgengPath); 6.160 + File fhelp = new File(dbghelpPath); 6.161 + if (feng.exists() && fhelp.exists()) { 6.162 + // both files exist so we have a match 6.163 + break; 6.164 } 6.165 + 6.166 + // At least one of the files does not exist; no warning if both 6.167 + // don't exist. If just one doesn't exist then we don't check 6.168 + // loadLibraryDEBUG because we have a mis-configured system. 6.169 + if (feng.exists()) { 6.170 + System.err.println("WARNING: found '" + dbgengPath + 6.171 + "' but did not find '" + dbghelpPath + "'; ignoring '" + 6.172 + dbgengPath + "'."); 6.173 + } else if (fhelp.exists()) { 6.174 + System.err.println("WARNING: found '" + dbghelpPath + 6.175 + "' but did not find '" + dbgengPath + "'; ignoring '" + 6.176 + dbghelpPath + "'."); 6.177 + } else if (loadLibraryDEBUG) { 6.178 + System.err.println("DEBUG: searched '" + searchList.get(i) + 6.179 + "': dbgeng.dll and dbghelp.dll were not found."); 6.180 + } 6.181 + dbgengPath = null; 6.182 + dbghelpPath = null; 6.183 } 6.184 6.185 + if (dbgengPath == null || dbghelpPath == null) { 6.186 + // at least one of the files wasn't found anywhere we searched 6.187 + String mesg = null; 6.188 + 6.189 + if (dbgengPath == null && dbghelpPath == null) { 6.190 + mesg = "dbgeng.dll and dbghelp.dll cannot be found. "; 6.191 + } else if (dbgengPath == null) { 6.192 + mesg = "dbgeng.dll cannot be found (dbghelp.dll was found). 
"; 6.193 + } else { 6.194 + mesg = "dbghelp.dll cannot be found (dbgeng.dll was found). "; 6.195 + } 6.196 + throw new UnsatisfiedLinkError(mesg + 6.197 + "Please search microsoft.com for 'Debugging Tools For Windows', " + 6.198 + "and either download it to the default location, or download it " + 6.199 + "to a custom location and set environment variable " + 6.200 + "'DEBUGGINGTOOLSFORWINDOWS' to the pathname of that location."); 6.201 + } 6.202 + 6.203 + // NOTE: The order of loads is important! If we load dbgeng.dll 6.204 + // first, then the dependency - dbghelp.dll - will be loaded 6.205 + // from usual DLL search thereby defeating the purpose! 6.206 + if (loadLibraryDEBUG) { 6.207 + System.err.println("DEBUG: loading '" + dbghelpPath + "'."); 6.208 + } 6.209 + System.load(dbghelpPath); 6.210 + if (loadLibraryDEBUG) { 6.211 + System.err.println("DEBUG: loading '" + dbgengPath + "'."); 6.212 + } 6.213 + System.load(dbgengPath); 6.214 + 6.215 // Now, load sawindbg.dll 6.216 - System.loadLibrary("sawindbg"); 6.217 + if (loadLibraryDEBUG) { 6.218 + System.err.println("DEBUG: loading '" + sawindbgPath + "'."); 6.219 + } 6.220 + System.load(sawindbgPath); 6.221 + 6.222 // where do I find '.exe', '.dll' files? 6.223 imagePath = System.getProperty("sun.jvm.hotspot.debugger.windbg.imagePath"); 6.224 if (imagePath == null) {
7.1 --- a/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java Mon Dec 27 09:30:20 2010 -0500 7.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/oops/ConstantPool.java Mon Dec 27 09:56:29 2010 -0500 7.3 @@ -60,10 +60,7 @@ 7.4 headerSize = type.getSize(); 7.5 elementSize = 0; 7.6 // fetch constants: 7.7 - MULTI_OPERAND_COUNT_OFFSET = db.lookupIntConstant("constantPoolOopDesc::_multi_operand_count_offset").intValue(); 7.8 - MULTI_OPERAND_BASE_OFFSET = db.lookupIntConstant("constantPoolOopDesc::_multi_operand_base_offset").intValue(); 7.9 INDY_BSM_OFFSET = db.lookupIntConstant("constantPoolOopDesc::_indy_bsm_offset").intValue(); 7.10 - INDY_NT_OFFSET = db.lookupIntConstant("constantPoolOopDesc::_indy_nt_offset").intValue(); 7.11 INDY_ARGC_OFFSET = db.lookupIntConstant("constantPoolOopDesc::_indy_argc_offset").intValue(); 7.12 INDY_ARGV_OFFSET = db.lookupIntConstant("constantPoolOopDesc::_indy_argv_offset").intValue(); 7.13 } 7.14 @@ -83,10 +80,7 @@ 7.15 private static long headerSize; 7.16 private static long elementSize; 7.17 7.18 - private static int MULTI_OPERAND_COUNT_OFFSET; 7.19 - private static int MULTI_OPERAND_BASE_OFFSET; 7.20 private static int INDY_BSM_OFFSET; 7.21 - private static int INDY_NT_OFFSET; 7.22 private static int INDY_ARGC_OFFSET; 7.23 private static int INDY_ARGV_OFFSET; 7.24 7.25 @@ -296,20 +290,23 @@ 7.26 } 7.27 7.28 /** Lookup for multi-operand (InvokeDynamic) entries. 
*/ 7.29 - public int[] getMultiOperandsAt(int i) { 7.30 + public short[] getBootstrapSpecifierAt(int i) { 7.31 if (Assert.ASSERTS_ENABLED) { 7.32 Assert.that(getTagAt(i).isInvokeDynamic(), "Corrupted constant pool"); 7.33 } 7.34 - int pos = this.getIntAt(i); 7.35 - int countPos = pos + MULTI_OPERAND_COUNT_OFFSET; // == pos-1 7.36 - int basePos = pos + MULTI_OPERAND_BASE_OFFSET; // == pos 7.37 - if (countPos < 0) return null; // safety first 7.38 + if (getTagAt(i).value() == JVM_CONSTANT_InvokeDynamicTrans) 7.39 + return null; 7.40 + int bsmSpec = extractLowShortFromInt(this.getIntAt(i)); 7.41 TypeArray operands = getOperands(); 7.42 if (operands == null) return null; // safety first 7.43 - int length = operands.getIntAt(countPos); 7.44 - int[] values = new int[length]; 7.45 - for (int j = 0; j < length; j++) { 7.46 - values[j] = operands.getIntAt(basePos+j); 7.47 + int basePos = VM.getVM().buildIntFromShorts(operands.getShortAt(bsmSpec * 2 + 0), 7.48 + operands.getShortAt(bsmSpec * 2 + 1)); 7.49 + int argv = basePos + INDY_ARGV_OFFSET; 7.50 + int argc = operands.getShortAt(basePos + INDY_ARGC_OFFSET); 7.51 + int endPos = argv + argc; 7.52 + short[] values = new short[endPos - basePos]; 7.53 + for (int j = 0; j < values.length; j++) { 7.54 + values[j] = operands.getShortAt(basePos+j); 7.55 } 7.56 return values; 7.57 } 7.58 @@ -334,6 +331,7 @@ 7.59 case JVM_CONSTANT_MethodHandle: return "JVM_CONSTANT_MethodHandle"; 7.60 case JVM_CONSTANT_MethodType: return "JVM_CONSTANT_MethodType"; 7.61 case JVM_CONSTANT_InvokeDynamic: return "JVM_CONSTANT_InvokeDynamic"; 7.62 + case JVM_CONSTANT_InvokeDynamicTrans: return "JVM_CONSTANT_InvokeDynamic/transitional"; 7.63 case JVM_CONSTANT_Invalid: return "JVM_CONSTANT_Invalid"; 7.64 case JVM_CONSTANT_UnresolvedClass: return "JVM_CONSTANT_UnresolvedClass"; 7.65 case JVM_CONSTANT_UnresolvedClassInError: return "JVM_CONSTANT_UnresolvedClassInError"; 7.66 @@ -393,6 +391,7 @@ 7.67 case JVM_CONSTANT_MethodHandle: 7.68 case 
JVM_CONSTANT_MethodType: 7.69 case JVM_CONSTANT_InvokeDynamic: 7.70 + case JVM_CONSTANT_InvokeDynamicTrans: 7.71 visitor.doInt(new IntField(new NamedFieldIdentifier(nameForTag(ctag)), indexOffset(index), true), true); 7.72 break; 7.73 } 7.74 @@ -556,19 +555,16 @@ 7.75 break; 7.76 } 7.77 7.78 + case JVM_CONSTANT_InvokeDynamicTrans: 7.79 case JVM_CONSTANT_InvokeDynamic: { 7.80 dos.writeByte(cpConstType); 7.81 - int[] values = getMultiOperandsAt(ci); 7.82 - for (int vn = 0; vn < values.length; vn++) { 7.83 - dos.writeShort(values[vn]); 7.84 - } 7.85 - int bootstrapMethodIndex = values[INDY_BSM_OFFSET]; 7.86 - int nameAndTypeIndex = values[INDY_NT_OFFSET]; 7.87 - int argumentCount = values[INDY_ARGC_OFFSET]; 7.88 - assert(INDY_ARGV_OFFSET + argumentCount == values.length); 7.89 - if (DEBUG) debugMessage("CP[" + ci + "] = indy BSM = " + bootstrapMethodIndex 7.90 - + ", N&T = " + nameAndTypeIndex 7.91 - + ", argc = " + argumentCount); 7.92 + int value = getIntAt(ci); 7.93 + short bsmIndex = (short) extractLowShortFromInt(value); 7.94 + short nameAndTypeIndex = (short) extractHighShortFromInt(value); 7.95 + dos.writeShort(bsmIndex); 7.96 + dos.writeShort(nameAndTypeIndex); 7.97 + if (DEBUG) debugMessage("CP[" + ci + "] = indy BSM = " + bsmIndex 7.98 + + ", N&T = " + nameAndTypeIndex); 7.99 break; 7.100 } 7.101
8.1 --- a/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassWriter.java Mon Dec 27 09:30:20 2010 -0500 8.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassWriter.java Mon Dec 27 09:56:29 2010 -0500 8.3 @@ -321,13 +321,16 @@ 8.4 break; 8.5 } 8.6 8.7 + case JVM_CONSTANT_InvokeDynamicTrans: 8.8 case JVM_CONSTANT_InvokeDynamic: { 8.9 dos.writeByte(cpConstType); 8.10 - int[] values = cpool.getMultiOperandsAt(ci); 8.11 - for (int vn = 0; vn < values.length; vn++) { 8.12 - dos.writeShort(values[vn]); 8.13 - } 8.14 - if (DEBUG) debugMessage("CP[" + ci + "] = INDY indexes = " + Arrays.toString(values)); 8.15 + int value = cpool.getIntAt(ci); 8.16 + short bsmIndex = (short) extractLowShortFromInt(value); 8.17 + short nameAndTypeIndex = (short) extractHighShortFromInt(value); 8.18 + dos.writeShort(bsmIndex); 8.19 + dos.writeShort(nameAndTypeIndex); 8.20 + if (DEBUG) debugMessage("CP[" + ci + "] = INDY bsm = " + 8.21 + bsmIndex + ", N&T = " + nameAndTypeIndex); 8.22 break; 8.23 } 8.24
9.1 --- a/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java Mon Dec 27 09:30:20 2010 -0500 9.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/ui/classbrowser/HTMLGenerator.java Mon Dec 27 09:56:29 2010 -0500 9.3 @@ -30,6 +30,7 @@ 9.4 import sun.jvm.hotspot.asm.sparc.*; 9.5 import sun.jvm.hotspot.asm.x86.*; 9.6 import sun.jvm.hotspot.asm.ia64.*; 9.7 +import sun.jvm.hotspot.asm.amd64.*; 9.8 import sun.jvm.hotspot.code.*; 9.9 import sun.jvm.hotspot.compiler.*; 9.10 import sun.jvm.hotspot.debugger.*; 9.11 @@ -198,6 +199,8 @@ 9.12 cpuHelper = new SPARCHelper(); 9.13 } else if (cpu.equals("x86")) { 9.14 cpuHelper = new X86Helper(); 9.15 + } else if (cpu.equals("amd64")) { 9.16 + cpuHelper = new AMD64Helper(); 9.17 } else if (cpu.equals("ia64")) { 9.18 cpuHelper = new IA64Helper(); 9.19 } else { 9.20 @@ -460,7 +463,8 @@ 9.21 return buf.toString(); 9.22 } 9.23 9.24 - private String genListOfShort(int[] values) { 9.25 + private String genListOfShort(short[] values) { 9.26 + if (values == null || values.length == 0) return ""; 9.27 Formatter buf = new Formatter(genHTML); 9.28 buf.append('['); 9.29 for (int i = 0; i < values.length; i++) { 9.30 @@ -594,9 +598,11 @@ 9.31 buf.cell(Integer.toString(cpool.getIntAt(index))); 9.32 break; 9.33 9.34 + case JVM_CONSTANT_InvokeDynamicTrans: 9.35 case JVM_CONSTANT_InvokeDynamic: 9.36 buf.cell("JVM_CONSTANT_InvokeDynamic"); 9.37 - buf.cell(genListOfShort(cpool.getMultiOperandsAt(index))); 9.38 + buf.cell(genLowHighShort(cpool.getIntAt(index)) + 9.39 + genListOfShort(cpool.getBootstrapSpecifierAt(index))); 9.40 break; 9.41 9.42 default:
10.1 --- a/agent/src/share/classes/sun/jvm/hotspot/utilities/ConstantTag.java Mon Dec 27 09:30:20 2010 -0500 10.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/utilities/ConstantTag.java Mon Dec 27 09:56:29 2010 -0500 10.3 @@ -40,7 +40,7 @@ 10.4 private static int JVM_CONSTANT_NameAndType = 12; 10.5 private static int JVM_CONSTANT_MethodHandle = 15; // JSR 292 10.6 private static int JVM_CONSTANT_MethodType = 16; // JSR 292 10.7 - // static int JVM_CONSTANT_InvokeDynamicTrans = 17; // JSR 292, only occurs in old class files 10.8 + private static int JVM_CONSTANT_InvokeDynamicTrans = 17; // JSR 292, only occurs in old class files 10.9 private static int JVM_CONSTANT_InvokeDynamic = 18; // JSR 292 10.10 private static int JVM_CONSTANT_Invalid = 0; // For bad value initialization 10.11 private static int JVM_CONSTANT_UnresolvedClass = 100; // Temporary tag until actual use 10.12 @@ -67,6 +67,8 @@ 10.13 this.tag = tag; 10.14 } 10.15 10.16 + public int value() { return tag; } 10.17 + 10.18 public boolean isKlass() { return tag == JVM_CONSTANT_Class; } 10.19 public boolean isField () { return tag == JVM_CONSTANT_Fieldref; } 10.20 public boolean isMethod() { return tag == JVM_CONSTANT_Methodref; } 10.21 @@ -81,6 +83,7 @@ 10.22 public boolean isMethodHandle() { return tag == JVM_CONSTANT_MethodHandle; } 10.23 public boolean isMethodType() { return tag == JVM_CONSTANT_MethodType; } 10.24 public boolean isInvokeDynamic() { return tag == JVM_CONSTANT_InvokeDynamic; } 10.25 + public boolean isInvokeDynamicTrans() { return tag == JVM_CONSTANT_InvokeDynamicTrans; } 10.26 10.27 public boolean isInvalid() { return tag == JVM_CONSTANT_Invalid; } 10.28
11.1 --- a/make/linux/makefiles/buildtree.make Mon Dec 27 09:30:20 2010 -0500 11.2 +++ b/make/linux/makefiles/buildtree.make Mon Dec 27 09:56:29 2010 -0500 11.3 @@ -124,7 +124,7 @@ 11.4 BUILDTREE_MAKE = $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make 11.5 11.6 BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make sa.make \ 11.7 - env.sh env.csh .dbxrc test_gamma 11.8 + env.sh env.csh jdkpath.sh .dbxrc test_gamma 11.9 11.10 BUILDTREE_VARS = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \ 11.11 ARCH=$(ARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT) 11.12 @@ -318,6 +318,13 @@ 11.13 sed -n 's/^\([A-Za-z_][A-Za-z0-9_]*\)=/setenv \1 /p' $?; \ 11.14 ) > $@ 11.15 11.16 +jdkpath.sh: $(BUILDTREE_MAKE) 11.17 + @echo Creating $@ ... 11.18 + $(QUIETLY) ( \ 11.19 + $(BUILDTREE_COMMENT); \ 11.20 + echo "JDK=${JAVA_HOME}"; \ 11.21 + ) > $@ 11.22 + 11.23 .dbxrc: $(BUILDTREE_MAKE) 11.24 @echo Creating $@ ... 11.25 $(QUIETLY) ( \
12.1 --- a/make/linux/makefiles/vm.make Mon Dec 27 09:30:20 2010 -0500 12.2 +++ b/make/linux/makefiles/vm.make Mon Dec 27 09:56:29 2010 -0500 12.3 @@ -168,7 +168,9 @@ 12.4 12.5 # Locate all source files in the given directory, excluding files in Src_Files_EXCLUDE. 12.6 define findsrc 12.7 - $(notdir $(shell find $(1) \( -name \*.c -o -name \*.cpp -o -name \*.s \) -a \! \( -name DUMMY $(addprefix -o -name ,$(Src_Files_EXCLUDE)) \) )) 12.8 + $(notdir $(shell find $(1)/. ! -name . -prune \ 12.9 + -a \( -name \*.c -o -name \*.cpp -o -name \*.s \) \ 12.10 + -a ! \( -name DUMMY $(addprefix -o -name ,$(Src_Files_EXCLUDE)) \))) 12.11 endef 12.12 12.13 Src_Files := $(foreach e,$(Src_Dirs),$(call findsrc,$(e)))
13.1 --- a/make/solaris/makefiles/buildtree.make Mon Dec 27 09:30:20 2010 -0500 13.2 +++ b/make/solaris/makefiles/buildtree.make Mon Dec 27 09:56:29 2010 -0500 13.3 @@ -117,7 +117,7 @@ 13.4 BUILDTREE_MAKE = $(GAMMADIR)/make/$(OS_FAMILY)/makefiles/buildtree.make 13.5 13.6 BUILDTREE_TARGETS = Makefile flags.make flags_vm.make vm.make adlc.make jvmti.make sa.make \ 13.7 - env.ksh env.csh .dbxrc test_gamma 13.8 + env.ksh env.csh jdkpath.sh .dbxrc test_gamma 13.9 13.10 BUILDTREE_VARS = GAMMADIR=$(GAMMADIR) OS_FAMILY=$(OS_FAMILY) \ 13.11 ARCH=$(ARCH) BUILDARCH=$(BUILDARCH) LIBARCH=$(LIBARCH) VARIANT=$(VARIANT) 13.12 @@ -314,6 +314,13 @@ 13.13 sed -n 's/^\([A-Za-z_][A-Za-z0-9_]*\)=/setenv \1 /p' $?; \ 13.14 ) > $@ 13.15 13.16 +jdkpath.sh: $(BUILDTREE_MAKE) 13.17 + @echo Creating $@ ... 13.18 + $(QUIETLY) ( \ 13.19 + $(BUILDTREE_COMMENT); \ 13.20 + echo "JDK=${JAVA_HOME}"; \ 13.21 + ) > $@ 13.22 + 13.23 .dbxrc: $(BUILDTREE_MAKE) 13.24 @echo Creating $@ ... 13.25 $(QUIETLY) ( \
14.1 --- a/make/solaris/makefiles/vm.make Mon Dec 27 09:30:20 2010 -0500 14.2 +++ b/make/solaris/makefiles/vm.make Mon Dec 27 09:56:29 2010 -0500 14.3 @@ -184,7 +184,9 @@ 14.4 14.5 # Locate all source files in the given directory, excluding files in Src_Files_EXCLUDE. 14.6 define findsrc 14.7 - $(notdir $(shell find $(1) \( -name \*.c -o -name \*.cpp -o -name \*.s \) -a \! \( -name DUMMY $(addprefix -o -name ,$(Src_Files_EXCLUDE)) \) )) 14.8 + $(notdir $(shell find $(1)/. ! -name . -prune \ 14.9 + -a \( -name \*.c -o -name \*.cpp -o -name \*.s \) \ 14.10 + -a ! \( -name DUMMY $(addprefix -o -name ,$(Src_Files_EXCLUDE)) \))) 14.11 endef 14.12 14.13 Src_Files := $(foreach e,$(Src_Dirs),$(call findsrc,$(e)))
15.1 --- a/make/windows/build_vm_def.sh Mon Dec 27 09:30:20 2010 -0500 15.2 +++ b/make/windows/build_vm_def.sh Mon Dec 27 09:56:29 2010 -0500 15.3 @@ -1,5 +1,5 @@ 15.4 # 15.5 -# Copyright (c) 2000, 2009, Oracle and/or its affiliates. All rights reserved. 15.6 +# Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved. 15.7 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 15.8 # 15.9 # This code is free software; you can redistribute it and/or modify it 15.10 @@ -45,6 +45,9 @@ 15.11 echo "EXPORTS" > vm1.def 15.12 15.13 AWK="$MKS_HOME/awk.exe" 15.14 +if [ ! -e $AWK ]; then 15.15 + AWK="$MKS_HOME/gawk.exe" 15.16 +fi 15.17 GREP="$MKS_HOME/grep.exe" 15.18 SORT="$MKS_HOME/sort.exe" 15.19 UNIQ="$MKS_HOME/uniq.exe" 15.20 @@ -57,7 +60,7 @@ 15.21 LINK_VER="$1" 15.22 fi 15.23 15.24 -if [ "x$LINK_VER" != "x800" -a "x$LINK_VER" != "x900" ]; then 15.25 +if [ "x$LINK_VER" != "x800" -a "x$LINK_VER" != "x900" -a "x$LINK_VER" != "x1000" ]; then 15.26 $DUMPBIN /symbols *.obj | "$GREP" "??_7.*@@6B@" | "$GREP" -v "type_info" | "$AWK" '{print $7}' | "$SORT" | "$UNIQ" > vm2.def 15.27 else 15.28 # Can't use pipes when calling cl.exe or link.exe from IDE. Using transit file vm3.def
16.1 --- a/make/windows/create.bat Mon Dec 27 09:30:20 2010 -0500 16.2 +++ b/make/windows/create.bat Mon Dec 27 09:56:29 2010 -0500 16.3 @@ -36,6 +36,20 @@ 16.4 REM Note: Running this batch file from the Windows command shell requires 16.5 REM that "grep" be accessible on the PATH. An MKS install does this. 16.6 REM 16.7 + 16.8 +cl 2>NUL >NUL 16.9 +if %errorlevel% == 0 goto nexttest 16.10 +echo Make sure cl.exe is in your PATH before running this script. 16.11 +goto end 16.12 + 16.13 +:nexttest 16.14 +grep -V 2>NUL >NUL 16.15 +if %errorlevel% == 0 goto testit 16.16 +echo Make sure grep.exe is in your PATH before running this script. Either cygwin or MKS should work. 16.17 +goto end 16.18 + 16.19 + 16.20 +:testit 16.21 cl 2>&1 | grep "IA-64" >NUL 16.22 if %errorlevel% == 0 goto isia64 16.23 cl 2>&1 | grep "AMD64" >NUL 16.24 @@ -44,37 +58,40 @@ 16.25 set BUILDARCH=i486 16.26 set Platform_arch=x86 16.27 set Platform_arch_model=x86_32 16.28 -goto end 16.29 +goto done 16.30 :amd64 16.31 set ARCH=x86 16.32 set BUILDARCH=amd64 16.33 set Platform_arch=x86 16.34 set Platform_arch_model=x86_64 16.35 -goto end 16.36 +goto done 16.37 :isia64 16.38 set ARCH=ia64 16.39 set BUILDARCH=ia64 16.40 set Platform_arch=ia64 16.41 set Platform_arch_model=ia64 16.42 -:end 16.43 +:done 16.44 16.45 setlocal 16.46 16.47 if "%1" == "" goto usage 16.48 16.49 -if not "%4" == "" goto usage 16.50 +if not "%2" == "" goto usage 16.51 16.52 -set HotSpotWorkSpace=%1 16.53 -set HotSpotBuildSpace=%2 16.54 -set HotSpotJDKDist=%3 16.55 +REM Set HotSpotWorkSpace to the directy two steps above this script 16.56 +for %%i in ("%~dp0..") do ( set HotSpotWorkSpace=%%~dpi) 16.57 +set HotSpotBuildRoot=%HotSpotWorkSpace%build 16.58 +set HotSpotBuildSpace=%HotSpotBuildRoot%\vs 16.59 +set HotSpotJDKDist=%1 16.60 + 16.61 16.62 REM figure out MSC version 16.63 for /F %%i in ('sh %HotSpotWorkSpace%/make/windows/get_msc_ver.sh') do set %%i 16.64 16.65 echo ************************************************************** 
16.66 -set ProjectFile=vm.vcproj 16.67 +set ProjectFile=jvm.vcproj 16.68 if "%MSC_VER%" == "1200" ( 16.69 -set ProjectFile=vm.dsp 16.70 +set ProjectFile=jvm.dsp 16.71 echo Will generate VC6 project {unsupported} 16.72 ) else ( 16.73 if "%MSC_VER%" == "1400" ( 16.74 @@ -83,10 +100,16 @@ 16.75 if "%MSC_VER%" == "1500" ( 16.76 echo Will generate VC9 {Visual Studio 2008} 16.77 ) else ( 16.78 +if "%MSC_VER%" == "1600" ( 16.79 +echo Detected Visual Studio 2010, but 16.80 +echo will generate VC9 {Visual Studio 2008} 16.81 +echo Use conversion wizard in VS 2010. 16.82 +) else ( 16.83 echo Will generate VC7 project {Visual Studio 2003 .NET} 16.84 ) 16.85 ) 16.86 ) 16.87 +) 16.88 echo %ProjectFile% 16.89 echo ************************************************************** 16.90 16.91 @@ -118,6 +141,8 @@ 16.92 16.93 :test3 16.94 if not "%HOTSPOTMKSHOME%" == "" goto makedir 16.95 +if exist c:\cygwin\bin set HOTSPOTMKSHOME=c:\cygwin\bin 16.96 +if not "%HOTSPOTMKSHOME%" == "" goto makedir 16.97 echo Warning: please set variable HOTSPOTMKSHOME to place where 16.98 echo your MKS/Cygwin installation is 16.99 echo. 16.100 @@ -133,21 +158,24 @@ 16.101 REM This is now safe to do. 16.102 :copyfiles 16.103 for /D %%i in (compiler1, compiler2, tiered, core, kernel) do ( 16.104 -if NOT EXIST %HotSpotBuildSpace%\%%i mkdir %HotSpotBuildSpace%\%%i 16.105 -copy %HotSpotWorkSpace%\make\windows\projectfiles\%%i\* %HotSpotBuildSpace%\%%i\ > NUL 16.106 +if NOT EXIST %HotSpotBuildSpace%\%%i\generated mkdir %HotSpotBuildSpace%\%%i\generated 16.107 +copy %HotSpotWorkSpace%\make\windows\projectfiles\%%i\* %HotSpotBuildSpace%\%%i\generated > NUL 16.108 ) 16.109 16.110 REM force regneration of ProjectFile 16.111 if exist %HotSpotBuildSpace%\%ProjectFile% del %HotSpotBuildSpace%\%ProjectFile% 16.112 16.113 for /D %%i in (compiler1, compiler2, tiered, core, kernel) do ( 16.114 - 16.115 -echo # Generated file! > %HotSpotBuildSpace%\%%i\local.make 16.116 +echo -- %%i -- 16.117 +echo # Generated file! 
> %HotSpotBuildSpace%\%%i\local.make 16.118 echo # Changing a variable below and then deleting %ProjectFile% will cause >> %HotSpotBuildSpace%\%%i\local.make 16.119 echo # %ProjectFile% to be regenerated with the new values. Changing the >> %HotSpotBuildSpace%\%%i\local.make 16.120 -echo # version requires rerunning create.bat. >> %HotSpotBuildSpace%\%%i\local.make 16.121 +echo # version requires rerunning create.bat. >> %HotSpotBuildSpace%\%%i\local.make 16.122 echo. >> %HotSpotBuildSpace%\%%i\local.make 16.123 +echo Variant=%%i >> %HotSpotBuildSpace%\%%i\local.make 16.124 +echo WorkSpace=%HotSpotWorkSpace% >> %HotSpotBuildSpace%\%%i\local.make 16.125 echo HOTSPOTWORKSPACE=%HotSpotWorkSpace% >> %HotSpotBuildSpace%\%%i\local.make 16.126 +echo HOTSPOTBUILDROOT=%HotSpotBuildRoot% >> %HotSpotBuildSpace%\%%i\local.make 16.127 echo HOTSPOTBUILDSPACE=%HotSpotBuildSpace% >> %HotSpotBuildSpace%\%%i\local.make 16.128 echo HOTSPOTJDKDIST=%HotSpotJDKDist% >> %HotSpotBuildSpace%\%%i\local.make 16.129 echo ARCH=%ARCH% >> %HotSpotBuildSpace%\%%i\local.make 16.130 @@ -155,42 +183,35 @@ 16.131 echo Platform_arch=%Platform_arch% >> %HotSpotBuildSpace%\%%i\local.make 16.132 echo Platform_arch_model=%Platform_arch_model% >> %HotSpotBuildSpace%\%%i\local.make 16.133 16.134 -pushd %HotSpotBuildSpace%\%%i 16.135 +for /D %%j in (debug, fastdebug, product) do ( 16.136 +if NOT EXIST %HotSpotBuildSpace%\%%i\%%j mkdir %HotSpotBuildSpace%\%%i\%%j 16.137 +) 16.138 + 16.139 +pushd %HotSpotBuildSpace%\%%i\generated 16.140 nmake /nologo 16.141 popd 16.142 16.143 ) 16.144 16.145 -pushd %HotSpotBuildSpace% 16.146 +pushd %HotSpotBuildRoot% 16.147 16.148 -echo # Generated file! > local.make 16.149 -echo # Changing a variable below and then deleting %ProjectFile% will cause >> local.make 16.150 -echo # %ProjectFile% to be regenerated with the new values. Changing the >> local.make 16.151 -echo # version requires rerunning create.bat. >> local.make 16.152 -echo. 
>> local.make 16.153 -echo HOTSPOTWORKSPACE=%HotSpotWorkSpace% >> local.make 16.154 -echo HOTSPOTBUILDSPACE=%HotSpotBuildSpace% >> local.make 16.155 -echo HOTSPOTJDKDIST=%HotSpotJDKDist% >> local.make 16.156 -echo ARCH=%ARCH% >> local.make 16.157 -echo BUILDARCH=%BUILDARCH% >> local.make 16.158 -echo Platform_arch=%Platform_arch% >> local.make 16.159 -echo Platform_arch_model=%Platform_arch_model% >> local.make 16.160 - 16.161 -nmake /nologo /F %HotSpotWorkSpace%/make/windows/projectfiles/common/Makefile %HotSpotBuildSpace%/%ProjectFile% 16.162 +REM It doesn't matter which variant we use here, "compiler1" is as good as any of the others - we need the common variables 16.163 +nmake /nologo /F %HotSpotWorkSpace%/make/windows/projectfiles/common/Makefile LOCAL_MAKE=%HotSpotBuildSpace%\compiler1\local.make %HotSpotBuildRoot%/%ProjectFile% 16.164 16.165 popd 16.166 16.167 goto end 16.168 16.169 :usage 16.170 -echo Usage: create HotSpotWorkSpace HotSpotBuildSpace HotSpotJDKDist 16.171 +echo Usage: create HotSpotJDKDist 16.172 echo. 16.173 -echo This is the interactive build setup script (as opposed to the batch 16.174 -echo build execution script). It creates HotSpotBuildSpace if necessary, 16.175 -echo copies the appropriate files out of HotSpotWorkSpace into it, and 16.176 +echo This is the VS build setup script (as opposed to the batch 16.177 +echo build execution script). It creates a build directory if necessary, 16.178 +echo copies the appropriate files out of the workspace into it, and 16.179 echo builds and runs ProjectCreator in it. This has the side-effect of creating 16.180 echo the %ProjectFile% file in the build space, which is then used in Visual C++. 16.181 -echo The HotSpotJDKDist defines place where JVM binaries should be placed. 16.182 +echo. 16.183 +echo The HotSpotJDKDist defines the JDK that should be used when running the JVM. 16.184 echo Environment variable FORCE_MSC_VER allows to override MSVC version autodetection. 16.185 echo. 
16.186 echo NOTE that it is now NOT safe to modify any of the files in the build
17.1 --- a/make/windows/create_obj_files.sh Mon Dec 27 09:30:20 2010 -0500 17.2 +++ b/make/windows/create_obj_files.sh Mon Dec 27 09:56:29 2010 -0500 17.3 @@ -107,8 +107,12 @@ 17.4 "x86_64") Src_Files_EXCLUDE="${Src_Files_EXCLUDE} *x86_32*" ;; 17.5 esac 17.6 17.7 +# Locate all source files in the given directory, excluding files in Src_Files_EXCLUDE. 17.8 function findsrc { 17.9 - $FIND ${1} \( -name \*.c -o -name \*.cpp -o -name \*.s \) -a \! \( -name ${Src_Files_EXCLUDE// / -o -name } \) | sed 's/.*\/\(.*\)/\1/'; 17.10 + $FIND ${1}/. ! -name . -prune \ 17.11 + -a \( -name \*.c -o -name \*.cpp -o -name \*.s \) \ 17.12 + -a \! \( -name ${Src_Files_EXCLUDE// / -o -name } \) \ 17.13 + | sed 's/.*\/\(.*\)/\1/'; 17.14 } 17.15 17.16 Src_Files=
18.1 --- a/make/windows/makefiles/adlc.make Mon Dec 27 09:30:20 2010 -0500 18.2 +++ b/make/windows/makefiles/adlc.make Mon Dec 27 09:56:29 2010 -0500 18.3 @@ -22,7 +22,6 @@ 18.4 # 18.5 # 18.6 18.7 -!include $(WorkSpace)/make/windows/makefiles/compile.make 18.8 18.9 # Rules for building adlc.exe 18.10 18.11 @@ -46,15 +45,7 @@ 18.12 ADLCFLAGS=-q -T -U_LP64 18.13 !endif 18.14 18.15 -CPP_FLAGS=$(CPP_FLAGS) \ 18.16 - /D TARGET_OS_FAMILY_windows \ 18.17 - /D TARGET_ARCH_$(Platform_arch) \ 18.18 - /D TARGET_ARCH_MODEL_$(Platform_arch_model) \ 18.19 - /D TARGET_OS_ARCH_windows_$(Platform_arch) \ 18.20 - /D TARGET_OS_ARCH_MODEL_windows_$(Platform_arch_model) \ 18.21 - /D TARGET_COMPILER_visCPP 18.22 - 18.23 -CPP_FLAGS=$(CPP_FLAGS) /D _CRT_SECURE_NO_WARNINGS /D _CRT_SECURE_NO_DEPRECATE 18.24 +ADLC_CPP_FLAGS=$(CPP_FLAGS) /D _CRT_SECURE_NO_WARNINGS /D _CRT_SECURE_NO_DEPRECATE 18.25 18.26 CPP_INCLUDE_DIRS=\ 18.27 /I "..\generated" \ 18.28 @@ -92,10 +83,10 @@ 18.29 $(AdlcOutDir)\dfa_$(Platform_arch_model).cpp 18.30 18.31 {$(WorkSpace)\src\share\vm\adlc}.cpp.obj:: 18.32 - $(CPP) $(CPP_FLAGS) $(EXH_FLAGS) $(CPP_INCLUDE_DIRS) /c $< 18.33 + $(CPP) $(ADLC_CPP_FLAGS) $(EXH_FLAGS) $(CPP_INCLUDE_DIRS) /c $< 18.34 18.35 {$(WorkSpace)\src\share\vm\opto}.cpp.obj:: 18.36 - $(CPP) $(CPP_FLAGS) $(EXH_FLAGS) $(CPP_INCLUDE_DIRS) /c $< 18.37 + $(CPP) $(ADLC_CPP_FLAGS) $(EXH_FLAGS) $(CPP_INCLUDE_DIRS) /c $< 18.38 18.39 adlc.exe: main.obj adlparse.obj archDesc.obj arena.obj dfa.obj dict2.obj filebuff.obj \ 18.40 forms.obj formsopt.obj formssel.obj opcodes.obj output_c.obj output_h.obj
19.1 --- a/make/windows/makefiles/compile.make Mon Dec 27 09:30:20 2010 -0500 19.2 +++ b/make/windows/makefiles/compile.make Mon Dec 27 09:56:29 2010 -0500 19.3 @@ -1,5 +1,5 @@ 19.4 # 19.5 -# Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved. 19.6 +# Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 19.7 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 19.8 # 19.9 # This code is free software; you can redistribute it and/or modify it 19.10 @@ -80,6 +80,20 @@ 19.11 CPP=ARCH_ERROR 19.12 !endif 19.13 19.14 +CPP_FLAGS=$(CPP_FLAGS) /D "WIN32" /D "_WINDOWS" 19.15 + 19.16 +# Must specify this for sharedRuntimeTrig.cpp 19.17 +CPP_FLAGS=$(CPP_FLAGS) /D "VM_LITTLE_ENDIAN" 19.18 + 19.19 +# Used for platform dispatching 19.20 +CPP_FLAGS=$(CPP_FLAGS) /D TARGET_OS_FAMILY_windows 19.21 +CPP_FLAGS=$(CPP_FLAGS) /D TARGET_ARCH_$(Platform_arch) 19.22 +CPP_FLAGS=$(CPP_FLAGS) /D TARGET_ARCH_MODEL_$(Platform_arch_model) 19.23 +CPP_FLAGS=$(CPP_FLAGS) /D TARGET_OS_ARCH_windows_$(Platform_arch) 19.24 +CPP_FLAGS=$(CPP_FLAGS) /D TARGET_OS_ARCH_MODEL_windows_$(Platform_arch_model) 19.25 +CPP_FLAGS=$(CPP_FLAGS) /D TARGET_COMPILER_visCPP 19.26 + 19.27 + 19.28 # MSC_VER is a 4 digit number that tells us what compiler is being used 19.29 # and is generated when the local.make file is created by build.make 19.30 # via the script get_msc_ver.sh
20.1 --- a/make/windows/makefiles/debug.make Mon Dec 27 09:30:20 2010 -0500 20.2 +++ b/make/windows/makefiles/debug.make Mon Dec 27 09:56:29 2010 -0500 20.3 @@ -26,7 +26,6 @@ 20.4 HS_FNAME=$(HS_INTERNAL_NAME).dll 20.5 AOUT=$(HS_FNAME) 20.6 SAWINDBG=sawindbg.dll 20.7 -LAUNCHER_NAME=hotspot.exe 20.8 GENERATED=../generated 20.9 20.10 # Allow the user to turn off precompiled headers from the command line. 20.11 @@ -34,7 +33,7 @@ 20.12 BUILD_PCH_FILE=_build_pch_file.obj 20.13 !endif 20.14 20.15 -default:: $(BUILD_PCH_FILE) $(AOUT) $(LAUNCHER_NAME) checkAndBuildSA 20.16 +default:: $(BUILD_PCH_FILE) $(AOUT) launcher checkAndBuildSA 20.17 20.18 !include ../local.make 20.19 !include compile.make 20.20 @@ -49,8 +48,10 @@ 20.21 # Force resources to be rebuilt every time 20.22 $(Res_Files): FORCE 20.23 20.24 -$(AOUT): $(Res_Files) $(Obj_Files) 20.25 +vm.def: $(Obj_Files) 20.26 sh $(WorkSpace)/make/windows/build_vm_def.sh 20.27 + 20.28 +$(AOUT): $(Res_Files) $(Obj_Files) vm.def 20.29 $(LINK) @<< 20.30 $(LINK_FLAGS) /out:$@ /implib:$*.lib /def:vm.def $(Obj_Files) $(Res_Files) 20.31 <<
21.1 --- a/make/windows/makefiles/fastdebug.make Mon Dec 27 09:30:20 2010 -0500 21.2 +++ b/make/windows/makefiles/fastdebug.make Mon Dec 27 09:56:29 2010 -0500 21.3 @@ -26,7 +26,6 @@ 21.4 HS_FNAME=$(HS_INTERNAL_NAME).dll 21.5 AOUT=$(HS_FNAME) 21.6 SAWINDBG=sawindbg.dll 21.7 -LAUNCHER_NAME=hotspot.exe 21.8 GENERATED=../generated 21.9 21.10 # Allow the user to turn off precompiled headers from the command line. 21.11 @@ -34,7 +33,7 @@ 21.12 BUILD_PCH_FILE=_build_pch_file.obj 21.13 !endif 21.14 21.15 -default:: $(BUILD_PCH_FILE) $(AOUT) $(LAUNCHER_NAME) checkAndBuildSA 21.16 +default:: $(BUILD_PCH_FILE) $(AOUT) launcher checkAndBuildSA 21.17 21.18 !include ../local.make 21.19 !include compile.make 21.20 @@ -49,8 +48,10 @@ 21.21 # Force resources to be rebuilt every time 21.22 $(Res_Files): FORCE 21.23 21.24 -$(AOUT): $(Res_Files) $(Obj_Files) 21.25 +vm.def: $(Obj_Files) 21.26 sh $(WorkSpace)/make/windows/build_vm_def.sh 21.27 + 21.28 +$(AOUT): $(Res_Files) $(Obj_Files) vm.def 21.29 $(LINK) @<< 21.30 $(LINK_FLAGS) /out:$@ /implib:$*.lib /def:vm.def $(Obj_Files) $(Res_Files) 21.31 <<
22.1 --- a/make/windows/makefiles/generated.make Mon Dec 27 09:30:20 2010 -0500 22.2 +++ b/make/windows/makefiles/generated.make Mon Dec 27 09:56:29 2010 -0500 22.3 @@ -51,6 +51,7 @@ 22.4 22.5 !if ("$(Variant)" == "compiler2") || ("$(Variant)" == "tiered") 22.6 22.7 +!include $(WorkSpace)/make/windows/makefiles/compile.make 22.8 !include $(WorkSpace)/make/windows/makefiles/adlc.make 22.9 22.10 !endif
23.1 --- a/make/windows/makefiles/launcher.make Mon Dec 27 09:30:20 2010 -0500 23.2 +++ b/make/windows/makefiles/launcher.make Mon Dec 27 09:56:29 2010 -0500 23.3 @@ -22,7 +22,8 @@ 23.4 # 23.5 # 23.6 23.7 -LAUNCHER_FLAGS=$(ARCHFLAG) \ 23.8 + 23.9 +LAUNCHER_FLAGS=$(CPP_FLAGS) $(ARCHFLAG) \ 23.10 /D FULL_VERSION=\"$(HOTSPOT_RELEASE_VERSION)\" \ 23.11 /D JDK_MAJOR_VERSION=\"$(JDK_MAJOR_VERSION)\" \ 23.12 /D JDK_MINOR_VERSION=\"$(JDK_MINOR_VERSION)\" \ 23.13 @@ -32,9 +33,11 @@ 23.14 /D _CRT_SECURE_NO_DEPRECATE \ 23.15 /D LINK_INTO_LIBJVM \ 23.16 /I $(WorkSpace)\src\os\windows\launcher \ 23.17 - /I $(WorkSpace)\src\share\tools\launcher 23.18 - 23.19 -CPP_FLAGS=$(CPP_FLAGS) $(LAUNCHER_FLAGS) 23.20 + /I $(WorkSpace)\src\share\tools\launcher \ 23.21 + /I $(WorkSpace)\src\share\vm\prims \ 23.22 + /I $(WorkSpace)\src\share\vm \ 23.23 + /I $(WorkSpace)\src\cpu\$(Platform_arch)\vm \ 23.24 + /I $(WorkSpace)\src\os\windows\vm 23.25 23.26 LINK_FLAGS=/manifest $(HS_INTERNAL_NAME).lib kernel32.lib user32.lib /nologo /machine:$(MACHINE) /map /debug /subsystem:console 23.27 23.28 @@ -46,22 +49,23 @@ 23.29 LINK_FLAGS = $(LINK_FLAGS) $(BUFFEROVERFLOWLIB) 23.30 !endif 23.31 23.32 -LAUNCHERDIR = $(GAMMADIR)/src/os/windows/launcher 23.33 -LAUNCHERDIR_SHARE = $(GAMMADIR)/src/share/tools/launcher 23.34 +LAUNCHERDIR = $(WorkSpace)/src/os/windows/launcher 23.35 +LAUNCHERDIR_SHARE = $(WorkSpace)/src/share/tools/launcher 23.36 23.37 OUTDIR = launcher 23.38 23.39 {$(LAUNCHERDIR)}.c{$(OUTDIR)}.obj: 23.40 - -mkdir $(OUTDIR) 23.41 - $(CPP) $(CPP_FLAGS) /c /Fo$@ $< 23.42 + -mkdir $(OUTDIR) 2>NUL >NUL 23.43 + $(CPP) $(LAUNCHER_FLAGS) /c /Fo$@ $< 23.44 23.45 {$(LAUNCHERDIR_SHARE)}.c{$(OUTDIR)}.obj: 23.46 - -mkdir $(OUTDIR) 23.47 - $(CPP) $(CPP_FLAGS) /c /Fo$@ $< 23.48 + -mkdir $(OUTDIR) 2>NUL >NUL 23.49 + $(CPP) $(LAUNCHER_FLAGS) /c /Fo$@ $< 23.50 23.51 $(OUTDIR)\*.obj: $(LAUNCHERDIR)\*.c $(LAUNCHERDIR)\*.h $(LAUNCHERDIR_SHARE)\*.c $(LAUNCHERDIR_SHARE)\*.h 23.52 23.53 -$(LAUNCHER_NAME): 
$(OUTDIR)\java.obj $(OUTDIR)\java_md.obj $(OUTDIR)\jli_util.obj 23.54 - $(LINK) $(LINK_FLAGS) /out:$@ $** 23.55 +launcher: $(OUTDIR)\java.obj $(OUTDIR)\java_md.obj $(OUTDIR)\jli_util.obj 23.56 + echo $(JAVA_HOME) > jdkpath.txt 23.57 + $(LINK) $(LINK_FLAGS) /out:hotspot.exe $** 23.58 23.59
24.1 --- a/make/windows/makefiles/product.make Mon Dec 27 09:30:20 2010 -0500 24.2 +++ b/make/windows/makefiles/product.make Mon Dec 27 09:56:29 2010 -0500 24.3 @@ -25,7 +25,6 @@ 24.4 HS_INTERNAL_NAME=jvm 24.5 HS_FNAME=$(HS_INTERNAL_NAME).dll 24.6 AOUT=$(HS_FNAME) 24.7 -LAUNCHER_NAME=hotspot.exe 24.8 GENERATED=../generated 24.9 24.10 # Allow the user to turn off precompiled headers from the command line. 24.11 @@ -33,7 +32,7 @@ 24.12 BUILD_PCH_FILE=_build_pch_file.obj 24.13 !endif 24.14 24.15 -default:: $(BUILD_PCH_FILE) $(AOUT) $(LAUNCHER_NAME) checkAndBuildSA 24.16 +default:: $(BUILD_PCH_FILE) $(AOUT) launcher checkAndBuildSA 24.17 24.18 !include ../local.make 24.19 !include compile.make 24.20 @@ -59,8 +58,10 @@ 24.21 $(LINK_FLAGS) /out:$@ /implib:$*.lib $(Obj_Files) $(Res_Files) 24.22 << 24.23 !else 24.24 -$(AOUT): $(Res_Files) $(Obj_Files) 24.25 +vm.def: $(Obj_Files) 24.26 sh $(WorkSpace)/make/windows/build_vm_def.sh 24.27 + 24.28 +$(AOUT): $(Res_Files) $(Obj_Files) vm.def 24.29 $(LINK) @<< 24.30 $(LINK_FLAGS) /out:$@ /implib:$*.lib /def:vm.def $(Obj_Files) $(Res_Files) 24.31 <<
25.1 --- a/make/windows/makefiles/projectcreator.make Mon Dec 27 09:30:20 2010 -0500 25.2 +++ b/make/windows/makefiles/projectcreator.make Mon Dec 27 09:56:29 2010 -0500 25.3 @@ -84,11 +84,12 @@ 25.4 -buildBase $(HOTSPOTBUILDSPACE)\%f\%b \ 25.5 -startAt src \ 25.6 -compiler $(VcVersion) \ 25.7 - -projectFileName $(HOTSPOTBUILDSPACE)\$(ProjectFile) \ 25.8 + -projectFileName $(HOTSPOTBUILDROOT)\$(ProjectFile) \ 25.9 -jdkTargetRoot $(HOTSPOTJDKDIST) \ 25.10 -define ALIGN_STACK_FRAMES \ 25.11 -define VM_LITTLE_ENDIAN \ 25.12 -prelink "" "Generating vm.def..." "cd $(HOTSPOTBUILDSPACE)\%f\%b set HOTSPOTMKSHOME=$(HOTSPOTMKSHOME) $(HOTSPOTMKSHOME)\sh $(HOTSPOTWORKSPACE)\make\windows\build_vm_def.sh $(LINK_VER)" \ 25.13 + -postbuild "" "Building hotspot.exe..." "cd $(HOTSPOTBUILDSPACE)\%f\%b set HOTSPOTMKSHOME=$(HOTSPOTMKSHOME) nmake -f $(HOTSPOTWORKSPACE)\make\windows\projectfiles\common\Makefile LOCAL_MAKE=$(HOTSPOTBUILDSPACE)\%f\local.make JAVA_HOME=$(HOTSPOTJDKDIST) launcher" \ 25.14 -ignoreFile jsig.c \ 25.15 -ignoreFile jvmtiEnvRecommended.cpp \ 25.16 -ignoreFile jvmtiEnvStub.cpp \
26.1 --- a/make/windows/makefiles/rules.make Mon Dec 27 09:30:20 2010 -0500 26.2 +++ b/make/windows/makefiles/rules.make Mon Dec 27 09:56:29 2010 -0500 26.3 @@ -1,5 +1,5 @@ 26.4 # 26.5 -# Copyright (c) 2003, 2009, Oracle and/or its affiliates. All rights reserved. 26.6 +# Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. 26.7 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 26.8 # 26.9 # This code is free software; you can redistribute it and/or modify it 26.10 @@ -48,7 +48,7 @@ 26.11 JAVAC_FLAGS=-g -encoding ascii 26.12 BOOTSTRAP_JAVAC_FLAGS=$(JAVAC_FLAGS) -source $(BOOT_SOURCE_LANGUAGE_VERSION) -target $(BOOT_TARGET_CLASS_VERSION) 26.13 26.14 -ProjectFile=vm.vcproj 26.15 +ProjectFile=jvm.vcproj 26.16 26.17 !if "$(MSC_VER)" == "1200" 26.18 26.19 @@ -63,6 +63,11 @@ 26.20 26.21 VcVersion=VC9 26.22 26.23 +!elseif "$(MSC_VER)" == "1600" 26.24 + 26.25 +# for compatibility - we don't yet have a ProjectCreator for VC10 26.26 +VcVersion=VC9 26.27 + 26.28 !else 26.29 26.30 VcVersion=VC7
27.1 --- a/make/windows/makefiles/vm.make Mon Dec 27 09:30:20 2010 -0500 27.2 +++ b/make/windows/makefiles/vm.make Mon Dec 27 09:56:29 2010 -0500 27.3 @@ -71,22 +71,11 @@ 27.4 CPP_FLAGS=$(CPP_FLAGS) /D "HOTSPOT_BUILD_USER=\"$(BuildUser)\"" 27.5 CPP_FLAGS=$(CPP_FLAGS) /D "HOTSPOT_VM_DISTRO=\"$(HOTSPOT_VM_DISTRO)\"" 27.6 27.7 -CPP_FLAGS=$(CPP_FLAGS) /D "WIN32" /D "_WINDOWS" $(CPP_INCLUDE_DIRS) 27.8 - 27.9 -# Must specify this for sharedRuntimeTrig.cpp 27.10 -CPP_FLAGS=$(CPP_FLAGS) /D "VM_LITTLE_ENDIAN" 27.11 +CPP_FLAGS=$(CPP_FLAGS) $(CPP_INCLUDE_DIRS) 27.12 27.13 # Define that so jni.h is on correct side 27.14 CPP_FLAGS=$(CPP_FLAGS) /D "_JNI_IMPLEMENTATION_" 27.15 27.16 -# Used for platform dispatching 27.17 -CPP_FLAGS=$(CPP_FLAGS) /D TARGET_OS_FAMILY_windows 27.18 -CPP_FLAGS=$(CPP_FLAGS) /D TARGET_ARCH_$(Platform_arch) 27.19 -CPP_FLAGS=$(CPP_FLAGS) /D TARGET_ARCH_MODEL_$(Platform_arch_model) 27.20 -CPP_FLAGS=$(CPP_FLAGS) /D TARGET_OS_ARCH_windows_$(Platform_arch) 27.21 -CPP_FLAGS=$(CPP_FLAGS) /D TARGET_OS_ARCH_MODEL_windows_$(Platform_arch_model) 27.22 -CPP_FLAGS=$(CPP_FLAGS) /D TARGET_COMPILER_visCPP 27.23 - 27.24 !if "$(BUILDARCH)" == "ia64" 27.25 STACK_SIZE="/STACK:1048576,262144" 27.26 !else 27.27 @@ -104,6 +93,8 @@ 27.28 !endif 27.29 !endif 27.30 27.31 +# If you modify exports below please do the corresponding changes in 27.32 +# src/share/tools/ProjectCreator/WinGammaPlatformVC7.java 27.33 LINK_FLAGS=$(LINK_FLAGS) $(STACK_SIZE) /subsystem:windows /dll /base:0x8000000 \ 27.34 /export:JNI_GetDefaultJavaVMInitArgs \ 27.35 /export:JNI_CreateJavaVM \
28.1 --- a/make/windows/projectfiles/common/Makefile Mon Dec 27 09:30:20 2010 -0500 28.2 +++ b/make/windows/projectfiles/common/Makefile Mon Dec 27 09:56:29 2010 -0500 28.3 @@ -22,7 +22,10 @@ 28.4 # 28.5 # 28.6 28.7 -!include local.make 28.8 +!ifdef LOCAL_MAKE 28.9 +!include $(LOCAL_MAKE) 28.10 +!endif 28.11 + 28.12 28.13 WorkSpace=$(HOTSPOTWORKSPACE) 28.14 28.15 @@ -34,11 +37,18 @@ 28.16 !else 28.17 !ifdef JAVA_HOME 28.18 BootStrapDir=$(JAVA_HOME) 28.19 +!else 28.20 +!ifdef HOTSPOTJDKDIST 28.21 +BootStrapDir=$(HOTSPOTJDKDIST) 28.22 +!endif 28.23 !endif 28.24 !endif 28.25 !endif 28.26 28.27 + 28.28 + 28.29 !include $(HOTSPOTWORKSPACE)/make/windows/makefiles/projectcreator.make 28.30 +!include $(WorkSpace)/make/windows/makefiles/compile.make 28.31 28.32 # Pick up rules for building JVMTI (JSR-163) 28.33 JvmtiOutDir=$(HOTSPOTBUILDSPACE)\$(Variant)\generated\jvmtifiles 28.34 @@ -56,6 +66,9 @@ 28.35 !include $(HOTSPOTWORKSPACE)/make/windows/makefiles/adlc.make 28.36 !endif 28.37 28.38 +HS_INTERNAL_NAME=jvm 28.39 +!include $(HOTSPOTWORKSPACE)/make/windows/makefiles/launcher.make 28.40 + 28.41 default:: $(AdditionalTargets) $(JvmtiGeneratedFiles) 28.42 28.43 !include $(HOTSPOTWORKSPACE)/make/hotspot_version 28.44 @@ -97,7 +110,7 @@ 28.45 -define JRE_RELEASE_VERSION=\\\"$(JRE_RELEASE_VERSION)\\\" \ 28.46 -define HOTSPOT_VM_DISTRO=\\\"$(HOTSPOT_VM_DISTRO)\\\" 28.47 28.48 -$(HOTSPOTBUILDSPACE)/$(ProjectFile): local.make $(HOTSPOTBUILDSPACE)/classes/ProjectCreator.class 28.49 +$(HOTSPOTBUILDROOT)/$(ProjectFile): $(HOTSPOTBUILDSPACE)/classes/ProjectCreator.class 28.50 @$(RUN_JAVA) -Djava.class.path=$(HOTSPOTBUILDSPACE)/classes ProjectCreator WinGammaPlatform$(VcVersion) $(ProjectCreatorIDEOptions) 28.51 28.52 clean:
29.1 --- a/make/windows/projectfiles/compiler1/Makefile Mon Dec 27 09:30:20 2010 -0500 29.2 +++ b/make/windows/projectfiles/compiler1/Makefile Mon Dec 27 09:56:29 2010 -0500 29.3 @@ -1,5 +1,5 @@ 29.4 # 29.5 -# Copyright (c) 1999, 2008, Oracle and/or its affiliates. All rights reserved. 29.6 +# Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. 29.7 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 29.8 # 29.9 # This code is free software; you can redistribute it and/or modify it 29.10 @@ -22,7 +22,6 @@ 29.11 # 29.12 # 29.13 29.14 -Variant=compiler1 29.15 -!include local.make 29.16 +!include ../local.make 29.17 29.18 !include $(HOTSPOTWORKSPACE)/make/windows/projectfiles/common/Makefile
30.1 --- a/make/windows/projectfiles/compiler1/vm.def Mon Dec 27 09:30:20 2010 -0500 30.2 +++ b/make/windows/projectfiles/compiler1/vm.def Mon Dec 27 09:56:29 2010 -0500 30.3 @@ -2,6 +2,6 @@ 30.4 ; This .DEF file is a placeholder for one which is automatically 30.5 ; generated during the build process. See 30.6 ; make\windows\build_vm_def.sh and 30.7 -; make\windows\makefiles\makedeps.make (esp. the "-prelink" 30.8 +; make\windows\makefiles\projectcreator.make (esp. the "-prelink" 30.9 ; options). 30.10 ;
31.1 --- a/make/windows/projectfiles/compiler2/Makefile Mon Dec 27 09:30:20 2010 -0500 31.2 +++ b/make/windows/projectfiles/compiler2/Makefile Mon Dec 27 09:56:29 2010 -0500 31.3 @@ -22,8 +22,7 @@ 31.4 # 31.5 # 31.6 31.7 -Variant=compiler2 31.8 -!include local.make 31.9 +!include ../local.make 31.10 AdlcOutDir=$(HOTSPOTBUILDSPACE)\$(Variant)\generated\adfiles 31.11 AdditionalTargets=$(AdlcOutDir)\ad_$(Platform_arch_model).cpp $(AdlcOutDir)\dfa_$(Platform_arch_model).cpp 31.12
32.1 --- a/make/windows/projectfiles/compiler2/vm.def Mon Dec 27 09:30:20 2010 -0500 32.2 +++ b/make/windows/projectfiles/compiler2/vm.def Mon Dec 27 09:56:29 2010 -0500 32.3 @@ -2,6 +2,6 @@ 32.4 ; This .DEF file is a placeholder for one which is automatically 32.5 ; generated during the build process. See 32.6 ; make\windows\build_vm_def.sh and 32.7 -; make\windows\makefiles\makedeps.make (esp. the "-prelink" 32.8 +; make\windows\makefiles\projectcreator.make (esp. the "-prelink" 32.9 ; options). 32.10 ;
33.1 --- a/make/windows/projectfiles/core/Makefile Mon Dec 27 09:30:20 2010 -0500 33.2 +++ b/make/windows/projectfiles/core/Makefile Mon Dec 27 09:56:29 2010 -0500 33.3 @@ -1,5 +1,5 @@ 33.4 # 33.5 -# Copyright (c) 1998, 2008, Oracle and/or its affiliates. All rights reserved. 33.6 +# Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved. 33.7 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 33.8 # 33.9 # This code is free software; you can redistribute it and/or modify it 33.10 @@ -22,7 +22,6 @@ 33.11 # 33.12 # 33.13 33.14 -Variant=core 33.15 -!include local.make 33.16 +!include ../local.make 33.17 33.18 !include $(HOTSPOTWORKSPACE)/make/windows/projectfiles/common/Makefile
34.1 --- a/make/windows/projectfiles/core/vm.def Mon Dec 27 09:30:20 2010 -0500 34.2 +++ b/make/windows/projectfiles/core/vm.def Mon Dec 27 09:56:29 2010 -0500 34.3 @@ -2,6 +2,6 @@ 34.4 ; This .DEF file is a placeholder for one which is automatically 34.5 ; generated during the build process. See 34.6 ; make\windows\build_vm_def.sh and 34.7 -; make\windows\makefiles\makedeps.make (esp. the "-prelink" 34.8 +; make\windows\makefiles\projectcreator.make (esp. the "-prelink" 34.9 ; options). 34.10 ;
35.1 --- a/make/windows/projectfiles/kernel/Makefile Mon Dec 27 09:30:20 2010 -0500 35.2 +++ b/make/windows/projectfiles/kernel/Makefile Mon Dec 27 09:56:29 2010 -0500 35.3 @@ -1,5 +1,5 @@ 35.4 # 35.5 -# Copyright (c) 2007, Oracle and/or its affiliates. All rights reserved. 35.6 +# Copyright (c) 2007, 2010 Oracle and/or its affiliates. All rights reserved. 35.7 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 35.8 # 35.9 # This code is free software; you can redistribute it and/or modify it 35.10 @@ -22,7 +22,6 @@ 35.11 # 35.12 # 35.13 35.14 -Variant=kernel 35.15 -!include local.make 35.16 +!include ../local.make 35.17 35.18 !include $(HOTSPOTWORKSPACE)/make/windows/projectfiles/common/Makefile
36.1 --- a/make/windows/projectfiles/kernel/vm.def Mon Dec 27 09:30:20 2010 -0500 36.2 +++ b/make/windows/projectfiles/kernel/vm.def Mon Dec 27 09:56:29 2010 -0500 36.3 @@ -2,6 +2,6 @@ 36.4 ; This .DEF file is a placeholder for one which is automatically 36.5 ; generated during the build process. See 36.6 ; make\windows\build_vm_def.sh and 36.7 -; make\windows\makefiles\makedeps.make (esp. the "-prelink" 36.8 +; make\windows\makefiles\projectcreator.make (esp. the "-prelink" 36.9 ; options). 36.10 ;
37.1 --- a/make/windows/projectfiles/tiered/Makefile Mon Dec 27 09:30:20 2010 -0500 37.2 +++ b/make/windows/projectfiles/tiered/Makefile Mon Dec 27 09:56:29 2010 -0500 37.3 @@ -22,8 +22,7 @@ 37.4 # 37.5 # 37.6 37.7 -Variant=tiered 37.8 -!include local.make 37.9 +!include ../local.make 37.10 AdlcOutDir=$(HOTSPOTBUILDSPACE)\$(Variant)\generated\adfiles 37.11 AdditionalTargets=$(AdlcOutDir)\ad_$(Platform_arch_model).cpp $(AdlcOutDir)\dfa_$(Platform_arch_model).cpp 37.12
38.1 --- a/make/windows/projectfiles/tiered/vm.def Mon Dec 27 09:30:20 2010 -0500 38.2 +++ b/make/windows/projectfiles/tiered/vm.def Mon Dec 27 09:56:29 2010 -0500 38.3 @@ -2,6 +2,6 @@ 38.4 ; This .DEF file is a placeholder for one which is automatically 38.5 ; generated during the build process. See 38.6 ; make\windows\build_vm_def.sh and 38.7 -; make\windows\makefiles\makedeps.make (esp. the "-prelink" 38.8 +; make\windows\makefiles\projectcreator.make (esp. the "-prelink" 38.9 ; options). 38.10 ;
39.1 --- a/src/cpu/sparc/vm/assembler_sparc.cpp Mon Dec 27 09:30:20 2010 -0500 39.2 +++ b/src/cpu/sparc/vm/assembler_sparc.cpp Mon Dec 27 09:56:29 2010 -0500 39.3 @@ -909,10 +909,10 @@ 39.4 #if defined(COMPILER2) && !defined(_LP64) 39.5 // Save & restore possible 64-bit Long arguments in G-regs 39.6 sllx(L0,32,G2); // Move old high G1 bits high in G2 39.7 - sllx(G1, 0,G1); // Clear current high G1 bits 39.8 + srl(G1, 0,G1); // Clear current high G1 bits 39.9 or3 (G1,G2,G1); // Recover 64-bit G1 39.10 sllx(L6,32,G2); // Move old high G4 bits high in G2 39.11 - sllx(G4, 0,G4); // Clear current high G4 bits 39.12 + srl(G4, 0,G4); // Clear current high G4 bits 39.13 or3 (G4,G2,G4); // Recover 64-bit G4 39.14 #endif 39.15 restore(O0, 0, G2_thread); 39.16 @@ -1443,6 +1443,45 @@ 39.17 } 39.18 } 39.19 39.20 +int MacroAssembler::size_of_set64(jlong value) { 39.21 + v9_dep(); 39.22 + 39.23 + int hi = (int)(value >> 32); 39.24 + int lo = (int)(value & ~0); 39.25 + int count = 0; 39.26 + 39.27 + // (Matcher::isSimpleConstant64 knows about the following optimizations.) 39.28 + if (Assembler::is_simm13(lo) && value == lo) { 39.29 + count++; 39.30 + } else if (hi == 0) { 39.31 + count++; 39.32 + if (low10(lo) != 0) 39.33 + count++; 39.34 + } 39.35 + else if (hi == -1) { 39.36 + count += 2; 39.37 + } 39.38 + else if (lo == 0) { 39.39 + if (Assembler::is_simm13(hi)) { 39.40 + count++; 39.41 + } else { 39.42 + count++; 39.43 + if (low10(hi) != 0) 39.44 + count++; 39.45 + } 39.46 + count++; 39.47 + } 39.48 + else { 39.49 + count += 2; 39.50 + if (low10(hi) != 0) 39.51 + count++; 39.52 + if (low10(lo) != 0) 39.53 + count++; 39.54 + count += 2; 39.55 + } 39.56 + return count; 39.57 +} 39.58 + 39.59 // compute size in bytes of sparc frame, given 39.60 // number of extraWords 39.61 int MacroAssembler::total_frame_size_in_bytes(int extraWords) {
40.1 --- a/src/cpu/sparc/vm/assembler_sparc.hpp Mon Dec 27 09:30:20 2010 -0500 40.2 +++ b/src/cpu/sparc/vm/assembler_sparc.hpp Mon Dec 27 09:56:29 2010 -0500 40.3 @@ -1621,6 +1621,10 @@ 40.4 40.5 void sub( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sub_op3 ) | rs1(s1) | rs2(s2) ); } 40.6 void sub( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sub_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } 40.7 + 40.8 + // Note: offset is added to s2. 40.9 + inline void sub(Register s1, RegisterOrConstant s2, Register d, int offset = 0); 40.10 + 40.11 void subcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sub_op3 | cc_bit_op3 ) | rs1(s1) | rs2(s2) ); } 40.12 void subcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sub_op3 | cc_bit_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } 40.13 void subc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(subc_op3 ) | rs1(s1) | rs2(s2) ); } 40.14 @@ -1798,6 +1802,7 @@ 40.15 // branches that use right instruction for v8 vs. v9 40.16 inline void br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt = relocInfo::none ); 40.17 inline void br( Condition c, bool a, Predict p, Label& L ); 40.18 + 40.19 inline void fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt = relocInfo::none ); 40.20 inline void fb( Condition c, bool a, Predict p, Label& L ); 40.21 40.22 @@ -1894,6 +1899,9 @@ 40.23 void patchable_set(intptr_t value, Register d); 40.24 void set64(jlong value, Register d, Register tmp); 40.25 40.26 + // Compute size of set64. 40.27 + static int size_of_set64(jlong value); 40.28 + 40.29 // sign-extend 32 to 64 40.30 inline void signx( Register s, Register d ) { sra( s, G0, d); } 40.31 inline void signx( Register d ) { sra( d, G0, d); }
41.1 --- a/src/cpu/sparc/vm/assembler_sparc.inline.hpp Mon Dec 27 09:30:20 2010 -0500 41.2 +++ b/src/cpu/sparc/vm/assembler_sparc.inline.hpp Mon Dec 27 09:56:29 2010 -0500 41.3 @@ -328,6 +328,11 @@ 41.4 inline void Assembler::stdcq( int crd, Register s1, Register s2) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | rs2(s2) ); } 41.5 inline void Assembler::stdcq( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } 41.6 41.7 +inline void Assembler::sub(Register s1, RegisterOrConstant s2, Register d, int offset) { 41.8 + if (s2.is_register()) sub(s1, s2.as_register(), d); 41.9 + else { sub(s1, s2.as_constant() + offset, d); offset = 0; } 41.10 + if (offset != 0) sub(d, offset, d); 41.11 +} 41.12 41.13 // pp 231 41.14
42.1 --- a/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp Mon Dec 27 09:30:20 2010 -0500 42.2 +++ b/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp Mon Dec 27 09:56:29 2010 -0500 42.3 @@ -434,7 +434,7 @@ 42.4 42.5 Register pre_val_reg = pre_val()->as_register(); 42.6 42.7 - ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false); 42.8 + ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/); 42.9 if (__ is_in_wdisp16_range(_continuation)) { 42.10 __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt, 42.11 pre_val_reg, _continuation);
43.1 --- a/src/cpu/sparc/vm/c1_FrameMap_sparc.hpp Mon Dec 27 09:30:20 2010 -0500 43.2 +++ b/src/cpu/sparc/vm/c1_FrameMap_sparc.hpp Mon Dec 27 09:56:29 2010 -0500 43.3 @@ -155,4 +155,7 @@ 43.4 static bool is_caller_save_register (LIR_Opr reg); 43.5 static bool is_caller_save_register (Register r); 43.6 43.7 + static int nof_caller_save_cpu_regs() { return pd_nof_caller_save_cpu_regs_frame_map; } 43.8 + static int last_cpu_reg() { return pd_last_cpu_reg; } 43.9 + 43.10 #endif // CPU_SPARC_VM_C1_FRAMEMAP_SPARC_HPP
44.1 --- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp Mon Dec 27 09:30:20 2010 -0500 44.2 +++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp Mon Dec 27 09:56:29 2010 -0500 44.3 @@ -100,6 +100,11 @@ 44.4 return false; 44.5 } 44.6 44.7 + if (UseCompressedOops) { 44.8 + if (dst->is_address() && !dst->is_stack() && (dst->type() == T_OBJECT || dst->type() == T_ARRAY)) return false; 44.9 + if (src->is_address() && !src->is_stack() && (src->type() == T_OBJECT || src->type() == T_ARRAY)) return false; 44.10 + } 44.11 + 44.12 if (dst->is_register()) { 44.13 if (src->is_address() && Assembler::is_simm13(src->as_address_ptr()->disp())) { 44.14 return !PatchALot; 44.15 @@ -253,7 +258,7 @@ 44.16 int offset_offset = java_lang_String::offset_offset_in_bytes(); // first character position 44.17 int count_offset = java_lang_String:: count_offset_in_bytes(); 44.18 44.19 - __ ld_ptr(str0, value_offset, tmp0); 44.20 + __ load_heap_oop(str0, value_offset, tmp0); 44.21 __ ld(str0, offset_offset, tmp2); 44.22 __ add(tmp0, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp0); 44.23 __ ld(str0, count_offset, str0); 44.24 @@ -262,7 +267,7 @@ 44.25 // str1 may be null 44.26 add_debug_info_for_null_check_here(info); 44.27 44.28 - __ ld_ptr(str1, value_offset, tmp1); 44.29 + __ load_heap_oop(str1, value_offset, tmp1); 44.30 __ add(tmp0, tmp2, tmp0); 44.31 44.32 __ ld(str1, offset_offset, tmp2); 44.33 @@ -766,7 +771,7 @@ 44.34 44.35 void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) { 44.36 add_debug_info_for_null_check_here(op->info()); 44.37 - __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), G3_scratch); 44.38 + __ load_klass(O0, G3_scratch); 44.39 if (__ is_simm13(op->vtable_offset())) { 44.40 __ ld_ptr(G3_scratch, op->vtable_offset(), G5_method); 44.41 } else { 44.42 @@ -780,138 +785,17 @@ 44.43 // the peephole pass fills the delay slot 44.44 } 44.45 44.46 - 44.47 -// load with 32-bit displacement 44.48 -int LIR_Assembler::load(Register s, int disp, Register d, BasicType ld_type, CodeEmitInfo 
*info) { 44.49 - int load_offset = code_offset(); 44.50 - if (Assembler::is_simm13(disp)) { 44.51 - if (info != NULL) add_debug_info_for_null_check_here(info); 44.52 - switch(ld_type) { 44.53 - case T_BOOLEAN: // fall through 44.54 - case T_BYTE : __ ldsb(s, disp, d); break; 44.55 - case T_CHAR : __ lduh(s, disp, d); break; 44.56 - case T_SHORT : __ ldsh(s, disp, d); break; 44.57 - case T_INT : __ ld(s, disp, d); break; 44.58 - case T_ADDRESS:// fall through 44.59 - case T_ARRAY : // fall through 44.60 - case T_OBJECT: __ ld_ptr(s, disp, d); break; 44.61 - default : ShouldNotReachHere(); 44.62 - } 44.63 - } else { 44.64 - __ set(disp, O7); 44.65 - if (info != NULL) add_debug_info_for_null_check_here(info); 44.66 - load_offset = code_offset(); 44.67 - switch(ld_type) { 44.68 - case T_BOOLEAN: // fall through 44.69 - case T_BYTE : __ ldsb(s, O7, d); break; 44.70 - case T_CHAR : __ lduh(s, O7, d); break; 44.71 - case T_SHORT : __ ldsh(s, O7, d); break; 44.72 - case T_INT : __ ld(s, O7, d); break; 44.73 - case T_ADDRESS:// fall through 44.74 - case T_ARRAY : // fall through 44.75 - case T_OBJECT: __ ld_ptr(s, O7, d); break; 44.76 - default : ShouldNotReachHere(); 44.77 - } 44.78 - } 44.79 - if (ld_type == T_ARRAY || ld_type == T_OBJECT) __ verify_oop(d); 44.80 - return load_offset; 44.81 -} 44.82 - 44.83 - 44.84 -// store with 32-bit displacement 44.85 -void LIR_Assembler::store(Register value, Register base, int offset, BasicType type, CodeEmitInfo *info) { 44.86 - if (Assembler::is_simm13(offset)) { 44.87 - if (info != NULL) add_debug_info_for_null_check_here(info); 44.88 - switch (type) { 44.89 - case T_BOOLEAN: // fall through 44.90 - case T_BYTE : __ stb(value, base, offset); break; 44.91 - case T_CHAR : __ sth(value, base, offset); break; 44.92 - case T_SHORT : __ sth(value, base, offset); break; 44.93 - case T_INT : __ stw(value, base, offset); break; 44.94 - case T_ADDRESS:// fall through 44.95 - case T_ARRAY : // fall through 44.96 - case T_OBJECT: __ 
st_ptr(value, base, offset); break; 44.97 - default : ShouldNotReachHere(); 44.98 - } 44.99 - } else { 44.100 - __ set(offset, O7); 44.101 - if (info != NULL) add_debug_info_for_null_check_here(info); 44.102 - switch (type) { 44.103 - case T_BOOLEAN: // fall through 44.104 - case T_BYTE : __ stb(value, base, O7); break; 44.105 - case T_CHAR : __ sth(value, base, O7); break; 44.106 - case T_SHORT : __ sth(value, base, O7); break; 44.107 - case T_INT : __ stw(value, base, O7); break; 44.108 - case T_ADDRESS:// fall through 44.109 - case T_ARRAY : //fall through 44.110 - case T_OBJECT: __ st_ptr(value, base, O7); break; 44.111 - default : ShouldNotReachHere(); 44.112 - } 44.113 - } 44.114 - // Note: Do the store before verification as the code might be patched! 44.115 - if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(value); 44.116 -} 44.117 - 44.118 - 44.119 -// load float with 32-bit displacement 44.120 -void LIR_Assembler::load(Register s, int disp, FloatRegister d, BasicType ld_type, CodeEmitInfo *info) { 44.121 - FloatRegisterImpl::Width w; 44.122 - switch(ld_type) { 44.123 - case T_FLOAT : w = FloatRegisterImpl::S; break; 44.124 - case T_DOUBLE: w = FloatRegisterImpl::D; break; 44.125 - default : ShouldNotReachHere(); 44.126 - } 44.127 - 44.128 - if (Assembler::is_simm13(disp)) { 44.129 - if (info != NULL) add_debug_info_for_null_check_here(info); 44.130 - if (disp % BytesPerLong != 0 && w == FloatRegisterImpl::D) { 44.131 - __ ldf(FloatRegisterImpl::S, s, disp + BytesPerWord, d->successor()); 44.132 - __ ldf(FloatRegisterImpl::S, s, disp , d); 44.133 - } else { 44.134 - __ ldf(w, s, disp, d); 44.135 - } 44.136 - } else { 44.137 - __ set(disp, O7); 44.138 - if (info != NULL) add_debug_info_for_null_check_here(info); 44.139 - __ ldf(w, s, O7, d); 44.140 - } 44.141 -} 44.142 - 44.143 - 44.144 -// store float with 32-bit displacement 44.145 -void LIR_Assembler::store(FloatRegister value, Register base, int offset, BasicType type, CodeEmitInfo *info) { 44.146 
- FloatRegisterImpl::Width w; 44.147 - switch(type) { 44.148 - case T_FLOAT : w = FloatRegisterImpl::S; break; 44.149 - case T_DOUBLE: w = FloatRegisterImpl::D; break; 44.150 - default : ShouldNotReachHere(); 44.151 - } 44.152 - 44.153 - if (Assembler::is_simm13(offset)) { 44.154 - if (info != NULL) add_debug_info_for_null_check_here(info); 44.155 - if (w == FloatRegisterImpl::D && offset % BytesPerLong != 0) { 44.156 - __ stf(FloatRegisterImpl::S, value->successor(), base, offset + BytesPerWord); 44.157 - __ stf(FloatRegisterImpl::S, value , base, offset); 44.158 - } else { 44.159 - __ stf(w, value, base, offset); 44.160 - } 44.161 - } else { 44.162 - __ set(offset, O7); 44.163 - if (info != NULL) add_debug_info_for_null_check_here(info); 44.164 - __ stf(w, value, O7, base); 44.165 - } 44.166 -} 44.167 - 44.168 - 44.169 -int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool unaligned) { 44.170 +int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool wide, bool unaligned) { 44.171 int store_offset; 44.172 if (!Assembler::is_simm13(offset + (type == T_LONG) ? 
wordSize : 0)) { 44.173 assert(!unaligned, "can't handle this"); 44.174 // for offsets larger than a simm13 we setup the offset in O7 44.175 __ set(offset, O7); 44.176 - store_offset = store(from_reg, base, O7, type); 44.177 + store_offset = store(from_reg, base, O7, type, wide); 44.178 } else { 44.179 - if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(from_reg->as_register()); 44.180 + if (type == T_ARRAY || type == T_OBJECT) { 44.181 + __ verify_oop(from_reg->as_register()); 44.182 + } 44.183 store_offset = code_offset(); 44.184 switch (type) { 44.185 case T_BOOLEAN: // fall through 44.186 @@ -934,9 +818,22 @@ 44.187 __ stw(from_reg->as_register_hi(), base, offset + hi_word_offset_in_bytes); 44.188 #endif 44.189 break; 44.190 - case T_ADDRESS:// fall through 44.191 + case T_ADDRESS: 44.192 + __ st_ptr(from_reg->as_register(), base, offset); 44.193 + break; 44.194 case T_ARRAY : // fall through 44.195 - case T_OBJECT: __ st_ptr(from_reg->as_register(), base, offset); break; 44.196 + case T_OBJECT: 44.197 + { 44.198 + if (UseCompressedOops && !wide) { 44.199 + __ encode_heap_oop(from_reg->as_register(), G3_scratch); 44.200 + store_offset = code_offset(); 44.201 + __ stw(G3_scratch, base, offset); 44.202 + } else { 44.203 + __ st_ptr(from_reg->as_register(), base, offset); 44.204 + } 44.205 + break; 44.206 + } 44.207 + 44.208 case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, offset); break; 44.209 case T_DOUBLE: 44.210 { 44.211 @@ -958,8 +855,10 @@ 44.212 } 44.213 44.214 44.215 -int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type) { 44.216 - if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(from_reg->as_register()); 44.217 +int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide) { 44.218 + if (type == T_ARRAY || type == T_OBJECT) { 44.219 + __ verify_oop(from_reg->as_register()); 44.220 + } 44.221 int store_offset = code_offset(); 44.222 switch (type) 
{ 44.223 case T_BOOLEAN: // fall through 44.224 @@ -975,9 +874,21 @@ 44.225 __ std(from_reg->as_register_hi(), base, disp); 44.226 #endif 44.227 break; 44.228 - case T_ADDRESS:// fall through 44.229 + case T_ADDRESS: 44.230 + __ st_ptr(from_reg->as_register(), base, disp); 44.231 + break; 44.232 case T_ARRAY : // fall through 44.233 - case T_OBJECT: __ st_ptr(from_reg->as_register(), base, disp); break; 44.234 + case T_OBJECT: 44.235 + { 44.236 + if (UseCompressedOops && !wide) { 44.237 + __ encode_heap_oop(from_reg->as_register(), G3_scratch); 44.238 + store_offset = code_offset(); 44.239 + __ stw(G3_scratch, base, disp); 44.240 + } else { 44.241 + __ st_ptr(from_reg->as_register(), base, disp); 44.242 + } 44.243 + break; 44.244 + } 44.245 case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, disp); break; 44.246 case T_DOUBLE: __ stf(FloatRegisterImpl::D, from_reg->as_double_reg(), base, disp); break; 44.247 default : ShouldNotReachHere(); 44.248 @@ -986,14 +897,14 @@ 44.249 } 44.250 44.251 44.252 -int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool unaligned) { 44.253 +int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide, bool unaligned) { 44.254 int load_offset; 44.255 if (!Assembler::is_simm13(offset + (type == T_LONG) ? 
wordSize : 0)) { 44.256 assert(base != O7, "destroying register"); 44.257 assert(!unaligned, "can't handle this"); 44.258 // for offsets larger than a simm13 we setup the offset in O7 44.259 __ set(offset, O7); 44.260 - load_offset = load(base, O7, to_reg, type); 44.261 + load_offset = load(base, O7, to_reg, type, wide); 44.262 } else { 44.263 load_offset = code_offset(); 44.264 switch(type) { 44.265 @@ -1030,9 +941,18 @@ 44.266 #endif 44.267 } 44.268 break; 44.269 - case T_ADDRESS:// fall through 44.270 + case T_ADDRESS: __ ld_ptr(base, offset, to_reg->as_register()); break; 44.271 case T_ARRAY : // fall through 44.272 - case T_OBJECT: __ ld_ptr(base, offset, to_reg->as_register()); break; 44.273 + case T_OBJECT: 44.274 + { 44.275 + if (UseCompressedOops && !wide) { 44.276 + __ lduw(base, offset, to_reg->as_register()); 44.277 + __ decode_heap_oop(to_reg->as_register()); 44.278 + } else { 44.279 + __ ld_ptr(base, offset, to_reg->as_register()); 44.280 + } 44.281 + break; 44.282 + } 44.283 case T_FLOAT: __ ldf(FloatRegisterImpl::S, base, offset, to_reg->as_float_reg()); break; 44.284 case T_DOUBLE: 44.285 { 44.286 @@ -1048,23 +968,34 @@ 44.287 } 44.288 default : ShouldNotReachHere(); 44.289 } 44.290 - if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(to_reg->as_register()); 44.291 + if (type == T_ARRAY || type == T_OBJECT) { 44.292 + __ verify_oop(to_reg->as_register()); 44.293 + } 44.294 } 44.295 return load_offset; 44.296 } 44.297 44.298 44.299 -int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type) { 44.300 +int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide) { 44.301 int load_offset = code_offset(); 44.302 switch(type) { 44.303 case T_BOOLEAN: // fall through 44.304 - case T_BYTE : __ ldsb(base, disp, to_reg->as_register()); break; 44.305 - case T_CHAR : __ lduh(base, disp, to_reg->as_register()); break; 44.306 - case T_SHORT : __ ldsh(base, disp, to_reg->as_register()); break; 
44.307 - case T_INT : __ ld(base, disp, to_reg->as_register()); break; 44.308 - case T_ADDRESS:// fall through 44.309 + case T_BYTE : __ ldsb(base, disp, to_reg->as_register()); break; 44.310 + case T_CHAR : __ lduh(base, disp, to_reg->as_register()); break; 44.311 + case T_SHORT : __ ldsh(base, disp, to_reg->as_register()); break; 44.312 + case T_INT : __ ld(base, disp, to_reg->as_register()); break; 44.313 + case T_ADDRESS: __ ld_ptr(base, disp, to_reg->as_register()); break; 44.314 case T_ARRAY : // fall through 44.315 - case T_OBJECT: __ ld_ptr(base, disp, to_reg->as_register()); break; 44.316 + case T_OBJECT: 44.317 + { 44.318 + if (UseCompressedOops && !wide) { 44.319 + __ lduw(base, disp, to_reg->as_register()); 44.320 + __ decode_heap_oop(to_reg->as_register()); 44.321 + } else { 44.322 + __ ld_ptr(base, disp, to_reg->as_register()); 44.323 + } 44.324 + break; 44.325 + } 44.326 case T_FLOAT: __ ldf(FloatRegisterImpl::S, base, disp, to_reg->as_float_reg()); break; 44.327 case T_DOUBLE: __ ldf(FloatRegisterImpl::D, base, disp, to_reg->as_double_reg()); break; 44.328 case T_LONG : 44.329 @@ -1078,60 +1009,28 @@ 44.330 break; 44.331 default : ShouldNotReachHere(); 44.332 } 44.333 - if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(to_reg->as_register()); 44.334 + if (type == T_ARRAY || type == T_OBJECT) { 44.335 + __ verify_oop(to_reg->as_register()); 44.336 + } 44.337 return load_offset; 44.338 } 44.339 44.340 - 44.341 -// load/store with an Address 44.342 -void LIR_Assembler::load(const Address& a, Register d, BasicType ld_type, CodeEmitInfo *info, int offset) { 44.343 - load(a.base(), a.disp() + offset, d, ld_type, info); 44.344 -} 44.345 - 44.346 - 44.347 -void LIR_Assembler::store(Register value, const Address& dest, BasicType type, CodeEmitInfo *info, int offset) { 44.348 - store(value, dest.base(), dest.disp() + offset, type, info); 44.349 -} 44.350 - 44.351 - 44.352 -// loadf/storef with an Address 44.353 -void LIR_Assembler::load(const Address& a, 
FloatRegister d, BasicType ld_type, CodeEmitInfo *info, int offset) { 44.354 - load(a.base(), a.disp() + offset, d, ld_type, info); 44.355 -} 44.356 - 44.357 - 44.358 -void LIR_Assembler::store(FloatRegister value, const Address& dest, BasicType type, CodeEmitInfo *info, int offset) { 44.359 - store(value, dest.base(), dest.disp() + offset, type, info); 44.360 -} 44.361 - 44.362 - 44.363 -// load/store with an Address 44.364 -void LIR_Assembler::load(LIR_Address* a, Register d, BasicType ld_type, CodeEmitInfo *info) { 44.365 - load(as_Address(a), d, ld_type, info); 44.366 -} 44.367 - 44.368 - 44.369 -void LIR_Assembler::store(Register value, LIR_Address* dest, BasicType type, CodeEmitInfo *info) { 44.370 - store(value, as_Address(dest), type, info); 44.371 -} 44.372 - 44.373 - 44.374 -// loadf/storef with an Address 44.375 -void LIR_Assembler::load(LIR_Address* a, FloatRegister d, BasicType ld_type, CodeEmitInfo *info) { 44.376 - load(as_Address(a), d, ld_type, info); 44.377 -} 44.378 - 44.379 - 44.380 -void LIR_Assembler::store(FloatRegister value, LIR_Address* dest, BasicType type, CodeEmitInfo *info) { 44.381 - store(value, as_Address(dest), type, info); 44.382 -} 44.383 - 44.384 - 44.385 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) { 44.386 LIR_Const* c = src->as_constant_ptr(); 44.387 switch (c->type()) { 44.388 case T_INT: 44.389 - case T_FLOAT: 44.390 + case T_FLOAT: { 44.391 + Register src_reg = O7; 44.392 + int value = c->as_jint_bits(); 44.393 + if (value == 0) { 44.394 + src_reg = G0; 44.395 + } else { 44.396 + __ set(value, O7); 44.397 + } 44.398 + Address addr = frame_map()->address_for_slot(dest->single_stack_ix()); 44.399 + __ stw(src_reg, addr.base(), addr.disp()); 44.400 + break; 44.401 + } 44.402 case T_ADDRESS: { 44.403 Register src_reg = O7; 44.404 int value = c->as_jint_bits(); 44.405 @@ -1141,7 +1040,7 @@ 44.406 __ set(value, O7); 44.407 } 44.408 Address addr = frame_map()->address_for_slot(dest->single_stack_ix()); 44.409 - __ 
stw(src_reg, addr.base(), addr.disp()); 44.410 + __ st_ptr(src_reg, addr.base(), addr.disp()); 44.411 break; 44.412 } 44.413 case T_OBJECT: { 44.414 @@ -1178,14 +1077,12 @@ 44.415 } 44.416 44.417 44.418 -void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info ) { 44.419 +void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) { 44.420 LIR_Const* c = src->as_constant_ptr(); 44.421 LIR_Address* addr = dest->as_address_ptr(); 44.422 Register base = addr->base()->as_pointer_register(); 44.423 - 44.424 - if (info != NULL) { 44.425 - add_debug_info_for_null_check_here(info); 44.426 - } 44.427 + int offset = -1; 44.428 + 44.429 switch (c->type()) { 44.430 case T_INT: 44.431 case T_FLOAT: 44.432 @@ -1199,10 +1096,10 @@ 44.433 } 44.434 if (addr->index()->is_valid()) { 44.435 assert(addr->disp() == 0, "must be zero"); 44.436 - store(tmp, base, addr->index()->as_pointer_register(), type); 44.437 + offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide); 44.438 } else { 44.439 assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses"); 44.440 - store(tmp, base, addr->disp(), type); 44.441 + offset = store(tmp, base, addr->disp(), type, wide, false); 44.442 } 44.443 break; 44.444 } 44.445 @@ -1212,21 +1109,21 @@ 44.446 assert(Assembler::is_simm13(addr->disp()) && 44.447 Assembler::is_simm13(addr->disp() + 4), "can't handle larger addresses"); 44.448 44.449 - Register tmp = O7; 44.450 + LIR_Opr tmp = FrameMap::O7_opr; 44.451 int value_lo = c->as_jint_lo_bits(); 44.452 if (value_lo == 0) { 44.453 - tmp = G0; 44.454 + tmp = FrameMap::G0_opr; 44.455 } else { 44.456 __ set(value_lo, O7); 44.457 } 44.458 - store(tmp, base, addr->disp() + lo_word_offset_in_bytes, T_INT); 44.459 + offset = store(tmp, base, addr->disp() + lo_word_offset_in_bytes, T_INT, wide, false); 44.460 int value_hi = c->as_jint_hi_bits(); 44.461 if (value_hi == 0) { 44.462 - tmp = G0; 44.463 + 
tmp = FrameMap::G0_opr; 44.464 } else { 44.465 __ set(value_hi, O7); 44.466 } 44.467 - store(tmp, base, addr->disp() + hi_word_offset_in_bytes, T_INT); 44.468 + offset = store(tmp, base, addr->disp() + hi_word_offset_in_bytes, T_INT, wide, false); 44.469 break; 44.470 } 44.471 case T_OBJECT: { 44.472 @@ -1241,10 +1138,10 @@ 44.473 // handle either reg+reg or reg+disp address 44.474 if (addr->index()->is_valid()) { 44.475 assert(addr->disp() == 0, "must be zero"); 44.476 - store(tmp, base, addr->index()->as_pointer_register(), type); 44.477 + offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide); 44.478 } else { 44.479 assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses"); 44.480 - store(tmp, base, addr->disp(), type); 44.481 + offset = store(tmp, base, addr->disp(), type, wide, false); 44.482 } 44.483 44.484 break; 44.485 @@ -1252,6 +1149,10 @@ 44.486 default: 44.487 Unimplemented(); 44.488 } 44.489 + if (info != NULL) { 44.490 + assert(offset != -1, "offset should've been set"); 44.491 + add_debug_info_for_null_check(offset, info); 44.492 + } 44.493 } 44.494 44.495 44.496 @@ -1336,7 +1237,7 @@ 44.497 assert(to_reg->is_single_cpu(), "Must be a cpu register."); 44.498 44.499 __ set(const_addrlit, O7); 44.500 - load(O7, 0, to_reg->as_register(), T_INT); 44.501 + __ ld(O7, 0, to_reg->as_register()); 44.502 } 44.503 } 44.504 break; 44.505 @@ -1429,7 +1330,7 @@ 44.506 44.507 44.508 void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type, 44.509 - LIR_PatchCode patch_code, CodeEmitInfo* info, bool unaligned) { 44.510 + LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool unaligned) { 44.511 44.512 LIR_Address* addr = src_opr->as_address_ptr(); 44.513 LIR_Opr to_reg = dest; 44.514 @@ -1475,16 +1376,15 @@ 44.515 44.516 assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up"); 44.517 if (disp_reg == noreg) { 44.518 - offset = load(src, disp_value, to_reg, type, unaligned); 
44.519 + offset = load(src, disp_value, to_reg, type, wide, unaligned); 44.520 } else { 44.521 assert(!unaligned, "can't handle this"); 44.522 - offset = load(src, disp_reg, to_reg, type); 44.523 + offset = load(src, disp_reg, to_reg, type, wide); 44.524 } 44.525 44.526 if (patch != NULL) { 44.527 patching_epilog(patch, patch_code, src, info); 44.528 } 44.529 - 44.530 if (info != NULL) add_debug_info_for_null_check(offset, info); 44.531 } 44.532 44.533 @@ -1518,7 +1418,7 @@ 44.534 } 44.535 44.536 bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0; 44.537 - load(addr.base(), addr.disp(), dest, dest->type(), unaligned); 44.538 + load(addr.base(), addr.disp(), dest, dest->type(), true /*wide*/, unaligned); 44.539 } 44.540 44.541 44.542 @@ -1530,7 +1430,7 @@ 44.543 addr = frame_map()->address_for_slot(dest->double_stack_ix()); 44.544 } 44.545 bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0; 44.546 - store(from_reg, addr.base(), addr.disp(), from_reg->type(), unaligned); 44.547 + store(from_reg, addr.base(), addr.disp(), from_reg->type(), true /*wide*/, unaligned); 44.548 } 44.549 44.550 44.551 @@ -1578,7 +1478,7 @@ 44.552 44.553 void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type, 44.554 LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, 44.555 - bool unaligned) { 44.556 + bool wide, bool unaligned) { 44.557 LIR_Address* addr = dest->as_address_ptr(); 44.558 44.559 Register src = addr->base()->as_pointer_register(); 44.560 @@ -1622,10 +1522,10 @@ 44.561 44.562 assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up"); 44.563 if (disp_reg == noreg) { 44.564 - offset = store(from_reg, src, disp_value, type, unaligned); 44.565 + offset = store(from_reg, src, disp_value, type, wide, unaligned); 44.566 } else { 44.567 assert(!unaligned, "can't handle this"); 44.568 - offset = store(from_reg, src, disp_reg, type); 44.569 + offset = store(from_reg, src, disp_reg, type, wide); 44.570 } 44.571 44.572 if 
(patch != NULL) { 44.573 @@ -2184,13 +2084,13 @@ 44.574 // make sure src and dst are non-null and load array length 44.575 if (flags & LIR_OpArrayCopy::src_null_check) { 44.576 __ tst(src); 44.577 - __ br(Assembler::equal, false, Assembler::pn, *stub->entry()); 44.578 + __ brx(Assembler::equal, false, Assembler::pn, *stub->entry()); 44.579 __ delayed()->nop(); 44.580 } 44.581 44.582 if (flags & LIR_OpArrayCopy::dst_null_check) { 44.583 __ tst(dst); 44.584 - __ br(Assembler::equal, false, Assembler::pn, *stub->entry()); 44.585 + __ brx(Assembler::equal, false, Assembler::pn, *stub->entry()); 44.586 __ delayed()->nop(); 44.587 } 44.588 44.589 @@ -2232,10 +2132,18 @@ 44.590 } 44.591 44.592 if (flags & LIR_OpArrayCopy::type_check) { 44.593 - __ ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp); 44.594 - __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2); 44.595 - __ cmp(tmp, tmp2); 44.596 - __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry()); 44.597 + if (UseCompressedOops) { 44.598 + // We don't need decode because we just need to compare 44.599 + __ lduw(src, oopDesc::klass_offset_in_bytes(), tmp); 44.600 + __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2); 44.601 + __ cmp(tmp, tmp2); 44.602 + __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry()); 44.603 + } else { 44.604 + __ ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp); 44.605 + __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2); 44.606 + __ cmp(tmp, tmp2); 44.607 + __ brx(Assembler::notEqual, false, Assembler::pt, *stub->entry()); 44.608 + } 44.609 __ delayed()->nop(); 44.610 } 44.611 44.612 @@ -2250,20 +2158,44 @@ 44.613 // but not necessarily exactly of type default_type. 
44.614 Label known_ok, halt; 44.615 jobject2reg(op->expected_type()->constant_encoding(), tmp); 44.616 - __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2); 44.617 - if (basic_type != T_OBJECT) { 44.618 - __ cmp(tmp, tmp2); 44.619 - __ br(Assembler::notEqual, false, Assembler::pn, halt); 44.620 - __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp2); 44.621 - __ cmp(tmp, tmp2); 44.622 - __ br(Assembler::equal, false, Assembler::pn, known_ok); 44.623 - __ delayed()->nop(); 44.624 + if (UseCompressedOops) { 44.625 + // tmp holds the default type. It currently comes uncompressed after the 44.626 + // load of a constant, so encode it. 44.627 + __ encode_heap_oop(tmp); 44.628 + // load the raw value of the dst klass, since we will be comparing 44.629 + // uncompressed values directly. 44.630 + __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2); 44.631 + if (basic_type != T_OBJECT) { 44.632 + __ cmp(tmp, tmp2); 44.633 + __ br(Assembler::notEqual, false, Assembler::pn, halt); 44.634 + // load the raw value of the src klass. 
44.635 + __ delayed()->lduw(src, oopDesc::klass_offset_in_bytes(), tmp2); 44.636 + __ cmp(tmp, tmp2); 44.637 + __ br(Assembler::equal, false, Assembler::pn, known_ok); 44.638 + __ delayed()->nop(); 44.639 + } else { 44.640 + __ cmp(tmp, tmp2); 44.641 + __ br(Assembler::equal, false, Assembler::pn, known_ok); 44.642 + __ delayed()->cmp(src, dst); 44.643 + __ brx(Assembler::equal, false, Assembler::pn, known_ok); 44.644 + __ delayed()->nop(); 44.645 + } 44.646 } else { 44.647 - __ cmp(tmp, tmp2); 44.648 - __ br(Assembler::equal, false, Assembler::pn, known_ok); 44.649 - __ delayed()->cmp(src, dst); 44.650 - __ br(Assembler::equal, false, Assembler::pn, known_ok); 44.651 - __ delayed()->nop(); 44.652 + __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2); 44.653 + if (basic_type != T_OBJECT) { 44.654 + __ cmp(tmp, tmp2); 44.655 + __ brx(Assembler::notEqual, false, Assembler::pn, halt); 44.656 + __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp2); 44.657 + __ cmp(tmp, tmp2); 44.658 + __ brx(Assembler::equal, false, Assembler::pn, known_ok); 44.659 + __ delayed()->nop(); 44.660 + } else { 44.661 + __ cmp(tmp, tmp2); 44.662 + __ brx(Assembler::equal, false, Assembler::pn, known_ok); 44.663 + __ delayed()->cmp(src, dst); 44.664 + __ brx(Assembler::equal, false, Assembler::pn, known_ok); 44.665 + __ delayed()->nop(); 44.666 + } 44.667 } 44.668 __ bind(halt); 44.669 __ stop("incorrect type information in arraycopy"); 44.670 @@ -2471,7 +2403,7 @@ 44.671 Label next_test; 44.672 Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) - 44.673 mdo_offset_bias); 44.674 - load(recv_addr, tmp1, T_OBJECT); 44.675 + __ ld_ptr(recv_addr, tmp1); 44.676 __ br_notnull(tmp1, false, Assembler::pt, next_test); 44.677 __ delayed()->nop(); 44.678 __ st_ptr(recv, recv_addr); 44.679 @@ -2487,11 +2419,8 @@ 44.680 44.681 void LIR_Assembler::setup_md_access(ciMethod* method, int bci, 44.682 ciMethodData*& md, ciProfileData*& data, int& 
mdo_offset_bias) { 44.683 - md = method->method_data(); 44.684 - if (md == NULL) { 44.685 - bailout("out of memory building methodDataOop"); 44.686 - return; 44.687 - } 44.688 + md = method->method_data_or_null(); 44.689 + assert(md != NULL, "Sanity"); 44.690 data = md->bci_to_data(bci); 44.691 assert(data != NULL, "need data for checkcast"); 44.692 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); 44.693 @@ -2563,7 +2492,7 @@ 44.694 44.695 // get object class 44.696 // not a safepoint as obj null check happens earlier 44.697 - load(obj, oopDesc::klass_offset_in_bytes(), klass_RInfo, T_OBJECT, NULL); 44.698 + __ load_klass(obj, klass_RInfo); 44.699 if (op->fast_check()) { 44.700 assert_different_registers(klass_RInfo, k_RInfo); 44.701 __ cmp(k_RInfo, klass_RInfo); 44.702 @@ -2605,7 +2534,7 @@ 44.703 __ set(mdo_offset_bias, tmp1); 44.704 __ add(mdo, tmp1, mdo); 44.705 } 44.706 - load(Address(obj, oopDesc::klass_offset_in_bytes()), recv, T_OBJECT); 44.707 + __ load_klass(obj, recv); 44.708 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, success); 44.709 // Jump over the failure case 44.710 __ ba(false, *success); 44.711 @@ -2674,11 +2603,12 @@ 44.712 __ br_null(value, false, Assembler::pn, done); 44.713 __ delayed()->nop(); 44.714 } 44.715 - load(array, oopDesc::klass_offset_in_bytes(), k_RInfo, T_OBJECT, op->info_for_exception()); 44.716 - load(value, oopDesc::klass_offset_in_bytes(), klass_RInfo, T_OBJECT, NULL); 44.717 + add_debug_info_for_null_check_here(op->info_for_exception()); 44.718 + __ load_klass(array, k_RInfo); 44.719 + __ load_klass(value, klass_RInfo); 44.720 44.721 // get instance klass 44.722 - load(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc), k_RInfo, T_OBJECT, NULL); 44.723 + __ ld_ptr(Address(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)), k_RInfo); 44.724 // perform the fast part of the checking logic 44.725 __ 
check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target, failure_target, NULL); 44.726 44.727 @@ -2700,7 +2630,7 @@ 44.728 __ set(mdo_offset_bias, tmp1); 44.729 __ add(mdo, tmp1, mdo); 44.730 } 44.731 - load(Address(value, oopDesc::klass_offset_in_bytes()), recv, T_OBJECT); 44.732 + __ load_klass(value, recv); 44.733 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &done); 44.734 __ ba(false, done); 44.735 __ delayed()->nop(); 44.736 @@ -2781,14 +2711,17 @@ 44.737 Register t2 = op->tmp2()->as_register(); 44.738 __ mov(cmp_value, t1); 44.739 __ mov(new_value, t2); 44.740 -#ifdef _LP64 44.741 if (op->code() == lir_cas_obj) { 44.742 - __ casx(addr, t1, t2); 44.743 - } else 44.744 -#endif 44.745 - { 44.746 + if (UseCompressedOops) { 44.747 + __ encode_heap_oop(t1); 44.748 + __ encode_heap_oop(t2); 44.749 __ cas(addr, t1, t2); 44.750 + } else { 44.751 + __ cas_ptr(addr, t1, t2); 44.752 } 44.753 + } else { 44.754 + __ cas(addr, t1, t2); 44.755 + } 44.756 __ cmp(t1, t2); 44.757 } else { 44.758 Unimplemented(); 44.759 @@ -2885,11 +2818,8 @@ 44.760 int bci = op->profiled_bci(); 44.761 44.762 // Update counter for all call types 44.763 - ciMethodData* md = method->method_data(); 44.764 - if (md == NULL) { 44.765 - bailout("out of memory building methodDataOop"); 44.766 - return; 44.767 - } 44.768 + ciMethodData* md = method->method_data_or_null(); 44.769 + assert(md != NULL, "Sanity"); 44.770 ciProfileData* data = md->bci_to_data(bci); 44.771 assert(data->is_CounterData(), "need CounterData for calls"); 44.772 assert(op->mdo()->is_single_cpu(), "mdo must be allocated"); 44.773 @@ -2966,7 +2896,7 @@ 44.774 } 44.775 } 44.776 } else { 44.777 - load(Address(recv, oopDesc::klass_offset_in_bytes()), recv, T_OBJECT); 44.778 + __ load_klass(recv, recv); 44.779 Label update_done; 44.780 type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done); 44.781 // Receiver did not match any saved receiver and there is no empty row for 
it. 44.782 @@ -3160,7 +3090,7 @@ 44.783 } else { 44.784 // use normal move for all other volatiles since they don't need 44.785 // special handling to remain atomic. 44.786 - move_op(src, dest, type, lir_patch_none, info, false, false); 44.787 + move_op(src, dest, type, lir_patch_none, info, false, false, false); 44.788 } 44.789 } 44.790
45.1 --- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.hpp Mon Dec 27 09:30:20 2010 -0500 45.2 +++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.hpp Mon Dec 27 09:56:29 2010 -0500 45.3 @@ -40,33 +40,11 @@ 45.4 // and then a load or store is emitted with ([O7] + [d]). 45.5 // 45.6 45.7 - // some load/store variants return the code_offset for proper positioning of debug info for null checks 45.8 + int store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool wide, bool unaligned); 45.9 + int store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide); 45.10 45.11 - // load/store with 32 bit displacement 45.12 - int load(Register s, int disp, Register d, BasicType ld_type, CodeEmitInfo* info = NULL); 45.13 - void store(Register value, Register base, int offset, BasicType type, CodeEmitInfo *info = NULL); 45.14 - 45.15 - // loadf/storef with 32 bit displacement 45.16 - void load(Register s, int disp, FloatRegister d, BasicType ld_type, CodeEmitInfo* info = NULL); 45.17 - void store(FloatRegister d, Register s1, int disp, BasicType st_type, CodeEmitInfo* info = NULL); 45.18 - 45.19 - // convienence methods for calling load/store with an Address 45.20 - void load(const Address& a, Register d, BasicType ld_type, CodeEmitInfo* info = NULL, int offset = 0); 45.21 - void store(Register d, const Address& a, BasicType st_type, CodeEmitInfo* info = NULL, int offset = 0); 45.22 - void load(const Address& a, FloatRegister d, BasicType ld_type, CodeEmitInfo* info = NULL, int offset = 0); 45.23 - void store(FloatRegister d, const Address& a, BasicType st_type, CodeEmitInfo* info = NULL, int offset = 0); 45.24 - 45.25 - // convienence methods for calling load/store with an LIR_Address 45.26 - void load(LIR_Address* a, Register d, BasicType ld_type, CodeEmitInfo* info = NULL); 45.27 - void store(Register d, LIR_Address* a, BasicType st_type, CodeEmitInfo* info = NULL); 45.28 - void load(LIR_Address* a, FloatRegister d, BasicType ld_type, CodeEmitInfo* info = 
NULL); 45.29 - void store(FloatRegister d, LIR_Address* a, BasicType st_type, CodeEmitInfo* info = NULL); 45.30 - 45.31 - int store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool unaligned = false); 45.32 - int store(LIR_Opr from_reg, Register base, Register disp, BasicType type); 45.33 - 45.34 - int load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool unaligned = false); 45.35 - int load(Register base, Register disp, LIR_Opr to_reg, BasicType type); 45.36 + int load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide, bool unaligned); 45.37 + int load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide); 45.38 45.39 void monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register hdr, int monitor_no); 45.40
46.1 --- a/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp Mon Dec 27 09:30:20 2010 -0500 46.2 +++ b/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp Mon Dec 27 09:56:29 2010 -0500 46.3 @@ -40,7 +40,7 @@ 46.4 const Register temp_reg = G3_scratch; 46.5 // Note: needs more testing of out-of-line vs. inline slow case 46.6 verify_oop(receiver); 46.7 - ld_ptr(receiver, oopDesc::klass_offset_in_bytes(), temp_reg); 46.8 + load_klass(receiver, temp_reg); 46.9 cmp(temp_reg, iCache); 46.10 brx(Assembler::equal, true, Assembler::pt, L); 46.11 delayed()->nop(); 46.12 @@ -185,9 +185,19 @@ 46.13 } else { 46.14 set((intx)markOopDesc::prototype(), t1); 46.15 } 46.16 - st_ptr(t1 , obj, oopDesc::mark_offset_in_bytes ()); 46.17 - st_ptr(klass, obj, oopDesc::klass_offset_in_bytes ()); 46.18 - if (len->is_valid()) st(len , obj, arrayOopDesc::length_offset_in_bytes()); 46.19 + st_ptr(t1, obj, oopDesc::mark_offset_in_bytes()); 46.20 + if (UseCompressedOops) { 46.21 + // Save klass 46.22 + mov(klass, t1); 46.23 + encode_heap_oop_not_null(t1); 46.24 + stw(t1, obj, oopDesc::klass_offset_in_bytes()); 46.25 + } else { 46.26 + st_ptr(klass, obj, oopDesc::klass_offset_in_bytes()); 46.27 + } 46.28 + if (len->is_valid()) st(len, obj, arrayOopDesc::length_offset_in_bytes()); 46.29 + else if (UseCompressedOops) { 46.30 + store_klass_gap(G0, obj); 46.31 + } 46.32 } 46.33 46.34 46.35 @@ -235,7 +245,7 @@ 46.36 Register t1, // temp register 46.37 Register t2 // temp register 46.38 ) { 46.39 - const int hdr_size_in_bytes = instanceOopDesc::base_offset_in_bytes(); 46.40 + const int hdr_size_in_bytes = instanceOopDesc::header_size() * HeapWordSize; 46.41 46.42 initialize_header(obj, klass, noreg, t1, t2); 46.43
47.1 --- a/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp Mon Dec 27 09:30:20 2010 -0500 47.2 +++ b/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp Mon Dec 27 09:56:29 2010 -0500 47.3 @@ -612,7 +612,7 @@ 47.4 // load the klass and check the has finalizer flag 47.5 Label register_finalizer; 47.6 Register t = O1; 47.7 - __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), t); 47.8 + __ load_klass(O0, t); 47.9 __ ld(t, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc), t); 47.10 __ set(JVM_ACC_HAS_FINALIZER, G3); 47.11 __ andcc(G3, t, G0);
48.1 --- a/src/cpu/sparc/vm/methodHandles_sparc.cpp Mon Dec 27 09:30:20 2010 -0500 48.2 +++ b/src/cpu/sparc/vm/methodHandles_sparc.cpp Mon Dec 27 09:56:29 2010 -0500 48.3 @@ -689,8 +689,8 @@ 48.4 { 48.5 // Perform an in-place conversion to int or an int subword. 48.6 __ ldsw(G3_amh_vmargslot, O0_argslot); 48.7 + Address value; 48.8 Address vmarg = __ argument_address(O0_argslot); 48.9 - Address value; 48.10 bool value_left_justified = false; 48.11 48.12 switch (ek) { 48.13 @@ -700,9 +700,21 @@ 48.14 case _adapter_opt_l2i: 48.15 { 48.16 // just delete the extra slot 48.17 +#ifdef _LP64 48.18 + // In V9, longs are given 2 64-bit slots in the interpreter, but the 48.19 + // data is passed in only 1 slot. 48.20 + // Keep the second slot. 48.21 + __ add(Gargs, __ argument_offset(O0_argslot, -1), O0_argslot); 48.22 + remove_arg_slots(_masm, -stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch); 48.23 + value = Address(O0_argslot, 4); // Get least-significant 32-bit of 64-bit value. 48.24 + vmarg = Address(O0_argslot, Interpreter::stackElementSize); 48.25 +#else 48.26 + // Keep the first slot. 48.27 __ add(Gargs, __ argument_offset(O0_argslot), O0_argslot); 48.28 remove_arg_slots(_masm, -stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch); 48.29 - value = vmarg = Address(O0_argslot, 0); 48.30 + value = Address(O0_argslot, 0); 48.31 + vmarg = value; 48.32 +#endif 48.33 } 48.34 break; 48.35 case _adapter_opt_unboxi:
49.1 --- a/src/cpu/sparc/vm/sparc.ad Mon Dec 27 09:30:20 2010 -0500 49.2 +++ b/src/cpu/sparc/vm/sparc.ad Mon Dec 27 09:56:29 2010 -0500 49.3 @@ -667,6 +667,20 @@ 49.4 return offset; 49.5 } 49.6 49.7 +static inline jdouble replicate_immI(int con, int count, int width) { 49.8 + // Load a constant replicated "count" times with width "width" 49.9 + int bit_width = width * 8; 49.10 + jlong elt_val = con; 49.11 + elt_val &= (((jlong) 1) << bit_width) - 1; // mask off sign bits 49.12 + jlong val = elt_val; 49.13 + for (int i = 0; i < count - 1; i++) { 49.14 + val <<= bit_width; 49.15 + val |= elt_val; 49.16 + } 49.17 + jdouble dval = *((jdouble*) &val); // coerce to double type 49.18 + return dval; 49.19 +} 49.20 + 49.21 // Standard Sparc opcode form2 field breakdown 49.22 static inline void emit2_19(CodeBuffer &cbuf, int f30, int f29, int f25, int f22, int f20, int f19, int f0 ) { 49.23 f0 &= (1<<19)-1; // Mask displacement to 19 bits 49.24 @@ -1008,6 +1022,90 @@ 49.25 49.26 49.27 //============================================================================= 49.28 +const bool Matcher::constant_table_absolute_addressing = false; 49.29 +const RegMask& MachConstantBaseNode::_out_RegMask = PTR_REG_mask; 49.30 + 49.31 +void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const { 49.32 + Compile* C = ra_->C; 49.33 + Compile::ConstantTable& constant_table = C->constant_table(); 49.34 + MacroAssembler _masm(&cbuf); 49.35 + 49.36 + Register r = as_Register(ra_->get_encode(this)); 49.37 + CodeSection* cs = __ code()->consts(); 49.38 + int consts_size = cs->align_at_start(cs->size()); 49.39 + 49.40 + if (UseRDPCForConstantTableBase) { 49.41 + // For the following RDPC logic to work correctly the consts 49.42 + // section must be allocated right before the insts section. This 49.43 + // assert checks for that. The layout and the SECT_* constants 49.44 + // are defined in src/share/vm/asm/codeBuffer.hpp. 
49.45 + assert(CodeBuffer::SECT_CONSTS + 1 == CodeBuffer::SECT_INSTS, "must be"); 49.46 + int offset = __ offset(); 49.47 + int disp; 49.48 + 49.49 + // If the displacement from the current PC to the constant table 49.50 + // base fits into simm13 we set the constant table base to the 49.51 + // current PC. 49.52 + if (__ is_simm13(-(consts_size + offset))) { 49.53 + constant_table.set_table_base_offset(-(consts_size + offset)); 49.54 + disp = 0; 49.55 + } else { 49.56 + // If the offset of the top constant (last entry in the table) 49.57 + // fits into simm13 we set the constant table base to the actual 49.58 + // table base. 49.59 + if (__ is_simm13(constant_table.top_offset())) { 49.60 + constant_table.set_table_base_offset(0); 49.61 + disp = consts_size + offset; 49.62 + } else { 49.63 + // Otherwise we set the constant table base in the middle of the 49.64 + // constant table. 49.65 + int half_consts_size = consts_size / 2; 49.66 + assert(half_consts_size * 2 == consts_size, "sanity"); 49.67 + constant_table.set_table_base_offset(-half_consts_size); // table base offset gets added to the load displacement. 49.68 + disp = half_consts_size + offset; 49.69 + } 49.70 + } 49.71 + 49.72 + __ rdpc(r); 49.73 + 49.74 + if (disp != 0) { 49.75 + assert(r != O7, "need temporary"); 49.76 + __ sub(r, __ ensure_simm13_or_reg(disp, O7), r); 49.77 + } 49.78 + } 49.79 + else { 49.80 + // Materialize the constant table base. 49.81 + assert(constant_table.size() == consts_size, err_msg("must be: %d == %d", constant_table.size(), consts_size)); 49.82 + address baseaddr = cs->start() + -(constant_table.table_base_offset()); 49.83 + RelocationHolder rspec = internal_word_Relocation::spec(baseaddr); 49.84 + AddressLiteral base(baseaddr, rspec); 49.85 + __ set(base, r); 49.86 + } 49.87 +} 49.88 + 49.89 +uint MachConstantBaseNode::size(PhaseRegAlloc*) const { 49.90 + if (UseRDPCForConstantTableBase) { 49.91 + // This is really the worst case but generally it's only 1 instruction. 
49.92 + return 4 /*rdpc*/ + 4 /*sub*/ + MacroAssembler::worst_case_size_of_set(); 49.93 + } else { 49.94 + return MacroAssembler::worst_case_size_of_set(); 49.95 + } 49.96 +} 49.97 + 49.98 +#ifndef PRODUCT 49.99 +void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const { 49.100 + char reg[128]; 49.101 + ra_->dump_register(this, reg); 49.102 + if (UseRDPCForConstantTableBase) { 49.103 + st->print("RDPC %s\t! constant table base", reg); 49.104 + } else { 49.105 + st->print("SET &constanttable,%s\t! constant table base", reg); 49.106 + } 49.107 +} 49.108 +#endif 49.109 + 49.110 + 49.111 +//============================================================================= 49.112 49.113 #ifndef PRODUCT 49.114 void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream *st ) const { 49.115 @@ -2247,25 +2345,6 @@ 49.116 __ delayed()->nop(); 49.117 %} 49.118 49.119 - enc_class jump_enc( iRegX switch_val, o7RegI table) %{ 49.120 - MacroAssembler _masm(&cbuf); 49.121 - 49.122 - Register switch_reg = as_Register($switch_val$$reg); 49.123 - Register table_reg = O7; 49.124 - 49.125 - address table_base = __ address_table_constant(_index2label); 49.126 - RelocationHolder rspec = internal_word_Relocation::spec(table_base); 49.127 - 49.128 - // Move table address into a register. 
49.129 - __ set(table_base, table_reg, rspec); 49.130 - 49.131 - // Jump to base address + switch value 49.132 - __ ld_ptr(table_reg, switch_reg, table_reg); 49.133 - __ jmp(table_reg, G0); 49.134 - __ delayed()->nop(); 49.135 - 49.136 - %} 49.137 - 49.138 enc_class enc_ba( Label labl ) %{ 49.139 MacroAssembler _masm(&cbuf); 49.140 Label &L = *($labl$$label); 49.141 @@ -2384,20 +2463,6 @@ 49.142 cbuf.insts()->emit_int32(op); 49.143 %} 49.144 49.145 - // Utility encoding for loading a 64 bit Pointer into a register 49.146 - // The 64 bit pointer is stored in the generated code stream 49.147 - enc_class SetPtr( immP src, iRegP rd ) %{ 49.148 - Register dest = reg_to_register_object($rd$$reg); 49.149 - MacroAssembler _masm(&cbuf); 49.150 - // [RGV] This next line should be generated from ADLC 49.151 - if ( _opnds[1]->constant_is_oop() ) { 49.152 - intptr_t val = $src$$constant; 49.153 - __ set_oop_constant((jobject)val, dest); 49.154 - } else { // non-oop pointers, e.g. card mark base, heap top 49.155 - __ set($src$$constant, dest); 49.156 - } 49.157 - %} 49.158 - 49.159 enc_class Set13( immI13 src, iRegI rd ) %{ 49.160 emit3_simm13( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, $src$$constant ); 49.161 %} 49.162 @@ -2411,10 +2476,6 @@ 49.163 __ set($src$$constant, reg_to_register_object($rd$$reg)); 49.164 %} 49.165 49.166 - enc_class SetNull( iRegI rd ) %{ 49.167 - emit3_simm13( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, 0 ); 49.168 - %} 49.169 - 49.170 enc_class call_epilog %{ 49.171 if( VerifyStackAtCalls ) { 49.172 MacroAssembler _masm(&cbuf); 49.173 @@ -2778,35 +2839,6 @@ 49.174 __ float_cmp( $primary, -1, Fsrc1, Fsrc2, Rdst); 49.175 %} 49.176 49.177 - enc_class LdImmL (immL src, iRegL dst, o7RegL tmp) %{ // Load Immediate 49.178 - MacroAssembler _masm(&cbuf); 49.179 - Register dest = reg_to_register_object($dst$$reg); 49.180 - Register temp = reg_to_register_object($tmp$$reg); 49.181 - __ set64( $src$$constant, dest, temp ); 49.182 - 
%} 49.183 - 49.184 - enc_class LdReplImmI(immI src, regD dst, o7RegP tmp, int count, int width) %{ 49.185 - // Load a constant replicated "count" times with width "width" 49.186 - int bit_width = $width$$constant * 8; 49.187 - jlong elt_val = $src$$constant; 49.188 - elt_val &= (((jlong)1) << bit_width) - 1; // mask off sign bits 49.189 - jlong val = elt_val; 49.190 - for (int i = 0; i < $count$$constant - 1; i++) { 49.191 - val <<= bit_width; 49.192 - val |= elt_val; 49.193 - } 49.194 - jdouble dval = *(jdouble*)&val; // coerce to double type 49.195 - MacroAssembler _masm(&cbuf); 49.196 - address double_address = __ double_constant(dval); 49.197 - RelocationHolder rspec = internal_word_Relocation::spec(double_address); 49.198 - AddressLiteral addrlit(double_address, rspec); 49.199 - 49.200 - __ sethi(addrlit, $tmp$$Register); 49.201 - // XXX This is a quick fix for 6833573. 49.202 - //__ ldf(FloatRegisterImpl::D, $tmp$$Register, addrlit.low10(), $dst$$FloatRegister, rspec); 49.203 - __ ldf(FloatRegisterImpl::D, $tmp$$Register, addrlit.low10(), as_DoubleFloatRegister($dst$$reg), rspec); 49.204 - %} 49.205 - 49.206 // Compiler ensures base is doubleword aligned and cnt is count of doublewords 49.207 enc_class enc_Clear_Array(iRegX cnt, iRegP base, iRegX temp) %{ 49.208 MacroAssembler _masm(&cbuf); 49.209 @@ -3521,6 +3553,29 @@ 49.210 interface(CONST_INTER); 49.211 %} 49.212 49.213 +// Pointer Immediate: 32 or 64-bit 49.214 +operand immP_set() %{ 49.215 + predicate(!VM_Version::is_niagara1_plus()); 49.216 + match(ConP); 49.217 + 49.218 + op_cost(5); 49.219 + // formats are generated automatically for constants and base registers 49.220 + format %{ %} 49.221 + interface(CONST_INTER); 49.222 +%} 49.223 + 49.224 +// Pointer Immediate: 32 or 64-bit 49.225 +// From Niagara2 processors on a load should be better than materializing. 
49.226 +operand immP_load() %{ 49.227 + predicate(VM_Version::is_niagara1_plus()); 49.228 + match(ConP); 49.229 + 49.230 + op_cost(5); 49.231 + // formats are generated automatically for constants and base registers 49.232 + format %{ %} 49.233 + interface(CONST_INTER); 49.234 +%} 49.235 + 49.236 operand immP13() %{ 49.237 predicate((-4096 < n->get_ptr()) && (n->get_ptr() <= 4095)); 49.238 match(ConP); 49.239 @@ -3616,6 +3671,26 @@ 49.240 interface(CONST_INTER); 49.241 %} 49.242 49.243 +// Long Immediate: cheap (materialize in <= 3 instructions) 49.244 +operand immL_cheap() %{ 49.245 + predicate(!VM_Version::is_niagara1_plus() || MacroAssembler::size_of_set64(n->get_long()) <= 3); 49.246 + match(ConL); 49.247 + op_cost(0); 49.248 + 49.249 + format %{ %} 49.250 + interface(CONST_INTER); 49.251 +%} 49.252 + 49.253 +// Long Immediate: expensive (materialize in > 3 instructions) 49.254 +operand immL_expensive() %{ 49.255 + predicate(VM_Version::is_niagara1_plus() && MacroAssembler::size_of_set64(n->get_long()) > 3); 49.256 + match(ConL); 49.257 + op_cost(0); 49.258 + 49.259 + format %{ %} 49.260 + interface(CONST_INTER); 49.261 +%} 49.262 + 49.263 // Double Immediate 49.264 operand immD() %{ 49.265 match(ConD); 49.266 @@ -5981,25 +6056,59 @@ 49.267 ins_pipe(ialu_imm); 49.268 %} 49.269 49.270 -instruct loadConP(iRegP dst, immP src) %{ 49.271 - match(Set dst src); 49.272 +#ifndef _LP64 49.273 +instruct loadConP(iRegP dst, immP con) %{ 49.274 + match(Set dst con); 49.275 ins_cost(DEFAULT_COST * 3/2); 49.276 - format %{ "SET $src,$dst\t!ptr" %} 49.277 - // This rule does not use "expand" unlike loadConI because then 49.278 - // the result type is not known to be an Oop. An ADLC 49.279 - // enhancement will be needed to make that work - not worth it! 
49.280 - 49.281 - ins_encode( SetPtr( src, dst ) ); 49.282 + format %{ "SET $con,$dst\t!ptr" %} 49.283 + ins_encode %{ 49.284 + // [RGV] This next line should be generated from ADLC 49.285 + if (_opnds[1]->constant_is_oop()) { 49.286 + intptr_t val = $con$$constant; 49.287 + __ set_oop_constant((jobject) val, $dst$$Register); 49.288 + } else { // non-oop pointers, e.g. card mark base, heap top 49.289 + __ set($con$$constant, $dst$$Register); 49.290 + } 49.291 + %} 49.292 ins_pipe(loadConP); 49.293 - 49.294 -%} 49.295 +%} 49.296 +#else 49.297 +instruct loadConP_set(iRegP dst, immP_set con) %{ 49.298 + match(Set dst con); 49.299 + ins_cost(DEFAULT_COST * 3/2); 49.300 + format %{ "SET $con,$dst\t! ptr" %} 49.301 + ins_encode %{ 49.302 + // [RGV] This next line should be generated from ADLC 49.303 + if (_opnds[1]->constant_is_oop()) { 49.304 + intptr_t val = $con$$constant; 49.305 + __ set_oop_constant((jobject) val, $dst$$Register); 49.306 + } else { // non-oop pointers, e.g. card mark base, heap top 49.307 + __ set($con$$constant, $dst$$Register); 49.308 + } 49.309 + %} 49.310 + ins_pipe(loadConP); 49.311 +%} 49.312 + 49.313 +instruct loadConP_load(iRegP dst, immP_load con) %{ 49.314 + match(Set dst con); 49.315 + ins_cost(MEMORY_REF_COST); 49.316 + format %{ "LD [$constanttablebase + $constantoffset],$dst\t! 
load from constant table: ptr=$con" %} 49.317 + ins_encode %{ 49.318 + RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $dst$$Register); 49.319 + __ ld_ptr($constanttablebase, con_offset, $dst$$Register); 49.320 + %} 49.321 + ins_pipe(loadConP); 49.322 +%} 49.323 +#endif // _LP64 49.324 49.325 instruct loadConP0(iRegP dst, immP0 src) %{ 49.326 match(Set dst src); 49.327 49.328 size(4); 49.329 format %{ "CLR $dst\t!ptr" %} 49.330 - ins_encode( SetNull( dst ) ); 49.331 + ins_encode %{ 49.332 + __ clr($dst$$Register); 49.333 + %} 49.334 ins_pipe(ialu_imm); 49.335 %} 49.336 49.337 @@ -6019,7 +6128,9 @@ 49.338 49.339 size(4); 49.340 format %{ "CLR $dst\t! compressed NULL ptr" %} 49.341 - ins_encode( SetNull( dst ) ); 49.342 + ins_encode %{ 49.343 + __ clr($dst$$Register); 49.344 + %} 49.345 ins_pipe(ialu_imm); 49.346 %} 49.347 49.348 @@ -6034,13 +6145,27 @@ 49.349 ins_pipe(ialu_hi_lo_reg); 49.350 %} 49.351 49.352 -instruct loadConL(iRegL dst, immL src, o7RegL tmp) %{ 49.353 - // %%% maybe this should work like loadConD 49.354 - match(Set dst src); 49.355 +// Materialize long value (predicated by immL_cheap). 49.356 +instruct loadConL_set64(iRegL dst, immL_cheap con, o7RegL tmp) %{ 49.357 + match(Set dst con); 49.358 effect(KILL tmp); 49.359 - ins_cost(DEFAULT_COST * 4); 49.360 - format %{ "SET64 $src,$dst KILL $tmp\t! long" %} 49.361 - ins_encode( LdImmL(src, dst, tmp) ); 49.362 + ins_cost(DEFAULT_COST * 3); 49.363 + format %{ "SET64 $con,$dst KILL $tmp\t! cheap long" %} 49.364 + ins_encode %{ 49.365 + __ set64($con$$constant, $dst$$Register, $tmp$$Register); 49.366 + %} 49.367 + ins_pipe(loadConL); 49.368 +%} 49.369 + 49.370 +// Load long value from constant table (predicated by immL_expensive). 49.371 +instruct loadConL_ldx(iRegL dst, immL_expensive con) %{ 49.372 + match(Set dst con); 49.373 + ins_cost(MEMORY_REF_COST); 49.374 + format %{ "LDX [$constanttablebase + $constantoffset],$dst\t! 
load from constant table: long=$con" %} 49.375 + ins_encode %{ 49.376 + RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $dst$$Register); 49.377 + __ ldx($constanttablebase, con_offset, $dst$$Register); 49.378 + %} 49.379 ins_pipe(loadConL); 49.380 %} 49.381 49.382 @@ -6063,50 +6188,26 @@ 49.383 ins_pipe(ialu_imm); 49.384 %} 49.385 49.386 -instruct loadConF(regF dst, immF src, o7RegP tmp) %{ 49.387 - match(Set dst src); 49.388 +instruct loadConF(regF dst, immF con, o7RegI tmp) %{ 49.389 + match(Set dst con); 49.390 effect(KILL tmp); 49.391 - 49.392 -#ifdef _LP64 49.393 - size(8*4); 49.394 -#else 49.395 - size(2*4); 49.396 -#endif 49.397 - 49.398 - format %{ "SETHI hi(&$src),$tmp\t!get float $src from table\n\t" 49.399 - "LDF [$tmp+lo(&$src)],$dst" %} 49.400 + format %{ "LDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: float=$con" %} 49.401 ins_encode %{ 49.402 - address float_address = __ float_constant($src$$constant); 49.403 - RelocationHolder rspec = internal_word_Relocation::spec(float_address); 49.404 - AddressLiteral addrlit(float_address, rspec); 49.405 - 49.406 - __ sethi(addrlit, $tmp$$Register); 49.407 - __ ldf(FloatRegisterImpl::S, $tmp$$Register, addrlit.low10(), $dst$$FloatRegister, rspec); 49.408 + RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $tmp$$Register); 49.409 + __ ldf(FloatRegisterImpl::S, $constanttablebase, con_offset, $dst$$FloatRegister); 49.410 %} 49.411 ins_pipe(loadConFD); 49.412 %} 49.413 49.414 -instruct loadConD(regD dst, immD src, o7RegP tmp) %{ 49.415 - match(Set dst src); 49.416 +instruct loadConD(regD dst, immD con, o7RegI tmp) %{ 49.417 + match(Set dst con); 49.418 effect(KILL tmp); 49.419 - 49.420 -#ifdef _LP64 49.421 - size(8*4); 49.422 -#else 49.423 - size(2*4); 49.424 -#endif 49.425 - 49.426 - format %{ "SETHI hi(&$src),$tmp\t!get double $src from table\n\t" 49.427 - "LDDF [$tmp+lo(&$src)],$dst" %} 49.428 + format %{ "LDDF 
[$constanttablebase + $constantoffset],$dst\t! load from constant table: double=$con" %} 49.429 ins_encode %{ 49.430 - address double_address = __ double_constant($src$$constant); 49.431 - RelocationHolder rspec = internal_word_Relocation::spec(double_address); 49.432 - AddressLiteral addrlit(double_address, rspec); 49.433 - 49.434 - __ sethi(addrlit, $tmp$$Register); 49.435 // XXX This is a quick fix for 6833573. 49.436 - //__ ldf(FloatRegisterImpl::D, $tmp$$Register, addrlit.low10(), $dst$$FloatRegister, rspec); 49.437 - __ ldf(FloatRegisterImpl::D, $tmp$$Register, addrlit.low10(), as_DoubleFloatRegister($dst$$reg), rspec); 49.438 + //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset($con), $dst$$FloatRegister); 49.439 + RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $tmp$$Register); 49.440 + __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg)); 49.441 %} 49.442 ins_pipe(loadConFD); 49.443 %} 49.444 @@ -8558,16 +8659,16 @@ 49.445 %} 49.446 49.447 // Replicate scalar constant to packed byte values in Double register 49.448 -instruct Repl8B_immI(regD dst, immI13 src, o7RegP tmp) %{ 49.449 - match(Set dst (Replicate8B src)); 49.450 -#ifdef _LP64 49.451 - size(36); 49.452 -#else 49.453 - size(8); 49.454 -#endif 49.455 - format %{ "SETHI hi(&Repl8($src)),$tmp\t!get Repl8B($src) from table\n\t" 49.456 - "LDDF [$tmp+lo(&Repl8($src))],$dst" %} 49.457 - ins_encode( LdReplImmI(src, dst, tmp, (8), (1)) ); 49.458 +instruct Repl8B_immI(regD dst, immI13 con, o7RegI tmp) %{ 49.459 + match(Set dst (Replicate8B con)); 49.460 + effect(KILL tmp); 49.461 + format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl8B($con)" %} 49.462 + ins_encode %{ 49.463 + // XXX This is a quick fix for 6833573. 
49.464 + //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 8, 1)), $dst$$FloatRegister); 49.465 + RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 8, 1)), $tmp$$Register); 49.466 + __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg)); 49.467 + %} 49.468 ins_pipe(loadConFD); 49.469 %} 49.470 49.471 @@ -8594,16 +8695,16 @@ 49.472 %} 49.473 49.474 // Replicate scalar constant to packed char values in Double register 49.475 -instruct Repl4C_immI(regD dst, immI src, o7RegP tmp) %{ 49.476 - match(Set dst (Replicate4C src)); 49.477 -#ifdef _LP64 49.478 - size(36); 49.479 -#else 49.480 - size(8); 49.481 -#endif 49.482 - format %{ "SETHI hi(&Repl4($src)),$tmp\t!get Repl4C($src) from table\n\t" 49.483 - "LDDF [$tmp+lo(&Repl4($src))],$dst" %} 49.484 - ins_encode( LdReplImmI(src, dst, tmp, (4), (2)) ); 49.485 +instruct Repl4C_immI(regD dst, immI con, o7RegI tmp) %{ 49.486 + match(Set dst (Replicate4C con)); 49.487 + effect(KILL tmp); 49.488 + format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl4C($con)" %} 49.489 + ins_encode %{ 49.490 + // XXX This is a quick fix for 6833573. 
49.491 + //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 4, 2)), $dst$$FloatRegister); 49.492 + RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 4, 2)), $tmp$$Register); 49.493 + __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg)); 49.494 + %} 49.495 ins_pipe(loadConFD); 49.496 %} 49.497 49.498 @@ -8630,16 +8731,16 @@ 49.499 %} 49.500 49.501 // Replicate scalar constant to packed short values in Double register 49.502 -instruct Repl4S_immI(regD dst, immI src, o7RegP tmp) %{ 49.503 - match(Set dst (Replicate4S src)); 49.504 -#ifdef _LP64 49.505 - size(36); 49.506 -#else 49.507 - size(8); 49.508 -#endif 49.509 - format %{ "SETHI hi(&Repl4($src)),$tmp\t!get Repl4S($src) from table\n\t" 49.510 - "LDDF [$tmp+lo(&Repl4($src))],$dst" %} 49.511 - ins_encode( LdReplImmI(src, dst, tmp, (4), (2)) ); 49.512 +instruct Repl4S_immI(regD dst, immI con, o7RegI tmp) %{ 49.513 + match(Set dst (Replicate4S con)); 49.514 + effect(KILL tmp); 49.515 + format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl4S($con)" %} 49.516 + ins_encode %{ 49.517 + // XXX This is a quick fix for 6833573. 
49.518 + //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 4, 2)), $dst$$FloatRegister); 49.519 + RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 4, 2)), $tmp$$Register); 49.520 + __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg)); 49.521 + %} 49.522 ins_pipe(loadConFD); 49.523 %} 49.524 49.525 @@ -8664,16 +8765,16 @@ 49.526 %} 49.527 49.528 // Replicate scalar zero constant to packed int values in Double register 49.529 -instruct Repl2I_immI(regD dst, immI src, o7RegP tmp) %{ 49.530 - match(Set dst (Replicate2I src)); 49.531 -#ifdef _LP64 49.532 - size(36); 49.533 -#else 49.534 - size(8); 49.535 -#endif 49.536 - format %{ "SETHI hi(&Repl2($src)),$tmp\t!get Repl2I($src) from table\n\t" 49.537 - "LDDF [$tmp+lo(&Repl2($src))],$dst" %} 49.538 - ins_encode( LdReplImmI(src, dst, tmp, (2), (4)) ); 49.539 +instruct Repl2I_immI(regD dst, immI con, o7RegI tmp) %{ 49.540 + match(Set dst (Replicate2I con)); 49.541 + effect(KILL tmp); 49.542 + format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl2I($con)" %} 49.543 + ins_encode %{ 49.544 + // XXX This is a quick fix for 6833573. 
49.545 + //__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 2, 4)), $dst$$FloatRegister); 49.546 + RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 2, 4)), $tmp$$Register); 49.547 + __ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg)); 49.548 + %} 49.549 ins_pipe(loadConFD); 49.550 %} 49.551 49.552 @@ -8929,12 +9030,27 @@ 49.553 49.554 ins_cost(350); 49.555 49.556 - format %{ "SETHI [hi(table_base)],O7\n\t" 49.557 - "ADD O7, lo(table_base), O7\n\t" 49.558 - "LD [O7+$switch_val], O7\n\t" 49.559 + format %{ "ADD $constanttablebase, $constantoffset, O7\n\t" 49.560 + "LD [O7 + $switch_val], O7\n\t" 49.561 "JUMP O7" 49.562 %} 49.563 - ins_encode( jump_enc( switch_val, table) ); 49.564 + ins_encode %{ 49.565 + // Calculate table address into a register. 49.566 + Register table_reg; 49.567 + Register label_reg = O7; 49.568 + if (constant_offset() == 0) { 49.569 + table_reg = $constanttablebase; 49.570 + } else { 49.571 + table_reg = O7; 49.572 + RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset, O7); 49.573 + __ add($constanttablebase, con_offset, table_reg); 49.574 + } 49.575 + 49.576 + // Jump to base address + switch value 49.577 + __ ld_ptr(table_reg, $switch_val$$Register, label_reg); 49.578 + __ jmp(label_reg, G0); 49.579 + __ delayed()->nop(); 49.580 + %} 49.581 ins_pc_relative(1); 49.582 ins_pipe(ialu_reg_reg); 49.583 %}
50.1 --- a/src/cpu/sparc/vm/vm_version_sparc.hpp Mon Dec 27 09:30:20 2010 -0500 50.2 +++ b/src/cpu/sparc/vm/vm_version_sparc.hpp Mon Dec 27 09:56:29 2010 -0500 50.3 @@ -80,9 +80,6 @@ 50.4 static bool is_sparc64(int features) { return (features & fmaf_instructions_m) != 0; } 50.5 50.6 static int maximum_niagara1_processor_count() { return 32; } 50.7 - // Returns true if the platform is in the niagara line and 50.8 - // newer than the niagara1. 50.9 - static bool is_niagara1_plus(); 50.10 50.11 public: 50.12 // Initialization 50.13 @@ -105,6 +102,9 @@ 50.14 static bool is_ultra3() { return (_features & ultra3_m) == ultra3_m; } 50.15 static bool is_sun4v() { return (_features & sun4v_m) != 0; } 50.16 static bool is_niagara1() { return is_niagara1(_features); } 50.17 + // Returns true if the platform is in the niagara line and 50.18 + // newer than the niagara1. 50.19 + static bool is_niagara1_plus(); 50.20 static bool is_sparc64() { return is_sparc64(_features); } 50.21 50.22 static bool has_fast_fxtof() { return has_v9() && !is_ultra3(); }
51.1 --- a/src/cpu/x86/vm/assembler_x86.cpp Mon Dec 27 09:30:20 2010 -0500 51.2 +++ b/src/cpu/x86/vm/assembler_x86.cpp Mon Dec 27 09:56:29 2010 -0500 51.3 @@ -2649,6 +2649,37 @@ 51.4 emit_byte(0xC0 | encode); 51.5 } 51.6 51.7 +void Assembler::sqrtsd(XMMRegister dst, Address src) { 51.8 + NOT_LP64(assert(VM_Version::supports_sse2(), "")); 51.9 + InstructionMark im(this); 51.10 + emit_byte(0xF2); 51.11 + prefix(src, dst); 51.12 + emit_byte(0x0F); 51.13 + emit_byte(0x51); 51.14 + emit_operand(dst, src); 51.15 +} 51.16 + 51.17 +void Assembler::sqrtss(XMMRegister dst, XMMRegister src) { 51.18 + // HMM Table D-1 says sse2 51.19 + // NOT_LP64(assert(VM_Version::supports_sse(), "")); 51.20 + NOT_LP64(assert(VM_Version::supports_sse2(), "")); 51.21 + emit_byte(0xF3); 51.22 + int encode = prefix_and_encode(dst->encoding(), src->encoding()); 51.23 + emit_byte(0x0F); 51.24 + emit_byte(0x51); 51.25 + emit_byte(0xC0 | encode); 51.26 +} 51.27 + 51.28 +void Assembler::sqrtss(XMMRegister dst, Address src) { 51.29 + NOT_LP64(assert(VM_Version::supports_sse2(), "")); 51.30 + InstructionMark im(this); 51.31 + emit_byte(0xF3); 51.32 + prefix(src, dst); 51.33 + emit_byte(0x0F); 51.34 + emit_byte(0x51); 51.35 + emit_operand(dst, src); 51.36 +} 51.37 + 51.38 void Assembler::stmxcsr( Address dst) { 51.39 NOT_LP64(assert(VM_Version::supports_sse(), "")); 51.40 InstructionMark im(this); 51.41 @@ -4358,16 +4389,6 @@ 51.42 emit_byte(0xE8 | encode); 51.43 } 51.44 51.45 -void Assembler::sqrtsd(XMMRegister dst, Address src) { 51.46 - NOT_LP64(assert(VM_Version::supports_sse2(), "")); 51.47 - InstructionMark im(this); 51.48 - emit_byte(0xF2); 51.49 - prefix(src, dst); 51.50 - emit_byte(0x0F); 51.51 - emit_byte(0x51); 51.52 - emit_operand(dst, src); 51.53 -} 51.54 - 51.55 void Assembler::subq(Address dst, int32_t imm32) { 51.56 InstructionMark im(this); 51.57 prefixq(dst); 51.58 @@ -4929,10 +4950,6 @@ 51.59 } 51.60 51.61 51.62 -void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src) { 51.63 
- movsd(dst, as_Address(src)); 51.64 -} 51.65 - 51.66 void MacroAssembler::pop_callee_saved_registers() { 51.67 pop(rcx); 51.68 pop(rdx);
52.1 --- a/src/cpu/x86/vm/assembler_x86.hpp Mon Dec 27 09:30:20 2010 -0500 52.2 +++ b/src/cpu/x86/vm/assembler_x86.hpp Mon Dec 27 09:56:29 2010 -0500 52.3 @@ -135,6 +135,7 @@ 52.4 // Using noreg ensures if the dead code is incorrectly live and executed it 52.5 // will cause an assertion failure 52.6 #define rscratch1 noreg 52.7 +#define rscratch2 noreg 52.8 52.9 #endif // _LP64 52.10 52.11 @@ -1352,6 +1353,10 @@ 52.12 void sqrtsd(XMMRegister dst, Address src); 52.13 void sqrtsd(XMMRegister dst, XMMRegister src); 52.14 52.15 + // Compute Square Root of Scalar Single-Precision Floating-Point Value 52.16 + void sqrtss(XMMRegister dst, Address src); 52.17 + void sqrtss(XMMRegister dst, XMMRegister src); 52.18 + 52.19 void std() { emit_byte(0xfd); } 52.20 52.21 void stmxcsr( Address dst ); 52.22 @@ -2124,6 +2129,9 @@ 52.23 void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); } 52.24 void comisd(XMMRegister dst, AddressLiteral src); 52.25 52.26 + void fadd_s(Address src) { Assembler::fadd_s(src); } 52.27 + void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); } 52.28 + 52.29 void fldcw(Address src) { Assembler::fldcw(src); } 52.30 void fldcw(AddressLiteral src); 52.31 52.32 @@ -2137,6 +2145,9 @@ 52.33 void fld_x(Address src) { Assembler::fld_x(src); } 52.34 void fld_x(AddressLiteral src); 52.35 52.36 + void fmul_s(Address src) { Assembler::fmul_s(src); } 52.37 + void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); } 52.38 + 52.39 void ldmxcsr(Address src) { Assembler::ldmxcsr(src); } 52.40 void ldmxcsr(AddressLiteral src); 52.41 52.42 @@ -2153,10 +2164,50 @@ 52.43 52.44 public: 52.45 52.46 - void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); } 52.47 - void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); } 52.48 - void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); } 52.49 - void movsd(XMMRegister dst, AddressLiteral src); 52.50 + void addsd(XMMRegister dst, 
XMMRegister src) { Assembler::addsd(dst, src); } 52.51 + void addsd(XMMRegister dst, Address src) { Assembler::addsd(dst, src); } 52.52 + void addsd(XMMRegister dst, AddressLiteral src) { Assembler::addsd(dst, as_Address(src)); } 52.53 + 52.54 + void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); } 52.55 + void addss(XMMRegister dst, Address src) { Assembler::addss(dst, src); } 52.56 + void addss(XMMRegister dst, AddressLiteral src) { Assembler::addss(dst, as_Address(src)); } 52.57 + 52.58 + void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); } 52.59 + void divsd(XMMRegister dst, Address src) { Assembler::divsd(dst, src); } 52.60 + void divsd(XMMRegister dst, AddressLiteral src) { Assembler::divsd(dst, as_Address(src)); } 52.61 + 52.62 + void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); } 52.63 + void divss(XMMRegister dst, Address src) { Assembler::divss(dst, src); } 52.64 + void divss(XMMRegister dst, AddressLiteral src) { Assembler::divss(dst, as_Address(src)); } 52.65 + 52.66 + void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); } 52.67 + void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); } 52.68 + void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); } 52.69 + void movsd(XMMRegister dst, AddressLiteral src) { Assembler::movsd(dst, as_Address(src)); } 52.70 + 52.71 + void mulsd(XMMRegister dst, XMMRegister src) { Assembler::mulsd(dst, src); } 52.72 + void mulsd(XMMRegister dst, Address src) { Assembler::mulsd(dst, src); } 52.73 + void mulsd(XMMRegister dst, AddressLiteral src) { Assembler::mulsd(dst, as_Address(src)); } 52.74 + 52.75 + void mulss(XMMRegister dst, XMMRegister src) { Assembler::mulss(dst, src); } 52.76 + void mulss(XMMRegister dst, Address src) { Assembler::mulss(dst, src); } 52.77 + void mulss(XMMRegister dst, AddressLiteral src) { Assembler::mulss(dst, as_Address(src)); } 52.78 + 52.79 + void sqrtsd(XMMRegister dst, 
XMMRegister src) { Assembler::sqrtsd(dst, src); } 52.80 + void sqrtsd(XMMRegister dst, Address src) { Assembler::sqrtsd(dst, src); } 52.81 + void sqrtsd(XMMRegister dst, AddressLiteral src) { Assembler::sqrtsd(dst, as_Address(src)); } 52.82 + 52.83 + void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); } 52.84 + void sqrtss(XMMRegister dst, Address src) { Assembler::sqrtss(dst, src); } 52.85 + void sqrtss(XMMRegister dst, AddressLiteral src) { Assembler::sqrtss(dst, as_Address(src)); } 52.86 + 52.87 + void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); } 52.88 + void subsd(XMMRegister dst, Address src) { Assembler::subsd(dst, src); } 52.89 + void subsd(XMMRegister dst, AddressLiteral src) { Assembler::subsd(dst, as_Address(src)); } 52.90 + 52.91 + void subss(XMMRegister dst, XMMRegister src) { Assembler::subss(dst, src); } 52.92 + void subss(XMMRegister dst, Address src) { Assembler::subss(dst, src); } 52.93 + void subss(XMMRegister dst, AddressLiteral src) { Assembler::subss(dst, as_Address(src)); } 52.94 52.95 void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); } 52.96 void ucomiss(XMMRegister dst, Address src) { Assembler::ucomiss(dst, src); }
53.1 --- a/src/cpu/x86/vm/c1_CodeStubs_x86.cpp Mon Dec 27 09:30:20 2010 -0500 53.2 +++ b/src/cpu/x86/vm/c1_CodeStubs_x86.cpp Mon Dec 27 09:56:29 2010 -0500 53.3 @@ -483,7 +483,7 @@ 53.4 53.5 Register pre_val_reg = pre_val()->as_register(); 53.6 53.7 - ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false); 53.8 + ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/); 53.9 53.10 __ cmpptr(pre_val_reg, (int32_t) NULL_WORD); 53.11 __ jcc(Assembler::equal, _continuation);
54.1 --- a/src/cpu/x86/vm/c1_Defs_x86.hpp Mon Dec 27 09:30:20 2010 -0500 54.2 +++ b/src/cpu/x86/vm/c1_Defs_x86.hpp Mon Dec 27 09:56:29 2010 -0500 54.3 @@ -61,8 +61,8 @@ 54.4 pd_nof_xmm_regs_linearscan = pd_nof_xmm_regs_frame_map, // number of registers visible to linear scan 54.5 pd_first_cpu_reg = 0, 54.6 pd_last_cpu_reg = NOT_LP64(5) LP64_ONLY(11), 54.7 - pd_first_byte_reg = 2, 54.8 - pd_last_byte_reg = 5, 54.9 + pd_first_byte_reg = NOT_LP64(2) LP64_ONLY(0), 54.10 + pd_last_byte_reg = NOT_LP64(5) LP64_ONLY(11), 54.11 pd_first_fpu_reg = pd_nof_cpu_regs_frame_map, 54.12 pd_last_fpu_reg = pd_first_fpu_reg + 7, 54.13 pd_first_xmm_reg = pd_nof_cpu_regs_frame_map + pd_nof_fpu_regs_frame_map,
55.1 --- a/src/cpu/x86/vm/c1_FrameMap_x86.cpp Mon Dec 27 09:30:20 2010 -0500 55.2 +++ b/src/cpu/x86/vm/c1_FrameMap_x86.cpp Mon Dec 27 09:56:29 2010 -0500 55.3 @@ -158,9 +158,11 @@ 55.4 map_register( 6, r8); r8_opr = LIR_OprFact::single_cpu(6); 55.5 map_register( 7, r9); r9_opr = LIR_OprFact::single_cpu(7); 55.6 map_register( 8, r11); r11_opr = LIR_OprFact::single_cpu(8); 55.7 - map_register( 9, r12); r12_opr = LIR_OprFact::single_cpu(9); 55.8 - map_register(10, r13); r13_opr = LIR_OprFact::single_cpu(10); 55.9 - map_register(11, r14); r14_opr = LIR_OprFact::single_cpu(11); 55.10 + map_register( 9, r13); r13_opr = LIR_OprFact::single_cpu(9); 55.11 + map_register(10, r14); r14_opr = LIR_OprFact::single_cpu(10); 55.12 + // r12 is allocated conditionally. With compressed oops it holds 55.13 + // the heapbase value and is not visible to the allocator. 55.14 + map_register(11, r12); r12_opr = LIR_OprFact::single_cpu(11); 55.15 // The unallocatable registers are at the end 55.16 map_register(12, r10); r10_opr = LIR_OprFact::single_cpu(12); 55.17 map_register(13, r15); r15_opr = LIR_OprFact::single_cpu(13); 55.18 @@ -191,9 +193,9 @@ 55.19 _caller_save_cpu_regs[6] = r8_opr; 55.20 _caller_save_cpu_regs[7] = r9_opr; 55.21 _caller_save_cpu_regs[8] = r11_opr; 55.22 - _caller_save_cpu_regs[9] = r12_opr; 55.23 - _caller_save_cpu_regs[10] = r13_opr; 55.24 - _caller_save_cpu_regs[11] = r14_opr; 55.25 + _caller_save_cpu_regs[9] = r13_opr; 55.26 + _caller_save_cpu_regs[10] = r14_opr; 55.27 + _caller_save_cpu_regs[11] = r12_opr; 55.28 #endif // _LP64 55.29 55.30
56.1 --- a/src/cpu/x86/vm/c1_FrameMap_x86.hpp Mon Dec 27 09:30:20 2010 -0500 56.2 +++ b/src/cpu/x86/vm/c1_FrameMap_x86.hpp Mon Dec 27 09:56:29 2010 -0500 56.3 @@ -130,4 +130,15 @@ 56.4 return _caller_save_xmm_regs[i]; 56.5 } 56.6 56.7 + static int adjust_reg_range(int range) { 56.8 + // Reduce the number of available regs (to free r12) in case of compressed oops 56.9 + if (UseCompressedOops) return range - 1; 56.10 + return range; 56.11 + } 56.12 + 56.13 + static int nof_caller_save_cpu_regs() { return adjust_reg_range(pd_nof_caller_save_cpu_regs_frame_map); } 56.14 + static int last_cpu_reg() { return adjust_reg_range(pd_last_cpu_reg); } 56.15 + static int last_byte_reg() { return adjust_reg_range(pd_last_byte_reg); } 56.16 + 56.17 #endif // CPU_X86_VM_C1_FRAMEMAP_X86_HPP 56.18 +
57.1 --- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Mon Dec 27 09:30:20 2010 -0500 57.2 +++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Mon Dec 27 09:56:29 2010 -0500 57.3 @@ -343,8 +343,8 @@ 57.4 Register receiver = FrameMap::receiver_opr->as_register(); 57.5 Register ic_klass = IC_Klass; 57.6 const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9); 57.7 - 57.8 - if (!VerifyOops) { 57.9 + const bool do_post_padding = VerifyOops || UseCompressedOops; 57.10 + if (!do_post_padding) { 57.11 // insert some nops so that the verified entry point is aligned on CodeEntryAlignment 57.12 while ((__ offset() + ic_cmp_size) % CodeEntryAlignment != 0) { 57.13 __ nop(); 57.14 @@ -352,8 +352,8 @@ 57.15 } 57.16 int offset = __ offset(); 57.17 __ inline_cache_check(receiver, IC_Klass); 57.18 - assert(__ offset() % CodeEntryAlignment == 0 || VerifyOops, "alignment must be correct"); 57.19 - if (VerifyOops) { 57.20 + assert(__ offset() % CodeEntryAlignment == 0 || do_post_padding, "alignment must be correct"); 57.21 + if (do_post_padding) { 57.22 // force alignment after the cache check. 
57.23 // It's been verified to be aligned if !VerifyOops 57.24 __ align(CodeEntryAlignment); 57.25 @@ -559,16 +559,16 @@ 57.26 __ movptr (rax, arg1->as_register()); 57.27 57.28 // Get addresses of first characters from both Strings 57.29 - __ movptr (rsi, Address(rax, java_lang_String::value_offset_in_bytes())); 57.30 - __ movptr (rcx, Address(rax, java_lang_String::offset_offset_in_bytes())); 57.31 - __ lea (rsi, Address(rsi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR))); 57.32 + __ load_heap_oop(rsi, Address(rax, java_lang_String::value_offset_in_bytes())); 57.33 + __ movptr (rcx, Address(rax, java_lang_String::offset_offset_in_bytes())); 57.34 + __ lea (rsi, Address(rsi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR))); 57.35 57.36 57.37 // rbx, may be NULL 57.38 add_debug_info_for_null_check_here(info); 57.39 - __ movptr (rdi, Address(rbx, java_lang_String::value_offset_in_bytes())); 57.40 - __ movptr (rcx, Address(rbx, java_lang_String::offset_offset_in_bytes())); 57.41 - __ lea (rdi, Address(rdi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR))); 57.42 + __ load_heap_oop(rdi, Address(rbx, java_lang_String::value_offset_in_bytes())); 57.43 + __ movptr (rcx, Address(rbx, java_lang_String::offset_offset_in_bytes())); 57.44 + __ lea (rdi, Address(rdi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR))); 57.45 57.46 // compute minimum length (in rax) and difference of lengths (on top of stack) 57.47 if (VM_Version::supports_cmov()) { 57.48 @@ -696,10 +696,15 @@ 57.49 LIR_Const* c = src->as_constant_ptr(); 57.50 57.51 switch (c->type()) { 57.52 - case T_INT: 57.53 + case T_INT: { 57.54 + assert(patch_code == lir_patch_none, "no patching handled here"); 57.55 + __ movl(dest->as_register(), c->as_jint()); 57.56 + break; 57.57 + } 57.58 + 57.59 case T_ADDRESS: { 57.60 assert(patch_code == lir_patch_none, "no patching handled here"); 57.61 - __ movl(dest->as_register(), c->as_jint()); 57.62 + __ 
movptr(dest->as_register(), c->as_jint()); 57.63 break; 57.64 } 57.65 57.66 @@ -780,8 +785,11 @@ 57.67 switch (c->type()) { 57.68 case T_INT: // fall through 57.69 case T_FLOAT: 57.70 + __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits()); 57.71 + break; 57.72 + 57.73 case T_ADDRESS: 57.74 - __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits()); 57.75 + __ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits()); 57.76 break; 57.77 57.78 case T_OBJECT: 57.79 @@ -806,7 +814,7 @@ 57.80 } 57.81 } 57.82 57.83 -void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info ) { 57.84 +void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) { 57.85 assert(src->is_constant(), "should not call otherwise"); 57.86 assert(dest->is_address(), "should not call otherwise"); 57.87 LIR_Const* c = src->as_constant_ptr(); 57.88 @@ -816,14 +824,21 @@ 57.89 switch (type) { 57.90 case T_INT: // fall through 57.91 case T_FLOAT: 57.92 + __ movl(as_Address(addr), c->as_jint_bits()); 57.93 + break; 57.94 + 57.95 case T_ADDRESS: 57.96 - __ movl(as_Address(addr), c->as_jint_bits()); 57.97 + __ movptr(as_Address(addr), c->as_jint_bits()); 57.98 break; 57.99 57.100 case T_OBJECT: // fall through 57.101 case T_ARRAY: 57.102 if (c->as_jobject() == NULL) { 57.103 - __ movptr(as_Address(addr), NULL_WORD); 57.104 + if (UseCompressedOops && !wide) { 57.105 + __ movl(as_Address(addr), (int32_t)NULL_WORD); 57.106 + } else { 57.107 + __ movptr(as_Address(addr), NULL_WORD); 57.108 + } 57.109 } else { 57.110 if (is_literal_address(addr)) { 57.111 ShouldNotReachHere(); 57.112 @@ -831,8 +846,14 @@ 57.113 } else { 57.114 #ifdef _LP64 57.115 __ movoop(rscratch1, c->as_jobject()); 57.116 - null_check_here = code_offset(); 57.117 - __ movptr(as_Address_lo(addr), rscratch1); 57.118 + if (UseCompressedOops && !wide) { 57.119 + __ 
encode_heap_oop(rscratch1); 57.120 + null_check_here = code_offset(); 57.121 + __ movl(as_Address_lo(addr), rscratch1); 57.122 + } else { 57.123 + null_check_here = code_offset(); 57.124 + __ movptr(as_Address_lo(addr), rscratch1); 57.125 + } 57.126 #else 57.127 __ movoop(as_Address(addr), c->as_jobject()); 57.128 #endif 57.129 @@ -1009,22 +1030,28 @@ 57.130 } 57.131 57.132 57.133 -void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool /* unaligned */) { 57.134 +void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) { 57.135 LIR_Address* to_addr = dest->as_address_ptr(); 57.136 PatchingStub* patch = NULL; 57.137 + Register compressed_src = rscratch1; 57.138 57.139 if (type == T_ARRAY || type == T_OBJECT) { 57.140 __ verify_oop(src->as_register()); 57.141 +#ifdef _LP64 57.142 + if (UseCompressedOops && !wide) { 57.143 + __ movptr(compressed_src, src->as_register()); 57.144 + __ encode_heap_oop(compressed_src); 57.145 + } 57.146 +#endif 57.147 } 57.148 + 57.149 if (patch_code != lir_patch_none) { 57.150 patch = new PatchingStub(_masm, PatchingStub::access_field_id); 57.151 Address toa = as_Address(to_addr); 57.152 assert(toa.disp() != 0, "must have"); 57.153 } 57.154 - if (info != NULL) { 57.155 - add_debug_info_for_null_check_here(info); 57.156 - } 57.157 - 57.158 + 57.159 + int null_check_here = code_offset(); 57.160 switch (type) { 57.161 case T_FLOAT: { 57.162 if (src->is_single_xmm()) { 57.163 @@ -1050,13 +1077,17 @@ 57.164 break; 57.165 } 57.166 57.167 - case T_ADDRESS: // fall through 57.168 case T_ARRAY: // fall through 57.169 case T_OBJECT: // fall through 57.170 -#ifdef _LP64 57.171 + if (UseCompressedOops && !wide) { 57.172 + __ movl(as_Address(to_addr), compressed_src); 57.173 + } else { 57.174 + __ movptr(as_Address(to_addr), src->as_register()); 57.175 + 
} 57.176 + break; 57.177 + case T_ADDRESS: 57.178 __ movptr(as_Address(to_addr), src->as_register()); 57.179 break; 57.180 -#endif // _LP64 57.181 case T_INT: 57.182 __ movl(as_Address(to_addr), src->as_register()); 57.183 break; 57.184 @@ -1113,6 +1144,9 @@ 57.185 default: 57.186 ShouldNotReachHere(); 57.187 } 57.188 + if (info != NULL) { 57.189 + add_debug_info_for_null_check(null_check_here, info); 57.190 + } 57.191 57.192 if (patch_code != lir_patch_none) { 57.193 patching_epilog(patch, patch_code, to_addr->base()->as_register(), info); 57.194 @@ -1196,7 +1230,7 @@ 57.195 } 57.196 57.197 57.198 -void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool /* unaligned */) { 57.199 +void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) { 57.200 assert(src->is_address(), "should not call otherwise"); 57.201 assert(dest->is_register(), "should not call otherwise"); 57.202 57.203 @@ -1250,13 +1284,18 @@ 57.204 break; 57.205 } 57.206 57.207 - case T_ADDRESS: // fall through 57.208 case T_OBJECT: // fall through 57.209 case T_ARRAY: // fall through 57.210 -#ifdef _LP64 57.211 + if (UseCompressedOops && !wide) { 57.212 + __ movl(dest->as_register(), from_addr); 57.213 + } else { 57.214 + __ movptr(dest->as_register(), from_addr); 57.215 + } 57.216 + break; 57.217 + 57.218 + case T_ADDRESS: 57.219 __ movptr(dest->as_register(), from_addr); 57.220 break; 57.221 -#endif // _L64 57.222 case T_INT: 57.223 __ movl(dest->as_register(), from_addr); 57.224 break; 57.225 @@ -1351,6 +1390,11 @@ 57.226 } 57.227 57.228 if (type == T_ARRAY || type == T_OBJECT) { 57.229 +#ifdef _LP64 57.230 + if (UseCompressedOops && !wide) { 57.231 + __ decode_heap_oop(dest->as_register()); 57.232 + } 57.233 +#endif 57.234 __ verify_oop(dest->as_register()); 57.235 } 57.236 } 57.237 @@ -1672,11 +1716,8 @@ 57.238 ciMethod* method = 
op->profiled_method(); 57.239 assert(method != NULL, "Should have method"); 57.240 int bci = op->profiled_bci(); 57.241 - md = method->method_data(); 57.242 - if (md == NULL) { 57.243 - bailout("out of memory building methodDataOop"); 57.244 - return; 57.245 - } 57.246 + md = method->method_data_or_null(); 57.247 + assert(md != NULL, "Sanity"); 57.248 data = md->bci_to_data(bci); 57.249 assert(data != NULL, "need data for type check"); 57.250 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); 57.251 @@ -1690,7 +1731,7 @@ 57.252 } else if (obj == klass_RInfo) { 57.253 klass_RInfo = dst; 57.254 } 57.255 - if (k->is_loaded()) { 57.256 + if (k->is_loaded() && !UseCompressedOops) { 57.257 select_different_registers(obj, dst, k_RInfo, klass_RInfo); 57.258 } else { 57.259 Rtmp1 = op->tmp3()->as_register(); 57.260 @@ -1727,21 +1768,26 @@ 57.261 if (op->fast_check()) { 57.262 // get object class 57.263 // not a safepoint as obj null check happens earlier 57.264 - if (k->is_loaded()) { 57.265 #ifdef _LP64 57.266 - __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); 57.267 -#else 57.268 - __ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding()); 57.269 -#endif // _LP64 57.270 + if (UseCompressedOops) { 57.271 + __ load_klass(Rtmp1, obj); 57.272 + __ cmpptr(k_RInfo, Rtmp1); 57.273 } else { 57.274 __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); 57.275 } 57.276 +#else 57.277 + if (k->is_loaded()) { 57.278 + __ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding()); 57.279 + } else { 57.280 + __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes())); 57.281 + } 57.282 +#endif 57.283 __ jcc(Assembler::notEqual, *failure_target); 57.284 // successful cast, fall through to profile or jump 57.285 } else { 57.286 // get object class 57.287 // not a safepoint as obj null check happens earlier 57.288 - __ movptr(klass_RInfo, Address(obj, 
oopDesc::klass_offset_in_bytes())); 57.289 + __ load_klass(klass_RInfo, obj); 57.290 if (k->is_loaded()) { 57.291 // See if we get an immediate positive hit 57.292 #ifdef _LP64 57.293 @@ -1796,7 +1842,7 @@ 57.294 Register mdo = klass_RInfo, recv = k_RInfo; 57.295 __ bind(profile_cast_success); 57.296 __ movoop(mdo, md->constant_encoding()); 57.297 - __ movptr(recv, Address(obj, oopDesc::klass_offset_in_bytes())); 57.298 + __ load_klass(recv, obj); 57.299 Label update_done; 57.300 type_profile_helper(mdo, md, data, recv, success); 57.301 __ jmp(*success); 57.302 @@ -1830,11 +1876,8 @@ 57.303 ciMethod* method = op->profiled_method(); 57.304 assert(method != NULL, "Should have method"); 57.305 int bci = op->profiled_bci(); 57.306 - md = method->method_data(); 57.307 - if (md == NULL) { 57.308 - bailout("out of memory building methodDataOop"); 57.309 - return; 57.310 - } 57.311 + md = method->method_data_or_null(); 57.312 + assert(md != NULL, "Sanity"); 57.313 data = md->bci_to_data(bci); 57.314 assert(data != NULL, "need data for type check"); 57.315 assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check"); 57.316 @@ -1860,10 +1903,10 @@ 57.317 } 57.318 57.319 add_debug_info_for_null_check_here(op->info_for_exception()); 57.320 - __ movptr(k_RInfo, Address(array, oopDesc::klass_offset_in_bytes())); 57.321 - __ movptr(klass_RInfo, Address(value, oopDesc::klass_offset_in_bytes())); 57.322 - 57.323 - // get instance klass 57.324 + __ load_klass(k_RInfo, array); 57.325 + __ load_klass(klass_RInfo, value); 57.326 + 57.327 + // get instance klass (it's already uncompressed) 57.328 __ movptr(k_RInfo, Address(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc))); 57.329 // perform the fast part of the checking logic 57.330 __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL); 57.331 @@ -1882,7 +1925,7 @@ 57.332 Register mdo = klass_RInfo, recv = k_RInfo; 57.333 __ 
bind(profile_cast_success); 57.334 __ movoop(mdo, md->constant_encoding()); 57.335 - __ movptr(recv, Address(value, oopDesc::klass_offset_in_bytes())); 57.336 + __ load_klass(recv, value); 57.337 Label update_done; 57.338 type_profile_helper(mdo, md, data, recv, &done); 57.339 __ jmpb(done); 57.340 @@ -1946,12 +1989,31 @@ 57.341 assert(cmpval != newval, "cmp and new values must be in different registers"); 57.342 assert(cmpval != addr, "cmp and addr must be in different registers"); 57.343 assert(newval != addr, "new value and addr must be in different registers"); 57.344 - if (os::is_MP()) { 57.345 - __ lock(); 57.346 - } 57.347 + 57.348 if ( op->code() == lir_cas_obj) { 57.349 - __ cmpxchgptr(newval, Address(addr, 0)); 57.350 - } else if (op->code() == lir_cas_int) { 57.351 +#ifdef _LP64 57.352 + if (UseCompressedOops) { 57.353 + __ encode_heap_oop(cmpval); 57.354 + __ mov(rscratch1, newval); 57.355 + __ encode_heap_oop(rscratch1); 57.356 + if (os::is_MP()) { 57.357 + __ lock(); 57.358 + } 57.359 + // cmpval (rax) is implicitly used by this instruction 57.360 + __ cmpxchgl(rscratch1, Address(addr, 0)); 57.361 + } else 57.362 +#endif 57.363 + { 57.364 + if (os::is_MP()) { 57.365 + __ lock(); 57.366 + } 57.367 + __ cmpxchgptr(newval, Address(addr, 0)); 57.368 + } 57.369 + } else { 57.370 + assert(op->code() == lir_cas_int, "lir_cas_int expected"); 57.371 + if (os::is_MP()) { 57.372 + __ lock(); 57.373 + } 57.374 __ cmpxchgl(newval, Address(addr, 0)); 57.375 } 57.376 #ifdef _LP64 57.377 @@ -3193,8 +3255,13 @@ 57.378 } 57.379 57.380 if (flags & LIR_OpArrayCopy::type_check) { 57.381 - __ movptr(tmp, src_klass_addr); 57.382 - __ cmpptr(tmp, dst_klass_addr); 57.383 + if (UseCompressedOops) { 57.384 + __ movl(tmp, src_klass_addr); 57.385 + __ cmpl(tmp, dst_klass_addr); 57.386 + } else { 57.387 + __ movptr(tmp, src_klass_addr); 57.388 + __ cmpptr(tmp, dst_klass_addr); 57.389 + } 57.390 __ jcc(Assembler::notEqual, *stub->entry()); 57.391 } 57.392 57.393 @@ -3209,13 
+3276,23 @@ 57.394 // but not necessarily exactly of type default_type. 57.395 Label known_ok, halt; 57.396 __ movoop(tmp, default_type->constant_encoding()); 57.397 +#ifdef _LP64 57.398 + if (UseCompressedOops) { 57.399 + __ encode_heap_oop(tmp); 57.400 + } 57.401 +#endif 57.402 + 57.403 if (basic_type != T_OBJECT) { 57.404 - __ cmpptr(tmp, dst_klass_addr); 57.405 + 57.406 + if (UseCompressedOops) __ cmpl(tmp, dst_klass_addr); 57.407 + else __ cmpptr(tmp, dst_klass_addr); 57.408 __ jcc(Assembler::notEqual, halt); 57.409 - __ cmpptr(tmp, src_klass_addr); 57.410 + if (UseCompressedOops) __ cmpl(tmp, src_klass_addr); 57.411 + else __ cmpptr(tmp, src_klass_addr); 57.412 __ jcc(Assembler::equal, known_ok); 57.413 } else { 57.414 - __ cmpptr(tmp, dst_klass_addr); 57.415 + if (UseCompressedOops) __ cmpl(tmp, dst_klass_addr); 57.416 + else __ cmpptr(tmp, dst_klass_addr); 57.417 __ jcc(Assembler::equal, known_ok); 57.418 __ cmpptr(src, dst); 57.419 __ jcc(Assembler::equal, known_ok); 57.420 @@ -3289,11 +3366,8 @@ 57.421 int bci = op->profiled_bci(); 57.422 57.423 // Update counter for all call types 57.424 - ciMethodData* md = method->method_data(); 57.425 - if (md == NULL) { 57.426 - bailout("out of memory building methodDataOop"); 57.427 - return; 57.428 - } 57.429 + ciMethodData* md = method->method_data_or_null(); 57.430 + assert(md != NULL, "Sanity"); 57.431 ciProfileData* data = md->bci_to_data(bci); 57.432 assert(data->is_CounterData(), "need CounterData for calls"); 57.433 assert(op->mdo()->is_single_cpu(), "mdo must be allocated"); 57.434 @@ -3344,7 +3418,7 @@ 57.435 } 57.436 } 57.437 } else { 57.438 - __ movptr(recv, Address(recv, oopDesc::klass_offset_in_bytes())); 57.439 + __ load_klass(recv, recv); 57.440 Label update_done; 57.441 type_profile_helper(mdo, md, data, recv, &update_done); 57.442 // Receiver did not match any saved receiver and there is no empty row for it.
58.1 --- a/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp Mon Dec 27 09:30:20 2010 -0500 58.2 +++ b/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp Mon Dec 27 09:56:29 2010 -0500 58.3 @@ -874,6 +874,10 @@ 58.4 58.5 void LIRGenerator::do_ArrayCopy(Intrinsic* x) { 58.6 assert(x->number_of_arguments() == 5, "wrong type"); 58.7 + 58.8 + // Make all state_for calls early since they can emit code 58.9 + CodeEmitInfo* info = state_for(x, x->state()); 58.10 + 58.11 LIRItem src(x->argument_at(0), this); 58.12 LIRItem src_pos(x->argument_at(1), this); 58.13 LIRItem dst(x->argument_at(2), this); 58.14 @@ -916,7 +920,6 @@ 58.15 ciArrayKlass* expected_type; 58.16 arraycopy_helper(x, &flags, &expected_type); 58.17 58.18 - CodeEmitInfo* info = state_for(x, x->state()); // we may want to have stack (deoptimization?) 58.19 __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint 58.20 } 58.21 58.22 @@ -1151,9 +1154,12 @@ 58.23 stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception); 58.24 } 58.25 LIR_Opr reg = rlock_result(x); 58.26 + LIR_Opr tmp3 = LIR_OprFact::illegalOpr; 58.27 + if (!x->klass()->is_loaded() || UseCompressedOops) { 58.28 + tmp3 = new_register(objectType); 58.29 + } 58.30 __ checkcast(reg, obj.result(), x->klass(), 58.31 - new_register(objectType), new_register(objectType), 58.32 - !x->klass()->is_loaded() ? 
new_register(objectType) : LIR_OprFact::illegalOpr, 58.33 + new_register(objectType), new_register(objectType), tmp3, 58.34 x->direct_compare(), info_for_exception, patching_info, stub, 58.35 x->profiled_method(), x->profiled_bci()); 58.36 } 58.37 @@ -1170,9 +1176,12 @@ 58.38 patching_info = state_for(x, x->state_before()); 58.39 } 58.40 obj.load_item(); 58.41 + LIR_Opr tmp3 = LIR_OprFact::illegalOpr; 58.42 + if (!x->klass()->is_loaded() || UseCompressedOops) { 58.43 + tmp3 = new_register(objectType); 58.44 + } 58.45 __ instanceof(reg, obj.result(), x->klass(), 58.46 - new_register(objectType), new_register(objectType), 58.47 - !x->klass()->is_loaded() ? new_register(objectType) : LIR_OprFact::illegalOpr, 58.48 + new_register(objectType), new_register(objectType), tmp3, 58.49 x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci()); 58.50 } 58.51
59.1 --- a/src/cpu/x86/vm/c1_LinearScan_x86.hpp Mon Dec 27 09:30:20 2010 -0500 59.2 +++ b/src/cpu/x86/vm/c1_LinearScan_x86.hpp Mon Dec 27 09:56:29 2010 -0500 59.3 @@ -31,18 +31,17 @@ 59.4 assert(FrameMap::rsp_opr->cpu_regnr() == 6, "wrong assumption below"); 59.5 assert(FrameMap::rbp_opr->cpu_regnr() == 7, "wrong assumption below"); 59.6 assert(reg_num >= 0, "invalid reg_num"); 59.7 - 59.8 - return reg_num < 6 || reg_num > 7; 59.9 #else 59.10 - // rsp and rbp, r10, r15 (numbers 6 ancd 7) are ignored 59.11 + // rsp and rbp, r10, r15 (numbers [12,15]) are ignored 59.12 + // r12 (number 11) is conditional on compressed oops. 59.13 + assert(FrameMap::r12_opr->cpu_regnr() == 11, "wrong assumption below"); 59.14 assert(FrameMap::r10_opr->cpu_regnr() == 12, "wrong assumption below"); 59.15 assert(FrameMap::r15_opr->cpu_regnr() == 13, "wrong assumption below"); 59.16 assert(FrameMap::rsp_opr->cpu_regnrLo() == 14, "wrong assumption below"); 59.17 assert(FrameMap::rbp_opr->cpu_regnrLo() == 15, "wrong assumption below"); 59.18 assert(reg_num >= 0, "invalid reg_num"); 59.19 - 59.20 - return reg_num < 12 || reg_num > 15; 59.21 #endif // _LP64 59.22 + return reg_num <= FrameMap::last_cpu_reg() || reg_num >= pd_nof_cpu_regs_frame_map; 59.23 } 59.24 59.25 inline int LinearScan::num_physical_regs(BasicType type) { 59.26 @@ -104,7 +103,7 @@ 59.27 if (allocator()->gen()->is_vreg_flag_set(cur->reg_num(), LIRGenerator::byte_reg)) { 59.28 assert(cur->type() != T_FLOAT && cur->type() != T_DOUBLE, "cpu regs only"); 59.29 _first_reg = pd_first_byte_reg; 59.30 - _last_reg = pd_last_byte_reg; 59.31 + _last_reg = FrameMap::last_byte_reg(); 59.32 return true; 59.33 } else if ((UseSSE >= 1 && cur->type() == T_FLOAT) || (UseSSE >= 2 && cur->type() == T_DOUBLE)) { 59.34 _first_reg = pd_first_xmm_reg;
60.1 --- a/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp Mon Dec 27 09:30:20 2010 -0500 60.2 +++ b/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp Mon Dec 27 09:56:29 2010 -0500 60.3 @@ -155,11 +155,26 @@ 60.4 // This assumes that all prototype bits fit in an int32_t 60.5 movptr(Address(obj, oopDesc::mark_offset_in_bytes ()), (int32_t)(intptr_t)markOopDesc::prototype()); 60.6 } 60.7 +#ifdef _LP64 60.8 + if (UseCompressedOops) { // Take care not to kill klass 60.9 + movptr(t1, klass); 60.10 + encode_heap_oop_not_null(t1); 60.11 + movl(Address(obj, oopDesc::klass_offset_in_bytes()), t1); 60.12 + } else 60.13 +#endif 60.14 + { 60.15 + movptr(Address(obj, oopDesc::klass_offset_in_bytes()), klass); 60.16 + } 60.17 60.18 - movptr(Address(obj, oopDesc::klass_offset_in_bytes()), klass); 60.19 if (len->is_valid()) { 60.20 movl(Address(obj, arrayOopDesc::length_offset_in_bytes()), len); 60.21 } 60.22 +#ifdef _LP64 60.23 + else if (UseCompressedOops) { 60.24 + xorptr(t1, t1); 60.25 + store_klass_gap(obj, t1); 60.26 + } 60.27 +#endif 60.28 } 60.29 60.30 60.31 @@ -230,7 +245,7 @@ 60.32 void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2) { 60.33 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, 60.34 "con_size_in_bytes is not multiple of alignment"); 60.35 - const int hdr_size_in_bytes = instanceOopDesc::base_offset_in_bytes(); 60.36 + const int hdr_size_in_bytes = instanceOopDesc::header_size() * HeapWordSize; 60.37 60.38 initialize_header(obj, klass, noreg, t1, t2); 60.39 60.40 @@ -317,13 +332,19 @@ 60.41 // check against inline cache 60.42 assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check"); 60.43 int start_offset = offset(); 60.44 - cmpptr(iCache, Address(receiver, oopDesc::klass_offset_in_bytes())); 60.45 + 60.46 + if (UseCompressedOops) { 60.47 + load_klass(rscratch1, receiver); 60.48 + cmpptr(rscratch1, iCache); 
60.49 + } else { 60.50 + cmpptr(iCache, Address(receiver, oopDesc::klass_offset_in_bytes())); 60.51 + } 60.52 // if icache check fails, then jump to runtime routine 60.53 // Note: RECEIVER must still contain the receiver! 60.54 jump_cc(Assembler::notEqual, 60.55 RuntimeAddress(SharedRuntime::get_ic_miss_stub())); 60.56 const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9); 60.57 - assert(offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry"); 60.58 + assert(UseCompressedOops || offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry"); 60.59 } 60.60 60.61
61.1 --- a/src/cpu/x86/vm/c1_Runtime1_x86.cpp Mon Dec 27 09:30:20 2010 -0500 61.2 +++ b/src/cpu/x86/vm/c1_Runtime1_x86.cpp Mon Dec 27 09:56:29 2010 -0500 61.3 @@ -1261,7 +1261,7 @@ 61.4 // load the klass and check the has finalizer flag 61.5 Label register_finalizer; 61.6 Register t = rsi; 61.7 - __ movptr(t, Address(rax, oopDesc::klass_offset_in_bytes())); 61.8 + __ load_klass(t, rax); 61.9 __ movl(t, Address(t, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc))); 61.10 __ testl(t, JVM_ACC_HAS_FINALIZER); 61.11 __ jcc(Assembler::notZero, register_finalizer);
62.1 --- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp Mon Dec 27 09:30:20 2010 -0500 62.2 +++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp Mon Dec 27 09:56:29 2010 -0500 62.3 @@ -2197,9 +2197,6 @@ 62.4 62.5 __ enter(); // required for proper stackwalking of RuntimeStub frame 62.6 62.7 - checkcast_copy_entry = __ pc(); 62.8 - BLOCK_COMMENT("Entry:"); 62.9 - 62.10 #ifdef ASSERT 62.11 // caller guarantees that the arrays really are different 62.12 // otherwise, we would have to make conjoint checks 62.13 @@ -2210,26 +2207,28 @@ 62.14 } 62.15 #endif //ASSERT 62.16 62.17 - // allocate spill slots for r13, r14 62.18 - enum { 62.19 - saved_r13_offset, 62.20 - saved_r14_offset, 62.21 - saved_rbp_offset, 62.22 - saved_rip_offset, 62.23 - saved_rarg0_offset 62.24 - }; 62.25 - __ subptr(rsp, saved_rbp_offset * wordSize); 62.26 - __ movptr(Address(rsp, saved_r13_offset * wordSize), r13); 62.27 - __ movptr(Address(rsp, saved_r14_offset * wordSize), r14); 62.28 setup_arg_regs(4); // from => rdi, to => rsi, length => rdx 62.29 // ckoff => rcx, ckval => r8 62.30 // r9 and r10 may be used to save non-volatile registers 62.31 #ifdef _WIN64 62.32 // last argument (#4) is on stack on Win64 62.33 - const int ckval_offset = saved_rarg0_offset + 4; 62.34 - __ movptr(ckval, Address(rsp, ckval_offset * wordSize)); 62.35 + __ movptr(ckval, Address(rsp, 6 * wordSize)); 62.36 #endif 62.37 62.38 + // Caller of this entry point must set up the argument registers. 
62.39 + checkcast_copy_entry = __ pc(); 62.40 + BLOCK_COMMENT("Entry:"); 62.41 + 62.42 + // allocate spill slots for r13, r14 62.43 + enum { 62.44 + saved_r13_offset, 62.45 + saved_r14_offset, 62.46 + saved_rbp_offset 62.47 + }; 62.48 + __ subptr(rsp, saved_rbp_offset * wordSize); 62.49 + __ movptr(Address(rsp, saved_r13_offset * wordSize), r13); 62.50 + __ movptr(Address(rsp, saved_r14_offset * wordSize), r14); 62.51 + 62.52 // check that int operands are properly extended to size_t 62.53 assert_clean_int(length, rax); 62.54 assert_clean_int(ckoff, rax); 62.55 @@ -2443,11 +2442,10 @@ 62.56 const Register src_pos = c_rarg1; // source position 62.57 const Register dst = c_rarg2; // destination array oop 62.58 const Register dst_pos = c_rarg3; // destination position 62.59 - // elements count is on stack on Win64 62.60 -#ifdef _WIN64 62.61 -#define C_RARG4 Address(rsp, 6 * wordSize) 62.62 +#ifndef _WIN64 62.63 + const Register length = c_rarg4; 62.64 #else 62.65 -#define C_RARG4 c_rarg4 62.66 + const Address length(rsp, 6 * wordSize); // elements count is on stack on Win64 62.67 #endif 62.68 62.69 { int modulus = CodeEntryAlignment; 62.70 @@ -2514,27 +2512,27 @@ 62.71 // registers used as temp 62.72 const Register r11_length = r11; // elements count to copy 62.73 const Register r10_src_klass = r10; // array klass 62.74 - const Register r9_dst_klass = r9; // dest array klass 62.75 62.76 // if (length < 0) return -1; 62.77 - __ movl(r11_length, C_RARG4); // length (elements count, 32-bits value) 62.78 + __ movl(r11_length, length); // length (elements count, 32-bits value) 62.79 __ testl(r11_length, r11_length); 62.80 __ jccb(Assembler::negative, L_failed_0); 62.81 62.82 __ load_klass(r10_src_klass, src); 62.83 #ifdef ASSERT 62.84 // assert(src->klass() != NULL); 62.85 - BLOCK_COMMENT("assert klasses not null"); 62.86 - { Label L1, L2; 62.87 + { 62.88 + BLOCK_COMMENT("assert klasses not null {"); 62.89 + Label L1, L2; 62.90 __ testptr(r10_src_klass, r10_src_klass); 
62.91 __ jcc(Assembler::notZero, L2); // it is broken if klass is NULL 62.92 __ bind(L1); 62.93 __ stop("broken null klass"); 62.94 __ bind(L2); 62.95 - __ load_klass(r9_dst_klass, dst); 62.96 - __ cmpq(r9_dst_klass, 0); 62.97 + __ load_klass(rax, dst); 62.98 + __ cmpq(rax, 0); 62.99 __ jcc(Assembler::equal, L1); // this would be broken also 62.100 - BLOCK_COMMENT("assert done"); 62.101 + BLOCK_COMMENT("} assert klasses not null done"); 62.102 } 62.103 #endif 62.104 62.105 @@ -2546,34 +2544,36 @@ 62.106 // array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0 62.107 // 62.108 62.109 - int lh_offset = klassOopDesc::header_size() * HeapWordSize + 62.110 - Klass::layout_helper_offset_in_bytes(); 62.111 + const int lh_offset = klassOopDesc::header_size() * HeapWordSize + 62.112 + Klass::layout_helper_offset_in_bytes(); 62.113 + 62.114 + // Handle objArrays completely differently... 62.115 + const jint objArray_lh = Klass::array_layout_helper(T_OBJECT); 62.116 + __ cmpl(Address(r10_src_klass, lh_offset), objArray_lh); 62.117 + __ jcc(Assembler::equal, L_objArray); 62.118 + 62.119 + // if (src->klass() != dst->klass()) return -1; 62.120 + __ load_klass(rax, dst); 62.121 + __ cmpq(r10_src_klass, rax); 62.122 + __ jcc(Assembler::notEqual, L_failed); 62.123 62.124 const Register rax_lh = rax; // layout helper 62.125 - 62.126 __ movl(rax_lh, Address(r10_src_klass, lh_offset)); 62.127 62.128 - // Handle objArrays completely differently... 
62.129 - jint objArray_lh = Klass::array_layout_helper(T_OBJECT); 62.130 - __ cmpl(rax_lh, objArray_lh); 62.131 - __ jcc(Assembler::equal, L_objArray); 62.132 - 62.133 - // if (src->klass() != dst->klass()) return -1; 62.134 - __ load_klass(r9_dst_klass, dst); 62.135 - __ cmpq(r10_src_klass, r9_dst_klass); 62.136 - __ jcc(Assembler::notEqual, L_failed); 62.137 - 62.138 // if (!src->is_Array()) return -1; 62.139 __ cmpl(rax_lh, Klass::_lh_neutral_value); 62.140 __ jcc(Assembler::greaterEqual, L_failed); 62.141 62.142 // At this point, it is known to be a typeArray (array_tag 0x3). 62.143 #ifdef ASSERT 62.144 - { Label L; 62.145 + { 62.146 + BLOCK_COMMENT("assert primitive array {"); 62.147 + Label L; 62.148 __ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift)); 62.149 __ jcc(Assembler::greaterEqual, L); 62.150 __ stop("must be a primitive array"); 62.151 __ bind(L); 62.152 + BLOCK_COMMENT("} assert primitive array done"); 62.153 } 62.154 #endif 62.155 62.156 @@ -2631,11 +2631,14 @@ 62.157 62.158 __ BIND(L_copy_longs); 62.159 #ifdef ASSERT 62.160 - { Label L; 62.161 + { 62.162 + BLOCK_COMMENT("assert long copy {"); 62.163 + Label L; 62.164 __ cmpl(rax_elsize, LogBytesPerLong); 62.165 __ jcc(Assembler::equal, L); 62.166 __ stop("must be long copy, but elsize is wrong"); 62.167 __ bind(L); 62.168 + BLOCK_COMMENT("} assert long copy done"); 62.169 } 62.170 #endif 62.171 __ lea(from, Address(src, src_pos, Address::times_8, 0));// src_addr 62.172 @@ -2645,12 +2648,12 @@ 62.173 62.174 // objArrayKlass 62.175 __ BIND(L_objArray); 62.176 - // live at this point: r10_src_klass, src[_pos], dst[_pos] 62.177 + // live at this point: r10_src_klass, r11_length, src[_pos], dst[_pos] 62.178 62.179 Label L_plain_copy, L_checkcast_copy; 62.180 // test array classes for subtyping 62.181 - __ load_klass(r9_dst_klass, dst); 62.182 - __ cmpq(r10_src_klass, r9_dst_klass); // usual case is exact equality 62.183 + __ load_klass(rax, dst); 62.184 + __ 
cmpq(r10_src_klass, rax); // usual case is exact equality 62.185 __ jcc(Assembler::notEqual, L_checkcast_copy); 62.186 62.187 // Identically typed arrays can be copied without element-wise checks. 62.188 @@ -2666,41 +2669,33 @@ 62.189 __ jump(RuntimeAddress(oop_copy_entry)); 62.190 62.191 __ BIND(L_checkcast_copy); 62.192 - // live at this point: r10_src_klass, !r11_length 62.193 + // live at this point: r10_src_klass, r11_length, rax (dst_klass) 62.194 { 62.195 - // assert(r11_length == C_RARG4); // will reload from here 62.196 - Register r11_dst_klass = r11; 62.197 - __ load_klass(r11_dst_klass, dst); 62.198 - 62.199 // Before looking at dst.length, make sure dst is also an objArray. 62.200 - __ cmpl(Address(r11_dst_klass, lh_offset), objArray_lh); 62.201 + __ cmpl(Address(rax, lh_offset), objArray_lh); 62.202 __ jcc(Assembler::notEqual, L_failed); 62.203 62.204 // It is safe to examine both src.length and dst.length. 62.205 -#ifndef _WIN64 62.206 - arraycopy_range_checks(src, src_pos, dst, dst_pos, C_RARG4, 62.207 - rax, L_failed); 62.208 -#else 62.209 - __ movl(r11_length, C_RARG4); // reload 62.210 arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length, 62.211 rax, L_failed); 62.212 + 62.213 + const Register r11_dst_klass = r11; 62.214 __ load_klass(r11_dst_klass, dst); // reload 62.215 -#endif 62.216 62.217 // Marshal the base address arguments now, freeing registers. 62.218 __ lea(from, Address(src, src_pos, TIMES_OOP, 62.219 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 62.220 __ lea(to, Address(dst, dst_pos, TIMES_OOP, 62.221 arrayOopDesc::base_offset_in_bytes(T_OBJECT))); 62.222 - __ movl(count, C_RARG4); // length (reloaded) 62.223 + __ movl(count, length); // length (reloaded) 62.224 Register sco_temp = c_rarg3; // this register is free now 62.225 assert_different_registers(from, to, count, sco_temp, 62.226 r11_dst_klass, r10_src_klass); 62.227 assert_clean_int(count, sco_temp); 62.228 62.229 // Generate the type check. 
62.230 - int sco_offset = (klassOopDesc::header_size() * HeapWordSize + 62.231 - Klass::super_check_offset_offset_in_bytes()); 62.232 + const int sco_offset = (klassOopDesc::header_size() * HeapWordSize + 62.233 + Klass::super_check_offset_offset_in_bytes()); 62.234 __ movl(sco_temp, Address(r11_dst_klass, sco_offset)); 62.235 assert_clean_int(sco_temp, rax); 62.236 generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy); 62.237 @@ -2709,12 +2704,14 @@ 62.238 int ek_offset = (klassOopDesc::header_size() * HeapWordSize + 62.239 objArrayKlass::element_klass_offset_in_bytes()); 62.240 __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset)); 62.241 - __ movl(sco_temp, Address(r11_dst_klass, sco_offset)); 62.242 + __ movl( sco_temp, Address(r11_dst_klass, sco_offset)); 62.243 assert_clean_int(sco_temp, rax); 62.244 62.245 // the checkcast_copy loop needs two extra arguments: 62.246 assert(c_rarg3 == sco_temp, "#3 already in place"); 62.247 - __ movptr(C_RARG4, r11_dst_klass); // dst.klass.element_klass 62.248 + // Set up arguments for checkcast_copy_entry. 62.249 + setup_arg_regs(4); 62.250 + __ movptr(r8, r11_dst_klass); // dst.klass.element_klass, r8 is c_rarg4 on Linux/Solaris 62.251 __ jump(RuntimeAddress(checkcast_copy_entry)); 62.252 } 62.253 62.254 @@ -2727,8 +2724,6 @@ 62.255 return start; 62.256 } 62.257 62.258 -#undef length_arg 62.259 - 62.260 void generate_arraycopy_stubs() { 62.261 // Call the conjoint generation methods immediately after 62.262 // the disjoint ones so that short branches from the former
63.1 --- a/src/cpu/x86/vm/x86_32.ad Mon Dec 27 09:30:20 2010 -0500 63.2 +++ b/src/cpu/x86/vm/x86_32.ad Mon Dec 27 09:56:29 2010 -0500 63.3 @@ -507,6 +507,25 @@ 63.4 63.5 63.6 //============================================================================= 63.7 +const bool Matcher::constant_table_absolute_addressing = true; 63.8 +const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty; 63.9 + 63.10 +void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const { 63.11 + // Empty encoding 63.12 +} 63.13 + 63.14 +uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const { 63.15 + return 0; 63.16 +} 63.17 + 63.18 +#ifndef PRODUCT 63.19 +void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const { 63.20 + st->print("# MachConstantBaseNode (empty encoding)"); 63.21 +} 63.22 +#endif 63.23 + 63.24 + 63.25 +//============================================================================= 63.26 #ifndef PRODUCT 63.27 void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream* st ) const { 63.28 Compile* C = ra_->C; 63.29 @@ -1320,29 +1339,6 @@ 63.30 } 63.31 63.32 63.33 -static void emit_double_constant(CodeBuffer& cbuf, double x) { 63.34 - int mark = cbuf.insts()->mark_off(); 63.35 - MacroAssembler _masm(&cbuf); 63.36 - address double_address = __ double_constant(x); 63.37 - cbuf.insts()->set_mark_off(mark); // preserve mark across masm shift 63.38 - emit_d32_reloc(cbuf, 63.39 - (int)double_address, 63.40 - internal_word_Relocation::spec(double_address), 63.41 - RELOC_DISP32); 63.42 -} 63.43 - 63.44 -static void emit_float_constant(CodeBuffer& cbuf, float x) { 63.45 - int mark = cbuf.insts()->mark_off(); 63.46 - MacroAssembler _masm(&cbuf); 63.47 - address float_address = __ float_constant(x); 63.48 - cbuf.insts()->set_mark_off(mark); // preserve mark across masm shift 63.49 - emit_d32_reloc(cbuf, 63.50 - (int)float_address, 63.51 - internal_word_Relocation::spec(float_address), 63.52 - RELOC_DISP32); 63.53 -} 63.54 - 63.55 - 63.56 
const bool Matcher::match_rule_supported(int opcode) { 63.57 if (!has_match_rule(opcode)) 63.58 return false; 63.59 @@ -1354,22 +1350,6 @@ 63.60 return regnum - 32; // The FP registers are in the second chunk 63.61 } 63.62 63.63 -bool is_positive_zero_float(jfloat f) { 63.64 - return jint_cast(f) == jint_cast(0.0F); 63.65 -} 63.66 - 63.67 -bool is_positive_one_float(jfloat f) { 63.68 - return jint_cast(f) == jint_cast(1.0F); 63.69 -} 63.70 - 63.71 -bool is_positive_zero_double(jdouble d) { 63.72 - return jlong_cast(d) == jlong_cast(0.0); 63.73 -} 63.74 - 63.75 -bool is_positive_one_double(jdouble d) { 63.76 - return jlong_cast(d) == jlong_cast(1.0); 63.77 -} 63.78 - 63.79 // This is UltraSparc specific, true just means we have fast l2f conversion 63.80 const bool Matcher::convL2FSupported(void) { 63.81 return true; 63.82 @@ -2036,67 +2016,6 @@ 63.83 %} 63.84 63.85 63.86 - enc_class LdImmD (immD src) %{ // Load Immediate 63.87 - if( is_positive_zero_double($src$$constant)) { 63.88 - // FLDZ 63.89 - emit_opcode(cbuf,0xD9); 63.90 - emit_opcode(cbuf,0xEE); 63.91 - } else if( is_positive_one_double($src$$constant)) { 63.92 - // FLD1 63.93 - emit_opcode(cbuf,0xD9); 63.94 - emit_opcode(cbuf,0xE8); 63.95 - } else { 63.96 - emit_opcode(cbuf,0xDD); 63.97 - emit_rm(cbuf, 0x0, 0x0, 0x5); 63.98 - emit_double_constant(cbuf, $src$$constant); 63.99 - } 63.100 - %} 63.101 - 63.102 - 63.103 - enc_class LdImmF (immF src) %{ // Load Immediate 63.104 - if( is_positive_zero_float($src$$constant)) { 63.105 - emit_opcode(cbuf,0xD9); 63.106 - emit_opcode(cbuf,0xEE); 63.107 - } else if( is_positive_one_float($src$$constant)) { 63.108 - emit_opcode(cbuf,0xD9); 63.109 - emit_opcode(cbuf,0xE8); 63.110 - } else { 63.111 - $$$emit8$primary; 63.112 - // Load immediate does not have a zero or sign extended version 63.113 - // for 8-bit immediates 63.114 - // First load to TOS, then move to dst 63.115 - emit_rm(cbuf, 0x0, 0x0, 0x5); 63.116 - emit_float_constant(cbuf, $src$$constant); 63.117 - } 
63.118 - %} 63.119 - 63.120 - enc_class LdImmX (regX dst, immXF con) %{ // Load Immediate 63.121 - emit_rm(cbuf, 0x0, $dst$$reg, 0x5); 63.122 - emit_float_constant(cbuf, $con$$constant); 63.123 - %} 63.124 - 63.125 - enc_class LdImmXD (regXD dst, immXD con) %{ // Load Immediate 63.126 - emit_rm(cbuf, 0x0, $dst$$reg, 0x5); 63.127 - emit_double_constant(cbuf, $con$$constant); 63.128 - %} 63.129 - 63.130 - enc_class load_conXD (regXD dst, immXD con) %{ // Load double constant 63.131 - // UseXmmLoadAndClearUpper ? movsd(dst, con) : movlpd(dst, con) 63.132 - emit_opcode(cbuf, UseXmmLoadAndClearUpper ? 0xF2 : 0x66); 63.133 - emit_opcode(cbuf, 0x0F); 63.134 - emit_opcode(cbuf, UseXmmLoadAndClearUpper ? 0x10 : 0x12); 63.135 - emit_rm(cbuf, 0x0, $dst$$reg, 0x5); 63.136 - emit_double_constant(cbuf, $con$$constant); 63.137 - %} 63.138 - 63.139 - enc_class Opc_MemImm_F(immF src) %{ 63.140 - cbuf.set_insts_mark(); 63.141 - $$$emit8$primary; 63.142 - emit_rm(cbuf, 0x0, $secondary, 0x5); 63.143 - emit_float_constant(cbuf, $src$$constant); 63.144 - %} 63.145 - 63.146 - 63.147 enc_class MovI2X_reg(regX dst, eRegI src) %{ 63.148 emit_opcode(cbuf, 0x66 ); // MOVD dst,src 63.149 emit_opcode(cbuf, 0x0F ); 63.150 @@ -4801,7 +4720,7 @@ 63.151 interface(CONST_INTER); 63.152 %} 63.153 63.154 -// Double Immediate 63.155 +// Double Immediate one 63.156 operand immD1() %{ 63.157 predicate( UseSSE<=1 && n->getd() == 1.0 ); 63.158 match(ConD); 63.159 @@ -4844,7 +4763,17 @@ 63.160 63.161 // Float Immediate zero 63.162 operand immF0() %{ 63.163 - predicate( UseSSE == 0 && n->getf() == 0.0 ); 63.164 + predicate(UseSSE == 0 && n->getf() == 0.0F); 63.165 + match(ConF); 63.166 + 63.167 + op_cost(5); 63.168 + format %{ %} 63.169 + interface(CONST_INTER); 63.170 +%} 63.171 + 63.172 +// Float Immediate one 63.173 +operand immF1() %{ 63.174 + predicate(UseSSE == 0 && n->getf() == 1.0F); 63.175 match(ConF); 63.176 63.177 op_cost(5); 63.178 @@ -7215,24 +7144,53 @@ 63.179 %} 63.180 63.181 // The instruction 
usage is guarded by predicate in operand immF(). 63.182 -instruct loadConF(regF dst, immF src) %{ 63.183 - match(Set dst src); 63.184 +instruct loadConF(regF dst, immF con) %{ 63.185 + match(Set dst con); 63.186 ins_cost(125); 63.187 - 63.188 - format %{ "FLD_S ST,$src\n\t" 63.189 + format %{ "FLD_S ST,[$constantaddress]\t# load from constant table: float=$con\n\t" 63.190 "FSTP $dst" %} 63.191 - opcode(0xD9, 0x00); /* D9 /0 */ 63.192 - ins_encode(LdImmF(src), Pop_Reg_F(dst) ); 63.193 - ins_pipe( fpu_reg_con ); 63.194 + ins_encode %{ 63.195 + __ fld_s($constantaddress($con)); 63.196 + __ fstp_d($dst$$reg); 63.197 + %} 63.198 + ins_pipe(fpu_reg_con); 63.199 +%} 63.200 + 63.201 +// The instruction usage is guarded by predicate in operand immF0(). 63.202 +instruct loadConF0(regF dst, immF0 con) %{ 63.203 + match(Set dst con); 63.204 + ins_cost(125); 63.205 + format %{ "FLDZ ST\n\t" 63.206 + "FSTP $dst" %} 63.207 + ins_encode %{ 63.208 + __ fldz(); 63.209 + __ fstp_d($dst$$reg); 63.210 + %} 63.211 + ins_pipe(fpu_reg_con); 63.212 +%} 63.213 + 63.214 +// The instruction usage is guarded by predicate in operand immF1(). 63.215 +instruct loadConF1(regF dst, immF1 con) %{ 63.216 + match(Set dst con); 63.217 + ins_cost(125); 63.218 + format %{ "FLD1 ST\n\t" 63.219 + "FSTP $dst" %} 63.220 + ins_encode %{ 63.221 + __ fld1(); 63.222 + __ fstp_d($dst$$reg); 63.223 + %} 63.224 + ins_pipe(fpu_reg_con); 63.225 %} 63.226 63.227 // The instruction usage is guarded by predicate in operand immXF(). 
63.228 instruct loadConX(regX dst, immXF con) %{ 63.229 match(Set dst con); 63.230 ins_cost(125); 63.231 - format %{ "MOVSS $dst,[$con]" %} 63.232 - ins_encode( Opcode(0xF3), Opcode(0x0F), Opcode(0x10), LdImmX(dst, con)); 63.233 - ins_pipe( pipe_slow ); 63.234 + format %{ "MOVSS $dst,[$constantaddress]\t# load from constant table: float=$con" %} 63.235 + ins_encode %{ 63.236 + __ movflt($dst$$XMMRegister, $constantaddress($con)); 63.237 + %} 63.238 + ins_pipe(pipe_slow); 63.239 %} 63.240 63.241 // The instruction usage is guarded by predicate in operand immXF0(). 63.242 @@ -7240,28 +7198,63 @@ 63.243 match(Set dst src); 63.244 ins_cost(100); 63.245 format %{ "XORPS $dst,$dst\t# float 0.0" %} 63.246 - ins_encode( Opcode(0x0F), Opcode(0x57), RegReg(dst,dst)); 63.247 - ins_pipe( pipe_slow ); 63.248 + ins_encode %{ 63.249 + __ xorps($dst$$XMMRegister, $dst$$XMMRegister); 63.250 + %} 63.251 + ins_pipe(pipe_slow); 63.252 %} 63.253 63.254 // The instruction usage is guarded by predicate in operand immD(). 63.255 -instruct loadConD(regD dst, immD src) %{ 63.256 - match(Set dst src); 63.257 +instruct loadConD(regD dst, immD con) %{ 63.258 + match(Set dst con); 63.259 ins_cost(125); 63.260 63.261 - format %{ "FLD_D ST,$src\n\t" 63.262 + format %{ "FLD_D ST,[$constantaddress]\t# load from constant table: double=$con\n\t" 63.263 "FSTP $dst" %} 63.264 - ins_encode(LdImmD(src), Pop_Reg_D(dst) ); 63.265 - ins_pipe( fpu_reg_con ); 63.266 + ins_encode %{ 63.267 + __ fld_d($constantaddress($con)); 63.268 + __ fstp_d($dst$$reg); 63.269 + %} 63.270 + ins_pipe(fpu_reg_con); 63.271 +%} 63.272 + 63.273 +// The instruction usage is guarded by predicate in operand immD0(). 
63.274 +instruct loadConD0(regD dst, immD0 con) %{ 63.275 + match(Set dst con); 63.276 + ins_cost(125); 63.277 + 63.278 + format %{ "FLDZ ST\n\t" 63.279 + "FSTP $dst" %} 63.280 + ins_encode %{ 63.281 + __ fldz(); 63.282 + __ fstp_d($dst$$reg); 63.283 + %} 63.284 + ins_pipe(fpu_reg_con); 63.285 +%} 63.286 + 63.287 +// The instruction usage is guarded by predicate in operand immD1(). 63.288 +instruct loadConD1(regD dst, immD1 con) %{ 63.289 + match(Set dst con); 63.290 + ins_cost(125); 63.291 + 63.292 + format %{ "FLD1 ST\n\t" 63.293 + "FSTP $dst" %} 63.294 + ins_encode %{ 63.295 + __ fld1(); 63.296 + __ fstp_d($dst$$reg); 63.297 + %} 63.298 + ins_pipe(fpu_reg_con); 63.299 %} 63.300 63.301 // The instruction usage is guarded by predicate in operand immXD(). 63.302 instruct loadConXD(regXD dst, immXD con) %{ 63.303 match(Set dst con); 63.304 ins_cost(125); 63.305 - format %{ "MOVSD $dst,[$con]" %} 63.306 - ins_encode(load_conXD(dst, con)); 63.307 - ins_pipe( pipe_slow ); 63.308 + format %{ "MOVSD $dst,[$constantaddress]\t# load from constant table: double=$con" %} 63.309 + ins_encode %{ 63.310 + __ movdbl($dst$$XMMRegister, $constantaddress($con)); 63.311 + %} 63.312 + ins_pipe(pipe_slow); 63.313 %} 63.314 63.315 // The instruction usage is guarded by predicate in operand immXD0(). 
63.316 @@ -10303,41 +10296,45 @@ 63.317 ins_pipe( fpu_reg_mem ); 63.318 %} 63.319 63.320 -instruct addD_reg_imm1(regD dst, immD1 src) %{ 63.321 +instruct addD_reg_imm1(regD dst, immD1 con) %{ 63.322 predicate(UseSSE<=1); 63.323 - match(Set dst (AddD dst src)); 63.324 + match(Set dst (AddD dst con)); 63.325 ins_cost(125); 63.326 format %{ "FLD1\n\t" 63.327 "DADDp $dst,ST" %} 63.328 - opcode(0xDE, 0x00); 63.329 - ins_encode( LdImmD(src), 63.330 - OpcP, RegOpc(dst) ); 63.331 - ins_pipe( fpu_reg ); 63.332 -%} 63.333 - 63.334 -instruct addD_reg_imm(regD dst, immD src) %{ 63.335 + ins_encode %{ 63.336 + __ fld1(); 63.337 + __ faddp($dst$$reg); 63.338 + %} 63.339 + ins_pipe(fpu_reg); 63.340 +%} 63.341 + 63.342 +instruct addD_reg_imm(regD dst, immD con) %{ 63.343 predicate(UseSSE<=1 && _kids[1]->_leaf->getd() != 0.0 && _kids[1]->_leaf->getd() != 1.0 ); 63.344 - match(Set dst (AddD dst src)); 63.345 + match(Set dst (AddD dst con)); 63.346 ins_cost(200); 63.347 - format %{ "FLD_D [$src]\n\t" 63.348 + format %{ "FLD_D [$constantaddress]\t# load from constant table: double=$con\n\t" 63.349 "DADDp $dst,ST" %} 63.350 - opcode(0xDE, 0x00); /* DE /0 */ 63.351 - ins_encode( LdImmD(src), 63.352 - OpcP, RegOpc(dst)); 63.353 - ins_pipe( fpu_reg_mem ); 63.354 + ins_encode %{ 63.355 + __ fld_d($constantaddress($con)); 63.356 + __ faddp($dst$$reg); 63.357 + %} 63.358 + ins_pipe(fpu_reg_mem); 63.359 %} 63.360 63.361 instruct addD_reg_imm_round(stackSlotD dst, regD src, immD con) %{ 63.362 predicate(UseSSE<=1 && _kids[0]->_kids[1]->_leaf->getd() != 0.0 && _kids[0]->_kids[1]->_leaf->getd() != 1.0 ); 63.363 match(Set dst (RoundDouble (AddD src con))); 63.364 ins_cost(200); 63.365 - format %{ "FLD_D [$con]\n\t" 63.366 + format %{ "FLD_D [$constantaddress]\t# load from constant table: double=$con\n\t" 63.367 "DADD ST,$src\n\t" 63.368 "FSTP_D $dst\t# D-round" %} 63.369 - opcode(0xD8, 0x00); /* D8 /0 */ 63.370 - ins_encode( LdImmD(con), 63.371 - OpcP, RegOpc(src), Pop_Mem_D(dst)); 63.372 - 
ins_pipe( fpu_mem_reg_con ); 63.373 + ins_encode %{ 63.374 + __ fld_d($constantaddress($con)); 63.375 + __ fadd($src$$reg); 63.376 + __ fstp_d(Address(rsp, $dst$$disp)); 63.377 + %} 63.378 + ins_pipe(fpu_mem_reg_con); 63.379 %} 63.380 63.381 // Add two double precision floating point values in xmm 63.382 @@ -10352,9 +10349,11 @@ 63.383 instruct addXD_imm(regXD dst, immXD con) %{ 63.384 predicate(UseSSE>=2); 63.385 match(Set dst (AddD dst con)); 63.386 - format %{ "ADDSD $dst,[$con]" %} 63.387 - ins_encode( Opcode(0xF2), Opcode(0x0F), Opcode(0x58), LdImmXD(dst, con) ); 63.388 - ins_pipe( pipe_slow ); 63.389 + format %{ "ADDSD $dst,[$constantaddress]\t# load from constant table: double=$con" %} 63.390 + ins_encode %{ 63.391 + __ addsd($dst$$XMMRegister, $constantaddress($con)); 63.392 + %} 63.393 + ins_pipe(pipe_slow); 63.394 %} 63.395 63.396 instruct addXD_mem(regXD dst, memory mem) %{ 63.397 @@ -10377,9 +10376,11 @@ 63.398 instruct subXD_imm(regXD dst, immXD con) %{ 63.399 predicate(UseSSE>=2); 63.400 match(Set dst (SubD dst con)); 63.401 - format %{ "SUBSD $dst,[$con]" %} 63.402 - ins_encode( Opcode(0xF2), Opcode(0x0F), Opcode(0x5C), LdImmXD(dst, con) ); 63.403 - ins_pipe( pipe_slow ); 63.404 + format %{ "SUBSD $dst,[$constantaddress]\t# load from constant table: double=$con" %} 63.405 + ins_encode %{ 63.406 + __ subsd($dst$$XMMRegister, $constantaddress($con)); 63.407 + %} 63.408 + ins_pipe(pipe_slow); 63.409 %} 63.410 63.411 instruct subXD_mem(regXD dst, memory mem) %{ 63.412 @@ -10402,9 +10403,11 @@ 63.413 instruct mulXD_imm(regXD dst, immXD con) %{ 63.414 predicate(UseSSE>=2); 63.415 match(Set dst (MulD dst con)); 63.416 - format %{ "MULSD $dst,[$con]" %} 63.417 - ins_encode( Opcode(0xF2), Opcode(0x0F), Opcode(0x59), LdImmXD(dst, con) ); 63.418 - ins_pipe( pipe_slow ); 63.419 + format %{ "MULSD $dst,[$constantaddress]\t# load from constant table: double=$con" %} 63.420 + ins_encode %{ 63.421 + __ mulsd($dst$$XMMRegister, $constantaddress($con)); 63.422 + %} 
63.423 + ins_pipe(pipe_slow); 63.424 %} 63.425 63.426 instruct mulXD_mem(regXD dst, memory mem) %{ 63.427 @@ -10428,9 +10431,11 @@ 63.428 instruct divXD_imm(regXD dst, immXD con) %{ 63.429 predicate(UseSSE>=2); 63.430 match(Set dst (DivD dst con)); 63.431 - format %{ "DIVSD $dst,[$con]" %} 63.432 - ins_encode( Opcode(0xF2), Opcode(0x0F), Opcode(0x5E), LdImmXD(dst, con)); 63.433 - ins_pipe( pipe_slow ); 63.434 + format %{ "DIVSD $dst,[$constantaddress]\t# load from constant table: double=$con" %} 63.435 + ins_encode %{ 63.436 + __ divsd($dst$$XMMRegister, $constantaddress($con)); 63.437 + %} 63.438 + ins_pipe(pipe_slow); 63.439 %} 63.440 63.441 instruct divXD_mem(regXD dst, memory mem) %{ 63.442 @@ -10481,16 +10486,17 @@ 63.443 ins_pipe( fpu_reg_reg ); 63.444 %} 63.445 63.446 -instruct mulD_reg_imm(regD dst, immD src) %{ 63.447 +instruct mulD_reg_imm(regD dst, immD con) %{ 63.448 predicate( UseSSE<=1 && _kids[1]->_leaf->getd() != 0.0 && _kids[1]->_leaf->getd() != 1.0 ); 63.449 - match(Set dst (MulD dst src)); 63.450 + match(Set dst (MulD dst con)); 63.451 ins_cost(200); 63.452 - format %{ "FLD_D [$src]\n\t" 63.453 + format %{ "FLD_D [$constantaddress]\t# load from constant table: double=$con\n\t" 63.454 "DMULp $dst,ST" %} 63.455 - opcode(0xDE, 0x1); /* DE /1 */ 63.456 - ins_encode( LdImmD(src), 63.457 - OpcP, RegOpc(dst) ); 63.458 - ins_pipe( fpu_reg_mem ); 63.459 + ins_encode %{ 63.460 + __ fld_d($constantaddress($con)); 63.461 + __ fmulp($dst$$reg); 63.462 + %} 63.463 + ins_pipe(fpu_reg_mem); 63.464 %} 63.465 63.466 63.467 @@ -11224,9 +11230,11 @@ 63.468 instruct addX_imm(regX dst, immXF con) %{ 63.469 predicate(UseSSE>=1); 63.470 match(Set dst (AddF dst con)); 63.471 - format %{ "ADDSS $dst,[$con]" %} 63.472 - ins_encode( Opcode(0xF3), Opcode(0x0F), Opcode(0x58), LdImmX(dst, con) ); 63.473 - ins_pipe( pipe_slow ); 63.474 + format %{ "ADDSS $dst,[$constantaddress]\t# load from constant table: float=$con" %} 63.475 + ins_encode %{ 63.476 + __ 
addss($dst$$XMMRegister, $constantaddress($con)); 63.477 + %} 63.478 + ins_pipe(pipe_slow); 63.479 %} 63.480 63.481 instruct addX_mem(regX dst, memory mem) %{ 63.482 @@ -11249,9 +11257,11 @@ 63.483 instruct subX_imm(regX dst, immXF con) %{ 63.484 predicate(UseSSE>=1); 63.485 match(Set dst (SubF dst con)); 63.486 - format %{ "SUBSS $dst,[$con]" %} 63.487 - ins_encode( Opcode(0xF3), Opcode(0x0F), Opcode(0x5C), LdImmX(dst, con) ); 63.488 - ins_pipe( pipe_slow ); 63.489 + format %{ "SUBSS $dst,[$constantaddress]\t# load from constant table: float=$con" %} 63.490 + ins_encode %{ 63.491 + __ subss($dst$$XMMRegister, $constantaddress($con)); 63.492 + %} 63.493 + ins_pipe(pipe_slow); 63.494 %} 63.495 63.496 instruct subX_mem(regX dst, memory mem) %{ 63.497 @@ -11274,9 +11284,11 @@ 63.498 instruct mulX_imm(regX dst, immXF con) %{ 63.499 predicate(UseSSE>=1); 63.500 match(Set dst (MulF dst con)); 63.501 - format %{ "MULSS $dst,[$con]" %} 63.502 - ins_encode( Opcode(0xF3), Opcode(0x0F), Opcode(0x59), LdImmX(dst, con) ); 63.503 - ins_pipe( pipe_slow ); 63.504 + format %{ "MULSS $dst,[$constantaddress]\t# load from constant table: float=$con" %} 63.505 + ins_encode %{ 63.506 + __ mulss($dst$$XMMRegister, $constantaddress($con)); 63.507 + %} 63.508 + ins_pipe(pipe_slow); 63.509 %} 63.510 63.511 instruct mulX_mem(regX dst, memory mem) %{ 63.512 @@ -11299,9 +11311,11 @@ 63.513 instruct divX_imm(regX dst, immXF con) %{ 63.514 predicate(UseSSE>=1); 63.515 match(Set dst (DivF dst con)); 63.516 - format %{ "DIVSS $dst,[$con]" %} 63.517 - ins_encode( Opcode(0xF3), Opcode(0x0F), Opcode(0x5E), LdImmX(dst, con) ); 63.518 - ins_pipe( pipe_slow ); 63.519 + format %{ "DIVSS $dst,[$constantaddress]\t# load from constant table: float=$con" %} 63.520 + ins_encode %{ 63.521 + __ divss($dst$$XMMRegister, $constantaddress($con)); 63.522 + %} 63.523 + ins_pipe(pipe_slow); 63.524 %} 63.525 63.526 instruct divX_mem(regX dst, memory mem) %{ 63.527 @@ -11456,31 +11470,33 @@ 63.528 63.529 63.530 // 
Spill to obtain 24-bit precision 63.531 -instruct addF24_reg_imm(stackSlotF dst, regF src1, immF src2) %{ 63.532 +instruct addF24_reg_imm(stackSlotF dst, regF src, immF con) %{ 63.533 predicate(UseSSE==0 && Compile::current()->select_24_bit_instr()); 63.534 - match(Set dst (AddF src1 src2)); 63.535 - format %{ "FLD $src1\n\t" 63.536 - "FADD $src2\n\t" 63.537 + match(Set dst (AddF src con)); 63.538 + format %{ "FLD $src\n\t" 63.539 + "FADD_S [$constantaddress]\t# load from constant table: float=$con\n\t" 63.540 "FSTP_S $dst" %} 63.541 - opcode(0xD8, 0x00); /* D8 /0 */ 63.542 - ins_encode( Push_Reg_F(src1), 63.543 - Opc_MemImm_F(src2), 63.544 - Pop_Mem_F(dst)); 63.545 - ins_pipe( fpu_mem_reg_con ); 63.546 + ins_encode %{ 63.547 + __ fld_s($src$$reg - 1); // FLD ST(i-1) 63.548 + __ fadd_s($constantaddress($con)); 63.549 + __ fstp_s(Address(rsp, $dst$$disp)); 63.550 + %} 63.551 + ins_pipe(fpu_mem_reg_con); 63.552 %} 63.553 // 63.554 // This instruction does not round to 24-bits 63.555 -instruct addF_reg_imm(regF dst, regF src1, immF src2) %{ 63.556 +instruct addF_reg_imm(regF dst, regF src, immF con) %{ 63.557 predicate(UseSSE==0 && !Compile::current()->select_24_bit_instr()); 63.558 - match(Set dst (AddF src1 src2)); 63.559 - format %{ "FLD $src1\n\t" 63.560 - "FADD $src2\n\t" 63.561 - "FSTP_S $dst" %} 63.562 - opcode(0xD8, 0x00); /* D8 /0 */ 63.563 - ins_encode( Push_Reg_F(src1), 63.564 - Opc_MemImm_F(src2), 63.565 - Pop_Reg_F(dst)); 63.566 - ins_pipe( fpu_reg_reg_con ); 63.567 + match(Set dst (AddF src con)); 63.568 + format %{ "FLD $src\n\t" 63.569 + "FADD_S [$constantaddress]\t# load from constant table: float=$con\n\t" 63.570 + "FSTP $dst" %} 63.571 + ins_encode %{ 63.572 + __ fld_s($src$$reg - 1); // FLD ST(i-1) 63.573 + __ fadd_s($constantaddress($con)); 63.574 + __ fstp_d($dst$$reg); 63.575 + %} 63.576 + ins_pipe(fpu_reg_reg_con); 63.577 %} 63.578 63.579 // Spill to obtain 24-bit precision 63.580 @@ -11559,29 +11575,35 @@ 63.581 %} 63.582 63.583 // Spill to 
obtain 24-bit precision 63.584 -instruct mulF24_reg_imm(stackSlotF dst, regF src1, immF src2) %{ 63.585 +instruct mulF24_reg_imm(stackSlotF dst, regF src, immF con) %{ 63.586 predicate(UseSSE==0 && Compile::current()->select_24_bit_instr()); 63.587 - match(Set dst (MulF src1 src2)); 63.588 - 63.589 - format %{ "FMULc $dst,$src1,$src2" %} 63.590 - opcode(0xD8, 0x1); /* D8 /1*/ 63.591 - ins_encode( Push_Reg_F(src1), 63.592 - Opc_MemImm_F(src2), 63.593 - Pop_Mem_F(dst)); 63.594 - ins_pipe( fpu_mem_reg_con ); 63.595 + match(Set dst (MulF src con)); 63.596 + 63.597 + format %{ "FLD $src\n\t" 63.598 + "FMUL_S [$constantaddress]\t# load from constant table: float=$con\n\t" 63.599 + "FSTP_S $dst" %} 63.600 + ins_encode %{ 63.601 + __ fld_s($src$$reg - 1); // FLD ST(i-1) 63.602 + __ fmul_s($constantaddress($con)); 63.603 + __ fstp_s(Address(rsp, $dst$$disp)); 63.604 + %} 63.605 + ins_pipe(fpu_mem_reg_con); 63.606 %} 63.607 // 63.608 // This instruction does not round to 24-bits 63.609 -instruct mulF_reg_imm(regF dst, regF src1, immF src2) %{ 63.610 +instruct mulF_reg_imm(regF dst, regF src, immF con) %{ 63.611 predicate(UseSSE==0 && !Compile::current()->select_24_bit_instr()); 63.612 - match(Set dst (MulF src1 src2)); 63.613 - 63.614 - format %{ "FMULc $dst. 
$src1, $src2" %} 63.615 - opcode(0xD8, 0x1); /* D8 /1*/ 63.616 - ins_encode( Push_Reg_F(src1), 63.617 - Opc_MemImm_F(src2), 63.618 - Pop_Reg_F(dst)); 63.619 - ins_pipe( fpu_reg_reg_con ); 63.620 + match(Set dst (MulF src con)); 63.621 + 63.622 + format %{ "FLD $src\n\t" 63.623 + "FMUL_S [$constantaddress]\t# load from constant table: float=$con\n\t" 63.624 + "FSTP $dst" %} 63.625 + ins_encode %{ 63.626 + __ fld_s($src$$reg - 1); // FLD ST(i-1) 63.627 + __ fmul_s($constantaddress($con)); 63.628 + __ fstp_d($dst$$reg); 63.629 + %} 63.630 + ins_pipe(fpu_reg_reg_con); 63.631 %} 63.632 63.633 63.634 @@ -12939,16 +12961,11 @@ 63.635 instruct jumpXtnd(eRegI switch_val) %{ 63.636 match(Jump switch_val); 63.637 ins_cost(350); 63.638 - 63.639 - format %{ "JMP [table_base](,$switch_val,1)\n\t" %} 63.640 - 63.641 - ins_encode %{ 63.642 - address table_base = __ address_table_constant(_index2label); 63.643 - 63.644 + format %{ "JMP [$constantaddress](,$switch_val,1)\n\t" %} 63.645 + ins_encode %{ 63.646 // Jump to Address(table_base + switch_reg) 63.647 - InternalAddress table(table_base); 63.648 Address index(noreg, $switch_val$$Register, Address::times_1); 63.649 - __ jump(ArrayAddress(table, index)); 63.650 + __ jump(ArrayAddress($constantaddress, index)); 63.651 %} 63.652 ins_pc_relative(1); 63.653 ins_pipe(pipe_jmp);
64.1 --- a/src/cpu/x86/vm/x86_64.ad Mon Dec 27 09:30:20 2010 -0500 64.2 +++ b/src/cpu/x86/vm/x86_64.ad Mon Dec 27 09:56:29 2010 -0500 64.3 @@ -833,6 +833,25 @@ 64.4 64.5 64.6 //============================================================================= 64.7 +const bool Matcher::constant_table_absolute_addressing = true; 64.8 +const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty; 64.9 + 64.10 +void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const { 64.11 + // Empty encoding 64.12 +} 64.13 + 64.14 +uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const { 64.15 + return 0; 64.16 +} 64.17 + 64.18 +#ifndef PRODUCT 64.19 +void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const { 64.20 + st->print("# MachConstantBaseNode (empty encoding)"); 64.21 +} 64.22 +#endif 64.23 + 64.24 + 64.25 +//============================================================================= 64.26 #ifndef PRODUCT 64.27 void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const 64.28 { 64.29 @@ -1922,28 +1941,6 @@ 64.30 return offset; 64.31 } 64.32 64.33 -static void emit_double_constant(CodeBuffer& cbuf, double x) { 64.34 - int mark = cbuf.insts()->mark_off(); 64.35 - MacroAssembler _masm(&cbuf); 64.36 - address double_address = __ double_constant(x); 64.37 - cbuf.insts()->set_mark_off(mark); // preserve mark across masm shift 64.38 - emit_d32_reloc(cbuf, 64.39 - (int) (double_address - cbuf.insts_end() - 4), 64.40 - internal_word_Relocation::spec(double_address), 64.41 - RELOC_DISP32); 64.42 -} 64.43 - 64.44 -static void emit_float_constant(CodeBuffer& cbuf, float x) { 64.45 - int mark = cbuf.insts()->mark_off(); 64.46 - MacroAssembler _masm(&cbuf); 64.47 - address float_address = __ float_constant(x); 64.48 - cbuf.insts()->set_mark_off(mark); // preserve mark across masm shift 64.49 - emit_d32_reloc(cbuf, 64.50 - (int) (float_address - cbuf.insts_end() - 4), 64.51 - internal_word_Relocation::spec(float_address), 64.52 - 
RELOC_DISP32); 64.53 -} 64.54 - 64.55 64.56 const bool Matcher::match_rule_supported(int opcode) { 64.57 if (!has_match_rule(opcode)) 64.58 @@ -2789,43 +2786,6 @@ 64.59 } 64.60 %} 64.61 64.62 - enc_class load_immF(regF dst, immF con) 64.63 - %{ 64.64 - // XXX reg_mem doesn't support RIP-relative addressing yet 64.65 - emit_rm(cbuf, 0x0, $dst$$reg & 7, 0x5); // 00 reg 101 64.66 - emit_float_constant(cbuf, $con$$constant); 64.67 - %} 64.68 - 64.69 - enc_class load_immD(regD dst, immD con) 64.70 - %{ 64.71 - // XXX reg_mem doesn't support RIP-relative addressing yet 64.72 - emit_rm(cbuf, 0x0, $dst$$reg & 7, 0x5); // 00 reg 101 64.73 - emit_double_constant(cbuf, $con$$constant); 64.74 - %} 64.75 - 64.76 - enc_class load_conF (regF dst, immF con) %{ // Load float constant 64.77 - emit_opcode(cbuf, 0xF3); 64.78 - if ($dst$$reg >= 8) { 64.79 - emit_opcode(cbuf, Assembler::REX_R); 64.80 - } 64.81 - emit_opcode(cbuf, 0x0F); 64.82 - emit_opcode(cbuf, 0x10); 64.83 - emit_rm(cbuf, 0x0, $dst$$reg & 7, 0x5); // 00 reg 101 64.84 - emit_float_constant(cbuf, $con$$constant); 64.85 - %} 64.86 - 64.87 - enc_class load_conD (regD dst, immD con) %{ // Load double constant 64.88 - // UseXmmLoadAndClearUpper ? movsd(dst, con) : movlpd(dst, con) 64.89 - emit_opcode(cbuf, UseXmmLoadAndClearUpper ? 0xF2 : 0x66); 64.90 - if ($dst$$reg >= 8) { 64.91 - emit_opcode(cbuf, Assembler::REX_R); 64.92 - } 64.93 - emit_opcode(cbuf, 0x0F); 64.94 - emit_opcode(cbuf, UseXmmLoadAndClearUpper ? 0x10 : 0x12); 64.95 - emit_rm(cbuf, 0x0, $dst$$reg & 7, 0x5); // 00 reg 101 64.96 - emit_double_constant(cbuf, $con$$constant); 64.97 - %} 64.98 - 64.99 // Encode a reg-reg copy. If it is useless, then empty encoding. 
64.100 enc_class enc_copy(rRegI dst, rRegI src) 64.101 %{ 64.102 @@ -2926,63 +2886,6 @@ 64.103 emit_d32(cbuf, 0x00); 64.104 %} 64.105 64.106 - enc_class jump_enc(rRegL switch_val, rRegI dest) %{ 64.107 - MacroAssembler masm(&cbuf); 64.108 - 64.109 - Register switch_reg = as_Register($switch_val$$reg); 64.110 - Register dest_reg = as_Register($dest$$reg); 64.111 - address table_base = masm.address_table_constant(_index2label); 64.112 - 64.113 - // We could use jump(ArrayAddress) except that the macro assembler needs to use r10 64.114 - // to do that and the compiler is using that register as one it can allocate. 64.115 - // So we build it all by hand. 64.116 - // Address index(noreg, switch_reg, Address::times_1); 64.117 - // ArrayAddress dispatch(table, index); 64.118 - 64.119 - Address dispatch(dest_reg, switch_reg, Address::times_1); 64.120 - 64.121 - masm.lea(dest_reg, InternalAddress(table_base)); 64.122 - masm.jmp(dispatch); 64.123 - %} 64.124 - 64.125 - enc_class jump_enc_addr(rRegL switch_val, immI2 shift, immL32 offset, rRegI dest) %{ 64.126 - MacroAssembler masm(&cbuf); 64.127 - 64.128 - Register switch_reg = as_Register($switch_val$$reg); 64.129 - Register dest_reg = as_Register($dest$$reg); 64.130 - address table_base = masm.address_table_constant(_index2label); 64.131 - 64.132 - // We could use jump(ArrayAddress) except that the macro assembler needs to use r10 64.133 - // to do that and the compiler is using that register as one it can allocate. 64.134 - // So we build it all by hand. 
64.135 - // Address index(noreg, switch_reg, (Address::ScaleFactor)$shift$$constant, (int)$offset$$constant); 64.136 - // ArrayAddress dispatch(table, index); 64.137 - 64.138 - Address dispatch(dest_reg, switch_reg, (Address::ScaleFactor)$shift$$constant, (int)$offset$$constant); 64.139 - 64.140 - masm.lea(dest_reg, InternalAddress(table_base)); 64.141 - masm.jmp(dispatch); 64.142 - %} 64.143 - 64.144 - enc_class jump_enc_offset(rRegL switch_val, immI2 shift, rRegI dest) %{ 64.145 - MacroAssembler masm(&cbuf); 64.146 - 64.147 - Register switch_reg = as_Register($switch_val$$reg); 64.148 - Register dest_reg = as_Register($dest$$reg); 64.149 - address table_base = masm.address_table_constant(_index2label); 64.150 - 64.151 - // We could use jump(ArrayAddress) except that the macro assembler needs to use r10 64.152 - // to do that and the compiler is using that register as one it can allocate. 64.153 - // So we build it all by hand. 64.154 - // Address index(noreg, switch_reg, (Address::ScaleFactor)$shift$$constant); 64.155 - // ArrayAddress dispatch(table, index); 64.156 - 64.157 - Address dispatch(dest_reg, switch_reg, (Address::ScaleFactor)$shift$$constant); 64.158 - masm.lea(dest_reg, InternalAddress(table_base)); 64.159 - masm.jmp(dispatch); 64.160 - 64.161 - %} 64.162 - 64.163 enc_class lock_prefix() 64.164 %{ 64.165 if (os::is_MP()) { 64.166 @@ -6641,12 +6544,11 @@ 64.167 ins_pipe(ialu_reg); 64.168 %} 64.169 64.170 -instruct loadConP(rRegP dst, immP src) 64.171 -%{ 64.172 - match(Set dst src); 64.173 - 64.174 - format %{ "movq $dst, $src\t# ptr" %} 64.175 - ins_encode(load_immP(dst, src)); 64.176 +instruct loadConP(rRegP dst, immP con) %{ 64.177 + match(Set dst con); 64.178 + 64.179 + format %{ "movq $dst, $con\t# ptr" %} 64.180 + ins_encode(load_immP(dst, con)); 64.181 ins_pipe(ialu_reg_fat); // XXX 64.182 %} 64.183 64.184 @@ -6673,13 +6575,13 @@ 64.185 ins_pipe(ialu_reg); 64.186 %} 64.187 64.188 -instruct loadConF(regF dst, immF src) 64.189 -%{ 64.190 - 
match(Set dst src); 64.191 +instruct loadConF(regF dst, immF con) %{ 64.192 + match(Set dst con); 64.193 ins_cost(125); 64.194 - 64.195 - format %{ "movss $dst, [$src]" %} 64.196 - ins_encode(load_conF(dst, src)); 64.197 + format %{ "movss $dst, [$constantaddress]\t# load from constant table: float=$con" %} 64.198 + ins_encode %{ 64.199 + __ movflt($dst$$XMMRegister, $constantaddress($con)); 64.200 + %} 64.201 ins_pipe(pipe_slow); 64.202 %} 64.203 64.204 @@ -6721,13 +6623,13 @@ 64.205 %} 64.206 64.207 // Use the same format since predicate() can not be used here. 64.208 -instruct loadConD(regD dst, immD src) 64.209 -%{ 64.210 - match(Set dst src); 64.211 +instruct loadConD(regD dst, immD con) %{ 64.212 + match(Set dst con); 64.213 ins_cost(125); 64.214 - 64.215 - format %{ "movsd $dst, [$src]" %} 64.216 - ins_encode(load_conD(dst, src)); 64.217 + format %{ "movsd $dst, [$constantaddress]\t# load from constant table: double=$con" %} 64.218 + ins_encode %{ 64.219 + __ movdbl($dst$$XMMRegister, $constantaddress($con)); 64.220 + %} 64.221 ins_pipe(pipe_slow); 64.222 %} 64.223 64.224 @@ -7694,9 +7596,18 @@ 64.225 predicate(false); 64.226 effect(TEMP dest); 64.227 64.228 - format %{ "leaq $dest, table_base\n\t" 64.229 + format %{ "leaq $dest, [$constantaddress]\n\t" 64.230 "jmp [$dest + $switch_val << $shift]\n\t" %} 64.231 - ins_encode(jump_enc_offset(switch_val, shift, dest)); 64.232 + ins_encode %{ 64.233 + // We could use jump(ArrayAddress) except that the macro assembler needs to use r10 64.234 + // to do that and the compiler is using that register as one it can allocate. 64.235 + // So we build it all by hand. 
64.236 + // Address index(noreg, switch_reg, (Address::ScaleFactor)$shift$$constant); 64.237 + // ArrayAddress dispatch(table, index); 64.238 + Address dispatch($dest$$Register, $switch_val$$Register, (Address::ScaleFactor) $shift$$constant); 64.239 + __ lea($dest$$Register, $constantaddress); 64.240 + __ jmp(dispatch); 64.241 + %} 64.242 ins_pipe(pipe_jmp); 64.243 ins_pc_relative(1); 64.244 %} 64.245 @@ -7706,9 +7617,18 @@ 64.246 ins_cost(350); 64.247 effect(TEMP dest); 64.248 64.249 - format %{ "leaq $dest, table_base\n\t" 64.250 + format %{ "leaq $dest, [$constantaddress]\n\t" 64.251 "jmp [$dest + $switch_val << $shift + $offset]\n\t" %} 64.252 - ins_encode(jump_enc_addr(switch_val, shift, offset, dest)); 64.253 + ins_encode %{ 64.254 + // We could use jump(ArrayAddress) except that the macro assembler needs to use r10 64.255 + // to do that and the compiler is using that register as one it can allocate. 64.256 + // So we build it all by hand. 64.257 + // Address index(noreg, switch_reg, (Address::ScaleFactor) $shift$$constant, (int) $offset$$constant); 64.258 + // ArrayAddress dispatch(table, index); 64.259 + Address dispatch($dest$$Register, $switch_val$$Register, (Address::ScaleFactor) $shift$$constant, (int) $offset$$constant); 64.260 + __ lea($dest$$Register, $constantaddress); 64.261 + __ jmp(dispatch); 64.262 + %} 64.263 ins_pipe(pipe_jmp); 64.264 ins_pc_relative(1); 64.265 %} 64.266 @@ -7718,9 +7638,18 @@ 64.267 ins_cost(350); 64.268 effect(TEMP dest); 64.269 64.270 - format %{ "leaq $dest, table_base\n\t" 64.271 + format %{ "leaq $dest, [$constantaddress]\n\t" 64.272 "jmp [$dest + $switch_val]\n\t" %} 64.273 - ins_encode(jump_enc(switch_val, dest)); 64.274 + ins_encode %{ 64.275 + // We could use jump(ArrayAddress) except that the macro assembler needs to use r10 64.276 + // to do that and the compiler is using that register as one it can allocate. 64.277 + // So we build it all by hand. 
64.278 + // Address index(noreg, switch_reg, Address::times_1); 64.279 + // ArrayAddress dispatch(table, index); 64.280 + Address dispatch($dest$$Register, $switch_val$$Register, Address::times_1); 64.281 + __ lea($dest$$Register, $constantaddress); 64.282 + __ jmp(dispatch); 64.283 + %} 64.284 ins_pipe(pipe_jmp); 64.285 ins_pc_relative(1); 64.286 %} 64.287 @@ -10376,30 +10305,36 @@ 64.288 ins_pipe(pipe_slow); 64.289 %} 64.290 64.291 -instruct cmpF_cc_imm(rFlagsRegU cr, regF src1, immF src2) 64.292 -%{ 64.293 - match(Set cr (CmpF src1 src2)); 64.294 +instruct cmpF_cc_imm(rFlagsRegU cr, regF src, immF con) %{ 64.295 + match(Set cr (CmpF src con)); 64.296 64.297 ins_cost(145); 64.298 - format %{ "ucomiss $src1, $src2\n\t" 64.299 + format %{ "ucomiss $src, [$constantaddress]\t# load from constant table: float=$con\n\t" 64.300 "jnp,s exit\n\t" 64.301 "pushfq\t# saw NaN, set CF\n\t" 64.302 "andq [rsp], #0xffffff2b\n\t" 64.303 "popfq\n" 64.304 "exit: nop\t# avoid branch to branch" %} 64.305 - opcode(0x0F, 0x2E); 64.306 - ins_encode(REX_reg_mem(src1, src2), OpcP, OpcS, load_immF(src1, src2), 64.307 - cmpfp_fixup); 64.308 - ins_pipe(pipe_slow); 64.309 -%} 64.310 - 64.311 -instruct cmpF_cc_immCF(rFlagsRegUCF cr, regF src1, immF src2) %{ 64.312 - match(Set cr (CmpF src1 src2)); 64.313 - 64.314 + ins_encode %{ 64.315 + Label L_exit; 64.316 + __ ucomiss($src$$XMMRegister, $constantaddress($con)); 64.317 + __ jcc(Assembler::noParity, L_exit); 64.318 + __ pushf(); 64.319 + __ andq(rsp, 0xffffff2b); 64.320 + __ popf(); 64.321 + __ bind(L_exit); 64.322 + __ nop(); 64.323 + %} 64.324 + ins_pipe(pipe_slow); 64.325 +%} 64.326 + 64.327 +instruct cmpF_cc_immCF(rFlagsRegUCF cr, regF src, immF con) %{ 64.328 + match(Set cr (CmpF src con)); 64.329 ins_cost(100); 64.330 - format %{ "ucomiss $src1, $src2" %} 64.331 - opcode(0x0F, 0x2E); 64.332 - ins_encode(REX_reg_mem(src1, src2), OpcP, OpcS, load_immF(src1, src2)); 64.333 + format %{ "ucomiss $src, [$constantaddress]\t# load from constant 
table: float=$con" %} 64.334 + ins_encode %{ 64.335 + __ ucomiss($src$$XMMRegister, $constantaddress($con)); 64.336 + %} 64.337 ins_pipe(pipe_slow); 64.338 %} 64.339 64.340 @@ -10458,30 +10393,36 @@ 64.341 ins_pipe(pipe_slow); 64.342 %} 64.343 64.344 -instruct cmpD_cc_imm(rFlagsRegU cr, regD src1, immD src2) 64.345 -%{ 64.346 - match(Set cr (CmpD src1 src2)); 64.347 +instruct cmpD_cc_imm(rFlagsRegU cr, regD src, immD con) %{ 64.348 + match(Set cr (CmpD src con)); 64.349 64.350 ins_cost(145); 64.351 - format %{ "ucomisd $src1, [$src2]\n\t" 64.352 + format %{ "ucomisd $src, [$constantaddress]\t# load from constant table: double=$con\n\t" 64.353 "jnp,s exit\n\t" 64.354 "pushfq\t# saw NaN, set CF\n\t" 64.355 "andq [rsp], #0xffffff2b\n\t" 64.356 "popfq\n" 64.357 "exit: nop\t# avoid branch to branch" %} 64.358 - opcode(0x66, 0x0F, 0x2E); 64.359 - ins_encode(OpcP, REX_reg_mem(src1, src2), OpcS, OpcT, load_immD(src1, src2), 64.360 - cmpfp_fixup); 64.361 - ins_pipe(pipe_slow); 64.362 -%} 64.363 - 64.364 -instruct cmpD_cc_immCF(rFlagsRegUCF cr, regD src1, immD src2) %{ 64.365 - match(Set cr (CmpD src1 src2)); 64.366 - 64.367 + ins_encode %{ 64.368 + Label L_exit; 64.369 + __ ucomisd($src$$XMMRegister, $constantaddress($con)); 64.370 + __ jcc(Assembler::noParity, L_exit); 64.371 + __ pushf(); 64.372 + __ andq(rsp, 0xffffff2b); 64.373 + __ popf(); 64.374 + __ bind(L_exit); 64.375 + __ nop(); 64.376 + %} 64.377 + ins_pipe(pipe_slow); 64.378 +%} 64.379 + 64.380 +instruct cmpD_cc_immCF(rFlagsRegUCF cr, regD src, immD con) %{ 64.381 + match(Set cr (CmpD src con)); 64.382 ins_cost(100); 64.383 - format %{ "ucomisd $src1, [$src2]" %} 64.384 - opcode(0x66, 0x0F, 0x2E); 64.385 - ins_encode(OpcP, REX_reg_mem(src1, src2), OpcS, OpcT, load_immD(src1, src2)); 64.386 + format %{ "ucomisd $src, [$constantaddress]\t# load from constant table: double=$con" %} 64.387 + ins_encode %{ 64.388 + __ ucomisd($src$$XMMRegister, $constantaddress($con)); 64.389 + %} 64.390 ins_pipe(pipe_slow); 64.391 
%} 64.392 64.393 @@ -10528,23 +10469,29 @@ 64.394 %} 64.395 64.396 // Compare into -1,0,1 64.397 -instruct cmpF_imm(rRegI dst, regF src1, immF src2, rFlagsReg cr) 64.398 -%{ 64.399 - match(Set dst (CmpF3 src1 src2)); 64.400 +instruct cmpF_imm(rRegI dst, regF src, immF con, rFlagsReg cr) %{ 64.401 + match(Set dst (CmpF3 src con)); 64.402 effect(KILL cr); 64.403 64.404 ins_cost(275); 64.405 - format %{ "ucomiss $src1, [$src2]\n\t" 64.406 + format %{ "ucomiss $src, [$constantaddress]\t# load from constant table: float=$con\n\t" 64.407 "movl $dst, #-1\n\t" 64.408 "jp,s done\n\t" 64.409 "jb,s done\n\t" 64.410 "setne $dst\n\t" 64.411 "movzbl $dst, $dst\n" 64.412 "done:" %} 64.413 - 64.414 - opcode(0x0F, 0x2E); 64.415 - ins_encode(REX_reg_mem(src1, src2), OpcP, OpcS, load_immF(src1, src2), 64.416 - cmpfp3(dst)); 64.417 + ins_encode %{ 64.418 + Label L_done; 64.419 + Register Rdst = $dst$$Register; 64.420 + __ ucomiss($src$$XMMRegister, $constantaddress($con)); 64.421 + __ movl(Rdst, -1); 64.422 + __ jcc(Assembler::parity, L_done); 64.423 + __ jcc(Assembler::below, L_done); 64.424 + __ setb(Assembler::notEqual, Rdst); 64.425 + __ movzbl(Rdst, Rdst); 64.426 + __ bind(L_done); 64.427 + %} 64.428 ins_pipe(pipe_slow); 64.429 %} 64.430 64.431 @@ -10591,23 +10538,29 @@ 64.432 %} 64.433 64.434 // Compare into -1,0,1 64.435 -instruct cmpD_imm(rRegI dst, regD src1, immD src2, rFlagsReg cr) 64.436 -%{ 64.437 - match(Set dst (CmpD3 src1 src2)); 64.438 +instruct cmpD_imm(rRegI dst, regD src, immD con, rFlagsReg cr) %{ 64.439 + match(Set dst (CmpD3 src con)); 64.440 effect(KILL cr); 64.441 64.442 ins_cost(275); 64.443 - format %{ "ucomisd $src1, [$src2]\n\t" 64.444 + format %{ "ucomisd $src, [$constantaddress]\t# load from constant table: double=$con\n\t" 64.445 "movl $dst, #-1\n\t" 64.446 "jp,s done\n\t" 64.447 "jb,s done\n\t" 64.448 "setne $dst\n\t" 64.449 "movzbl $dst, $dst\n" 64.450 "done:" %} 64.451 - 64.452 - opcode(0x66, 0x0F, 0x2E); 64.453 - ins_encode(OpcP, REX_reg_mem(src1, 
src2), OpcS, OpcT, load_immD(src1, src2), 64.454 - cmpfp3(dst)); 64.455 + ins_encode %{ 64.456 + Register Rdst = $dst$$Register; 64.457 + Label L_done; 64.458 + __ ucomisd($src$$XMMRegister, $constantaddress($con)); 64.459 + __ movl(Rdst, -1); 64.460 + __ jcc(Assembler::parity, L_done); 64.461 + __ jcc(Assembler::below, L_done); 64.462 + __ setb(Assembler::notEqual, Rdst); 64.463 + __ movzbl(Rdst, Rdst); 64.464 + __ bind(L_done); 64.465 + %} 64.466 ins_pipe(pipe_slow); 64.467 %} 64.468 64.469 @@ -10633,14 +10586,13 @@ 64.470 ins_pipe(pipe_slow); 64.471 %} 64.472 64.473 -instruct addF_imm(regF dst, immF src) 64.474 -%{ 64.475 - match(Set dst (AddF dst src)); 64.476 - 64.477 - format %{ "addss $dst, [$src]" %} 64.478 +instruct addF_imm(regF dst, immF con) %{ 64.479 + match(Set dst (AddF dst con)); 64.480 + format %{ "addss $dst, [$constantaddress]\t# load from constant table: float=$con" %} 64.481 ins_cost(150); // XXX 64.482 - opcode(0xF3, 0x0F, 0x58); 64.483 - ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immF(dst, src)); 64.484 + ins_encode %{ 64.485 + __ addss($dst$$XMMRegister, $constantaddress($con)); 64.486 + %} 64.487 ins_pipe(pipe_slow); 64.488 %} 64.489 64.490 @@ -10666,14 +10618,13 @@ 64.491 ins_pipe(pipe_slow); 64.492 %} 64.493 64.494 -instruct addD_imm(regD dst, immD src) 64.495 -%{ 64.496 - match(Set dst (AddD dst src)); 64.497 - 64.498 - format %{ "addsd $dst, [$src]" %} 64.499 +instruct addD_imm(regD dst, immD con) %{ 64.500 + match(Set dst (AddD dst con)); 64.501 + format %{ "addsd $dst, [$constantaddress]\t# load from constant table: double=$con" %} 64.502 ins_cost(150); // XXX 64.503 - opcode(0xF2, 0x0F, 0x58); 64.504 - ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immD(dst, src)); 64.505 + ins_encode %{ 64.506 + __ addsd($dst$$XMMRegister, $constantaddress($con)); 64.507 + %} 64.508 ins_pipe(pipe_slow); 64.509 %} 64.510 64.511 @@ -10699,14 +10650,13 @@ 64.512 ins_pipe(pipe_slow); 64.513 %} 64.514 64.515 -instruct 
subF_imm(regF dst, immF src) 64.516 -%{ 64.517 - match(Set dst (SubF dst src)); 64.518 - 64.519 - format %{ "subss $dst, [$src]" %} 64.520 +instruct subF_imm(regF dst, immF con) %{ 64.521 + match(Set dst (SubF dst con)); 64.522 + format %{ "subss $dst, [$constantaddress]\t# load from constant table: float=$con" %} 64.523 ins_cost(150); // XXX 64.524 - opcode(0xF3, 0x0F, 0x5C); 64.525 - ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immF(dst, src)); 64.526 + ins_encode %{ 64.527 + __ subss($dst$$XMMRegister, $constantaddress($con)); 64.528 + %} 64.529 ins_pipe(pipe_slow); 64.530 %} 64.531 64.532 @@ -10732,14 +10682,13 @@ 64.533 ins_pipe(pipe_slow); 64.534 %} 64.535 64.536 -instruct subD_imm(regD dst, immD src) 64.537 -%{ 64.538 - match(Set dst (SubD dst src)); 64.539 - 64.540 - format %{ "subsd $dst, [$src]" %} 64.541 +instruct subD_imm(regD dst, immD con) %{ 64.542 + match(Set dst (SubD dst con)); 64.543 + format %{ "subsd $dst, [$constantaddress]\t# load from constant table: double=$con" %} 64.544 ins_cost(150); // XXX 64.545 - opcode(0xF2, 0x0F, 0x5C); 64.546 - ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immD(dst, src)); 64.547 + ins_encode %{ 64.548 + __ subsd($dst$$XMMRegister, $constantaddress($con)); 64.549 + %} 64.550 ins_pipe(pipe_slow); 64.551 %} 64.552 64.553 @@ -10765,14 +10714,13 @@ 64.554 ins_pipe(pipe_slow); 64.555 %} 64.556 64.557 -instruct mulF_imm(regF dst, immF src) 64.558 -%{ 64.559 - match(Set dst (MulF dst src)); 64.560 - 64.561 - format %{ "mulss $dst, [$src]" %} 64.562 +instruct mulF_imm(regF dst, immF con) %{ 64.563 + match(Set dst (MulF dst con)); 64.564 + format %{ "mulss $dst, [$constantaddress]\t# load from constant table: float=$con" %} 64.565 ins_cost(150); // XXX 64.566 - opcode(0xF3, 0x0F, 0x59); 64.567 - ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immF(dst, src)); 64.568 + ins_encode %{ 64.569 + __ mulss($dst$$XMMRegister, $constantaddress($con)); 64.570 + %} 64.571 ins_pipe(pipe_slow); 64.572 %} 
64.573 64.574 @@ -10798,14 +10746,13 @@ 64.575 ins_pipe(pipe_slow); 64.576 %} 64.577 64.578 -instruct mulD_imm(regD dst, immD src) 64.579 -%{ 64.580 - match(Set dst (MulD dst src)); 64.581 - 64.582 - format %{ "mulsd $dst, [$src]" %} 64.583 +instruct mulD_imm(regD dst, immD con) %{ 64.584 + match(Set dst (MulD dst con)); 64.585 + format %{ "mulsd $dst, [$constantaddress]\t# load from constant table: double=$con" %} 64.586 ins_cost(150); // XXX 64.587 - opcode(0xF2, 0x0F, 0x59); 64.588 - ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immD(dst, src)); 64.589 + ins_encode %{ 64.590 + __ mulsd($dst$$XMMRegister, $constantaddress($con)); 64.591 + %} 64.592 ins_pipe(pipe_slow); 64.593 %} 64.594 64.595 @@ -10831,14 +10778,13 @@ 64.596 ins_pipe(pipe_slow); 64.597 %} 64.598 64.599 -instruct divF_imm(regF dst, immF src) 64.600 -%{ 64.601 - match(Set dst (DivF dst src)); 64.602 - 64.603 - format %{ "divss $dst, [$src]" %} 64.604 +instruct divF_imm(regF dst, immF con) %{ 64.605 + match(Set dst (DivF dst con)); 64.606 + format %{ "divss $dst, [$constantaddress]\t# load from constant table: float=$con" %} 64.607 ins_cost(150); // XXX 64.608 - opcode(0xF3, 0x0F, 0x5E); 64.609 - ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immF(dst, src)); 64.610 + ins_encode %{ 64.611 + __ divss($dst$$XMMRegister, $constantaddress($con)); 64.612 + %} 64.613 ins_pipe(pipe_slow); 64.614 %} 64.615 64.616 @@ -10864,14 +10810,13 @@ 64.617 ins_pipe(pipe_slow); 64.618 %} 64.619 64.620 -instruct divD_imm(regD dst, immD src) 64.621 -%{ 64.622 - match(Set dst (DivD dst src)); 64.623 - 64.624 - format %{ "divsd $dst, [$src]" %} 64.625 +instruct divD_imm(regD dst, immD con) %{ 64.626 + match(Set dst (DivD dst con)); 64.627 + format %{ "divsd $dst, [$constantaddress]\t# load from constant table: double=$con" %} 64.628 ins_cost(150); // XXX 64.629 - opcode(0xF2, 0x0F, 0x5E); 64.630 - ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immD(dst, src)); 64.631 + ins_encode %{ 64.632 + 
__ divsd($dst$$XMMRegister, $constantaddress($con)); 64.633 + %} 64.634 ins_pipe(pipe_slow); 64.635 %} 64.636 64.637 @@ -10897,14 +10842,13 @@ 64.638 ins_pipe(pipe_slow); 64.639 %} 64.640 64.641 -instruct sqrtF_imm(regF dst, immF src) 64.642 -%{ 64.643 - match(Set dst (ConvD2F (SqrtD (ConvF2D src)))); 64.644 - 64.645 - format %{ "sqrtss $dst, [$src]" %} 64.646 +instruct sqrtF_imm(regF dst, immF con) %{ 64.647 + match(Set dst (ConvD2F (SqrtD (ConvF2D con)))); 64.648 + format %{ "sqrtss $dst, [$constantaddress]\t# load from constant table: float=$con" %} 64.649 ins_cost(150); // XXX 64.650 - opcode(0xF3, 0x0F, 0x51); 64.651 - ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immF(dst, src)); 64.652 + ins_encode %{ 64.653 + __ sqrtss($dst$$XMMRegister, $constantaddress($con)); 64.654 + %} 64.655 ins_pipe(pipe_slow); 64.656 %} 64.657 64.658 @@ -10930,14 +10874,13 @@ 64.659 ins_pipe(pipe_slow); 64.660 %} 64.661 64.662 -instruct sqrtD_imm(regD dst, immD src) 64.663 -%{ 64.664 - match(Set dst (SqrtD src)); 64.665 - 64.666 - format %{ "sqrtsd $dst, [$src]" %} 64.667 +instruct sqrtD_imm(regD dst, immD con) %{ 64.668 + match(Set dst (SqrtD con)); 64.669 + format %{ "sqrtsd $dst, [$constantaddress]\t# load from constant table: double=$con" %} 64.670 ins_cost(150); // XXX 64.671 - opcode(0xF2, 0x0F, 0x51); 64.672 - ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immD(dst, src)); 64.673 + ins_encode %{ 64.674 + __ sqrtsd($dst$$XMMRegister, $constantaddress($con)); 64.675 + %} 64.676 ins_pipe(pipe_slow); 64.677 %} 64.678
65.1 --- a/src/os/linux/vm/os_linux.cpp Mon Dec 27 09:30:20 2010 -0500 65.2 +++ b/src/os/linux/vm/os_linux.cpp Mon Dec 27 09:56:29 2010 -0500 65.3 @@ -115,6 +115,7 @@ 65.4 # include <link.h> 65.5 # include <stdint.h> 65.6 # include <inttypes.h> 65.7 +# include <sys/ioctl.h> 65.8 65.9 #define MAX_PATH (2 * K) 65.10 65.11 @@ -4433,6 +4434,15 @@ 65.12 return 1; 65.13 } 65.14 65.15 +int os::socket_available(int fd, jint *pbytes) { 65.16 + // Linux doc says EINTR not returned, unlike Solaris 65.17 + int ret = ::ioctl(fd, FIONREAD, pbytes); 65.18 + 65.19 + //%% note ioctl can return 0 when successful, JVM_SocketAvailable 65.20 + // is expected to return 0 on failure and 1 on success to the jdk. 65.21 + return (ret < 0) ? 0 : 1; 65.22 +} 65.23 + 65.24 // Map a block of memory. 65.25 char* os::map_memory(int fd, const char* file_name, size_t file_offset, 65.26 char *addr, size_t bytes, bool read_only,
66.1 --- a/src/os/linux/vm/os_linux.inline.hpp Mon Dec 27 09:30:20 2010 -0500 66.2 +++ b/src/os/linux/vm/os_linux.inline.hpp Mon Dec 27 09:56:29 2010 -0500 66.3 @@ -45,7 +45,6 @@ 66.4 #include <unistd.h> 66.5 #include <sys/socket.h> 66.6 #include <sys/poll.h> 66.7 -#include <sys/ioctl.h> 66.8 #include <netdb.h> 66.9 66.10 inline void* os::thread_local_storage_at(int index) { 66.11 @@ -268,16 +267,6 @@ 66.12 RESTARTABLE_RETURN_INT(::sendto(fd, buf, len, (unsigned int) flags, to, tolen)); 66.13 } 66.14 66.15 -inline int os::socket_available(int fd, jint *pbytes) { 66.16 - // Linux doc says EINTR not returned, unlike Solaris 66.17 - int ret = ::ioctl(fd, FIONREAD, pbytes); 66.18 - 66.19 - //%% note ioctl can return 0 when successful, JVM_SocketAvailable 66.20 - // is expected to return 0 on failure and 1 on success to the jdk. 66.21 - return (ret < 0) ? 0 : 1; 66.22 -} 66.23 - 66.24 - 66.25 inline int os::socket_shutdown(int fd, int howto){ 66.26 return ::shutdown(fd, howto); 66.27 }
67.1 --- a/src/os/linux/vm/perfMemory_linux.cpp Mon Dec 27 09:30:20 2010 -0500 67.2 +++ b/src/os/linux/vm/perfMemory_linux.cpp Mon Dec 27 09:56:29 2010 -0500 67.3 @@ -635,7 +635,29 @@ 67.4 return -1; 67.5 } 67.6 67.7 - return fd; 67.8 + // Verify that we have enough disk space for this file. 67.9 + // We'll get random SIGBUS crashes on memory accesses if 67.10 + // we don't. 67.11 + 67.12 + for (size_t seekpos = 0; seekpos < size; seekpos += os::vm_page_size()) { 67.13 + int zero_int = 0; 67.14 + result = (int)os::seek_to_file_offset(fd, (jlong)(seekpos)); 67.15 + if (result == -1 ) break; 67.16 + RESTARTABLE(::write(fd, &zero_int, 1), result); 67.17 + if (result != 1) { 67.18 + if (errno == ENOSPC) { 67.19 + warning("Insufficient space for shared memory file:\n %s\nTry using the -Djava.io.tmpdir= option to select an alternate temp location.\n", filename); 67.20 + } 67.21 + break; 67.22 + } 67.23 + } 67.24 + 67.25 + if (result != -1) { 67.26 + return fd; 67.27 + } else { 67.28 + RESTARTABLE(::close(fd), result); 67.29 + return -1; 67.30 + } 67.31 } 67.32 67.33 // open the shared memory file for the given user and vmid. returns
68.1 --- a/src/os/linux/vm/vmError_linux.cpp Mon Dec 27 09:30:20 2010 -0500 68.2 +++ b/src/os/linux/vm/vmError_linux.cpp Mon Dec 27 09:56:29 2010 -0500 68.3 @@ -44,11 +44,11 @@ 68.4 jio_snprintf(p, buflen - len, 68.5 "\n\n" 68.6 "Do you want to debug the problem?\n\n" 68.7 - "To debug, run 'gdb /proc/%d/exe %d'; then switch to thread " INTX_FORMAT "\n" 68.8 + "To debug, run 'gdb /proc/%d/exe %d'; then switch to thread " INTX_FORMAT " (" INTPTR_FORMAT ")\n" 68.9 "Enter 'yes' to launch gdb automatically (PATH must include gdb)\n" 68.10 "Otherwise, press RETURN to abort...", 68.11 os::current_process_id(), os::current_process_id(), 68.12 - os::current_thread_id()); 68.13 + os::current_thread_id(), os::current_thread_id()); 68.14 68.15 yes = os::message_box("Unexpected Error", buf); 68.16
69.1 --- a/src/os/posix/launcher/java_md.c Mon Dec 27 09:30:20 2010 -0500 69.2 +++ b/src/os/posix/launcher/java_md.c Mon Dec 27 09:56:29 2010 -0500 69.3 @@ -812,13 +812,10 @@ 69.4 69.5 #ifdef GAMMA 69.6 { 69.7 - /* gamma launcher uses JAVA_HOME or ALT_JAVA_HOME environment variable to find JDK/JRE */ 69.8 - char* java_home_var = getenv("ALT_JAVA_HOME"); 69.9 + /* gamma launcher uses JAVA_HOME environment variable to find JDK/JRE */ 69.10 + char* java_home_var = getenv("JAVA_HOME"); 69.11 if (java_home_var == NULL) { 69.12 - java_home_var = getenv("JAVA_HOME"); 69.13 - } 69.14 - if (java_home_var == NULL) { 69.15 - printf("JAVA_HOME or ALT_JAVA_HOME must point to a valid JDK/JRE to run gamma\n"); 69.16 + printf("JAVA_HOME must point to a valid JDK/JRE to run gamma\n"); 69.17 return JNI_FALSE; 69.18 } 69.19 snprintf(buf, bufsize, "%s", java_home_var); 69.20 @@ -1837,7 +1834,7 @@ 69.21 if (pthread_create(&tid, &attr, (void *(*)(void*))continuation, (void*)args) == 0) { 69.22 void * tmp; 69.23 pthread_join(tid, &tmp); 69.24 - rslt = (int)tmp; 69.25 + rslt = (int)(intptr_t)tmp; 69.26 } else { 69.27 /* 69.28 * Continue execution in current thread if for some reason (e.g. out of 69.29 @@ -1855,7 +1852,7 @@ 69.30 if (thr_create(NULL, stack_size, (void *(*)(void *))continuation, args, flags, &tid) == 0) { 69.31 void * tmp; 69.32 thr_join(tid, NULL, &tmp); 69.33 - rslt = (int)tmp; 69.34 + rslt = (int)(intptr_t)tmp; 69.35 } else { 69.36 /* See above. Continue in current thread if thr_create() failed */ 69.37 rslt = continuation(args);
70.1 --- a/src/os/posix/launcher/launcher.script Mon Dec 27 09:30:20 2010 -0500 70.2 +++ b/src/os/posix/launcher/launcher.script Mon Dec 27 09:56:29 2010 -0500 70.3 @@ -95,17 +95,21 @@ 70.4 ;; 70.5 esac 70.6 70.7 +# Find out the absolute path to this script 70.8 +MYDIR=$(cd $(dirname $SCRIPT) && pwd) 70.9 + 70.10 +JDK= 70.11 if [ "${ALT_JAVA_HOME}" = "" ]; then 70.12 - if [ "${JAVA_HOME}" = "" ]; then 70.13 - echo "Neither ALT_JAVA_HOME nor JAVA_HOME is set. Aborting."; 70.14 - exit 1; 70.15 - else 70.16 - JDK=${JAVA_HOME%%/jre}; 70.17 - fi 70.18 + source ${MYDIR}/jdkpath.sh 70.19 else 70.20 JDK=${ALT_JAVA_HOME%%/jre}; 70.21 fi 70.22 70.23 +if [ "${JDK}" = "" ]; then 70.24 + echo Failed to find JDK. ALT_JAVA_HOME is not set or ./jdkpath.sh is empty or not found. 70.25 + exit 1 70.26 +fi 70.27 + 70.28 # We will set the LD_LIBRARY_PATH as follows: 70.29 # o $JVMPATH (directory portion only) 70.30 # o $JRE/lib/$ARCH
71.1 --- a/src/os/solaris/vm/os_solaris.cpp Mon Dec 27 09:30:20 2010 -0500 71.2 +++ b/src/os/solaris/vm/os_solaris.cpp Mon Dec 27 09:56:29 2010 -0500 71.3 @@ -80,6 +80,7 @@ 71.4 // put OS-includes here 71.5 # include <dlfcn.h> 71.6 # include <errno.h> 71.7 +# include <exception> 71.8 # include <link.h> 71.9 # include <poll.h> 71.10 # include <pthread.h> 71.11 @@ -1475,6 +1476,13 @@ 71.12 return &allowdebug_blocked_sigs; 71.13 } 71.14 71.15 + 71.16 +void _handle_uncaught_cxx_exception() { 71.17 + VMError err("An uncaught C++ exception"); 71.18 + err.report_and_die(); 71.19 +} 71.20 + 71.21 + 71.22 // First crack at OS-specific initialization, from inside the new thread. 71.23 void os::initialize_thread() { 71.24 int r = thr_main() ; 71.25 @@ -1564,6 +1572,7 @@ 71.26 // use the dynamic check for T2 libthread. 71.27 71.28 os::Solaris::init_thread_fpu_state(); 71.29 + std::set_terminate(_handle_uncaught_cxx_exception); 71.30 } 71.31 71.32
72.1 --- a/src/os/windows/launcher/java_md.c Mon Dec 27 09:30:20 2010 -0500 72.2 +++ b/src/os/windows/launcher/java_md.c Mon Dec 27 09:56:29 2010 -0500 72.3 @@ -22,6 +22,7 @@ 72.4 * 72.5 */ 72.6 72.7 +#include <ctype.h> 72.8 #include <windows.h> 72.9 #include <io.h> 72.10 #include <process.h> 72.11 @@ -486,16 +487,62 @@ 72.12 72.13 #else /* ifndef GAMMA */ 72.14 72.15 - /* gamma launcher uses JAVA_HOME or ALT_JAVA_HOME environment variable to find JDK/JRE */ 72.16 - char* java_home_var = getenv("ALT_JAVA_HOME"); 72.17 - if (java_home_var == NULL) { 72.18 - java_home_var = getenv("JAVA_HOME"); 72.19 + char env[MAXPATHLEN + 1]; 72.20 + 72.21 + /* gamma launcher uses ALT_JAVA_HOME environment variable or jdkpath.txt file to find JDK/JRE */ 72.22 + 72.23 + if (getenv("ALT_JAVA_HOME") != NULL) { 72.24 + snprintf(buf, bufsize, "%s", getenv("ALT_JAVA_HOME")); 72.25 } 72.26 - if (java_home_var == NULL) { 72.27 - printf("JAVA_HOME or ALT_JAVA_HOME must point to a valid JDK/JRE to run gamma\n"); 72.28 - return JNI_FALSE; 72.29 + else { 72.30 + char path[MAXPATHLEN + 1]; 72.31 + char* p; 72.32 + int len; 72.33 + FILE* fp; 72.34 + 72.35 + // find the path to the currect executable 72.36 + len = GetModuleFileName(NULL, path, MAXPATHLEN + 1); 72.37 + if (len == 0 || len > MAXPATHLEN) { 72.38 + printf("Could not get directory of current executable."); 72.39 + return JNI_FALSE; 72.40 + } 72.41 + // remove last path component ("hotspot.exe") 72.42 + p = strrchr(path, '\\'); 72.43 + if (p == NULL) { 72.44 + printf("Could not parse directory of current executable.\n"); 72.45 + return JNI_FALSE; 72.46 + } 72.47 + *p = '\0'; 72.48 + 72.49 + // open jdkpath.txt and read JAVA_HOME from it 72.50 + if (strlen(path) + strlen("\\jdkpath.txt") + 1 >= MAXPATHLEN) { 72.51 + printf("Path too long: %s\n", path); 72.52 + return JNI_FALSE; 72.53 + } 72.54 + strcat(path, "\\jdkpath.txt"); 72.55 + fp = fopen(path, "r"); 72.56 + if (fp == NULL) { 72.57 + printf("Could not open file %s to get path to 
JDK.\n", path); 72.58 + return JNI_FALSE; 72.59 + } 72.60 + 72.61 + if (fgets(buf, bufsize, fp) == NULL) { 72.62 + printf("Could not read from file %s to get path to JDK.\n", path); 72.63 + fclose(fp); 72.64 + return JNI_FALSE; 72.65 + } 72.66 + // trim the buffer 72.67 + p = buf + strlen(buf) - 1; 72.68 + while(isspace(*p)) { 72.69 + *p = '\0'; 72.70 + p--; 72.71 + } 72.72 + fclose(fp); 72.73 } 72.74 - snprintf(buf, bufsize, "%s", java_home_var); 72.75 + 72.76 + _snprintf(env, MAXPATHLEN, "JAVA_HOME=%s", buf); 72.77 + _putenv(env); 72.78 + 72.79 return JNI_TRUE; 72.80 #endif /* ifndef GAMMA */ 72.81 }
73.1 --- a/src/os/windows/vm/os_windows.cpp Mon Dec 27 09:30:20 2010 -0500 73.2 +++ b/src/os/windows/vm/os_windows.cpp Mon Dec 27 09:56:29 2010 -0500 73.3 @@ -1,5 +1,5 @@ 73.4 /* 73.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 73.6 + * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 73.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 73.8 * 73.9 * This code is free software; you can redistribute it and/or modify it 73.10 @@ -1711,14 +1711,11 @@ 73.11 buf[0] = '\0'; 73.12 if (strcmp(Arguments::sun_java_launcher(), "gamma") == 0) { 73.13 // Support for the gamma launcher. Check for an 73.14 - // ALT_JAVA_HOME or JAVA_HOME environment variable 73.15 + // JAVA_HOME environment variable 73.16 // and fix up the path so it looks like 73.17 // libjvm.so is installed there (append a fake suffix 73.18 // hotspot/libjvm.so). 73.19 - char* java_home_var = ::getenv("ALT_JAVA_HOME"); 73.20 - if (java_home_var == NULL) { 73.21 - java_home_var = ::getenv("JAVA_HOME"); 73.22 - } 73.23 + char* java_home_var = ::getenv("JAVA_HOME"); 73.24 if (java_home_var != NULL && java_home_var[0] != 0) { 73.25 73.26 strncpy(buf, java_home_var, buflen); 73.27 @@ -2007,6 +2004,16 @@ 73.28 int number; 73.29 }; 73.30 73.31 +// All Visual C++ exceptions thrown from code generated by the Microsoft Visual 73.32 +// C++ compiler contain this error code. Because this is a compiler-generated 73.33 +// error, the code is not listed in the Win32 API header files. 73.34 +// The code is actually a cryptic mnemonic device, with the initial "E" 73.35 +// standing for "exception" and the final 3 bytes (0x6D7363) representing the 73.36 +// ASCII values of "msc". 
73.37 + 73.38 +#define EXCEPTION_UNCAUGHT_CXX_EXCEPTION 0xE06D7363 73.39 + 73.40 + 73.41 struct siglabel exceptlabels[] = { 73.42 def_excpt(EXCEPTION_ACCESS_VIOLATION), 73.43 def_excpt(EXCEPTION_DATATYPE_MISALIGNMENT), 73.44 @@ -2031,6 +2038,7 @@ 73.45 def_excpt(EXCEPTION_INVALID_DISPOSITION), 73.46 def_excpt(EXCEPTION_GUARD_PAGE), 73.47 def_excpt(EXCEPTION_INVALID_HANDLE), 73.48 + def_excpt(EXCEPTION_UNCAUGHT_CXX_EXCEPTION), 73.49 NULL, 0 73.50 }; 73.51 73.52 @@ -2264,7 +2272,6 @@ 73.53 } 73.54 } 73.55 73.56 - 73.57 if (t != NULL && t->is_Java_thread()) { 73.58 JavaThread* thread = (JavaThread*) t; 73.59 bool in_java = thread->thread_state() == _thread_in_Java; 73.60 @@ -2468,8 +2475,9 @@ 73.61 } // switch 73.62 } 73.63 #ifndef _WIN64 73.64 - if ((thread->thread_state() == _thread_in_Java) || 73.65 - (thread->thread_state() == _thread_in_native) ) 73.66 + if (((thread->thread_state() == _thread_in_Java) || 73.67 + (thread->thread_state() == _thread_in_native)) && 73.68 + exception_code != EXCEPTION_UNCAUGHT_CXX_EXCEPTION) 73.69 { 73.70 LONG result=Handle_FLT_Exception(exceptionInfo); 73.71 if (result==EXCEPTION_CONTINUE_EXECUTION) return result; 73.72 @@ -2493,6 +2501,7 @@ 73.73 case EXCEPTION_ILLEGAL_INSTRUCTION_2: 73.74 case EXCEPTION_INT_OVERFLOW: 73.75 case EXCEPTION_INT_DIVIDE_BY_ZERO: 73.76 + case EXCEPTION_UNCAUGHT_CXX_EXCEPTION: 73.77 { report_error(t, exception_code, pc, exceptionInfo->ExceptionRecord, 73.78 exceptionInfo->ContextRecord); 73.79 }
74.1 --- a/src/share/tools/ProjectCreator/BuildConfig.java Mon Dec 27 09:30:20 2010 -0500 74.2 +++ b/src/share/tools/ProjectCreator/BuildConfig.java Mon Dec 27 09:56:29 2010 -0500 74.3 @@ -22,8 +22,11 @@ 74.4 * 74.5 */ 74.6 74.7 -import java.util.*; 74.8 import java.io.File; 74.9 +import java.util.Enumeration; 74.10 +import java.util.Hashtable; 74.11 +import java.util.Iterator; 74.12 +import java.util.Vector; 74.13 74.14 class BuildConfig { 74.15 Hashtable vars; 74.16 @@ -57,7 +60,6 @@ 74.17 74.18 // ones mentioned above were needed to expand format 74.19 String buildBase = expandFormat(getFieldString(null, "BuildBase")); 74.20 - String jdkDir = getFieldString(null, "JdkTargetRoot"); 74.21 String sourceBase = getFieldString(null, "SourceBase"); 74.22 String outDir = buildBase; 74.23 74.24 @@ -65,7 +67,7 @@ 74.25 put("OutputDir", outDir); 74.26 put("SourceBase", sourceBase); 74.27 put("BuildBase", buildBase); 74.28 - put("OutputDll", jdkDir + Util.sep + outDll); 74.29 + put("OutputDll", outDir + Util.sep + outDll); 74.30 74.31 context = new String [] {flavourBuild, flavour, build, null}; 74.32 } 74.33 @@ -537,68 +539,75 @@ 74.34 } 74.35 } 74.36 74.37 -class C1DebugConfig extends GenericDebugConfig { 74.38 +abstract class GenericDebugNonKernelConfig extends GenericDebugConfig { 74.39 + protected void init(Vector includes, Vector defines) { 74.40 + super.init(includes, defines); 74.41 + getCI().getAdditionalNonKernelLinkerFlags(getV("LinkerFlags")); 74.42 + } 74.43 +} 74.44 + 74.45 +class C1DebugConfig extends GenericDebugNonKernelConfig { 74.46 String getOptFlag() { 74.47 return getCI().getNoOptFlag(); 74.48 } 74.49 74.50 C1DebugConfig() { 74.51 - initNames("compiler1", "debug", "fastdebug\\jre\\bin\\client\\jvm.dll"); 74.52 + initNames("compiler1", "debug", "jvm.dll"); 74.53 init(getIncludes(), getDefines()); 74.54 } 74.55 } 74.56 74.57 -class C1FastDebugConfig extends GenericDebugConfig { 74.58 +class C1FastDebugConfig extends GenericDebugNonKernelConfig { 74.59 
String getOptFlag() { 74.60 return getCI().getOptFlag(); 74.61 } 74.62 74.63 C1FastDebugConfig() { 74.64 - initNames("compiler1", "fastdebug", "fastdebug\\jre\\bin\\client\\jvm.dll"); 74.65 + initNames("compiler1", "fastdebug", "jvm.dll"); 74.66 init(getIncludes(), getDefines()); 74.67 } 74.68 } 74.69 74.70 -class C2DebugConfig extends GenericDebugConfig { 74.71 +class C2DebugConfig extends GenericDebugNonKernelConfig { 74.72 String getOptFlag() { 74.73 return getCI().getNoOptFlag(); 74.74 } 74.75 74.76 C2DebugConfig() { 74.77 - initNames("compiler2", "debug", "fastdebug\\jre\\bin\\server\\jvm.dll"); 74.78 + initNames("compiler2", "debug", "jvm.dll"); 74.79 init(getIncludes(), getDefines()); 74.80 } 74.81 } 74.82 74.83 -class C2FastDebugConfig extends GenericDebugConfig { 74.84 +class C2FastDebugConfig extends GenericDebugNonKernelConfig { 74.85 String getOptFlag() { 74.86 return getCI().getOptFlag(); 74.87 } 74.88 74.89 C2FastDebugConfig() { 74.90 - initNames("compiler2", "fastdebug", "fastdebug\\jre\\bin\\server\\jvm.dll"); 74.91 + initNames("compiler2", "fastdebug", "jvm.dll"); 74.92 init(getIncludes(), getDefines()); 74.93 } 74.94 } 74.95 74.96 -class TieredDebugConfig extends GenericDebugConfig { 74.97 +class TieredDebugConfig extends GenericDebugNonKernelConfig { 74.98 String getOptFlag() { 74.99 return getCI().getNoOptFlag(); 74.100 } 74.101 74.102 TieredDebugConfig() { 74.103 - initNames("tiered", "debug", "fastdebug\\jre\\bin\\server\\jvm.dll"); 74.104 + initNames("tiered", "debug", "jvm.dll"); 74.105 init(getIncludes(), getDefines()); 74.106 } 74.107 } 74.108 74.109 -class TieredFastDebugConfig extends GenericDebugConfig { 74.110 +class TieredFastDebugConfig extends GenericDebugNonKernelConfig { 74.111 String getOptFlag() { 74.112 return getCI().getOptFlag(); 74.113 } 74.114 74.115 TieredFastDebugConfig() { 74.116 - initNames("tiered", "fastdebug", "fastdebug\\jre\\bin\\server\\jvm.dll"); 74.117 + initNames("tiered", "fastdebug", "jvm.dll"); 74.118 
init(getIncludes(), getDefines()); 74.119 } 74.120 } 74.121 @@ -618,45 +627,45 @@ 74.122 74.123 class C1ProductConfig extends ProductConfig { 74.124 C1ProductConfig() { 74.125 - initNames("compiler1", "product", "jre\\bin\\client\\jvm.dll"); 74.126 + initNames("compiler1", "product", "jvm.dll"); 74.127 init(getIncludes(), getDefines()); 74.128 } 74.129 } 74.130 74.131 class C2ProductConfig extends ProductConfig { 74.132 C2ProductConfig() { 74.133 - initNames("compiler2", "product", "jre\\bin\\server\\jvm.dll"); 74.134 + initNames("compiler2", "product", "jvm.dll"); 74.135 init(getIncludes(), getDefines()); 74.136 } 74.137 } 74.138 74.139 class TieredProductConfig extends ProductConfig { 74.140 TieredProductConfig() { 74.141 - initNames("tiered", "product", "jre\\bin\\server\\jvm.dll"); 74.142 + initNames("tiered", "product", "jvm.dll"); 74.143 init(getIncludes(), getDefines()); 74.144 } 74.145 } 74.146 74.147 74.148 -class CoreDebugConfig extends GenericDebugConfig { 74.149 +class CoreDebugConfig extends GenericDebugNonKernelConfig { 74.150 String getOptFlag() { 74.151 return getCI().getNoOptFlag(); 74.152 } 74.153 74.154 CoreDebugConfig() { 74.155 - initNames("core", "debug", "fastdebug\\jre\\bin\\core\\jvm.dll"); 74.156 + initNames("core", "debug", "jvm.dll"); 74.157 init(getIncludes(), getDefines()); 74.158 } 74.159 } 74.160 74.161 74.162 -class CoreFastDebugConfig extends GenericDebugConfig { 74.163 +class CoreFastDebugConfig extends GenericDebugNonKernelConfig { 74.164 String getOptFlag() { 74.165 return getCI().getOptFlag(); 74.166 } 74.167 74.168 CoreFastDebugConfig() { 74.169 - initNames("core", "fastdebug", "fastdebug\\jre\\bin\\core\\jvm.dll"); 74.170 + initNames("core", "fastdebug", "jvm.dll"); 74.171 init(getIncludes(), getDefines()); 74.172 } 74.173 } 74.174 @@ -664,7 +673,7 @@ 74.175 74.176 class CoreProductConfig extends ProductConfig { 74.177 CoreProductConfig() { 74.178 - initNames("core", "product", "jre\\bin\\core\\jvm.dll"); 74.179 + 
initNames("core", "product", "jvm.dll"); 74.180 init(getIncludes(), getDefines()); 74.181 } 74.182 } 74.183 @@ -675,7 +684,7 @@ 74.184 } 74.185 74.186 KernelDebugConfig() { 74.187 - initNames("kernel", "debug", "fastdebug\\jre\\bin\\kernel\\jvm.dll"); 74.188 + initNames("kernel", "debug", "jvm.dll"); 74.189 init(getIncludes(), getDefines()); 74.190 } 74.191 } 74.192 @@ -687,7 +696,7 @@ 74.193 } 74.194 74.195 KernelFastDebugConfig() { 74.196 - initNames("kernel", "fastdebug", "fastdebug\\jre\\bin\\kernel\\jvm.dll"); 74.197 + initNames("kernel", "fastdebug", "jvm.dll"); 74.198 init(getIncludes(), getDefines()); 74.199 } 74.200 } 74.201 @@ -695,7 +704,7 @@ 74.202 74.203 class KernelProductConfig extends ProductConfig { 74.204 KernelProductConfig() { 74.205 - initNames("kernel", "product", "jre\\bin\\kernel\\jvm.dll"); 74.206 + initNames("kernel", "product", "jvm.dll"); 74.207 init(getIncludes(), getDefines()); 74.208 } 74.209 } 74.210 @@ -704,6 +713,7 @@ 74.211 abstract Vector getBaseLinkerFlags(String outDir, String outDll); 74.212 abstract Vector getDebugCompilerFlags(String opt); 74.213 abstract Vector getDebugLinkerFlags(); 74.214 + abstract void getAdditionalNonKernelLinkerFlags(Vector rv); 74.215 abstract Vector getProductCompilerFlags(); 74.216 abstract Vector getProductLinkerFlags(); 74.217 abstract String getOptFlag(); 74.218 @@ -713,4 +723,14 @@ 74.219 void addAttr(Vector receiver, String attr, String value) { 74.220 receiver.add(attr); receiver.add(value); 74.221 } 74.222 + void extAttr(Vector receiver, String attr, String value) { 74.223 + int attr_pos=receiver.indexOf(attr) ; 74.224 + if ( attr_pos == -1) { 74.225 + // If attr IS NOT present in the Vector - add it 74.226 + receiver.add(attr); receiver.add(value); 74.227 + } else { 74.228 + // If attr IS present in the Vector - append value to it 74.229 + receiver.set(attr_pos+1,receiver.get(attr_pos+1)+value); 74.230 + } 74.231 + } 74.232 }
75.1 --- a/src/share/tools/ProjectCreator/WinGammaPlatform.java Mon Dec 27 09:30:20 2010 -0500 75.2 +++ b/src/share/tools/ProjectCreator/WinGammaPlatform.java Mon Dec 27 09:56:29 2010 -0500 75.3 @@ -22,8 +22,15 @@ 75.4 * 75.5 */ 75.6 75.7 -import java.io.*; 75.8 -import java.util.*; 75.9 +import java.io.File; 75.10 +import java.io.IOException; 75.11 +import java.io.PrintWriter; 75.12 +import java.util.Enumeration; 75.13 +import java.util.Hashtable; 75.14 +import java.util.Iterator; 75.15 +import java.util.List; 75.16 +import java.util.TreeSet; 75.17 +import java.util.Vector; 75.18 75.19 abstract class HsArgHandler extends ArgHandler { 75.20 static final int STRING = 1; 75.21 @@ -345,11 +352,23 @@ 75.22 new ArgsParser(args, 75.23 new ArgRule[] 75.24 { 75.25 - new HsArgRule("-sourceBase", 75.26 - "SourceBase", 75.27 - " (Did you set the HotSpotWorkSpace environment variable?)", 75.28 - HsArgHandler.STRING 75.29 - ), 75.30 + new ArgRule("-sourceBase", 75.31 + new HsArgHandler() { 75.32 + public void handle(ArgIterator it) { 75.33 + String cfg = getCfg(it.get()); 75.34 + if (nextNotKey(it)) { 75.35 + String sb = (String) it.get(); 75.36 + if (sb.endsWith(Util.sep)) { 75.37 + sb = sb.substring(0, sb.length() - 1); 75.38 + } 75.39 + BuildConfig.putField(cfg, "SourceBase", sb); 75.40 + it.next(); 75.41 + } else { 75.42 + empty("-sourceBase", null); 75.43 + } 75.44 + } 75.45 + } 75.46 + ), 75.47 75.48 new HsArgRule("-buildBase", 75.49 "BuildBase", 75.50 @@ -512,7 +531,6 @@ 75.51 new HsArgHandler() { 75.52 public void handle(ArgIterator it) { 75.53 if (nextNotKey(it)) { 75.54 - String build = it.get(); 75.55 if (nextNotKey(it)) { 75.56 String description = it.get(); 75.57 if (nextNotKey(it)) { 75.58 @@ -528,7 +546,28 @@ 75.59 empty(null, "** Error: wrong number of args to -prelink"); 75.60 } 75.61 } 75.62 - ) 75.63 + ), 75.64 + 75.65 + new ArgRule("-postbuild", 75.66 + new HsArgHandler() { 75.67 + public void handle(ArgIterator it) { 75.68 + if (nextNotKey(it)) { 75.69 + if 
(nextNotKey(it)) { 75.70 + String description = it.get(); 75.71 + if (nextNotKey(it)) { 75.72 + String command = it.get(); 75.73 + BuildConfig.putField(null, "PostbuildDescription", description); 75.74 + BuildConfig.putField(null, "PostbuildCommand", command); 75.75 + it.next(); 75.76 + return; 75.77 + } 75.78 + } 75.79 + } 75.80 + 75.81 + empty(null, "** Error: wrong number of args to -postbuild"); 75.82 + } 75.83 + } 75.84 + ), 75.85 }, 75.86 new ArgHandler() { 75.87 public void handle(ArgIterator it) { 75.88 @@ -618,10 +657,6 @@ 75.89 75.90 public int compareTo(Object o) { 75.91 FileInfo oo = (FileInfo)o; 75.92 - // Don't squelch identical short file names where the full 75.93 - // paths are different 75.94 - if (!attr.shortName.equals(oo.attr.shortName)) 75.95 - return attr.shortName.compareTo(oo.attr.shortName); 75.96 return full.compareTo(oo.full); 75.97 } 75.98
76.1 --- a/src/share/tools/ProjectCreator/WinGammaPlatformVC6.java Mon Dec 27 09:30:20 2010 -0500 76.2 +++ b/src/share/tools/ProjectCreator/WinGammaPlatformVC6.java Mon Dec 27 09:56:29 2010 -0500 76.3 @@ -260,6 +260,8 @@ 76.4 return rv; 76.5 } 76.6 76.7 + void getAdditionalNonKernelLinkerFlags(Vector rv) {} 76.8 + 76.9 Vector getProductCompilerFlags() { 76.10 Vector rv = new Vector(); 76.11
77.1 --- a/src/share/tools/ProjectCreator/WinGammaPlatformVC7.java Mon Dec 27 09:30:20 2010 -0500 77.2 +++ b/src/share/tools/ProjectCreator/WinGammaPlatformVC7.java Mon Dec 27 09:56:29 2010 -0500 77.3 @@ -22,8 +22,13 @@ 77.4 * 77.5 */ 77.6 77.7 -import java.io.*; 77.8 -import java.util.*; 77.9 +import java.io.FileWriter; 77.10 +import java.io.IOException; 77.11 +import java.io.PrintWriter; 77.12 +import java.util.Hashtable; 77.13 +import java.util.Iterator; 77.14 +import java.util.TreeSet; 77.15 +import java.util.Vector; 77.16 77.17 public class WinGammaPlatformVC7 extends WinGammaPlatform { 77.18 77.19 @@ -104,7 +109,9 @@ 77.20 77.21 77.22 boolean match(FileInfo fi) { 77.23 - return fi.full.regionMatches(true, baseLen, dir, 0, dirLen); 77.24 + int lastSlashIndex = fi.full.lastIndexOf('/'); 77.25 + String fullDir = fi.full.substring(0, lastSlashIndex); 77.26 + return fullDir.endsWith(dir); 77.27 } 77.28 } 77.29 77.30 @@ -217,65 +224,41 @@ 77.31 // - container filter just provides a container to group together real filters 77.32 // - real filter can select elements from the set according to some rule, put it into XML 77.33 // and remove from the list 77.34 - Vector makeFilters(TreeSet files) { 77.35 + Vector makeFilters(TreeSet<FileInfo> files) { 77.36 Vector rv = new Vector(); 77.37 String sbase = Util.normalize(BuildConfig.getFieldString(null, "SourceBase")+"/src/"); 77.38 77.39 - ContainerFilter rt = new ContainerFilter("Runtime"); 77.40 - rt.add(new DirectoryFilter("share/vm/prims", sbase)); 77.41 - rt.add(new DirectoryFilter("share/vm/runtime", sbase)); 77.42 - rt.add(new DirectoryFilter("share/vm/oops", sbase)); 77.43 - rv.add(rt); 77.44 + String currentDir = ""; 77.45 + DirectoryFilter container = null; 77.46 + for(FileInfo fileInfo : files) { 77.47 77.48 - ContainerFilter gc = new ContainerFilter("GC"); 77.49 - gc.add(new DirectoryFilter("share/vm/memory", sbase)); 77.50 - gc.add(new DirectoryFilter("share/vm/gc_interface", sbase)); 77.51 + if 
(!fileInfo.full.startsWith(sbase)) { 77.52 + continue; 77.53 + } 77.54 77.55 - ContainerFilter gc_impl = new ContainerFilter("Implementations"); 77.56 - gc_impl.add(new DirectoryFilter("CMS", 77.57 - "share/vm/gc_implementation/concurrentMarkSweep", 77.58 - sbase)); 77.59 - gc_impl.add(new DirectoryFilter("Parallel Scavenge", 77.60 - "share/vm/gc_implementation/parallelScavenge", 77.61 - sbase)); 77.62 - gc_impl.add(new DirectoryFilter("Shared", 77.63 - "share/vm/gc_implementation/shared", 77.64 - sbase)); 77.65 - // for all leftovers 77.66 - gc_impl.add(new DirectoryFilter("Misc", 77.67 - "share/vm/gc_implementation", 77.68 - sbase)); 77.69 + int lastSlash = fileInfo.full.lastIndexOf('/'); 77.70 + String dir = fileInfo.full.substring(sbase.length(), lastSlash); 77.71 + if(dir.equals("share/vm")) { 77.72 + // skip files directly in share/vm - should only be precompiled.hpp which is handled below 77.73 + continue; 77.74 + } 77.75 + if (!dir.equals(currentDir)) { 77.76 + currentDir = dir; 77.77 + if (container != null) { 77.78 + rv.add(container); 77.79 + } 77.80 77.81 - gc.add(gc_impl); 77.82 - rv.add(gc); 77.83 - 77.84 - rv.add(new DirectoryFilter("C1", "share/vm/c1", sbase)); 77.85 - 77.86 - rv.add(new DirectoryFilter("C2", "share/vm/opto", sbase)); 77.87 - 77.88 - ContainerFilter comp = new ContainerFilter("Compiler Common"); 77.89 - comp.add(new DirectoryFilter("share/vm/asm", sbase)); 77.90 - comp.add(new DirectoryFilter("share/vm/ci", sbase)); 77.91 - comp.add(new DirectoryFilter("share/vm/code", sbase)); 77.92 - comp.add(new DirectoryFilter("share/vm/compiler", sbase)); 77.93 - rv.add(comp); 77.94 - 77.95 - rv.add(new DirectoryFilter("Interpreter", 77.96 - "share/vm/interpreter", 77.97 - sbase)); 77.98 - 77.99 - ContainerFilter misc = new ContainerFilter("Misc"); 77.100 - misc.add(new DirectoryFilter("share/vm/libadt", sbase)); 77.101 - misc.add(new DirectoryFilter("share/vm/services", sbase)); 77.102 - misc.add(new DirectoryFilter("share/vm/utilities", 
sbase)); 77.103 - misc.add(new DirectoryFilter("share/vm/classfile", sbase)); 77.104 - rv.add(misc); 77.105 - 77.106 - rv.add(new DirectoryFilter("os_cpu", sbase)); 77.107 - 77.108 - rv.add(new DirectoryFilter("cpu", sbase)); 77.109 - 77.110 - rv.add(new DirectoryFilter("os", sbase)); 77.111 + // remove "share/vm/" from names 77.112 + String name = dir; 77.113 + if (dir.startsWith("share/vm/")) { 77.114 + name = dir.substring("share/vm/".length(), dir.length()); 77.115 + } 77.116 + container = new DirectoryFilter(name, dir, sbase); 77.117 + } 77.118 + } 77.119 + if (container != null) { 77.120 + rv.add(container); 77.121 + } 77.122 77.123 ContainerFilter generated = new ContainerFilter("Generated"); 77.124 ContainerFilter c1Generated = new ContainerFilter("C1"); 77.125 @@ -397,7 +380,6 @@ 77.126 "Name", cfg, 77.127 "ExcludedFromBuild", "TRUE" 77.128 }); 77.129 - tag("Tool", new String[] {"Name", "VCCLCompilerTool"}); 77.130 endTag("FileConfiguration"); 77.131 77.132 } 77.133 @@ -441,7 +423,11 @@ 77.134 77.135 tag("Tool", 77.136 new String[] { 77.137 - "Name", "VCPostBuildEventTool" 77.138 + "Name", "VCPostBuildEventTool", 77.139 + "Description", BuildConfig.getFieldString(null, "PostbuildDescription"), 77.140 + //Caution: String.replace(String,String) is available from JDK5 onwards only 77.141 + "CommandLine", cfg.expandFormat(BuildConfig.getFieldString(null, "PostbuildCommand").replace 77.142 + ("\t", "
")) 77.143 } 77.144 ); 77.145 77.146 @@ -469,33 +455,6 @@ 77.147 "Culture", "1033" 77.148 } 77.149 ); 77.150 - tag("Tool", 77.151 - new String[] { 77.152 - "Name", "VCWebServiceProxyGeneratorTool" 77.153 - } 77.154 - ); 77.155 - 77.156 - tag ("Tool", 77.157 - new String[] { 77.158 - "Name", "VCXMLDataGeneratorTool" 77.159 - } 77.160 - ); 77.161 - 77.162 - tag("Tool", 77.163 - new String[] { 77.164 - "Name", "VCWebDeploymentTool" 77.165 - } 77.166 - ); 77.167 - tag("Tool", 77.168 - new String[] { 77.169 - "Name", "VCManagedWrapperGeneratorTool" 77.170 - } 77.171 - ); 77.172 - tag("Tool", 77.173 - new String[] { 77.174 - "Name", "VCAuxiliaryManagedWrapperGeneratorTool" 77.175 - } 77.176 - ); 77.177 77.178 tag("Tool", 77.179 new String[] { 77.180 @@ -597,7 +556,7 @@ 77.181 addAttr(rv, "PrecompiledHeaderFile", outDir+Util.sep+"vm.pch"); 77.182 addAttr(rv, "AssemblerListingLocation", outDir); 77.183 addAttr(rv, "ObjectFile", outDir+Util.sep); 77.184 - addAttr(rv, "ProgramDataBaseFileName", outDir+Util.sep+"vm.pdb"); 77.185 + addAttr(rv, "ProgramDataBaseFileName", outDir+Util.sep+"jvm.pdb"); 77.186 // Set /nologo optin 77.187 addAttr(rv, "SuppressStartupBanner", "TRUE"); 77.188 // Surpass the default /Tc or /Tp. 0 is compileAsDefault 77.189 @@ -631,17 +590,22 @@ 77.190 addAttr(rv, "AdditionalOptions", 77.191 "/export:JNI_GetDefaultJavaVMInitArgs " + 77.192 "/export:JNI_CreateJavaVM " + 77.193 + "/export:JVM_FindClassFromBootLoader "+ 77.194 "/export:JNI_GetCreatedJavaVMs "+ 77.195 "/export:jio_snprintf /export:jio_printf "+ 77.196 "/export:jio_fprintf /export:jio_vfprintf "+ 77.197 - "/export:jio_vsnprintf "); 77.198 + "/export:jio_vsnprintf "+ 77.199 + "/export:JVM_GetVersionInfo "+ 77.200 + "/export:JVM_GetThreadStateNames "+ 77.201 + "/export:JVM_GetThreadStateValues "+ 77.202 + "/export:JVM_InitAgentProperties "); 77.203 addAttr(rv, "AdditionalDependencies", "Wsock32.lib winmm.lib"); 77.204 addAttr(rv, "OutputFile", outDll); 77.205 // Set /INCREMENTAL option. 
1 is linkIncrementalNo 77.206 addAttr(rv, "LinkIncremental", "1"); 77.207 addAttr(rv, "SuppressStartupBanner", "TRUE"); 77.208 addAttr(rv, "ModuleDefinitionFile", outDir+Util.sep+"vm.def"); 77.209 - addAttr(rv, "ProgramDatabaseFile", outDir+Util.sep+"vm.pdb"); 77.210 + addAttr(rv, "ProgramDatabaseFile", outDir+Util.sep+"jvm.pdb"); 77.211 // Set /SUBSYSTEM option. 2 is subSystemWindows 77.212 addAttr(rv, "SubSystem", "2"); 77.213 addAttr(rv, "BaseAddress", "0x8000000"); 77.214 @@ -682,6 +646,11 @@ 77.215 return rv; 77.216 } 77.217 77.218 + void getAdditionalNonKernelLinkerFlags(Vector rv) { 77.219 + extAttr(rv, "AdditionalOptions", 77.220 + "/export:AsyncGetCallTrace "); 77.221 + } 77.222 + 77.223 void getProductCompilerFlags_common(Vector rv) { 77.224 // Set /O2 option. 2 is optimizeMaxSpeed 77.225 addAttr(rv, "Optimization", "2");
78.1 --- a/src/share/tools/ProjectCreator/WinGammaPlatformVC8.java Mon Dec 27 09:30:20 2010 -0500 78.2 +++ b/src/share/tools/ProjectCreator/WinGammaPlatformVC8.java Mon Dec 27 09:56:29 2010 -0500 78.3 @@ -22,7 +22,7 @@ 78.4 * 78.5 */ 78.6 78.7 -import java.util.*; 78.8 +import java.util.Vector; 78.9 78.10 public class WinGammaPlatformVC8 extends WinGammaPlatformVC7 { 78.11 78.12 @@ -41,6 +41,9 @@ 78.13 // Set /EHsc- option. 0 is cppExceptionHandlingNo 78.14 addAttr(rv, "ExceptionHandling", "0"); 78.15 78.16 + // enable multi process builds 78.17 + extAttr(rv, "AdditionalOptions", "/MP"); 78.18 + 78.19 return rv; 78.20 } 78.21
79.1 --- a/src/share/tools/launcher/java.c Mon Dec 27 09:30:20 2010 -0500 79.2 +++ b/src/share/tools/launcher/java.c Mon Dec 27 09:56:29 2010 -0500 79.3 @@ -275,6 +275,8 @@ 79.4 jvmpath, sizeof(jvmpath), 79.5 original_argv); 79.6 79.7 + printf("Using java runtime at: %s\n", jrepath); 79.8 + 79.9 ifn.CreateJavaVM = 0; 79.10 ifn.GetDefaultJavaVMInitArgs = 0; 79.11
80.1 --- a/src/share/tools/launcher/jli_util.c Mon Dec 27 09:30:20 2010 -0500 80.2 +++ b/src/share/tools/launcher/jli_util.c Mon Dec 27 09:56:29 2010 -0500 80.3 @@ -1,3 +1,4 @@ 80.4 + 80.5 /* 80.6 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. 80.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 80.8 @@ -27,7 +28,7 @@ 80.9 #include "jli_util.h" 80.10 80.11 #ifdef GAMMA 80.12 -#ifdef _WINDOWS 80.13 +#ifdef TARGET_OS_FAMILY_windows 80.14 #define strdup _strdup 80.15 #endif 80.16 #endif
81.1 --- a/src/share/vm/adlc/adlparse.cpp Mon Dec 27 09:30:20 2010 -0500 81.2 +++ b/src/share/vm/adlc/adlparse.cpp Mon Dec 27 09:56:29 2010 -0500 81.3 @@ -95,7 +95,7 @@ 81.4 if (ident == NULL) { // Empty line 81.5 continue; // Get the next line 81.6 } 81.7 - if (!strcmp(ident, "instruct")) instr_parse(); 81.8 + if (!strcmp(ident, "instruct")) instr_parse(); 81.9 else if (!strcmp(ident, "operand")) oper_parse(); 81.10 else if (!strcmp(ident, "opclass")) opclass_parse(); 81.11 else if (!strcmp(ident, "ins_attrib")) ins_attr_parse(); 81.12 @@ -216,24 +216,23 @@ 81.13 else if (!strcmp(ident, "encode")) { 81.14 parse_err(SYNERR, "Instructions specify ins_encode, not encode\n"); 81.15 } 81.16 - else if (!strcmp(ident, "ins_encode")) 81.17 - instr->_insencode = ins_encode_parse(*instr); 81.18 - else if (!strcmp(ident, "opcode")) instr->_opcode = opcode_parse(instr); 81.19 - else if (!strcmp(ident, "size")) instr->_size = size_parse(instr); 81.20 - else if (!strcmp(ident, "effect")) effect_parse(instr); 81.21 - else if (!strcmp(ident, "expand")) instr->_exprule = expand_parse(instr); 81.22 - else if (!strcmp(ident, "rewrite")) instr->_rewrule = rewrite_parse(); 81.23 + else if (!strcmp(ident, "ins_encode")) ins_encode_parse(*instr); 81.24 + else if (!strcmp(ident, "opcode")) instr->_opcode = opcode_parse(instr); 81.25 + else if (!strcmp(ident, "size")) instr->_size = size_parse(instr); 81.26 + else if (!strcmp(ident, "effect")) effect_parse(instr); 81.27 + else if (!strcmp(ident, "expand")) instr->_exprule = expand_parse(instr); 81.28 + else if (!strcmp(ident, "rewrite")) instr->_rewrule = rewrite_parse(); 81.29 else if (!strcmp(ident, "constraint")) { 81.30 parse_err(SYNERR, "Instructions do not specify a constraint\n"); 81.31 } 81.32 else if (!strcmp(ident, "construct")) { 81.33 parse_err(SYNERR, "Instructions do not specify a construct\n"); 81.34 } 81.35 - else if (!strcmp(ident, "format")) instr->_format = format_parse(); 81.36 + else if (!strcmp(ident, "format")) 
instr->_format = format_parse(); 81.37 else if (!strcmp(ident, "interface")) { 81.38 parse_err(SYNERR, "Instructions do not specify an interface\n"); 81.39 } 81.40 - else if (!strcmp(ident, "ins_pipe")) ins_pipe_parse(*instr); 81.41 + else if (!strcmp(ident, "ins_pipe")) ins_pipe_parse(*instr); 81.42 else { // Done with staticly defined parts of instruction definition 81.43 // Check identifier to see if it is the name of an attribute 81.44 const Form *form = _globalNames[ident]; 81.45 @@ -323,7 +322,8 @@ 81.46 const char *optype2 = NULL; 81.47 // Can not have additional base operands in right side of match! 81.48 if ( ! right->base_operand( position, _globalNames, result2, name2, optype2) ) { 81.49 - assert( instr->_predicate == NULL, "ADLC does not support instruction chain rules with predicates"); 81.50 + if (instr->_predicate != NULL) 81.51 + parse_err(SYNERR, "ADLC does not support instruction chain rules with predicates"); 81.52 // Chain from input _ideal_operand_type_, 81.53 // Needed for shared roots of match-trees 81.54 ChainList *lst = (ChainList *)_AD._chainRules[optype]; 81.55 @@ -935,9 +935,9 @@ 81.56 // (2) 81.57 // If we are at a replacement variable, 81.58 // copy it and record in EncClass 81.59 - if ( _curchar == '$' ) { 81.60 + if (_curchar == '$') { 81.61 // Found replacement Variable 81.62 - char *rep_var = get_rep_var_ident_dup(); 81.63 + char* rep_var = get_rep_var_ident_dup(); 81.64 // Add flag to _strings list indicating we should check _rep_vars 81.65 encoding->add_rep_var(rep_var); 81.66 } 81.67 @@ -2774,47 +2774,122 @@ 81.68 81.69 //------------------------------ins_encode_parse_block------------------------- 81.70 // Parse the block form of ins_encode. 
See ins_encode_parse for more details 81.71 -InsEncode *ADLParser::ins_encode_parse_block(InstructForm &inst) { 81.72 +void ADLParser::ins_encode_parse_block(InstructForm& inst) { 81.73 // Create a new encoding name based on the name of the instruction 81.74 // definition, which should be unique. 81.75 - const char * prefix = "__enc_"; 81.76 - char* ec_name = (char*)malloc(strlen(inst._ident) + strlen(prefix) + 1); 81.77 + const char* prefix = "__ins_encode_"; 81.78 + char* ec_name = (char*) malloc(strlen(inst._ident) + strlen(prefix) + 1); 81.79 sprintf(ec_name, "%s%s", prefix, inst._ident); 81.80 81.81 assert(_AD._encode->encClass(ec_name) == NULL, "shouldn't already exist"); 81.82 - EncClass *encoding = _AD._encode->add_EncClass(ec_name); 81.83 + EncClass* encoding = _AD._encode->add_EncClass(ec_name); 81.84 encoding->_linenum = linenum(); 81.85 81.86 // synthesize the arguments list for the enc_class from the 81.87 // arguments to the instruct definition. 81.88 - const char * param = NULL; 81.89 + const char* param = NULL; 81.90 inst._parameters.reset(); 81.91 while ((param = inst._parameters.iter()) != NULL) { 81.92 - OperandForm *opForm = (OperandForm*)inst._localNames[param]; 81.93 + OperandForm* opForm = (OperandForm*) inst._localNames[param]; 81.94 encoding->add_parameter(opForm->_ident, param); 81.95 } 81.96 81.97 - // Add the prologue to create the MacroAssembler 81.98 - encoding->add_code("\n" 81.99 - " // Define a MacroAssembler instance for use by the encoding. The\n" 81.100 - " // name is chosen to match the __ idiom used for assembly in other\n" 81.101 - " // parts of hotspot and assumes the existence of the standard\n" 81.102 - " // #define __ _masm.\n" 81.103 - " MacroAssembler _masm(&cbuf);\n"); 81.104 + // Define a MacroAssembler instance for use by the encoding. The 81.105 + // name is chosen to match the __ idiom used for assembly in other 81.106 + // parts of hotspot and assumes the existence of the standard 81.107 + // #define __ _masm. 
81.108 + encoding->add_code(" MacroAssembler _masm(&cbuf);\n"); 81.109 81.110 // Parse the following %{ }% block 81.111 - enc_class_parse_block(encoding, ec_name); 81.112 + ins_encode_parse_block_impl(inst, encoding, ec_name); 81.113 81.114 // Build an encoding rule which invokes the encoding rule we just 81.115 // created, passing all arguments that we received. 81.116 - InsEncode *encrule = new InsEncode(); // Encode class for instruction 81.117 - NameAndList *params = encrule->add_encode(ec_name); 81.118 + InsEncode* encrule = new InsEncode(); // Encode class for instruction 81.119 + NameAndList* params = encrule->add_encode(ec_name); 81.120 inst._parameters.reset(); 81.121 while ((param = inst._parameters.iter()) != NULL) { 81.122 params->add_entry(param); 81.123 } 81.124 81.125 - return encrule; 81.126 + // Set encode class of this instruction. 81.127 + inst._insencode = encrule; 81.128 +} 81.129 + 81.130 + 81.131 +void ADLParser::ins_encode_parse_block_impl(InstructForm& inst, EncClass* encoding, char* ec_name) { 81.132 + skipws_no_preproc(); // Skip leading whitespace 81.133 + // Prepend location descriptor, for debugging; cf. 
ADLParser::find_cpp_block 81.134 + if (_AD._adlocation_debug) { 81.135 + encoding->add_code(get_line_string()); 81.136 + } 81.137 + 81.138 + // Collect the parts of the encode description 81.139 + // (1) strings that are passed through to output 81.140 + // (2) replacement/substitution variable, preceeded by a '$' 81.141 + while ((_curchar != '%') && (*(_ptr+1) != '}')) { 81.142 + 81.143 + // (1) 81.144 + // Check if there is a string to pass through to output 81.145 + char *start = _ptr; // Record start of the next string 81.146 + while ((_curchar != '$') && ((_curchar != '%') || (*(_ptr+1) != '}')) ) { 81.147 + // If at the start of a comment, skip past it 81.148 + if( (_curchar == '/') && ((*(_ptr+1) == '/') || (*(_ptr+1) == '*')) ) { 81.149 + skipws_no_preproc(); 81.150 + } else { 81.151 + // ELSE advance to the next character, or start of the next line 81.152 + next_char_or_line(); 81.153 + } 81.154 + } 81.155 + // If a string was found, terminate it and record in EncClass 81.156 + if (start != _ptr) { 81.157 + *_ptr = '\0'; // Terminate the string 81.158 + encoding->add_code(start); 81.159 + } 81.160 + 81.161 + // (2) 81.162 + // If we are at a replacement variable, 81.163 + // copy it and record in EncClass 81.164 + if (_curchar == '$') { 81.165 + // Found replacement Variable 81.166 + char* rep_var = get_rep_var_ident_dup(); 81.167 + 81.168 + // Add flag to _strings list indicating we should check _rep_vars 81.169 + encoding->add_rep_var(rep_var); 81.170 + 81.171 + skipws(); 81.172 + 81.173 + // Check if this instruct is a MachConstantNode. 81.174 + if (strcmp(rep_var, "constanttablebase") == 0) { 81.175 + // This instruct is a MachConstantNode. 
81.176 + inst.set_is_mach_constant(true); 81.177 + 81.178 + if (_curchar == '(') { 81.179 + parse_err(SYNERR, "constanttablebase in instruct %s cannot have an argument (only constantaddress and constantoffset)", ec_name); 81.180 + return; 81.181 + } 81.182 + } 81.183 + else if ((strcmp(rep_var, "constantaddress") == 0) || 81.184 + (strcmp(rep_var, "constantoffset") == 0)) { 81.185 + // This instruct is a MachConstantNode. 81.186 + inst.set_is_mach_constant(true); 81.187 + 81.188 + // If the constant keyword has an argument, parse it. 81.189 + if (_curchar == '(') constant_parse(inst); 81.190 + } 81.191 + } 81.192 + } // end while part of format description 81.193 + next_char(); // Skip '%' 81.194 + next_char(); // Skip '}' 81.195 + 81.196 + skipws(); 81.197 + 81.198 + if (_AD._adlocation_debug) { 81.199 + encoding->add_code(end_line_marker()); 81.200 + } 81.201 + 81.202 + // Debug Stuff 81.203 + if (_AD._adl_debug > 1) fprintf(stderr, "EncodingClass Form: %s\n", ec_name); 81.204 } 81.205 81.206 81.207 @@ -2838,7 +2913,7 @@ 81.208 // 81.209 // making it more compact to take advantage of the MacroAssembler and 81.210 // placing the assembly closer to it's use by instructions. 
81.211 -InsEncode *ADLParser::ins_encode_parse(InstructForm &inst) { 81.212 +void ADLParser::ins_encode_parse(InstructForm& inst) { 81.213 81.214 // Parse encode class name 81.215 skipws(); // Skip whitespace 81.216 @@ -2849,11 +2924,12 @@ 81.217 next_char(); // Skip '{' 81.218 81.219 // Parse the block form of ins_encode 81.220 - return ins_encode_parse_block(inst); 81.221 + ins_encode_parse_block(inst); 81.222 + return; 81.223 } 81.224 81.225 parse_err(SYNERR, "missing '%%{' or '(' in ins_encode definition\n"); 81.226 - return NULL; 81.227 + return; 81.228 } 81.229 next_char(); // move past '(' 81.230 skipws(); 81.231 @@ -2866,7 +2942,7 @@ 81.232 ec_name = get_ident(); 81.233 if (ec_name == NULL) { 81.234 parse_err(SYNERR, "Invalid encode class name after 'ins_encode('.\n"); 81.235 - return NULL; 81.236 + return; 81.237 } 81.238 // Check that encoding is defined in the encode section 81.239 EncClass *encode_class = _AD._encode->encClass(ec_name); 81.240 @@ -2898,7 +2974,7 @@ 81.241 (Opcode::as_opcode_type(param) == Opcode::NOT_AN_OPCODE) && 81.242 ((_AD._register == NULL ) || (_AD._register->getRegDef(param) == NULL)) ) { 81.243 parse_err(SYNERR, "Using non-locally defined parameter %s for encoding %s.\n", param, ec_name); 81.244 - return NULL; 81.245 + return; 81.246 } 81.247 params->add_entry(param); 81.248 81.249 @@ -2915,7 +2991,7 @@ 81.250 // Only ',' or ')' are valid after a parameter name 81.251 parse_err(SYNERR, "expected ',' or ')' after parameter %s.\n", 81.252 ec_name); 81.253 - return NULL; 81.254 + return; 81.255 } 81.256 81.257 } else { 81.258 @@ -2923,11 +2999,11 @@ 81.259 // Did not find a parameter 81.260 if (_curchar == ',') { 81.261 parse_err(SYNERR, "Expected encode parameter before ',' in encoding %s.\n", ec_name); 81.262 - return NULL; 81.263 + return; 81.264 } 81.265 if (_curchar != ')') { 81.266 parse_err(SYNERR, "Expected ')' after encode parameters.\n"); 81.267 - return NULL; 81.268 + return; 81.269 } 81.270 } 81.271 } // WHILE loop 
collecting parameters 81.272 @@ -2944,7 +3020,7 @@ 81.273 else if ( _curchar != ')' ) { 81.274 // If not a ',' then only a ')' is allowed 81.275 parse_err(SYNERR, "Expected ')' after encoding %s.\n", ec_name); 81.276 - return NULL; 81.277 + return; 81.278 } 81.279 81.280 // Check for ',' separating parameters 81.281 @@ -2956,14 +3032,14 @@ 81.282 } // done parsing ins_encode methods and their parameters 81.283 if (_curchar != ')') { 81.284 parse_err(SYNERR, "Missing ')' at end of ins_encode description.\n"); 81.285 - return NULL; 81.286 + return; 81.287 } 81.288 next_char(); // move past ')' 81.289 skipws(); // Skip leading whitespace 81.290 81.291 if ( _curchar != ';' ) { 81.292 parse_err(SYNERR, "Missing ';' at end of ins_encode.\n"); 81.293 - return NULL; 81.294 + return; 81.295 } 81.296 next_char(); // move past ';' 81.297 skipws(); // be friendly to oper_parse() 81.298 @@ -2971,7 +3047,113 @@ 81.299 // Debug Stuff 81.300 if (_AD._adl_debug > 1) fprintf(stderr,"Instruction Encode: %s\n", ec_name); 81.301 81.302 - return encrule; 81.303 + // Set encode class of this instruction. 81.304 + inst._insencode = encrule; 81.305 +} 81.306 + 81.307 + 81.308 +//------------------------------constant_parse--------------------------------- 81.309 +// Parse a constant expression. 81.310 +void ADLParser::constant_parse(InstructForm& inst) { 81.311 + // Create a new encoding name based on the name of the instruction 81.312 + // definition, which should be unique. 81.313 + const char* prefix = "__constant_"; 81.314 + char* ec_name = (char*) malloc(strlen(inst._ident) + strlen(prefix) + 1); 81.315 + sprintf(ec_name, "%s%s", prefix, inst._ident); 81.316 + 81.317 + assert(_AD._encode->encClass(ec_name) == NULL, "shouldn't already exist"); 81.318 + EncClass* encoding = _AD._encode->add_EncClass(ec_name); 81.319 + encoding->_linenum = linenum(); 81.320 + 81.321 + // synthesize the arguments list for the enc_class from the 81.322 + // arguments to the instruct definition. 
81.323 + const char* param = NULL; 81.324 + inst._parameters.reset(); 81.325 + while ((param = inst._parameters.iter()) != NULL) { 81.326 + OperandForm* opForm = (OperandForm*) inst._localNames[param]; 81.327 + encoding->add_parameter(opForm->_ident, param); 81.328 + } 81.329 + 81.330 + // Parse the following ( ) expression. 81.331 + constant_parse_expression(encoding, ec_name); 81.332 + 81.333 + // Build an encoding rule which invokes the encoding rule we just 81.334 + // created, passing all arguments that we received. 81.335 + InsEncode* encrule = new InsEncode(); // Encode class for instruction 81.336 + NameAndList* params = encrule->add_encode(ec_name); 81.337 + inst._parameters.reset(); 81.338 + while ((param = inst._parameters.iter()) != NULL) { 81.339 + params->add_entry(param); 81.340 + } 81.341 + 81.342 + // Set encode class of this instruction. 81.343 + inst._constant = encrule; 81.344 +} 81.345 + 81.346 + 81.347 +//------------------------------constant_parse_expression---------------------- 81.348 +void ADLParser::constant_parse_expression(EncClass* encoding, char* ec_name) { 81.349 + skipws(); 81.350 + 81.351 + // Prepend location descriptor, for debugging; cf. ADLParser::find_cpp_block 81.352 + if (_AD._adlocation_debug) { 81.353 + encoding->add_code(get_line_string()); 81.354 + } 81.355 + 81.356 + // Start code line. 81.357 + encoding->add_code(" _constant = C->constant_table().add"); 81.358 + 81.359 + // Parse everything in ( ) expression. 81.360 + encoding->add_code("("); 81.361 + next_char(); // Skip '(' 81.362 + int parens_depth = 1; 81.363 + 81.364 + // Collect the parts of the constant expression. 
81.365 + // (1) strings that are passed through to output 81.366 + // (2) replacement/substitution variable, preceeded by a '$' 81.367 + while (parens_depth > 0) { 81.368 + if (_curchar == '(') { 81.369 + parens_depth++; 81.370 + encoding->add_code("("); 81.371 + next_char(); 81.372 + } 81.373 + else if (_curchar == ')') { 81.374 + parens_depth--; 81.375 + encoding->add_code(")"); 81.376 + next_char(); 81.377 + } 81.378 + else { 81.379 + // (1) 81.380 + // Check if there is a string to pass through to output 81.381 + char *start = _ptr; // Record start of the next string 81.382 + while ((_curchar != '$') && (_curchar != '(') && (_curchar != ')')) { 81.383 + next_char(); 81.384 + } 81.385 + // If a string was found, terminate it and record in EncClass 81.386 + if (start != _ptr) { 81.387 + *_ptr = '\0'; // Terminate the string 81.388 + encoding->add_code(start); 81.389 + } 81.390 + 81.391 + // (2) 81.392 + // If we are at a replacement variable, copy it and record in EncClass. 81.393 + if (_curchar == '$') { 81.394 + // Found replacement Variable 81.395 + char* rep_var = get_rep_var_ident_dup(); 81.396 + encoding->add_rep_var(rep_var); 81.397 + } 81.398 + } 81.399 + } 81.400 + 81.401 + // Finish code line. 81.402 + encoding->add_code(";"); 81.403 + 81.404 + if (_AD._adlocation_debug) { 81.405 + encoding->add_code(end_line_marker()); 81.406 + } 81.407 + 81.408 + // Debug Stuff 81.409 + if (_AD._adl_debug > 1) fprintf(stderr, "EncodingClass Form: %s\n", ec_name); 81.410 } 81.411 81.412
82.1 --- a/src/share/vm/adlc/adlparse.hpp Mon Dec 27 09:30:20 2010 -0500 82.2 +++ b/src/share/vm/adlc/adlparse.hpp Mon Dec 27 09:56:29 2010 -0500 82.3 @@ -156,8 +156,13 @@ 82.4 82.5 Attribute *attr_parse(char *ident);// Parse instr/operand attribute rule 82.6 // Parse instruction encode rule 82.7 - InsEncode *ins_encode_parse(InstructForm &inst); 82.8 - InsEncode *ins_encode_parse_block(InstructForm &inst); 82.9 + void ins_encode_parse(InstructForm &inst); 82.10 + void ins_encode_parse_block(InstructForm &inst); 82.11 + void ins_encode_parse_block_impl(InstructForm& inst, EncClass* encoding, char* ec_name); 82.12 + 82.13 + void constant_parse(InstructForm& inst); 82.14 + void constant_parse_expression(EncClass* encoding, char* ec_name); 82.15 + 82.16 Opcode *opcode_parse(InstructForm *insr); // Parse instruction opcode 82.17 char *size_parse(InstructForm *insr); // Parse instruction size 82.18 Interface *interface_parse(); // Parse operand interface rule
83.1 --- a/src/share/vm/adlc/archDesc.hpp Mon Dec 27 09:30:20 2010 -0500 83.2 +++ b/src/share/vm/adlc/archDesc.hpp Mon Dec 27 09:56:29 2010 -0500 83.3 @@ -126,7 +126,6 @@ 83.4 void chain_rule(FILE *fp, const char *indent, const char *ideal, 83.5 const Expr *icost, const char *irule, 83.6 Dict &operands_chained_from, ProductionState &status); 83.7 - void chain_rule_c(FILE *fp, char *indent, char *ideal, char *irule); // %%%%% TODO: remove this 83.8 void expand_opclass(FILE *fp, const char *indent, const Expr *cost, 83.9 const char *result_type, ProductionState &status); 83.10 Expr *calc_cost(FILE *fp, const char *spaces, MatchList &mList, ProductionState &status); 83.11 @@ -301,13 +300,18 @@ 83.12 void buildMachNodeGenerator(FILE *fp_cpp); 83.13 83.14 // Generator for Expand methods for instructions with expand rules 83.15 - void defineExpand(FILE *fp, InstructForm *node); 83.16 + void defineExpand (FILE *fp, InstructForm *node); 83.17 // Generator for Peephole methods for instructions with peephole rules 83.18 - void definePeephole(FILE *fp, InstructForm *node); 83.19 + void definePeephole (FILE *fp, InstructForm *node); 83.20 // Generator for Size methods for instructions 83.21 - void defineSize(FILE *fp, InstructForm &node); 83.22 + void defineSize (FILE *fp, InstructForm &node); 83.23 + 83.24 +public: 83.25 + // Generator for EvalConstantValue methods for instructions 83.26 + void defineEvalConstant(FILE *fp, InstructForm &node); 83.27 // Generator for Emit methods for instructions 83.28 - void defineEmit(FILE *fp, InstructForm &node); 83.29 + void defineEmit (FILE *fp, InstructForm &node); 83.30 + 83.31 // Define a MachOper encode method 83.32 void define_oper_interface(FILE *fp, OperandForm &oper, FormDict &globals, 83.33 const char *name, const char *encoding);
84.1 --- a/src/share/vm/adlc/formssel.cpp Mon Dec 27 09:30:20 2010 -0500 84.2 +++ b/src/share/vm/adlc/formssel.cpp Mon Dec 27 09:56:29 2010 -0500 84.3 @@ -30,11 +30,14 @@ 84.4 InstructForm::InstructForm(const char *id, bool ideal_only) 84.5 : _ident(id), _ideal_only(ideal_only), 84.6 _localNames(cmpstr, hashstr, Form::arena), 84.7 - _effects(cmpstr, hashstr, Form::arena) { 84.8 + _effects(cmpstr, hashstr, Form::arena), 84.9 + _is_mach_constant(false) 84.10 +{ 84.11 _ftype = Form::INS; 84.12 84.13 _matrule = NULL; 84.14 _insencode = NULL; 84.15 + _constant = NULL; 84.16 _opcode = NULL; 84.17 _size = NULL; 84.18 _attribs = NULL; 84.19 @@ -58,11 +61,14 @@ 84.20 InstructForm::InstructForm(const char *id, InstructForm *instr, MatchRule *rule) 84.21 : _ident(id), _ideal_only(false), 84.22 _localNames(instr->_localNames), 84.23 - _effects(instr->_effects) { 84.24 + _effects(instr->_effects), 84.25 + _is_mach_constant(false) 84.26 +{ 84.27 _ftype = Form::INS; 84.28 84.29 _matrule = rule; 84.30 _insencode = instr->_insencode; 84.31 + _constant = instr->_constant; 84.32 _opcode = instr->_opcode; 84.33 _size = instr->_size; 84.34 _attribs = instr->_attribs; 84.35 @@ -1094,6 +1100,9 @@ 84.36 else if (is_ideal_nop()) { 84.37 return "MachNopNode"; 84.38 } 84.39 + else if (is_mach_constant()) { 84.40 + return "MachConstantNode"; 84.41 + } 84.42 else if (captures_bottom_type(globals)) { 84.43 return "MachTypeNode"; 84.44 } else { 84.45 @@ -1190,6 +1199,21 @@ 84.46 // 84.47 // Generate the format call for the replacement variable 84.48 void InstructForm::rep_var_format(FILE *fp, const char *rep_var) { 84.49 + // Handle special constant table variables. 
84.50 + if (strcmp(rep_var, "constanttablebase") == 0) { 84.51 + fprintf(fp, "char reg[128]; ra->dump_register(in(mach_constant_base_node_input()), reg);\n"); 84.52 + fprintf(fp, "st->print(\"%%s\");\n"); 84.53 + return; 84.54 + } 84.55 + if (strcmp(rep_var, "constantoffset") == 0) { 84.56 + fprintf(fp, "st->print(\"#%%d\", constant_offset());\n"); 84.57 + return; 84.58 + } 84.59 + if (strcmp(rep_var, "constantaddress") == 0) { 84.60 + fprintf(fp, "st->print(\"constant table base + #%%d\", constant_offset());\n"); 84.61 + return; 84.62 + } 84.63 + 84.64 // Find replacement variable's type 84.65 const Form *form = _localNames[rep_var]; 84.66 if (form == NULL) { 84.67 @@ -1348,6 +1372,7 @@ 84.68 fprintf(fp,"\nInstruction: %s\n", (_ident?_ident:"")); 84.69 if (_matrule) _matrule->output(fp); 84.70 if (_insencode) _insencode->output(fp); 84.71 + if (_constant) _constant->output(fp); 84.72 if (_opcode) _opcode->output(fp); 84.73 if (_attribs) _attribs->output(fp); 84.74 if (_predicate) _predicate->output(fp);
85.1 --- a/src/share/vm/adlc/formssel.hpp Mon Dec 27 09:30:20 2010 -0500 85.2 +++ b/src/share/vm/adlc/formssel.hpp Mon Dec 27 09:56:29 2010 -0500 85.3 @@ -74,15 +74,16 @@ 85.4 //------------------------------InstructForm----------------------------------- 85.5 class InstructForm : public Form { 85.6 private: 85.7 - bool _ideal_only; // Not a user-defined instruction 85.8 + bool _ideal_only; // Not a user-defined instruction 85.9 // Members used for tracking CISC-spilling 85.10 - uint _cisc_spill_operand;// Which operand may cisc-spill 85.11 + uint _cisc_spill_operand;// Which operand may cisc-spill 85.12 void set_cisc_spill_operand(uint op_index) { _cisc_spill_operand = op_index; } 85.13 - bool _is_cisc_alternate; 85.14 + bool _is_cisc_alternate; 85.15 InstructForm *_cisc_spill_alternate;// cisc possible replacement 85.16 const char *_cisc_reg_mask_name; 85.17 InstructForm *_short_branch_form; 85.18 bool _is_short_branch; 85.19 + bool _is_mach_constant; // true if Node is a MachConstantNode 85.20 uint _alignment; 85.21 85.22 public: 85.23 @@ -94,6 +95,7 @@ 85.24 Opcode *_opcode; // Encoding of the opcode for instruction 85.25 char *_size; // Size of instruction 85.26 InsEncode *_insencode; // Encoding class instruction belongs to 85.27 + InsEncode *_constant; // Encoding class constant value belongs to 85.28 Attribute *_attribs; // List of Attribute rules 85.29 Predicate *_predicate; // Predicate test for this instruction 85.30 FormDict _effects; // Dictionary of effect rules 85.31 @@ -251,6 +253,9 @@ 85.32 bool is_short_branch() { return _is_short_branch; } 85.33 void set_short_branch(bool val) { _is_short_branch = val; } 85.34 85.35 + bool is_mach_constant() const { return _is_mach_constant; } 85.36 + void set_is_mach_constant(bool x) { _is_mach_constant = x; } 85.37 + 85.38 InstructForm *short_branch_form() { return _short_branch_form; } 85.39 bool has_short_branch_form() { return _short_branch_form != NULL; } 85.40 // Output short branch prototypes and method 
bodies
86.1 --- a/src/share/vm/adlc/output_c.cpp Mon Dec 27 09:30:20 2010 -0500 86.2 +++ b/src/share/vm/adlc/output_c.cpp Mon Dec 27 09:56:29 2010 -0500 86.3 @@ -1496,8 +1496,8 @@ 86.4 unsigned i; 86.5 86.6 // Generate Expand function header 86.7 - fprintf(fp,"MachNode *%sNode::Expand(State *state, Node_List &proj_list, Node* mem) {\n", node->_ident); 86.8 - fprintf(fp,"Compile* C = Compile::current();\n"); 86.9 + fprintf(fp, "MachNode* %sNode::Expand(State* state, Node_List& proj_list, Node* mem) {\n", node->_ident); 86.10 + fprintf(fp, " Compile* C = Compile::current();\n"); 86.11 // Generate expand code 86.12 if( node->expands() ) { 86.13 const char *opid; 86.14 @@ -1818,6 +1818,12 @@ 86.15 } 86.16 } 86.17 86.18 + // If the node is a MachConstantNode, insert the MachConstantBaseNode edge. 86.19 + // NOTE: this edge must be the last input (see MachConstantNode::mach_constant_base_node_input). 86.20 + if (node->is_mach_constant()) { 86.21 + fprintf(fp," add_req(C->mach_constant_base_node());\n"); 86.22 + } 86.23 + 86.24 fprintf(fp,"\n"); 86.25 if( node->expands() ) { 86.26 fprintf(fp," return result;\n"); 86.27 @@ -1924,7 +1930,17 @@ 86.28 // No state needed. 
86.29 assert( _opclass == NULL, 86.30 "'primary', 'secondary' and 'tertiary' don't follow operand."); 86.31 - } else { 86.32 + } 86.33 + else if ((strcmp(rep_var, "constanttablebase") == 0) || 86.34 + (strcmp(rep_var, "constantoffset") == 0) || 86.35 + (strcmp(rep_var, "constantaddress") == 0)) { 86.36 + if (!_inst.is_mach_constant()) { 86.37 + _AD.syntax_err(_encoding._linenum, 86.38 + "Replacement variable %s not allowed in instruct %s (only in MachConstantNode).\n", 86.39 + rep_var, _encoding._name); 86.40 + } 86.41 + } 86.42 + else { 86.43 // Lookup its position in parameter list 86.44 int param_no = _encoding.rep_var_index(rep_var); 86.45 if ( param_no == -1 ) { 86.46 @@ -2380,6 +2396,15 @@ 86.47 rep_var, _inst._ident, _encoding._name); 86.48 } 86.49 } 86.50 + else if (strcmp(rep_var, "constanttablebase") == 0) { 86.51 + fprintf(_fp, "as_Register(ra_->get_encode(in(mach_constant_base_node_input())))"); 86.52 + } 86.53 + else if (strcmp(rep_var, "constantoffset") == 0) { 86.54 + fprintf(_fp, "constant_offset()"); 86.55 + } 86.56 + else if (strcmp(rep_var, "constantaddress") == 0) { 86.57 + fprintf(_fp, "InternalAddress(__ code()->consts()->start() + constant_offset())"); 86.58 + } 86.59 else { 86.60 // Lookup its position in parameter list 86.61 int param_no = _encoding.rep_var_index(rep_var); 86.62 @@ -2465,37 +2490,39 @@ 86.63 fprintf(fp,"}\n"); 86.64 } 86.65 86.66 -void ArchDesc::defineEmit(FILE *fp, InstructForm &inst) { 86.67 - InsEncode *ins_encode = inst._insencode; 86.68 +// defineEmit ----------------------------------------------------------------- 86.69 +void ArchDesc::defineEmit(FILE* fp, InstructForm& inst) { 86.70 + InsEncode* encode = inst._insencode; 86.71 86.72 // (1) 86.73 // Output instruction's emit prototype 86.74 - fprintf(fp,"void %sNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {\n", 86.75 - inst._ident); 86.76 + fprintf(fp, "void %sNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {\n", inst._ident); 86.77 86.78 // If user 
did not define an encode section, 86.79 // provide stub that does not generate any machine code. 86.80 - if( (_encode == NULL) || (ins_encode == NULL) ) { 86.81 + if( (_encode == NULL) || (encode == NULL) ) { 86.82 fprintf(fp, " // User did not define an encode section.\n"); 86.83 - fprintf(fp,"}\n"); 86.84 + fprintf(fp, "}\n"); 86.85 return; 86.86 } 86.87 86.88 // Save current instruction's starting address (helps with relocation). 86.89 - fprintf(fp, " cbuf.set_insts_mark();\n"); 86.90 - 86.91 - // // // idx0 is only needed for syntactic purposes and only by "storeSSI" 86.92 - // fprintf( fp, " unsigned idx0 = 0;\n"); 86.93 + fprintf(fp, " cbuf.set_insts_mark();\n"); 86.94 + 86.95 + // For MachConstantNodes which are ideal jump nodes, fill the jump table. 86.96 + if (inst.is_mach_constant() && inst.is_ideal_jump()) { 86.97 + fprintf(fp, " ra_->C->constant_table().fill_jump_table(cbuf, (MachConstantNode*) this, _index2label);\n"); 86.98 + } 86.99 86.100 // Output each operand's offset into the array of registers. 
86.101 - inst.index_temps( fp, _globalNames ); 86.102 + inst.index_temps(fp, _globalNames); 86.103 86.104 // Output this instruction's encodings 86.105 const char *ec_name; 86.106 bool user_defined = false; 86.107 - ins_encode->reset(); 86.108 - while ( (ec_name = ins_encode->encode_class_iter()) != NULL ) { 86.109 - fprintf(fp, " {"); 86.110 + encode->reset(); 86.111 + while ((ec_name = encode->encode_class_iter()) != NULL) { 86.112 + fprintf(fp, " {\n"); 86.113 // Output user-defined encoding 86.114 user_defined = true; 86.115 86.116 @@ -2507,25 +2534,25 @@ 86.117 abort(); 86.118 } 86.119 86.120 - if (ins_encode->current_encoding_num_args() != encoding->num_args()) { 86.121 - globalAD->syntax_err(ins_encode->_linenum, "In %s: passing %d arguments to %s but expecting %d", 86.122 - inst._ident, ins_encode->current_encoding_num_args(), 86.123 + if (encode->current_encoding_num_args() != encoding->num_args()) { 86.124 + globalAD->syntax_err(encode->_linenum, "In %s: passing %d arguments to %s but expecting %d", 86.125 + inst._ident, encode->current_encoding_num_args(), 86.126 ec_name, encoding->num_args()); 86.127 } 86.128 86.129 - DefineEmitState pending(fp, *this, *encoding, *ins_encode, inst ); 86.130 + DefineEmitState pending(fp, *this, *encoding, *encode, inst); 86.131 encoding->_code.reset(); 86.132 encoding->_rep_vars.reset(); 86.133 // Process list of user-defined strings, 86.134 // and occurrences of replacement variables. 86.135 // Replacement Vars are pushed into a list and then output 86.136 - while ( (ec_code = encoding->_code.iter()) != NULL ) { 86.137 - if ( ! 
encoding->_code.is_signal( ec_code ) ) { 86.138 + while ((ec_code = encoding->_code.iter()) != NULL) { 86.139 + if (!encoding->_code.is_signal(ec_code)) { 86.140 // Emit pending code 86.141 pending.emit(); 86.142 pending.clear(); 86.143 // Emit this code section 86.144 - fprintf(fp,"%s", ec_code); 86.145 + fprintf(fp, "%s", ec_code); 86.146 } else { 86.147 // A replacement variable or one of its subfields 86.148 // Obtain replacement variable from list 86.149 @@ -2536,7 +2563,7 @@ 86.150 // Emit pending code 86.151 pending.emit(); 86.152 pending.clear(); 86.153 - fprintf(fp, "}\n"); 86.154 + fprintf(fp, " }\n"); 86.155 } // end while instruction's encodings 86.156 86.157 // Check if user stated which encoding to user 86.158 @@ -2545,7 +2572,86 @@ 86.159 } 86.160 86.161 // (3) and (4) 86.162 - fprintf(fp,"}\n"); 86.163 + fprintf(fp, "}\n"); 86.164 +} 86.165 + 86.166 +// defineEvalConstant --------------------------------------------------------- 86.167 +void ArchDesc::defineEvalConstant(FILE* fp, InstructForm& inst) { 86.168 + InsEncode* encode = inst._constant; 86.169 + 86.170 + // (1) 86.171 + // Output instruction's emit prototype 86.172 + fprintf(fp, "void %sNode::eval_constant(Compile* C) {\n", inst._ident); 86.173 + 86.174 + // For ideal jump nodes, allocate a jump table. 86.175 + if (inst.is_ideal_jump()) { 86.176 + fprintf(fp, " _constant = C->constant_table().allocate_jump_table(this);\n"); 86.177 + } 86.178 + 86.179 + // If user did not define an encode section, 86.180 + // provide stub that does not generate any machine code. 
86.181 + if ((_encode == NULL) || (encode == NULL)) { 86.182 + fprintf(fp, " // User did not define an encode section.\n"); 86.183 + fprintf(fp, "}\n"); 86.184 + return; 86.185 + } 86.186 + 86.187 + // Output this instruction's encodings 86.188 + const char *ec_name; 86.189 + bool user_defined = false; 86.190 + encode->reset(); 86.191 + while ((ec_name = encode->encode_class_iter()) != NULL) { 86.192 + fprintf(fp, " {\n"); 86.193 + // Output user-defined encoding 86.194 + user_defined = true; 86.195 + 86.196 + const char *ec_code = NULL; 86.197 + const char *ec_rep_var = NULL; 86.198 + EncClass *encoding = _encode->encClass(ec_name); 86.199 + if (encoding == NULL) { 86.200 + fprintf(stderr, "User did not define contents of this encode_class: %s\n", ec_name); 86.201 + abort(); 86.202 + } 86.203 + 86.204 + if (encode->current_encoding_num_args() != encoding->num_args()) { 86.205 + globalAD->syntax_err(encode->_linenum, "In %s: passing %d arguments to %s but expecting %d", 86.206 + inst._ident, encode->current_encoding_num_args(), 86.207 + ec_name, encoding->num_args()); 86.208 + } 86.209 + 86.210 + DefineEmitState pending(fp, *this, *encoding, *encode, inst); 86.211 + encoding->_code.reset(); 86.212 + encoding->_rep_vars.reset(); 86.213 + // Process list of user-defined strings, 86.214 + // and occurrences of replacement variables. 
86.215 + // Replacement Vars are pushed into a list and then output 86.216 + while ((ec_code = encoding->_code.iter()) != NULL) { 86.217 + if (!encoding->_code.is_signal(ec_code)) { 86.218 + // Emit pending code 86.219 + pending.emit(); 86.220 + pending.clear(); 86.221 + // Emit this code section 86.222 + fprintf(fp, "%s", ec_code); 86.223 + } else { 86.224 + // A replacement variable or one of its subfields 86.225 + // Obtain replacement variable from list 86.226 + ec_rep_var = encoding->_rep_vars.iter(); 86.227 + pending.add_rep_var(ec_rep_var); 86.228 + } 86.229 + } 86.230 + // Emit pending code 86.231 + pending.emit(); 86.232 + pending.clear(); 86.233 + fprintf(fp, " }\n"); 86.234 + } // end while instruction's encodings 86.235 + 86.236 + // Check if user stated which encoding to user 86.237 + if (user_defined == false) { 86.238 + fprintf(fp, " // User did not define which encode class to use.\n"); 86.239 + } 86.240 + 86.241 + // (3) and (4) 86.242 + fprintf(fp, "}\n"); 86.243 } 86.244 86.245 // --------------------------------------------------------------------------- 86.246 @@ -2952,6 +3058,7 @@ 86.247 // If there are multiple defs/kills, or an explicit expand rule, build rule 86.248 if( instr->expands() || instr->needs_projections() || 86.249 instr->has_temps() || 86.250 + instr->is_mach_constant() || 86.251 instr->_matrule != NULL && 86.252 instr->num_opnds() != instr->num_unique_opnds() ) 86.253 defineExpand(_CPP_EXPAND_file._fp, instr); 86.254 @@ -3032,8 +3139,9 @@ 86.255 // Ensure this is a machine-world instruction 86.256 if ( instr->ideal_only() ) continue; 86.257 86.258 - if (instr->_insencode) defineEmit(fp, *instr); 86.259 - if (instr->_size) defineSize(fp, *instr); 86.260 + if (instr->_insencode) defineEmit (fp, *instr); 86.261 + if (instr->is_mach_constant()) defineEvalConstant(fp, *instr); 86.262 + if (instr->_size) defineSize (fp, *instr); 86.263 86.264 // side-call to generate output that used to be in the header file: 86.265 extern void 
gen_inst_format(FILE *fp, FormDict &globals, InstructForm &oper, bool for_c_file);
87.1 --- a/src/share/vm/adlc/output_h.cpp Mon Dec 27 09:30:20 2010 -0500 87.2 +++ b/src/share/vm/adlc/output_h.cpp Mon Dec 27 09:56:29 2010 -0500 87.3 @@ -1550,7 +1550,12 @@ 87.4 } 87.5 87.6 // virtual functions for encode and format 87.7 - // 87.8 + 87.9 + // Virtual function for evaluating the constant. 87.10 + if (instr->is_mach_constant()) { 87.11 + fprintf(fp," virtual void eval_constant(Compile* C);\n"); 87.12 + } 87.13 + 87.14 // Output the opcode function and the encode function here using the 87.15 // encoding class information in the _insencode slot. 87.16 if ( instr->_insencode ) { 87.17 @@ -1559,7 +1564,7 @@ 87.18 87.19 // virtual function for getting the size of an instruction 87.20 if ( instr->_size ) { 87.21 - fprintf(fp," virtual uint size(PhaseRegAlloc *ra_) const;\n"); 87.22 + fprintf(fp," virtual uint size(PhaseRegAlloc *ra_) const;\n"); 87.23 } 87.24 87.25 // Return the top-level ideal opcode. 87.26 @@ -1752,6 +1757,7 @@ 87.27 // Virtual methods which are only generated to override base class 87.28 if( instr->expands() || instr->needs_projections() || 87.29 instr->has_temps() || 87.30 + instr->is_mach_constant() || 87.31 instr->_matrule != NULL && 87.32 instr->num_opnds() != instr->num_unique_opnds() ) { 87.33 fprintf(fp," virtual MachNode *Expand(State *state, Node_List &proj_list, Node* mem);\n"); 87.34 @@ -1780,24 +1786,6 @@ 87.35 // Declare short branch methods, if applicable 87.36 instr->declare_short_branch_methods(fp); 87.37 87.38 - // Instructions containing a constant that will be entered into the 87.39 - // float/double table redefine the base virtual function 87.40 -#ifdef SPARC 87.41 - // Sparc doubles entries in the constant table require more space for 87.42 - // alignment. 
(expires 9/98) 87.43 - int table_entries = (3 * instr->num_consts( _globalNames, Form::idealD )) 87.44 - + instr->num_consts( _globalNames, Form::idealF ); 87.45 -#else 87.46 - int table_entries = instr->num_consts( _globalNames, Form::idealD ) 87.47 - + instr->num_consts( _globalNames, Form::idealF ); 87.48 -#endif 87.49 - if( table_entries != 0 ) { 87.50 - fprintf(fp," virtual int const_size() const {"); 87.51 - fprintf(fp, " return %d;", table_entries); 87.52 - fprintf(fp, " }\n"); 87.53 - } 87.54 - 87.55 - 87.56 // See if there is an "ins_pipe" declaration for this instruction 87.57 if (instr->_ins_pipe) { 87.58 fprintf(fp," static const Pipeline *pipeline_class();\n");
88.1 --- a/src/share/vm/asm/assembler.hpp Mon Dec 27 09:30:20 2010 -0500 88.2 +++ b/src/share/vm/asm/assembler.hpp Mon Dec 27 09:56:29 2010 -0500 88.3 @@ -292,7 +292,16 @@ 88.4 address start_a_const(int required_space, int required_align = sizeof(double)); 88.5 void end_a_const(); 88.6 88.7 - // fp constants support 88.8 + // constants support 88.9 + address long_constant(jlong c) { 88.10 + address ptr = start_a_const(sizeof(c), sizeof(c)); 88.11 + if (ptr != NULL) { 88.12 + *(jlong*)ptr = c; 88.13 + _code_pos = ptr + sizeof(c); 88.14 + end_a_const(); 88.15 + } 88.16 + return ptr; 88.17 + } 88.18 address double_constant(jdouble c) { 88.19 address ptr = start_a_const(sizeof(c), sizeof(c)); 88.20 if (ptr != NULL) { 88.21 @@ -311,6 +320,15 @@ 88.22 } 88.23 return ptr; 88.24 } 88.25 + address address_constant(address c) { 88.26 + address ptr = start_a_const(sizeof(c), sizeof(c)); 88.27 + if (ptr != NULL) { 88.28 + *(address*)ptr = c; 88.29 + _code_pos = ptr + sizeof(c); 88.30 + end_a_const(); 88.31 + } 88.32 + return ptr; 88.33 + } 88.34 address address_constant(address c, RelocationHolder const& rspec) { 88.35 address ptr = start_a_const(sizeof(c), sizeof(c)); 88.36 if (ptr != NULL) { 88.37 @@ -321,8 +339,6 @@ 88.38 } 88.39 return ptr; 88.40 } 88.41 - inline address address_constant(Label& L); 88.42 - inline address address_table_constant(GrowableArray<Label*> label); 88.43 88.44 // Bootstrapping aid to cope with delayed determination of constants. 88.45 // Returns a static address which will eventually contain the constant.
89.1 --- a/src/share/vm/asm/assembler.inline.hpp Mon Dec 27 09:30:20 2010 -0500 89.2 +++ b/src/share/vm/asm/assembler.inline.hpp Mon Dec 27 09:56:29 2010 -0500 89.3 @@ -114,32 +114,4 @@ 89.4 bind_loc(CodeBuffer::locator(pos, sect)); 89.5 } 89.6 89.7 -address AbstractAssembler::address_constant(Label& L) { 89.8 - address c = NULL; 89.9 - address ptr = start_a_const(sizeof(c), sizeof(c)); 89.10 - if (ptr != NULL) { 89.11 - relocate(Relocation::spec_simple(relocInfo::internal_word_type)); 89.12 - *(address*)ptr = c = code_section()->target(L, ptr); 89.13 - _code_pos = ptr + sizeof(c); 89.14 - end_a_const(); 89.15 - } 89.16 - return ptr; 89.17 -} 89.18 - 89.19 -address AbstractAssembler::address_table_constant(GrowableArray<Label*> labels) { 89.20 - int addressSize = sizeof(address); 89.21 - int sizeLabel = addressSize * labels.length(); 89.22 - address ptr = start_a_const(sizeLabel, addressSize); 89.23 - 89.24 - if (ptr != NULL) { 89.25 - address *labelLoc = (address*)ptr; 89.26 - for (int i=0; i < labels.length(); i++) { 89.27 - emit_address(code_section()->target(*labels.at(i), (address)&labelLoc[i])); 89.28 - code_section()->relocate((address)&labelLoc[i], relocInfo::internal_word_type); 89.29 - } 89.30 - end_a_const(); 89.31 - } 89.32 - return ptr; 89.33 -} 89.34 - 89.35 #endif // SHARE_VM_ASM_ASSEMBLER_INLINE_HPP
90.1 --- a/src/share/vm/asm/codeBuffer.cpp Mon Dec 27 09:30:20 2010 -0500 90.2 +++ b/src/share/vm/asm/codeBuffer.cpp Mon Dec 27 09:56:29 2010 -0500 90.3 @@ -131,6 +131,7 @@ 90.4 #ifdef ASSERT 90.5 // Save allocation type to execute assert in ~ResourceObj() 90.6 // which is called after this destructor. 90.7 + assert(_default_oop_recorder.allocated_on_stack(), "should be embedded object"); 90.8 ResourceObj::allocation_type at = _default_oop_recorder.get_allocation_type(); 90.9 Copy::fill_to_bytes(this, sizeof(*this), badResourceValue); 90.10 ResourceObj::set_allocation_type((address)(&_default_oop_recorder), at);
91.1 --- a/src/share/vm/c1/c1_Compilation.cpp Mon Dec 27 09:30:20 2010 -0500 91.2 +++ b/src/share/vm/c1/c1_Compilation.cpp Mon Dec 27 09:56:29 2010 -0500 91.3 @@ -298,8 +298,8 @@ 91.4 91.5 CHECK_BAILOUT_(no_frame_size); 91.6 91.7 - if (is_profiling()) { 91.8 - method()->build_method_data(); 91.9 + if (is_profiling() && !method()->ensure_method_data()) { 91.10 + BAILOUT_("mdo allocation failed", no_frame_size); 91.11 } 91.12 91.13 { 91.14 @@ -484,11 +484,11 @@ 91.15 if (is_profiling()) { 91.16 // Compilation failed, create MDO, which would signal the interpreter 91.17 // to start profiling on its own. 91.18 - _method->build_method_data(); 91.19 + _method->ensure_method_data(); 91.20 } 91.21 } else if (is_profiling() && _would_profile) { 91.22 - ciMethodData *md = method->method_data(); 91.23 - assert (md != NULL, "Should have MDO"); 91.24 + ciMethodData *md = method->method_data_or_null(); 91.25 + assert(md != NULL, "Sanity"); 91.26 md->set_would_profile(_would_profile); 91.27 } 91.28 }
92.1 --- a/src/share/vm/c1/c1_FrameMap.hpp Mon Dec 27 09:30:20 2010 -0500 92.2 +++ b/src/share/vm/c1/c1_FrameMap.hpp Mon Dec 27 09:56:29 2010 -0500 92.3 @@ -76,8 +76,8 @@ 92.4 nof_cpu_regs_reg_alloc = pd_nof_cpu_regs_reg_alloc, 92.5 nof_fpu_regs_reg_alloc = pd_nof_fpu_regs_reg_alloc, 92.6 92.7 - nof_caller_save_cpu_regs = pd_nof_caller_save_cpu_regs_frame_map, 92.8 - nof_caller_save_fpu_regs = pd_nof_caller_save_fpu_regs_frame_map, 92.9 + max_nof_caller_save_cpu_regs = pd_nof_caller_save_cpu_regs_frame_map, 92.10 + nof_caller_save_fpu_regs = pd_nof_caller_save_fpu_regs_frame_map, 92.11 92.12 spill_slot_size_in_bytes = 4 92.13 }; 92.14 @@ -97,7 +97,7 @@ 92.15 static Register _cpu_rnr2reg [nof_cpu_regs]; 92.16 static int _cpu_reg2rnr [nof_cpu_regs]; 92.17 92.18 - static LIR_Opr _caller_save_cpu_regs [nof_caller_save_cpu_regs]; 92.19 + static LIR_Opr _caller_save_cpu_regs [max_nof_caller_save_cpu_regs]; 92.20 static LIR_Opr _caller_save_fpu_regs [nof_caller_save_fpu_regs]; 92.21 92.22 int _framesize; 92.23 @@ -243,7 +243,7 @@ 92.24 VMReg regname(LIR_Opr opr) const; 92.25 92.26 static LIR_Opr caller_save_cpu_reg_at(int i) { 92.27 - assert(i >= 0 && i < nof_caller_save_cpu_regs, "out of bounds"); 92.28 + assert(i >= 0 && i < max_nof_caller_save_cpu_regs, "out of bounds"); 92.29 return _caller_save_cpu_regs[i]; 92.30 } 92.31
93.1 --- a/src/share/vm/c1/c1_GraphBuilder.cpp Mon Dec 27 09:30:20 2010 -0500 93.2 +++ b/src/share/vm/c1/c1_GraphBuilder.cpp Mon Dec 27 09:56:29 2010 -0500 93.3 @@ -2795,7 +2795,7 @@ 93.4 get = append(new UnsafeGetRaw(as_BasicType(local->type()), e, 93.5 append(new Constant(new IntConstant(offset))), 93.6 0, 93.7 - true)); 93.8 + true /*unaligned*/, true /*wide*/)); 93.9 } 93.10 _state->store_local(index, get); 93.11 } 93.12 @@ -3377,6 +3377,9 @@ 93.13 INLINE_BAILOUT("total inlining greater than DesiredMethodLimit"); 93.14 } 93.15 93.16 + if (is_profiling() && !callee->ensure_method_data()) { 93.17 + INLINE_BAILOUT("mdo allocation failed"); 93.18 + } 93.19 #ifndef PRODUCT 93.20 // printing 93.21 if (PrintInlining) {
94.1 --- a/src/share/vm/c1/c1_IR.cpp Mon Dec 27 09:30:20 2010 -0500 94.2 +++ b/src/share/vm/c1/c1_IR.cpp Mon Dec 27 09:56:29 2010 -0500 94.3 @@ -504,7 +504,12 @@ 94.4 count_edges(start_block, NULL); 94.5 94.6 if (compilation()->is_profiling()) { 94.7 - compilation()->method()->method_data()->set_compilation_stats(_num_loops, _num_blocks); 94.8 + ciMethod *method = compilation()->method(); 94.9 + if (!method->is_accessor()) { 94.10 + ciMethodData* md = method->method_data_or_null(); 94.11 + assert(md != NULL, "Sanity"); 94.12 + md->set_compilation_stats(_num_loops, _num_blocks); 94.13 + } 94.14 } 94.15 94.16 if (_num_loops > 0) {
95.1 --- a/src/share/vm/c1/c1_Instruction.hpp Mon Dec 27 09:30:20 2010 -0500 95.2 +++ b/src/share/vm/c1/c1_Instruction.hpp Mon Dec 27 09:56:29 2010 -0500 95.3 @@ -2110,20 +2110,23 @@ 95.4 95.5 LEAF(UnsafeGetRaw, UnsafeRawOp) 95.6 private: 95.7 - bool _may_be_unaligned; // For OSREntry 95.8 + bool _may_be_unaligned, _is_wide; // For OSREntry 95.9 95.10 public: 95.11 - UnsafeGetRaw(BasicType basic_type, Value addr, bool may_be_unaligned) 95.12 + UnsafeGetRaw(BasicType basic_type, Value addr, bool may_be_unaligned, bool is_wide = false) 95.13 : UnsafeRawOp(basic_type, addr, false) { 95.14 _may_be_unaligned = may_be_unaligned; 95.15 + _is_wide = is_wide; 95.16 } 95.17 95.18 - UnsafeGetRaw(BasicType basic_type, Value base, Value index, int log2_scale, bool may_be_unaligned) 95.19 + UnsafeGetRaw(BasicType basic_type, Value base, Value index, int log2_scale, bool may_be_unaligned, bool is_wide = false) 95.20 : UnsafeRawOp(basic_type, base, index, log2_scale, false) { 95.21 _may_be_unaligned = may_be_unaligned; 95.22 + _is_wide = is_wide; 95.23 } 95.24 95.25 - bool may_be_unaligned() { return _may_be_unaligned; } 95.26 + bool may_be_unaligned() { return _may_be_unaligned; } 95.27 + bool is_wide() { return _is_wide; } 95.28 }; 95.29 95.30
96.1 --- a/src/share/vm/c1/c1_LIR.cpp Mon Dec 27 09:30:20 2010 -0500 96.2 +++ b/src/share/vm/c1/c1_LIR.cpp Mon Dec 27 09:56:29 2010 -0500 96.3 @@ -1742,6 +1742,8 @@ 96.4 return "unaligned move"; 96.5 case lir_move_volatile: 96.6 return "volatile_move"; 96.7 + case lir_move_wide: 96.8 + return "wide_move"; 96.9 default: 96.10 ShouldNotReachHere(); 96.11 return "illegal_op";
97.1 --- a/src/share/vm/c1/c1_LIR.hpp Mon Dec 27 09:30:20 2010 -0500 97.2 +++ b/src/share/vm/c1/c1_LIR.hpp Mon Dec 27 09:56:29 2010 -0500 97.3 @@ -985,6 +985,7 @@ 97.4 lir_move_normal, 97.5 lir_move_volatile, 97.6 lir_move_unaligned, 97.7 + lir_move_wide, 97.8 lir_move_max_flag 97.9 }; 97.10 97.11 @@ -1932,7 +1933,20 @@ 97.12 void move(LIR_Opr src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, info)); } 97.13 void move(LIR_Address* src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, src->type(), lir_patch_none, info)); } 97.14 void move(LIR_Opr src, LIR_Address* dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), dst->type(), lir_patch_none, info)); } 97.15 - 97.16 + void move_wide(LIR_Address* src, LIR_Opr dst, CodeEmitInfo* info = NULL) { 97.17 + if (UseCompressedOops) { 97.18 + append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, src->type(), lir_patch_none, info, lir_move_wide)); 97.19 + } else { 97.20 + move(src, dst, info); 97.21 + } 97.22 + } 97.23 + void move_wide(LIR_Opr src, LIR_Address* dst, CodeEmitInfo* info = NULL) { 97.24 + if (UseCompressedOops) { 97.25 + append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), dst->type(), lir_patch_none, info, lir_move_wide)); 97.26 + } else { 97.27 + move(src, dst, info); 97.28 + } 97.29 + } 97.30 void volatile_move(LIR_Opr src, LIR_Opr dst, BasicType type, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none) { append(new LIR_Op1(lir_move, src, dst, type, patch_code, info, lir_move_volatile)); } 97.31 97.32 void oop2reg (jobject o, LIR_Opr reg) { append(new LIR_Op1(lir_move, LIR_OprFact::oopConst(o), reg)); }
98.1 --- a/src/share/vm/c1/c1_LIRAssembler.cpp Mon Dec 27 09:30:20 2010 -0500 98.2 +++ b/src/share/vm/c1/c1_LIRAssembler.cpp Mon Dec 27 09:56:29 2010 -0500 98.3 @@ -489,7 +489,9 @@ 98.4 volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info()); 98.5 } else { 98.6 move_op(op->in_opr(), op->result_opr(), op->type(), 98.7 - op->patch_code(), op->info(), op->pop_fpu_stack(), op->move_kind() == lir_move_unaligned); 98.8 + op->patch_code(), op->info(), op->pop_fpu_stack(), 98.9 + op->move_kind() == lir_move_unaligned, 98.10 + op->move_kind() == lir_move_wide); 98.11 } 98.12 break; 98.13 98.14 @@ -758,7 +760,7 @@ 98.15 } 98.16 98.17 98.18 -void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned) { 98.19 +void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide) { 98.20 if (src->is_register()) { 98.21 if (dest->is_register()) { 98.22 assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here"); 98.23 @@ -767,7 +769,7 @@ 98.24 assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here"); 98.25 reg2stack(src, dest, type, pop_fpu_stack); 98.26 } else if (dest->is_address()) { 98.27 - reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, unaligned); 98.28 + reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, wide, unaligned); 98.29 } else { 98.30 ShouldNotReachHere(); 98.31 } 98.32 @@ -790,13 +792,13 @@ 98.33 const2stack(src, dest); 98.34 } else if (dest->is_address()) { 98.35 assert(patch_code == lir_patch_none, "no patching allowed here"); 98.36 - const2mem(src, dest, type, info); 98.37 + const2mem(src, dest, type, info, wide); 98.38 } else { 98.39 ShouldNotReachHere(); 98.40 } 98.41 98.42 } else if (src->is_address()) { 98.43 - mem2reg(src, dest, type, patch_code, info, unaligned); 98.44 + 
mem2reg(src, dest, type, patch_code, info, wide, unaligned); 98.45 98.46 } else { 98.47 ShouldNotReachHere();
99.1 --- a/src/share/vm/c1/c1_LIRAssembler.hpp Mon Dec 27 09:30:20 2010 -0500 99.2 +++ b/src/share/vm/c1/c1_LIRAssembler.hpp Mon Dec 27 09:56:29 2010 -0500 99.3 @@ -165,15 +165,17 @@ 99.4 99.5 void const2reg (LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info); 99.6 void const2stack(LIR_Opr src, LIR_Opr dest); 99.7 - void const2mem (LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info); 99.8 + void const2mem (LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide); 99.9 void reg2stack (LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack); 99.10 void reg2reg (LIR_Opr src, LIR_Opr dest); 99.11 - void reg2mem (LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned); 99.12 + void reg2mem (LIR_Opr src, LIR_Opr dest, BasicType type, 99.13 + LIR_PatchCode patch_code, CodeEmitInfo* info, 99.14 + bool pop_fpu_stack, bool wide, bool unaligned); 99.15 void stack2reg (LIR_Opr src, LIR_Opr dest, BasicType type); 99.16 void stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type); 99.17 void mem2reg (LIR_Opr src, LIR_Opr dest, BasicType type, 99.18 - LIR_PatchCode patch_code = lir_patch_none, 99.19 - CodeEmitInfo* info = NULL, bool unaligned = false); 99.20 + LIR_PatchCode patch_code, 99.21 + CodeEmitInfo* info, bool wide, bool unaligned); 99.22 99.23 void prefetchr (LIR_Opr src); 99.24 void prefetchw (LIR_Opr src); 99.25 @@ -211,7 +213,7 @@ 99.26 99.27 void roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack); 99.28 void move_op(LIR_Opr src, LIR_Opr result, BasicType type, 99.29 - LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned); 99.30 + LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide); 99.31 void volatile_move_op(LIR_Opr src, LIR_Opr result, BasicType type, CodeEmitInfo* info); 99.32 void comp_mem_op(LIR_Opr src, LIR_Opr result, BasicType type, CodeEmitInfo* 
info); // info set for null exceptions 99.33 void comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr result, LIR_Op2* op);
100.1 --- a/src/share/vm/c1/c1_LIRGenerator.cpp Mon Dec 27 09:30:20 2010 -0500 100.2 +++ b/src/share/vm/c1/c1_LIRGenerator.cpp Mon Dec 27 09:56:29 2010 -0500 100.3 @@ -836,11 +836,8 @@ 100.4 if (if_instr->should_profile()) { 100.5 ciMethod* method = if_instr->profiled_method(); 100.6 assert(method != NULL, "method should be set if branch is profiled"); 100.7 - ciMethodData* md = method->method_data(); 100.8 - if (md == NULL) { 100.9 - bailout("out of memory building methodDataOop"); 100.10 - return; 100.11 - } 100.12 + ciMethodData* md = method->method_data_or_null(); 100.13 + assert(md != NULL, "Sanity"); 100.14 ciProfileData* data = md->bci_to_data(if_instr->profiled_bci()); 100.15 assert(data != NULL, "must have profiling data"); 100.16 assert(data->is_BranchData(), "need BranchData for two-way branches"); 100.17 @@ -864,11 +861,11 @@ 100.18 // MDO cells are intptr_t, so the data_reg width is arch-dependent. 100.19 LIR_Opr data_reg = new_pointer_register(); 100.20 LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type()); 100.21 - __ move(LIR_OprFact::address(data_addr), data_reg); 100.22 + __ move(data_addr, data_reg); 100.23 // Use leal instead of add to avoid destroying condition codes on x86 100.24 LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT); 100.25 __ leal(LIR_OprFact::address(fake_incr_value), data_reg); 100.26 - __ move(data_reg, LIR_OprFact::address(data_addr)); 100.27 + __ move(data_reg, data_addr); 100.28 } 100.29 } 100.30 100.31 @@ -1009,12 +1006,12 @@ 100.32 operand_for_instruction(phi)); 100.33 100.34 LIR_Opr thread_reg = getThreadPointer(); 100.35 - __ move(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT), 100.36 - exceptionOopOpr()); 100.37 - __ move(LIR_OprFact::oopConst(NULL), 100.38 - new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT)); 100.39 - __ move(LIR_OprFact::oopConst(NULL), 100.40 - new 
LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT)); 100.41 + __ move_wide(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT), 100.42 + exceptionOopOpr()); 100.43 + __ move_wide(LIR_OprFact::oopConst(NULL), 100.44 + new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT)); 100.45 + __ move_wide(LIR_OprFact::oopConst(NULL), 100.46 + new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT)); 100.47 100.48 LIR_Opr result = new_register(T_OBJECT); 100.49 __ move(exceptionOopOpr(), result); 100.50 @@ -1085,7 +1082,7 @@ 100.51 void LIRGenerator::do_Return(Return* x) { 100.52 if (compilation()->env()->dtrace_method_probes()) { 100.53 BasicTypeList signature; 100.54 - signature.append(T_INT); // thread 100.55 + signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread 100.56 signature.append(T_OBJECT); // methodOop 100.57 LIR_OprList* args = new LIR_OprList(); 100.58 args->append(getThreadPointer()); 100.59 @@ -1122,8 +1119,8 @@ 100.60 info = state_for(x); 100.61 } 100.62 __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_OBJECT), result, info); 100.63 - __ move(new LIR_Address(result, Klass::java_mirror_offset_in_bytes() + 100.64 - klassOopDesc::klass_part_offset_in_bytes(), T_OBJECT), result); 100.65 + __ move_wide(new LIR_Address(result, Klass::java_mirror_offset_in_bytes() + 100.66 + klassOopDesc::klass_part_offset_in_bytes(), T_OBJECT), result); 100.67 } 100.68 100.69 100.70 @@ -1131,7 +1128,7 @@ 100.71 void LIRGenerator::do_currentThread(Intrinsic* x) { 100.72 assert(x->number_of_arguments() == 0, "wrong type"); 100.73 LIR_Opr reg = rlock_result(x); 100.74 - __ load(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_OBJECT), reg); 100.75 + __ move_wide(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_OBJECT), reg); 100.76 } 100.77 100.78 100.79 @@ -1908,7 +1905,11 @@ 
100.80 if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) { 100.81 __ unaligned_move(addr, reg); 100.82 } else { 100.83 - __ move(addr, reg); 100.84 + if (dst_type == T_OBJECT && x->is_wide()) { 100.85 + __ move_wide(addr, reg); 100.86 + } else { 100.87 + __ move(addr, reg); 100.88 + } 100.89 } 100.90 } 100.91 100.92 @@ -2215,11 +2216,8 @@ 100.93 if (x->should_profile()) { 100.94 ciMethod* method = x->profiled_method(); 100.95 assert(method != NULL, "method should be set if branch is profiled"); 100.96 - ciMethodData* md = method->method_data(); 100.97 - if (md == NULL) { 100.98 - bailout("out of memory building methodDataOop"); 100.99 - return; 100.100 - } 100.101 + ciMethodData* md = method->method_data_or_null(); 100.102 + assert(md != NULL, "Sanity"); 100.103 ciProfileData* data = md->bci_to_data(x->profiled_bci()); 100.104 assert(data != NULL, "must have profiling data"); 100.105 int offset; 100.106 @@ -2287,7 +2285,7 @@ 100.107 100.108 if (compilation()->env()->dtrace_method_probes()) { 100.109 BasicTypeList signature; 100.110 - signature.append(T_INT); // thread 100.111 + signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread 100.112 signature.append(T_OBJECT); // methodOop 100.113 LIR_OprList* args = new LIR_OprList(); 100.114 args->append(getThreadPointer()); 100.115 @@ -2352,11 +2350,14 @@ 100.116 } else { 100.117 LIR_Address* addr = loc->as_address_ptr(); 100.118 param->load_for_store(addr->type()); 100.119 - if (addr->type() == T_LONG || addr->type() == T_DOUBLE) { 100.120 - __ unaligned_move(param->result(), addr); 100.121 - } else { 100.122 - __ move(param->result(), addr); 100.123 - } 100.124 + if (addr->type() == T_OBJECT) { 100.125 + __ move_wide(param->result(), addr); 100.126 + } else 100.127 + if (addr->type() == T_LONG || addr->type() == T_DOUBLE) { 100.128 + __ unaligned_move(param->result(), addr); 100.129 + } else { 100.130 + __ move(param->result(), addr); 100.131 + } 100.132 } 100.133 } 100.134 100.135 @@ 
-2368,7 +2369,7 @@ 100.136 } else { 100.137 assert(loc->is_address(), "just checking"); 100.138 receiver->load_for_store(T_OBJECT); 100.139 - __ move(receiver->result(), loc); 100.140 + __ move_wide(receiver->result(), loc->as_address_ptr()); 100.141 } 100.142 } 100.143 } 100.144 @@ -2716,7 +2717,9 @@ 100.145 } else if (level == CompLevel_full_profile) { 100.146 offset = in_bytes(backedge ? methodDataOopDesc::backedge_counter_offset() : 100.147 methodDataOopDesc::invocation_counter_offset()); 100.148 - __ oop2reg(method->method_data()->constant_encoding(), counter_holder); 100.149 + ciMethodData* md = method->method_data_or_null(); 100.150 + assert(md != NULL, "Sanity"); 100.151 + __ oop2reg(md->constant_encoding(), counter_holder); 100.152 meth = new_register(T_OBJECT); 100.153 __ oop2reg(method->constant_encoding(), meth); 100.154 } else {
101.1 --- a/src/share/vm/c1/c1_LinearScan.cpp Mon Dec 27 09:30:20 2010 -0500 101.2 +++ b/src/share/vm/c1/c1_LinearScan.cpp Mon Dec 27 09:56:29 2010 -0500 101.3 @@ -1273,7 +1273,7 @@ 101.4 int caller_save_registers[LinearScan::nof_regs]; 101.5 101.6 int i; 101.7 - for (i = 0; i < FrameMap::nof_caller_save_cpu_regs; i++) { 101.8 + for (i = 0; i < FrameMap::nof_caller_save_cpu_regs(); i++) { 101.9 LIR_Opr opr = FrameMap::caller_save_cpu_reg_at(i); 101.10 assert(opr->is_valid() && opr->is_register(), "FrameMap should not return invalid operands"); 101.11 assert(reg_numHi(opr) == -1, "missing addition of range for hi-register"); 101.12 @@ -3557,7 +3557,7 @@ 101.13 101.14 // invalidate all caller save registers at calls 101.15 if (visitor.has_call()) { 101.16 - for (j = 0; j < FrameMap::nof_caller_save_cpu_regs; j++) { 101.17 + for (j = 0; j < FrameMap::nof_caller_save_cpu_regs(); j++) { 101.18 state_put(input_state, reg_num(FrameMap::caller_save_cpu_reg_at(j)), NULL); 101.19 } 101.20 for (j = 0; j < FrameMap::nof_caller_save_fpu_regs; j++) { 101.21 @@ -5596,7 +5596,7 @@ 101.22 _last_reg = pd_last_fpu_reg; 101.23 } else { 101.24 _first_reg = pd_first_cpu_reg; 101.25 - _last_reg = pd_last_cpu_reg; 101.26 + _last_reg = FrameMap::last_cpu_reg(); 101.27 } 101.28 101.29 assert(0 <= _first_reg && _first_reg < LinearScan::nof_regs, "out of range");
102.1 --- a/src/share/vm/c1/c1_Runtime1.cpp Mon Dec 27 09:30:20 2010 -0500 102.2 +++ b/src/share/vm/c1/c1_Runtime1.cpp Mon Dec 27 09:56:29 2010 -0500 102.3 @@ -1174,7 +1174,7 @@ 102.4 memmove(dst_addr, src_addr, length << l2es); 102.5 return ac_ok; 102.6 } else if (src->is_objArray() && dst->is_objArray()) { 102.7 - if (UseCompressedOops) { // will need for tiered 102.8 + if (UseCompressedOops) { 102.9 narrowOop *src_addr = objArrayOop(src)->obj_at_addr<narrowOop>(src_pos); 102.10 narrowOop *dst_addr = objArrayOop(dst)->obj_at_addr<narrowOop>(dst_pos); 102.11 return obj_arraycopy_work(src, src_addr, dst, dst_addr, length); 102.12 @@ -1210,10 +1210,11 @@ 102.13 assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well."); 102.14 if (UseCompressedOops) { 102.15 bs->write_ref_array_pre((narrowOop*)dst, num); 102.16 + Copy::conjoint_oops_atomic((narrowOop*) src, (narrowOop*) dst, num); 102.17 } else { 102.18 bs->write_ref_array_pre((oop*)dst, num); 102.19 + Copy::conjoint_oops_atomic((oop*) src, (oop*) dst, num); 102.20 } 102.21 - Copy::conjoint_oops_atomic((oop*) src, (oop*) dst, num); 102.22 bs->write_ref_array(dst, num); 102.23 JRT_END 102.24
103.1 --- a/src/share/vm/ci/ciMethod.cpp Mon Dec 27 09:30:20 2010 -0500 103.2 +++ b/src/share/vm/ci/ciMethod.cpp Mon Dec 27 09:56:29 2010 -0500 103.3 @@ -797,12 +797,13 @@ 103.4 103.5 103.6 // ------------------------------------------------------------------ 103.7 -// ciMethod::build_method_data 103.8 +// ciMethod::ensure_method_data 103.9 // 103.10 // Generate new methodDataOop objects at compile time. 103.11 -void ciMethod::build_method_data(methodHandle h_m) { 103.12 +// Return true if allocation was successful or no MDO is required. 103.13 +bool ciMethod::ensure_method_data(methodHandle h_m) { 103.14 EXCEPTION_CONTEXT; 103.15 - if (is_native() || is_abstract() || h_m()->is_accessor()) return; 103.16 + if (is_native() || is_abstract() || h_m()->is_accessor()) return true; 103.17 if (h_m()->method_data() == NULL) { 103.18 methodOopDesc::build_interpreter_method_data(h_m, THREAD); 103.19 if (HAS_PENDING_EXCEPTION) { 103.20 @@ -812,18 +813,22 @@ 103.21 if (h_m()->method_data() != NULL) { 103.22 _method_data = CURRENT_ENV->get_object(h_m()->method_data())->as_method_data(); 103.23 _method_data->load_data(); 103.24 + return true; 103.25 } else { 103.26 _method_data = CURRENT_ENV->get_empty_methodData(); 103.27 + return false; 103.28 } 103.29 } 103.30 103.31 // public, retroactive version 103.32 -void ciMethod::build_method_data() { 103.33 +bool ciMethod::ensure_method_data() { 103.34 + bool result = true; 103.35 if (_method_data == NULL || _method_data->is_empty()) { 103.36 GUARDED_VM_ENTRY({ 103.37 - build_method_data(get_methodOop()); 103.38 + result = ensure_method_data(get_methodOop()); 103.39 }); 103.40 } 103.41 + return result; 103.42 } 103.43 103.44 103.45 @@ -839,11 +844,6 @@ 103.46 Thread* my_thread = JavaThread::current(); 103.47 methodHandle h_m(my_thread, get_methodOop()); 103.48 103.49 - // Create an MDO for the inlinee 103.50 - if (TieredCompilation && is_c1_compile(env->comp_level())) { 103.51 - build_method_data(h_m); 103.52 - } 103.53 - 103.54 if 
(h_m()->method_data() != NULL) { 103.55 _method_data = CURRENT_ENV->get_object(h_m()->method_data())->as_method_data(); 103.56 _method_data->load_data(); 103.57 @@ -854,6 +854,15 @@ 103.58 103.59 } 103.60 103.61 +// ------------------------------------------------------------------ 103.62 +// ciMethod::method_data_or_null 103.63 +// Returns a pointer to ciMethodData if MDO exists on the VM side, 103.64 +// NULL otherwise. 103.65 +ciMethodData* ciMethod::method_data_or_null() { 103.66 + ciMethodData *md = method_data(); 103.67 + if (md->is_empty()) return NULL; 103.68 + return md; 103.69 +} 103.70 103.71 // ------------------------------------------------------------------ 103.72 // ciMethod::will_link
104.1 --- a/src/share/vm/ci/ciMethod.hpp Mon Dec 27 09:30:20 2010 -0500 104.2 +++ b/src/share/vm/ci/ciMethod.hpp Mon Dec 27 09:56:29 2010 -0500 104.3 @@ -106,7 +106,7 @@ 104.4 104.5 void check_is_loaded() const { assert(is_loaded(), "not loaded"); } 104.6 104.7 - void build_method_data(methodHandle h_m); 104.8 + bool ensure_method_data(methodHandle h_m); 104.9 104.10 void code_at_put(int bci, Bytecodes::Code code) { 104.11 Bytecodes::check(code); 104.12 @@ -121,6 +121,7 @@ 104.13 ciSymbol* name() const { return _name; } 104.14 ciInstanceKlass* holder() const { return _holder; } 104.15 ciMethodData* method_data(); 104.16 + ciMethodData* method_data_or_null(); 104.17 104.18 // Signature information. 104.19 ciSignature* signature() const { return _signature; } 104.20 @@ -230,7 +231,7 @@ 104.21 bool has_unloaded_classes_in_signature(); 104.22 bool is_klass_loaded(int refinfo_index, bool must_be_resolved) const; 104.23 bool check_call(int refinfo_index, bool is_static) const; 104.24 - void build_method_data(); // make sure it exists in the VM also 104.25 + bool ensure_method_data(); // make sure it exists in the VM also 104.26 int scale_count(int count, float prof_factor = 1.); // make MDO count commensurate with IIC 104.27 104.28 // JSR 292 support
105.1 --- a/src/share/vm/classfile/classFileParser.cpp Mon Dec 27 09:30:20 2010 -0500 105.2 +++ b/src/share/vm/classfile/classFileParser.cpp Mon Dec 27 09:56:29 2010 -0500 105.3 @@ -99,12 +99,6 @@ 105.4 unsigned int hashValues[SymbolTable::symbol_alloc_batch_size]; 105.5 int names_count = 0; 105.6 105.7 - // Side buffer for operands of variable-sized (InvokeDynamic) entries. 105.8 - GrowableArray<int>* operands = NULL; 105.9 -#ifdef ASSERT 105.10 - GrowableArray<int>* indy_instructions = new GrowableArray<int>(THREAD, 10); 105.11 -#endif 105.12 - 105.13 // parsing Index 0 is unused 105.14 for (int index = 1; index < length; index++) { 105.15 // Each of the following case guarantees one more byte in the stream 105.16 @@ -184,36 +178,20 @@ 105.17 "Class file version does not support constant tag %u in class file %s"), 105.18 tag, CHECK); 105.19 } 105.20 - if (!AllowTransitionalJSR292 && tag == JVM_CONSTANT_InvokeDynamicTrans) { 105.21 - classfile_parse_error( 105.22 + cfs->guarantee_more(5, CHECK); // bsm_index, nt, tag/access_flags 105.23 + u2 bootstrap_specifier_index = cfs->get_u2_fast(); 105.24 + u2 name_and_type_index = cfs->get_u2_fast(); 105.25 + if (tag == JVM_CONSTANT_InvokeDynamicTrans) { 105.26 + if (!AllowTransitionalJSR292) 105.27 + classfile_parse_error( 105.28 "This JVM does not support transitional InvokeDynamic tag %u in class file %s", 105.29 tag, CHECK); 105.30 + cp->invoke_dynamic_trans_at_put(index, bootstrap_specifier_index, name_and_type_index); 105.31 + break; 105.32 } 105.33 - bool trans_no_argc = AllowTransitionalJSR292 && (tag == JVM_CONSTANT_InvokeDynamicTrans); 105.34 - cfs->guarantee_more(7, CHECK); // bsm_index, nt, argc, ..., tag/access_flags 105.35 - u2 bootstrap_method_index = cfs->get_u2_fast(); 105.36 - u2 name_and_type_index = cfs->get_u2_fast(); 105.37 - int argument_count = trans_no_argc ? 
0 : cfs->get_u2_fast(); 105.38 - cfs->guarantee_more(2*argument_count + 1, CHECK); // argv[argc]..., tag/access_flags 105.39 - int argv_offset = constantPoolOopDesc::_indy_argv_offset; 105.40 - int op_count = argv_offset + argument_count; // bsm, nt, argc, argv[]... 105.41 - int op_base = start_operand_group(operands, op_count, CHECK); 105.42 - assert(argv_offset == 3, "else adjust next 3 assignments"); 105.43 - operands->at_put(op_base + constantPoolOopDesc::_indy_bsm_offset, bootstrap_method_index); 105.44 - operands->at_put(op_base + constantPoolOopDesc::_indy_nt_offset, name_and_type_index); 105.45 - operands->at_put(op_base + constantPoolOopDesc::_indy_argc_offset, argument_count); 105.46 - for (int arg_i = 0; arg_i < argument_count; arg_i++) { 105.47 - int arg = cfs->get_u2_fast(); 105.48 - operands->at_put(op_base + constantPoolOopDesc::_indy_argv_offset + arg_i, arg); 105.49 - } 105.50 - cp->invoke_dynamic_at_put(index, op_base, op_count); 105.51 -#ifdef ASSERT 105.52 - // Record the steps just taken for later checking. 105.53 - indy_instructions->append(index); 105.54 - indy_instructions->append(bootstrap_method_index); 105.55 - indy_instructions->append(name_and_type_index); 105.56 - indy_instructions->append(argument_count); 105.57 -#endif //ASSERT 105.58 + if (_max_bootstrap_specifier_index < (int) bootstrap_specifier_index) 105.59 + _max_bootstrap_specifier_index = (int) bootstrap_specifier_index; // collect for later 105.60 + cp->invoke_dynamic_at_put(index, bootstrap_specifier_index, name_and_type_index); 105.61 } 105.62 break; 105.63 case JVM_CONSTANT_Integer : 105.64 @@ -316,23 +294,6 @@ 105.65 oopFactory::new_symbols(cp, names_count, names, lengths, indices, hashValues, CHECK); 105.66 } 105.67 105.68 - if (operands != NULL && operands->length() > 0) { 105.69 - store_operand_array(operands, cp, CHECK); 105.70 - } 105.71 -#ifdef ASSERT 105.72 - // Re-assert the indy structures, now that assertion checking can work. 
105.73 - for (int indy_i = 0; indy_i < indy_instructions->length(); ) { 105.74 - int index = indy_instructions->at(indy_i++); 105.75 - int bootstrap_method_index = indy_instructions->at(indy_i++); 105.76 - int name_and_type_index = indy_instructions->at(indy_i++); 105.77 - int argument_count = indy_instructions->at(indy_i++); 105.78 - assert(cp->check_invoke_dynamic_at(index, 105.79 - bootstrap_method_index, name_and_type_index, 105.80 - argument_count), 105.81 - "indy structure is OK"); 105.82 - } 105.83 -#endif //ASSERT 105.84 - 105.85 // Copy _current pointer of local copy back to stream(). 105.86 #ifdef ASSERT 105.87 assert(cfs0->current() == old_current, "non-exclusive use of stream()"); 105.88 @@ -340,41 +301,6 @@ 105.89 cfs0->set_current(cfs1.current()); 105.90 } 105.91 105.92 -int ClassFileParser::start_operand_group(GrowableArray<int>* &operands, int op_count, TRAPS) { 105.93 - if (operands == NULL) { 105.94 - operands = new GrowableArray<int>(THREAD, 100); 105.95 - int fillp_offset = constantPoolOopDesc::_multi_operand_buffer_fill_pointer_offset; 105.96 - while (operands->length() <= fillp_offset) 105.97 - operands->append(0); // force op_base > 0, for an error check 105.98 - DEBUG_ONLY(operands->at_put(fillp_offset, (int)badHeapWordVal)); 105.99 - } 105.100 - int cnt_pos = operands->append(op_count); 105.101 - int arg_pos = operands->length(); 105.102 - operands->at_grow(arg_pos + op_count - 1); // grow to include the operands 105.103 - assert(operands->length() == arg_pos + op_count, ""); 105.104 - int op_base = cnt_pos - constantPoolOopDesc::_multi_operand_count_offset; 105.105 - return op_base; 105.106 -} 105.107 - 105.108 -void ClassFileParser::store_operand_array(GrowableArray<int>* operands, constantPoolHandle cp, TRAPS) { 105.109 - // Collect the buffer of operands from variable-sized entries into a permanent array. 
105.110 - int arraylen = operands->length(); 105.111 - int fillp_offset = constantPoolOopDesc::_multi_operand_buffer_fill_pointer_offset; 105.112 - assert(operands->at(fillp_offset) == (int)badHeapWordVal, "value unused so far"); 105.113 - operands->at_put(fillp_offset, arraylen); 105.114 - cp->multi_operand_buffer_grow(arraylen, CHECK); 105.115 - typeArrayOop operands_oop = cp->operands(); 105.116 - assert(operands_oop->length() == arraylen, ""); 105.117 - for (int i = 0; i < arraylen; i++) { 105.118 - operands_oop->int_at_put(i, operands->at(i)); 105.119 - } 105.120 - cp->set_operands(operands_oop); 105.121 - // The fill_pointer is used only by constantPoolOop::copy_entry_to and friends, 105.122 - // when constant pools need to be merged. Make sure it is sane now. 105.123 - assert(cp->multi_operand_buffer_fill_pointer() == arraylen, ""); 105.124 -} 105.125 - 105.126 - 105.127 bool inline valid_cp_range(int index, int length) { return (index > 0 && index < length); } 105.128 105.129 constantPoolHandle ClassFileParser::parse_constant_pool(TRAPS) { 105.130 @@ -401,7 +327,8 @@ 105.131 105.132 // first verification pass - validate cross references and fixup class and string constants 105.133 for (index = 1; index < length; index++) { // Index 0 is unused 105.134 - switch (cp->tag_at(index).value()) { 105.135 + jbyte tag = cp->tag_at(index).value(); 105.136 + switch (tag) { 105.137 case JVM_CONSTANT_Class : 105.138 ShouldNotReachHere(); // Only JVM_CONSTANT_ClassIndex should be present 105.139 break; 105.140 @@ -543,35 +470,23 @@ 105.141 } 105.142 break; 105.143 case JVM_CONSTANT_InvokeDynamicTrans : 105.144 - ShouldNotReachHere(); // this tag does not appear in the heap 105.145 case JVM_CONSTANT_InvokeDynamic : 105.146 { 105.147 - int bootstrap_method_ref_index = cp->invoke_dynamic_bootstrap_method_ref_index_at(index); 105.148 int name_and_type_ref_index = cp->invoke_dynamic_name_and_type_ref_index_at(index); 105.149 - check_property((bootstrap_method_ref_index == 0 
&& AllowTransitionalJSR292) 105.150 - || 105.151 - (valid_cp_range(bootstrap_method_ref_index, length) && 105.152 - (cp->tag_at(bootstrap_method_ref_index).is_method_handle())), 105.153 - "Invalid constant pool index %u in class file %s", 105.154 - bootstrap_method_ref_index, 105.155 - CHECK_(nullHandle)); 105.156 check_property(valid_cp_range(name_and_type_ref_index, length) && 105.157 cp->tag_at(name_and_type_ref_index).is_name_and_type(), 105.158 "Invalid constant pool index %u in class file %s", 105.159 name_and_type_ref_index, 105.160 CHECK_(nullHandle)); 105.161 - int argc = cp->invoke_dynamic_argument_count_at(index); 105.162 - for (int arg_i = 0; arg_i < argc; arg_i++) { 105.163 - int arg = cp->invoke_dynamic_argument_index_at(index, arg_i); 105.164 - check_property(valid_cp_range(arg, length) && 105.165 - cp->tag_at(arg).is_loadable_constant() || 105.166 - // temporary early forms of string and class: 105.167 - cp->tag_at(arg).is_klass_index() || 105.168 - cp->tag_at(arg).is_string_index(), 105.169 + if (tag == JVM_CONSTANT_InvokeDynamicTrans) { 105.170 + int bootstrap_method_ref_index = cp->invoke_dynamic_bootstrap_method_ref_index_at(index); 105.171 + check_property(valid_cp_range(bootstrap_method_ref_index, length) && 105.172 + cp->tag_at(bootstrap_method_ref_index).is_method_handle(), 105.173 "Invalid constant pool index %u in class file %s", 105.174 - arg, 105.175 + bootstrap_method_ref_index, 105.176 CHECK_(nullHandle)); 105.177 } 105.178 + // bootstrap specifier index must be checked later, when BootstrapMethods attr is available 105.179 break; 105.180 } 105.181 default: 105.182 @@ -2429,6 +2344,76 @@ 105.183 k->set_generic_signature(cp->symbol_at(signature_index)); 105.184 } 105.185 105.186 +void ClassFileParser::parse_classfile_bootstrap_methods_attribute(constantPoolHandle cp, instanceKlassHandle k, 105.187 + u4 attribute_byte_length, TRAPS) { 105.188 + ClassFileStream* cfs = stream(); 105.189 + u1* current_start = cfs->current(); 105.190 + 
105.191 + cfs->guarantee_more(2, CHECK); // length 105.192 + int attribute_array_length = cfs->get_u2_fast(); 105.193 + 105.194 + guarantee_property(_max_bootstrap_specifier_index < attribute_array_length, 105.195 + "Short length on BootstrapMethods in class file %s", 105.196 + CHECK); 105.197 + 105.198 + // The attribute contains a counted array of counted tuples of shorts, 105.199 + // represending bootstrap specifiers: 105.200 + // length*{bootstrap_method_index, argument_count*{argument_index}} 105.201 + int operand_count = (attribute_byte_length - sizeof(u2)) / sizeof(u2); 105.202 + // operand_count = number of shorts in attr, except for leading length 105.203 + 105.204 + // The attribute is copied into a short[] array. 105.205 + // The array begins with a series of short[2] pairs, one for each tuple. 105.206 + int index_size = (attribute_array_length * 2); 105.207 + 105.208 + typeArrayOop operands_oop = oopFactory::new_permanent_intArray(index_size + operand_count, CHECK); 105.209 + typeArrayHandle operands(THREAD, operands_oop); 105.210 + operands_oop = NULL; // tidy 105.211 + 105.212 + int operand_fill_index = index_size; 105.213 + int cp_size = cp->length(); 105.214 + 105.215 + for (int n = 0; n < attribute_array_length; n++) { 105.216 + // Store a 32-bit offset into the header of the operand array. 105.217 + assert(constantPoolOopDesc::operand_offset_at(operands(), n) == 0, ""); 105.218 + constantPoolOopDesc::operand_offset_at_put(operands(), n, operand_fill_index); 105.219 + 105.220 + // Read a bootstrap specifier. 
105.221 + cfs->guarantee_more(sizeof(u2) * 2, CHECK); // bsm, argc 105.222 + u2 bootstrap_method_index = cfs->get_u2_fast(); 105.223 + u2 argument_count = cfs->get_u2_fast(); 105.224 + check_property( 105.225 + valid_cp_range(bootstrap_method_index, cp_size) && 105.226 + cp->tag_at(bootstrap_method_index).is_method_handle(), 105.227 + "bootstrap_method_index %u has bad constant type in class file %s", 105.228 + CHECK); 105.229 + operands->short_at_put(operand_fill_index++, bootstrap_method_index); 105.230 + operands->short_at_put(operand_fill_index++, argument_count); 105.231 + 105.232 + cfs->guarantee_more(sizeof(u2) * argument_count, CHECK); // argv[argc] 105.233 + for (int j = 0; j < argument_count; j++) { 105.234 + u2 arg_index = cfs->get_u2_fast(); 105.235 + check_property( 105.236 + valid_cp_range(arg_index, cp_size) && 105.237 + cp->tag_at(arg_index).is_loadable_constant(), 105.238 + "argument_index %u has bad constant type in class file %s", 105.239 + CHECK); 105.240 + operands->short_at_put(operand_fill_index++, arg_index); 105.241 + } 105.242 + } 105.243 + 105.244 + assert(operand_fill_index == operands()->length(), "exact fill"); 105.245 + assert(constantPoolOopDesc::operand_array_length(operands()) == attribute_array_length, "correct decode"); 105.246 + 105.247 + u1* current_end = cfs->current(); 105.248 + guarantee_property(current_end == current_start + attribute_byte_length, 105.249 + "Bad length on BootstrapMethods in class file %s", 105.250 + CHECK); 105.251 + 105.252 + cp->set_operands(operands()); 105.253 +} 105.254 + 105.255 + 105.256 void ClassFileParser::parse_classfile_attributes(constantPoolHandle cp, instanceKlassHandle k, TRAPS) { 105.257 ClassFileStream* cfs = stream(); 105.258 // Set inner classes attribute to default sentinel 105.259 @@ -2438,6 +2423,7 @@ 105.260 bool parsed_sourcefile_attribute = false; 105.261 bool parsed_innerclasses_attribute = false; 105.262 bool parsed_enclosingmethod_attribute = false; 105.263 + bool 
parsed_bootstrap_methods_attribute = false; 105.264 u1* runtime_visible_annotations = NULL; 105.265 int runtime_visible_annotations_length = 0; 105.266 u1* runtime_invisible_annotations = NULL; 105.267 @@ -2536,6 +2522,12 @@ 105.268 classfile_parse_error("Invalid or out-of-bounds method index in EnclosingMethod attribute in class file %s", CHECK); 105.269 } 105.270 k->set_enclosing_method_indices(class_index, method_index); 105.271 + } else if (tag == vmSymbols::tag_bootstrap_methods() && 105.272 + _major_version >= Verifier::INVOKEDYNAMIC_MAJOR_VERSION) { 105.273 + if (parsed_bootstrap_methods_attribute) 105.274 + classfile_parse_error("Multiple BootstrapMethods attributes in class file %s", CHECK); 105.275 + parsed_bootstrap_methods_attribute = true; 105.276 + parse_classfile_bootstrap_methods_attribute(cp, k, attribute_length, CHECK); 105.277 } else { 105.278 // Unknown attribute 105.279 cfs->skip_u1(attribute_length, CHECK); 105.280 @@ -2551,6 +2543,11 @@ 105.281 runtime_invisible_annotations_length, 105.282 CHECK); 105.283 k->set_class_annotations(annotations()); 105.284 + 105.285 + if (_max_bootstrap_specifier_index >= 0) { 105.286 + guarantee_property(parsed_bootstrap_methods_attribute, 105.287 + "Missing BootstrapMethods attribute in class file %s", CHECK); 105.288 + } 105.289 } 105.290 105.291 105.292 @@ -2868,6 +2865,7 @@ 105.293 PerfClassTraceTime::PARSE_CLASS); 105.294 105.295 _has_finalizer = _has_empty_finalizer = _has_vanilla_constructor = false; 105.296 + _max_bootstrap_specifier_index = -1; 105.297 105.298 if (JvmtiExport::should_post_class_file_load_hook()) { 105.299 unsigned char* ptr = cfs->buffer();
106.1 --- a/src/share/vm/classfile/classFileParser.hpp Mon Dec 27 09:30:20 2010 -0500 106.2 +++ b/src/share/vm/classfile/classFileParser.hpp Mon Dec 27 09:56:29 2010 -0500 106.3 @@ -50,6 +50,8 @@ 106.4 bool _has_empty_finalizer; 106.5 bool _has_vanilla_constructor; 106.6 106.7 + int _max_bootstrap_specifier_index; 106.8 + 106.9 enum { fixed_buffer_size = 128 }; 106.10 u_char linenumbertable_buffer[fixed_buffer_size]; 106.11 106.12 @@ -66,9 +68,6 @@ 106.13 106.14 constantPoolHandle parse_constant_pool(TRAPS); 106.15 106.16 - static int start_operand_group(GrowableArray<int>* &operands, int op_count, TRAPS); 106.17 - static void store_operand_array(GrowableArray<int>* operands, constantPoolHandle cp, TRAPS); 106.18 - 106.19 // Interface parsing 106.20 objArrayHandle parse_interfaces(constantPoolHandle cp, 106.21 int length, 106.22 @@ -130,6 +129,7 @@ 106.23 void parse_classfile_attributes(constantPoolHandle cp, instanceKlassHandle k, TRAPS); 106.24 void parse_classfile_synthetic_attribute(constantPoolHandle cp, instanceKlassHandle k, TRAPS); 106.25 void parse_classfile_signature_attribute(constantPoolHandle cp, instanceKlassHandle k, TRAPS); 106.26 + void parse_classfile_bootstrap_methods_attribute(constantPoolHandle cp, instanceKlassHandle k, u4 attribute_length, TRAPS); 106.27 106.28 // Annotations handling 106.29 typeArrayHandle assemble_annotations(u1* runtime_visible_annotations,
107.1 --- a/src/share/vm/classfile/systemDictionary.cpp Mon Dec 27 09:30:20 2010 -0500 107.2 +++ b/src/share/vm/classfile/systemDictionary.cpp Mon Dec 27 09:56:29 2010 -0500 107.3 @@ -2010,7 +2010,7 @@ 107.4 scan = WKID(meth_group_end+1); 107.5 } 107.6 WKID indy_group_start = WK_KLASS_ENUM_NAME(Linkage_klass); 107.7 - WKID indy_group_end = WK_KLASS_ENUM_NAME(InvokeDynamic_klass); 107.8 + WKID indy_group_end = WK_KLASS_ENUM_NAME(CallSite_klass); 107.9 initialize_wk_klasses_until(indy_group_start, scan, CHECK); 107.10 if (EnableInvokeDynamic) { 107.11 initialize_wk_klasses_through(indy_group_end, scan, CHECK);
108.1 --- a/src/share/vm/classfile/systemDictionary.hpp Mon Dec 27 09:30:20 2010 -0500 108.2 +++ b/src/share/vm/classfile/systemDictionary.hpp Mon Dec 27 09:56:29 2010 -0500 108.3 @@ -156,8 +156,7 @@ 108.4 template(WrongMethodTypeException_klass, java_dyn_WrongMethodTypeException, Opt) \ 108.5 template(Linkage_klass, java_dyn_Linkage, Opt) \ 108.6 template(CallSite_klass, java_dyn_CallSite, Opt) \ 108.7 - template(InvokeDynamic_klass, java_dyn_InvokeDynamic, Opt) \ 108.8 - /* Note: MethodHandle must be first, and InvokeDynamic last in group */ \ 108.9 + /* Note: MethodHandle must be first, and CallSite last in group */ \ 108.10 \ 108.11 template(StringBuffer_klass, java_lang_StringBuffer, Pre) \ 108.12 template(StringBuilder_klass, java_lang_StringBuilder, Pre) \
109.1 --- a/src/share/vm/classfile/vmSymbols.hpp Mon Dec 27 09:30:20 2010 -0500 109.2 +++ b/src/share/vm/classfile/vmSymbols.hpp Mon Dec 27 09:56:29 2010 -0500 109.3 @@ -132,6 +132,7 @@ 109.4 template(tag_runtime_invisible_parameter_annotations,"RuntimeInvisibleParameterAnnotations") \ 109.5 template(tag_annotation_default, "AnnotationDefault") \ 109.6 template(tag_enclosing_method, "EnclosingMethod") \ 109.7 + template(tag_bootstrap_methods, "BootstrapMethods") \ 109.8 \ 109.9 /* exception klasses: at least all exceptions thrown by the VM have entries here */ \ 109.10 template(java_lang_ArithmeticException, "java/lang/ArithmeticException") \
110.1 --- a/src/share/vm/code/compressedStream.cpp Mon Dec 27 09:30:20 2010 -0500 110.2 +++ b/src/share/vm/code/compressedStream.cpp Mon Dec 27 09:56:29 2010 -0500 110.3 @@ -197,6 +197,7 @@ 110.4 // compiler stack overflow is fixed. 110.5 #if _MSC_VER >=1400 && !defined(_WIN64) 110.6 #pragma optimize("", off) 110.7 +#pragma warning(disable: 4748) 110.8 #endif 110.9 110.10 // generator for an "interesting" set of critical values 110.11 @@ -276,6 +277,7 @@ 110.12 } 110.13 110.14 #if _MSC_VER >=1400 && !defined(_WIN64) 110.15 +#pragma warning(default: 4748) 110.16 #pragma optimize("", on) 110.17 #endif 110.18
111.1 --- a/src/share/vm/code/relocInfo.cpp Mon Dec 27 09:30:20 2010 -0500 111.2 +++ b/src/share/vm/code/relocInfo.cpp Mon Dec 27 09:56:29 2010 -0500 111.3 @@ -1093,8 +1093,8 @@ 111.4 tty->print_cr("(no relocs)"); 111.5 return; 111.6 } 111.7 - tty->print("relocInfo@" INTPTR_FORMAT " [type=%d(%s) addr=" INTPTR_FORMAT, 111.8 - _current, type(), reloc_type_string((relocInfo::relocType) type()), _addr); 111.9 + tty->print("relocInfo@" INTPTR_FORMAT " [type=%d(%s) addr=" INTPTR_FORMAT " offset=%d", 111.10 + _current, type(), reloc_type_string((relocInfo::relocType) type()), _addr, _current->addr_offset()); 111.11 if (current()->format() != 0) 111.12 tty->print(" format=%d", current()->format()); 111.13 if (datalen() == 1) {
112.1 --- a/src/share/vm/compiler/disassembler.cpp Mon Dec 27 09:30:20 2010 -0500 112.2 +++ b/src/share/vm/compiler/disassembler.cpp Mon Dec 27 09:56:29 2010 -0500 112.3 @@ -466,5 +466,18 @@ 112.4 env.set_total_ticks(total_bucket_count); 112.5 } 112.6 112.7 + // Print constant table. 112.8 + if (nm->consts_size() > 0) { 112.9 + nm->print_nmethod_labels(env.output(), nm->consts_begin()); 112.10 + int offset = 0; 112.11 + for (address p = nm->consts_begin(); p < nm->consts_end(); p += 4, offset += 4) { 112.12 + if ((offset % 8) == 0) { 112.13 + env.output()->print_cr(" " INTPTR_FORMAT " (offset: %4d): " PTR32_FORMAT " " PTR64_FORMAT, (intptr_t) p, offset, *((int32_t*) p), *((int64_t*) p)); 112.14 + } else { 112.15 + env.output()->print_cr(" " INTPTR_FORMAT " (offset: %4d): " PTR32_FORMAT, (intptr_t) p, offset, *((int32_t*) p)); 112.16 + } 112.17 + } 112.18 + } 112.19 + 112.20 env.decode_instructions(p, end); 112.21 }
113.1 --- a/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp Mon Dec 27 09:30:20 2010 -0500 113.2 +++ b/src/share/vm/gc_implementation/g1/concurrentMarkThread.cpp Mon Dec 27 09:56:29 2010 -0500 113.3 @@ -277,7 +277,9 @@ 113.4 // completed. This will also notify the FullGCCount_lock in case a 113.5 // Java thread is waiting for a full GC to happen (e.g., it 113.6 // called System.gc() with +ExplicitGCInvokesConcurrent). 113.7 - g1->increment_full_collections_completed(true /* outer */); 113.8 + _sts.join(); 113.9 + g1->increment_full_collections_completed(true /* concurrent */); 113.10 + _sts.leave(); 113.11 } 113.12 assert(_should_terminate, "just checking"); 113.13
114.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Mon Dec 27 09:30:20 2010 -0500 114.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Mon Dec 27 09:56:29 2010 -0500 114.3 @@ -1389,7 +1389,7 @@ 114.4 } 114.5 114.6 // Update the number of full collections that have been completed. 114.7 - increment_full_collections_completed(false /* outer */); 114.8 + increment_full_collections_completed(false /* concurrent */); 114.9 114.10 if (PrintHeapAtGC) { 114.11 Universe::print_heap_after_gc(); 114.12 @@ -2176,9 +2176,14 @@ 114.13 (cause == GCCause::_java_lang_system_gc && ExplicitGCInvokesConcurrent)); 114.14 } 114.15 114.16 -void G1CollectedHeap::increment_full_collections_completed(bool outer) { 114.17 +void G1CollectedHeap::increment_full_collections_completed(bool concurrent) { 114.18 MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag); 114.19 114.20 + // We assume that if concurrent == true, then the caller is a 114.21 + // concurrent thread that was joined the Suspendible Thread 114.22 + // Set. If there's ever a cheap way to check this, we should add an 114.23 + // assert here. 114.24 + 114.25 // We have already incremented _total_full_collections at the start 114.26 // of the GC, so total_full_collections() represents how many full 114.27 // collections have been started. 114.28 @@ -2192,17 +2197,18 @@ 114.29 // behind the number of full collections started. 114.30 114.31 // This is the case for the inner caller, i.e. a Full GC. 
114.32 - assert(outer || 114.33 + assert(concurrent || 114.34 (full_collections_started == _full_collections_completed + 1) || 114.35 (full_collections_started == _full_collections_completed + 2), 114.36 - err_msg("for inner caller: full_collections_started = %u " 114.37 + err_msg("for inner caller (Full GC): full_collections_started = %u " 114.38 "is inconsistent with _full_collections_completed = %u", 114.39 full_collections_started, _full_collections_completed)); 114.40 114.41 // This is the case for the outer caller, i.e. the concurrent cycle. 114.42 - assert(!outer || 114.43 + assert(!concurrent || 114.44 (full_collections_started == _full_collections_completed + 1), 114.45 - err_msg("for outer caller: full_collections_started = %u " 114.46 + err_msg("for outer caller (concurrent cycle): " 114.47 + "full_collections_started = %u " 114.48 "is inconsistent with _full_collections_completed = %u", 114.49 full_collections_started, _full_collections_completed)); 114.50 114.51 @@ -2212,7 +2218,7 @@ 114.52 // we wake up any waiters (especially when ExplicitInvokesConcurrent 114.53 // is set) so that if a waiter requests another System.gc() it doesn't 114.54 // incorrectly see that a marking cyle is still in progress. 114.55 - if (outer) { 114.56 + if (concurrent) { 114.57 _cmThread->clear_in_progress(); 114.58 } 114.59
115.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Mon Dec 27 09:30:20 2010 -0500 115.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.hpp Mon Dec 27 09:56:29 2010 -0500 115.3 @@ -643,16 +643,16 @@ 115.4 // can happen in a nested fashion, i.e., we start a concurrent 115.5 // cycle, a Full GC happens half-way through it which ends first, 115.6 // and then the cycle notices that a Full GC happened and ends 115.7 - // too. The outer parameter is a boolean to help us do a bit tighter 115.8 - // consistency checking in the method. If outer is false, the caller 115.9 - // is the inner caller in the nesting (i.e., the Full GC). If outer 115.10 - // is true, the caller is the outer caller in this nesting (i.e., 115.11 - // the concurrent cycle). Further nesting is not currently 115.12 - // supported. The end of the this call also notifies the 115.13 - // FullGCCount_lock in case a Java thread is waiting for a full GC 115.14 - // to happen (e.g., it called System.gc() with 115.15 + // too. The concurrent parameter is a boolean to help us do a bit 115.16 + // tighter consistency checking in the method. If concurrent is 115.17 + // false, the caller is the inner caller in the nesting (i.e., the 115.18 + // Full GC). If concurrent is true, the caller is the outer caller 115.19 + // in this nesting (i.e., the concurrent cycle). Further nesting is 115.20 + // not currently supported. The end of the this call also notifies 115.21 + // the FullGCCount_lock in case a Java thread is waiting for a full 115.22 + // GC to happen (e.g., it called System.gc() with 115.23 // +ExplicitGCInvokesConcurrent). 115.24 - void increment_full_collections_completed(bool outer); 115.25 + void increment_full_collections_completed(bool concurrent); 115.26 115.27 unsigned int full_collections_completed() { 115.28 return _full_collections_completed;
116.1 --- a/src/share/vm/interpreter/bytecodeTracer.cpp Mon Dec 27 09:30:20 2010 -0500 116.2 +++ b/src/share/vm/interpreter/bytecodeTracer.cpp Mon Dec 27 09:56:29 2010 -0500 116.3 @@ -346,6 +346,7 @@ 116.4 break; 116.5 case JVM_CONSTANT_NameAndType: 116.6 case JVM_CONSTANT_InvokeDynamic: 116.7 + case JVM_CONSTANT_InvokeDynamicTrans: 116.8 has_klass = false; 116.9 break; 116.10 default:
117.1 --- a/src/share/vm/interpreter/rewriter.cpp Mon Dec 27 09:30:20 2010 -0500 117.2 +++ b/src/share/vm/interpreter/rewriter.cpp Mon Dec 27 09:56:29 2010 -0500 117.3 @@ -52,6 +52,7 @@ 117.4 case JVM_CONSTANT_MethodHandle : // fall through 117.5 case JVM_CONSTANT_MethodType : // fall through 117.6 case JVM_CONSTANT_InvokeDynamic : // fall through 117.7 + case JVM_CONSTANT_InvokeDynamicTrans: // fall through 117.8 add_cp_cache_entry(i); 117.9 break; 117.10 } 117.11 @@ -61,6 +62,7 @@ 117.12 "all cp cache indexes fit in a u2"); 117.13 117.14 _have_invoke_dynamic = ((tag_mask & (1 << JVM_CONSTANT_InvokeDynamic)) != 0); 117.15 + _have_invoke_dynamic |= ((tag_mask & (1 << JVM_CONSTANT_InvokeDynamicTrans)) != 0); 117.16 } 117.17 117.18 117.19 @@ -74,7 +76,7 @@ 117.20 oopFactory::new_constantPoolCache(length, methodOopDesc::IsUnsafeConc, CHECK); 117.21 cache->initialize(_cp_cache_map); 117.22 117.23 - // Don't bother to the next pass if there is no JVM_CONSTANT_InvokeDynamic. 117.24 + // Don't bother with the next pass if there is no JVM_CONSTANT_InvokeDynamic. 117.25 if (_have_invoke_dynamic) { 117.26 for (int i = 0; i < length; i++) { 117.27 int pool_index = cp_cache_entry_pool_index(i);
118.1 --- a/src/share/vm/memory/allocation.cpp Mon Dec 27 09:30:20 2010 -0500 118.2 +++ b/src/share/vm/memory/allocation.cpp Mon Dec 27 09:56:29 2010 -0500 118.3 @@ -73,7 +73,7 @@ 118.4 void ResourceObj::operator delete(void* p) { 118.5 assert(((ResourceObj *)p)->allocated_on_C_heap(), 118.6 "delete only allowed for C_HEAP objects"); 118.7 - DEBUG_ONLY(((ResourceObj *)p)->_allocation = (uintptr_t)badHeapOopVal;) 118.8 + DEBUG_ONLY(((ResourceObj *)p)->_allocation_t[0] = (uintptr_t)badHeapOopVal;) 118.9 FreeHeap(p); 118.10 } 118.11 118.12 @@ -83,43 +83,73 @@ 118.13 uintptr_t allocation = (uintptr_t)res; 118.14 assert((allocation & allocation_mask) == 0, "address should be aligned to 4 bytes at least"); 118.15 assert(type <= allocation_mask, "incorrect allocation type"); 118.16 - ((ResourceObj *)res)->_allocation = ~(allocation + type); 118.17 + ResourceObj* resobj = (ResourceObj *)res; 118.18 + resobj->_allocation_t[0] = ~(allocation + type); 118.19 + if (type != STACK_OR_EMBEDDED) { 118.20 + // Called from operator new() and CollectionSetChooser(), 118.21 + // set verification value. 
118.22 + resobj->_allocation_t[1] = (uintptr_t)&(resobj->_allocation_t[1]) + type; 118.23 + } 118.24 } 118.25 118.26 ResourceObj::allocation_type ResourceObj::get_allocation_type() const { 118.27 - assert(~(_allocation | allocation_mask) == (uintptr_t)this, "lost resource object"); 118.28 - return (allocation_type)((~_allocation) & allocation_mask); 118.29 + assert(~(_allocation_t[0] | allocation_mask) == (uintptr_t)this, "lost resource object"); 118.30 + return (allocation_type)((~_allocation_t[0]) & allocation_mask); 118.31 +} 118.32 + 118.33 +bool ResourceObj::is_type_set() const { 118.34 + allocation_type type = (allocation_type)(_allocation_t[1] & allocation_mask); 118.35 + return get_allocation_type() == type && 118.36 + (_allocation_t[1] - type) == (uintptr_t)(&_allocation_t[1]); 118.37 } 118.38 118.39 ResourceObj::ResourceObj() { // default constructor 118.40 - if (~(_allocation | allocation_mask) != (uintptr_t)this) { 118.41 + if (~(_allocation_t[0] | allocation_mask) != (uintptr_t)this) { 118.42 + // Operator new() is not called for allocations 118.43 + // on stack and for embedded objects. 118.44 set_allocation_type((address)this, STACK_OR_EMBEDDED); 118.45 - } else if (allocated_on_stack()) { 118.46 - // For some reason we got a value which looks like an allocation on stack. 118.47 - // Pass if it is really allocated on stack. 118.48 - assert(Thread::current()->on_local_stack((address)this),"should be on stack"); 118.49 + } else if (allocated_on_stack()) { // STACK_OR_EMBEDDED 118.50 + // For some reason we got a value which resembles 118.51 + // an embedded or stack object (operator new() does not 118.52 + // set such type). Keep it since it is valid value 118.53 + // (even if it was garbage). 118.54 + // Ignore garbage in other fields. 118.55 + } else if (is_type_set()) { 118.56 + // Operator new() was called and type was set. 
118.57 + assert(!allocated_on_stack(), 118.58 + err_msg("not embedded or stack, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")", 118.59 + this, get_allocation_type(), _allocation_t[0], _allocation_t[1])); 118.60 } else { 118.61 - assert(allocated_on_res_area() || allocated_on_C_heap() || allocated_on_arena(), 118.62 - "allocation_type should be set by operator new()"); 118.63 + // Operator new() was not called. 118.64 + // Assume that it is embedded or stack object. 118.65 + set_allocation_type((address)this, STACK_OR_EMBEDDED); 118.66 } 118.67 + _allocation_t[1] = 0; // Zap verification value 118.68 } 118.69 118.70 ResourceObj::ResourceObj(const ResourceObj& r) { // default copy constructor 118.71 // Used in ClassFileParser::parse_constant_pool_entries() for ClassFileStream. 118.72 + // Note: garbage may resembles valid value. 118.73 + assert(~(_allocation_t[0] | allocation_mask) != (uintptr_t)this || !is_type_set(), 118.74 + err_msg("embedded or stack only, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")", 118.75 + this, get_allocation_type(), _allocation_t[0], _allocation_t[1])); 118.76 set_allocation_type((address)this, STACK_OR_EMBEDDED); 118.77 + _allocation_t[1] = 0; // Zap verification value 118.78 } 118.79 118.80 ResourceObj& ResourceObj::operator=(const ResourceObj& r) { // default copy assignment 118.81 // Used in InlineTree::ok_to_inline() for WarmCallInfo. 118.82 - assert(allocated_on_stack(), "copy only into local"); 118.83 - // Keep current _allocation value; 118.84 + assert(allocated_on_stack(), 118.85 + err_msg("copy only into local, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")", 118.86 + this, get_allocation_type(), _allocation_t[0], _allocation_t[1])); 118.87 + // Keep current _allocation_t value; 118.88 return *this; 118.89 } 118.90 118.91 ResourceObj::~ResourceObj() { 118.92 // allocated_on_C_heap() also checks that encoded (in _allocation) address == this. 
118.93 - if (!allocated_on_C_heap()) { // ResourceObj::delete() zaps _allocation for C_heap. 118.94 - _allocation = (uintptr_t)badHeapOopVal; // zap type 118.95 + if (!allocated_on_C_heap()) { // ResourceObj::delete() will zap _allocation for C_heap. 118.96 + _allocation_t[0] = (uintptr_t)badHeapOopVal; // zap type 118.97 } 118.98 } 118.99 #endif // ASSERT
119.1 --- a/src/share/vm/memory/allocation.hpp Mon Dec 27 09:30:20 2010 -0500 119.2 +++ b/src/share/vm/memory/allocation.hpp Mon Dec 27 09:56:29 2010 -0500 119.3 @@ -337,7 +337,9 @@ 119.4 // When this object is allocated on stack the new() operator is not 119.5 // called but garbage on stack may look like a valid allocation_type. 119.6 // Store negated 'this' pointer when new() is called to distinguish cases. 119.7 - uintptr_t _allocation; 119.8 + // Use second array's element for verification value to distinguish garbage. 119.9 + uintptr_t _allocation_t[2]; 119.10 + bool is_type_set() const; 119.11 public: 119.12 allocation_type get_allocation_type() const; 119.13 bool allocated_on_stack() const { return get_allocation_type() == STACK_OR_EMBEDDED; }
120.1 --- a/src/share/vm/oops/constantPoolKlass.cpp Mon Dec 27 09:30:20 2010 -0500 120.2 +++ b/src/share/vm/oops/constantPoolKlass.cpp Mon Dec 27 09:56:29 2010 -0500 120.3 @@ -399,6 +399,7 @@ 120.4 case JVM_CONSTANT_MethodType : 120.5 st->print("signature_index=%d", cp->method_type_index_at(index)); 120.6 break; 120.7 + case JVM_CONSTANT_InvokeDynamicTrans : 120.8 case JVM_CONSTANT_InvokeDynamic : 120.9 { 120.10 st->print("bootstrap_method_index=%d", cp->invoke_dynamic_bootstrap_method_ref_index_at(index));
121.1 --- a/src/share/vm/oops/constantPoolOop.cpp Mon Dec 27 09:30:20 2010 -0500 121.2 +++ b/src/share/vm/oops/constantPoolOop.cpp Mon Dec 27 09:56:29 2010 -0500 121.3 @@ -915,7 +915,8 @@ 121.4 { 121.5 int k1 = method_type_index_at(index1); 121.6 int k2 = cp2->method_type_index_at(index2); 121.7 - if (k1 == k2) { 121.8 + bool match = compare_entry_to(k1, cp2, k2, CHECK_false); 121.9 + if (match) { 121.10 return true; 121.11 } 121.12 } break; 121.13 @@ -927,28 +928,33 @@ 121.14 if (k1 == k2) { 121.15 int i1 = method_handle_index_at(index1); 121.16 int i2 = cp2->method_handle_index_at(index2); 121.17 - if (i1 == i2) { 121.18 + bool match = compare_entry_to(i1, cp2, i2, CHECK_false); 121.19 + if (match) { 121.20 return true; 121.21 } 121.22 } 121.23 } break; 121.24 121.25 case JVM_CONSTANT_InvokeDynamic: 121.26 + case JVM_CONSTANT_InvokeDynamicTrans: 121.27 { 121.28 - int op_count = multi_operand_count_at(index1); 121.29 - if (op_count == cp2->multi_operand_count_at(index2)) { 121.30 - bool all_equal = true; 121.31 - for (int op_i = 0; op_i < op_count; op_i++) { 121.32 - int k1 = multi_operand_ref_at(index1, op_i); 121.33 - int k2 = cp2->multi_operand_ref_at(index2, op_i); 121.34 - if (k1 != k2) { 121.35 - all_equal = false; 121.36 - break; 121.37 - } 121.38 + int k1 = invoke_dynamic_bootstrap_method_ref_index_at(index1); 121.39 + int k2 = cp2->invoke_dynamic_bootstrap_method_ref_index_at(index2); 121.40 + bool match = compare_entry_to(k1, cp2, k2, CHECK_false); 121.41 + if (!match) return false; 121.42 + k1 = invoke_dynamic_name_and_type_ref_index_at(index1); 121.43 + k2 = cp2->invoke_dynamic_name_and_type_ref_index_at(index2); 121.44 + match = compare_entry_to(k1, cp2, k2, CHECK_false); 121.45 + if (!match) return false; 121.46 + int argc = invoke_dynamic_argument_count_at(index1); 121.47 + if (argc == cp2->invoke_dynamic_argument_count_at(index2)) { 121.48 + for (int j = 0; j < argc; j++) { 121.49 + k1 = invoke_dynamic_argument_index_at(index1, j); 121.50 + k2 = 
cp2->invoke_dynamic_argument_index_at(index2, j); 121.51 + match = compare_entry_to(k1, cp2, k2, CHECK_false); 121.52 + if (!match) return false; 121.53 } 121.54 - if (all_equal) { 121.55 - return true; // got through loop; all elements equal 121.56 - } 121.57 + return true; // got through loop; all elements equal 121.58 } 121.59 } break; 121.60 121.61 @@ -984,44 +990,18 @@ 121.62 } // end compare_entry_to() 121.63 121.64 121.65 -// Grow this->operands() to the indicated length, unless it is already at least that long. 121.66 -void constantPoolOopDesc::multi_operand_buffer_grow(int min_length, TRAPS) { 121.67 - int old_length = multi_operand_buffer_fill_pointer(); 121.68 - if (old_length >= min_length) return; 121.69 - int new_length = min_length; 121.70 - assert(new_length > _multi_operand_buffer_fill_pointer_offset, ""); 121.71 - typeArrayHandle new_operands = oopFactory::new_permanent_intArray(new_length, CHECK); 121.72 - if (operands() == NULL) { 121.73 - new_operands->int_at_put(_multi_operand_buffer_fill_pointer_offset, old_length); 121.74 - } else { 121.75 - // copy fill pointer and everything else 121.76 - for (int i = 0; i < old_length; i++) { 121.77 - new_operands->int_at_put(i, operands()->int_at(i)); 121.78 - } 121.79 - } 121.80 - set_operands(new_operands()); 121.81 -} 121.82 - 121.83 - 121.84 // Copy this constant pool's entries at start_i to end_i (inclusive) 121.85 // to the constant pool to_cp's entries starting at to_i. A total of 121.86 // (end_i - start_i) + 1 entries are copied. 
121.87 -void constantPoolOopDesc::copy_cp_to(int start_i, int end_i, 121.88 +void constantPoolOopDesc::copy_cp_to_impl(constantPoolHandle from_cp, int start_i, int end_i, 121.89 constantPoolHandle to_cp, int to_i, TRAPS) { 121.90 121.91 int dest_i = to_i; // leave original alone for debug purposes 121.92 121.93 - if (operands() != NULL) { 121.94 - // pre-grow the target CP's operand buffer 121.95 - int nops = this->multi_operand_buffer_fill_pointer(); 121.96 - nops += to_cp->multi_operand_buffer_fill_pointer(); 121.97 - to_cp->multi_operand_buffer_grow(nops, CHECK); 121.98 - } 121.99 + for (int src_i = start_i; src_i <= end_i; /* see loop bottom */ ) { 121.100 + copy_entry_to(from_cp, src_i, to_cp, dest_i, CHECK); 121.101 121.102 - for (int src_i = start_i; src_i <= end_i; /* see loop bottom */ ) { 121.103 - copy_entry_to(src_i, to_cp, dest_i, CHECK); 121.104 - 121.105 - switch (tag_at(src_i).value()) { 121.106 + switch (from_cp->tag_at(src_i).value()) { 121.107 case JVM_CONSTANT_Double: 121.108 case JVM_CONSTANT_Long: 121.109 // double and long take two constant pool entries 121.110 @@ -1036,30 +1016,81 @@ 121.111 break; 121.112 } 121.113 } 121.114 + 121.115 + int from_oplen = operand_array_length(from_cp->operands()); 121.116 + int old_oplen = operand_array_length(to_cp->operands()); 121.117 + if (from_oplen != 0) { 121.118 + // append my operands to the target's operands array 121.119 + if (old_oplen == 0) { 121.120 + to_cp->set_operands(from_cp->operands()); // reuse; do not merge 121.121 + } else { 121.122 + int old_len = to_cp->operands()->length(); 121.123 + int from_len = from_cp->operands()->length(); 121.124 + int old_off = old_oplen * sizeof(u2); 121.125 + int from_off = from_oplen * sizeof(u2); 121.126 + typeArrayHandle new_operands = oopFactory::new_permanent_shortArray(old_len + from_len, CHECK); 121.127 + int fillp = 0, len = 0; 121.128 + // first part of dest 121.129 + Copy::conjoint_memory_atomic(to_cp->operands()->short_at_addr(0), 121.130 + 
new_operands->short_at_addr(fillp), 121.131 + (len = old_off) * sizeof(u2)); 121.132 + fillp += len; 121.133 + // first part of src 121.134 + Copy::conjoint_memory_atomic(to_cp->operands()->short_at_addr(0), 121.135 + new_operands->short_at_addr(fillp), 121.136 + (len = from_off) * sizeof(u2)); 121.137 + fillp += len; 121.138 + // second part of dest 121.139 + Copy::conjoint_memory_atomic(to_cp->operands()->short_at_addr(old_off), 121.140 + new_operands->short_at_addr(fillp), 121.141 + (len = old_len - old_off) * sizeof(u2)); 121.142 + fillp += len; 121.143 + // second part of src 121.144 + Copy::conjoint_memory_atomic(to_cp->operands()->short_at_addr(from_off), 121.145 + new_operands->short_at_addr(fillp), 121.146 + (len = from_len - from_off) * sizeof(u2)); 121.147 + fillp += len; 121.148 + assert(fillp == new_operands->length(), ""); 121.149 + 121.150 + // Adjust indexes in the first part of the copied operands array. 121.151 + for (int j = 0; j < from_oplen; j++) { 121.152 + int offset = operand_offset_at(new_operands(), old_oplen + j); 121.153 + assert(offset == operand_offset_at(from_cp->operands(), j), "correct copy"); 121.154 + offset += old_len; // every new tuple is preceded by old_len extra u2's 121.155 + operand_offset_at_put(new_operands(), old_oplen + j, offset); 121.156 + } 121.157 + 121.158 + // replace target operands array with combined array 121.159 + to_cp->set_operands(new_operands()); 121.160 + } 121.161 + } 121.162 + 121.163 } // end copy_cp_to() 121.164 121.165 121.166 // Copy this constant pool's entry at from_i to the constant pool 121.167 // to_cp's entry at to_i. 
121.168 -void constantPoolOopDesc::copy_entry_to(int from_i, constantPoolHandle to_cp, 121.169 - int to_i, TRAPS) { 121.170 +void constantPoolOopDesc::copy_entry_to(constantPoolHandle from_cp, int from_i, 121.171 + constantPoolHandle to_cp, int to_i, 121.172 + TRAPS) { 121.173 121.174 - switch (tag_at(from_i).value()) { 121.175 + int tag = from_cp->tag_at(from_i).value(); 121.176 + switch (tag) { 121.177 case JVM_CONSTANT_Class: 121.178 { 121.179 - klassOop k = klass_at(from_i, CHECK); 121.180 + klassOop k = from_cp->klass_at(from_i, CHECK); 121.181 to_cp->klass_at_put(to_i, k); 121.182 } break; 121.183 121.184 case JVM_CONSTANT_ClassIndex: 121.185 { 121.186 - jint ki = klass_index_at(from_i); 121.187 + jint ki = from_cp->klass_index_at(from_i); 121.188 to_cp->klass_index_at_put(to_i, ki); 121.189 } break; 121.190 121.191 case JVM_CONSTANT_Double: 121.192 { 121.193 - jdouble d = double_at(from_i); 121.194 + jdouble d = from_cp->double_at(from_i); 121.195 to_cp->double_at_put(to_i, d); 121.196 // double takes two constant pool entries so init second entry's tag 121.197 to_cp->tag_at_put(to_i + 1, JVM_CONSTANT_Invalid); 121.198 @@ -1067,33 +1098,33 @@ 121.199 121.200 case JVM_CONSTANT_Fieldref: 121.201 { 121.202 - int class_index = uncached_klass_ref_index_at(from_i); 121.203 - int name_and_type_index = uncached_name_and_type_ref_index_at(from_i); 121.204 + int class_index = from_cp->uncached_klass_ref_index_at(from_i); 121.205 + int name_and_type_index = from_cp->uncached_name_and_type_ref_index_at(from_i); 121.206 to_cp->field_at_put(to_i, class_index, name_and_type_index); 121.207 } break; 121.208 121.209 case JVM_CONSTANT_Float: 121.210 { 121.211 - jfloat f = float_at(from_i); 121.212 + jfloat f = from_cp->float_at(from_i); 121.213 to_cp->float_at_put(to_i, f); 121.214 } break; 121.215 121.216 case JVM_CONSTANT_Integer: 121.217 { 121.218 - jint i = int_at(from_i); 121.219 + jint i = from_cp->int_at(from_i); 121.220 to_cp->int_at_put(to_i, i); 121.221 } break; 
121.222 121.223 case JVM_CONSTANT_InterfaceMethodref: 121.224 { 121.225 - int class_index = uncached_klass_ref_index_at(from_i); 121.226 - int name_and_type_index = uncached_name_and_type_ref_index_at(from_i); 121.227 + int class_index = from_cp->uncached_klass_ref_index_at(from_i); 121.228 + int name_and_type_index = from_cp->uncached_name_and_type_ref_index_at(from_i); 121.229 to_cp->interface_method_at_put(to_i, class_index, name_and_type_index); 121.230 } break; 121.231 121.232 case JVM_CONSTANT_Long: 121.233 { 121.234 - jlong l = long_at(from_i); 121.235 + jlong l = from_cp->long_at(from_i); 121.236 to_cp->long_at_put(to_i, l); 121.237 // long takes two constant pool entries so init second entry's tag 121.238 to_cp->tag_at_put(to_i + 1, JVM_CONSTANT_Invalid); 121.239 @@ -1101,39 +1132,39 @@ 121.240 121.241 case JVM_CONSTANT_Methodref: 121.242 { 121.243 - int class_index = uncached_klass_ref_index_at(from_i); 121.244 - int name_and_type_index = uncached_name_and_type_ref_index_at(from_i); 121.245 + int class_index = from_cp->uncached_klass_ref_index_at(from_i); 121.246 + int name_and_type_index = from_cp->uncached_name_and_type_ref_index_at(from_i); 121.247 to_cp->method_at_put(to_i, class_index, name_and_type_index); 121.248 } break; 121.249 121.250 case JVM_CONSTANT_NameAndType: 121.251 { 121.252 - int name_ref_index = name_ref_index_at(from_i); 121.253 - int signature_ref_index = signature_ref_index_at(from_i); 121.254 + int name_ref_index = from_cp->name_ref_index_at(from_i); 121.255 + int signature_ref_index = from_cp->signature_ref_index_at(from_i); 121.256 to_cp->name_and_type_at_put(to_i, name_ref_index, signature_ref_index); 121.257 } break; 121.258 121.259 case JVM_CONSTANT_String: 121.260 { 121.261 - oop s = string_at(from_i, CHECK); 121.262 + oop s = from_cp->string_at(from_i, CHECK); 121.263 to_cp->string_at_put(to_i, s); 121.264 } break; 121.265 121.266 case JVM_CONSTANT_StringIndex: 121.267 { 121.268 - jint si = string_index_at(from_i); 121.269 + 
jint si = from_cp->string_index_at(from_i); 121.270 to_cp->string_index_at_put(to_i, si); 121.271 } break; 121.272 121.273 case JVM_CONSTANT_UnresolvedClass: 121.274 { 121.275 - symbolOop k = unresolved_klass_at(from_i); 121.276 + symbolOop k = from_cp->unresolved_klass_at(from_i); 121.277 to_cp->unresolved_klass_at_put(to_i, k); 121.278 } break; 121.279 121.280 case JVM_CONSTANT_UnresolvedClassInError: 121.281 { 121.282 - symbolOop k = unresolved_klass_at(from_i); 121.283 + symbolOop k = from_cp->unresolved_klass_at(from_i); 121.284 to_cp->unresolved_klass_at_put(to_i, k); 121.285 to_cp->tag_at_put(to_i, JVM_CONSTANT_UnresolvedClassInError); 121.286 } break; 121.287 @@ -1141,51 +1172,42 @@ 121.288 121.289 case JVM_CONSTANT_UnresolvedString: 121.290 { 121.291 - symbolOop s = unresolved_string_at(from_i); 121.292 + symbolOop s = from_cp->unresolved_string_at(from_i); 121.293 to_cp->unresolved_string_at_put(to_i, s); 121.294 } break; 121.295 121.296 case JVM_CONSTANT_Utf8: 121.297 { 121.298 - symbolOop s = symbol_at(from_i); 121.299 + symbolOop s = from_cp->symbol_at(from_i); 121.300 to_cp->symbol_at_put(to_i, s); 121.301 } break; 121.302 121.303 case JVM_CONSTANT_MethodType: 121.304 { 121.305 - jint k = method_type_index_at(from_i); 121.306 + jint k = from_cp->method_type_index_at(from_i); 121.307 to_cp->method_type_index_at_put(to_i, k); 121.308 } break; 121.309 121.310 case JVM_CONSTANT_MethodHandle: 121.311 { 121.312 - int k1 = method_handle_ref_kind_at(from_i); 121.313 - int k2 = method_handle_index_at(from_i); 121.314 + int k1 = from_cp->method_handle_ref_kind_at(from_i); 121.315 + int k2 = from_cp->method_handle_index_at(from_i); 121.316 to_cp->method_handle_index_at_put(to_i, k1, k2); 121.317 } break; 121.318 121.319 + case JVM_CONSTANT_InvokeDynamicTrans: 121.320 + { 121.321 + int k1 = from_cp->invoke_dynamic_bootstrap_method_ref_index_at(from_i); 121.322 + int k2 = from_cp->invoke_dynamic_name_and_type_ref_index_at(from_i); 121.323 + 
to_cp->invoke_dynamic_trans_at_put(to_i, k1, k2); 121.324 + } break; 121.325 + 121.326 case JVM_CONSTANT_InvokeDynamic: 121.327 { 121.328 - int op_count = multi_operand_count_at(from_i); 121.329 - int fillp = to_cp->multi_operand_buffer_fill_pointer(); 121.330 - int to_op_base = fillp - _multi_operand_count_offset; // fillp is count offset; get to base 121.331 - to_cp->multi_operand_buffer_grow(to_op_base + op_count, CHECK); 121.332 - to_cp->operands()->int_at_put(fillp++, op_count); 121.333 - assert(fillp == to_op_base + _multi_operand_base_offset, "just wrote count, will now write args"); 121.334 - for (int op_i = 0; op_i < op_count; op_i++) { 121.335 - int op = multi_operand_ref_at(from_i, op_i); 121.336 - to_cp->operands()->int_at_put(fillp++, op); 121.337 - } 121.338 - assert(fillp <= to_cp->operands()->length(), "oob"); 121.339 - to_cp->set_multi_operand_buffer_fill_pointer(fillp); 121.340 - to_cp->invoke_dynamic_at_put(to_i, to_op_base, op_count); 121.341 -#ifdef ASSERT 121.342 - int k1 = invoke_dynamic_bootstrap_method_ref_index_at(from_i); 121.343 - int k2 = invoke_dynamic_name_and_type_ref_index_at(from_i); 121.344 - int k3 = invoke_dynamic_argument_count_at(from_i); 121.345 - assert(to_cp->check_invoke_dynamic_at(to_i, k1, k2, k3), 121.346 - "indy structure is OK"); 121.347 -#endif //ASSERT 121.348 + int k1 = from_cp->invoke_dynamic_bootstrap_specifier_index(from_i); 121.349 + int k2 = from_cp->invoke_dynamic_name_and_type_ref_index_at(from_i); 121.350 + k1 += operand_array_length(to_cp->operands()); // to_cp might already have operands 121.351 + to_cp->invoke_dynamic_at_put(to_i, k1, k2); 121.352 } break; 121.353 121.354 // Invalid is used as the tag for the second constant pool entry 121.355 @@ -1195,7 +1217,6 @@ 121.356 121.357 default: 121.358 { 121.359 - jbyte bad_value = tag_at(from_i).value(); // leave a breadcrumb 121.360 ShouldNotReachHere(); 121.361 } break; 121.362 } 121.363 @@ -1406,8 +1427,9 @@ 121.364 return 5; 121.365 121.366 case 
JVM_CONSTANT_InvokeDynamic: 121.367 - // u1 tag, u2 bsm, u2 nt, u2 argc, u2 argv[argc] 121.368 - return 7 + 2 * invoke_dynamic_argument_count_at(idx); 121.369 + case JVM_CONSTANT_InvokeDynamicTrans: 121.370 + // u1 tag, u2 bsm, u2 nt 121.371 + return 5; 121.372 121.373 case JVM_CONSTANT_Long: 121.374 case JVM_CONSTANT_Double: 121.375 @@ -1620,19 +1642,15 @@ 121.376 DBG(printf("JVM_CONSTANT_MethodType: %hd", idx1)); 121.377 break; 121.378 } 121.379 + case JVM_CONSTANT_InvokeDynamicTrans: 121.380 case JVM_CONSTANT_InvokeDynamic: { 121.381 - *bytes = JVM_CONSTANT_InvokeDynamic; 121.382 - idx1 = invoke_dynamic_bootstrap_method_ref_index_at(idx); 121.383 - idx2 = invoke_dynamic_name_and_type_ref_index_at(idx); 121.384 - int argc = invoke_dynamic_argument_count_at(idx); 121.385 + *bytes = tag; 121.386 + idx1 = extract_low_short_from_int(*int_at_addr(idx)); 121.387 + idx2 = extract_high_short_from_int(*int_at_addr(idx)); 121.388 + assert(idx2 == invoke_dynamic_name_and_type_ref_index_at(idx), "correct half of u4"); 121.389 Bytes::put_Java_u2((address) (bytes+1), idx1); 121.390 Bytes::put_Java_u2((address) (bytes+3), idx2); 121.391 - Bytes::put_Java_u2((address) (bytes+5), argc); 121.392 - for (int arg_i = 0; arg_i < argc; arg_i++) { 121.393 - int arg = invoke_dynamic_argument_index_at(idx, arg_i); 121.394 - Bytes::put_Java_u2((address) (bytes+7+2*arg_i), arg); 121.395 - } 121.396 - DBG(printf("JVM_CONSTANT_InvokeDynamic: %hd %hd [%d]", idx1, idx2, argc)); 121.397 + DBG(printf("JVM_CONSTANT_InvokeDynamic: %hd %hd", idx1, idx2)); 121.398 break; 121.399 } 121.400 }
122.1 --- a/src/share/vm/oops/constantPoolOop.hpp Mon Dec 27 09:30:20 2010 -0500 122.2 +++ b/src/share/vm/oops/constantPoolOop.hpp Mon Dec 27 09:56:29 2010 -0500 122.3 @@ -179,28 +179,16 @@ 122.4 *int_at_addr(which) = ref_index; 122.5 } 122.6 122.7 - void invoke_dynamic_at_put(int which, int operand_base, int operand_count) { 122.8 + void invoke_dynamic_at_put(int which, int bootstrap_specifier_index, int name_and_type_index) { 122.9 tag_at_put(which, JVM_CONSTANT_InvokeDynamic); 122.10 - *int_at_addr(which) = operand_base; // this is the real information 122.11 + *int_at_addr(which) = ((jint) name_and_type_index<<16) | bootstrap_specifier_index; 122.12 } 122.13 -#ifdef ASSERT 122.14 - bool check_invoke_dynamic_at(int which, 122.15 - int bootstrap_method_index, 122.16 - int name_and_type_index, 122.17 - int argument_count) { 122.18 - assert(invoke_dynamic_bootstrap_method_ref_index_at(which) == bootstrap_method_index, 122.19 - "already stored by caller"); 122.20 - assert(invoke_dynamic_name_and_type_ref_index_at(which) == name_and_type_index, 122.21 - "already stored by caller"); 122.22 - assert(invoke_dynamic_argument_count_at(which) == argument_count, 122.23 - "consistent argument count"); 122.24 - if (argument_count != 0) { 122.25 - invoke_dynamic_argument_index_at(which, 0); 122.26 - invoke_dynamic_argument_index_at(which, argument_count - 1); 122.27 - } 122.28 - return true; 122.29 + 122.30 + void invoke_dynamic_trans_at_put(int which, int bootstrap_method_index, int name_and_type_index) { 122.31 + tag_at_put(which, JVM_CONSTANT_InvokeDynamicTrans); 122.32 + *int_at_addr(which) = ((jint) name_and_type_index<<16) | bootstrap_method_index; 122.33 + assert(AllowTransitionalJSR292, ""); 122.34 } 122.35 -#endif //ASSERT 122.36 122.37 // Temporary until actual use 122.38 void unresolved_string_at_put(int which, symbolOop s) { 122.39 @@ -443,75 +431,90 @@ 122.40 return symbol_at(sym); 122.41 } 122.42 122.43 - private: 122.44 - // some nodes (InvokeDynamic) have a 
variable number of operands, each a u2 value 122.45 - enum { _multi_operand_count_offset = -1, 122.46 - _multi_operand_base_offset = 0, 122.47 - _multi_operand_buffer_fill_pointer_offset = 0 // shared at front of operands array 122.48 - }; 122.49 - int multi_operand_buffer_length() { 122.50 - return operands() == NULL ? 0 : operands()->length(); 122.51 + int invoke_dynamic_name_and_type_ref_index_at(int which) { 122.52 + assert(tag_at(which).is_invoke_dynamic(), "Corrupted constant pool"); 122.53 + return extract_high_short_from_int(*int_at_addr(which)); 122.54 } 122.55 - int multi_operand_buffer_fill_pointer() { 122.56 - return operands() == NULL 122.57 - ? _multi_operand_buffer_fill_pointer_offset + 1 122.58 - : operands()->int_at(_multi_operand_buffer_fill_pointer_offset); 122.59 + int invoke_dynamic_bootstrap_specifier_index(int which) { 122.60 + assert(tag_at(which).value() == JVM_CONSTANT_InvokeDynamic, "Corrupted constant pool"); 122.61 + return extract_low_short_from_int(*int_at_addr(which)); 122.62 } 122.63 - void multi_operand_buffer_grow(int min_length, TRAPS); 122.64 - void set_multi_operand_buffer_fill_pointer(int fillp) { 122.65 - assert(operands() != NULL, ""); 122.66 - operands()->int_at_put(_multi_operand_buffer_fill_pointer_offset, fillp); 122.67 + int invoke_dynamic_operand_base(int which) { 122.68 + int bootstrap_specifier_index = invoke_dynamic_bootstrap_specifier_index(which); 122.69 + return operand_offset_at(operands(), bootstrap_specifier_index); 122.70 } 122.71 - int multi_operand_base_at(int which) { 122.72 - assert(tag_at(which).is_invoke_dynamic(), "Corrupted constant pool"); 122.73 - int op_base = *int_at_addr(which); 122.74 - assert(op_base > _multi_operand_buffer_fill_pointer_offset, "Corrupted operand base"); 122.75 - return op_base; 122.76 + // The first part of the operands array consists of an index into the second part. 122.77 + // Extract a 32-bit index value from the first part. 
122.78 + static int operand_offset_at(typeArrayOop operands, int bootstrap_specifier_index) { 122.79 + int n = (bootstrap_specifier_index * 2); 122.80 + assert(n >= 0 && n+2 <= operands->length(), "oob"); 122.81 + // The first 32-bit index points to the beginning of the second part 122.82 + // of the operands array. Make sure this index is in the first part. 122.83 + DEBUG_ONLY(int second_part = build_int_from_shorts(operands->short_at(0), 122.84 + operands->short_at(1))); 122.85 + assert(second_part == 0 || n+2 <= second_part, "oob (2)"); 122.86 + int offset = build_int_from_shorts(operands->short_at(n+0), 122.87 + operands->short_at(n+1)); 122.88 + // The offset itself must point into the second part of the array. 122.89 + assert(offset == 0 || offset >= second_part && offset <= operands->length(), "oob (3)"); 122.90 + return offset; 122.91 } 122.92 - int multi_operand_count_at(int which) { 122.93 - int op_base = multi_operand_base_at(which); 122.94 - assert((uint)(op_base + _multi_operand_count_offset) < (uint)operands()->length(), "oob"); 122.95 - int count = operands()->int_at(op_base + _multi_operand_count_offset); 122.96 - return count; 122.97 + static void operand_offset_at_put(typeArrayOop operands, int bootstrap_specifier_index, int offset) { 122.98 + int n = bootstrap_specifier_index * 2; 122.99 + assert(n >= 0 && n+2 <= operands->length(), "oob"); 122.100 + operands->short_at_put(n+0, extract_low_short_from_int(offset)); 122.101 + operands->short_at_put(n+1, extract_high_short_from_int(offset)); 122.102 } 122.103 - int multi_operand_ref_at(int which, int i) { 122.104 - int op_base = multi_operand_base_at(which); 122.105 - assert((uint)i < (uint)multi_operand_count_at(which), "oob"); 122.106 - assert((uint)(op_base + _multi_operand_base_offset + i) < (uint)operands()->length(), "oob"); 122.107 - return operands()->int_at(op_base + _multi_operand_base_offset + i); 122.108 - } 122.109 - void set_multi_operand_ref_at(int which, int i, int ref) { 122.110 - 
DEBUG_ONLY(multi_operand_ref_at(which, i)); // trigger asserts 122.111 - int op_base = multi_operand_base_at(which); 122.112 - operands()->int_at_put(op_base + _multi_operand_base_offset + i, ref); 122.113 + static int operand_array_length(typeArrayOop operands) { 122.114 + if (operands == NULL || operands->length() == 0) return 0; 122.115 + int second_part = operand_offset_at(operands, 0); 122.116 + return (second_part / 2); 122.117 } 122.118 122.119 - public: 122.120 - // layout of InvokeDynamic: 122.121 +#ifdef ASSERT 122.122 + // operand tuples fit together exactly, end to end 122.123 + static int operand_limit_at(typeArrayOop operands, int bootstrap_specifier_index) { 122.124 + int nextidx = bootstrap_specifier_index + 1; 122.125 + if (nextidx == operand_array_length(operands)) 122.126 + return operands->length(); 122.127 + else 122.128 + return operand_offset_at(operands, nextidx); 122.129 + } 122.130 + int invoke_dynamic_operand_limit(int which) { 122.131 + int bootstrap_specifier_index = invoke_dynamic_bootstrap_specifier_index(which); 122.132 + return operand_limit_at(operands(), bootstrap_specifier_index); 122.133 + } 122.134 +#endif //ASSERT 122.135 + 122.136 + // layout of InvokeDynamic bootstrap method specifier (in second part of operands array): 122.137 enum { 122.138 _indy_bsm_offset = 0, // CONSTANT_MethodHandle bsm 122.139 - _indy_nt_offset = 1, // CONSTANT_NameAndType descr 122.140 - _indy_argc_offset = 2, // u2 argc 122.141 - _indy_argv_offset = 3 // u2 argv[argc] 122.142 + _indy_argc_offset = 1, // u2 argc 122.143 + _indy_argv_offset = 2 // u2 argv[argc] 122.144 }; 122.145 int invoke_dynamic_bootstrap_method_ref_index_at(int which) { 122.146 assert(tag_at(which).is_invoke_dynamic(), "Corrupted constant pool"); 122.147 - return multi_operand_ref_at(which, _indy_bsm_offset); 122.148 - } 122.149 - int invoke_dynamic_name_and_type_ref_index_at(int which) { 122.150 - assert(tag_at(which).is_invoke_dynamic(), "Corrupted constant pool"); 122.151 - 
return multi_operand_ref_at(which, _indy_nt_offset); 122.152 + if (tag_at(which).value() == JVM_CONSTANT_InvokeDynamicTrans) 122.153 + return extract_low_short_from_int(*int_at_addr(which)); 122.154 + int op_base = invoke_dynamic_operand_base(which); 122.155 + return operands()->short_at(op_base + _indy_bsm_offset); 122.156 } 122.157 int invoke_dynamic_argument_count_at(int which) { 122.158 assert(tag_at(which).is_invoke_dynamic(), "Corrupted constant pool"); 122.159 - int argc = multi_operand_ref_at(which, _indy_argc_offset); 122.160 - DEBUG_ONLY(int op_count = multi_operand_count_at(which)); 122.161 - assert(_indy_argv_offset + argc == op_count, "consistent inner and outer counts"); 122.162 + if (tag_at(which).value() == JVM_CONSTANT_InvokeDynamicTrans) 122.163 + return 0; 122.164 + int op_base = invoke_dynamic_operand_base(which); 122.165 + int argc = operands()->short_at(op_base + _indy_argc_offset); 122.166 + DEBUG_ONLY(int end_offset = op_base + _indy_argv_offset + argc; 122.167 + int next_offset = invoke_dynamic_operand_limit(which)); 122.168 + assert(end_offset == next_offset, "matched ending"); 122.169 return argc; 122.170 } 122.171 int invoke_dynamic_argument_index_at(int which, int j) { 122.172 - assert((uint)j < (uint)invoke_dynamic_argument_count_at(which), "oob"); 122.173 - return multi_operand_ref_at(which, _indy_argv_offset + j); 122.174 + int op_base = invoke_dynamic_operand_base(which); 122.175 + DEBUG_ONLY(int argc = operands()->short_at(op_base + _indy_argc_offset)); 122.176 + assert((uint)j < (uint)argc, "oob"); 122.177 + return operands()->short_at(op_base + _indy_argv_offset + j); 122.178 } 122.179 122.180 // The following methods (name/signature/klass_ref_at, klass_ref_at_noresolve, 122.181 @@ -659,9 +662,12 @@ 122.182 public: 122.183 // Merging constantPoolOop support: 122.184 bool compare_entry_to(int index1, constantPoolHandle cp2, int index2, TRAPS); 122.185 - void copy_cp_to(int start_i, int end_i, constantPoolHandle to_cp, int to_i, 
122.186 - TRAPS); 122.187 - void copy_entry_to(int from_i, constantPoolHandle to_cp, int to_i, TRAPS); 122.188 + void copy_cp_to(int start_i, int end_i, constantPoolHandle to_cp, int to_i, TRAPS) { 122.189 + constantPoolHandle h_this(THREAD, this); 122.190 + copy_cp_to_impl(h_this, start_i, end_i, to_cp, to_i, THREAD); 122.191 + } 122.192 + static void copy_cp_to_impl(constantPoolHandle from_cp, int start_i, int end_i, constantPoolHandle to_cp, int to_i, TRAPS); 122.193 + static void copy_entry_to(constantPoolHandle from_cp, int from_i, constantPoolHandle to_cp, int to_i, TRAPS); 122.194 int find_matching_entry(int pattern_i, constantPoolHandle search_cp, TRAPS); 122.195 int orig_length() const { return _orig_length; } 122.196 void set_orig_length(int orig_length) { _orig_length = orig_length; }
123.1 --- a/src/share/vm/opto/c2_globals.hpp Mon Dec 27 09:30:20 2010 -0500 123.2 +++ b/src/share/vm/opto/c2_globals.hpp Mon Dec 27 09:56:29 2010 -0500 123.3 @@ -284,6 +284,9 @@ 123.4 develop(bool, SparcV9RegsHiBitsZero, true, \ 123.5 "Assume Sparc V9 I&L registers on V8+ systems are zero-extended") \ 123.6 \ 123.7 + product(bool, UseRDPCForConstantTableBase, false, \ 123.8 + "Use Sparc RDPC instruction for the constant table base.") \ 123.9 + \ 123.10 develop(intx, PrintIdealGraphLevel, 0, \ 123.11 "Print ideal graph to XML file / network interface. " \ 123.12 "By default attempts to connect to the visualizer on a socket.") \
124.1 --- a/src/share/vm/opto/chaitin.cpp Mon Dec 27 09:30:20 2010 -0500 124.2 +++ b/src/share/vm/opto/chaitin.cpp Mon Dec 27 09:56:29 2010 -0500 124.3 @@ -1782,7 +1782,7 @@ 124.4 for(uint i2 = 1; i2 < _maxlrg; i2++ ) { 124.5 tty->print("L%d: ",i2); 124.6 if( i2 < _ifg->_maxlrg ) lrgs(i2).dump( ); 124.7 - else tty->print("new LRG"); 124.8 + else tty->print_cr("new LRG"); 124.9 } 124.10 tty->print_cr(""); 124.11 124.12 @@ -1993,7 +1993,7 @@ 124.13 } 124.14 124.15 //------------------------------dump_lrg--------------------------------------- 124.16 -void PhaseChaitin::dump_lrg( uint lidx ) const { 124.17 +void PhaseChaitin::dump_lrg( uint lidx, bool defs_only ) const { 124.18 tty->print_cr("---dump of L%d---",lidx); 124.19 124.20 if( _ifg ) { 124.21 @@ -2002,9 +2002,11 @@ 124.22 return; 124.23 } 124.24 tty->print("L%d: ",lidx); 124.25 - lrgs(lidx).dump( ); 124.26 + if( lidx < _ifg->_maxlrg ) lrgs(lidx).dump( ); 124.27 + else tty->print_cr("new LRG"); 124.28 } 124.29 - if( _ifg ) { tty->print("Neighbors: %d - ", _ifg->neighbor_cnt(lidx)); 124.30 + if( _ifg && lidx < _ifg->_maxlrg) { 124.31 + tty->print("Neighbors: %d - ", _ifg->neighbor_cnt(lidx)); 124.32 _ifg->neighbors(lidx)->dump(); 124.33 tty->cr(); 124.34 } 124.35 @@ -2024,16 +2026,18 @@ 124.36 dump(n); 124.37 continue; 124.38 } 124.39 - uint cnt = n->req(); 124.40 - for( uint k = 1; k < cnt; k++ ) { 124.41 - Node *m = n->in(k); 124.42 - if (!m) continue; // be robust in the dumper 124.43 - if( Find_const(m) == lidx ) { 124.44 - if( !dump_once++ ) { 124.45 - tty->cr(); 124.46 - b->dump_head( &_cfg._bbs ); 124.47 + if (!defs_only) { 124.48 + uint cnt = n->req(); 124.49 + for( uint k = 1; k < cnt; k++ ) { 124.50 + Node *m = n->in(k); 124.51 + if (!m) continue; // be robust in the dumper 124.52 + if( Find_const(m) == lidx ) { 124.53 + if( !dump_once++ ) { 124.54 + tty->cr(); 124.55 + b->dump_head( &_cfg._bbs ); 124.56 + } 124.57 + dump(n); 124.58 } 124.59 - dump(n); 124.60 } 124.61 } 124.62 }
125.1 --- a/src/share/vm/opto/chaitin.hpp Mon Dec 27 09:30:20 2010 -0500 125.2 +++ b/src/share/vm/opto/chaitin.hpp Mon Dec 27 09:56:29 2010 -0500 125.3 @@ -512,7 +512,11 @@ 125.4 void dump( const Block * b ) const; 125.5 void dump_degree_lists() const; 125.6 void dump_simplified() const; 125.7 - void dump_lrg( uint lidx ) const; 125.8 + void dump_lrg( uint lidx, bool defs_only) const; 125.9 + void dump_lrg( uint lidx) const { 125.10 + // dump defs and uses by default 125.11 + dump_lrg(lidx, false); 125.12 + } 125.13 void dump_bb( uint pre_order ) const; 125.14 125.15 // Verify that base pointers and derived pointers are still sane
126.1 --- a/src/share/vm/opto/compile.cpp Mon Dec 27 09:30:20 2010 -0500 126.2 +++ b/src/share/vm/opto/compile.cpp Mon Dec 27 09:56:29 2010 -0500 126.3 @@ -75,6 +75,18 @@ 126.4 # include "adfiles/ad_zero.hpp" 126.5 #endif 126.6 126.7 + 126.8 +// -------------------- Compile::mach_constant_base_node ----------------------- 126.9 +// Constant table base node singleton. 126.10 +MachConstantBaseNode* Compile::mach_constant_base_node() { 126.11 + if (_mach_constant_base_node == NULL) { 126.12 + _mach_constant_base_node = new (C) MachConstantBaseNode(); 126.13 + _mach_constant_base_node->add_req(C->root()); 126.14 + } 126.15 + return _mach_constant_base_node; 126.16 +} 126.17 + 126.18 + 126.19 /// Support for intrinsics. 126.20 126.21 // Return the index at which m must be inserted (or already exists). 126.22 @@ -432,13 +444,14 @@ 126.23 } 126.24 126.25 126.26 -void Compile::init_scratch_buffer_blob() { 126.27 - if( scratch_buffer_blob() != NULL ) return; 126.28 +void Compile::init_scratch_buffer_blob(int const_size) { 126.29 + if (scratch_buffer_blob() != NULL) return; 126.30 126.31 // Construct a temporary CodeBuffer to have it construct a BufferBlob 126.32 // Cache this BufferBlob for this compile. 126.33 ResourceMark rm; 126.34 - int size = (MAX_inst_size + MAX_stubs_size + MAX_const_size); 126.35 + _scratch_const_size = const_size; 126.36 + int size = (MAX_inst_size + MAX_stubs_size + _scratch_const_size); 126.37 BufferBlob* blob = BufferBlob::create("Compile::scratch_buffer", size); 126.38 // Record the buffer blob for next time. 
126.39 set_scratch_buffer_blob(blob); 126.40 @@ -455,9 +468,19 @@ 126.41 } 126.42 126.43 126.44 +void Compile::clear_scratch_buffer_blob() { 126.45 + assert(scratch_buffer_blob(), "no BufferBlob set"); 126.46 + set_scratch_buffer_blob(NULL); 126.47 + set_scratch_locs_memory(NULL); 126.48 +} 126.49 + 126.50 + 126.51 //-----------------------scratch_emit_size------------------------------------- 126.52 // Helper function that computes size by emitting code 126.53 uint Compile::scratch_emit_size(const Node* n) { 126.54 + // Start scratch_emit_size section. 126.55 + set_in_scratch_emit_size(true); 126.56 + 126.57 // Emit into a trash buffer and count bytes emitted. 126.58 // This is a pretty expensive way to compute a size, 126.59 // but it works well enough if seldom used. 126.60 @@ -476,13 +499,20 @@ 126.61 address blob_end = (address)locs_buf; 126.62 assert(blob->content_contains(blob_end), "sanity"); 126.63 CodeBuffer buf(blob_begin, blob_end - blob_begin); 126.64 - buf.initialize_consts_size(MAX_const_size); 126.65 + buf.initialize_consts_size(_scratch_const_size); 126.66 buf.initialize_stubs_size(MAX_stubs_size); 126.67 assert(locs_buf != NULL, "sanity"); 126.68 - int lsize = MAX_locs_size / 2; 126.69 - buf.insts()->initialize_shared_locs(&locs_buf[0], lsize); 126.70 - buf.stubs()->initialize_shared_locs(&locs_buf[lsize], lsize); 126.71 + int lsize = MAX_locs_size / 3; 126.72 + buf.consts()->initialize_shared_locs(&locs_buf[lsize * 0], lsize); 126.73 + buf.insts()->initialize_shared_locs( &locs_buf[lsize * 1], lsize); 126.74 + buf.stubs()->initialize_shared_locs( &locs_buf[lsize * 2], lsize); 126.75 + 126.76 + // Do the emission. 126.77 n->emit(buf, this->regalloc()); 126.78 + 126.79 + // End scratch_emit_size section. 
126.80 + set_in_scratch_emit_size(false); 126.81 + 126.82 return buf.insts_size(); 126.83 } 126.84 126.85 @@ -516,10 +546,13 @@ 126.86 _orig_pc_slot(0), 126.87 _orig_pc_slot_offset_in_bytes(0), 126.88 _has_method_handle_invokes(false), 126.89 + _mach_constant_base_node(NULL), 126.90 _node_bundling_limit(0), 126.91 _node_bundling_base(NULL), 126.92 _java_calls(0), 126.93 _inner_loops(0), 126.94 + _scratch_const_size(-1), 126.95 + _in_scratch_emit_size(false), 126.96 #ifndef PRODUCT 126.97 _trace_opto_output(TraceOptoOutput || method()->has_option("TraceOptoOutput")), 126.98 _printer(IdealGraphPrinter::printer()), 126.99 @@ -553,7 +586,7 @@ 126.100 if (ProfileTraps) { 126.101 // Make sure the method being compiled gets its own MDO, 126.102 // so we can at least track the decompile_count(). 126.103 - method()->build_method_data(); 126.104 + method()->ensure_method_data(); 126.105 } 126.106 126.107 Init(::AliasLevel); 126.108 @@ -783,6 +816,7 @@ 126.109 _failure_reason(NULL), 126.110 _code_buffer("Compile::Fill_buffer"), 126.111 _has_method_handle_invokes(false), 126.112 + _mach_constant_base_node(NULL), 126.113 _node_bundling_limit(0), 126.114 _node_bundling_base(NULL), 126.115 _java_calls(0), 126.116 @@ -2862,3 +2896,207 @@ 126.117 _log->done("phase nodes='%d'", C->unique()); 126.118 } 126.119 } 126.120 + 126.121 +//============================================================================= 126.122 +// Two Constant's are equal when the type and the value are equal. 126.123 +bool Compile::Constant::operator==(const Constant& other) { 126.124 + if (type() != other.type() ) return false; 126.125 + if (can_be_reused() != other.can_be_reused()) return false; 126.126 + // For floating point values we compare the bit pattern. 
126.127 + switch (type()) { 126.128 + case T_FLOAT: return (_value.i == other._value.i); 126.129 + case T_LONG: 126.130 + case T_DOUBLE: return (_value.j == other._value.j); 126.131 + case T_OBJECT: 126.132 + case T_ADDRESS: return (_value.l == other._value.l); 126.133 + case T_VOID: return (_value.l == other._value.l); // jump-table entries 126.134 + default: ShouldNotReachHere(); 126.135 + } 126.136 + return false; 126.137 +} 126.138 + 126.139 +// Emit constants grouped in the following order: 126.140 +static BasicType type_order[] = { 126.141 + T_FLOAT, // 32-bit 126.142 + T_OBJECT, // 32 or 64-bit 126.143 + T_ADDRESS, // 32 or 64-bit 126.144 + T_DOUBLE, // 64-bit 126.145 + T_LONG, // 64-bit 126.146 + T_VOID, // 32 or 64-bit (jump-tables are at the end of the constant table for code emission reasons) 126.147 + T_ILLEGAL 126.148 +}; 126.149 + 126.150 +static int type_to_size_in_bytes(BasicType t) { 126.151 + switch (t) { 126.152 + case T_LONG: return sizeof(jlong ); 126.153 + case T_FLOAT: return sizeof(jfloat ); 126.154 + case T_DOUBLE: return sizeof(jdouble); 126.155 + // We use T_VOID as marker for jump-table entries (labels) which 126.156 + // need an interal word relocation. 126.157 + case T_VOID: 126.158 + case T_ADDRESS: 126.159 + case T_OBJECT: return sizeof(jobject); 126.160 + } 126.161 + 126.162 + ShouldNotReachHere(); 126.163 + return -1; 126.164 +} 126.165 + 126.166 +void Compile::ConstantTable::calculate_offsets_and_size() { 126.167 + int size = 0; 126.168 + for (int t = 0; type_order[t] != T_ILLEGAL; t++) { 126.169 + BasicType type = type_order[t]; 126.170 + 126.171 + for (int i = 0; i < _constants.length(); i++) { 126.172 + Constant con = _constants.at(i); 126.173 + if (con.type() != type) continue; // Skip other types. 126.174 + 126.175 + // Align size for type. 126.176 + int typesize = type_to_size_in_bytes(con.type()); 126.177 + size = align_size_up(size, typesize); 126.178 + 126.179 + // Set offset. 
126.180 + con.set_offset(size); 126.181 + _constants.at_put(i, con); 126.182 + 126.183 + // Add type size. 126.184 + size = size + typesize; 126.185 + } 126.186 + } 126.187 + 126.188 + // Align size up to the next section start (which is insts; see 126.189 + // CodeBuffer::align_at_start). 126.190 + assert(_size == -1, "already set?"); 126.191 + _size = align_size_up(size, CodeEntryAlignment); 126.192 + 126.193 + if (Matcher::constant_table_absolute_addressing) { 126.194 + set_table_base_offset(0); // No table base offset required 126.195 + } else { 126.196 + if (UseRDPCForConstantTableBase) { 126.197 + // table base offset is set in MachConstantBaseNode::emit 126.198 + } else { 126.199 + // When RDPC is not used, the table base is set into the middle of 126.200 + // the constant table. 126.201 + int half_size = _size / 2; 126.202 + assert(half_size * 2 == _size, "sanity"); 126.203 + set_table_base_offset(-half_size); 126.204 + } 126.205 + } 126.206 +} 126.207 + 126.208 +void Compile::ConstantTable::emit(CodeBuffer& cb) { 126.209 + MacroAssembler _masm(&cb); 126.210 + for (int t = 0; type_order[t] != T_ILLEGAL; t++) { 126.211 + BasicType type = type_order[t]; 126.212 + 126.213 + for (int i = 0; i < _constants.length(); i++) { 126.214 + Constant con = _constants.at(i); 126.215 + if (con.type() != type) continue; // Skip other types. 
126.216 + 126.217 + address constant_addr; 126.218 + switch (con.type()) { 126.219 + case T_LONG: constant_addr = _masm.long_constant( con.get_jlong() ); break; 126.220 + case T_FLOAT: constant_addr = _masm.float_constant( con.get_jfloat() ); break; 126.221 + case T_DOUBLE: constant_addr = _masm.double_constant(con.get_jdouble()); break; 126.222 + case T_OBJECT: { 126.223 + jobject obj = con.get_jobject(); 126.224 + int oop_index = _masm.oop_recorder()->find_index(obj); 126.225 + constant_addr = _masm.address_constant((address) obj, oop_Relocation::spec(oop_index)); 126.226 + break; 126.227 + } 126.228 + case T_ADDRESS: { 126.229 + address addr = (address) con.get_jobject(); 126.230 + constant_addr = _masm.address_constant(addr); 126.231 + break; 126.232 + } 126.233 + // We use T_VOID as marker for jump-table entries (labels) which 126.234 + // need an interal word relocation. 126.235 + case T_VOID: { 126.236 + // Write a dummy word. The real value is filled in later 126.237 + // in fill_jump_table_in_constant_table. 
126.238 + address addr = (address) con.get_jobject(); 126.239 + constant_addr = _masm.address_constant(addr); 126.240 + break; 126.241 + } 126.242 + default: ShouldNotReachHere(); 126.243 + } 126.244 + assert(constant_addr != NULL, "consts section too small"); 126.245 + assert((constant_addr - _masm.code()->consts()->start()) == con.offset(), err_msg("must be: %d == %d", constant_addr - _masm.code()->consts()->start(), con.offset())); 126.246 + } 126.247 + } 126.248 +} 126.249 + 126.250 +int Compile::ConstantTable::find_offset(Constant& con) const { 126.251 + int idx = _constants.find(con); 126.252 + assert(idx != -1, "constant must be in constant table"); 126.253 + int offset = _constants.at(idx).offset(); 126.254 + assert(offset != -1, "constant table not emitted yet?"); 126.255 + return offset; 126.256 +} 126.257 + 126.258 +void Compile::ConstantTable::add(Constant& con) { 126.259 + if (con.can_be_reused()) { 126.260 + int idx = _constants.find(con); 126.261 + if (idx != -1 && _constants.at(idx).can_be_reused()) { 126.262 + return; 126.263 + } 126.264 + } 126.265 + (void) _constants.append(con); 126.266 +} 126.267 + 126.268 +Compile::Constant Compile::ConstantTable::add(BasicType type, jvalue value) { 126.269 + Constant con(type, value); 126.270 + add(con); 126.271 + return con; 126.272 +} 126.273 + 126.274 +Compile::Constant Compile::ConstantTable::add(MachOper* oper) { 126.275 + jvalue value; 126.276 + BasicType type = oper->type()->basic_type(); 126.277 + switch (type) { 126.278 + case T_LONG: value.j = oper->constantL(); break; 126.279 + case T_FLOAT: value.f = oper->constantF(); break; 126.280 + case T_DOUBLE: value.d = oper->constantD(); break; 126.281 + case T_OBJECT: 126.282 + case T_ADDRESS: value.l = (jobject) oper->constant(); break; 126.283 + default: ShouldNotReachHere(); 126.284 + } 126.285 + return add(type, value); 126.286 +} 126.287 + 126.288 +Compile::Constant Compile::ConstantTable::allocate_jump_table(MachConstantNode* n) { 126.289 + jvalue 
value; 126.290 + // We can use the node pointer here to identify the right jump-table 126.291 + // as this method is called from Compile::Fill_buffer right before 126.292 + // the MachNodes are emitted and the jump-table is filled (means the 126.293 + // MachNode pointers do not change anymore). 126.294 + value.l = (jobject) n; 126.295 + Constant con(T_VOID, value, false); // Labels of a jump-table cannot be reused. 126.296 + for (uint i = 0; i < n->outcnt(); i++) { 126.297 + add(con); 126.298 + } 126.299 + return con; 126.300 +} 126.301 + 126.302 +void Compile::ConstantTable::fill_jump_table(CodeBuffer& cb, MachConstantNode* n, GrowableArray<Label*> labels) const { 126.303 + // If called from Compile::scratch_emit_size do nothing. 126.304 + if (Compile::current()->in_scratch_emit_size()) return; 126.305 + 126.306 + assert(labels.is_nonempty(), "must be"); 126.307 + assert((uint) labels.length() == n->outcnt(), err_msg("must be equal: %d == %d", labels.length(), n->outcnt())); 126.308 + 126.309 + // Since MachConstantNode::constant_offset() also contains 126.310 + // table_base_offset() we need to subtract the table_base_offset() 126.311 + // to get the plain offset into the constant table. 126.312 + int offset = n->constant_offset() - table_base_offset(); 126.313 + 126.314 + MacroAssembler _masm(&cb); 126.315 + address* jump_table_base = (address*) (_masm.code()->consts()->start() + offset); 126.316 + 126.317 + for (int i = 0; i < labels.length(); i++) { 126.318 + address* constant_addr = &jump_table_base[i]; 126.319 + assert(*constant_addr == (address) n, "all jump-table entries must contain node pointer"); 126.320 + *constant_addr = cb.consts()->target(*labels.at(i), (address) constant_addr); 126.321 + cb.consts()->relocate((address) constant_addr, relocInfo::internal_word_type); 126.322 + } 126.323 +}
127.1 --- a/src/share/vm/opto/compile.hpp Mon Dec 27 09:30:20 2010 -0500 127.2 +++ b/src/share/vm/opto/compile.hpp Mon Dec 27 09:56:29 2010 -0500 127.3 @@ -48,7 +48,10 @@ 127.4 class InlineTree; 127.5 class Int_Array; 127.6 class Matcher; 127.7 +class MachConstantNode; 127.8 +class MachConstantBaseNode; 127.9 class MachNode; 127.10 +class MachOper; 127.11 class MachSafePointNode; 127.12 class Node; 127.13 class Node_Array; 127.14 @@ -139,6 +142,81 @@ 127.15 trapHistLength = methodDataOopDesc::_trap_hist_limit 127.16 }; 127.17 127.18 + // Constant entry of the constant table. 127.19 + class Constant { 127.20 + private: 127.21 + BasicType _type; 127.22 + jvalue _value; 127.23 + int _offset; // offset of this constant (in bytes) relative to the constant table base. 127.24 + bool _can_be_reused; // true (default) if the value can be shared with other users. 127.25 + 127.26 + public: 127.27 + Constant() : _type(T_ILLEGAL), _offset(-1), _can_be_reused(true) { _value.l = 0; } 127.28 + Constant(BasicType type, jvalue value, bool can_be_reused = true) : 127.29 + _type(type), 127.30 + _value(value), 127.31 + _offset(-1), 127.32 + _can_be_reused(can_be_reused) 127.33 + {} 127.34 + 127.35 + bool operator==(const Constant& other); 127.36 + 127.37 + BasicType type() const { return _type; } 127.38 + 127.39 + jlong get_jlong() const { return _value.j; } 127.40 + jfloat get_jfloat() const { return _value.f; } 127.41 + jdouble get_jdouble() const { return _value.d; } 127.42 + jobject get_jobject() const { return _value.l; } 127.43 + 127.44 + int offset() const { return _offset; } 127.45 + void set_offset(int offset) { _offset = offset; } 127.46 + 127.47 + bool can_be_reused() const { return _can_be_reused; } 127.48 + }; 127.49 + 127.50 + // Constant table. 127.51 + class ConstantTable { 127.52 + private: 127.53 + GrowableArray<Constant> _constants; // Constants of this table. 127.54 + int _size; // Size in bytes the emitted constant table takes (including padding). 
127.55 + int _table_base_offset; // Offset of the table base that gets added to the constant offsets. 127.56 + 127.57 + public: 127.58 + ConstantTable() : 127.59 + _size(-1), 127.60 + _table_base_offset(-1) // We can use -1 here since the constant table is always bigger than 2 bytes (-(size / 2), see MachConstantBaseNode::emit). 127.61 + {} 127.62 + 127.63 + int size() const { assert(_size != -1, "size not yet calculated"); return _size; } 127.64 + 127.65 + void set_table_base_offset(int x) { assert(_table_base_offset == -1, "set only once"); _table_base_offset = x; } 127.66 + int table_base_offset() const { assert(_table_base_offset != -1, "table base offset not yet set"); return _table_base_offset; } 127.67 + 127.68 + void emit(CodeBuffer& cb); 127.69 + 127.70 + // Returns the offset of the last entry (the top) of the constant table. 127.71 + int top_offset() const { assert(_constants.top().offset() != -1, "constant not yet bound"); return _constants.top().offset(); } 127.72 + 127.73 + void calculate_offsets_and_size(); 127.74 + int find_offset(Constant& con) const; 127.75 + 127.76 + void add(Constant& con); 127.77 + Constant add(BasicType type, jvalue value); 127.78 + Constant add(MachOper* oper); 127.79 + Constant add(jfloat f) { 127.80 + jvalue value; value.f = f; 127.81 + return add(T_FLOAT, value); 127.82 + } 127.83 + Constant add(jdouble d) { 127.84 + jvalue value; value.d = d; 127.85 + return add(T_DOUBLE, value); 127.86 + } 127.87 + 127.88 + // Jump table 127.89 + Constant allocate_jump_table(MachConstantNode* n); 127.90 + void fill_jump_table(CodeBuffer& cb, MachConstantNode* n, GrowableArray<Label*> labels) const; 127.91 + }; 127.92 + 127.93 private: 127.94 // Fixed parameters to this compilation. 127.95 const int _compile_id; 127.96 @@ -212,6 +290,11 @@ 127.97 Node* _recent_alloc_obj; 127.98 Node* _recent_alloc_ctl; 127.99 127.100 + // Constant table 127.101 + ConstantTable _constant_table; // The constant table for this compile. 
127.102 + MachConstantBaseNode* _mach_constant_base_node; // Constant table base node singleton. 127.103 + 127.104 + 127.105 // Blocked array of debugging and profiling information, 127.106 // tracked per node. 127.107 enum { _log2_node_notes_block_size = 8, 127.108 @@ -272,6 +355,8 @@ 127.109 static int _CompiledZap_count; // counter compared against CompileZap[First/Last] 127.110 BufferBlob* _scratch_buffer_blob; // For temporary code buffers. 127.111 relocInfo* _scratch_locs_memory; // For temporary code buffers. 127.112 + int _scratch_const_size; // For temporary code buffers. 127.113 + bool _in_scratch_emit_size; // true when in scratch_emit_size. 127.114 127.115 public: 127.116 // Accessors 127.117 @@ -454,6 +539,12 @@ 127.118 _recent_alloc_obj = obj; 127.119 } 127.120 127.121 + // Constant table 127.122 + ConstantTable& constant_table() { return _constant_table; } 127.123 + 127.124 + MachConstantBaseNode* mach_constant_base_node(); 127.125 + bool has_mach_constant_base_node() const { return _mach_constant_base_node != NULL; } 127.126 + 127.127 // Handy undefined Node 127.128 Node* top() const { return _top; } 127.129 127.130 @@ -605,13 +696,16 @@ 127.131 Dependencies* dependencies() { return env()->dependencies(); } 127.132 static int CompiledZap_count() { return _CompiledZap_count; } 127.133 BufferBlob* scratch_buffer_blob() { return _scratch_buffer_blob; } 127.134 - void init_scratch_buffer_blob(); 127.135 + void init_scratch_buffer_blob(int const_size); 127.136 + void clear_scratch_buffer_blob(); 127.137 void set_scratch_buffer_blob(BufferBlob* b) { _scratch_buffer_blob = b; } 127.138 relocInfo* scratch_locs_memory() { return _scratch_locs_memory; } 127.139 void set_scratch_locs_memory(relocInfo* b) { _scratch_locs_memory = b; } 127.140 127.141 // emit to scratch blob, report resulting size 127.142 uint scratch_emit_size(const Node* n); 127.143 + void set_in_scratch_emit_size(bool x) { _in_scratch_emit_size = x; } 127.144 + bool in_scratch_emit_size() 
const { return _in_scratch_emit_size; } 127.145 127.146 enum ScratchBufferBlob { 127.147 MAX_inst_size = 1024, 127.148 @@ -692,7 +786,7 @@ 127.149 void Fill_buffer(); 127.150 127.151 // Determine which variable sized branches can be shortened 127.152 - void Shorten_branches(Label *labels, int& code_size, int& reloc_size, int& stub_size, int& const_size); 127.153 + void Shorten_branches(Label *labels, int& code_size, int& reloc_size, int& stub_size); 127.154 127.155 // Compute the size of first NumberOfLoopInstrToAlign instructions 127.156 // at the head of a loop.
128.1 --- a/src/share/vm/opto/gcm.cpp Mon Dec 27 09:30:20 2010 -0500 128.2 +++ b/src/share/vm/opto/gcm.cpp Mon Dec 27 09:56:29 2010 -0500 128.3 @@ -89,7 +89,7 @@ 128.4 assert(in0 != NULL, "Only control-dependent"); 128.5 const Node *p = in0->is_block_proj(); 128.6 if (p != NULL && p != n) { // Control from a block projection? 128.7 - assert(!n->pinned() || n->is_SafePointScalarObject(), "only SafePointScalarObject pinned node is expected here"); 128.8 + assert(!n->pinned() || n->is_MachConstantBase() || n->is_SafePointScalarObject(), "only pinned MachConstantBase or SafePointScalarObject node is expected here"); 128.9 // Find trailing Region 128.10 Block *pb = _bbs[in0->_idx]; // Block-projection already has basic block 128.11 uint j = 0;
129.1 --- a/src/share/vm/opto/graphKit.cpp Mon Dec 27 09:30:20 2010 -0500 129.2 +++ b/src/share/vm/opto/graphKit.cpp Mon Dec 27 09:56:29 2010 -0500 129.3 @@ -1841,7 +1841,7 @@ 129.4 129.5 // Note: If ProfileTraps is true, and if a deopt. actually 129.6 // occurs here, the runtime will make sure an MDO exists. There is 129.7 - // no need to call method()->build_method_data() at this point. 129.8 + // no need to call method()->ensure_method_data() at this point. 129.9 129.10 #ifdef ASSERT 129.11 if (!must_throw) {
130.1 --- a/src/share/vm/opto/machnode.cpp Mon Dec 27 09:30:20 2010 -0500 130.2 +++ b/src/share/vm/opto/machnode.cpp Mon Dec 27 09:56:29 2010 -0500 130.3 @@ -489,6 +489,20 @@ 130.4 } 130.5 #endif 130.6 130.7 + 130.8 +//============================================================================= 130.9 +int MachConstantNode::constant_offset() { 130.10 + int offset = _constant.offset(); 130.11 + // Bind the offset lazily. 130.12 + if (offset == -1) { 130.13 + Compile::ConstantTable& constant_table = Compile::current()->constant_table(); 130.14 + offset = constant_table.table_base_offset() + constant_table.find_offset(_constant); 130.15 + _constant.set_offset(offset); 130.16 + } 130.17 + return offset; 130.18 +} 130.19 + 130.20 + 130.21 //============================================================================= 130.22 #ifndef PRODUCT 130.23 void MachNullCheckNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
131.1 --- a/src/share/vm/opto/machnode.hpp Mon Dec 27 09:30:20 2010 -0500 131.2 +++ b/src/share/vm/opto/machnode.hpp Mon Dec 27 09:56:29 2010 -0500 131.3 @@ -231,9 +231,6 @@ 131.4 // Return number of relocatable values contained in this instruction 131.5 virtual int reloc() const { return 0; } 131.6 131.7 - // Return number of words used for double constants in this instruction 131.8 - virtual int const_size() const { return 0; } 131.9 - 131.10 // Hash and compare over operands. Used to do GVN on machine Nodes. 131.11 virtual uint hash() const; 131.12 virtual uint cmp( const Node &n ) const; 131.13 @@ -348,6 +345,65 @@ 131.14 #endif 131.15 }; 131.16 131.17 +//------------------------------MachConstantBaseNode-------------------------- 131.18 +// Machine node that represents the base address of the constant table. 131.19 +class MachConstantBaseNode : public MachIdealNode { 131.20 +public: 131.21 + static const RegMask& _out_RegMask; // We need the out_RegMask statically in MachConstantNode::in_RegMask(). 
131.22 + 131.23 +public: 131.24 + MachConstantBaseNode() : MachIdealNode() { 131.25 + init_class_id(Class_MachConstantBase); 131.26 + } 131.27 + virtual const class Type* bottom_type() const { return TypeRawPtr::NOTNULL; } 131.28 + virtual uint ideal_reg() const { return Op_RegP; } 131.29 + virtual uint oper_input_base() const { return 1; } 131.30 + 131.31 + virtual void emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const; 131.32 + virtual uint size(PhaseRegAlloc* ra_) const; 131.33 + virtual bool pinned() const { return UseRDPCForConstantTableBase; } 131.34 + 131.35 + static const RegMask& static_out_RegMask() { return _out_RegMask; } 131.36 + virtual const RegMask& out_RegMask() const { return static_out_RegMask(); } 131.37 + 131.38 +#ifndef PRODUCT 131.39 + virtual const char* Name() const { return "MachConstantBaseNode"; } 131.40 + virtual void format(PhaseRegAlloc*, outputStream* st) const; 131.41 +#endif 131.42 +}; 131.43 + 131.44 +//------------------------------MachConstantNode------------------------------- 131.45 +// Machine node that holds a constant which is stored in the constant table. 131.46 +class MachConstantNode : public MachNode { 131.47 +protected: 131.48 + Compile::Constant _constant; // This node's constant. 131.49 + 131.50 +public: 131.51 + MachConstantNode() : MachNode() { 131.52 + init_class_id(Class_MachConstant); 131.53 + } 131.54 + 131.55 + virtual void eval_constant(Compile* C) { 131.56 +#ifdef ASSERT 131.57 + tty->print("missing MachConstantNode eval_constant function: "); 131.58 + dump(); 131.59 +#endif 131.60 + ShouldNotCallThis(); 131.61 + } 131.62 + 131.63 + virtual const RegMask &in_RegMask(uint idx) const { 131.64 + if (idx == mach_constant_base_node_input()) 131.65 + return MachConstantBaseNode::static_out_RegMask(); 131.66 + return MachNode::in_RegMask(idx); 131.67 + } 131.68 + 131.69 + // Input edge of MachConstantBaseNode. 
131.70 + uint mach_constant_base_node_input() const { return req() - 1; } 131.71 + 131.72 + int constant_offset(); 131.73 + int constant_offset() const { return ((MachConstantNode*) this)->constant_offset(); } 131.74 +}; 131.75 + 131.76 //------------------------------MachUEPNode----------------------------------- 131.77 // Machine Unvalidated Entry Point Node 131.78 class MachUEPNode : public MachIdealNode {
132.1 --- a/src/share/vm/opto/matcher.hpp Mon Dec 27 09:30:20 2010 -0500 132.2 +++ b/src/share/vm/opto/matcher.hpp Mon Dec 27 09:56:29 2010 -0500 132.3 @@ -365,6 +365,10 @@ 132.4 // registers? True for Intel but false for most RISCs 132.5 static const bool clone_shift_expressions; 132.6 132.7 + // Should constant table entries be accessed with loads using 132.8 + // absolute addressing? True for x86 but false for most RISCs. 132.9 + static const bool constant_table_absolute_addressing; 132.10 + 132.11 static bool narrow_oop_use_complex_address(); 132.12 132.13 // Generate implicit null check for narrow oops if it can fold
133.1 --- a/src/share/vm/opto/memnode.cpp Mon Dec 27 09:30:20 2010 -0500 133.2 +++ b/src/share/vm/opto/memnode.cpp Mon Dec 27 09:56:29 2010 -0500 133.3 @@ -3599,10 +3599,12 @@ 133.4 intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, max_jint); 133.5 if (zeroes_done + BytesPerLong >= size_limit) { 133.6 assert(allocation() != NULL, ""); 133.7 - Node* klass_node = allocation()->in(AllocateNode::KlassNode); 133.8 - ciKlass* k = phase->type(klass_node)->is_klassptr()->klass(); 133.9 - if (zeroes_done == k->layout_helper()) 133.10 - zeroes_done = size_limit; 133.11 + if (allocation()->Opcode() == Op_Allocate) { 133.12 + Node* klass_node = allocation()->in(AllocateNode::KlassNode); 133.13 + ciKlass* k = phase->type(klass_node)->is_klassptr()->klass(); 133.14 + if (zeroes_done == k->layout_helper()) 133.15 + zeroes_done = size_limit; 133.16 + } 133.17 } 133.18 if (zeroes_done < size_limit) { 133.19 rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,
134.1 --- a/src/share/vm/opto/node.hpp Mon Dec 27 09:30:20 2010 -0500 134.2 +++ b/src/share/vm/opto/node.hpp Mon Dec 27 09:56:29 2010 -0500 134.3 @@ -81,6 +81,8 @@ 134.4 class MachCallNode; 134.5 class MachCallRuntimeNode; 134.6 class MachCallStaticJavaNode; 134.7 +class MachConstantBaseNode; 134.8 +class MachConstantNode; 134.9 class MachIfNode; 134.10 class MachNode; 134.11 class MachNullCheckNode; 134.12 @@ -566,10 +568,12 @@ 134.13 DEFINE_CLASS_ID(MachCallDynamicJava, MachCallJava, 1) 134.14 DEFINE_CLASS_ID(MachCallRuntime, MachCall, 1) 134.15 DEFINE_CLASS_ID(MachCallLeaf, MachCallRuntime, 0) 134.16 - DEFINE_CLASS_ID(MachSpillCopy, Mach, 1) 134.17 - DEFINE_CLASS_ID(MachNullCheck, Mach, 2) 134.18 - DEFINE_CLASS_ID(MachIf, Mach, 3) 134.19 - DEFINE_CLASS_ID(MachTemp, Mach, 4) 134.20 + DEFINE_CLASS_ID(MachSpillCopy, Mach, 1) 134.21 + DEFINE_CLASS_ID(MachNullCheck, Mach, 2) 134.22 + DEFINE_CLASS_ID(MachIf, Mach, 3) 134.23 + DEFINE_CLASS_ID(MachTemp, Mach, 4) 134.24 + DEFINE_CLASS_ID(MachConstantBase, Mach, 5) 134.25 + DEFINE_CLASS_ID(MachConstant, Mach, 6) 134.26 134.27 DEFINE_CLASS_ID(Proj, Node, 2) 134.28 DEFINE_CLASS_ID(CatchProj, Proj, 0) 134.29 @@ -734,6 +738,8 @@ 134.30 DEFINE_CLASS_QUERY(MachCallLeaf) 134.31 DEFINE_CLASS_QUERY(MachCallRuntime) 134.32 DEFINE_CLASS_QUERY(MachCallStaticJava) 134.33 + DEFINE_CLASS_QUERY(MachConstantBase) 134.34 + DEFINE_CLASS_QUERY(MachConstant) 134.35 DEFINE_CLASS_QUERY(MachIf) 134.36 DEFINE_CLASS_QUERY(MachNullCheck) 134.37 DEFINE_CLASS_QUERY(MachReturn)
135.1 --- a/src/share/vm/opto/output.cpp Mon Dec 27 09:30:20 2010 -0500 135.2 +++ b/src/share/vm/opto/output.cpp Mon Dec 27 09:56:29 2010 -0500 135.3 @@ -61,11 +61,6 @@ 135.4 // RootNode goes 135.5 assert( _cfg->_broot->_nodes.size() == 0, "" ); 135.6 135.7 - // Initialize the space for the BufferBlob used to find and verify 135.8 - // instruction size in MachNode::emit_size() 135.9 - init_scratch_buffer_blob(); 135.10 - if (failing()) return; // Out of memory 135.11 - 135.12 // The number of new nodes (mostly MachNop) is proportional to 135.13 // the number of java calls and inner loops which are aligned. 135.14 if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 + 135.15 @@ -333,7 +328,7 @@ 135.16 //----------------------Shorten_branches--------------------------------------- 135.17 // The architecture description provides short branch variants for some long 135.18 // branch instructions. Replace eligible long branches with short branches. 135.19 -void Compile::Shorten_branches(Label *labels, int& code_size, int& reloc_size, int& stub_size, int& const_size) { 135.20 +void Compile::Shorten_branches(Label *labels, int& code_size, int& reloc_size, int& stub_size) { 135.21 135.22 // fill in the nop array for bundling computations 135.23 MachNode *_nop_list[Bundle::_nop_count]; 135.24 @@ -353,12 +348,11 @@ 135.25 // Size in bytes of all relocation entries, including those in local stubs. 135.26 // Start with 2-bytes of reloc info for the unvalidated entry point 135.27 reloc_size = 1; // Number of relocation entries 135.28 - const_size = 0; // size of fp constants in words 135.29 135.30 // Make three passes. The first computes pessimistic blk_starts, 135.31 - // relative jmp_end, reloc_size and const_size information. 135.32 - // The second performs short branch substitution using the pessimistic 135.33 - // sizing. The third inserts nops where needed. 135.34 + // relative jmp_end and reloc_size information. 
The second performs 135.35 + // short branch substitution using the pessimistic sizing. The 135.36 + // third inserts nops where needed. 135.37 135.38 Node *nj; // tmp 135.39 135.40 @@ -381,7 +375,6 @@ 135.41 MachNode *mach = nj->as_Mach(); 135.42 blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding 135.43 reloc_size += mach->reloc(); 135.44 - const_size += mach->const_size(); 135.45 if( mach->is_MachCall() ) { 135.46 MachCallNode *mcall = mach->as_MachCall(); 135.47 // This destination address is NOT PC-relative 135.48 @@ -398,10 +391,6 @@ 135.49 if (min_offset_from_last_call == 0) { 135.50 blk_size += nop_size; 135.51 } 135.52 - } else if (mach->ideal_Opcode() == Op_Jump) { 135.53 - const_size += b->_num_succs; // Address table size 135.54 - // The size is valid even for 64 bit since it is 135.55 - // multiplied by 2*jintSize on this method exit. 135.56 } 135.57 } 135.58 min_offset_from_last_call += inst_size; 135.59 @@ -562,10 +551,6 @@ 135.60 // a relocation index. 135.61 // The CodeBuffer will expand the locs array if this estimate is too low. 135.62 reloc_size *= 10 / sizeof(relocInfo); 135.63 - 135.64 - // Adjust const_size to number of bytes 135.65 - const_size *= 2*jintSize; // both float and double take two words per entry 135.66 - 135.67 } 135.68 135.69 //------------------------------FillLocArray----------------------------------- 135.70 @@ -1102,10 +1087,39 @@ 135.71 blk_labels[i].init(); 135.72 } 135.73 135.74 + if (has_mach_constant_base_node()) { 135.75 + // Fill the constant table. 135.76 + // Note: This must happen before Shorten_branches. 135.77 + for (i = 0; i < _cfg->_num_blocks; i++) { 135.78 + Block* b = _cfg->_blocks[i]; 135.79 + 135.80 + for (uint j = 0; j < b->_nodes.size(); j++) { 135.81 + Node* n = b->_nodes[j]; 135.82 + 135.83 + // If the node is a MachConstantNode evaluate the constant 135.84 + // value section. 
135.85 + if (n->is_MachConstant()) { 135.86 + MachConstantNode* machcon = n->as_MachConstant(); 135.87 + machcon->eval_constant(C); 135.88 + } 135.89 + } 135.90 + } 135.91 + 135.92 + // Calculate the offsets of the constants and the size of the 135.93 + // constant table (including the padding to the next section). 135.94 + constant_table().calculate_offsets_and_size(); 135.95 + const_req = constant_table().size(); 135.96 + } 135.97 + 135.98 + // Initialize the space for the BufferBlob used to find and verify 135.99 + // instruction size in MachNode::emit_size() 135.100 + init_scratch_buffer_blob(const_req); 135.101 + if (failing()) return; // Out of memory 135.102 + 135.103 // If this machine supports different size branch offsets, then pre-compute 135.104 // the length of the blocks 135.105 if( _matcher->is_short_branch_offset(-1, 0) ) { 135.106 - Shorten_branches(blk_labels, code_req, locs_req, stub_req, const_req); 135.107 + Shorten_branches(blk_labels, code_req, locs_req, stub_req); 135.108 labels_not_set = false; 135.109 } 135.110 135.111 @@ -1121,12 +1135,12 @@ 135.112 code_req = const_req = stub_req = exception_handler_req = deopt_handler_req = 0x10; // force expansion 135.113 135.114 int total_req = 135.115 + const_req + 135.116 code_req + 135.117 pad_req + 135.118 stub_req + 135.119 exception_handler_req + 135.120 - deopt_handler_req + // deopt handler 135.121 - const_req; 135.122 + deopt_handler_req; // deopt handler 135.123 135.124 if (has_method_handle_invokes()) 135.125 total_req += deopt_handler_req; // deopt MH handler 135.126 @@ -1180,6 +1194,11 @@ 135.127 135.128 NonSafepointEmitter non_safepoints(this); // emit non-safepoints lazily 135.129 135.130 + // Emit the constant table. 
135.131 + if (has_mach_constant_base_node()) { 135.132 + constant_table().emit(*cb); 135.133 + } 135.134 + 135.135 // ------------------ 135.136 // Now fill in the code buffer 135.137 Node *delay_slot = NULL; 135.138 @@ -1196,12 +1215,13 @@ 135.139 cb->flush_bundle(true); 135.140 135.141 // Define the label at the beginning of the basic block 135.142 - if( labels_not_set ) 135.143 - MacroAssembler(cb).bind( blk_labels[b->_pre_order] ); 135.144 - 135.145 - else 135.146 - assert( blk_labels[b->_pre_order].loc_pos() == cb->insts_size(), 135.147 - "label position does not match code offset" ); 135.148 + if (labels_not_set) { 135.149 + MacroAssembler(cb).bind(blk_labels[b->_pre_order]); 135.150 + } else { 135.151 + assert(blk_labels[b->_pre_order].loc_pos() == cb->insts_size(), 135.152 + err_msg("label position does not match code offset: %d != %d", 135.153 + blk_labels[b->_pre_order].loc_pos(), cb->insts_size())); 135.154 + } 135.155 135.156 uint last_inst = b->_nodes.size(); 135.157 135.158 @@ -1718,9 +1738,17 @@ 135.159 // Create a data structure for all the scheduling information 135.160 Scheduling scheduling(Thread::current()->resource_area(), *this); 135.161 135.162 + // Initialize the space for the BufferBlob used to find and verify 135.163 + // instruction size in MachNode::emit_size() 135.164 + init_scratch_buffer_blob(MAX_const_size); 135.165 + if (failing()) return; // Out of memory 135.166 + 135.167 // Walk backwards over each basic block, computing the needed alignment 135.168 // Walk over all the basic blocks 135.169 scheduling.DoScheduling(); 135.170 + 135.171 + // Clear the BufferBlob used for scheduling. 135.172 + clear_scratch_buffer_blob(); 135.173 } 135.174 135.175 //------------------------------ComputeLocalLatenciesForward-------------------
136.1 --- a/src/share/vm/opto/postaloc.cpp Mon Dec 27 09:30:20 2010 -0500 136.2 +++ b/src/share/vm/opto/postaloc.cpp Mon Dec 27 09:56:29 2010 -0500 136.3 @@ -200,6 +200,19 @@ 136.4 // then reloaded BUT survives in a register the whole way. 136.5 Node *val = skip_copies(n->in(k)); 136.6 136.7 + if (val == x && nk_idx != 0 && 136.8 + regnd[nk_reg] != NULL && regnd[nk_reg] != x && 136.9 + n2lidx(x) == n2lidx(regnd[nk_reg])) { 136.10 + // When rematerialzing nodes and stretching lifetimes, the 136.11 + // allocator will reuse the original def for multidef LRG instead 136.12 + // of the current reaching def because it can't know it's safe to 136.13 + // do so. After allocation completes if they are in the same LRG 136.14 + // then it should use the current reaching def instead. 136.15 + n->set_req(k, regnd[nk_reg]); 136.16 + blk_adjust += yank_if_dead(val, current_block, &value, ®nd); 136.17 + val = skip_copies(n->in(k)); 136.18 + } 136.19 + 136.20 if( val == x ) return blk_adjust; // No progress? 136.21 136.22 bool single = is_single_register(val->ideal_reg());
137.1 --- a/src/share/vm/opto/reg_split.cpp Mon Dec 27 09:30:20 2010 -0500 137.2 +++ b/src/share/vm/opto/reg_split.cpp Mon Dec 27 09:56:29 2010 -0500 137.3 @@ -1239,6 +1239,7 @@ 137.4 // Cycle through this block's predecessors, collecting Reaches 137.5 // info for each spilled LRG and update edges. 137.6 // Walk the phis list to patch inputs, split phis, and name phis 137.7 + uint lrgs_before_phi_split = maxlrg; 137.8 for( insidx = 0; insidx < phis->size(); insidx++ ) { 137.9 Node *phi = phis->at(insidx); 137.10 assert(phi->is_Phi(),"This list must only contain Phi Nodes"); 137.11 @@ -1273,7 +1274,16 @@ 137.12 assert( def, "must have reaching def" ); 137.13 // If input up/down sense and reg-pressure DISagree 137.14 if( def->rematerialize() ) { 137.15 - def = split_Rematerialize( def, pred, pred->end_idx(), maxlrg, splits, slidx, lrg2reach, Reachblock, false ); 137.16 + // Place the rematerialized node above any MSCs created during 137.17 + // phi node splitting. end_idx points at the insertion point 137.18 + // so look at the node before it. 137.19 + int insert = pred->end_idx(); 137.20 + while (insert >= 1 && 137.21 + pred->_nodes[insert - 1]->is_SpillCopy() && 137.22 + Find(pred->_nodes[insert - 1]) >= lrgs_before_phi_split) { 137.23 + insert--; 137.24 + } 137.25 + def = split_Rematerialize( def, pred, insert, maxlrg, splits, slidx, lrg2reach, Reachblock, false ); 137.26 if( !def ) return 0; // Bail out 137.27 } 137.28 // Update the Phi's input edge array
138.1 --- a/src/share/vm/prims/jvm.h Mon Dec 27 09:30:20 2010 -0500 138.2 +++ b/src/share/vm/prims/jvm.h Mon Dec 27 09:56:29 2010 -0500 138.3 @@ -1063,7 +1063,8 @@ 138.4 JVM_CONSTANT_MethodHandle = 15, // JSR 292 138.5 JVM_CONSTANT_MethodType = 16, // JSR 292 138.6 JVM_CONSTANT_InvokeDynamicTrans = 17, // JSR 292, only occurs in old class files 138.7 - JVM_CONSTANT_InvokeDynamic = 18 // JSR 292 138.8 + JVM_CONSTANT_InvokeDynamic = 18, // JSR 292 138.9 + JVM_CONSTANT_ExternalMax = 18 // Last tag found in classfiles 138.10 }; 138.11 138.12 /* JVM_CONSTANT_MethodHandle subtypes */
139.1 --- a/src/share/vm/prims/jvmtiRedefineClasses.cpp Mon Dec 27 09:30:20 2010 -0500 139.2 +++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp Mon Dec 27 09:56:29 2010 -0500 139.3 @@ -214,7 +214,7 @@ 139.4 case JVM_CONSTANT_Double: // fall through 139.5 case JVM_CONSTANT_Long: 139.6 { 139.7 - scratch_cp->copy_entry_to(scratch_i, *merge_cp_p, *merge_cp_length_p, 139.8 + constantPoolOopDesc::copy_entry_to(scratch_cp, scratch_i, *merge_cp_p, *merge_cp_length_p, 139.9 THREAD); 139.10 139.11 if (scratch_i != *merge_cp_length_p) { 139.12 @@ -239,7 +239,7 @@ 139.13 case JVM_CONSTANT_UnresolvedClass: // fall through 139.14 case JVM_CONSTANT_UnresolvedString: 139.15 { 139.16 - scratch_cp->copy_entry_to(scratch_i, *merge_cp_p, *merge_cp_length_p, 139.17 + constantPoolOopDesc::copy_entry_to(scratch_cp, scratch_i, *merge_cp_p, *merge_cp_length_p, 139.18 THREAD); 139.19 139.20 if (scratch_i != *merge_cp_length_p) { 139.21 @@ -1093,13 +1093,13 @@ 139.22 case JVM_CONSTANT_Long: 139.23 // just copy the entry to *merge_cp_p, but double and long take 139.24 // two constant pool entries 139.25 - old_cp->copy_entry_to(old_i, *merge_cp_p, old_i, CHECK_0); 139.26 + constantPoolOopDesc::copy_entry_to(old_cp, old_i, *merge_cp_p, old_i, CHECK_0); 139.27 old_i++; 139.28 break; 139.29 139.30 default: 139.31 // just copy the entry to *merge_cp_p 139.32 - old_cp->copy_entry_to(old_i, *merge_cp_p, old_i, CHECK_0); 139.33 + constantPoolOopDesc::copy_entry_to(old_cp, old_i, *merge_cp_p, old_i, CHECK_0); 139.34 break; 139.35 } 139.36 } // end for each old_cp entry
140.1 --- a/src/share/vm/prims/methodHandleWalk.cpp Mon Dec 27 09:30:20 2010 -0500 140.2 +++ b/src/share/vm/prims/methodHandleWalk.cpp Mon Dec 27 09:56:29 2010 -0500 140.3 @@ -968,16 +968,11 @@ 140.4 140.5 if (tailcall) { 140.6 // Actually, in order to make these methods more recognizable, 140.7 - // let's put them in holder classes MethodHandle and InvokeDynamic. 140.8 - // That way stack walkers and compiler heuristics can recognize them. 140.9 - _target_klass = (for_invokedynamic() 140.10 - ? SystemDictionary::InvokeDynamic_klass() 140.11 - : SystemDictionary::MethodHandle_klass()); 140.12 + // let's put them in holder class MethodHandle. That way stack 140.13 + // walkers and compiler heuristics can recognize them. 140.14 + _target_klass = SystemDictionary::MethodHandle_klass(); 140.15 } 140.16 140.17 - // instanceKlass* ik = instanceKlass::cast(klass); 140.18 - // tty->print_cr("MethodHandleCompiler::make_invoke: %s %s.%s%s", Bytecodes::name(op), ik->external_name(), name->as_C_string(), signature->as_C_string()); 140.19 - 140.20 // Inline the method. 140.21 InvocationCounter* ic = m->invocation_counter(); 140.22 ic->set_carry_flag();
141.1 --- a/src/share/vm/prims/methodHandleWalk.hpp Mon Dec 27 09:30:20 2010 -0500 141.2 +++ b/src/share/vm/prims/methodHandleWalk.hpp Mon Dec 27 09:56:29 2010 -0500 141.3 @@ -412,8 +412,7 @@ 141.4 141.5 // Tests if the given class is a MH adapter holder. 141.6 static bool klass_is_method_handle_adapter_holder(klassOop klass) { 141.7 - return (klass == SystemDictionary::MethodHandle_klass() || 141.8 - klass == SystemDictionary::InvokeDynamic_klass()); 141.9 + return (klass == SystemDictionary::MethodHandle_klass()); 141.10 } 141.11 }; 141.12
142.1 --- a/src/share/vm/prims/methodHandles.cpp Mon Dec 27 09:30:20 2010 -0500 142.2 +++ b/src/share/vm/prims/methodHandles.cpp Mon Dec 27 09:56:29 2010 -0500 142.3 @@ -485,9 +485,8 @@ 142.4 Handle polymorphic_method_type; 142.5 bool polymorphic_signature = false; 142.6 if ((flags & ALL_KINDS) == IS_METHOD && 142.7 - (defc() == SystemDictionary::InvokeDynamic_klass() || 142.8 - (defc() == SystemDictionary::MethodHandle_klass() && 142.9 - methodOopDesc::is_method_handle_invoke_name(name())))) 142.10 + (defc() == SystemDictionary::MethodHandle_klass() && 142.11 + methodOopDesc::is_method_handle_invoke_name(name()))) 142.12 polymorphic_signature = true; 142.13 142.14 // convert the external string or reflective type to an internal signature
143.1 --- a/src/share/vm/runtime/arguments.cpp Mon Dec 27 09:30:20 2010 -0500 143.2 +++ b/src/share/vm/runtime/arguments.cpp Mon Dec 27 09:56:29 2010 -0500 143.3 @@ -1007,24 +1007,9 @@ 143.4 void Arguments::check_compressed_oops_compat() { 143.5 #ifdef _LP64 143.6 assert(UseCompressedOops, "Precondition"); 143.7 -# if defined(COMPILER1) && !defined(TIERED) 143.8 - // Until c1 supports compressed oops turn them off. 143.9 - FLAG_SET_DEFAULT(UseCompressedOops, false); 143.10 -# else 143.11 // Is it on by default or set on ergonomically 143.12 bool is_on_by_default = FLAG_IS_DEFAULT(UseCompressedOops) || FLAG_IS_ERGO(UseCompressedOops); 143.13 143.14 - // Tiered currently doesn't work with compressed oops 143.15 - if (TieredCompilation) { 143.16 - if (is_on_by_default) { 143.17 - FLAG_SET_DEFAULT(UseCompressedOops, false); 143.18 - return; 143.19 - } else { 143.20 - vm_exit_during_initialization( 143.21 - "Tiered compilation is not supported with compressed oops yet", NULL); 143.22 - } 143.23 - } 143.24 - 143.25 // If dumping an archive or forcing its use, disable compressed oops if possible 143.26 if (DumpSharedSpaces || RequireSharedSpaces) { 143.27 if (is_on_by_default) { 143.28 @@ -1038,9 +1023,7 @@ 143.29 // UseSharedSpaces is on by default. With compressed oops, we turn it off. 143.30 FLAG_SET_DEFAULT(UseSharedSpaces, false); 143.31 } 143.32 - 143.33 -# endif // defined(COMPILER1) && !defined(TIERED) 143.34 -#endif // _LP64 143.35 +#endif 143.36 } 143.37 143.38 void Arguments::set_tiered_flags() { 143.39 @@ -3075,11 +3058,9 @@ 143.40 // Set flags based on ergonomics. 143.41 set_ergonomics_flags(); 143.42 143.43 -#ifdef _LP64 143.44 if (UseCompressedOops) { 143.45 check_compressed_oops_compat(); 143.46 } 143.47 -#endif 143.48 143.49 // Check the GC selections again. 143.50 if (!check_gc_consistency()) {
144.1 --- a/src/share/vm/runtime/thread.cpp Mon Dec 27 09:30:20 2010 -0500 144.2 +++ b/src/share/vm/runtime/thread.cpp Mon Dec 27 09:56:29 2010 -0500 144.3 @@ -3231,12 +3231,6 @@ 144.4 warning("java.lang.ArithmeticException has not been initialized"); 144.5 warning("java.lang.StackOverflowError has not been initialized"); 144.6 } 144.7 - 144.8 - if (EnableInvokeDynamic) { 144.9 - // JSR 292: An intialized java.dyn.InvokeDynamic is required in 144.10 - // the compiler. 144.11 - initialize_class(vmSymbolHandles::java_dyn_InvokeDynamic(), CHECK_0); 144.12 - } 144.13 } 144.14 144.15 // See : bugid 4211085.
145.1 --- a/src/share/vm/runtime/vmStructs.cpp Mon Dec 27 09:30:20 2010 -0500 145.2 +++ b/src/share/vm/runtime/vmStructs.cpp Mon Dec 27 09:56:29 2010 -0500 145.3 @@ -1676,10 +1676,7 @@ 145.4 /* constantPoolOop layout enum for InvokeDynamic */ \ 145.5 /*************************************************/ \ 145.6 \ 145.7 - declare_constant(constantPoolOopDesc::_multi_operand_count_offset) \ 145.8 - declare_constant(constantPoolOopDesc::_multi_operand_base_offset) \ 145.9 declare_constant(constantPoolOopDesc::_indy_bsm_offset) \ 145.10 - declare_constant(constantPoolOopDesc::_indy_nt_offset) \ 145.11 declare_constant(constantPoolOopDesc::_indy_argc_offset) \ 145.12 declare_constant(constantPoolOopDesc::_indy_argv_offset) \ 145.13 \
146.1 --- a/src/share/vm/utilities/constantTag.cpp Mon Dec 27 09:30:20 2010 -0500 146.2 +++ b/src/share/vm/utilities/constantTag.cpp Mon Dec 27 09:56:29 2010 -0500 146.3 @@ -93,6 +93,8 @@ 146.4 return "MethodType"; 146.5 case JVM_CONSTANT_InvokeDynamic : 146.6 return "InvokeDynamic"; 146.7 + case JVM_CONSTANT_InvokeDynamicTrans : 146.8 + return "InvokeDynamic/transitional"; 146.9 case JVM_CONSTANT_Object : 146.10 return "Object"; 146.11 case JVM_CONSTANT_Utf8 :
147.1 --- a/src/share/vm/utilities/constantTag.hpp Mon Dec 27 09:30:20 2010 -0500 147.2 +++ b/src/share/vm/utilities/constantTag.hpp Mon Dec 27 09:56:29 2010 -0500 147.3 @@ -86,7 +86,8 @@ 147.4 147.5 bool is_method_type() const { return _tag == JVM_CONSTANT_MethodType; } 147.6 bool is_method_handle() const { return _tag == JVM_CONSTANT_MethodHandle; } 147.7 - bool is_invoke_dynamic() const { return _tag == JVM_CONSTANT_InvokeDynamic; } 147.8 + bool is_invoke_dynamic() const { return (_tag == JVM_CONSTANT_InvokeDynamic || 147.9 + _tag == JVM_CONSTANT_InvokeDynamicTrans); } 147.10 147.11 bool is_loadable_constant() const { 147.12 return ((_tag >= JVM_CONSTANT_Integer && _tag <= JVM_CONSTANT_String) ||
148.1 --- a/src/share/vm/utilities/debug.cpp Mon Dec 27 09:30:20 2010 -0500 148.2 +++ b/src/share/vm/utilities/debug.cpp Mon Dec 27 09:56:29 2010 -0500 148.3 @@ -399,8 +399,14 @@ 148.4 extern "C" void disnm(intptr_t p) { 148.5 Command c("disnm"); 148.6 CodeBlob* cb = CodeCache::find_blob((address) p); 148.7 - cb->print(); 148.8 - Disassembler::decode(cb); 148.9 + nmethod* nm = cb->as_nmethod_or_null(); 148.10 + if (nm) { 148.11 + nm->print(); 148.12 + Disassembler::decode(nm); 148.13 + } else { 148.14 + cb->print(); 148.15 + Disassembler::decode(cb); 148.16 + } 148.17 } 148.18 148.19
149.1 --- a/test/compiler/6991596/Test6991596.java Mon Dec 27 09:30:20 2010 -0500 149.2 +++ b/test/compiler/6991596/Test6991596.java Mon Dec 27 09:56:29 2010 -0500 149.3 @@ -35,7 +35,7 @@ 149.4 public class Test6991596 { 149.5 private static final Class CLASS = Test6991596.class; 149.6 private static final String NAME = "foo"; 149.7 - private static final boolean DEBUG = false; 149.8 + private static final boolean DEBUG = System.getProperty("DEBUG", "false").equals("true"); 149.9 149.10 public static void main(String[] args) throws Throwable { 149.11 testboolean(); 149.12 @@ -47,7 +47,7 @@ 149.13 } 149.14 149.15 // Helpers to get various methods. 149.16 - static MethodHandle getmh1(Class ret, Class arg) { 149.17 + static MethodHandle getmh1(Class ret, Class arg) throws NoAccessException { 149.18 return MethodHandles.lookup().findStatic(CLASS, NAME, MethodType.methodType(ret, arg)); 149.19 } 149.20 static MethodHandle getmh2(MethodHandle mh1, Class ret, Class arg) { 149.21 @@ -76,38 +76,38 @@ 149.22 MethodHandle mh2 = getmh2(mh1, boolean.class, boolean.class); 149.23 // TODO add this for all cases when the bugs are fixed. 149.24 //MethodHandle mh3 = getmh3(mh1, boolean.class, boolean.class); 149.25 - boolean a = mh1.<boolean>invokeExact((boolean) x); 149.26 - boolean b = mh2.<boolean>invokeExact(x); 149.27 + boolean a = (boolean) mh1.invokeExact((boolean) x); 149.28 + boolean b = (boolean) mh2.invokeExact(x); 149.29 //boolean c = mh3.<boolean>invokeExact((boolean) x); 149.30 - assert a == b : a + " != " + b; 149.31 - //assert c == x : c + " != " + x; 149.32 + check(x, a, b); 149.33 + //check(x, c, x); 149.34 } 149.35 149.36 // byte 149.37 { 149.38 MethodHandle mh1 = getmh1( byte.class, byte.class ); 149.39 MethodHandle mh2 = getmh2(mh1, byte.class, boolean.class); 149.40 - byte a = mh1.<byte>invokeExact((byte) (x ? 1 : 0)); 149.41 - byte b = mh2.<byte>invokeExact(x); 149.42 - assert a == b : a + " != " + b; 149.43 + byte a = (byte) mh1.invokeExact((byte) (x ? 
1 : 0)); 149.44 + byte b = (byte) mh2.invokeExact(x); 149.45 + check(x, a, b); 149.46 } 149.47 149.48 // char 149.49 { 149.50 MethodHandle mh1 = getmh1( char.class, char.class); 149.51 MethodHandle mh2 = getmh2(mh1, char.class, boolean.class); 149.52 - char a = mh1.<char>invokeExact((char) (x ? 1 : 0)); 149.53 - char b = mh2.<char>invokeExact(x); 149.54 - assert a == b : a + " != " + b; 149.55 + char a = (char) mh1.invokeExact((char) (x ? 1 : 0)); 149.56 + char b = (char) mh2.invokeExact(x); 149.57 + check(x, a, b); 149.58 } 149.59 149.60 // short 149.61 { 149.62 MethodHandle mh1 = getmh1( short.class, short.class); 149.63 MethodHandle mh2 = getmh2(mh1, short.class, boolean.class); 149.64 - short a = mh1.<short>invokeExact((short) (x ? 1 : 0)); 149.65 - short b = mh2.<short>invokeExact(x); 149.66 - assert a == b : a + " != " + b; 149.67 + short a = (short) mh1.invokeExact((short) (x ? 1 : 0)); 149.68 + short b = (short) mh2.invokeExact(x); 149.69 + check(x, a, b); 149.70 } 149.71 } 149.72 149.73 @@ -134,36 +134,36 @@ 149.74 { 149.75 MethodHandle mh1 = getmh1( boolean.class, boolean.class); 149.76 MethodHandle mh2 = getmh2(mh1, boolean.class, byte.class); 149.77 - boolean a = mh1.<boolean>invokeExact((x & 1) == 1); 149.78 - boolean b = mh2.<boolean>invokeExact(x); 149.79 - assert a == b : a + " != " + b; 149.80 + boolean a = (boolean) mh1.invokeExact((x & 1) == 1); 149.81 + boolean b = (boolean) mh2.invokeExact(x); 149.82 + check(x, a, b); 149.83 } 149.84 149.85 // byte 149.86 { 149.87 MethodHandle mh1 = getmh1( byte.class, byte.class); 149.88 MethodHandle mh2 = getmh2(mh1, byte.class, byte.class); 149.89 - byte a = mh1.<byte>invokeExact((byte) x); 149.90 - byte b = mh2.<byte>invokeExact(x); 149.91 - assert a == b : a + " != " + b; 149.92 + byte a = (byte) mh1.invokeExact((byte) x); 149.93 + byte b = (byte) mh2.invokeExact(x); 149.94 + check(x, a, b); 149.95 } 149.96 149.97 // char 149.98 { 149.99 MethodHandle mh1 = getmh1( char.class, char.class); 149.100 
MethodHandle mh2 = getmh2(mh1, char.class, byte.class); 149.101 - char a = mh1.<char>invokeExact((char) x); 149.102 - char b = mh2.<char>invokeExact(x); 149.103 - assert a == b : a + " != " + b; 149.104 + char a = (char) mh1.invokeExact((char) x); 149.105 + char b = (char) mh2.invokeExact(x); 149.106 + check(x, a, b); 149.107 } 149.108 149.109 // short 149.110 { 149.111 MethodHandle mh1 = getmh1( short.class, short.class); 149.112 MethodHandle mh2 = getmh2(mh1, short.class, byte.class); 149.113 - short a = mh1.<short>invokeExact((short) x); 149.114 - short b = mh2.<short>invokeExact(x); 149.115 - assert a == b : a + " != " + b; 149.116 + short a = (short) mh1.invokeExact((short) x); 149.117 + short b = (short) mh2.invokeExact(x); 149.118 + check(x, a, b); 149.119 } 149.120 } 149.121 149.122 @@ -188,36 +188,36 @@ 149.123 { 149.124 MethodHandle mh1 = getmh1( boolean.class, boolean.class); 149.125 MethodHandle mh2 = getmh2(mh1, boolean.class, char.class); 149.126 - boolean a = mh1.<boolean>invokeExact((x & 1) == 1); 149.127 - boolean b = mh2.<boolean>invokeExact(x); 149.128 - assert a == b : a + " != " + b; 149.129 + boolean a = (boolean) mh1.invokeExact((x & 1) == 1); 149.130 + boolean b = (boolean) mh2.invokeExact(x); 149.131 + check(x, a, b); 149.132 } 149.133 149.134 // byte 149.135 { 149.136 MethodHandle mh1 = getmh1( byte.class, byte.class); 149.137 MethodHandle mh2 = getmh2(mh1, byte.class, char.class); 149.138 - byte a = mh1.<byte>invokeExact((byte) x); 149.139 - byte b = mh2.<byte>invokeExact(x); 149.140 - assert a == b : a + " != " + b; 149.141 + byte a = (byte) mh1.invokeExact((byte) x); 149.142 + byte b = (byte) mh2.invokeExact(x); 149.143 + check(x, a, b); 149.144 } 149.145 149.146 // char 149.147 { 149.148 MethodHandle mh1 = getmh1( char.class, char.class); 149.149 MethodHandle mh2 = getmh2(mh1, char.class, char.class); 149.150 - char a = mh1.<char>invokeExact((char) x); 149.151 - char b = mh2.<char>invokeExact(x); 149.152 - assert a == b : a + " != " + 
b; 149.153 + char a = (char) mh1.invokeExact((char) x); 149.154 + char b = (char) mh2.invokeExact(x); 149.155 + check(x, a, b); 149.156 } 149.157 149.158 // short 149.159 { 149.160 MethodHandle mh1 = getmh1( short.class, short.class); 149.161 MethodHandle mh2 = getmh2(mh1, short.class, char.class); 149.162 - short a = mh1.<short>invokeExact((short) x); 149.163 - short b = mh2.<short>invokeExact(x); 149.164 - assert a == b : a + " != " + b; 149.165 + short a = (short) mh1.invokeExact((short) x); 149.166 + short b = (short) mh2.invokeExact(x); 149.167 + check(x, a, b); 149.168 } 149.169 } 149.170 149.171 @@ -248,36 +248,36 @@ 149.172 { 149.173 MethodHandle mh1 = getmh1( boolean.class, boolean.class); 149.174 MethodHandle mh2 = getmh2(mh1, boolean.class, short.class); 149.175 - boolean a = mh1.<boolean>invokeExact((x & 1) == 1); 149.176 - boolean b = mh2.<boolean>invokeExact(x); 149.177 - assert a == b : a + " != " + b; 149.178 + boolean a = (boolean) mh1.invokeExact((x & 1) == 1); 149.179 + boolean b = (boolean) mh2.invokeExact(x); 149.180 + check(x, a, b); 149.181 } 149.182 149.183 // byte 149.184 { 149.185 MethodHandle mh1 = getmh1( byte.class, byte.class); 149.186 MethodHandle mh2 = getmh2(mh1, byte.class, short.class); 149.187 - byte a = mh1.<byte>invokeExact((byte) x); 149.188 - byte b = mh2.<byte>invokeExact(x); 149.189 - assert a == b : a + " != " + b; 149.190 + byte a = (byte) mh1.invokeExact((byte) x); 149.191 + byte b = (byte) mh2.invokeExact(x); 149.192 + check(x, a, b); 149.193 } 149.194 149.195 // char 149.196 { 149.197 MethodHandle mh1 = getmh1( char.class, char.class); 149.198 MethodHandle mh2 = getmh2(mh1, char.class, short.class); 149.199 - char a = mh1.<char>invokeExact((char) x); 149.200 - char b = mh2.<char>invokeExact(x); 149.201 - assert a == b : a + " != " + b; 149.202 + char a = (char) mh1.invokeExact((char) x); 149.203 + char b = (char) mh2.invokeExact(x); 149.204 + check(x, a, b); 149.205 } 149.206 149.207 // short 149.208 { 149.209 
MethodHandle mh1 = getmh1( short.class, short.class); 149.210 MethodHandle mh2 = getmh2(mh1, short.class, short.class); 149.211 - short a = mh1.<short>invokeExact((short) x); 149.212 - short b = mh2.<short>invokeExact(x); 149.213 - assert a == b : a + " != " + b; 149.214 + short a = (short) mh1.invokeExact((short) x); 149.215 + short b = (short) mh2.invokeExact(x); 149.216 + check(x, a, b); 149.217 } 149.218 } 149.219 149.220 @@ -316,45 +316,46 @@ 149.221 { 149.222 MethodHandle mh1 = getmh1( boolean.class, boolean.class); 149.223 MethodHandle mh2 = getmh2(mh1, boolean.class, int.class); 149.224 - boolean a = mh1.<boolean>invokeExact((x & 1) == 1); 149.225 - boolean b = mh2.<boolean>invokeExact(x); 149.226 - assert a == b : a + " != " + b; 149.227 + boolean a = (boolean) mh1.invokeExact((x & 1) == 1); 149.228 + boolean b = (boolean) mh2.invokeExact(x); 149.229 + check(x, a, b); 149.230 } 149.231 149.232 // byte 149.233 { 149.234 MethodHandle mh1 = getmh1( byte.class, byte.class); 149.235 MethodHandle mh2 = getmh2(mh1, byte.class, int.class); 149.236 - byte a = mh1.<byte>invokeExact((byte) x); 149.237 - byte b = mh2.<byte>invokeExact(x); 149.238 - assert a == b : a + " != " + b; 149.239 + byte a = (byte) mh1.invokeExact((byte) x); 149.240 + byte b = (byte) mh2.invokeExact(x); 149.241 + check(x, a, b); 149.242 } 149.243 149.244 // char 149.245 { 149.246 MethodHandle mh1 = getmh1( char.class, char.class); 149.247 MethodHandle mh2 = getmh2(mh1, char.class, int.class); 149.248 - char a = mh1.<char>invokeExact((char) x); 149.249 - char b = mh2.<char>invokeExact(x); 149.250 - assert a == b : a + " != " + b; 149.251 + char a = (char) mh1.invokeExact((char) x); 149.252 + char b = (char) mh2.invokeExact(x); 149.253 + check(x, a, b); 149.254 } 149.255 149.256 // short 149.257 { 149.258 MethodHandle mh1 = getmh1( short.class, short.class); 149.259 MethodHandle mh2 = getmh2(mh1, short.class, int.class); 149.260 - short a = mh1.<short>invokeExact((short) x); 149.261 - short b = 
mh2.<short>invokeExact(x); 149.262 + short a = (short) mh1.invokeExact((short) x); 149.263 + short b = (short) mh2.invokeExact(x); 149.264 assert a == b : a + " != " + b; 149.265 + check(x, a, b); 149.266 } 149.267 149.268 // int 149.269 { 149.270 MethodHandle mh1 = getmh1( int.class, int.class); 149.271 MethodHandle mh2 = getmh2(mh1, int.class, int.class); 149.272 - int a = mh1.<int>invokeExact((int) x); 149.273 - int b = mh2.<int>invokeExact(x); 149.274 - assert a == b : a + " != " + b; 149.275 + int a = (int) mh1.invokeExact((int) x); 149.276 + int b = (int) mh2.invokeExact(x); 149.277 + check(x, a, b); 149.278 } 149.279 } 149.280 149.281 @@ -395,48 +396,65 @@ 149.282 { 149.283 MethodHandle mh1 = getmh1( boolean.class, boolean.class); 149.284 MethodHandle mh2 = getmh2(mh1, boolean.class, long.class); 149.285 - boolean a = mh1.<boolean>invokeExact((x & 1L) == 1L); 149.286 - boolean b = mh2.<boolean>invokeExact(x); 149.287 - assert a == b : a + " != " + b; 149.288 + boolean a = (boolean) mh1.invokeExact((x & 1L) == 1L); 149.289 + boolean b = (boolean) mh2.invokeExact(x); 149.290 + check(x, a, b); 149.291 } 149.292 149.293 // byte 149.294 { 149.295 MethodHandle mh1 = getmh1( byte.class, byte.class); 149.296 MethodHandle mh2 = getmh2(mh1, byte.class, long.class); 149.297 - byte a = mh1.<byte>invokeExact((byte) x); 149.298 - byte b = mh2.<byte>invokeExact(x); 149.299 - assert a == b : a + " != " + b; 149.300 + byte a = (byte) mh1.invokeExact((byte) x); 149.301 + byte b = (byte) mh2.invokeExact(x); 149.302 + check(x, a, b); 149.303 } 149.304 149.305 // char 149.306 { 149.307 MethodHandle mh1 = getmh1( char.class, char.class); 149.308 MethodHandle mh2 = getmh2(mh1, char.class, long.class); 149.309 - char a = mh1.<char>invokeExact((char) x); 149.310 - char b = mh2.<char>invokeExact(x); 149.311 - assert a == b : a + " != " + b; 149.312 + char a = (char) mh1.invokeExact((char) x); 149.313 + char b = (char) mh2.invokeExact(x); 149.314 + check(x, a, b); 149.315 } 149.316 
149.317 // short 149.318 { 149.319 MethodHandle mh1 = getmh1( short.class, short.class); 149.320 MethodHandle mh2 = getmh2(mh1, short.class, long.class); 149.321 - short a = mh1.<short>invokeExact((short) x); 149.322 - short b = mh2.<short>invokeExact(x); 149.323 - assert a == b : a + " != " + b; 149.324 + short a = (short) mh1.invokeExact((short) x); 149.325 + short b = (short) mh2.invokeExact(x); 149.326 + check(x, a, b); 149.327 } 149.328 149.329 // int 149.330 { 149.331 MethodHandle mh1 = getmh1( int.class, int.class); 149.332 MethodHandle mh2 = getmh2(mh1, int.class, long.class); 149.333 - int a = mh1.<int>invokeExact((int) x); 149.334 - int b = mh2.<int>invokeExact(x); 149.335 - assert a == b : a + " != " + b; 149.336 + int a = (int) mh1.invokeExact((int) x); 149.337 + int b = (int) mh2.invokeExact(x); 149.338 + check(x, a, b); 149.339 } 149.340 + } 149.341 149.342 - } 149.343 + static void check(boolean x, boolean e, boolean a) { p(z2h(x), z2h(e), z2h(a)); assert e == a : z2h(x) + ": " + z2h(e) + " != " + z2h(a); } 149.344 + static void check(boolean x, byte e, byte a) { p(z2h(x), i2h(e), i2h(a)); assert e == a : z2h(x) + ": " + i2h(e) + " != " + i2h(a); } 149.345 + static void check(boolean x, int e, int a) { p(z2h(x), i2h(e), i2h(a)); assert e == a : z2h(x) + ": " + i2h(e) + " != " + i2h(a); } 149.346 + 149.347 + static void check(int x, boolean e, boolean a) { p(i2h(x), z2h(e), z2h(a)); assert e == a : i2h(x) + ": " + z2h(e) + " != " + z2h(a); } 149.348 + static void check(int x, byte e, byte a) { p(i2h(x), i2h(e), i2h(a)); assert e == a : i2h(x) + ": " + i2h(e) + " != " + i2h(a); } 149.349 + static void check(int x, int e, int a) { p(i2h(x), i2h(e), i2h(a)); assert e == a : i2h(x) + ": " + i2h(e) + " != " + i2h(a); } 149.350 + 149.351 + static void check(long x, boolean e, boolean a) { p(l2h(x), z2h(e), z2h(a)); assert e == a : l2h(x) + ": " + z2h(e) + " != " + z2h(a); } 149.352 + static void check(long x, byte e, byte a) { p(l2h(x), i2h(e), i2h(a)); 
assert e == a : l2h(x) + ": " + i2h(e) + " != " + i2h(a); } 149.353 + static void check(long x, int e, int a) { p(l2h(x), i2h(e), i2h(a)); assert e == a : l2h(x) + ": " + i2h(e) + " != " + i2h(a); } 149.354 + 149.355 + static void p(String x, String e, String a) { if (DEBUG) System.out.println(x + ": expected: " + e + ", actual: " + a); } 149.356 + 149.357 + static String z2h(boolean x) { return x ? "1" : "0"; } 149.358 + static String i2h(int x) { return Integer.toHexString(x); } 149.359 + static String l2h(long x) { return Long.toHexString(x); } 149.360 149.361 // to int 149.362 public static boolean foo(boolean i) { return i; }
150.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 150.2 +++ b/test/compiler/7002666/Test7002666.java Mon Dec 27 09:56:29 2010 -0500 150.3 @@ -0,0 +1,57 @@ 150.4 +/* 150.5 + * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved. 150.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 150.7 + * 150.8 + * This code is free software; you can redistribute it and/or modify it 150.9 + * under the terms of the GNU General Public License version 2 only, as 150.10 + * published by the Free Software Foundation. 150.11 + * 150.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 150.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 150.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 150.15 + * version 2 for more details (a copy is included in the LICENSE file that 150.16 + * accompanied this code). 150.17 + * 150.18 + * You should have received a copy of the GNU General Public License version 150.19 + * 2 along with this work; if not, write to the Free Software Foundation, 150.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 150.21 + * 150.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 150.23 + * or visit www.oracle.com if you need additional information or have any 150.24 + * questions. 150.25 + * 150.26 + */ 150.27 + 150.28 +/** 150.29 + * @test 150.30 + * @bug 7002666 150.31 + * @summary eclipse CDT projects crash with compressed oops 150.32 + * 150.33 + * @run main/othervm -Xbatch -XX:CompileOnly=Test7002666.test,java/lang/reflect/Array Test7002666 150.34 + * 150.35 + * This will only reliably fail with a fastdebug build since it relies 150.36 + * on seeing garbage in the heap to die. It could be made more 150.37 + * reliable in product mode but that would require greatly increasing 150.38 + * the runtime. 
150.39 + */ 150.40 + 150.41 +public class Test7002666 { 150.42 + public static void main(String[] args) { 150.43 + for (int i = 0; i < 25000; i++) { 150.44 + Object[] a = test(Test7002666.class, new Test7002666()); 150.45 + if (a[0] != null) { 150.46 + // The element should be null but if it's not then 150.47 + // we've hit the bug. This will most likely crash but 150.48 + // at least throw an exception. 150.49 + System.err.println(a[0]); 150.50 + throw new InternalError(a[0].toString()); 150.51 + 150.52 + } 150.53 + } 150.54 + } 150.55 + public static Object[] test(Class c, Object o) { 150.56 + // allocate an array small enough to be trigger the bug 150.57 + Object[] a = (Object[])java.lang.reflect.Array.newInstance(c, 1); 150.58 + return a; 150.59 + } 150.60 +}