Mon, 15 Jul 2013 11:07:03 +0100
Merge
src/os_cpu/linux_sparc/vm/assembler_linux_sparc.cpp | file | annotate | diff | comparison | revisions | |
src/os_cpu/solaris_sparc/vm/assembler_solaris_sparc.cpp | file | annotate | diff | comparison | revisions |
1.1 --- a/.hgtags Thu Jul 11 12:59:03 2013 -0400 1.2 +++ b/.hgtags Mon Jul 15 11:07:03 2013 +0100 1.3 @@ -351,3 +351,10 @@ 1.4 3c78a14da19d26d6937af5f98b97e2a21c653b04 hs25-b36 1.5 1beed1f6f9edefe47ba8ed1355fbd3e7606b8288 jdk8-b94 1.6 69689078dff8b21e6df30870464f5d736eebdf72 hs25-b37 1.7 +5d65c078cd0ac455aa5e58a09844c7acce54b487 jdk8-b95 1.8 +2cc5a9d1ba66dfdff578918b393c727bd9450210 hs25-b38 1.9 +e6a4b8c71fa6f225bd989a34de2d0d0a656a8be8 jdk8-b96 1.10 +2b9380b0bf0b649f40704735773e8956c2d88ba0 hs25-b39 1.11 +d197d377ab2e016d024e8c86cb06a57bd7eae590 jdk8-b97 1.12 +c9dd82da51ed34a28f7c6b3245163ee962e94572 hs25-b40 1.13 +30b5b75c42ac5174b640fbef8aa87527668e8400 jdk8-b98
2.1 --- a/agent/src/share/classes/sun/jvm/hotspot/CLHSDB.java Thu Jul 11 12:59:03 2013 -0400 2.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/CLHSDB.java Mon Jul 15 11:07:03 2013 +0100 2.3 @@ -31,13 +31,19 @@ 2.4 import java.util.*; 2.5 2.6 public class CLHSDB { 2.7 + 2.8 + public CLHSDB(JVMDebugger d) { 2.9 + jvmDebugger = d; 2.10 + } 2.11 + 2.12 public static void main(String[] args) { 2.13 new CLHSDB(args).run(); 2.14 } 2.15 2.16 - private void run() { 2.17 - // At this point, if pidText != null we are supposed to attach to it. 2.18 - // Else, if execPath != null, it is the path of a jdk/bin/java 2.19 + public void run() { 2.20 + // If jvmDebugger is already set, we have been given a JVMDebugger. 2.21 + // Otherwise, if pidText != null we are supposed to attach to it. 2.22 + // Finally, if execPath != null, it is the path of a jdk/bin/java 2.23 // and coreFilename is the pathname of a core file we are 2.24 // supposed to attach to. 2.25 2.26 @@ -49,7 +55,9 @@ 2.27 } 2.28 }); 2.29 2.30 - if (pidText != null) { 2.31 + if (jvmDebugger != null) { 2.32 + attachDebugger(jvmDebugger); 2.33 + } else if (pidText != null) { 2.34 attachDebugger(pidText); 2.35 } else if (execPath != null) { 2.36 attachDebugger(execPath, coreFilename); 2.37 @@ -96,6 +104,7 @@ 2.38 // Internals only below this point 2.39 // 2.40 private HotSpotAgent agent; 2.41 + private JVMDebugger jvmDebugger; 2.42 private boolean attached; 2.43 // These had to be made data members because they are referenced in inner classes. 2.44 private String pidText; 2.45 @@ -120,7 +129,7 @@ 2.46 case (1): 2.47 if (args[0].equals("help") || args[0].equals("-help")) { 2.48 doUsage(); 2.49 - System.exit(0); 2.50 + return; 2.51 } 2.52 // If all numbers, it is a PID to attach to 2.53 // Else, it is a pathname to a .../bin/java for a core file. 
2.54 @@ -142,10 +151,15 @@ 2.55 default: 2.56 System.out.println("HSDB Error: Too many options specified"); 2.57 doUsage(); 2.58 - System.exit(1); 2.59 + return; 2.60 } 2.61 } 2.62 2.63 + private void attachDebugger(JVMDebugger d) { 2.64 + agent.attach(d); 2.65 + attached = true; 2.66 + } 2.67 + 2.68 /** NOTE we are in a different thread here than either the main 2.69 thread or the Swing/AWT event handler thread, so we must be very 2.70 careful when creating or removing widgets */
3.1 --- a/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java Thu Jul 11 12:59:03 2013 -0400 3.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/CommandProcessor.java Mon Jul 15 11:07:03 2013 +0100 3.3 @@ -101,6 +101,9 @@ 3.4 import sun.jvm.hotspot.utilities.soql.JSJavaScriptEngine; 3.5 3.6 public class CommandProcessor { 3.7 + 3.8 + volatile boolean quit; 3.9 + 3.10 public abstract static class DebuggerInterface { 3.11 public abstract HotSpotAgent getAgent(); 3.12 public abstract boolean isAttached(); 3.13 @@ -1135,7 +1138,7 @@ 3.14 usage(); 3.15 } else { 3.16 debugger.detach(); 3.17 - System.exit(0); 3.18 + quit = true; 3.19 } 3.20 } 3.21 }, 3.22 @@ -1714,7 +1717,7 @@ 3.23 } 3.24 protected void quit() { 3.25 debugger.detach(); 3.26 - System.exit(0); 3.27 + quit = true; 3.28 } 3.29 protected BufferedReader getInputReader() { 3.30 return in; 3.31 @@ -1781,7 +1784,7 @@ 3.32 3.33 public void run(boolean prompt) { 3.34 // Process interactive commands. 3.35 - while (true) { 3.36 + while (!quit) { 3.37 if (prompt) printPrompt(); 3.38 String ln = null; 3.39 try {
4.1 --- a/agent/src/share/classes/sun/jvm/hotspot/HSDB.java Thu Jul 11 12:59:03 2013 -0400 4.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/HSDB.java Mon Jul 15 11:07:03 2013 +0100 4.3 @@ -59,8 +59,11 @@ 4.4 // Internals only below this point 4.5 // 4.6 private HotSpotAgent agent; 4.7 + private JVMDebugger jvmDebugger; 4.8 private JDesktopPane desktop; 4.9 private boolean attached; 4.10 + private boolean argError; 4.11 + private JFrame frame; 4.12 /** List <JMenuItem> */ 4.13 private java.util.List attachMenuItems; 4.14 /** List <JMenuItem> */ 4.15 @@ -85,6 +88,11 @@ 4.16 System.out.println(" path-to-corefile: Debug this corefile. The default is 'core'"); 4.17 System.out.println(" If no arguments are specified, you can select what to do from the GUI.\n"); 4.18 HotSpotAgent.showUsage(); 4.19 + argError = true; 4.20 + } 4.21 + 4.22 + public HSDB(JVMDebugger d) { 4.23 + jvmDebugger = d; 4.24 } 4.25 4.26 private HSDB(String[] args) { 4.27 @@ -95,7 +103,6 @@ 4.28 case (1): 4.29 if (args[0].equals("help") || args[0].equals("-help")) { 4.30 doUsage(); 4.31 - System.exit(0); 4.32 } 4.33 // If all numbers, it is a PID to attach to 4.34 // Else, it is a pathname to a .../bin/java for a core file. 4.35 @@ -117,24 +124,29 @@ 4.36 default: 4.37 System.out.println("HSDB Error: Too many options specified"); 4.38 doUsage(); 4.39 - System.exit(1); 4.40 } 4.41 } 4.42 4.43 - private void run() { 4.44 - // At this point, if pidText != null we are supposed to attach to it. 4.45 - // Else, if execPath != null, it is the path of a jdk/bin/java 4.46 - // and coreFilename is the pathname of a core file we are 4.47 - // supposed to attach to. 4.48 + // close this tool without calling System.exit 4.49 + protected void closeUI() { 4.50 + workerThread.shutdown(); 4.51 + frame.dispose(); 4.52 + } 4.53 + 4.54 + public void run() { 4.55 + // Don't start the UI if there were bad arguments. 
4.56 + if (argError) { 4.57 + return; 4.58 + } 4.59 4.60 agent = new HotSpotAgent(); 4.61 workerThread = new WorkerThread(); 4.62 attachMenuItems = new java.util.ArrayList(); 4.63 detachMenuItems = new java.util.ArrayList(); 4.64 4.65 - JFrame frame = new JFrame("HSDB - HotSpot Debugger"); 4.66 + frame = new JFrame("HSDB - HotSpot Debugger"); 4.67 frame.setSize(800, 600); 4.68 - frame.setDefaultCloseOperation(WindowConstants.EXIT_ON_CLOSE); 4.69 + frame.setDefaultCloseOperation(WindowConstants.DISPOSE_ON_CLOSE); 4.70 4.71 JMenuBar menuBar = new JMenuBar(); 4.72 4.73 @@ -197,7 +209,7 @@ 4.74 item = createMenuItem("Exit", 4.75 new ActionListener() { 4.76 public void actionPerformed(ActionEvent e) { 4.77 - System.exit(0); 4.78 + closeUI(); 4.79 } 4.80 }); 4.81 item.setAccelerator(KeyStroke.getKeyStroke(KeyEvent.VK_X, ActionEvent.ALT_MASK)); 4.82 @@ -406,7 +418,15 @@ 4.83 } 4.84 }); 4.85 4.86 - if (pidText != null) { 4.87 + // If jvmDebugger is already set, we have been given a JVMDebugger. 4.88 + // Otherwise, if pidText != null we are supposed to attach to it. 4.89 + // Finally, if execPath != null, it is the path of a jdk/bin/java 4.90 + // and coreFilename is the pathname of a core file we are 4.91 + // supposed to attach to. 4.92 + 4.93 + if (jvmDebugger != null) { 4.94 + attach(jvmDebugger); 4.95 + } else if (pidText != null) { 4.96 attach(pidText); 4.97 } else if (execPath != null) { 4.98 attach(execPath, coreFilename); 4.99 @@ -1113,6 +1133,12 @@ 4.100 }); 4.101 } 4.102 4.103 + // Attach to existing JVMDebugger, which should be already attached to a core/process. 4.104 + private void attach(JVMDebugger d) { 4.105 + attached = true; 4.106 + showThreadsDialog(); 4.107 + } 4.108 + 4.109 /** NOTE we are in a different thread here than either the main 4.110 thread or the Swing/AWT event handler thread, so we must be very 4.111 careful when creating or removing widgets */
5.1 --- a/agent/src/share/classes/sun/jvm/hotspot/HotSpotAgent.java Thu Jul 11 12:59:03 2013 -0400 5.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/HotSpotAgent.java Mon Jul 15 11:07:03 2013 +0100 5.3 @@ -25,6 +25,8 @@ 5.4 package sun.jvm.hotspot; 5.5 5.6 import java.rmi.RemoteException; 5.7 +import java.lang.reflect.Constructor; 5.8 +import java.lang.reflect.InvocationTargetException; 5.9 5.10 import sun.jvm.hotspot.debugger.Debugger; 5.11 import sun.jvm.hotspot.debugger.DebuggerException; 5.12 @@ -63,7 +65,6 @@ 5.13 5.14 private String os; 5.15 private String cpu; 5.16 - private String fileSep; 5.17 5.18 // The system can work in several ways: 5.19 // - Attaching to local process 5.20 @@ -155,6 +156,14 @@ 5.21 go(); 5.22 } 5.23 5.24 + /** This uses a JVMDebugger that is already attached to the core or process */ 5.25 + public synchronized void attach(JVMDebugger d) 5.26 + throws DebuggerException { 5.27 + debugger = d; 5.28 + isServer = false; 5.29 + go(); 5.30 + } 5.31 + 5.32 /** This attaches to a "debug server" on a remote machine; this 5.33 remote server has already attached to a process or opened a 5.34 core file and is waiting for RMI calls on the Debugger object to 5.35 @@ -303,28 +312,37 @@ 5.36 // server, but not client attaching to server) 5.37 // 5.38 5.39 - try { 5.40 - os = PlatformInfo.getOS(); 5.41 - cpu = PlatformInfo.getCPU(); 5.42 - } 5.43 - catch (UnsupportedPlatformException e) { 5.44 - throw new DebuggerException(e); 5.45 - } 5.46 - fileSep = System.getProperty("file.separator"); 5.47 + // Handle existing or alternate JVMDebugger: 5.48 + // these will set os, cpu independently of our PlatformInfo implementation. 
5.49 + String alternateDebugger = System.getProperty("sa.altDebugger"); 5.50 + if (debugger != null) { 5.51 + setupDebuggerExisting(); 5.52 5.53 - if (os.equals("solaris")) { 5.54 - setupDebuggerSolaris(); 5.55 - } else if (os.equals("win32")) { 5.56 - setupDebuggerWin32(); 5.57 - } else if (os.equals("linux")) { 5.58 - setupDebuggerLinux(); 5.59 - } else if (os.equals("bsd")) { 5.60 - setupDebuggerBsd(); 5.61 - } else if (os.equals("darwin")) { 5.62 - setupDebuggerDarwin(); 5.63 + } else if (alternateDebugger != null) { 5.64 + setupDebuggerAlternate(alternateDebugger); 5.65 + 5.66 } else { 5.67 - // Add support for more operating systems here 5.68 - throw new DebuggerException("Operating system " + os + " not yet supported"); 5.69 + // Otherwise, os, cpu are those of our current platform: 5.70 + try { 5.71 + os = PlatformInfo.getOS(); 5.72 + cpu = PlatformInfo.getCPU(); 5.73 + } catch (UnsupportedPlatformException e) { 5.74 + throw new DebuggerException(e); 5.75 + } 5.76 + if (os.equals("solaris")) { 5.77 + setupDebuggerSolaris(); 5.78 + } else if (os.equals("win32")) { 5.79 + setupDebuggerWin32(); 5.80 + } else if (os.equals("linux")) { 5.81 + setupDebuggerLinux(); 5.82 + } else if (os.equals("bsd")) { 5.83 + setupDebuggerBsd(); 5.84 + } else if (os.equals("darwin")) { 5.85 + setupDebuggerDarwin(); 5.86 + } else { 5.87 + // Add support for more operating systems here 5.88 + throw new DebuggerException("Operating system " + os + " not yet supported"); 5.89 + } 5.90 } 5.91 5.92 if (isServer) { 5.93 @@ -423,6 +441,41 @@ 5.94 // OS-specific debugger setup/connect routines 5.95 // 5.96 5.97 + // Use the existing JVMDebugger, as passed to our constructor. 5.98 + // Retrieve os and cpu from that debugger, not the current platform. 
5.99 + private void setupDebuggerExisting() { 5.100 + 5.101 + os = debugger.getOS(); 5.102 + cpu = debugger.getCPU(); 5.103 + setupJVMLibNames(os); 5.104 + machDesc = debugger.getMachineDescription(); 5.105 + } 5.106 + 5.107 + // Given a classname, load an alternate implementation of JVMDebugger. 5.108 + private void setupDebuggerAlternate(String alternateName) { 5.109 + 5.110 + try { 5.111 + Class c = Class.forName(alternateName); 5.112 + Constructor cons = c.getConstructor(); 5.113 + debugger = (JVMDebugger) cons.newInstance(); 5.114 + attachDebugger(); 5.115 + setupDebuggerExisting(); 5.116 + 5.117 + } catch (ClassNotFoundException cnfe) { 5.118 + throw new DebuggerException("Cannot find alternate SA Debugger: '" + alternateName + "'"); 5.119 + } catch (NoSuchMethodException nsme) { 5.120 + throw new DebuggerException("Alternate SA Debugger: '" + alternateName + "' has missing constructor."); 5.121 + } catch (InstantiationException ie) { 5.122 + throw new DebuggerException("Alternate SA Debugger: '" + alternateName + "' fails to initialise: ", ie); 5.123 + } catch (IllegalAccessException iae) { 5.124 + throw new DebuggerException("Alternate SA Debugger: '" + alternateName + "' fails to initialise: ", iae); 5.125 + } catch (InvocationTargetException iae) { 5.126 + throw new DebuggerException("Alternate SA Debugger: '" + alternateName + "' fails to initialise: ", iae); 5.127 + } 5.128 + 5.129 + System.err.println("Loaded alternate HotSpot SA Debugger: " + alternateName); 5.130 + } 5.131 + 5.132 // 5.133 // Solaris 5.134 // 5.135 @@ -466,6 +519,11 @@ 5.136 debugger = new RemoteDebuggerClient(remote); 5.137 machDesc = ((RemoteDebuggerClient) debugger).getMachineDescription(); 5.138 os = debugger.getOS(); 5.139 + setupJVMLibNames(os); 5.140 + cpu = debugger.getCPU(); 5.141 + } 5.142 + 5.143 + private void setupJVMLibNames(String os) { 5.144 if (os.equals("solaris")) { 5.145 setupJVMLibNamesSolaris(); 5.146 } else if (os.equals("win32")) { 5.147 @@ -479,8 +537,6 @@ 
5.148 } else { 5.149 throw new RuntimeException("Unknown OS type"); 5.150 } 5.151 - 5.152 - cpu = debugger.getCPU(); 5.153 } 5.154 5.155 private void setupJVMLibNamesSolaris() {
6.1 --- a/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxAddress.java Thu Jul 11 12:59:03 2013 -0400 6.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxAddress.java Mon Jul 15 11:07:03 2013 +0100 6.3 @@ -26,11 +26,11 @@ 6.4 6.5 import sun.jvm.hotspot.debugger.*; 6.6 6.7 -class LinuxAddress implements Address { 6.8 +public class LinuxAddress implements Address { 6.9 protected LinuxDebugger debugger; 6.10 protected long addr; 6.11 6.12 - LinuxAddress(LinuxDebugger debugger, long addr) { 6.13 + public LinuxAddress(LinuxDebugger debugger, long addr) { 6.14 this.debugger = debugger; 6.15 this.addr = addr; 6.16 }
7.1 --- a/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxOopHandle.java Thu Jul 11 12:59:03 2013 -0400 7.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/debugger/linux/LinuxOopHandle.java Mon Jul 15 11:07:03 2013 +0100 7.3 @@ -26,8 +26,8 @@ 7.4 7.5 import sun.jvm.hotspot.debugger.*; 7.6 7.7 -class LinuxOopHandle extends LinuxAddress implements OopHandle { 7.8 - LinuxOopHandle(LinuxDebugger debugger, long addr) { 7.9 +public class LinuxOopHandle extends LinuxAddress implements OopHandle { 7.10 + public LinuxOopHandle(LinuxDebugger debugger, long addr) { 7.11 super(debugger, addr); 7.12 } 7.13
8.1 --- a/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java Thu Jul 11 12:59:03 2013 -0400 8.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/runtime/VM.java Mon Jul 15 11:07:03 2013 +0100 8.3 @@ -246,7 +246,7 @@ 8.4 } 8.5 } 8.6 8.7 - private static final boolean disableDerivedPrinterTableCheck; 8.8 + private static final boolean disableDerivedPointerTableCheck; 8.9 private static final Properties saProps; 8.10 8.11 static { 8.12 @@ -256,12 +256,12 @@ 8.13 url = VM.class.getClassLoader().getResource("sa.properties"); 8.14 saProps.load(new BufferedInputStream(url.openStream())); 8.15 } catch (Exception e) { 8.16 - throw new RuntimeException("Unable to load properties " + 8.17 + System.err.println("Unable to load properties " + 8.18 (url == null ? "null" : url.toString()) + 8.19 ": " + e.getMessage()); 8.20 } 8.21 8.22 - disableDerivedPrinterTableCheck = System.getProperty("sun.jvm.hotspot.runtime.VM.disableDerivedPointerTableCheck") != null; 8.23 + disableDerivedPointerTableCheck = System.getProperty("sun.jvm.hotspot.runtime.VM.disableDerivedPointerTableCheck") != null; 8.24 } 8.25 8.26 private VM(TypeDataBase db, JVMDebugger debugger, boolean isBigEndian) { 8.27 @@ -371,7 +371,8 @@ 8.28 /** This is used by the debugging system */ 8.29 public static void initialize(TypeDataBase db, JVMDebugger debugger) { 8.30 if (soleInstance != null) { 8.31 - throw new RuntimeException("Attempt to initialize VM twice"); 8.32 + // Using multiple SA Tool classes in the same process creates a call here. 8.33 + return; 8.34 } 8.35 soleInstance = new VM(db, debugger, debugger.getMachineDescription().isBigEndian()); 8.36 8.37 @@ -683,7 +684,7 @@ 8.38 8.39 /** Returns true if C2 derived pointer table should be used, false otherwise */ 8.40 public boolean useDerivedPointerTable() { 8.41 - return !disableDerivedPrinterTableCheck; 8.42 + return !disableDerivedPointerTableCheck; 8.43 } 8.44 8.45 /** Returns the code cache; should not be used if is core build */
9.1 --- a/agent/src/share/classes/sun/jvm/hotspot/tools/ClassLoaderStats.java Thu Jul 11 12:59:03 2013 -0400 9.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/ClassLoaderStats.java Mon Jul 15 11:07:03 2013 +0100 9.3 @@ -41,6 +41,14 @@ 9.4 public class ClassLoaderStats extends Tool { 9.5 boolean verbose = true; 9.6 9.7 + public ClassLoaderStats() { 9.8 + super(); 9.9 + } 9.10 + 9.11 + public ClassLoaderStats(JVMDebugger d) { 9.12 + super(d); 9.13 + } 9.14 + 9.15 public static void main(String[] args) { 9.16 ClassLoaderStats cls = new ClassLoaderStats(); 9.17 cls.start(args);
10.1 --- a/agent/src/share/classes/sun/jvm/hotspot/tools/FinalizerInfo.java Thu Jul 11 12:59:03 2013 -0400 10.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/FinalizerInfo.java Mon Jul 15 11:07:03 2013 +0100 10.3 @@ -24,6 +24,7 @@ 10.4 10.5 package sun.jvm.hotspot.tools; 10.6 10.7 +import sun.jvm.hotspot.debugger.JVMDebugger; 10.8 import sun.jvm.hotspot.tools.*; 10.9 10.10 import sun.jvm.hotspot.oops.*; 10.11 @@ -42,6 +43,15 @@ 10.12 * summary of these objects in the form of a histogram. 10.13 */ 10.14 public class FinalizerInfo extends Tool { 10.15 + 10.16 + public FinalizerInfo() { 10.17 + super(); 10.18 + } 10.19 + 10.20 + public FinalizerInfo(JVMDebugger d) { 10.21 + super(d); 10.22 + } 10.23 + 10.24 public static void main(String[] args) { 10.25 FinalizerInfo finfo = new FinalizerInfo(); 10.26 finfo.start(args);
11.1 --- a/agent/src/share/classes/sun/jvm/hotspot/tools/FlagDumper.java Thu Jul 11 12:59:03 2013 -0400 11.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/FlagDumper.java Mon Jul 15 11:07:03 2013 +0100 11.3 @@ -25,10 +25,19 @@ 11.4 package sun.jvm.hotspot.tools; 11.5 11.6 import java.io.PrintStream; 11.7 +import sun.jvm.hotspot.debugger.JVMDebugger; 11.8 import sun.jvm.hotspot.runtime.*; 11.9 11.10 public class FlagDumper extends Tool { 11.11 11.12 + public FlagDumper() { 11.13 + super(); 11.14 + } 11.15 + 11.16 + public FlagDumper(JVMDebugger d) { 11.17 + super(d); 11.18 + } 11.19 + 11.20 public void run() { 11.21 VM.Flag[] flags = VM.getVM().getCommandLineFlags(); 11.22 PrintStream out = System.out;
12.1 --- a/agent/src/share/classes/sun/jvm/hotspot/tools/HeapDumper.java Thu Jul 11 12:59:03 2013 -0400 12.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/HeapDumper.java Mon Jul 15 11:07:03 2013 +0100 12.3 @@ -25,6 +25,7 @@ 12.4 package sun.jvm.hotspot.tools; 12.5 12.6 import sun.jvm.hotspot.utilities.HeapHprofBinWriter; 12.7 +import sun.jvm.hotspot.debugger.JVMDebugger; 12.8 import java.io.IOException; 12.9 12.10 /* 12.11 @@ -42,6 +43,11 @@ 12.12 this.dumpFile = dumpFile; 12.13 } 12.14 12.15 + public HeapDumper(String dumpFile, JVMDebugger d) { 12.16 + super(d); 12.17 + this.dumpFile = dumpFile; 12.18 + } 12.19 + 12.20 protected void printFlagsUsage() { 12.21 System.out.println(" <no option>\tto dump heap to " + 12.22 DEFAULT_DUMP_FILE);
13.1 --- a/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java Thu Jul 11 12:59:03 2013 -0400 13.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/HeapSummary.java Mon Jul 15 11:07:03 2013 +0100 13.3 @@ -29,12 +29,21 @@ 13.4 import sun.jvm.hotspot.gc_implementation.g1.*; 13.5 import sun.jvm.hotspot.gc_implementation.parallelScavenge.*; 13.6 import sun.jvm.hotspot.gc_implementation.shared.*; 13.7 +import sun.jvm.hotspot.debugger.JVMDebugger; 13.8 import sun.jvm.hotspot.memory.*; 13.9 import sun.jvm.hotspot.oops.*; 13.10 import sun.jvm.hotspot.runtime.*; 13.11 13.12 public class HeapSummary extends Tool { 13.13 13.14 + public HeapSummary() { 13.15 + super(); 13.16 + } 13.17 + 13.18 + public HeapSummary(JVMDebugger d) { 13.19 + super(d); 13.20 + } 13.21 + 13.22 public static void main(String[] args) { 13.23 HeapSummary hs = new HeapSummary(); 13.24 hs.start(args);
14.1 --- a/agent/src/share/classes/sun/jvm/hotspot/tools/JInfo.java Thu Jul 11 12:59:03 2013 -0400 14.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/JInfo.java Mon Jul 15 11:07:03 2013 +0100 14.3 @@ -25,12 +25,21 @@ 14.4 package sun.jvm.hotspot.tools; 14.5 14.6 import sun.jvm.hotspot.runtime.*; 14.7 +import sun.jvm.hotspot.debugger.JVMDebugger; 14.8 14.9 public class JInfo extends Tool { 14.10 + public JInfo() { 14.11 + super(); 14.12 + } 14.13 + 14.14 public JInfo(int m) { 14.15 mode = m; 14.16 } 14.17 14.18 + public JInfo(JVMDebugger d) { 14.19 + super(d); 14.20 + } 14.21 + 14.22 protected boolean needsJavaPrefix() { 14.23 return false; 14.24 }
15.1 --- a/agent/src/share/classes/sun/jvm/hotspot/tools/JMap.java Thu Jul 11 12:59:03 2013 -0400 15.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/JMap.java Mon Jul 15 11:07:03 2013 +0100 15.3 @@ -25,6 +25,7 @@ 15.4 package sun.jvm.hotspot.tools; 15.5 15.6 import java.io.*; 15.7 +import sun.jvm.hotspot.debugger.JVMDebugger; 15.8 import sun.jvm.hotspot.utilities.*; 15.9 15.10 public class JMap extends Tool { 15.11 @@ -36,6 +37,10 @@ 15.12 this(MODE_PMAP); 15.13 } 15.14 15.15 + public JMap(JVMDebugger d) { 15.16 + super(d); 15.17 + } 15.18 + 15.19 protected boolean needsJavaPrefix() { 15.20 return false; 15.21 }
16.1 --- a/agent/src/share/classes/sun/jvm/hotspot/tools/JSnap.java Thu Jul 11 12:59:03 2013 -0400 16.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/JSnap.java Mon Jul 15 11:07:03 2013 +0100 16.3 @@ -25,9 +25,19 @@ 16.4 package sun.jvm.hotspot.tools; 16.5 16.6 import java.io.*; 16.7 +import sun.jvm.hotspot.debugger.JVMDebugger; 16.8 import sun.jvm.hotspot.runtime.*; 16.9 16.10 public class JSnap extends Tool { 16.11 + 16.12 + public JSnap() { 16.13 + super(); 16.14 + } 16.15 + 16.16 + public JSnap(JVMDebugger d) { 16.17 + super(d); 16.18 + } 16.19 + 16.20 public void run() { 16.21 final PrintStream out = System.out; 16.22 if (PerfMemory.initialized()) {
17.1 --- a/agent/src/share/classes/sun/jvm/hotspot/tools/JStack.java Thu Jul 11 12:59:03 2013 -0400 17.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/JStack.java Mon Jul 15 11:07:03 2013 +0100 17.3 @@ -24,6 +24,8 @@ 17.4 17.5 package sun.jvm.hotspot.tools; 17.6 17.7 +import sun.jvm.hotspot.debugger.JVMDebugger; 17.8 + 17.9 public class JStack extends Tool { 17.10 public JStack(boolean mixedMode, boolean concurrentLocks) { 17.11 this.mixedMode = mixedMode; 17.12 @@ -34,6 +36,10 @@ 17.13 this(true, true); 17.14 } 17.15 17.16 + public JStack(JVMDebugger d) { 17.17 + super(d); 17.18 + } 17.19 + 17.20 protected boolean needsJavaPrefix() { 17.21 return false; 17.22 }
18.1 --- a/agent/src/share/classes/sun/jvm/hotspot/tools/ObjectHistogram.java Thu Jul 11 12:59:03 2013 -0400 18.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/ObjectHistogram.java Mon Jul 15 11:07:03 2013 +0100 18.3 @@ -33,6 +33,14 @@ 18.4 an object histogram from a remote or crashed VM. */ 18.5 public class ObjectHistogram extends Tool { 18.6 18.7 + public ObjectHistogram() { 18.8 + super(); 18.9 + } 18.10 + 18.11 + public ObjectHistogram(JVMDebugger d) { 18.12 + super(d); 18.13 + } 18.14 + 18.15 public void run() { 18.16 run(System.out, System.err); 18.17 }
19.1 --- a/agent/src/share/classes/sun/jvm/hotspot/tools/PMap.java Thu Jul 11 12:59:03 2013 -0400 19.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/PMap.java Mon Jul 15 11:07:03 2013 +0100 19.3 @@ -31,6 +31,15 @@ 19.4 import sun.jvm.hotspot.runtime.*; 19.5 19.6 public class PMap extends Tool { 19.7 + 19.8 + public PMap() { 19.9 + super(); 19.10 + } 19.11 + 19.12 + public PMap(JVMDebugger d) { 19.13 + super(d); 19.14 + } 19.15 + 19.16 public void run() { 19.17 run(System.out); 19.18 }
20.1 --- a/agent/src/share/classes/sun/jvm/hotspot/tools/PStack.java Thu Jul 11 12:59:03 2013 -0400 20.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/PStack.java Mon Jul 15 11:07:03 2013 +0100 20.3 @@ -45,6 +45,10 @@ 20.4 this(true, true); 20.5 } 20.6 20.7 + public PStack(JVMDebugger d) { 20.8 + super(d); 20.9 + } 20.10 + 20.11 public void run() { 20.12 run(System.out); 20.13 }
21.1 --- a/agent/src/share/classes/sun/jvm/hotspot/tools/StackTrace.java Thu Jul 11 12:59:03 2013 -0400 21.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/StackTrace.java Mon Jul 15 11:07:03 2013 +0100 21.3 @@ -45,6 +45,16 @@ 21.4 run(System.out); 21.5 } 21.6 21.7 + public StackTrace(JVMDebugger d) { 21.8 + super(d); 21.9 + } 21.10 + 21.11 + public StackTrace(JVMDebugger d, boolean v, boolean concurrentLocks) { 21.12 + super(d); 21.13 + this.verbose = v; 21.14 + this.concurrentLocks = concurrentLocks; 21.15 + } 21.16 + 21.17 public void run(java.io.PrintStream tty) { 21.18 // Ready to go with the database... 21.19 try {
22.1 --- a/agent/src/share/classes/sun/jvm/hotspot/tools/SysPropsDumper.java Thu Jul 11 12:59:03 2013 -0400 22.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/SysPropsDumper.java Mon Jul 15 11:07:03 2013 +0100 22.3 @@ -27,10 +27,19 @@ 22.4 import java.io.PrintStream; 22.5 import java.util.*; 22.6 22.7 +import sun.jvm.hotspot.debugger.JVMDebugger; 22.8 import sun.jvm.hotspot.runtime.*; 22.9 22.10 public class SysPropsDumper extends Tool { 22.11 22.12 + public SysPropsDumper() { 22.13 + super(); 22.14 + } 22.15 + 22.16 + public SysPropsDumper(JVMDebugger d) { 22.17 + super(d); 22.18 + } 22.19 + 22.20 public void run() { 22.21 Properties sysProps = VM.getVM().getSystemProperties(); 22.22 PrintStream out = System.out;
23.1 --- a/agent/src/share/classes/sun/jvm/hotspot/tools/Tool.java Thu Jul 11 12:59:03 2013 -0400 23.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/Tool.java Mon Jul 15 11:07:03 2013 +0100 23.3 @@ -35,6 +35,7 @@ 23.4 23.5 public abstract class Tool implements Runnable { 23.6 private HotSpotAgent agent; 23.7 + private JVMDebugger jvmDebugger; 23.8 private int debugeeType; 23.9 23.10 // debugeeType is one of constants below 23.11 @@ -42,6 +43,13 @@ 23.12 protected static final int DEBUGEE_CORE = 1; 23.13 protected static final int DEBUGEE_REMOTE = 2; 23.14 23.15 + public Tool() { 23.16 + } 23.17 + 23.18 + public Tool(JVMDebugger d) { 23.19 + jvmDebugger = d; 23.20 + } 23.21 + 23.22 public String getName() { 23.23 return getClass().getName(); 23.24 } 23.25 @@ -90,7 +98,6 @@ 23.26 23.27 protected void usage() { 23.28 printUsage(); 23.29 - System.exit(1); 23.30 } 23.31 23.32 /* 23.33 @@ -106,13 +113,13 @@ 23.34 protected void stop() { 23.35 if (agent != null) { 23.36 agent.detach(); 23.37 - System.exit(0); 23.38 } 23.39 } 23.40 23.41 protected void start(String[] args) { 23.42 if ((args.length < 1) || (args.length > 2)) { 23.43 usage(); 23.44 + return; 23.45 } 23.46 23.47 // Attempt to handle -h or -help or some invalid flag 23.48 @@ -185,13 +192,31 @@ 23.49 } 23.50 if (e.getMessage() != null) { 23.51 err.print(e.getMessage()); 23.52 + e.printStackTrace(); 23.53 } 23.54 err.println(); 23.55 - System.exit(1); 23.56 + return; 23.57 } 23.58 23.59 err.println("Debugger attached successfully."); 23.60 + startInternal(); 23.61 + } 23.62 23.63 + // When using an existing JVMDebugger. 23.64 + public void start() { 23.65 + 23.66 + if (jvmDebugger == null) { 23.67 + throw new RuntimeException("Tool.start() called with no JVMDebugger set."); 23.68 + } 23.69 + agent = new HotSpotAgent(); 23.70 + agent.attach(jvmDebugger); 23.71 + startInternal(); 23.72 + } 23.73 + 23.74 + // Remains of the start mechanism, common to both start methods. 
23.75 + private void startInternal() { 23.76 + 23.77 + PrintStream err = System.err; 23.78 VM vm = VM.getVM(); 23.79 if (vm.isCore()) { 23.80 err.println("Core build detected.");
24.1 --- a/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassDump.java Thu Jul 11 12:59:03 2013 -0400 24.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/jcore/ClassDump.java Mon Jul 15 11:07:03 2013 +0100 24.3 @@ -25,6 +25,7 @@ 24.4 package sun.jvm.hotspot.tools.jcore; 24.5 24.6 import java.io.*; 24.7 +import java.lang.reflect.Constructor; 24.8 import java.util.jar.JarOutputStream; 24.9 import java.util.jar.JarEntry; 24.10 import java.util.jar.Manifest; 24.11 @@ -38,6 +39,16 @@ 24.12 private ClassFilter classFilter; 24.13 private String outputDirectory; 24.14 private JarOutputStream jarStream; 24.15 + private String pkgList; 24.16 + 24.17 + public ClassDump() { 24.18 + super(); 24.19 + } 24.20 + 24.21 + public ClassDump(JVMDebugger d, String pkgList) { 24.22 + super(d); 24.23 + this.pkgList = pkgList; 24.24 + } 24.25 24.26 public void setClassFilter(ClassFilter cf) { 24.27 classFilter = cf; 24.28 @@ -63,6 +74,25 @@ 24.29 public void run() { 24.30 // Ready to go with the database... 24.31 try { 24.32 + // The name of the filter always comes from a System property. 24.33 + // If we have a pkgList, pass it, otherwise let the filter read 24.34 + // its own System property for the list of classes. 
24.35 + String filterClassName = System.getProperty("sun.jvm.hotspot.tools.jcore.filter", 24.36 + "sun.jvm.hotspot.tools.jcore.PackageNameFilter"); 24.37 + try { 24.38 + Class filterClass = Class.forName(filterClassName); 24.39 + if (pkgList == null) { 24.40 + classFilter = (ClassFilter) filterClass.newInstance(); 24.41 + } else { 24.42 + Constructor con = filterClass.getConstructor(String.class); 24.43 + classFilter = (ClassFilter) con.newInstance(pkgList); 24.44 + } 24.45 + } catch(Exception exp) { 24.46 + System.err.println("Warning: Can not create class filter!"); 24.47 + } 24.48 + 24.49 + String outputDirectory = System.getProperty("sun.jvm.hotspot.tools.jcore.outputDir", "."); 24.50 + setOutputDirectory(outputDirectory); 24.51 24.52 // walk through the system dictionary 24.53 SystemDictionary dict = VM.getVM().getSystemDictionary(); 24.54 @@ -139,26 +169,8 @@ 24.55 } 24.56 24.57 public static void main(String[] args) { 24.58 - // load class filters 24.59 - ClassFilter classFilter = null; 24.60 - String filterClassName = System.getProperty("sun.jvm.hotspot.tools.jcore.filter"); 24.61 - if (filterClassName != null) { 24.62 - try { 24.63 - Class filterClass = Class.forName(filterClassName); 24.64 - classFilter = (ClassFilter) filterClass.newInstance(); 24.65 - } catch(Exception exp) { 24.66 - System.err.println("Warning: Can not create class filter!"); 24.67 - } 24.68 - } 24.69 - 24.70 - String outputDirectory = System.getProperty("sun.jvm.hotspot.tools.jcore.outputDir"); 24.71 - if (outputDirectory == null) 24.72 - outputDirectory = "."; 24.73 - 24.74 24.75 ClassDump cd = new ClassDump(); 24.76 - cd.setClassFilter(classFilter); 24.77 - cd.setOutputDirectory(outputDirectory); 24.78 cd.start(args); 24.79 cd.stop(); 24.80 }
25.1 --- a/agent/src/share/classes/sun/jvm/hotspot/tools/soql/JSDB.java Thu Jul 11 12:59:03 2013 -0400 25.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/soql/JSDB.java Mon Jul 15 11:07:03 2013 +0100 25.3 @@ -24,12 +24,22 @@ 25.4 25.5 package sun.jvm.hotspot.tools.soql; 25.6 25.7 +import sun.jvm.hotspot.debugger.JVMDebugger; 25.8 import sun.jvm.hotspot.tools.*; 25.9 import sun.jvm.hotspot.utilities.*; 25.10 import sun.jvm.hotspot.utilities.soql.*; 25.11 25.12 /** This is command line JavaScript debugger console */ 25.13 public class JSDB extends Tool { 25.14 + 25.15 + public JSDB() { 25.16 + super(); 25.17 + } 25.18 + 25.19 + public JSDB(JVMDebugger d) { 25.20 + super(d); 25.21 + } 25.22 + 25.23 public static void main(String[] args) { 25.24 JSDB jsdb = new JSDB(); 25.25 jsdb.start(args);
26.1 --- a/agent/src/share/classes/sun/jvm/hotspot/tools/soql/SOQL.java Thu Jul 11 12:59:03 2013 -0400 26.2 +++ b/agent/src/share/classes/sun/jvm/hotspot/tools/soql/SOQL.java Mon Jul 15 11:07:03 2013 +0100 26.3 @@ -44,6 +44,14 @@ 26.4 soql.stop(); 26.5 } 26.6 26.7 + public SOQL() { 26.8 + super(); 26.9 + } 26.10 + 26.11 + public SOQL(JVMDebugger d) { 26.12 + super(d); 26.13 + } 26.14 + 26.15 protected SOQLEngine soqlEngine; 26.16 protected BufferedReader in = new BufferedReader(new InputStreamReader(System.in)); 26.17 protected PrintStream out = System.out;
27.1 --- a/make/bsd/makefiles/build_vm_def.sh Thu Jul 11 12:59:03 2013 -0400 27.2 +++ b/make/bsd/makefiles/build_vm_def.sh Mon Jul 15 11:07:03 2013 +0100 27.3 @@ -7,6 +7,6 @@ 27.4 NM=nm 27.5 fi 27.6 27.7 -$NM --defined-only $* | awk ' 27.8 - { if ($3 ~ /^_ZTV/ || $3 ~ /^gHotSpotVM/) print "\t" $3 ";" } 27.9 +$NM -Uj $* | awk ' 27.10 + { if ($3 ~ /^_ZTV/ || $3 ~ /^gHotSpotVM/) print "\t" $3 } 27.11 '
28.1 --- a/make/bsd/makefiles/gcc.make Thu Jul 11 12:59:03 2013 -0400 28.2 +++ b/make/bsd/makefiles/gcc.make Mon Jul 15 11:07:03 2013 +0100 28.3 @@ -1,5 +1,5 @@ 28.4 # 28.5 -# Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. 28.6 +# Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. 28.7 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 28.8 # 28.9 # This code is free software; you can redistribute it and/or modify it 28.10 @@ -368,8 +368,8 @@ 28.11 # Standard linker flags 28.12 LFLAGS += 28.13 28.14 - # Darwin doesn't use ELF and doesn't support version scripts 28.15 - LDNOMAP = true 28.16 + # The apple linker has its own variant of mapfiles/version-scripts 28.17 + MAPFLAG = -Xlinker -exported_symbols_list -Xlinker FILENAME 28.18 28.19 # Use $(SONAMEFLAG:SONAME=soname) to specify the intrinsic name of a shared obj 28.20 SONAMEFLAG =
29.1 --- a/make/bsd/makefiles/mapfile-vers-debug Thu Jul 11 12:59:03 2013 -0400 29.2 +++ b/make/bsd/makefiles/mapfile-vers-debug Mon Jul 15 11:07:03 2013 +0100 29.3 @@ -1,7 +1,3 @@ 29.4 -# 29.5 -# @(#)mapfile-vers-debug 1.18 07/10/25 16:47:35 29.6 -# 29.7 - 29.8 # 29.9 # Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved. 29.10 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 29.11 @@ -23,273 +19,244 @@ 29.12 # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 29.13 # or visit www.oracle.com if you need additional information or have any 29.14 # questions. 29.15 -# 29.16 # 29.17 +# 29.18 +# Only used for OSX/Darwin builds 29.19 29.20 # Define public interface. 29.21 + # _JNI 29.22 + _JNI_CreateJavaVM 29.23 + _JNI_GetCreatedJavaVMs 29.24 + _JNI_GetDefaultJavaVMInitArgs 29.25 29.26 -SUNWprivate_1.1 { 29.27 - global: 29.28 - # JNI 29.29 - JNI_CreateJavaVM; 29.30 - JNI_GetCreatedJavaVMs; 29.31 - JNI_GetDefaultJavaVMInitArgs; 29.32 + # _JVM 29.33 + _JVM_Accept 29.34 + _JVM_ActiveProcessorCount 29.35 + _JVM_AllocateNewArray 29.36 + _JVM_AllocateNewObject 29.37 + _JVM_ArrayCopy 29.38 + _JVM_AssertionStatusDirectives 29.39 + _JVM_Available 29.40 + _JVM_Bind 29.41 + _JVM_ClassDepth 29.42 + _JVM_ClassLoaderDepth 29.43 + _JVM_Clone 29.44 + _JVM_Close 29.45 + _JVM_CX8Field 29.46 + _JVM_CompileClass 29.47 + _JVM_CompileClasses 29.48 + _JVM_CompilerCommand 29.49 + _JVM_Connect 29.50 + _JVM_ConstantPoolGetClassAt 29.51 + _JVM_ConstantPoolGetClassAtIfLoaded 29.52 + _JVM_ConstantPoolGetDoubleAt 29.53 + _JVM_ConstantPoolGetFieldAt 29.54 + _JVM_ConstantPoolGetFieldAtIfLoaded 29.55 + _JVM_ConstantPoolGetFloatAt 29.56 + _JVM_ConstantPoolGetIntAt 29.57 + _JVM_ConstantPoolGetLongAt 29.58 + _JVM_ConstantPoolGetMethodAt 29.59 + _JVM_ConstantPoolGetMethodAtIfLoaded 29.60 + _JVM_ConstantPoolGetMemberRefInfoAt 29.61 + _JVM_ConstantPoolGetSize 29.62 + _JVM_ConstantPoolGetStringAt 29.63 + _JVM_ConstantPoolGetUTF8At 29.64 + 
_JVM_CountStackFrames 29.65 + _JVM_CurrentClassLoader 29.66 + _JVM_CurrentLoadedClass 29.67 + _JVM_CurrentThread 29.68 + _JVM_CurrentTimeMillis 29.69 + _JVM_DefineClass 29.70 + _JVM_DefineClassWithSource 29.71 + _JVM_DefineClassWithSourceCond 29.72 + _JVM_DesiredAssertionStatus 29.73 + _JVM_DisableCompiler 29.74 + _JVM_DoPrivileged 29.75 + _JVM_DTraceGetVersion 29.76 + _JVM_DTraceActivate 29.77 + _JVM_DTraceIsProbeEnabled 29.78 + _JVM_DTraceIsSupported 29.79 + _JVM_DTraceDispose 29.80 + _JVM_DumpAllStacks 29.81 + _JVM_DumpThreads 29.82 + _JVM_EnableCompiler 29.83 + _JVM_Exit 29.84 + _JVM_FillInStackTrace 29.85 + _JVM_FindClassFromClass 29.86 + _JVM_FindClassFromClassLoader 29.87 + _JVM_FindClassFromBootLoader 29.88 + _JVM_FindLibraryEntry 29.89 + _JVM_FindLoadedClass 29.90 + _JVM_FindPrimitiveClass 29.91 + _JVM_FindSignal 29.92 + _JVM_FreeMemory 29.93 + _JVM_GC 29.94 + _JVM_GetAllThreads 29.95 + _JVM_GetArrayElement 29.96 + _JVM_GetArrayLength 29.97 + _JVM_GetCPClassNameUTF 29.98 + _JVM_GetCPFieldClassNameUTF 29.99 + _JVM_GetCPFieldModifiers 29.100 + _JVM_GetCPFieldNameUTF 29.101 + _JVM_GetCPFieldSignatureUTF 29.102 + _JVM_GetCPMethodClassNameUTF 29.103 + _JVM_GetCPMethodModifiers 29.104 + _JVM_GetCPMethodNameUTF 29.105 + _JVM_GetCPMethodSignatureUTF 29.106 + _JVM_GetCallerClass 29.107 + _JVM_GetClassAccessFlags 29.108 + _JVM_GetClassAnnotations 29.109 + _JVM_GetClassCPEntriesCount 29.110 + _JVM_GetClassCPTypes 29.111 + _JVM_GetClassConstantPool 29.112 + _JVM_GetClassContext 29.113 + _JVM_GetClassDeclaredConstructors 29.114 + _JVM_GetClassDeclaredFields 29.115 + _JVM_GetClassDeclaredMethods 29.116 + _JVM_GetClassFieldsCount 29.117 + _JVM_GetClassInterfaces 29.118 + _JVM_GetClassLoader 29.119 + _JVM_GetClassMethodsCount 29.120 + _JVM_GetClassModifiers 29.121 + _JVM_GetClassName 29.122 + _JVM_GetClassNameUTF 29.123 + _JVM_GetClassSignature 29.124 + _JVM_GetClassSigners 29.125 + _JVM_GetClassTypeAnnotations 29.126 + _JVM_GetComponentType 29.127 + 
_JVM_GetDeclaredClasses 29.128 + _JVM_GetDeclaringClass 29.129 + _JVM_GetEnclosingMethodInfo 29.130 + _JVM_GetFieldAnnotations 29.131 + _JVM_GetFieldIxModifiers 29.132 + _JVM_GetFieldTypeAnnotations 29.133 + _JVM_GetHostName 29.134 + _JVM_GetInheritedAccessControlContext 29.135 + _JVM_GetInterfaceVersion 29.136 + _JVM_GetLastErrorString 29.137 + _JVM_GetManagement 29.138 + _JVM_GetMethodAnnotations 29.139 + _JVM_GetMethodDefaultAnnotationValue 29.140 + _JVM_GetMethodIxArgsSize 29.141 + _JVM_GetMethodIxByteCode 29.142 + _JVM_GetMethodIxByteCodeLength 29.143 + _JVM_GetMethodIxExceptionIndexes 29.144 + _JVM_GetMethodIxExceptionTableEntry 29.145 + _JVM_GetMethodIxExceptionTableLength 29.146 + _JVM_GetMethodIxExceptionsCount 29.147 + _JVM_GetMethodIxLocalsCount 29.148 + _JVM_GetMethodIxMaxStack 29.149 + _JVM_GetMethodIxModifiers 29.150 + _JVM_GetMethodIxNameUTF 29.151 + _JVM_GetMethodIxSignatureUTF 29.152 + _JVM_GetMethodParameterAnnotations 29.153 + _JVM_GetMethodParameters 29.154 + _JVM_GetMethodTypeAnnotations 29.155 + _JVM_GetPrimitiveArrayElement 29.156 + _JVM_GetProtectionDomain 29.157 + _JVM_GetSockName 29.158 + _JVM_GetSockOpt 29.159 + _JVM_GetStackAccessControlContext 29.160 + _JVM_GetStackTraceDepth 29.161 + _JVM_GetStackTraceElement 29.162 + _JVM_GetSystemPackage 29.163 + _JVM_GetSystemPackages 29.164 + _JVM_GetThreadStateNames 29.165 + _JVM_GetThreadStateValues 29.166 + _JVM_GetVersionInfo 29.167 + _JVM_Halt 29.168 + _JVM_HoldsLock 29.169 + _JVM_IHashCode 29.170 + _JVM_InitAgentProperties 29.171 + _JVM_InitProperties 29.172 + _JVM_InitializeCompiler 29.173 + _JVM_InitializeSocketLibrary 29.174 + _JVM_InternString 29.175 + _JVM_Interrupt 29.176 + _JVM_InvokeMethod 29.177 + _JVM_IsArrayClass 29.178 + _JVM_IsConstructorIx 29.179 + _JVM_IsInterface 29.180 + _JVM_IsInterrupted 29.181 + _JVM_IsNaN 29.182 + _JVM_IsPrimitiveClass 29.183 + _JVM_IsSameClassPackage 29.184 + _JVM_IsSilentCompiler 29.185 + _JVM_IsSupportedJNIVersion 29.186 + _JVM_IsThreadAlive 29.187 + 
_JVM_IsVMGeneratedMethodIx 29.188 + _JVM_LatestUserDefinedLoader 29.189 + _JVM_Listen 29.190 + _JVM_LoadClass0 29.191 + _JVM_LoadLibrary 29.192 + _JVM_Lseek 29.193 + _JVM_MaxObjectInspectionAge 29.194 + _JVM_MaxMemory 29.195 + _JVM_MonitorNotify 29.196 + _JVM_MonitorNotifyAll 29.197 + _JVM_MonitorWait 29.198 + _JVM_NanoTime 29.199 + _JVM_NativePath 29.200 + _JVM_NewArray 29.201 + _JVM_NewInstanceFromConstructor 29.202 + _JVM_NewMultiArray 29.203 + _JVM_OnExit 29.204 + _JVM_Open 29.205 + _JVM_RaiseSignal 29.206 + _JVM_RawMonitorCreate 29.207 + _JVM_RawMonitorDestroy 29.208 + _JVM_RawMonitorEnter 29.209 + _JVM_RawMonitorExit 29.210 + _JVM_Read 29.211 + _JVM_Recv 29.212 + _JVM_RecvFrom 29.213 + _JVM_RegisterSignal 29.214 + _JVM_ReleaseUTF 29.215 + _JVM_ResolveClass 29.216 + _JVM_ResumeThread 29.217 + _JVM_Send 29.218 + _JVM_SendTo 29.219 + _JVM_SetArrayElement 29.220 + _JVM_SetClassSigners 29.221 + _JVM_SetLength 29.222 + _JVM_SetNativeThreadName 29.223 + _JVM_SetPrimitiveArrayElement 29.224 + _JVM_SetProtectionDomain 29.225 + _JVM_SetSockOpt 29.226 + _JVM_SetThreadPriority 29.227 + _JVM_Sleep 29.228 + _JVM_Socket 29.229 + _JVM_SocketAvailable 29.230 + _JVM_SocketClose 29.231 + _JVM_SocketShutdown 29.232 + _JVM_StartThread 29.233 + _JVM_StopThread 29.234 + _JVM_SuspendThread 29.235 + _JVM_SupportsCX8 29.236 + _JVM_Sync 29.237 + _JVM_Timeout 29.238 + _JVM_TotalMemory 29.239 + _JVM_TraceInstructions 29.240 + _JVM_TraceMethodCalls 29.241 + _JVM_UnloadLibrary 29.242 + _JVM_Write 29.243 + _JVM_Yield 29.244 + _JVM_handle_bsd_signal 29.245 29.246 - # JVM 29.247 - JVM_Accept; 29.248 - JVM_ActiveProcessorCount; 29.249 - JVM_AllocateNewArray; 29.250 - JVM_AllocateNewObject; 29.251 - JVM_ArrayCopy; 29.252 - JVM_AssertionStatusDirectives; 29.253 - JVM_Available; 29.254 - JVM_Bind; 29.255 - JVM_ClassDepth; 29.256 - JVM_ClassLoaderDepth; 29.257 - JVM_Clone; 29.258 - JVM_Close; 29.259 - JVM_CX8Field; 29.260 - JVM_CompileClass; 29.261 - JVM_CompileClasses; 29.262 - 
JVM_CompilerCommand; 29.263 - JVM_Connect; 29.264 - JVM_ConstantPoolGetClassAt; 29.265 - JVM_ConstantPoolGetClassAtIfLoaded; 29.266 - JVM_ConstantPoolGetDoubleAt; 29.267 - JVM_ConstantPoolGetFieldAt; 29.268 - JVM_ConstantPoolGetFieldAtIfLoaded; 29.269 - JVM_ConstantPoolGetFloatAt; 29.270 - JVM_ConstantPoolGetIntAt; 29.271 - JVM_ConstantPoolGetLongAt; 29.272 - JVM_ConstantPoolGetMethodAt; 29.273 - JVM_ConstantPoolGetMethodAtIfLoaded; 29.274 - JVM_ConstantPoolGetMemberRefInfoAt; 29.275 - JVM_ConstantPoolGetSize; 29.276 - JVM_ConstantPoolGetStringAt; 29.277 - JVM_ConstantPoolGetUTF8At; 29.278 - JVM_CountStackFrames; 29.279 - JVM_CurrentClassLoader; 29.280 - JVM_CurrentLoadedClass; 29.281 - JVM_CurrentThread; 29.282 - JVM_CurrentTimeMillis; 29.283 - JVM_DefineClass; 29.284 - JVM_DefineClassWithSource; 29.285 - JVM_DefineClassWithSourceCond; 29.286 - JVM_DesiredAssertionStatus; 29.287 - JVM_DisableCompiler; 29.288 - JVM_DoPrivileged; 29.289 - JVM_DTraceGetVersion; 29.290 - JVM_DTraceActivate; 29.291 - JVM_DTraceIsProbeEnabled; 29.292 - JVM_DTraceIsSupported; 29.293 - JVM_DTraceDispose; 29.294 - JVM_DumpAllStacks; 29.295 - JVM_DumpThreads; 29.296 - JVM_EnableCompiler; 29.297 - JVM_Exit; 29.298 - JVM_FillInStackTrace; 29.299 - JVM_FindClassFromClass; 29.300 - JVM_FindClassFromClassLoader; 29.301 - JVM_FindClassFromBootLoader; 29.302 - JVM_FindLibraryEntry; 29.303 - JVM_FindLoadedClass; 29.304 - JVM_FindPrimitiveClass; 29.305 - JVM_FindSignal; 29.306 - JVM_FreeMemory; 29.307 - JVM_GC; 29.308 - JVM_GetAllThreads; 29.309 - JVM_GetArrayElement; 29.310 - JVM_GetArrayLength; 29.311 - JVM_GetCPClassNameUTF; 29.312 - JVM_GetCPFieldClassNameUTF; 29.313 - JVM_GetCPFieldModifiers; 29.314 - JVM_GetCPFieldNameUTF; 29.315 - JVM_GetCPFieldSignatureUTF; 29.316 - JVM_GetCPMethodClassNameUTF; 29.317 - JVM_GetCPMethodModifiers; 29.318 - JVM_GetCPMethodNameUTF; 29.319 - JVM_GetCPMethodSignatureUTF; 29.320 - JVM_GetCallerClass; 29.321 - JVM_GetClassAccessFlags; 29.322 - 
JVM_GetClassAnnotations; 29.323 - JVM_GetClassCPEntriesCount; 29.324 - JVM_GetClassCPTypes; 29.325 - JVM_GetClassConstantPool; 29.326 - JVM_GetClassContext; 29.327 - JVM_GetClassDeclaredConstructors; 29.328 - JVM_GetClassDeclaredFields; 29.329 - JVM_GetClassDeclaredMethods; 29.330 - JVM_GetClassFieldsCount; 29.331 - JVM_GetClassInterfaces; 29.332 - JVM_GetClassLoader; 29.333 - JVM_GetClassMethodsCount; 29.334 - JVM_GetClassModifiers; 29.335 - JVM_GetClassName; 29.336 - JVM_GetClassNameUTF; 29.337 - JVM_GetClassSignature; 29.338 - JVM_GetClassSigners; 29.339 - JVM_GetClassTypeAnnotations; 29.340 - JVM_GetComponentType; 29.341 - JVM_GetDeclaredClasses; 29.342 - JVM_GetDeclaringClass; 29.343 - JVM_GetEnclosingMethodInfo; 29.344 - JVM_GetFieldAnnotations; 29.345 - JVM_GetFieldIxModifiers; 29.346 - JVM_GetFieldTypeAnnotations; 29.347 - JVM_GetHostName; 29.348 - JVM_GetInheritedAccessControlContext; 29.349 - JVM_GetInterfaceVersion; 29.350 - JVM_GetLastErrorString; 29.351 - JVM_GetManagement; 29.352 - JVM_GetMethodAnnotations; 29.353 - JVM_GetMethodDefaultAnnotationValue; 29.354 - JVM_GetMethodIxArgsSize; 29.355 - JVM_GetMethodIxByteCode; 29.356 - JVM_GetMethodIxByteCodeLength; 29.357 - JVM_GetMethodIxExceptionIndexes; 29.358 - JVM_GetMethodIxExceptionTableEntry; 29.359 - JVM_GetMethodIxExceptionTableLength; 29.360 - JVM_GetMethodIxExceptionsCount; 29.361 - JVM_GetMethodIxLocalsCount; 29.362 - JVM_GetMethodIxMaxStack; 29.363 - JVM_GetMethodIxModifiers; 29.364 - JVM_GetMethodIxNameUTF; 29.365 - JVM_GetMethodIxSignatureUTF; 29.366 - JVM_GetMethodParameterAnnotations; 29.367 - JVM_GetMethodParameters; 29.368 - JVM_GetMethodTypeAnnotations; 29.369 - JVM_GetPrimitiveArrayElement; 29.370 - JVM_GetProtectionDomain; 29.371 - JVM_GetSockName; 29.372 - JVM_GetSockOpt; 29.373 - JVM_GetStackAccessControlContext; 29.374 - JVM_GetStackTraceDepth; 29.375 - JVM_GetStackTraceElement; 29.376 - JVM_GetSystemPackage; 29.377 - JVM_GetSystemPackages; 29.378 - JVM_GetThreadStateNames; 29.379 - 
JVM_GetThreadStateValues; 29.380 - JVM_GetVersionInfo; 29.381 - JVM_Halt; 29.382 - JVM_HoldsLock; 29.383 - JVM_IHashCode; 29.384 - JVM_InitAgentProperties; 29.385 - JVM_InitProperties; 29.386 - JVM_InitializeCompiler; 29.387 - JVM_InitializeSocketLibrary; 29.388 - JVM_InternString; 29.389 - JVM_Interrupt; 29.390 - JVM_InvokeMethod; 29.391 - JVM_IsArrayClass; 29.392 - JVM_IsConstructorIx; 29.393 - JVM_IsInterface; 29.394 - JVM_IsInterrupted; 29.395 - JVM_IsNaN; 29.396 - JVM_IsPrimitiveClass; 29.397 - JVM_IsSameClassPackage; 29.398 - JVM_IsSilentCompiler; 29.399 - JVM_IsSupportedJNIVersion; 29.400 - JVM_IsThreadAlive; 29.401 - JVM_IsVMGeneratedMethodIx; 29.402 - JVM_LatestUserDefinedLoader; 29.403 - JVM_Listen; 29.404 - JVM_LoadClass0; 29.405 - JVM_LoadLibrary; 29.406 - JVM_Lseek; 29.407 - JVM_MaxObjectInspectionAge; 29.408 - JVM_MaxMemory; 29.409 - JVM_MonitorNotify; 29.410 - JVM_MonitorNotifyAll; 29.411 - JVM_MonitorWait; 29.412 - JVM_NanoTime; 29.413 - JVM_NativePath; 29.414 - JVM_NewArray; 29.415 - JVM_NewInstanceFromConstructor; 29.416 - JVM_NewMultiArray; 29.417 - JVM_OnExit; 29.418 - JVM_Open; 29.419 - JVM_RaiseSignal; 29.420 - JVM_RawMonitorCreate; 29.421 - JVM_RawMonitorDestroy; 29.422 - JVM_RawMonitorEnter; 29.423 - JVM_RawMonitorExit; 29.424 - JVM_Read; 29.425 - JVM_Recv; 29.426 - JVM_RecvFrom; 29.427 - JVM_RegisterSignal; 29.428 - JVM_ReleaseUTF; 29.429 - JVM_ResolveClass; 29.430 - JVM_ResumeThread; 29.431 - JVM_Send; 29.432 - JVM_SendTo; 29.433 - JVM_SetArrayElement; 29.434 - JVM_SetClassSigners; 29.435 - JVM_SetLength; 29.436 - JVM_SetPrimitiveArrayElement; 29.437 - JVM_SetProtectionDomain; 29.438 - JVM_SetSockOpt; 29.439 - JVM_SetThreadPriority; 29.440 - JVM_Sleep; 29.441 - JVM_Socket; 29.442 - JVM_SocketAvailable; 29.443 - JVM_SocketClose; 29.444 - JVM_SocketShutdown; 29.445 - JVM_StartThread; 29.446 - JVM_StopThread; 29.447 - JVM_SuspendThread; 29.448 - JVM_SupportsCX8; 29.449 - JVM_Sync; 29.450 - JVM_Timeout; 29.451 - JVM_TotalMemory; 29.452 - 
JVM_TraceInstructions; 29.453 - JVM_TraceMethodCalls; 29.454 - JVM_UnloadLibrary; 29.455 - JVM_Write; 29.456 - JVM_Yield; 29.457 - JVM_handle_bsd_signal; 29.458 - 29.459 - # Old reflection routines 29.460 - # These do not need to be present in the product build in JDK 1.4 29.461 - # but their code has not been removed yet because there will not 29.462 - # be a substantial code savings until JVM_InvokeMethod and 29.463 - # JVM_NewInstanceFromConstructor can also be removed; see 29.464 - # reflectionCompat.hpp. 29.465 - JVM_GetClassConstructor; 29.466 - JVM_GetClassConstructors; 29.467 - JVM_GetClassField; 29.468 - JVM_GetClassFields; 29.469 - JVM_GetClassMethod; 29.470 - JVM_GetClassMethods; 29.471 - JVM_GetField; 29.472 - JVM_GetPrimitiveField; 29.473 - JVM_NewInstance; 29.474 - JVM_SetField; 29.475 - JVM_SetPrimitiveField; 29.476 - 29.477 - # debug JVM 29.478 - JVM_AccessVMBooleanFlag; 29.479 - JVM_AccessVMIntFlag; 29.480 - JVM_VMBreakPoint; 29.481 + # debug _JVM 29.482 + _JVM_AccessVMBooleanFlag 29.483 + _JVM_AccessVMIntFlag 29.484 + _JVM_VMBreakPoint 29.485 29.486 # miscellaneous functions 29.487 - jio_fprintf; 29.488 - jio_printf; 29.489 - jio_snprintf; 29.490 - jio_vfprintf; 29.491 - jio_vsnprintf; 29.492 - fork1; 29.493 - numa_warn; 29.494 - numa_error; 29.495 - 29.496 - # Needed because there is no JVM interface for this. 29.497 - sysThreadAvailableStackWithSlack; 29.498 + _jio_fprintf 29.499 + _jio_printf 29.500 + _jio_snprintf 29.501 + _jio_vfprintf 29.502 + _jio_vsnprintf 29.503 29.504 # This is for Forte Analyzer profiling support. 29.505 - AsyncGetCallTrace; 29.506 + _AsyncGetCallTrace 29.507 29.508 # INSERT VTABLE SYMBOLS HERE 29.509 29.510 - local: 29.511 - *; 29.512 -}; 29.513 -
30.1 --- a/make/bsd/makefiles/mapfile-vers-product Thu Jul 11 12:59:03 2013 -0400 30.2 +++ b/make/bsd/makefiles/mapfile-vers-product Mon Jul 15 11:07:03 2013 +0100 30.3 @@ -1,7 +1,3 @@ 30.4 -# 30.5 -# @(#)mapfile-vers-product 1.19 08/02/12 10:56:37 30.6 -# 30.7 - 30.8 # 30.9 # Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved. 30.10 # DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 30.11 @@ -23,268 +19,239 @@ 30.12 # Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 30.13 # or visit www.oracle.com if you need additional information or have any 30.14 # questions. 30.15 -# 30.16 # 30.17 +# 30.18 +# Only used for OSX/Darwin builds 30.19 30.20 # Define public interface. 30.21 + # _JNI 30.22 + _JNI_CreateJavaVM 30.23 + _JNI_GetCreatedJavaVMs 30.24 + _JNI_GetDefaultJavaVMInitArgs 30.25 30.26 -SUNWprivate_1.1 { 30.27 - global: 30.28 - # JNI 30.29 - JNI_CreateJavaVM; 30.30 - JNI_GetCreatedJavaVMs; 30.31 - JNI_GetDefaultJavaVMInitArgs; 30.32 - 30.33 - # JVM 30.34 - JVM_Accept; 30.35 - JVM_ActiveProcessorCount; 30.36 - JVM_AllocateNewArray; 30.37 - JVM_AllocateNewObject; 30.38 - JVM_ArrayCopy; 30.39 - JVM_AssertionStatusDirectives; 30.40 - JVM_Available; 30.41 - JVM_Bind; 30.42 - JVM_ClassDepth; 30.43 - JVM_ClassLoaderDepth; 30.44 - JVM_Clone; 30.45 - JVM_Close; 30.46 - JVM_CX8Field; 30.47 - JVM_CompileClass; 30.48 - JVM_CompileClasses; 30.49 - JVM_CompilerCommand; 30.50 - JVM_Connect; 30.51 - JVM_ConstantPoolGetClassAt; 30.52 - JVM_ConstantPoolGetClassAtIfLoaded; 30.53 - JVM_ConstantPoolGetDoubleAt; 30.54 - JVM_ConstantPoolGetFieldAt; 30.55 - JVM_ConstantPoolGetFieldAtIfLoaded; 30.56 - JVM_ConstantPoolGetFloatAt; 30.57 - JVM_ConstantPoolGetIntAt; 30.58 - JVM_ConstantPoolGetLongAt; 30.59 - JVM_ConstantPoolGetMethodAt; 30.60 - JVM_ConstantPoolGetMethodAtIfLoaded; 30.61 - JVM_ConstantPoolGetMemberRefInfoAt; 30.62 - JVM_ConstantPoolGetSize; 30.63 - JVM_ConstantPoolGetStringAt; 30.64 - JVM_ConstantPoolGetUTF8At; 30.65 
- JVM_CountStackFrames; 30.66 - JVM_CurrentClassLoader; 30.67 - JVM_CurrentLoadedClass; 30.68 - JVM_CurrentThread; 30.69 - JVM_CurrentTimeMillis; 30.70 - JVM_DefineClass; 30.71 - JVM_DefineClassWithSource; 30.72 - JVM_DefineClassWithSourceCond; 30.73 - JVM_DesiredAssertionStatus; 30.74 - JVM_DisableCompiler; 30.75 - JVM_DoPrivileged; 30.76 - JVM_DTraceGetVersion; 30.77 - JVM_DTraceActivate; 30.78 - JVM_DTraceIsProbeEnabled; 30.79 - JVM_DTraceIsSupported; 30.80 - JVM_DTraceDispose; 30.81 - JVM_DumpAllStacks; 30.82 - JVM_DumpThreads; 30.83 - JVM_EnableCompiler; 30.84 - JVM_Exit; 30.85 - JVM_FillInStackTrace; 30.86 - JVM_FindClassFromClass; 30.87 - JVM_FindClassFromClassLoader; 30.88 - JVM_FindClassFromBootLoader; 30.89 - JVM_FindLibraryEntry; 30.90 - JVM_FindLoadedClass; 30.91 - JVM_FindPrimitiveClass; 30.92 - JVM_FindSignal; 30.93 - JVM_FreeMemory; 30.94 - JVM_GC; 30.95 - JVM_GetAllThreads; 30.96 - JVM_GetArrayElement; 30.97 - JVM_GetArrayLength; 30.98 - JVM_GetCPClassNameUTF; 30.99 - JVM_GetCPFieldClassNameUTF; 30.100 - JVM_GetCPFieldModifiers; 30.101 - JVM_GetCPFieldNameUTF; 30.102 - JVM_GetCPFieldSignatureUTF; 30.103 - JVM_GetCPMethodClassNameUTF; 30.104 - JVM_GetCPMethodModifiers; 30.105 - JVM_GetCPMethodNameUTF; 30.106 - JVM_GetCPMethodSignatureUTF; 30.107 - JVM_GetCallerClass; 30.108 - JVM_GetClassAccessFlags; 30.109 - JVM_GetClassAnnotations; 30.110 - JVM_GetClassCPEntriesCount; 30.111 - JVM_GetClassCPTypes; 30.112 - JVM_GetClassConstantPool; 30.113 - JVM_GetClassContext; 30.114 - JVM_GetClassDeclaredConstructors; 30.115 - JVM_GetClassDeclaredFields; 30.116 - JVM_GetClassDeclaredMethods; 30.117 - JVM_GetClassFieldsCount; 30.118 - JVM_GetClassInterfaces; 30.119 - JVM_GetClassLoader; 30.120 - JVM_GetClassMethodsCount; 30.121 - JVM_GetClassModifiers; 30.122 - JVM_GetClassName; 30.123 - JVM_GetClassNameUTF; 30.124 - JVM_GetClassSignature; 30.125 - JVM_GetClassSigners; 30.126 - JVM_GetClassTypeAnnotations; 30.127 - JVM_GetComponentType; 30.128 - 
JVM_GetDeclaredClasses; 30.129 - JVM_GetDeclaringClass; 30.130 - JVM_GetEnclosingMethodInfo; 30.131 - JVM_GetFieldAnnotations; 30.132 - JVM_GetFieldIxModifiers; 30.133 - JVM_GetFieldTypeAnnotations; 30.134 - JVM_GetHostName; 30.135 - JVM_GetInheritedAccessControlContext; 30.136 - JVM_GetInterfaceVersion; 30.137 - JVM_GetLastErrorString; 30.138 - JVM_GetManagement; 30.139 - JVM_GetMethodAnnotations; 30.140 - JVM_GetMethodDefaultAnnotationValue; 30.141 - JVM_GetMethodIxArgsSize; 30.142 - JVM_GetMethodIxByteCode; 30.143 - JVM_GetMethodIxByteCodeLength; 30.144 - JVM_GetMethodIxExceptionIndexes; 30.145 - JVM_GetMethodIxExceptionTableEntry; 30.146 - JVM_GetMethodIxExceptionTableLength; 30.147 - JVM_GetMethodIxExceptionsCount; 30.148 - JVM_GetMethodIxLocalsCount; 30.149 - JVM_GetMethodIxMaxStack; 30.150 - JVM_GetMethodIxModifiers; 30.151 - JVM_GetMethodIxNameUTF; 30.152 - JVM_GetMethodIxSignatureUTF; 30.153 - JVM_GetMethodParameterAnnotations; 30.154 - JVM_GetMethodParameters; 30.155 - JVM_GetMethodTypeAnnotations; 30.156 - JVM_GetPrimitiveArrayElement; 30.157 - JVM_GetProtectionDomain; 30.158 - JVM_GetSockName; 30.159 - JVM_GetSockOpt; 30.160 - JVM_GetStackAccessControlContext; 30.161 - JVM_GetStackTraceDepth; 30.162 - JVM_GetStackTraceElement; 30.163 - JVM_GetSystemPackage; 30.164 - JVM_GetSystemPackages; 30.165 - JVM_GetThreadStateNames; 30.166 - JVM_GetThreadStateValues; 30.167 - JVM_GetVersionInfo; 30.168 - JVM_Halt; 30.169 - JVM_HoldsLock; 30.170 - JVM_IHashCode; 30.171 - JVM_InitAgentProperties; 30.172 - JVM_InitProperties; 30.173 - JVM_InitializeCompiler; 30.174 - JVM_InitializeSocketLibrary; 30.175 - JVM_InternString; 30.176 - JVM_Interrupt; 30.177 - JVM_InvokeMethod; 30.178 - JVM_IsArrayClass; 30.179 - JVM_IsConstructorIx; 30.180 - JVM_IsInterface; 30.181 - JVM_IsInterrupted; 30.182 - JVM_IsNaN; 30.183 - JVM_IsPrimitiveClass; 30.184 - JVM_IsSameClassPackage; 30.185 - JVM_IsSilentCompiler; 30.186 - JVM_IsSupportedJNIVersion; 30.187 - JVM_IsThreadAlive; 30.188 - 
JVM_IsVMGeneratedMethodIx; 30.189 - JVM_LatestUserDefinedLoader; 30.190 - JVM_Listen; 30.191 - JVM_LoadClass0; 30.192 - JVM_LoadLibrary; 30.193 - JVM_Lseek; 30.194 - JVM_MaxObjectInspectionAge; 30.195 - JVM_MaxMemory; 30.196 - JVM_MonitorNotify; 30.197 - JVM_MonitorNotifyAll; 30.198 - JVM_MonitorWait; 30.199 - JVM_NanoTime; 30.200 - JVM_NativePath; 30.201 - JVM_NewArray; 30.202 - JVM_NewInstanceFromConstructor; 30.203 - JVM_NewMultiArray; 30.204 - JVM_OnExit; 30.205 - JVM_Open; 30.206 - JVM_RaiseSignal; 30.207 - JVM_RawMonitorCreate; 30.208 - JVM_RawMonitorDestroy; 30.209 - JVM_RawMonitorEnter; 30.210 - JVM_RawMonitorExit; 30.211 - JVM_Read; 30.212 - JVM_Recv; 30.213 - JVM_RecvFrom; 30.214 - JVM_RegisterSignal; 30.215 - JVM_ReleaseUTF; 30.216 - JVM_ResolveClass; 30.217 - JVM_ResumeThread; 30.218 - JVM_Send; 30.219 - JVM_SendTo; 30.220 - JVM_SetArrayElement; 30.221 - JVM_SetClassSigners; 30.222 - JVM_SetLength; 30.223 - JVM_SetPrimitiveArrayElement; 30.224 - JVM_SetProtectionDomain; 30.225 - JVM_SetSockOpt; 30.226 - JVM_SetThreadPriority; 30.227 - JVM_Sleep; 30.228 - JVM_Socket; 30.229 - JVM_SocketAvailable; 30.230 - JVM_SocketClose; 30.231 - JVM_SocketShutdown; 30.232 - JVM_StartThread; 30.233 - JVM_StopThread; 30.234 - JVM_SuspendThread; 30.235 - JVM_SupportsCX8; 30.236 - JVM_Sync; 30.237 - JVM_Timeout; 30.238 - JVM_TotalMemory; 30.239 - JVM_TraceInstructions; 30.240 - JVM_TraceMethodCalls; 30.241 - JVM_UnloadLibrary; 30.242 - JVM_Write; 30.243 - JVM_Yield; 30.244 - JVM_handle_bsd_signal; 30.245 - 30.246 - # Old reflection routines 30.247 - # These do not need to be present in the product build in JDK 1.4 30.248 - # but their code has not been removed yet because there will not 30.249 - # be a substantial code savings until JVM_InvokeMethod and 30.250 - # JVM_NewInstanceFromConstructor can also be removed; see 30.251 - # reflectionCompat.hpp. 
30.252 - JVM_GetClassConstructor; 30.253 - JVM_GetClassConstructors; 30.254 - JVM_GetClassField; 30.255 - JVM_GetClassFields; 30.256 - JVM_GetClassMethod; 30.257 - JVM_GetClassMethods; 30.258 - JVM_GetField; 30.259 - JVM_GetPrimitiveField; 30.260 - JVM_NewInstance; 30.261 - JVM_SetField; 30.262 - JVM_SetPrimitiveField; 30.263 + # _JVM 30.264 + _JVM_Accept 30.265 + _JVM_ActiveProcessorCount 30.266 + _JVM_AllocateNewArray 30.267 + _JVM_AllocateNewObject 30.268 + _JVM_ArrayCopy 30.269 + _JVM_AssertionStatusDirectives 30.270 + _JVM_Available 30.271 + _JVM_Bind 30.272 + _JVM_ClassDepth 30.273 + _JVM_ClassLoaderDepth 30.274 + _JVM_Clone 30.275 + _JVM_Close 30.276 + _JVM_CX8Field 30.277 + _JVM_CompileClass 30.278 + _JVM_CompileClasses 30.279 + _JVM_CompilerCommand 30.280 + _JVM_Connect 30.281 + _JVM_ConstantPoolGetClassAt 30.282 + _JVM_ConstantPoolGetClassAtIfLoaded 30.283 + _JVM_ConstantPoolGetDoubleAt 30.284 + _JVM_ConstantPoolGetFieldAt 30.285 + _JVM_ConstantPoolGetFieldAtIfLoaded 30.286 + _JVM_ConstantPoolGetFloatAt 30.287 + _JVM_ConstantPoolGetIntAt 30.288 + _JVM_ConstantPoolGetLongAt 30.289 + _JVM_ConstantPoolGetMethodAt 30.290 + _JVM_ConstantPoolGetMethodAtIfLoaded 30.291 + _JVM_ConstantPoolGetMemberRefInfoAt 30.292 + _JVM_ConstantPoolGetSize 30.293 + _JVM_ConstantPoolGetStringAt 30.294 + _JVM_ConstantPoolGetUTF8At 30.295 + _JVM_CountStackFrames 30.296 + _JVM_CurrentClassLoader 30.297 + _JVM_CurrentLoadedClass 30.298 + _JVM_CurrentThread 30.299 + _JVM_CurrentTimeMillis 30.300 + _JVM_DefineClass 30.301 + _JVM_DefineClassWithSource 30.302 + _JVM_DefineClassWithSourceCond 30.303 + _JVM_DesiredAssertionStatus 30.304 + _JVM_DisableCompiler 30.305 + _JVM_DoPrivileged 30.306 + _JVM_DTraceGetVersion 30.307 + _JVM_DTraceActivate 30.308 + _JVM_DTraceIsProbeEnabled 30.309 + _JVM_DTraceIsSupported 30.310 + _JVM_DTraceDispose 30.311 + _JVM_DumpAllStacks 30.312 + _JVM_DumpThreads 30.313 + _JVM_EnableCompiler 30.314 + _JVM_Exit 30.315 + _JVM_FillInStackTrace 30.316 + 
_JVM_FindClassFromClass 30.317 + _JVM_FindClassFromClassLoader 30.318 + _JVM_FindClassFromBootLoader 30.319 + _JVM_FindLibraryEntry 30.320 + _JVM_FindLoadedClass 30.321 + _JVM_FindPrimitiveClass 30.322 + _JVM_FindSignal 30.323 + _JVM_FreeMemory 30.324 + _JVM_GC 30.325 + _JVM_GetAllThreads 30.326 + _JVM_GetArrayElement 30.327 + _JVM_GetArrayLength 30.328 + _JVM_GetCPClassNameUTF 30.329 + _JVM_GetCPFieldClassNameUTF 30.330 + _JVM_GetCPFieldModifiers 30.331 + _JVM_GetCPFieldNameUTF 30.332 + _JVM_GetCPFieldSignatureUTF 30.333 + _JVM_GetCPMethodClassNameUTF 30.334 + _JVM_GetCPMethodModifiers 30.335 + _JVM_GetCPMethodNameUTF 30.336 + _JVM_GetCPMethodSignatureUTF 30.337 + _JVM_GetCallerClass 30.338 + _JVM_GetClassAccessFlags 30.339 + _JVM_GetClassAnnotations 30.340 + _JVM_GetClassCPEntriesCount 30.341 + _JVM_GetClassCPTypes 30.342 + _JVM_GetClassConstantPool 30.343 + _JVM_GetClassContext 30.344 + _JVM_GetClassDeclaredConstructors 30.345 + _JVM_GetClassDeclaredFields 30.346 + _JVM_GetClassDeclaredMethods 30.347 + _JVM_GetClassFieldsCount 30.348 + _JVM_GetClassInterfaces 30.349 + _JVM_GetClassLoader 30.350 + _JVM_GetClassMethodsCount 30.351 + _JVM_GetClassModifiers 30.352 + _JVM_GetClassName 30.353 + _JVM_GetClassNameUTF 30.354 + _JVM_GetClassSignature 30.355 + _JVM_GetClassSigners 30.356 + _JVM_GetClassTypeAnnotations 30.357 + _JVM_GetComponentType 30.358 + _JVM_GetDeclaredClasses 30.359 + _JVM_GetDeclaringClass 30.360 + _JVM_GetEnclosingMethodInfo 30.361 + _JVM_GetFieldAnnotations 30.362 + _JVM_GetFieldIxModifiers 30.363 + _JVM_GetFieldTypeAnnotations 30.364 + _JVM_GetHostName 30.365 + _JVM_GetInheritedAccessControlContext 30.366 + _JVM_GetInterfaceVersion 30.367 + _JVM_GetLastErrorString 30.368 + _JVM_GetManagement 30.369 + _JVM_GetMethodAnnotations 30.370 + _JVM_GetMethodDefaultAnnotationValue 30.371 + _JVM_GetMethodIxArgsSize 30.372 + _JVM_GetMethodIxByteCode 30.373 + _JVM_GetMethodIxByteCodeLength 30.374 + _JVM_GetMethodIxExceptionIndexes 30.375 + 
_JVM_GetMethodIxExceptionTableEntry 30.376 + _JVM_GetMethodIxExceptionTableLength 30.377 + _JVM_GetMethodIxExceptionsCount 30.378 + _JVM_GetMethodIxLocalsCount 30.379 + _JVM_GetMethodIxMaxStack 30.380 + _JVM_GetMethodIxModifiers 30.381 + _JVM_GetMethodIxNameUTF 30.382 + _JVM_GetMethodIxSignatureUTF 30.383 + _JVM_GetMethodParameterAnnotations 30.384 + _JVM_GetMethodParameters 30.385 + _JVM_GetMethodTypeAnnotations 30.386 + _JVM_GetPrimitiveArrayElement 30.387 + _JVM_GetProtectionDomain 30.388 + _JVM_GetSockName 30.389 + _JVM_GetSockOpt 30.390 + _JVM_GetStackAccessControlContext 30.391 + _JVM_GetStackTraceDepth 30.392 + _JVM_GetStackTraceElement 30.393 + _JVM_GetSystemPackage 30.394 + _JVM_GetSystemPackages 30.395 + _JVM_GetThreadStateNames 30.396 + _JVM_GetThreadStateValues 30.397 + _JVM_GetVersionInfo 30.398 + _JVM_Halt 30.399 + _JVM_HoldsLock 30.400 + _JVM_IHashCode 30.401 + _JVM_InitAgentProperties 30.402 + _JVM_InitProperties 30.403 + _JVM_InitializeCompiler 30.404 + _JVM_InitializeSocketLibrary 30.405 + _JVM_InternString 30.406 + _JVM_Interrupt 30.407 + _JVM_InvokeMethod 30.408 + _JVM_IsArrayClass 30.409 + _JVM_IsConstructorIx 30.410 + _JVM_IsInterface 30.411 + _JVM_IsInterrupted 30.412 + _JVM_IsNaN 30.413 + _JVM_IsPrimitiveClass 30.414 + _JVM_IsSameClassPackage 30.415 + _JVM_IsSilentCompiler 30.416 + _JVM_IsSupportedJNIVersion 30.417 + _JVM_IsThreadAlive 30.418 + _JVM_IsVMGeneratedMethodIx 30.419 + _JVM_LatestUserDefinedLoader 30.420 + _JVM_Listen 30.421 + _JVM_LoadClass0 30.422 + _JVM_LoadLibrary 30.423 + _JVM_Lseek 30.424 + _JVM_MaxObjectInspectionAge 30.425 + _JVM_MaxMemory 30.426 + _JVM_MonitorNotify 30.427 + _JVM_MonitorNotifyAll 30.428 + _JVM_MonitorWait 30.429 + _JVM_NanoTime 30.430 + _JVM_NativePath 30.431 + _JVM_NewArray 30.432 + _JVM_NewInstanceFromConstructor 30.433 + _JVM_NewMultiArray 30.434 + _JVM_OnExit 30.435 + _JVM_Open 30.436 + _JVM_RaiseSignal 30.437 + _JVM_RawMonitorCreate 30.438 + _JVM_RawMonitorDestroy 30.439 + _JVM_RawMonitorEnter 30.440 
+ _JVM_RawMonitorExit 30.441 + _JVM_Read 30.442 + _JVM_Recv 30.443 + _JVM_RecvFrom 30.444 + _JVM_RegisterSignal 30.445 + _JVM_ReleaseUTF 30.446 + _JVM_ResolveClass 30.447 + _JVM_ResumeThread 30.448 + _JVM_Send 30.449 + _JVM_SendTo 30.450 + _JVM_SetArrayElement 30.451 + _JVM_SetClassSigners 30.452 + _JVM_SetLength 30.453 + _JVM_SetNativeThreadName 30.454 + _JVM_SetPrimitiveArrayElement 30.455 + _JVM_SetProtectionDomain 30.456 + _JVM_SetSockOpt 30.457 + _JVM_SetThreadPriority 30.458 + _JVM_Sleep 30.459 + _JVM_Socket 30.460 + _JVM_SocketAvailable 30.461 + _JVM_SocketClose 30.462 + _JVM_SocketShutdown 30.463 + _JVM_StartThread 30.464 + _JVM_StopThread 30.465 + _JVM_SuspendThread 30.466 + _JVM_SupportsCX8 30.467 + _JVM_Sync 30.468 + _JVM_Timeout 30.469 + _JVM_TotalMemory 30.470 + _JVM_TraceInstructions 30.471 + _JVM_TraceMethodCalls 30.472 + _JVM_UnloadLibrary 30.473 + _JVM_Write 30.474 + _JVM_Yield 30.475 + _JVM_handle_bsd_signal 30.476 30.477 # miscellaneous functions 30.478 - jio_fprintf; 30.479 - jio_printf; 30.480 - jio_snprintf; 30.481 - jio_vfprintf; 30.482 - jio_vsnprintf; 30.483 - fork1; 30.484 - numa_warn; 30.485 - numa_error; 30.486 - 30.487 - # Needed because there is no JVM interface for this. 30.488 - sysThreadAvailableStackWithSlack; 30.489 + _jio_fprintf 30.490 + _jio_printf 30.491 + _jio_snprintf 30.492 + _jio_vfprintf 30.493 + _jio_vsnprintf 30.494 30.495 # This is for Forte Analyzer profiling support. 30.496 - AsyncGetCallTrace; 30.497 + _AsyncGetCallTrace 30.498 30.499 - # INSERT VTABLE SYMBOLS HERE 30.500 + # INSERT VTABLE SYMBOLS HERE 30.501 30.502 - local: 30.503 - *; 30.504 -}; 30.505 -
31.1 --- a/make/excludeSrc.make Thu Jul 11 12:59:03 2013 -0400 31.2 +++ b/make/excludeSrc.make Mon Jul 15 11:07:03 2013 +0100 31.3 @@ -112,3 +112,5 @@ 31.4 endif 31.5 31.6 -include $(HS_ALT_MAKE)/excludeSrc.make 31.7 + 31.8 +.PHONY: $(HS_ALT_MAKE)/excludeSrc.make
32.1 --- a/make/hotspot_version Thu Jul 11 12:59:03 2013 -0400 32.2 +++ b/make/hotspot_version Mon Jul 15 11:07:03 2013 +0100 32.3 @@ -35,7 +35,7 @@ 32.4 32.5 HS_MAJOR_VER=25 32.6 HS_MINOR_VER=0 32.7 -HS_BUILD_NUMBER=37 32.8 +HS_BUILD_NUMBER=40 32.9 32.10 JDK_MAJOR_VER=1 32.11 JDK_MINOR_VER=8
33.1 --- a/make/linux/makefiles/gcc.make Thu Jul 11 12:59:03 2013 -0400 33.2 +++ b/make/linux/makefiles/gcc.make Mon Jul 15 11:07:03 2013 +0100 33.3 @@ -214,7 +214,7 @@ 33.4 WARNINGS_ARE_ERRORS += -Wno-return-type -Wno-empty-body 33.5 endif 33.6 33.7 -WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function 33.8 +WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function -Wunused-value 33.9 33.10 ifeq ($(USE_CLANG),) 33.11 # Since GCC 4.3, -Wconversion has changed its meanings to warn these implicit 33.12 @@ -350,9 +350,9 @@ 33.13 ifeq ($(DEBUG_CFLAGS/$(BUILDARCH)),) 33.14 ifeq ($(USE_CLANG), true) 33.15 # Clang doesn't understand -gstabs 33.16 - OPT_CFLAGS += -g 33.17 + DEBUG_CFLAGS += -g 33.18 else 33.19 - OPT_CFLAGS += -gstabs 33.20 + DEBUG_CFLAGS += -gstabs 33.21 endif 33.22 endif 33.23 33.24 @@ -365,9 +365,9 @@ 33.25 ifeq ($(FASTDEBUG_CFLAGS/$(BUILDARCH)),) 33.26 ifeq ($(USE_CLANG), true) 33.27 # Clang doesn't understand -gstabs 33.28 - OPT_CFLAGS += -g 33.29 + FASTDEBUG_CFLAGS += -g 33.30 else 33.31 - OPT_CFLAGS += -gstabs 33.32 + FASTDEBUG_CFLAGS += -gstabs 33.33 endif 33.34 endif 33.35
34.1 --- a/make/linux/makefiles/vm.make Thu Jul 11 12:59:03 2013 -0400 34.2 +++ b/make/linux/makefiles/vm.make Mon Jul 15 11:07:03 2013 +0100 34.3 @@ -107,6 +107,10 @@ 34.4 # File specific flags 34.5 CXXFLAGS += $(CXXFLAGS/BYFILE) 34.6 34.7 +# Large File Support 34.8 +ifneq ($(LP64), 1) 34.9 +CXXFLAGS/ostream.o += -D_FILE_OFFSET_BITS=64 34.10 +endif # ifneq ($(LP64), 1) 34.11 34.12 # CFLAGS_WARN holds compiler options to suppress/enable warnings. 34.13 CFLAGS += $(CFLAGS_WARN/BYFILE)
35.1 --- a/make/solaris/makefiles/vm.make Thu Jul 11 12:59:03 2013 -0400 35.2 +++ b/make/solaris/makefiles/vm.make Mon Jul 15 11:07:03 2013 +0100 35.3 @@ -95,6 +95,10 @@ 35.4 # File specific flags 35.5 CXXFLAGS += $(CXXFLAGS/BYFILE) 35.6 35.7 +# Large File Support 35.8 +ifneq ($(LP64), 1) 35.9 +CXXFLAGS/ostream.o += -D_FILE_OFFSET_BITS=64 35.10 +endif # ifneq ($(LP64), 1) 35.11 35.12 # CFLAGS_WARN holds compiler options to suppress/enable warnings. 35.13 CFLAGS += $(CFLAGS_WARN)
36.1 --- a/src/cpu/sparc/vm/assembler_sparc.hpp Thu Jul 11 12:59:03 2013 -0400 36.2 +++ b/src/cpu/sparc/vm/assembler_sparc.hpp Mon Jul 15 11:07:03 2013 +0100 36.3 @@ -57,7 +57,6 @@ 36.4 fbp_op2 = 5, 36.5 br_op2 = 2, 36.6 bp_op2 = 1, 36.7 - cb_op2 = 7, // V8 36.8 sethi_op2 = 4 36.9 }; 36.10 36.11 @@ -145,7 +144,6 @@ 36.12 ldsh_op3 = 0x0a, 36.13 ldx_op3 = 0x0b, 36.14 36.15 - ldstub_op3 = 0x0d, 36.16 stx_op3 = 0x0e, 36.17 swap_op3 = 0x0f, 36.18 36.19 @@ -163,15 +161,6 @@ 36.20 36.21 prefetch_op3 = 0x2d, 36.22 36.23 - 36.24 - ldc_op3 = 0x30, 36.25 - ldcsr_op3 = 0x31, 36.26 - lddc_op3 = 0x33, 36.27 - stc_op3 = 0x34, 36.28 - stcsr_op3 = 0x35, 36.29 - stdcq_op3 = 0x36, 36.30 - stdc_op3 = 0x37, 36.31 - 36.32 casa_op3 = 0x3c, 36.33 casxa_op3 = 0x3e, 36.34 36.35 @@ -574,17 +563,11 @@ 36.36 static void vis3_only() { assert( VM_Version::has_vis3(), "This instruction only works on SPARC with VIS3"); } 36.37 36.38 // instruction only in v9 36.39 - static void v9_only() { assert( VM_Version::v9_instructions_work(), "This instruction only works on SPARC V9"); } 36.40 - 36.41 - // instruction only in v8 36.42 - static void v8_only() { assert( VM_Version::v8_instructions_work(), "This instruction only works on SPARC V8"); } 36.43 + static void v9_only() { } // do nothing 36.44 36.45 // instruction deprecated in v9 36.46 static void v9_dep() { } // do nothing for now 36.47 36.48 - // some float instructions only exist for single prec. 
on v8 36.49 - static void v8_s_only(FloatRegisterImpl::Width w) { if (w != FloatRegisterImpl::S) v9_only(); } 36.50 - 36.51 // v8 has no CC field 36.52 static void v8_no_cc(CC cc) { if (cc) v9_only(); } 36.53 36.54 @@ -730,11 +713,6 @@ 36.55 inline void bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none ); 36.56 inline void bp( Condition c, bool a, CC cc, Predict p, Label& L ); 36.57 36.58 - // pp 121 (V8) 36.59 - 36.60 - inline void cb( Condition c, bool a, address d, relocInfo::relocType rt = relocInfo::none ); 36.61 - inline void cb( Condition c, bool a, Label& L ); 36.62 - 36.63 // pp 149 36.64 36.65 inline void call( address d, relocInfo::relocType rt = relocInfo::runtime_call_type ); 36.66 @@ -775,8 +753,8 @@ 36.67 36.68 // pp 157 36.69 36.70 - void fcmp( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { v8_no_cc(cc); emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x50 + w) | fs2(s2, w)); } 36.71 - void fcmpe( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { v8_no_cc(cc); emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x54 + w) | fs2(s2, w)); } 36.72 + void fcmp( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x50 + w) | fs2(s2, w)); } 36.73 + void fcmpe( FloatRegisterImpl::Width w, CC cc, FloatRegister s1, FloatRegister s2) { emit_int32( op(arith_op) | cmpcc(cc) | op3(fpop2_op3) | fs1(s1, w) | opf(0x54 + w) | fs2(s2, w)); } 36.74 36.75 // pp 159 36.76 36.77 @@ -794,21 +772,11 @@ 36.78 36.79 // pp 162 36.80 36.81 - void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v8_s_only(w); emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x00 + w) | fs2(s, w)); } 36.82 + void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) 
| opf(0x00 + w) | fs2(s, w)); } 36.83 36.84 - void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v8_s_only(w); emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x04 + w) | fs2(s, w)); } 36.85 + void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x04 + w) | fs2(s, w)); } 36.86 36.87 - // page 144 sparc v8 architecture (double prec works on v8 if the source and destination registers are the same). fnegs is the only instruction available 36.88 - // on v8 to do negation of single, double and quad precision floats. 36.89 - 36.90 - void fneg( FloatRegisterImpl::Width w, FloatRegister sd ) { if (VM_Version::v9_instructions_work()) emit_int32( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x04 + w) | fs2(sd, w)); else emit_int32( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x05) | fs2(sd, w)); } 36.91 - 36.92 - void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { v8_s_only(w); emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x08 + w) | fs2(s, w)); } 36.93 - 36.94 - // page 144 sparc v8 architecture (double prec works on v8 if the source and destination registers are the same). fabss is the only instruction available 36.95 - // on v8 to do abs operation on single/double/quad precision floats. 
36.96 - 36.97 - void fabs( FloatRegisterImpl::Width w, FloatRegister sd ) { if (VM_Version::v9_instructions_work()) emit_int32( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x08 + w) | fs2(sd, w)); else emit_int32( op(arith_op) | fd(sd, w) | op3(fpop1_op3) | opf(0x09) | fs2(sd, w)); } 36.98 + void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d ) { emit_int32( op(arith_op) | fd(d, w) | op3(fpop1_op3) | opf(0x08 + w) | fs2(s, w)); } 36.99 36.100 // pp 163 36.101 36.102 @@ -839,11 +807,6 @@ 36.103 void impdep1( int id1, int const19a ) { v9_only(); emit_int32( op(arith_op) | fcn(id1) | op3(impdep1_op3) | u_field(const19a, 18, 0)); } 36.104 void impdep2( int id1, int const19a ) { v9_only(); emit_int32( op(arith_op) | fcn(id1) | op3(impdep2_op3) | u_field(const19a, 18, 0)); } 36.105 36.106 - // pp 149 (v8) 36.107 - 36.108 - void cpop1( int opc, int cr1, int cr2, int crd ) { v8_only(); emit_int32( op(arith_op) | fcn(crd) | op3(impdep1_op3) | u_field(cr1, 18, 14) | opf(opc) | u_field(cr2, 4, 0)); } 36.109 - void cpop2( int opc, int cr1, int cr2, int crd ) { v8_only(); emit_int32( op(arith_op) | fcn(crd) | op3(impdep2_op3) | u_field(cr1, 18, 14) | opf(opc) | u_field(cr2, 4, 0)); } 36.110 - 36.111 // pp 170 36.112 36.113 void jmpl( Register s1, Register s2, Register d ); 36.114 @@ -860,16 +823,6 @@ 36.115 inline void ldxfsr( Register s1, Register s2 ); 36.116 inline void ldxfsr( Register s1, int simm13a); 36.117 36.118 - // pp 94 (v8) 36.119 - 36.120 - inline void ldc( Register s1, Register s2, int crd ); 36.121 - inline void ldc( Register s1, int simm13a, int crd); 36.122 - inline void lddc( Register s1, Register s2, int crd ); 36.123 - inline void lddc( Register s1, int simm13a, int crd); 36.124 - inline void ldcsr( Register s1, Register s2, int crd ); 36.125 - inline void ldcsr( Register s1, int simm13a, int crd); 36.126 - 36.127 - 36.128 // 173 36.129 36.130 void ldfa( FloatRegisterImpl::Width w, Register s1, Register s2, int ia, FloatRegister d ) 
{ v9_only(); emit_int32( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3 | alt_bit_op3, w) | rs1(s1) | imm_asi(ia) | rs2(s2) ); } 36.131 @@ -910,18 +863,6 @@ 36.132 void lduwa( Register s1, int simm13a, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(lduw_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } 36.133 void ldxa( Register s1, Register s2, int ia, Register d ) { v9_only(); emit_int32( op(ldst_op) | rd(d) | op3(ldx_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); } 36.134 void ldxa( Register s1, int simm13a, Register d ) { v9_only(); emit_int32( op(ldst_op) | rd(d) | op3(ldx_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } 36.135 - void ldda( Register s1, Register s2, int ia, Register d ) { v9_dep(); emit_int32( op(ldst_op) | rd(d) | op3(ldd_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); } 36.136 - void ldda( Register s1, int simm13a, Register d ) { v9_dep(); emit_int32( op(ldst_op) | rd(d) | op3(ldd_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } 36.137 - 36.138 - // pp 179 36.139 - 36.140 - inline void ldstub( Register s1, Register s2, Register d ); 36.141 - inline void ldstub( Register s1, int simm13a, Register d); 36.142 - 36.143 - // pp 180 36.144 - 36.145 - void ldstuba( Register s1, Register s2, int ia, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(ldstub_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); } 36.146 - void ldstuba( Register s1, int simm13a, Register d ) { emit_int32( op(ldst_op) | rd(d) | op3(ldstub_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } 36.147 36.148 // pp 181 36.149 36.150 @@ -992,11 +933,6 @@ 36.151 void smulcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(smul_op3 | cc_bit_op3) | rs1(s1) | rs2(s2) ); } 36.152 void smulcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(smul_op3 | cc_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } 36.153 36.154 - 
// pp 199 36.155 - 36.156 - void mulscc( Register s1, Register s2, Register d ) { v9_dep(); emit_int32( op(arith_op) | rd(d) | op3(mulscc_op3) | rs1(s1) | rs2(s2) ); } 36.157 - void mulscc( Register s1, int simm13a, Register d ) { v9_dep(); emit_int32( op(arith_op) | rd(d) | op3(mulscc_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } 36.158 - 36.159 // pp 201 36.160 36.161 void nop() { emit_int32( op(branch_op) | op2(sethi_op2) ); } 36.162 @@ -1116,17 +1052,6 @@ 36.163 void stda( Register d, Register s1, Register s2, int ia ) { emit_int32( op(ldst_op) | rd(d) | op3(std_op3 | alt_bit_op3) | rs1(s1) | imm_asi(ia) | rs2(s2) ); } 36.164 void stda( Register d, Register s1, int simm13a ) { emit_int32( op(ldst_op) | rd(d) | op3(std_op3 | alt_bit_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } 36.165 36.166 - // pp 97 (v8) 36.167 - 36.168 - inline void stc( int crd, Register s1, Register s2 ); 36.169 - inline void stc( int crd, Register s1, int simm13a); 36.170 - inline void stdc( int crd, Register s1, Register s2 ); 36.171 - inline void stdc( int crd, Register s1, int simm13a); 36.172 - inline void stcsr( int crd, Register s1, Register s2 ); 36.173 - inline void stcsr( int crd, Register s1, int simm13a); 36.174 - inline void stdcq( int crd, Register s1, Register s2 ); 36.175 - inline void stdcq( int crd, Register s1, int simm13a); 36.176 - 36.177 // pp 230 36.178 36.179 void sub( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(sub_op3 ) | rs1(s1) | rs2(s2) ); } 36.180 @@ -1153,20 +1078,16 @@ 36.181 36.182 void taddcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(taddcc_op3 ) | rs1(s1) | rs2(s2) ); } 36.183 void taddcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(taddcc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } 36.184 - void taddcctv( Register s1, Register s2, Register d ) { v9_dep(); emit_int32( op(arith_op) | rd(d) | op3(taddcctv_op3) | rs1(s1) | 
rs2(s2) ); } 36.185 - void taddcctv( Register s1, int simm13a, Register d ) { v9_dep(); emit_int32( op(arith_op) | rd(d) | op3(taddcctv_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } 36.186 36.187 // pp 235 36.188 36.189 void tsubcc( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcc_op3 ) | rs1(s1) | rs2(s2) ); } 36.190 void tsubcc( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } 36.191 - void tsubcctv( Register s1, Register s2, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcctv_op3) | rs1(s1) | rs2(s2) ); } 36.192 - void tsubcctv( Register s1, int simm13a, Register d ) { emit_int32( op(arith_op) | rd(d) | op3(tsubcctv_op3) | rs1(s1) | immed(true) | simm(simm13a, 13) ); } 36.193 36.194 // pp 237 36.195 36.196 - void trap( Condition c, CC cc, Register s1, Register s2 ) { v8_no_cc(cc); emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | rs2(s2)); } 36.197 - void trap( Condition c, CC cc, Register s1, int trapa ) { v8_no_cc(cc); emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | immed(true) | u_field(trapa, 6, 0)); } 36.198 + void trap( Condition c, CC cc, Register s1, Register s2 ) { emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | rs2(s2)); } 36.199 + void trap( Condition c, CC cc, Register s1, int trapa ) { emit_int32( op(arith_op) | cond(c) | op3(trap_op3) | rs1(s1) | trapcc(cc) | immed(true) | u_field(trapa, 6, 0)); } 36.200 // simple uncond. trap 36.201 void trap( int trapa ) { trap( always, icc, G0, trapa ); } 36.202
37.1 --- a/src/cpu/sparc/vm/assembler_sparc.inline.hpp Thu Jul 11 12:59:03 2013 -0400 37.2 +++ b/src/cpu/sparc/vm/assembler_sparc.inline.hpp Mon Jul 15 11:07:03 2013 +0100 37.3 @@ -63,9 +63,6 @@ 37.4 inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt ) { v9_only(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(fbp_op2) | branchcc(cc) | predict(p) | wdisp(intptr_t(d), intptr_t(pc()), 19), rt); has_delay_slot(); } 37.5 inline void Assembler::fbp( Condition c, bool a, CC cc, Predict p, Label& L ) { fbp(c, a, cc, p, target(L)); } 37.6 37.7 -inline void Assembler::cb( Condition c, bool a, address d, relocInfo::relocType rt ) { v8_only(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(cb_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); } 37.8 -inline void Assembler::cb( Condition c, bool a, Label& L ) { cb(c, a, target(L)); } 37.9 - 37.10 inline void Assembler::br( Condition c, bool a, address d, relocInfo::relocType rt ) { v9_dep(); cti(); emit_data( op(branch_op) | annul(a) | cond(c) | op2(br_op2) | wdisp(intptr_t(d), intptr_t(pc()), 22), rt); has_delay_slot(); } 37.11 inline void Assembler::br( Condition c, bool a, Label& L ) { br(c, a, target(L)); } 37.12 37.13 @@ -88,18 +85,9 @@ 37.14 inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, Register s2, FloatRegister d) { emit_int32( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | rs2(s2) ); } 37.15 inline void Assembler::ldf(FloatRegisterImpl::Width w, Register s1, int simm13a, FloatRegister d, RelocationHolder const& rspec) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(ldf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13), rspec); } 37.16 37.17 -inline void Assembler::ldfsr( Register s1, Register s2) { v9_dep(); emit_int32( op(ldst_op) | op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); } 37.18 -inline void Assembler::ldfsr( Register s1, int simm13a) { v9_dep(); emit_data( op(ldst_op) | op3(ldfsr_op3) | 
rs1(s1) | immed(true) | simm(simm13a, 13)); } 37.19 inline void Assembler::ldxfsr( Register s1, Register s2) { v9_only(); emit_int32( op(ldst_op) | rd(G1) | op3(ldfsr_op3) | rs1(s1) | rs2(s2) ); } 37.20 inline void Assembler::ldxfsr( Register s1, int simm13a) { v9_only(); emit_data( op(ldst_op) | rd(G1) | op3(ldfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } 37.21 37.22 -inline void Assembler::ldc( Register s1, Register s2, int crd) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(ldc_op3 ) | rs1(s1) | rs2(s2) ); } 37.23 -inline void Assembler::ldc( Register s1, int simm13a, int crd) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(ldc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13)); } 37.24 -inline void Assembler::lddc( Register s1, Register s2, int crd) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(lddc_op3 ) | rs1(s1) | rs2(s2) ); } 37.25 -inline void Assembler::lddc( Register s1, int simm13a, int crd) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(lddc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13)); } 37.26 -inline void Assembler::ldcsr( Register s1, Register s2, int crd) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(ldcsr_op3) | rs1(s1) | rs2(s2) ); } 37.27 -inline void Assembler::ldcsr( Register s1, int simm13a, int crd) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(ldcsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } 37.28 - 37.29 inline void Assembler::ldsb( Register s1, Register s2, Register d) { emit_int32( op(ldst_op) | rd(d) | op3(ldsb_op3) | rs1(s1) | rs2(s2) ); } 37.30 inline void Assembler::ldsb( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldsb_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } 37.31 37.32 @@ -119,9 +107,6 @@ 37.33 inline void Assembler::ldd( Register s1, Register s2, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_int32( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | rs2(s2) ); } 37.34 inline void Assembler::ldd( Register 
s1, int simm13a, Register d) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(ldd_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } 37.35 37.36 -inline void Assembler::ldstub( Register s1, Register s2, Register d) { emit_int32( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | rs2(s2) ); } 37.37 -inline void Assembler::ldstub( Register s1, int simm13a, Register d) { emit_data( op(ldst_op) | rd(d) | op3(ldstub_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } 37.38 - 37.39 inline void Assembler::rett( Register s1, Register s2 ) { cti(); emit_int32( op(arith_op) | op3(rett_op3) | rs1(s1) | rs2(s2)); has_delay_slot(); } 37.40 inline void Assembler::rett( Register s1, int simm13a, relocInfo::relocType rt) { cti(); emit_data( op(arith_op) | op3(rett_op3) | rs1(s1) | immed(true) | simm(simm13a, 13), rt); has_delay_slot(); } 37.41 37.42 @@ -132,8 +117,6 @@ 37.43 inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, Register s2) { emit_int32( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | rs2(s2) ); } 37.44 inline void Assembler::stf( FloatRegisterImpl::Width w, FloatRegister d, Register s1, int simm13a) { emit_data( op(ldst_op) | fd(d, w) | alt_op3(stf_op3, w) | rs1(s1) | immed(true) | simm(simm13a, 13)); } 37.45 37.46 -inline void Assembler::stfsr( Register s1, Register s2) { v9_dep(); emit_int32( op(ldst_op) | op3(stfsr_op3) | rs1(s1) | rs2(s2) ); } 37.47 -inline void Assembler::stfsr( Register s1, int simm13a) { v9_dep(); emit_data( op(ldst_op) | op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } 37.48 inline void Assembler::stxfsr( Register s1, Register s2) { v9_only(); emit_int32( op(ldst_op) | rd(G1) | op3(stfsr_op3) | rs1(s1) | rs2(s2) ); } 37.49 inline void Assembler::stxfsr( Register s1, int simm13a) { v9_only(); emit_data( op(ldst_op) | rd(G1) | op3(stfsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } 37.50 37.51 @@ -152,17 +135,6 @@ 37.52 inline void 
Assembler::std( Register d, Register s1, Register s2) { v9_dep(); assert(d->is_even(), "not even"); emit_int32( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | rs2(s2) ); } 37.53 inline void Assembler::std( Register d, Register s1, int simm13a) { v9_dep(); assert(d->is_even(), "not even"); emit_data( op(ldst_op) | rd(d) | op3(std_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } 37.54 37.55 -// v8 p 99 37.56 - 37.57 -inline void Assembler::stc( int crd, Register s1, Register s2) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(stc_op3 ) | rs1(s1) | rs2(s2) ); } 37.58 -inline void Assembler::stc( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stc_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13)); } 37.59 -inline void Assembler::stdc( int crd, Register s1, Register s2) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(stdc_op3) | rs1(s1) | rs2(s2) ); } 37.60 -inline void Assembler::stdc( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stdc_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } 37.61 -inline void Assembler::stcsr( int crd, Register s1, Register s2) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(stcsr_op3) | rs1(s1) | rs2(s2) ); } 37.62 -inline void Assembler::stcsr( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stcsr_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } 37.63 -inline void Assembler::stdcq( int crd, Register s1, Register s2) { v8_only(); emit_int32( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | rs2(s2) ); } 37.64 -inline void Assembler::stdcq( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); } 37.65 - 37.66 // pp 231 37.67 37.68 inline void Assembler::swap( Register s1, Register s2, Register d) { v9_dep(); emit_int32( op(ldst_op) | rd(d) | op3(swap_op3) | rs1(s1) | rs2(s2) ); }
38.1 --- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp Thu Jul 11 12:59:03 2013 -0400 38.2 +++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp Mon Jul 15 11:07:03 2013 +0100 38.3 @@ -1,5 +1,5 @@ 38.4 /* 38.5 - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. 38.6 + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. 38.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 38.8 * 38.9 * This code is free software; you can redistribute it and/or modify it 38.10 @@ -597,13 +597,6 @@ 38.11 38.12 __ sra(Rdividend, 31, Rscratch); 38.13 __ wry(Rscratch); 38.14 - if (!VM_Version::v9_instructions_work()) { 38.15 - // v9 doesn't require these nops 38.16 - __ nop(); 38.17 - __ nop(); 38.18 - __ nop(); 38.19 - __ nop(); 38.20 - } 38.21 38.22 add_debug_info_for_div0_here(op->info()); 38.23 38.24 @@ -652,10 +645,6 @@ 38.25 case lir_cond_lessEqual: acond = (is_unordered ? Assembler::f_unorderedOrLessOrEqual : Assembler::f_lessOrEqual); break; 38.26 case lir_cond_greaterEqual: acond = (is_unordered ? 
Assembler::f_unorderedOrGreaterOrEqual: Assembler::f_greaterOrEqual); break; 38.27 default : ShouldNotReachHere(); 38.28 - }; 38.29 - 38.30 - if (!VM_Version::v9_instructions_work()) { 38.31 - __ nop(); 38.32 } 38.33 __ fb( acond, false, Assembler::pn, *(op->label())); 38.34 } else { 38.35 @@ -725,9 +714,6 @@ 38.36 Label L; 38.37 // result must be 0 if value is NaN; test by comparing value to itself 38.38 __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, rsrc, rsrc); 38.39 - if (!VM_Version::v9_instructions_work()) { 38.40 - __ nop(); 38.41 - } 38.42 __ fb(Assembler::f_unordered, true, Assembler::pn, L); 38.43 __ delayed()->st(G0, addr); // annuled if contents of rsrc is not NaN 38.44 __ ftoi(FloatRegisterImpl::S, rsrc, rsrc); 38.45 @@ -1909,7 +1895,7 @@ 38.46 switch (code) { 38.47 case lir_add: __ add (lreg, rreg, res); break; 38.48 case lir_sub: __ sub (lreg, rreg, res); break; 38.49 - case lir_mul: __ mult (lreg, rreg, res); break; 38.50 + case lir_mul: __ mulx (lreg, rreg, res); break; 38.51 default: ShouldNotReachHere(); 38.52 } 38.53 } 38.54 @@ -1924,7 +1910,7 @@ 38.55 switch (code) { 38.56 case lir_add: __ add (lreg, simm13, res); break; 38.57 case lir_sub: __ sub (lreg, simm13, res); break; 38.58 - case lir_mul: __ mult (lreg, simm13, res); break; 38.59 + case lir_mul: __ mulx (lreg, simm13, res); break; 38.60 default: ShouldNotReachHere(); 38.61 } 38.62 } else { 38.63 @@ -1936,7 +1922,7 @@ 38.64 switch (code) { 38.65 case lir_add: __ add (lreg, (int)con, res); break; 38.66 case lir_sub: __ sub (lreg, (int)con, res); break; 38.67 - case lir_mul: __ mult (lreg, (int)con, res); break; 38.68 + case lir_mul: __ mulx (lreg, (int)con, res); break; 38.69 default: ShouldNotReachHere(); 38.70 } 38.71 } 38.72 @@ -2960,6 +2946,9 @@ 38.73 } 38.74 } 38.75 38.76 +void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) { 38.77 + fatal("CRC32 intrinsic is not implemented on this platform"); 38.78 +} 38.79 38.80 void LIR_Assembler::emit_lock(LIR_OpLock* op) { 38.81 
Register obj = op->obj_opr()->as_register(); 38.82 @@ -3234,48 +3223,26 @@ 38.83 Register base = mem_addr->base()->as_register(); 38.84 if (src->is_register() && dest->is_address()) { 38.85 // G4 is high half, G5 is low half 38.86 - if (VM_Version::v9_instructions_work()) { 38.87 - // clear the top bits of G5, and scale up G4 38.88 - __ srl (src->as_register_lo(), 0, G5); 38.89 - __ sllx(src->as_register_hi(), 32, G4); 38.90 - // combine the two halves into the 64 bits of G4 38.91 - __ or3(G4, G5, G4); 38.92 - null_check_offset = __ offset(); 38.93 - if (idx == noreg) { 38.94 - __ stx(G4, base, disp); 38.95 - } else { 38.96 - __ stx(G4, base, idx); 38.97 - } 38.98 + // clear the top bits of G5, and scale up G4 38.99 + __ srl (src->as_register_lo(), 0, G5); 38.100 + __ sllx(src->as_register_hi(), 32, G4); 38.101 + // combine the two halves into the 64 bits of G4 38.102 + __ or3(G4, G5, G4); 38.103 + null_check_offset = __ offset(); 38.104 + if (idx == noreg) { 38.105 + __ stx(G4, base, disp); 38.106 } else { 38.107 - __ mov (src->as_register_hi(), G4); 38.108 - __ mov (src->as_register_lo(), G5); 38.109 - null_check_offset = __ offset(); 38.110 - if (idx == noreg) { 38.111 - __ std(G4, base, disp); 38.112 - } else { 38.113 - __ std(G4, base, idx); 38.114 - } 38.115 + __ stx(G4, base, idx); 38.116 } 38.117 } else if (src->is_address() && dest->is_register()) { 38.118 null_check_offset = __ offset(); 38.119 - if (VM_Version::v9_instructions_work()) { 38.120 - if (idx == noreg) { 38.121 - __ ldx(base, disp, G5); 38.122 - } else { 38.123 - __ ldx(base, idx, G5); 38.124 - } 38.125 - __ srax(G5, 32, dest->as_register_hi()); // fetch the high half into hi 38.126 - __ mov (G5, dest->as_register_lo()); // copy low half into lo 38.127 + if (idx == noreg) { 38.128 + __ ldx(base, disp, G5); 38.129 } else { 38.130 - if (idx == noreg) { 38.131 - __ ldd(base, disp, G4); 38.132 - } else { 38.133 - __ ldd(base, idx, G4); 38.134 - } 38.135 - // G4 is high half, G5 is low half 38.136 
- __ mov (G4, dest->as_register_hi()); 38.137 - __ mov (G5, dest->as_register_lo()); 38.138 + __ ldx(base, idx, G5); 38.139 } 38.140 + __ srax(G5, 32, dest->as_register_hi()); // fetch the high half into hi 38.141 + __ mov (G5, dest->as_register_lo()); // copy low half into lo 38.142 } else { 38.143 Unimplemented(); 38.144 }
39.1 --- a/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp Thu Jul 11 12:59:03 2013 -0400 39.2 +++ b/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp Mon Jul 15 11:07:03 2013 +0100 39.3 @@ -1,5 +1,5 @@ 39.4 /* 39.5 - * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. 39.6 + * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. 39.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 39.8 * 39.9 * This code is free software; you can redistribute it and/or modify it 39.10 @@ -784,6 +784,10 @@ 39.11 set_no_result(x); 39.12 } 39.13 39.14 +void LIRGenerator::do_update_CRC32(Intrinsic* x) { 39.15 + fatal("CRC32 intrinsic is not implemented on this platform"); 39.16 +} 39.17 + 39.18 // _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f 39.19 // _i2b, _i2c, _i2s 39.20 void LIRGenerator::do_Convert(Convert* x) {
40.1 --- a/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp Thu Jul 11 12:59:03 2013 -0400 40.2 +++ b/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp Mon Jul 15 11:07:03 2013 +0100 40.3 @@ -108,7 +108,7 @@ 40.4 40.5 // compare object markOop with Rmark and if equal exchange Rscratch with object markOop 40.6 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 40.7 - casx_under_lock(mark_addr.base(), Rmark, Rscratch, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()); 40.8 + cas_ptr(mark_addr.base(), Rmark, Rscratch); 40.9 // if compare/exchange succeeded we found an unlocked object and we now have locked it 40.10 // hence we are done 40.11 cmp(Rmark, Rscratch); 40.12 @@ -149,7 +149,7 @@ 40.13 40.14 // Check if it is still a light weight lock, this is is true if we see 40.15 // the stack address of the basicLock in the markOop of the object 40.16 - casx_under_lock(mark_addr.base(), Rbox, Rmark, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()); 40.17 + cas_ptr(mark_addr.base(), Rbox, Rmark); 40.18 cmp(Rbox, Rmark); 40.19 40.20 brx(Assembler::notEqual, false, Assembler::pn, slow_case); 40.21 @@ -276,7 +276,7 @@ 40.22 sub(var_size_in_bytes, hdr_size_in_bytes, t2); // compute size of body 40.23 initialize_body(t1, t2); 40.24 #ifndef _LP64 40.25 - } else if (VM_Version::v9_instructions_work() && con_size_in_bytes < threshold * 2) { 40.26 + } else if (con_size_in_bytes < threshold * 2) { 40.27 // on v9 we can do double word stores to fill twice as much space. 40.28 assert(hdr_size_in_bytes % 8 == 0, "double word aligned"); 40.29 assert(con_size_in_bytes % 8 == 0, "double word aligned");
41.1 --- a/src/cpu/sparc/vm/c1_globals_sparc.hpp Thu Jul 11 12:59:03 2013 -0400 41.2 +++ b/src/cpu/sparc/vm/c1_globals_sparc.hpp Mon Jul 15 11:07:03 2013 +0100 41.3 @@ -49,8 +49,9 @@ 41.4 define_pd_global(bool, ResizeTLAB, true ); 41.5 define_pd_global(intx, ReservedCodeCacheSize, 32*M ); 41.6 define_pd_global(intx, CodeCacheExpansionSize, 32*K ); 41.7 -define_pd_global(uintx,CodeCacheMinBlockLength, 1); 41.8 -define_pd_global(uintx,MetaspaceSize, 12*M ); 41.9 +define_pd_global(uintx, CodeCacheMinBlockLength, 1); 41.10 +define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K); 41.11 +define_pd_global(uintx, MetaspaceSize, 12*M ); 41.12 define_pd_global(bool, NeverActAsServerClassMachine, true ); 41.13 define_pd_global(intx, NewSizeThreadIncrease, 16*K ); 41.14 define_pd_global(uint64_t,MaxRAM, 1ULL*G);
42.1 --- a/src/cpu/sparc/vm/c2_globals_sparc.hpp Thu Jul 11 12:59:03 2013 -0400 42.2 +++ b/src/cpu/sparc/vm/c2_globals_sparc.hpp Mon Jul 15 11:07:03 2013 +0100 42.3 @@ -86,7 +86,8 @@ 42.4 // Ergonomics related flags 42.5 define_pd_global(uint64_t,MaxRAM, 4ULL*G); 42.6 #endif 42.7 -define_pd_global(uintx,CodeCacheMinBlockLength, 4); 42.8 +define_pd_global(uintx, CodeCacheMinBlockLength, 4); 42.9 +define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K); 42.10 42.11 // Heap related flags 42.12 define_pd_global(uintx,MetaspaceSize, ScaleForWordSize(16*M));
43.1 --- a/src/cpu/sparc/vm/c2_init_sparc.cpp Thu Jul 11 12:59:03 2013 -0400 43.2 +++ b/src/cpu/sparc/vm/c2_init_sparc.cpp Mon Jul 15 11:07:03 2013 +0100 43.3 @@ -30,5 +30,4 @@ 43.4 43.5 void Compile::pd_compiler2_init() { 43.6 guarantee(CodeEntryAlignment >= InteriorEntryAlignment, "" ); 43.7 - guarantee( VM_Version::v9_instructions_work(), "Server compiler does not run on V8 systems" ); 43.8 }
44.1 --- a/src/cpu/sparc/vm/disassembler_sparc.hpp Thu Jul 11 12:59:03 2013 -0400 44.2 +++ b/src/cpu/sparc/vm/disassembler_sparc.hpp Mon Jul 15 11:07:03 2013 +0100 44.3 @@ -30,8 +30,7 @@ 44.4 } 44.5 44.6 static const char* pd_cpu_opts() { 44.7 - return (VM_Version::v9_instructions_work()? 44.8 - (VM_Version::v8_instructions_work()? "" : "v9only") : "v8only"); 44.9 + return "v9only"; 44.10 } 44.11 44.12 #endif // CPU_SPARC_VM_DISASSEMBLER_SPARC_HPP
45.1 --- a/src/cpu/sparc/vm/frame_sparc.cpp Thu Jul 11 12:59:03 2013 -0400 45.2 +++ b/src/cpu/sparc/vm/frame_sparc.cpp Mon Jul 15 11:07:03 2013 +0100 45.3 @@ -257,11 +257,6 @@ 45.4 return false; 45.5 } 45.6 45.7 - // Could be a zombie method 45.8 - if (sender_blob->is_zombie() || sender_blob->is_unloaded()) { 45.9 - return false; 45.10 - } 45.11 - 45.12 // It should be safe to construct the sender though it might not be valid 45.13 45.14 frame sender(_SENDER_SP, younger_sp, adjusted_stack); 45.15 @@ -680,7 +675,7 @@ 45.16 45.17 // validate ConstantPoolCache* 45.18 ConstantPoolCache* cp = *interpreter_frame_cache_addr(); 45.19 - if (cp == NULL || !cp->is_metadata()) return false; 45.20 + if (cp == NULL || !cp->is_metaspace_object()) return false; 45.21 45.22 // validate locals 45.23
46.1 --- a/src/cpu/sparc/vm/globals_sparc.hpp Thu Jul 11 12:59:03 2013 -0400 46.2 +++ b/src/cpu/sparc/vm/globals_sparc.hpp Mon Jul 15 11:07:03 2013 +0100 46.3 @@ -110,8 +110,5 @@ 46.4 \ 46.5 product(uintx, ArraycopyDstPrefetchDistance, 0, \ 46.6 "Distance to prefetch destination array in arracopy") \ 46.7 - \ 46.8 - develop(intx, V8AtomicOperationUnderLockSpinCount, 50, \ 46.9 - "Number of times to spin wait on a v8 atomic operation lock") \ 46.10 46.11 #endif // CPU_SPARC_VM_GLOBALS_SPARC_HPP
47.1 --- a/src/cpu/sparc/vm/interp_masm_sparc.cpp Thu Jul 11 12:59:03 2013 -0400 47.2 +++ b/src/cpu/sparc/vm/interp_masm_sparc.cpp Mon Jul 15 11:07:03 2013 +0100 47.3 @@ -1210,8 +1210,7 @@ 47.4 st_ptr(mark_reg, lock_addr, BasicLock::displaced_header_offset_in_bytes()); 47.5 // compare and exchange object_addr, markOop | 1, stack address of basicLock 47.6 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 47.7 - casx_under_lock(mark_addr.base(), mark_reg, temp_reg, 47.8 - (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()); 47.9 + cas_ptr(mark_addr.base(), mark_reg, temp_reg); 47.10 47.11 // if the compare and exchange succeeded we are done (we saw an unlocked object) 47.12 cmp_and_brx_short(mark_reg, temp_reg, Assembler::equal, Assembler::pt, done); 47.13 @@ -1291,8 +1290,7 @@ 47.14 // we expect to see the stack address of the basicLock in case the 47.15 // lock is still a light weight lock (lock_reg) 47.16 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 47.17 - casx_under_lock(mark_addr.base(), lock_reg, displaced_header_reg, 47.18 - (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()); 47.19 + cas_ptr(mark_addr.base(), lock_reg, displaced_header_reg); 47.20 cmp(lock_reg, displaced_header_reg); 47.21 brx(Assembler::equal, true, Assembler::pn, done); 47.22 delayed()->st_ptr(G0, lockobj_addr); // free entry
48.1 --- a/src/cpu/sparc/vm/macroAssembler_sparc.cpp Thu Jul 11 12:59:03 2013 -0400 48.2 +++ b/src/cpu/sparc/vm/macroAssembler_sparc.cpp Mon Jul 15 11:07:03 2013 +0100 48.3 @@ -118,7 +118,6 @@ 48.4 case bp_op2: m = wdisp( word_aligned_ones, 0, 19); v = wdisp( dest_pos, inst_pos, 19); break; 48.5 case fb_op2: m = wdisp( word_aligned_ones, 0, 22); v = wdisp( dest_pos, inst_pos, 22); break; 48.6 case br_op2: m = wdisp( word_aligned_ones, 0, 22); v = wdisp( dest_pos, inst_pos, 22); break; 48.7 - case cb_op2: m = wdisp( word_aligned_ones, 0, 22); v = wdisp( dest_pos, inst_pos, 22); break; 48.8 case bpr_op2: { 48.9 if (is_cbcond(inst)) { 48.10 m = wdisp10(word_aligned_ones, 0); 48.11 @@ -149,7 +148,6 @@ 48.12 case bp_op2: r = inv_wdisp( inst, pos, 19); break; 48.13 case fb_op2: r = inv_wdisp( inst, pos, 22); break; 48.14 case br_op2: r = inv_wdisp( inst, pos, 22); break; 48.15 - case cb_op2: r = inv_wdisp( inst, pos, 22); break; 48.16 case bpr_op2: { 48.17 if (is_cbcond(inst)) { 48.18 r = inv_wdisp10(inst, pos); 48.19 @@ -325,12 +323,6 @@ 48.20 trap(ST_RESERVED_FOR_USER_0); 48.21 } 48.22 48.23 -// flush windows (except current) using flushw instruction if avail. 48.24 -void MacroAssembler::flush_windows() { 48.25 - if (VM_Version::v9_instructions_work()) flushw(); 48.26 - else flush_windows_trap(); 48.27 -} 48.28 - 48.29 // Write serialization page so VM thread can do a pseudo remote membar 48.30 // We use the current thread pointer to calculate a thread specific 48.31 // offset to write to within the page. 
This minimizes bus traffic 48.32 @@ -358,88 +350,6 @@ 48.33 Unimplemented(); 48.34 } 48.35 48.36 -void MacroAssembler::mult(Register s1, Register s2, Register d) { 48.37 - if(VM_Version::v9_instructions_work()) { 48.38 - mulx (s1, s2, d); 48.39 - } else { 48.40 - smul (s1, s2, d); 48.41 - } 48.42 -} 48.43 - 48.44 -void MacroAssembler::mult(Register s1, int simm13a, Register d) { 48.45 - if(VM_Version::v9_instructions_work()) { 48.46 - mulx (s1, simm13a, d); 48.47 - } else { 48.48 - smul (s1, simm13a, d); 48.49 - } 48.50 -} 48.51 - 48.52 - 48.53 -#ifdef ASSERT 48.54 -void MacroAssembler::read_ccr_v8_assert(Register ccr_save) { 48.55 - const Register s1 = G3_scratch; 48.56 - const Register s2 = G4_scratch; 48.57 - Label get_psr_test; 48.58 - // Get the condition codes the V8 way. 48.59 - read_ccr_trap(s1); 48.60 - mov(ccr_save, s2); 48.61 - // This is a test of V8 which has icc but not xcc 48.62 - // so mask off the xcc bits 48.63 - and3(s2, 0xf, s2); 48.64 - // Compare condition codes from the V8 and V9 ways. 48.65 - subcc(s2, s1, G0); 48.66 - br(Assembler::notEqual, true, Assembler::pt, get_psr_test); 48.67 - delayed()->breakpoint_trap(); 48.68 - bind(get_psr_test); 48.69 -} 48.70 - 48.71 -void MacroAssembler::write_ccr_v8_assert(Register ccr_save) { 48.72 - const Register s1 = G3_scratch; 48.73 - const Register s2 = G4_scratch; 48.74 - Label set_psr_test; 48.75 - // Write out the saved condition codes the V8 way 48.76 - write_ccr_trap(ccr_save, s1, s2); 48.77 - // Read back the condition codes using the V9 instruction 48.78 - rdccr(s1); 48.79 - mov(ccr_save, s2); 48.80 - // This is a test of V8 which has icc but not xcc 48.81 - // so mask off the xcc bits 48.82 - and3(s2, 0xf, s2); 48.83 - and3(s1, 0xf, s1); 48.84 - // Compare the V8 way with the V9 way. 
48.85 - subcc(s2, s1, G0); 48.86 - br(Assembler::notEqual, true, Assembler::pt, set_psr_test); 48.87 - delayed()->breakpoint_trap(); 48.88 - bind(set_psr_test); 48.89 -} 48.90 -#else 48.91 -#define read_ccr_v8_assert(x) 48.92 -#define write_ccr_v8_assert(x) 48.93 -#endif // ASSERT 48.94 - 48.95 -void MacroAssembler::read_ccr(Register ccr_save) { 48.96 - if (VM_Version::v9_instructions_work()) { 48.97 - rdccr(ccr_save); 48.98 - // Test code sequence used on V8. Do not move above rdccr. 48.99 - read_ccr_v8_assert(ccr_save); 48.100 - } else { 48.101 - read_ccr_trap(ccr_save); 48.102 - } 48.103 -} 48.104 - 48.105 -void MacroAssembler::write_ccr(Register ccr_save) { 48.106 - if (VM_Version::v9_instructions_work()) { 48.107 - // Test code sequence used on V8. Do not move below wrccr. 48.108 - write_ccr_v8_assert(ccr_save); 48.109 - wrccr(ccr_save); 48.110 - } else { 48.111 - const Register temp_reg1 = G3_scratch; 48.112 - const Register temp_reg2 = G4_scratch; 48.113 - write_ccr_trap(ccr_save, temp_reg1, temp_reg2); 48.114 - } 48.115 -} 48.116 - 48.117 - 48.118 // Calls to C land 48.119 48.120 #ifdef ASSERT 48.121 @@ -465,8 +375,8 @@ 48.122 #ifdef ASSERT 48.123 AddressLiteral last_get_thread_addrlit(&last_get_thread); 48.124 set(last_get_thread_addrlit, L3); 48.125 - inc(L4, get_pc(L4) + 2 * BytesPerInstWord); // skip getpc() code + inc + st_ptr to point L4 at call 48.126 - st_ptr(L4, L3, 0); 48.127 + rdpc(L4); 48.128 + inc(L4, 3 * BytesPerInstWord); // skip rdpc + inc + st_ptr to point L4 at call st_ptr(L4, L3, 0); 48.129 #endif 48.130 call(CAST_FROM_FN_PTR(address, reinitialize_thread), relocInfo::runtime_call_type); 48.131 delayed()->nop(); 48.132 @@ -1251,12 +1161,6 @@ 48.133 while (offset() % modulus != 0) nop(); 48.134 } 48.135 48.136 - 48.137 -void MacroAssembler::safepoint() { 48.138 - relocate(breakpoint_Relocation::spec(breakpoint_Relocation::safepoint)); 48.139 -} 48.140 - 48.141 - 48.142 void RegistersForDebugging::print(outputStream* s) { 48.143 FlagSetting 
fs(Debugging, true); 48.144 int j; 48.145 @@ -1327,7 +1231,7 @@ 48.146 48.147 void RegistersForDebugging::save_registers(MacroAssembler* a) { 48.148 a->sub(FP, round_to(sizeof(RegistersForDebugging), sizeof(jdouble)) - STACK_BIAS, O0); 48.149 - a->flush_windows(); 48.150 + a->flushw(); 48.151 int i; 48.152 for (i = 0; i < 8; ++i) { 48.153 a->ld_ptr(as_iRegister(i)->address_in_saved_window().after_save(), L1); a->st_ptr( L1, O0, i_offset(i)); 48.154 @@ -1338,7 +1242,7 @@ 48.155 for (i = 0; i < 32; ++i) { 48.156 a->stf(FloatRegisterImpl::S, as_FloatRegister(i), O0, f_offset(i)); 48.157 } 48.158 - for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) { 48.159 + for (i = 0; i < 64; i += 2) { 48.160 a->stf(FloatRegisterImpl::D, as_FloatRegister(i), O0, d_offset(i)); 48.161 } 48.162 } 48.163 @@ -1350,7 +1254,7 @@ 48.164 for (int j = 0; j < 32; ++j) { 48.165 a->ldf(FloatRegisterImpl::S, O0, f_offset(j), as_FloatRegister(j)); 48.166 } 48.167 - for (int k = 0; k < (VM_Version::v9_instructions_work() ? 64 : 32); k += 2) { 48.168 + for (int k = 0; k < 64; k += 2) { 48.169 a->ldf(FloatRegisterImpl::D, O0, d_offset(k), as_FloatRegister(k)); 48.170 } 48.171 } 48.172 @@ -1465,8 +1369,6 @@ 48.173 // the high bits of the O-regs if they contain Long values. Acts as a 'leaf' 48.174 // call. 48.175 void MacroAssembler::verify_oop_subroutine() { 48.176 - assert( VM_Version::v9_instructions_work(), "VerifyOops not supported for V8" ); 48.177 - 48.178 // Leaf call; no frame. 48.179 Label succeed, fail, null_or_fail; 48.180 48.181 @@ -1870,26 +1772,17 @@ 48.182 // And the equals case for the high part does not need testing, 48.183 // since that triplet is reached only after finding the high halves differ. 
48.184 48.185 - if (VM_Version::v9_instructions_work()) { 48.186 - mov(-1, Rresult); 48.187 - ba(done); delayed()-> movcc(greater, false, icc, 1, Rresult); 48.188 - } else { 48.189 - br(less, true, pt, done); delayed()-> set(-1, Rresult); 48.190 - br(greater, true, pt, done); delayed()-> set( 1, Rresult); 48.191 - } 48.192 - 48.193 - bind( check_low_parts ); 48.194 - 48.195 - if (VM_Version::v9_instructions_work()) { 48.196 - mov( -1, Rresult); 48.197 - movcc(equal, false, icc, 0, Rresult); 48.198 - movcc(greaterUnsigned, false, icc, 1, Rresult); 48.199 - } else { 48.200 - set(-1, Rresult); 48.201 - br(equal, true, pt, done); delayed()->set( 0, Rresult); 48.202 - br(greaterUnsigned, true, pt, done); delayed()->set( 1, Rresult); 48.203 - } 48.204 - bind( done ); 48.205 + mov(-1, Rresult); 48.206 + ba(done); 48.207 + delayed()->movcc(greater, false, icc, 1, Rresult); 48.208 + 48.209 + bind(check_low_parts); 48.210 + 48.211 + mov( -1, Rresult); 48.212 + movcc(equal, false, icc, 0, Rresult); 48.213 + movcc(greaterUnsigned, false, icc, 1, Rresult); 48.214 + 48.215 + bind(done); 48.216 } 48.217 48.218 void MacroAssembler::lneg( Register Rhi, Register Rlow ) { 48.219 @@ -2117,119 +2010,24 @@ 48.220 void MacroAssembler::float_cmp( bool is_float, int unordered_result, 48.221 FloatRegister Fa, FloatRegister Fb, 48.222 Register Rresult) { 48.223 - 48.224 - fcmp(is_float ? FloatRegisterImpl::S : FloatRegisterImpl::D, fcc0, Fa, Fb); 48.225 - 48.226 - Condition lt = unordered_result == -1 ? f_unorderedOrLess : f_less; 48.227 - Condition eq = f_equal; 48.228 - Condition gt = unordered_result == 1 ? 
f_unorderedOrGreater : f_greater; 48.229 - 48.230 - if (VM_Version::v9_instructions_work()) { 48.231 - 48.232 - mov(-1, Rresult); 48.233 - movcc(eq, true, fcc0, 0, Rresult); 48.234 - movcc(gt, true, fcc0, 1, Rresult); 48.235 - 48.236 + if (is_float) { 48.237 + fcmp(FloatRegisterImpl::S, fcc0, Fa, Fb); 48.238 } else { 48.239 - Label done; 48.240 - 48.241 - set( -1, Rresult ); 48.242 - //fb(lt, true, pn, done); delayed()->set( -1, Rresult ); 48.243 - fb( eq, true, pn, done); delayed()->set( 0, Rresult ); 48.244 - fb( gt, true, pn, done); delayed()->set( 1, Rresult ); 48.245 - 48.246 - bind (done); 48.247 + fcmp(FloatRegisterImpl::D, fcc0, Fa, Fb); 48.248 + } 48.249 + 48.250 + if (unordered_result == 1) { 48.251 + mov( -1, Rresult); 48.252 + movcc(f_equal, true, fcc0, 0, Rresult); 48.253 + movcc(f_unorderedOrGreater, true, fcc0, 1, Rresult); 48.254 + } else { 48.255 + mov( -1, Rresult); 48.256 + movcc(f_equal, true, fcc0, 0, Rresult); 48.257 + movcc(f_greater, true, fcc0, 1, Rresult); 48.258 } 48.259 } 48.260 48.261 48.262 -void MacroAssembler::fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d) 48.263 -{ 48.264 - if (VM_Version::v9_instructions_work()) { 48.265 - Assembler::fneg(w, s, d); 48.266 - } else { 48.267 - if (w == FloatRegisterImpl::S) { 48.268 - Assembler::fneg(w, s, d); 48.269 - } else if (w == FloatRegisterImpl::D) { 48.270 - // number() does a sanity check on the alignment. 48.271 - assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) && 48.272 - ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check"); 48.273 - 48.274 - Assembler::fneg(FloatRegisterImpl::S, s, d); 48.275 - Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor()); 48.276 - } else { 48.277 - assert(w == FloatRegisterImpl::Q, "Invalid float register width"); 48.278 - 48.279 - // number() does a sanity check on the alignment. 
48.280 - assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) && 48.281 - ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check"); 48.282 - 48.283 - Assembler::fneg(FloatRegisterImpl::S, s, d); 48.284 - Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor()); 48.285 - Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor()); 48.286 - Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor()); 48.287 - } 48.288 - } 48.289 -} 48.290 - 48.291 -void MacroAssembler::fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d) 48.292 -{ 48.293 - if (VM_Version::v9_instructions_work()) { 48.294 - Assembler::fmov(w, s, d); 48.295 - } else { 48.296 - if (w == FloatRegisterImpl::S) { 48.297 - Assembler::fmov(w, s, d); 48.298 - } else if (w == FloatRegisterImpl::D) { 48.299 - // number() does a sanity check on the alignment. 48.300 - assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) && 48.301 - ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check"); 48.302 - 48.303 - Assembler::fmov(FloatRegisterImpl::S, s, d); 48.304 - Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor()); 48.305 - } else { 48.306 - assert(w == FloatRegisterImpl::Q, "Invalid float register width"); 48.307 - 48.308 - // number() does a sanity check on the alignment. 
48.309 - assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) && 48.310 - ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check"); 48.311 - 48.312 - Assembler::fmov(FloatRegisterImpl::S, s, d); 48.313 - Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor()); 48.314 - Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor()); 48.315 - Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor()); 48.316 - } 48.317 - } 48.318 -} 48.319 - 48.320 -void MacroAssembler::fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d) 48.321 -{ 48.322 - if (VM_Version::v9_instructions_work()) { 48.323 - Assembler::fabs(w, s, d); 48.324 - } else { 48.325 - if (w == FloatRegisterImpl::S) { 48.326 - Assembler::fabs(w, s, d); 48.327 - } else if (w == FloatRegisterImpl::D) { 48.328 - // number() does a sanity check on the alignment. 48.329 - assert(((s->encoding(FloatRegisterImpl::D) & 1) == 0) && 48.330 - ((d->encoding(FloatRegisterImpl::D) & 1) == 0), "float register alignment check"); 48.331 - 48.332 - Assembler::fabs(FloatRegisterImpl::S, s, d); 48.333 - Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor()); 48.334 - } else { 48.335 - assert(w == FloatRegisterImpl::Q, "Invalid float register width"); 48.336 - 48.337 - // number() does a sanity check on the alignment. 
48.338 - assert(((s->encoding(FloatRegisterImpl::D) & 3) == 0) && 48.339 - ((d->encoding(FloatRegisterImpl::D) & 3) == 0), "float register alignment check"); 48.340 - 48.341 - Assembler::fabs(FloatRegisterImpl::S, s, d); 48.342 - Assembler::fmov(FloatRegisterImpl::S, s->successor(), d->successor()); 48.343 - Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor(), d->successor()->successor()); 48.344 - Assembler::fmov(FloatRegisterImpl::S, s->successor()->successor()->successor(), d->successor()->successor()->successor()); 48.345 - } 48.346 - } 48.347 -} 48.348 - 48.349 void MacroAssembler::save_all_globals_into_locals() { 48.350 mov(G1,L1); 48.351 mov(G2,L2); 48.352 @@ -2250,135 +2048,6 @@ 48.353 mov(L7,G7); 48.354 } 48.355 48.356 -// Use for 64 bit operation. 48.357 -void MacroAssembler::casx_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm) 48.358 -{ 48.359 - // store ptr_reg as the new top value 48.360 -#ifdef _LP64 48.361 - casx(top_ptr_reg, top_reg, ptr_reg); 48.362 -#else 48.363 - cas_under_lock(top_ptr_reg, top_reg, ptr_reg, lock_addr, use_call_vm); 48.364 -#endif // _LP64 48.365 -} 48.366 - 48.367 -// [RGV] This routine does not handle 64 bit operations. 48.368 -// use casx_under_lock() or casx directly!!! 48.369 -void MacroAssembler::cas_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, address lock_addr, bool use_call_vm) 48.370 -{ 48.371 - // store ptr_reg as the new top value 48.372 - if (VM_Version::v9_instructions_work()) { 48.373 - cas(top_ptr_reg, top_reg, ptr_reg); 48.374 - } else { 48.375 - 48.376 - // If the register is not an out nor global, it is not visible 48.377 - // after the save. Allocate a register for it, save its 48.378 - // value in the register save area (the save may not flush 48.379 - // registers to the save area). 
48.380 - 48.381 - Register top_ptr_reg_after_save; 48.382 - Register top_reg_after_save; 48.383 - Register ptr_reg_after_save; 48.384 - 48.385 - if (top_ptr_reg->is_out() || top_ptr_reg->is_global()) { 48.386 - top_ptr_reg_after_save = top_ptr_reg->after_save(); 48.387 - } else { 48.388 - Address reg_save_addr = top_ptr_reg->address_in_saved_window(); 48.389 - top_ptr_reg_after_save = L0; 48.390 - st(top_ptr_reg, reg_save_addr); 48.391 - } 48.392 - 48.393 - if (top_reg->is_out() || top_reg->is_global()) { 48.394 - top_reg_after_save = top_reg->after_save(); 48.395 - } else { 48.396 - Address reg_save_addr = top_reg->address_in_saved_window(); 48.397 - top_reg_after_save = L1; 48.398 - st(top_reg, reg_save_addr); 48.399 - } 48.400 - 48.401 - if (ptr_reg->is_out() || ptr_reg->is_global()) { 48.402 - ptr_reg_after_save = ptr_reg->after_save(); 48.403 - } else { 48.404 - Address reg_save_addr = ptr_reg->address_in_saved_window(); 48.405 - ptr_reg_after_save = L2; 48.406 - st(ptr_reg, reg_save_addr); 48.407 - } 48.408 - 48.409 - const Register& lock_reg = L3; 48.410 - const Register& lock_ptr_reg = L4; 48.411 - const Register& value_reg = L5; 48.412 - const Register& yield_reg = L6; 48.413 - const Register& yieldall_reg = L7; 48.414 - 48.415 - save_frame(); 48.416 - 48.417 - if (top_ptr_reg_after_save == L0) { 48.418 - ld(top_ptr_reg->address_in_saved_window().after_save(), top_ptr_reg_after_save); 48.419 - } 48.420 - 48.421 - if (top_reg_after_save == L1) { 48.422 - ld(top_reg->address_in_saved_window().after_save(), top_reg_after_save); 48.423 - } 48.424 - 48.425 - if (ptr_reg_after_save == L2) { 48.426 - ld(ptr_reg->address_in_saved_window().after_save(), ptr_reg_after_save); 48.427 - } 48.428 - 48.429 - Label(retry_get_lock); 48.430 - Label(not_same); 48.431 - Label(dont_yield); 48.432 - 48.433 - assert(lock_addr, "lock_address should be non null for v8"); 48.434 - set((intptr_t)lock_addr, lock_ptr_reg); 48.435 - // Initialize yield counter 48.436 - 
mov(G0,yield_reg); 48.437 - mov(G0, yieldall_reg); 48.438 - set(StubRoutines::Sparc::locked, lock_reg); 48.439 - 48.440 - bind(retry_get_lock); 48.441 - cmp_and_br_short(yield_reg, V8AtomicOperationUnderLockSpinCount, Assembler::less, Assembler::pt, dont_yield); 48.442 - 48.443 - if(use_call_vm) { 48.444 - Untested("Need to verify global reg consistancy"); 48.445 - call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::yield_all), yieldall_reg); 48.446 - } else { 48.447 - // Save the regs and make space for a C call 48.448 - save(SP, -96, SP); 48.449 - save_all_globals_into_locals(); 48.450 - call(CAST_FROM_FN_PTR(address,os::yield_all)); 48.451 - delayed()->mov(yieldall_reg, O0); 48.452 - restore_globals_from_locals(); 48.453 - restore(); 48.454 - } 48.455 - 48.456 - // reset the counter 48.457 - mov(G0,yield_reg); 48.458 - add(yieldall_reg, 1, yieldall_reg); 48.459 - 48.460 - bind(dont_yield); 48.461 - // try to get lock 48.462 - Assembler::swap(lock_ptr_reg, 0, lock_reg); 48.463 - 48.464 - // did we get the lock? 48.465 - cmp(lock_reg, StubRoutines::Sparc::unlocked); 48.466 - br(Assembler::notEqual, true, Assembler::pn, retry_get_lock); 48.467 - delayed()->add(yield_reg,1,yield_reg); 48.468 - 48.469 - // yes, got lock. do we have the same top? 48.470 - ld(top_ptr_reg_after_save, 0, value_reg); 48.471 - cmp_and_br_short(value_reg, top_reg_after_save, Assembler::notEqual, Assembler::pn, not_same); 48.472 - 48.473 - // yes, same top. 
48.474 - st(ptr_reg_after_save, top_ptr_reg_after_save, 0); 48.475 - membar(Assembler::StoreStore); 48.476 - 48.477 - bind(not_same); 48.478 - mov(value_reg, ptr_reg_after_save); 48.479 - st(lock_reg, lock_ptr_reg, 0); // unlock 48.480 - 48.481 - restore(); 48.482 - } 48.483 -} 48.484 - 48.485 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr, 48.486 Register tmp, 48.487 int offset) { 48.488 @@ -2970,7 +2639,7 @@ 48.489 markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place, 48.490 mark_reg); 48.491 or3(G2_thread, mark_reg, temp_reg); 48.492 - casn(mark_addr.base(), mark_reg, temp_reg); 48.493 + cas_ptr(mark_addr.base(), mark_reg, temp_reg); 48.494 // If the biasing toward our thread failed, this means that 48.495 // another thread succeeded in biasing it toward itself and we 48.496 // need to revoke that bias. The revocation will occur in the 48.497 @@ -2998,7 +2667,7 @@ 48.498 load_klass(obj_reg, temp_reg); 48.499 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg); 48.500 or3(G2_thread, temp_reg, temp_reg); 48.501 - casn(mark_addr.base(), mark_reg, temp_reg); 48.502 + cas_ptr(mark_addr.base(), mark_reg, temp_reg); 48.503 // If the biasing toward our thread failed, this means that 48.504 // another thread succeeded in biasing it toward itself and we 48.505 // need to revoke that bias. The revocation will occur in the 48.506 @@ -3027,7 +2696,7 @@ 48.507 // bits in this situation. Should attempt to preserve them. 48.508 load_klass(obj_reg, temp_reg); 48.509 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg); 48.510 - casn(mark_addr.base(), mark_reg, temp_reg); 48.511 + cas_ptr(mark_addr.base(), mark_reg, temp_reg); 48.512 // Fall through to the normal CAS-based lock, because no matter what 48.513 // the result of the above CAS, some thread must have succeeded in 48.514 // removing the bias bit from the object's header. 
48.515 @@ -3058,15 +2727,6 @@ 48.516 } 48.517 48.518 48.519 -// CASN -- 32-64 bit switch hitter similar to the synthetic CASN provided by 48.520 -// Solaris/SPARC's "as". Another apt name would be cas_ptr() 48.521 - 48.522 -void MacroAssembler::casn (Register addr_reg, Register cmp_reg, Register set_reg ) { 48.523 - casx_under_lock (addr_reg, cmp_reg, set_reg, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()); 48.524 -} 48.525 - 48.526 - 48.527 - 48.528 // compiler_lock_object() and compiler_unlock_object() are direct transliterations 48.529 // of i486.ad fast_lock() and fast_unlock(). See those methods for detailed comments. 48.530 // The code could be tightened up considerably. 48.531 @@ -3129,8 +2789,7 @@ 48.532 48.533 // compare object markOop with Rmark and if equal exchange Rscratch with object markOop 48.534 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 48.535 - casx_under_lock(mark_addr.base(), Rmark, Rscratch, 48.536 - (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()); 48.537 + cas_ptr(mark_addr.base(), Rmark, Rscratch); 48.538 48.539 // if compare/exchange succeeded we found an unlocked object and we now have locked it 48.540 // hence we are done 48.541 @@ -3176,7 +2835,7 @@ 48.542 mov(Rbox, Rscratch); 48.543 or3(Rmark, markOopDesc::unlocked_value, Rmark); 48.544 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 48.545 - casn(mark_addr.base(), Rmark, Rscratch); 48.546 + cas_ptr(mark_addr.base(), Rmark, Rscratch); 48.547 cmp(Rmark, Rscratch); 48.548 brx(Assembler::equal, false, Assembler::pt, done); 48.549 delayed()->sub(Rscratch, SP, Rscratch); 48.550 @@ -3207,7 +2866,7 @@ 48.551 // Invariant: if we acquire the lock then _recursions should be 0. 
48.552 add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark); 48.553 mov(G2_thread, Rscratch); 48.554 - casn(Rmark, G0, Rscratch); 48.555 + cas_ptr(Rmark, G0, Rscratch); 48.556 cmp(Rscratch, G0); 48.557 // Intentional fall-through into done 48.558 } else { 48.559 @@ -3240,7 +2899,7 @@ 48.560 mov(0, Rscratch); 48.561 or3(Rmark, markOopDesc::unlocked_value, Rmark); 48.562 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 48.563 - casn(mark_addr.base(), Rmark, Rscratch); 48.564 + cas_ptr(mark_addr.base(), Rmark, Rscratch); 48.565 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads); 48.566 cmp(Rscratch, Rmark); 48.567 brx(Assembler::notZero, false, Assembler::pn, Recursive); 48.568 @@ -3266,7 +2925,7 @@ 48.569 // the fast-path stack-lock code from the interpreter and always passed 48.570 // control to the "slow" operators in synchronizer.cpp. 48.571 48.572 - // RScratch contains the fetched obj->mark value from the failed CASN. 48.573 + // RScratch contains the fetched obj->mark value from the failed CAS. 48.574 #ifdef _LP64 48.575 sub(Rscratch, STACK_BIAS, Rscratch); 48.576 #endif 48.577 @@ -3300,7 +2959,7 @@ 48.578 // Invariant: if we acquire the lock then _recursions should be 0. 48.579 add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark); 48.580 mov(G2_thread, Rscratch); 48.581 - casn(Rmark, G0, Rscratch); 48.582 + cas_ptr(Rmark, G0, Rscratch); 48.583 cmp(Rscratch, G0); 48.584 // ST box->displaced_header = NonZero. 
48.585 // Any non-zero value suffices: 48.586 @@ -3336,8 +2995,7 @@ 48.587 // Check if it is still a light weight lock, this is is true if we see 48.588 // the stack address of the basicLock in the markOop of the object 48.589 assert(mark_addr.disp() == 0, "cas must take a zero displacement"); 48.590 - casx_under_lock(mark_addr.base(), Rbox, Rmark, 48.591 - (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()); 48.592 + cas_ptr(mark_addr.base(), Rbox, Rmark); 48.593 ba(done); 48.594 delayed()->cmp(Rbox, Rmark); 48.595 bind(done); 48.596 @@ -3398,7 +3056,7 @@ 48.597 delayed()->andcc(G0, G0, G0); 48.598 add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark); 48.599 mov(G2_thread, Rscratch); 48.600 - casn(Rmark, G0, Rscratch); 48.601 + cas_ptr(Rmark, G0, Rscratch); 48.602 // invert icc.zf and goto done 48.603 br_notnull(Rscratch, false, Assembler::pt, done); 48.604 delayed()->cmp(G0, G0); 48.605 @@ -3440,7 +3098,7 @@ 48.606 // A prototype implementation showed excellent results, although 48.607 // the scavenger and timeout code was rather involved. 48.608 48.609 - casn(mark_addr.base(), Rbox, Rscratch); 48.610 + cas_ptr(mark_addr.base(), Rbox, Rscratch); 48.611 cmp(Rbox, Rscratch); 48.612 // Intentional fall through into done ... 48.613 48.614 @@ -3540,7 +3198,8 @@ 48.615 48.616 if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) { 48.617 // No allocation in the shared eden. 48.618 - ba_short(slow_case); 48.619 + ba(slow_case); 48.620 + delayed()->nop(); 48.621 } else { 48.622 // get eden boundaries 48.623 // note: we need both top & top_addr! 48.624 @@ -3583,7 +3242,7 @@ 48.625 // Compare obj with the value at top_addr; if still equal, swap the value of 48.626 // end with the value at top_addr. If not equal, read the value at top_addr 48.627 // into end. 
48.628 - casx_under_lock(top_addr, obj, end, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()); 48.629 + cas_ptr(top_addr, obj, end); 48.630 // if someone beat us on the allocation, try again, otherwise continue 48.631 cmp(obj, end); 48.632 brx(Assembler::notEqual, false, Assembler::pn, retry);
49.1 --- a/src/cpu/sparc/vm/macroAssembler_sparc.hpp Thu Jul 11 12:59:03 2013 -0400 49.2 +++ b/src/cpu/sparc/vm/macroAssembler_sparc.hpp Mon Jul 15 11:07:03 2013 +0100 49.3 @@ -1,5 +1,5 @@ 49.4 /* 49.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. 49.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. 49.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 49.8 * 49.9 * This code is free software; you can redistribute it and/or modify it 49.10 @@ -963,7 +963,7 @@ 49.11 inline void sub(Register s1, RegisterOrConstant s2, Register d, int offset = 0); 49.12 49.13 using Assembler::swap; 49.14 - inline void swap(Address& a, Register d, int offset = 0); 49.15 + inline void swap(const Address& a, Register d, int offset = 0); 49.16 49.17 // address pseudos: make these names unlike instruction names to avoid confusion 49.18 inline intptr_t load_pc_address( Register reg, int bytes_to_skip ); 49.19 @@ -1056,13 +1056,6 @@ 49.20 49.21 void breakpoint_trap(); 49.22 void breakpoint_trap(Condition c, CC cc); 49.23 - void flush_windows_trap(); 49.24 - void clean_windows_trap(); 49.25 - void get_psr_trap(); 49.26 - void set_psr_trap(); 49.27 - 49.28 - // V8/V9 flush_windows 49.29 - void flush_windows(); 49.30 49.31 // Support for serializing memory accesses between threads 49.32 void serialize_memory(Register thread, Register tmp1, Register tmp2); 49.33 @@ -1071,14 +1064,6 @@ 49.34 void enter(); 49.35 void leave(); 49.36 49.37 - // V8/V9 integer multiply 49.38 - void mult(Register s1, Register s2, Register d); 49.39 - void mult(Register s1, int simm13a, Register d); 49.40 - 49.41 - // V8/V9 read and write of condition codes. 
49.42 - void read_ccr(Register d); 49.43 - void write_ccr(Register s); 49.44 - 49.45 // Manipulation of C++ bools 49.46 // These are idioms to flag the need for care with accessing bools but on 49.47 // this platform we assume byte size 49.48 @@ -1162,21 +1147,6 @@ 49.49 // check_and_forward_exception to handle exceptions when it is safe 49.50 void check_and_forward_exception(Register scratch_reg); 49.51 49.52 - private: 49.53 - // For V8 49.54 - void read_ccr_trap(Register ccr_save); 49.55 - void write_ccr_trap(Register ccr_save1, Register scratch1, Register scratch2); 49.56 - 49.57 -#ifdef ASSERT 49.58 - // For V8 debugging. Uses V8 instruction sequence and checks 49.59 - // result with V9 insturctions rdccr and wrccr. 49.60 - // Uses Gscatch and Gscatch2 49.61 - void read_ccr_v8_assert(Register ccr_save); 49.62 - void write_ccr_v8_assert(Register ccr_save); 49.63 -#endif // ASSERT 49.64 - 49.65 - public: 49.66 - 49.67 // Write to card table for - register is destroyed afterwards. 49.68 void card_table_write(jbyte* byte_map_base, Register tmp, Register obj); 49.69 49.70 @@ -1314,20 +1284,9 @@ 49.71 FloatRegister Fa, FloatRegister Fb, 49.72 Register Rresult); 49.73 49.74 - void fneg( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d); 49.75 - void fneg( FloatRegisterImpl::Width w, FloatRegister sd ) { Assembler::fneg(w, sd); } 49.76 - void fmov( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d); 49.77 - void fabs( FloatRegisterImpl::Width w, FloatRegister s, FloatRegister d); 49.78 - 49.79 void save_all_globals_into_locals(); 49.80 void restore_globals_from_locals(); 49.81 49.82 - void casx_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, 49.83 - address lock_addr=0, bool use_call_vm=false); 49.84 - void cas_under_lock(Register top_ptr_reg, Register top_reg, Register ptr_reg, 49.85 - address lock_addr=0, bool use_call_vm=false); 49.86 - void casn (Register addr_reg, Register cmp_reg, Register set_reg) ; 49.87 - 49.88 // 
These set the icc condition code to equal if the lock succeeded 49.89 // and notEqual if it failed and requires a slow case 49.90 void compiler_lock_object(Register Roop, Register Rmark, Register Rbox,
50.1 --- a/src/cpu/sparc/vm/macroAssembler_sparc.inline.hpp Thu Jul 11 12:59:03 2013 -0400 50.2 +++ b/src/cpu/sparc/vm/macroAssembler_sparc.inline.hpp Mon Jul 15 11:07:03 2013 +0100 50.3 @@ -1,5 +1,5 @@ 50.4 /* 50.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. 50.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. 50.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 50.8 * 50.9 * This code is free software; you can redistribute it and/or modify it 50.10 @@ -229,10 +229,7 @@ 50.11 // Use the right branch for the platform 50.12 50.13 inline void MacroAssembler::br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) { 50.14 - if (VM_Version::v9_instructions_work()) 50.15 - Assembler::bp(c, a, icc, p, d, rt); 50.16 - else 50.17 - Assembler::br(c, a, d, rt); 50.18 + Assembler::bp(c, a, icc, p, d, rt); 50.19 } 50.20 50.21 inline void MacroAssembler::br( Condition c, bool a, Predict p, Label& L ) { 50.22 @@ -268,10 +265,7 @@ 50.23 } 50.24 50.25 inline void MacroAssembler::fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt ) { 50.26 - if (VM_Version::v9_instructions_work()) 50.27 - fbp(c, a, fcc0, p, d, rt); 50.28 - else 50.29 - Assembler::fb(c, a, d, rt); 50.30 + fbp(c, a, fcc0, p, d, rt); 50.31 } 50.32 50.33 inline void MacroAssembler::fb( Condition c, bool a, Predict p, Label& L ) { 50.34 @@ -334,7 +328,7 @@ 50.35 50.36 // prefetch instruction 50.37 inline void MacroAssembler::iprefetch( address d, relocInfo::relocType rt ) { 50.38 - if (VM_Version::v9_instructions_work()) 50.39 + Assembler::bp( never, true, xcc, pt, d, rt ); 50.40 Assembler::bp( never, true, xcc, pt, d, rt ); 50.41 } 50.42 inline void MacroAssembler::iprefetch( Label& L) { iprefetch( target(L) ); } 50.43 @@ -344,15 +338,7 @@ 50.44 // returns delta from gotten pc to addr after 50.45 inline int MacroAssembler::get_pc( Register d ) { 50.46 int x = offset(); 50.47 - if 
(VM_Version::v9_instructions_work()) 50.48 - rdpc(d); 50.49 - else { 50.50 - Label lbl; 50.51 - Assembler::call(lbl, relocInfo::none); // No relocation as this is call to pc+0x8 50.52 - if (d == O7) delayed()->nop(); 50.53 - else delayed()->mov(O7, d); 50.54 - bind(lbl); 50.55 - } 50.56 + rdpc(d); 50.57 return offset() - x; 50.58 } 50.59 50.60 @@ -646,41 +632,26 @@ 50.61 // returns if membar generates anything, obviously this code should mirror 50.62 // membar below. 50.63 inline bool MacroAssembler::membar_has_effect( Membar_mask_bits const7a ) { 50.64 - if( !os::is_MP() ) return false; // Not needed on single CPU 50.65 - if( VM_Version::v9_instructions_work() ) { 50.66 - const Membar_mask_bits effective_mask = 50.67 - Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore)); 50.68 - return (effective_mask != 0); 50.69 - } else { 50.70 - return true; 50.71 - } 50.72 + if (!os::is_MP()) 50.73 + return false; // Not needed on single CPU 50.74 + const Membar_mask_bits effective_mask = 50.75 + Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore)); 50.76 + return (effective_mask != 0); 50.77 } 50.78 50.79 inline void MacroAssembler::membar( Membar_mask_bits const7a ) { 50.80 // Uniprocessors do not need memory barriers 50.81 - if (!os::is_MP()) return; 50.82 + if (!os::is_MP()) 50.83 + return; 50.84 // Weakened for current Sparcs and TSO. See the v9 manual, sections 8.4.3, 50.85 // 8.4.4.3, a.31 and a.50. 50.86 - if( VM_Version::v9_instructions_work() ) { 50.87 - // Under TSO, setting bit 3, 2, or 0 is redundant, so the only value 50.88 - // of the mmask subfield of const7a that does anything that isn't done 50.89 - // implicitly is StoreLoad. 50.90 - const Membar_mask_bits effective_mask = 50.91 - Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore)); 50.92 - if ( effective_mask != 0 ) { 50.93 - Assembler::membar( effective_mask ); 50.94 - } 50.95 - } else { 50.96 - // stbar is the closest there is on v8. Equivalent to membar(StoreStore). 
We 50.97 - // do not issue the stbar because to my knowledge all v8 machines implement TSO, 50.98 - // which guarantees that all stores behave as if an stbar were issued just after 50.99 - // each one of them. On these machines, stbar ought to be a nop. There doesn't 50.100 - // appear to be an equivalent of membar(StoreLoad) on v8: TSO doesn't require it, 50.101 - // it can't be specified by stbar, nor have I come up with a way to simulate it. 50.102 - // 50.103 - // Addendum. Dave says that ldstub guarantees a write buffer flush to coherent 50.104 - // space. Put one here to be on the safe side. 50.105 - Assembler::ldstub(SP, 0, G0); 50.106 + // Under TSO, setting bit 3, 2, or 0 is redundant, so the only value 50.107 + // of the mmask subfield of const7a that does anything that isn't done 50.108 + // implicitly is StoreLoad. 50.109 + const Membar_mask_bits effective_mask = 50.110 + Membar_mask_bits(const7a & ~(LoadLoad | LoadStore | StoreStore)); 50.111 + if (effective_mask != 0) { 50.112 + Assembler::membar(effective_mask); 50.113 } 50.114 } 50.115 50.116 @@ -748,7 +719,7 @@ 50.117 if (offset != 0) sub(d, offset, d); 50.118 } 50.119 50.120 -inline void MacroAssembler::swap(Address& a, Register d, int offset) { 50.121 +inline void MacroAssembler::swap(const Address& a, Register d, int offset) { 50.122 relocate(a.rspec(offset)); 50.123 if (a.has_index()) { assert(offset == 0, ""); swap(a.base(), a.index(), d ); } 50.124 else { swap(a.base(), a.disp() + offset, d); }
51.1 --- a/src/cpu/sparc/vm/nativeInst_sparc.cpp Thu Jul 11 12:59:03 2013 -0400 51.2 +++ b/src/cpu/sparc/vm/nativeInst_sparc.cpp Mon Jul 15 11:07:03 2013 +0100 51.3 @@ -162,7 +162,7 @@ 51.4 int i1 = ((int*)code_buffer)[1]; 51.5 int* contention_addr = (int*) n_call->addr_at(1*BytesPerInstWord); 51.6 assert(inv_op(*contention_addr) == Assembler::arith_op || 51.7 - *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(), 51.8 + *contention_addr == nop_instruction(), 51.9 "must not interfere with original call"); 51.10 // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order 51.11 n_call->set_long_at(1*BytesPerInstWord, i1); 51.12 @@ -181,7 +181,7 @@ 51.13 // Make sure the first-patched instruction, which may co-exist 51.14 // briefly with the call, will do something harmless. 51.15 assert(inv_op(*contention_addr) == Assembler::arith_op || 51.16 - *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(), 51.17 + *contention_addr == nop_instruction(), 51.18 "must not interfere with original call"); 51.19 } 51.20 51.21 @@ -933,11 +933,7 @@ 51.22 int code_size = 1 * BytesPerInstWord; 51.23 CodeBuffer cb(verified_entry, code_size + 1); 51.24 MacroAssembler* a = new MacroAssembler(&cb); 51.25 - if (VM_Version::v9_instructions_work()) { 51.26 - a->ldsw(G0, 0, O7); // "ld" must agree with code in the signal handler 51.27 - } else { 51.28 - a->lduw(G0, 0, O7); // "ld" must agree with code in the signal handler 51.29 - } 51.30 + a->ldsw(G0, 0, O7); // "ld" must agree with code in the signal handler 51.31 ICache::invalidate_range(verified_entry, code_size); 51.32 } 51.33 51.34 @@ -1024,7 +1020,7 @@ 51.35 int i1 = ((int*)code_buffer)[1]; 51.36 int* contention_addr = (int*) h_jump->addr_at(1*BytesPerInstWord); 51.37 assert(inv_op(*contention_addr) == Assembler::arith_op || 51.38 - *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(), 51.39 + *contention_addr == nop_instruction(), 
51.40 "must not interfere with original call"); 51.41 // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order 51.42 h_jump->set_long_at(1*BytesPerInstWord, i1); 51.43 @@ -1043,6 +1039,6 @@ 51.44 // Make sure the first-patched instruction, which may co-exist 51.45 // briefly with the call, will do something harmless. 51.46 assert(inv_op(*contention_addr) == Assembler::arith_op || 51.47 - *contention_addr == nop_instruction() || !VM_Version::v9_instructions_work(), 51.48 + *contention_addr == nop_instruction(), 51.49 "must not interfere with original call"); 51.50 }
52.1 --- a/src/cpu/sparc/vm/nativeInst_sparc.hpp Thu Jul 11 12:59:03 2013 -0400 52.2 +++ b/src/cpu/sparc/vm/nativeInst_sparc.hpp Mon Jul 15 11:07:03 2013 +0100 52.3 @@ -70,8 +70,7 @@ 52.4 bool is_zombie() { 52.5 int x = long_at(0); 52.6 return is_op3(x, 52.7 - VM_Version::v9_instructions_work() ? 52.8 - Assembler::ldsw_op3 : Assembler::lduw_op3, 52.9 + Assembler::ldsw_op3, 52.10 Assembler::ldst_op) 52.11 && Assembler::inv_rs1(x) == G0 52.12 && Assembler::inv_rd(x) == O7;
53.1 --- a/src/cpu/sparc/vm/register_sparc.hpp Thu Jul 11 12:59:03 2013 -0400 53.2 +++ b/src/cpu/sparc/vm/register_sparc.hpp Mon Jul 15 11:07:03 2013 +0100 53.3 @@ -249,12 +249,10 @@ 53.4 53.5 case D: 53.6 assert(c < 64 && (c & 1) == 0, "bad double float register"); 53.7 - assert(c < 32 || VM_Version::v9_instructions_work(), "V9 float work only on V9 platform"); 53.8 return (c & 0x1e) | ((c & 0x20) >> 5); 53.9 53.10 case Q: 53.11 assert(c < 64 && (c & 3) == 0, "bad quad float register"); 53.12 - assert(c < 32 || VM_Version::v9_instructions_work(), "V9 float work only on V9 platform"); 53.13 return (c & 0x1c) | ((c & 0x20) >> 5); 53.14 } 53.15 ShouldNotReachHere();
54.1 --- a/src/cpu/sparc/vm/relocInfo_sparc.cpp Thu Jul 11 12:59:03 2013 -0400 54.2 +++ b/src/cpu/sparc/vm/relocInfo_sparc.cpp Mon Jul 15 11:07:03 2013 +0100 54.3 @@ -193,36 +193,6 @@ 54.4 return *(address*)addr(); 54.5 } 54.6 54.7 - 54.8 -int Relocation::pd_breakpoint_size() { 54.9 - // minimum breakpoint size, in short words 54.10 - return NativeIllegalInstruction::instruction_size / sizeof(short); 54.11 -} 54.12 - 54.13 -void Relocation::pd_swap_in_breakpoint(address x, short* instrs, int instrlen) { 54.14 - Untested("pd_swap_in_breakpoint"); 54.15 - // %%% probably do not need a general instrlen; just use the trap size 54.16 - if (instrs != NULL) { 54.17 - assert(instrlen * sizeof(short) == NativeIllegalInstruction::instruction_size, "enough instrlen in reloc. data"); 54.18 - for (int i = 0; i < instrlen; i++) { 54.19 - instrs[i] = ((short*)x)[i]; 54.20 - } 54.21 - } 54.22 - NativeIllegalInstruction::insert(x); 54.23 -} 54.24 - 54.25 - 54.26 -void Relocation::pd_swap_out_breakpoint(address x, short* instrs, int instrlen) { 54.27 - Untested("pd_swap_out_breakpoint"); 54.28 - assert(instrlen * sizeof(short) == sizeof(int), "enough buf"); 54.29 - union { int l; short s[1]; } u; 54.30 - for (int i = 0; i < instrlen; i++) { 54.31 - u.s[i] = instrs[i]; 54.32 - } 54.33 - NativeInstruction* ni = nativeInstruction_at(x); 54.34 - ni->set_long_at(0, u.l); 54.35 -} 54.36 - 54.37 void poll_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) { 54.38 } 54.39
55.1 --- a/src/cpu/sparc/vm/sharedRuntime_sparc.cpp Thu Jul 11 12:59:03 2013 -0400 55.2 +++ b/src/cpu/sparc/vm/sharedRuntime_sparc.cpp Mon Jul 15 11:07:03 2013 +0100 55.3 @@ -2459,7 +2459,7 @@ 55.4 55.5 // Finally just about ready to make the JNI call 55.6 55.7 - __ flush_windows(); 55.8 + __ flushw(); 55.9 if (inner_frame_created) { 55.10 __ restore(); 55.11 } else {
56.1 --- a/src/cpu/sparc/vm/sparc.ad Thu Jul 11 12:59:03 2013 -0400 56.2 +++ b/src/cpu/sparc/vm/sparc.ad Mon Jul 15 11:07:03 2013 +0100 56.3 @@ -2778,10 +2778,7 @@ 56.4 Register Rold = reg_to_register_object($old$$reg); 56.5 Register Rnew = reg_to_register_object($new$$reg); 56.6 56.7 - // casx_under_lock picks 1 of 3 encodings: 56.8 - // For 32-bit pointers you get a 32-bit CAS 56.9 - // For 64-bit pointers you get a 64-bit CASX 56.10 - __ casn(Rmem, Rold, Rnew); // Swap(*Rmem,Rnew) if *Rmem == Rold 56.11 + __ cas_ptr(Rmem, Rold, Rnew); // Swap(*Rmem,Rnew) if *Rmem == Rold 56.12 __ cmp( Rold, Rnew ); 56.13 %} 56.14 56.15 @@ -3067,7 +3064,7 @@ 56.16 AddressLiteral last_rethrow_addrlit(&last_rethrow); 56.17 __ sethi(last_rethrow_addrlit, L1); 56.18 Address addr(L1, last_rethrow_addrlit.low10()); 56.19 - __ get_pc(L2); 56.20 + __ rdpc(L2); 56.21 __ inc(L2, 3 * BytesPerInstWord); // skip this & 2 more insns to point at jump_to 56.22 __ st_ptr(L2, addr); 56.23 __ restore();
57.1 --- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp Thu Jul 11 12:59:03 2013 -0400 57.2 +++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp Mon Jul 15 11:07:03 2013 +0100 57.3 @@ -566,7 +566,7 @@ 57.4 StubCodeMark mark(this, "StubRoutines", "flush_callers_register_windows"); 57.5 address start = __ pc(); 57.6 57.7 - __ flush_windows(); 57.8 + __ flushw(); 57.9 __ retl(false); 57.10 __ delayed()->add( FP, STACK_BIAS, O0 ); 57.11 // The returned value must be a stack pointer whose register save area 57.12 @@ -575,67 +575,9 @@ 57.13 return start; 57.14 } 57.15 57.16 - // Helper functions for v8 atomic operations. 57.17 - // 57.18 - void get_v8_oop_lock_ptr(Register lock_ptr_reg, Register mark_oop_reg, Register scratch_reg) { 57.19 - if (mark_oop_reg == noreg) { 57.20 - address lock_ptr = (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(); 57.21 - __ set((intptr_t)lock_ptr, lock_ptr_reg); 57.22 - } else { 57.23 - assert(scratch_reg != noreg, "just checking"); 57.24 - address lock_ptr = (address)StubRoutines::Sparc::_v8_oop_lock_cache; 57.25 - __ set((intptr_t)lock_ptr, lock_ptr_reg); 57.26 - __ and3(mark_oop_reg, StubRoutines::Sparc::v8_oop_lock_mask_in_place, scratch_reg); 57.27 - __ add(lock_ptr_reg, scratch_reg, lock_ptr_reg); 57.28 - } 57.29 - } 57.30 - 57.31 - void generate_v8_lock_prologue(Register lock_reg, Register lock_ptr_reg, Register yield_reg, Label& retry, Label& dontyield, Register mark_oop_reg = noreg, Register scratch_reg = noreg) { 57.32 - 57.33 - get_v8_oop_lock_ptr(lock_ptr_reg, mark_oop_reg, scratch_reg); 57.34 - __ set(StubRoutines::Sparc::locked, lock_reg); 57.35 - // Initialize yield counter 57.36 - __ mov(G0,yield_reg); 57.37 - 57.38 - __ BIND(retry); 57.39 - __ cmp_and_br_short(yield_reg, V8AtomicOperationUnderLockSpinCount, Assembler::less, Assembler::pt, dontyield); 57.40 - 57.41 - // This code can only be called from inside the VM, this 57.42 - // stub is only invoked from Atomic::add(). 
We do not 57.43 - // want to use call_VM, because _last_java_sp and such 57.44 - // must already be set. 57.45 - // 57.46 - // Save the regs and make space for a C call 57.47 - __ save(SP, -96, SP); 57.48 - __ save_all_globals_into_locals(); 57.49 - BLOCK_COMMENT("call os::naked_sleep"); 57.50 - __ call(CAST_FROM_FN_PTR(address, os::naked_sleep)); 57.51 - __ delayed()->nop(); 57.52 - __ restore_globals_from_locals(); 57.53 - __ restore(); 57.54 - // reset the counter 57.55 - __ mov(G0,yield_reg); 57.56 - 57.57 - __ BIND(dontyield); 57.58 - 57.59 - // try to get lock 57.60 - __ swap(lock_ptr_reg, 0, lock_reg); 57.61 - 57.62 - // did we get the lock? 57.63 - __ cmp(lock_reg, StubRoutines::Sparc::unlocked); 57.64 - __ br(Assembler::notEqual, true, Assembler::pn, retry); 57.65 - __ delayed()->add(yield_reg,1,yield_reg); 57.66 - 57.67 - // yes, got lock. do the operation here. 57.68 - } 57.69 - 57.70 - void generate_v8_lock_epilogue(Register lock_reg, Register lock_ptr_reg, Register yield_reg, Label& retry, Label& dontyield, Register mark_oop_reg = noreg, Register scratch_reg = noreg) { 57.71 - __ st(lock_reg, lock_ptr_reg, 0); // unlock 57.72 - } 57.73 - 57.74 // Support for jint Atomic::xchg(jint exchange_value, volatile jint* dest). 
57.75 // 57.76 - // Arguments : 57.77 + // Arguments: 57.78 // 57.79 // exchange_value: O0 57.80 // dest: O1 57.81 @@ -656,33 +598,14 @@ 57.82 __ mov(O0, O3); // scratch copy of exchange value 57.83 __ ld(O1, 0, O2); // observe the previous value 57.84 // try to replace O2 with O3 57.85 - __ cas_under_lock(O1, O2, O3, 57.86 - (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(),false); 57.87 + __ cas(O1, O2, O3); 57.88 __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry); 57.89 57.90 __ retl(false); 57.91 __ delayed()->mov(O2, O0); // report previous value to caller 57.92 - 57.93 } else { 57.94 - if (VM_Version::v9_instructions_work()) { 57.95 - __ retl(false); 57.96 - __ delayed()->swap(O1, 0, O0); 57.97 - } else { 57.98 - const Register& lock_reg = O2; 57.99 - const Register& lock_ptr_reg = O3; 57.100 - const Register& yield_reg = O4; 57.101 - 57.102 - Label retry; 57.103 - Label dontyield; 57.104 - 57.105 - generate_v8_lock_prologue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield); 57.106 - // got the lock, do the swap 57.107 - __ swap(O1, 0, O0); 57.108 - 57.109 - generate_v8_lock_epilogue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield); 57.110 - __ retl(false); 57.111 - __ delayed()->nop(); 57.112 - } 57.113 + __ retl(false); 57.114 + __ delayed()->swap(O1, 0, O0); 57.115 } 57.116 57.117 return start; 57.118 @@ -691,7 +614,7 @@ 57.119 57.120 // Support for jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value) 57.121 // 57.122 - // Arguments : 57.123 + // Arguments: 57.124 // 57.125 // exchange_value: O0 57.126 // dest: O1 57.127 @@ -701,15 +624,12 @@ 57.128 // 57.129 // O0: the value previously stored in dest 57.130 // 57.131 - // Overwrites (v8): O3,O4,O5 57.132 - // 57.133 address generate_atomic_cmpxchg() { 57.134 StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg"); 57.135 address start = __ pc(); 57.136 57.137 // cmpxchg(dest, compare_value, exchange_value) 57.138 - __ 
cas_under_lock(O1, O2, O0, 57.139 - (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr(),false); 57.140 + __ cas(O1, O2, O0); 57.141 __ retl(false); 57.142 __ delayed()->nop(); 57.143 57.144 @@ -718,7 +638,7 @@ 57.145 57.146 // Support for jlong Atomic::cmpxchg(jlong exchange_value, volatile jlong *dest, jlong compare_value) 57.147 // 57.148 - // Arguments : 57.149 + // Arguments: 57.150 // 57.151 // exchange_value: O1:O0 57.152 // dest: O2 57.153 @@ -728,17 +648,12 @@ 57.154 // 57.155 // O1:O0: the value previously stored in dest 57.156 // 57.157 - // This only works on V9, on V8 we don't generate any 57.158 - // code and just return NULL. 57.159 - // 57.160 // Overwrites: G1,G2,G3 57.161 // 57.162 address generate_atomic_cmpxchg_long() { 57.163 StubCodeMark mark(this, "StubRoutines", "atomic_cmpxchg_long"); 57.164 address start = __ pc(); 57.165 57.166 - if (!VM_Version::supports_cx8()) 57.167 - return NULL;; 57.168 __ sllx(O0, 32, O0); 57.169 __ srl(O1, 0, O1); 57.170 __ or3(O0,O1,O0); // O0 holds 64-bit value from compare_value 57.171 @@ -756,7 +671,7 @@ 57.172 57.173 // Support for jint Atomic::add(jint add_value, volatile jint* dest). 
57.174 // 57.175 - // Arguments : 57.176 + // Arguments: 57.177 // 57.178 // add_value: O0 (e.g., +1 or -1) 57.179 // dest: O1 57.180 @@ -765,47 +680,22 @@ 57.181 // 57.182 // O0: the new value stored in dest 57.183 // 57.184 - // Overwrites (v9): O3 57.185 - // Overwrites (v8): O3,O4,O5 57.186 + // Overwrites: O3 57.187 // 57.188 address generate_atomic_add() { 57.189 StubCodeMark mark(this, "StubRoutines", "atomic_add"); 57.190 address start = __ pc(); 57.191 __ BIND(_atomic_add_stub); 57.192 57.193 - if (VM_Version::v9_instructions_work()) { 57.194 - Label(retry); 57.195 - __ BIND(retry); 57.196 - 57.197 - __ lduw(O1, 0, O2); 57.198 - __ add(O0, O2, O3); 57.199 - __ cas(O1, O2, O3); 57.200 - __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry); 57.201 - __ retl(false); 57.202 - __ delayed()->add(O0, O2, O0); // note that cas made O2==O3 57.203 - } else { 57.204 - const Register& lock_reg = O2; 57.205 - const Register& lock_ptr_reg = O3; 57.206 - const Register& value_reg = O4; 57.207 - const Register& yield_reg = O5; 57.208 - 57.209 - Label(retry); 57.210 - Label(dontyield); 57.211 - 57.212 - generate_v8_lock_prologue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield); 57.213 - // got lock, do the increment 57.214 - __ ld(O1, 0, value_reg); 57.215 - __ add(O0, value_reg, value_reg); 57.216 - __ st(value_reg, O1, 0); 57.217 - 57.218 - // %%% only for RMO and PSO 57.219 - __ membar(Assembler::StoreStore); 57.220 - 57.221 - generate_v8_lock_epilogue(lock_reg, lock_ptr_reg, yield_reg, retry, dontyield); 57.222 - 57.223 - __ retl(false); 57.224 - __ delayed()->mov(value_reg, O0); 57.225 - } 57.226 + Label(retry); 57.227 + __ BIND(retry); 57.228 + 57.229 + __ lduw(O1, 0, O2); 57.230 + __ add(O0, O2, O3); 57.231 + __ cas(O1, O2, O3); 57.232 + __ cmp_and_br_short(O2, O3, Assembler::notEqual, Assembler::pn, retry); 57.233 + __ retl(false); 57.234 + __ delayed()->add(O0, O2, O0); // note that cas made O2==O3 57.235 57.236 return start; 57.237 } 57.238 
@@ -841,7 +731,7 @@ 57.239 __ mov(G3, L3); 57.240 __ mov(G4, L4); 57.241 __ mov(G5, L5); 57.242 - for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) { 57.243 + for (i = 0; i < 64; i += 2) { 57.244 __ stf(FloatRegisterImpl::D, as_FloatRegister(i), preserve_addr, i * wordSize); 57.245 } 57.246 57.247 @@ -855,7 +745,7 @@ 57.248 __ mov(L3, G3); 57.249 __ mov(L4, G4); 57.250 __ mov(L5, G5); 57.251 - for (i = 0; i < (VM_Version::v9_instructions_work() ? 64 : 32); i += 2) { 57.252 + for (i = 0; i < 64; i += 2) { 57.253 __ ldf(FloatRegisterImpl::D, preserve_addr, as_FloatRegister(i), i * wordSize); 57.254 } 57.255
58.1 --- a/src/cpu/sparc/vm/stubRoutines_sparc.cpp Thu Jul 11 12:59:03 2013 -0400 58.2 +++ b/src/cpu/sparc/vm/stubRoutines_sparc.cpp Mon Jul 15 11:07:03 2013 +0100 58.3 @@ -52,7 +52,3 @@ 58.4 address StubRoutines::Sparc::_flush_callers_register_windows_entry = CAST_FROM_FN_PTR(address, bootstrap_flush_windows); 58.5 58.6 address StubRoutines::Sparc::_partial_subtype_check = NULL; 58.7 - 58.8 -int StubRoutines::Sparc::_atomic_memory_operation_lock = StubRoutines::Sparc::unlocked; 58.9 - 58.10 -int StubRoutines::Sparc::_v8_oop_lock_cache[StubRoutines::Sparc::nof_v8_oop_lock_cache_entries];
59.1 --- a/src/cpu/sparc/vm/stubRoutines_sparc.hpp Thu Jul 11 12:59:03 2013 -0400 59.2 +++ b/src/cpu/sparc/vm/stubRoutines_sparc.hpp Mon Jul 15 11:07:03 2013 +0100 59.3 @@ -47,46 +47,14 @@ 59.4 class Sparc { 59.5 friend class StubGenerator; 59.6 59.7 - public: 59.8 - enum { nof_instance_allocators = 10 }; 59.9 - 59.10 - // allocator lock values 59.11 - enum { 59.12 - unlocked = 0, 59.13 - locked = 1 59.14 - }; 59.15 - 59.16 - enum { 59.17 - v8_oop_lock_ignore_bits = 2, 59.18 - v8_oop_lock_bits = 4, 59.19 - nof_v8_oop_lock_cache_entries = 1 << (v8_oop_lock_bits+v8_oop_lock_ignore_bits), 59.20 - v8_oop_lock_mask = right_n_bits(v8_oop_lock_bits), 59.21 - v8_oop_lock_mask_in_place = v8_oop_lock_mask << v8_oop_lock_ignore_bits 59.22 - }; 59.23 - 59.24 - static int _v8_oop_lock_cache[nof_v8_oop_lock_cache_entries]; 59.25 - 59.26 private: 59.27 static address _test_stop_entry; 59.28 static address _stop_subroutine_entry; 59.29 static address _flush_callers_register_windows_entry; 59.30 59.31 - static int _atomic_memory_operation_lock; 59.32 - 59.33 static address _partial_subtype_check; 59.34 59.35 public: 59.36 - // %%% global lock for everyone who needs to use atomic_compare_and_exchange 59.37 - // %%% or atomic_increment -- should probably use more locks for more 59.38 - // %%% scalability-- for instance one for each eden space or group of 59.39 - 59.40 - // address of the lock for atomic_compare_and_exchange 59.41 - static int* atomic_memory_operation_lock_addr() { return &_atomic_memory_operation_lock; } 59.42 - 59.43 - // accessor and mutator for _atomic_memory_operation_lock 59.44 - static int atomic_memory_operation_lock() { return _atomic_memory_operation_lock; } 59.45 - static void set_atomic_memory_operation_lock(int value) { _atomic_memory_operation_lock = value; } 59.46 - 59.47 // test assembler stop routine by setting registers 59.48 static void (*test_stop_entry()) () { return CAST_TO_FN_PTR(void (*)(void), _test_stop_entry); } 59.49
60.1 --- a/src/cpu/sparc/vm/templateInterpreter_sparc.cpp Thu Jul 11 12:59:03 2013 -0400 60.2 +++ b/src/cpu/sparc/vm/templateInterpreter_sparc.cpp Mon Jul 15 11:07:03 2013 +0100 60.3 @@ -1054,7 +1054,7 @@ 60.4 // flush the windows now. We don't care about the current (protection) frame 60.5 // only the outer frames 60.6 60.7 - __ flush_windows(); 60.8 + __ flushw(); 60.9 60.10 // mark windows as flushed 60.11 Address flags(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());
61.1 --- a/src/cpu/sparc/vm/templateTable_sparc.cpp Thu Jul 11 12:59:03 2013 -0400 61.2 +++ b/src/cpu/sparc/vm/templateTable_sparc.cpp Mon Jul 15 11:07:03 2013 +0100 61.3 @@ -1338,14 +1338,13 @@ 61.4 61.5 void TemplateTable::fneg() { 61.6 transition(ftos, ftos); 61.7 - __ fneg(FloatRegisterImpl::S, Ftos_f); 61.8 + __ fneg(FloatRegisterImpl::S, Ftos_f, Ftos_f); 61.9 } 61.10 61.11 61.12 void TemplateTable::dneg() { 61.13 transition(dtos, dtos); 61.14 - // v8 has fnegd if source and dest are the same 61.15 - __ fneg(FloatRegisterImpl::D, Ftos_f); 61.16 + __ fneg(FloatRegisterImpl::D, Ftos_f, Ftos_f); 61.17 } 61.18 61.19 61.20 @@ -1470,19 +1469,10 @@ 61.21 __ st_long(Otos_l, __ d_tmp); 61.22 __ ldf(FloatRegisterImpl::D, __ d_tmp, Ftos_d); 61.23 61.24 - if (VM_Version::v9_instructions_work()) { 61.25 - if (bytecode() == Bytecodes::_l2f) { 61.26 - __ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f); 61.27 - } else { 61.28 - __ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d); 61.29 - } 61.30 + if (bytecode() == Bytecodes::_l2f) { 61.31 + __ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f); 61.32 } else { 61.33 - __ call_VM_leaf( 61.34 - Lscratch, 61.35 - bytecode() == Bytecodes::_l2f 61.36 - ? CAST_FROM_FN_PTR(address, SharedRuntime::l2f) 61.37 - : CAST_FROM_FN_PTR(address, SharedRuntime::l2d) 61.38 - ); 61.39 + __ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d); 61.40 } 61.41 break; 61.42 61.43 @@ -1490,11 +1480,6 @@ 61.44 Label isNaN; 61.45 // result must be 0 if value is NaN; test by comparing value to itself 61.46 __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, Ftos_f, Ftos_f); 61.47 - // According to the v8 manual, you have to have a non-fp instruction 61.48 - // between fcmp and fb. 
61.49 - if (!VM_Version::v9_instructions_work()) { 61.50 - __ nop(); 61.51 - } 61.52 __ fb(Assembler::f_unordered, true, Assembler::pn, isNaN); 61.53 __ delayed()->clr(Otos_i); // NaN 61.54 __ ftoi(FloatRegisterImpl::S, Ftos_f, F30); 61.55 @@ -1537,16 +1522,7 @@ 61.56 break; 61.57 61.58 case Bytecodes::_d2f: 61.59 - if (VM_Version::v9_instructions_work()) { 61.60 __ ftof( FloatRegisterImpl::D, FloatRegisterImpl::S, Ftos_d, Ftos_f); 61.61 - } 61.62 - else { 61.63 - // must uncache tos 61.64 - __ push_d(); 61.65 - __ pop_i(O0); 61.66 - __ pop_i(O1); 61.67 - __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::d2f)); 61.68 - } 61.69 break; 61.70 61.71 default: ShouldNotReachHere(); 61.72 @@ -1956,17 +1932,8 @@ 61.73 __ ld( Rarray, Rscratch, Rscratch ); 61.74 // (Rscratch is already in the native byte-ordering.) 61.75 __ cmp( Rkey, Rscratch ); 61.76 - if ( VM_Version::v9_instructions_work() ) { 61.77 - __ movcc( Assembler::less, false, Assembler::icc, Rh, Rj ); // j = h if (key < array[h].fast_match()) 61.78 - __ movcc( Assembler::greaterEqual, false, Assembler::icc, Rh, Ri ); // i = h if (key >= array[h].fast_match()) 61.79 - } 61.80 - else { 61.81 - Label end_of_if; 61.82 - __ br( Assembler::less, true, Assembler::pt, end_of_if ); 61.83 - __ delayed()->mov( Rh, Rj ); // if (<) Rj = Rh 61.84 - __ mov( Rh, Ri ); // else i = h 61.85 - __ bind(end_of_if); // } 61.86 - } 61.87 + __ movcc( Assembler::less, false, Assembler::icc, Rh, Rj ); // j = h if (key < array[h].fast_match()) 61.88 + __ movcc( Assembler::greaterEqual, false, Assembler::icc, Rh, Ri ); // i = h if (key >= array[h].fast_match()) 61.89 61.90 // while (i+1 < j) 61.91 __ bind( entry ); 61.92 @@ -3418,9 +3385,7 @@ 61.93 // has been allocated. 61.94 __ cmp_and_brx_short(RnewTopValue, RendValue, Assembler::greaterUnsigned, Assembler::pn, slow_case); 61.95 61.96 - __ casx_under_lock(RtopAddr, RoldTopValue, RnewTopValue, 61.97 - VM_Version::v9_instructions_work() ? 
NULL : 61.98 - (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()); 61.99 + __ cas_ptr(RtopAddr, RoldTopValue, RnewTopValue); 61.100 61.101 // if someone beat us on the allocation, try again, otherwise continue 61.102 __ cmp_and_brx_short(RoldTopValue, RnewTopValue, Assembler::notEqual, Assembler::pn, retry); 61.103 @@ -3701,14 +3666,7 @@ 61.104 61.105 __ verify_oop(O4); // verify each monitor's oop 61.106 __ tst(O4); // is this entry unused? 61.107 - if (VM_Version::v9_instructions_work()) 61.108 - __ movcc( Assembler::zero, false, Assembler::ptr_cc, O3, O1); 61.109 - else { 61.110 - Label L; 61.111 - __ br( Assembler::zero, true, Assembler::pn, L ); 61.112 - __ delayed()->mov(O3, O1); // rememeber this one if match 61.113 - __ bind(L); 61.114 - } 61.115 + __ movcc( Assembler::zero, false, Assembler::ptr_cc, O3, O1); 61.116 61.117 __ cmp(O4, O0); // check if current entry is for same object 61.118 __ brx( Assembler::equal, false, Assembler::pn, exit );
62.1 --- a/src/cpu/sparc/vm/vm_version_sparc.cpp Thu Jul 11 12:59:03 2013 -0400 62.2 +++ b/src/cpu/sparc/vm/vm_version_sparc.cpp Mon Jul 15 11:07:03 2013 +0100 62.3 @@ -75,23 +75,14 @@ 62.4 FLAG_SET_DEFAULT(AllocatePrefetchStyle, 1); 62.5 } 62.6 62.7 - if (has_v9()) { 62.8 - assert(ArraycopySrcPrefetchDistance < 4096, "invalid value"); 62.9 - if (ArraycopySrcPrefetchDistance >= 4096) 62.10 - ArraycopySrcPrefetchDistance = 4064; 62.11 - assert(ArraycopyDstPrefetchDistance < 4096, "invalid value"); 62.12 - if (ArraycopyDstPrefetchDistance >= 4096) 62.13 - ArraycopyDstPrefetchDistance = 4064; 62.14 - } else { 62.15 - if (ArraycopySrcPrefetchDistance > 0) { 62.16 - warning("prefetch instructions are not available on this CPU"); 62.17 - FLAG_SET_DEFAULT(ArraycopySrcPrefetchDistance, 0); 62.18 - } 62.19 - if (ArraycopyDstPrefetchDistance > 0) { 62.20 - warning("prefetch instructions are not available on this CPU"); 62.21 - FLAG_SET_DEFAULT(ArraycopyDstPrefetchDistance, 0); 62.22 - } 62.23 - } 62.24 + guarantee(VM_Version::has_v9(), "only SPARC v9 is supported"); 62.25 + 62.26 + assert(ArraycopySrcPrefetchDistance < 4096, "invalid value"); 62.27 + if (ArraycopySrcPrefetchDistance >= 4096) 62.28 + ArraycopySrcPrefetchDistance = 4064; 62.29 + assert(ArraycopyDstPrefetchDistance < 4096, "invalid value"); 62.30 + if (ArraycopyDstPrefetchDistance >= 4096) 62.31 + ArraycopyDstPrefetchDistance = 4064; 62.32 62.33 UseSSE = 0; // Only on x86 and x64 62.34
63.1 --- a/src/cpu/sparc/vm/vm_version_sparc.hpp Thu Jul 11 12:59:03 2013 -0400 63.2 +++ b/src/cpu/sparc/vm/vm_version_sparc.hpp Mon Jul 15 11:07:03 2013 +0100 63.3 @@ -177,10 +177,6 @@ 63.4 return AllocatePrefetchDistance > 0 ? AllocatePrefetchStyle : 0; 63.5 } 63.6 63.7 - // Legacy 63.8 - static bool v8_instructions_work() { return has_v8() && !has_v9(); } 63.9 - static bool v9_instructions_work() { return has_v9(); } 63.10 - 63.11 // Assembler testing 63.12 static void allow_all(); 63.13 static void revert();
64.1 --- a/src/cpu/x86/vm/assembler_x86.cpp Thu Jul 11 12:59:03 2013 -0400 64.2 +++ b/src/cpu/x86/vm/assembler_x86.cpp Mon Jul 15 11:07:03 2013 +0100 64.3 @@ -1,5 +1,5 @@ 64.4 /* 64.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. 64.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. 64.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 64.8 * 64.9 * This code is free software; you can redistribute it and/or modify it 64.10 @@ -1673,6 +1673,11 @@ 64.11 emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_66); 64.12 } 64.13 64.14 +void Assembler::movdqa(XMMRegister dst, Address src) { 64.15 + NOT_LP64(assert(VM_Version::supports_sse2(), "")); 64.16 + emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_66); 64.17 +} 64.18 + 64.19 void Assembler::movdqu(XMMRegister dst, Address src) { 64.20 NOT_LP64(assert(VM_Version::supports_sse2(), "")); 64.21 emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_F3); 64.22 @@ -2286,6 +2291,38 @@ 64.23 emit_int8(imm8); 64.24 } 64.25 64.26 +void Assembler::pextrd(Register dst, XMMRegister src, int imm8) { 64.27 + assert(VM_Version::supports_sse4_1(), ""); 64.28 + int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, false); 64.29 + emit_int8(0x16); 64.30 + emit_int8((unsigned char)(0xC0 | encode)); 64.31 + emit_int8(imm8); 64.32 +} 64.33 + 64.34 +void Assembler::pextrq(Register dst, XMMRegister src, int imm8) { 64.35 + assert(VM_Version::supports_sse4_1(), ""); 64.36 + int encode = simd_prefix_and_encode(as_XMMRegister(dst->encoding()), xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, true); 64.37 + emit_int8(0x16); 64.38 + emit_int8((unsigned char)(0xC0 | encode)); 64.39 + emit_int8(imm8); 64.40 +} 64.41 + 64.42 +void Assembler::pinsrd(XMMRegister dst, Register src, int imm8) { 64.43 + assert(VM_Version::supports_sse4_1(), ""); 64.44 + int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), 
VEX_SIMD_66, VEX_OPCODE_0F_3A, false); 64.45 + emit_int8(0x22); 64.46 + emit_int8((unsigned char)(0xC0 | encode)); 64.47 + emit_int8(imm8); 64.48 +} 64.49 + 64.50 +void Assembler::pinsrq(XMMRegister dst, Register src, int imm8) { 64.51 + assert(VM_Version::supports_sse4_1(), ""); 64.52 + int encode = simd_prefix_and_encode(dst, dst, as_XMMRegister(src->encoding()), VEX_SIMD_66, VEX_OPCODE_0F_3A, true); 64.53 + emit_int8(0x22); 64.54 + emit_int8((unsigned char)(0xC0 | encode)); 64.55 + emit_int8(imm8); 64.56 +} 64.57 + 64.58 void Assembler::pmovzxbw(XMMRegister dst, Address src) { 64.59 assert(VM_Version::supports_sse4_1(), ""); 64.60 InstructionMark im(this); 64.61 @@ -3691,6 +3728,16 @@ 64.62 emit_int8((unsigned char)(0xC0 | encode)); 64.63 } 64.64 64.65 +// Carry-Less Multiplication Quadword 64.66 +void Assembler::vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask) { 64.67 + assert(VM_Version::supports_avx() && VM_Version::supports_clmul(), ""); 64.68 + bool vector256 = false; 64.69 + int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_3A); 64.70 + emit_int8(0x44); 64.71 + emit_int8((unsigned char)(0xC0 | encode)); 64.72 + emit_int8((unsigned char)mask); 64.73 +} 64.74 + 64.75 void Assembler::vzeroupper() { 64.76 assert(VM_Version::supports_avx(), ""); 64.77 (void)vex_prefix_and_encode(xmm0, xmm0, xmm0, VEX_SIMD_NONE);
65.1 --- a/src/cpu/x86/vm/assembler_x86.hpp Thu Jul 11 12:59:03 2013 -0400 65.2 +++ b/src/cpu/x86/vm/assembler_x86.hpp Mon Jul 15 11:07:03 2013 +0100 65.3 @@ -1266,6 +1266,7 @@ 65.4 65.5 // Move Aligned Double Quadword 65.6 void movdqa(XMMRegister dst, XMMRegister src); 65.7 + void movdqa(XMMRegister dst, Address src); 65.8 65.9 // Move Unaligned Double Quadword 65.10 void movdqu(Address dst, XMMRegister src); 65.11 @@ -1404,6 +1405,14 @@ 65.12 void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8); 65.13 void pcmpestri(XMMRegister xmm1, Address src, int imm8); 65.14 65.15 + // SSE 4.1 extract 65.16 + void pextrd(Register dst, XMMRegister src, int imm8); 65.17 + void pextrq(Register dst, XMMRegister src, int imm8); 65.18 + 65.19 + // SSE 4.1 insert 65.20 + void pinsrd(XMMRegister dst, Register src, int imm8); 65.21 + void pinsrq(XMMRegister dst, Register src, int imm8); 65.22 + 65.23 // SSE4.1 packed move 65.24 void pmovzxbw(XMMRegister dst, XMMRegister src); 65.25 void pmovzxbw(XMMRegister dst, Address src); 65.26 @@ -1764,6 +1773,9 @@ 65.27 // duplicate 4-bytes integer data from src into 8 locations in dest 65.28 void vpbroadcastd(XMMRegister dst, XMMRegister src); 65.29 65.30 + // Carry-Less Multiplication Quadword 65.31 + void vpclmulqdq(XMMRegister dst, XMMRegister nds, XMMRegister src, int mask); 65.32 + 65.33 // AVX instruction which is used to clear upper 128 bits of YMM registers and 65.34 // to avoid transaction penalty between AVX and SSE states. There is no 65.35 // penalty if legacy SSE instructions are encoded using VEX prefix because
66.1 --- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Thu Jul 11 12:59:03 2013 -0400 66.2 +++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp Mon Jul 15 11:07:03 2013 +0100 66.3 @@ -1,5 +1,5 @@ 66.4 /* 66.5 - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. 66.6 + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. 66.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 66.8 * 66.9 * This code is free software; you can redistribute it and/or modify it 66.10 @@ -3512,6 +3512,22 @@ 66.11 __ bind(*stub->continuation()); 66.12 } 66.13 66.14 +void LIR_Assembler::emit_updatecrc32(LIR_OpUpdateCRC32* op) { 66.15 + assert(op->crc()->is_single_cpu(), "crc must be register"); 66.16 + assert(op->val()->is_single_cpu(), "byte value must be register"); 66.17 + assert(op->result_opr()->is_single_cpu(), "result must be register"); 66.18 + Register crc = op->crc()->as_register(); 66.19 + Register val = op->val()->as_register(); 66.20 + Register res = op->result_opr()->as_register(); 66.21 + 66.22 + assert_different_registers(val, crc, res); 66.23 + 66.24 + __ lea(res, ExternalAddress(StubRoutines::crc_table_addr())); 66.25 + __ notl(crc); // ~crc 66.26 + __ update_byte_crc32(crc, val, res); 66.27 + __ notl(crc); // ~crc 66.28 + __ mov(res, crc); 66.29 +} 66.30 66.31 void LIR_Assembler::emit_lock(LIR_OpLock* op) { 66.32 Register obj = op->obj_opr()->as_register(); // may not be an oop
67.1 --- a/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp Thu Jul 11 12:59:03 2013 -0400 67.2 +++ b/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp Mon Jul 15 11:07:03 2013 +0100 67.3 @@ -1,5 +1,5 @@ 67.4 /* 67.5 - * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. 67.6 + * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. 67.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 67.8 * 67.9 * This code is free software; you can redistribute it and/or modify it 67.10 @@ -932,6 +932,81 @@ 67.11 __ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint 67.12 } 67.13 67.14 +void LIRGenerator::do_update_CRC32(Intrinsic* x) { 67.15 + assert(UseCRC32Intrinsics, "need AVX and LCMUL instructions support"); 67.16 + // Make all state_for calls early since they can emit code 67.17 + LIR_Opr result = rlock_result(x); 67.18 + int flags = 0; 67.19 + switch (x->id()) { 67.20 + case vmIntrinsics::_updateCRC32: { 67.21 + LIRItem crc(x->argument_at(0), this); 67.22 + LIRItem val(x->argument_at(1), this); 67.23 + crc.load_item(); 67.24 + val.load_item(); 67.25 + __ update_crc32(crc.result(), val.result(), result); 67.26 + break; 67.27 + } 67.28 + case vmIntrinsics::_updateBytesCRC32: 67.29 + case vmIntrinsics::_updateByteBufferCRC32: { 67.30 + bool is_updateBytes = (x->id() == vmIntrinsics::_updateBytesCRC32); 67.31 + 67.32 + LIRItem crc(x->argument_at(0), this); 67.33 + LIRItem buf(x->argument_at(1), this); 67.34 + LIRItem off(x->argument_at(2), this); 67.35 + LIRItem len(x->argument_at(3), this); 67.36 + buf.load_item(); 67.37 + off.load_nonconstant(); 67.38 + 67.39 + LIR_Opr index = off.result(); 67.40 + int offset = is_updateBytes ? 
arrayOopDesc::base_offset_in_bytes(T_BYTE) : 0; 67.41 + if(off.result()->is_constant()) { 67.42 + index = LIR_OprFact::illegalOpr; 67.43 + offset += off.result()->as_jint(); 67.44 + } 67.45 + LIR_Opr base_op = buf.result(); 67.46 + 67.47 +#ifndef _LP64 67.48 + if (!is_updateBytes) { // long b raw address 67.49 + base_op = new_register(T_INT); 67.50 + __ convert(Bytecodes::_l2i, buf.result(), base_op); 67.51 + } 67.52 +#else 67.53 + if (index->is_valid()) { 67.54 + LIR_Opr tmp = new_register(T_LONG); 67.55 + __ convert(Bytecodes::_i2l, index, tmp); 67.56 + index = tmp; 67.57 + } 67.58 +#endif 67.59 + 67.60 + LIR_Address* a = new LIR_Address(base_op, 67.61 + index, 67.62 + LIR_Address::times_1, 67.63 + offset, 67.64 + T_BYTE); 67.65 + BasicTypeList signature(3); 67.66 + signature.append(T_INT); 67.67 + signature.append(T_ADDRESS); 67.68 + signature.append(T_INT); 67.69 + CallingConvention* cc = frame_map()->c_calling_convention(&signature); 67.70 + const LIR_Opr result_reg = result_register_for(x->type()); 67.71 + 67.72 + LIR_Opr addr = new_pointer_register(); 67.73 + __ leal(LIR_OprFact::address(a), addr); 67.74 + 67.75 + crc.load_item_force(cc->at(0)); 67.76 + __ move(addr, cc->at(1)); 67.77 + len.load_item_force(cc->at(2)); 67.78 + 67.79 + __ call_runtime_leaf(StubRoutines::updateBytesCRC32(), getThreadTemp(), result_reg, cc->args()); 67.80 + __ move(result_reg, result); 67.81 + 67.82 + break; 67.83 + } 67.84 + default: { 67.85 + ShouldNotReachHere(); 67.86 + } 67.87 + } 67.88 +} 67.89 67.90 // _i2l, _i2f, _i2d, _l2i, _l2f, _l2d, _f2i, _f2l, _f2d, _d2i, _d2l, _d2f 67.91 // _i2b, _i2c, _i2s
68.1 --- a/src/cpu/x86/vm/c1_globals_x86.hpp Thu Jul 11 12:59:03 2013 -0400 68.2 +++ b/src/cpu/x86/vm/c1_globals_x86.hpp Mon Jul 15 11:07:03 2013 +0100 68.3 @@ -50,8 +50,9 @@ 68.4 define_pd_global(intx, ReservedCodeCacheSize, 32*M ); 68.5 define_pd_global(bool, ProfileInterpreter, false); 68.6 define_pd_global(intx, CodeCacheExpansionSize, 32*K ); 68.7 -define_pd_global(uintx,CodeCacheMinBlockLength, 1); 68.8 -define_pd_global(uintx,MetaspaceSize, 12*M ); 68.9 +define_pd_global(uintx, CodeCacheMinBlockLength, 1); 68.10 +define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K); 68.11 +define_pd_global(uintx, MetaspaceSize, 12*M ); 68.12 define_pd_global(bool, NeverActAsServerClassMachine, true ); 68.13 define_pd_global(uint64_t,MaxRAM, 1ULL*G); 68.14 define_pd_global(bool, CICompileOSR, true );
69.1 --- a/src/cpu/x86/vm/c2_globals_x86.hpp Thu Jul 11 12:59:03 2013 -0400 69.2 +++ b/src/cpu/x86/vm/c2_globals_x86.hpp Mon Jul 15 11:07:03 2013 +0100 69.3 @@ -85,7 +85,8 @@ 69.4 define_pd_global(bool, OptoBundling, false); 69.5 69.6 define_pd_global(intx, ReservedCodeCacheSize, 48*M); 69.7 -define_pd_global(uintx,CodeCacheMinBlockLength, 4); 69.8 +define_pd_global(uintx, CodeCacheMinBlockLength, 4); 69.9 +define_pd_global(uintx, CodeCacheMinimumUseSpace, 400*K); 69.10 69.11 // Heap related flags 69.12 define_pd_global(uintx,MetaspaceSize, ScaleForWordSize(16*M));
70.1 --- a/src/cpu/x86/vm/frame_x86.cpp Thu Jul 11 12:59:03 2013 -0400 70.2 +++ b/src/cpu/x86/vm/frame_x86.cpp Mon Jul 15 11:07:03 2013 +0100 70.3 @@ -587,7 +587,7 @@ 70.4 70.5 // validate ConstantPoolCache* 70.6 ConstantPoolCache* cp = *interpreter_frame_cache_addr(); 70.7 - if (cp == NULL || !cp->is_metadata()) return false; 70.8 + if (cp == NULL || !cp->is_metaspace_object()) return false; 70.9 70.10 // validate locals 70.11
71.1 --- a/src/cpu/x86/vm/globals_x86.hpp Thu Jul 11 12:59:03 2013 -0400 71.2 +++ b/src/cpu/x86/vm/globals_x86.hpp Mon Jul 15 11:07:03 2013 +0100 71.3 @@ -1,5 +1,5 @@ 71.4 /* 71.5 - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. 71.6 + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. 71.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 71.8 * 71.9 * This code is free software; you can redistribute it and/or modify it 71.10 @@ -55,7 +55,7 @@ 71.11 define_pd_global(intx, InlineFrequencyCount, 100); 71.12 define_pd_global(intx, InlineSmallCode, 1000); 71.13 71.14 -define_pd_global(intx, StackYellowPages, 2); 71.15 +define_pd_global(intx, StackYellowPages, NOT_WINDOWS(2) WINDOWS_ONLY(3)); 71.16 define_pd_global(intx, StackRedPages, 1); 71.17 #ifdef AMD64 71.18 // Very large C++ stack frames using solaris-amd64 optimized builds 71.19 @@ -96,6 +96,9 @@ 71.20 product(intx, UseAVX, 99, \ 71.21 "Highest supported AVX instructions set on x86/x64") \ 71.22 \ 71.23 + product(bool, UseCLMUL, false, \ 71.24 + "Control whether CLMUL instructions can be used on x86/x64") \ 71.25 + \ 71.26 diagnostic(bool, UseIncDec, true, \ 71.27 "Use INC, DEC instructions on x86") \ 71.28 \
72.1 --- a/src/cpu/x86/vm/interpreterGenerator_x86.hpp Thu Jul 11 12:59:03 2013 -0400 72.2 +++ b/src/cpu/x86/vm/interpreterGenerator_x86.hpp Mon Jul 15 11:07:03 2013 +0100 72.3 @@ -1,5 +1,5 @@ 72.4 /* 72.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. 72.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. 72.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 72.8 * 72.9 * This code is free software; you can redistribute it and/or modify it 72.10 @@ -39,6 +39,8 @@ 72.11 address generate_empty_entry(void); 72.12 address generate_accessor_entry(void); 72.13 address generate_Reference_get_entry(); 72.14 + address generate_CRC32_update_entry(); 72.15 + address generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind); 72.16 void lock_method(void); 72.17 void generate_stack_overflow_check(void); 72.18
73.1 --- a/src/cpu/x86/vm/macroAssembler_x86.cpp Thu Jul 11 12:59:03 2013 -0400 73.2 +++ b/src/cpu/x86/vm/macroAssembler_x86.cpp Mon Jul 15 11:07:03 2013 +0100 73.3 @@ -1,5 +1,5 @@ 73.4 /* 73.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. 73.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. 73.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 73.8 * 73.9 * This code is free software; you can redistribute it and/or modify it 73.10 @@ -2794,6 +2794,15 @@ 73.11 } 73.12 } 73.13 73.14 +void MacroAssembler::movdqa(XMMRegister dst, AddressLiteral src) { 73.15 + if (reachable(src)) { 73.16 + Assembler::movdqa(dst, as_Address(src)); 73.17 + } else { 73.18 + lea(rscratch1, src); 73.19 + Assembler::movdqa(dst, Address(rscratch1, 0)); 73.20 + } 73.21 +} 73.22 + 73.23 void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src) { 73.24 if (reachable(src)) { 73.25 Assembler::movsd(dst, as_Address(src)); 73.26 @@ -6388,6 +6397,193 @@ 73.27 bind(L_done); 73.28 } 73.29 73.30 +/** 73.31 + * Emits code to update CRC-32 with a byte value according to constants in table 73.32 + * 73.33 + * @param [in,out]crc Register containing the crc. 73.34 + * @param [in]val Register containing the byte to fold into the CRC. 73.35 + * @param [in]table Register containing the table of crc constants. 
73.36 + * 73.37 + * uint32_t crc; 73.38 + * val = crc_table[(val ^ crc) & 0xFF]; 73.39 + * crc = val ^ (crc >> 8); 73.40 + * 73.41 + */ 73.42 +void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) { 73.43 + xorl(val, crc); 73.44 + andl(val, 0xFF); 73.45 + shrl(crc, 8); // unsigned shift 73.46 + xorl(crc, Address(table, val, Address::times_4, 0)); 73.47 +} 73.48 + 73.49 +/** 73.50 + * Fold 128-bit data chunk 73.51 + */ 73.52 +void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) { 73.53 + vpclmulhdq(xtmp, xK, xcrc); // [123:64] 73.54 + vpclmulldq(xcrc, xK, xcrc); // [63:0] 73.55 + vpxor(xcrc, xcrc, Address(buf, offset), false /* vector256 */); 73.56 + pxor(xcrc, xtmp); 73.57 +} 73.58 + 73.59 +void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf) { 73.60 + vpclmulhdq(xtmp, xK, xcrc); 73.61 + vpclmulldq(xcrc, xK, xcrc); 73.62 + pxor(xcrc, xbuf); 73.63 + pxor(xcrc, xtmp); 73.64 +} 73.65 + 73.66 +/** 73.67 + * 8-bit folds to compute 32-bit CRC 73.68 + * 73.69 + * uint64_t xcrc; 73.70 + * timesXtoThe32[xcrc & 0xFF] ^ (xcrc >> 8); 73.71 + */ 73.72 +void MacroAssembler::fold_8bit_crc32(XMMRegister xcrc, Register table, XMMRegister xtmp, Register tmp) { 73.73 + movdl(tmp, xcrc); 73.74 + andl(tmp, 0xFF); 73.75 + movdl(xtmp, Address(table, tmp, Address::times_4, 0)); 73.76 + psrldq(xcrc, 1); // unsigned shift one byte 73.77 + pxor(xcrc, xtmp); 73.78 +} 73.79 + 73.80 +/** 73.81 + * uint32_t crc; 73.82 + * timesXtoThe32[crc & 0xFF] ^ (crc >> 8); 73.83 + */ 73.84 +void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) { 73.85 + movl(tmp, crc); 73.86 + andl(tmp, 0xFF); 73.87 + shrl(crc, 8); 73.88 + xorl(crc, Address(table, tmp, Address::times_4, 0)); 73.89 +} 73.90 + 73.91 +/** 73.92 + * @param crc register containing existing CRC (32-bit) 73.93 + * @param buf register pointing to input byte buffer (byte*) 
73.94 + * @param len register containing number of bytes 73.95 + * @param table register that will contain address of CRC table 73.96 + * @param tmp scratch register 73.97 + */ 73.98 +void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp) { 73.99 + assert_different_registers(crc, buf, len, table, tmp, rax); 73.100 + 73.101 + Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned; 73.102 + Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop; 73.103 + 73.104 + lea(table, ExternalAddress(StubRoutines::crc_table_addr())); 73.105 + notl(crc); // ~crc 73.106 + cmpl(len, 16); 73.107 + jcc(Assembler::less, L_tail); 73.108 + 73.109 + // Align buffer to 16 bytes 73.110 + movl(tmp, buf); 73.111 + andl(tmp, 0xF); 73.112 + jccb(Assembler::zero, L_aligned); 73.113 + subl(tmp, 16); 73.114 + addl(len, tmp); 73.115 + 73.116 + align(4); 73.117 + BIND(L_align_loop); 73.118 + movsbl(rax, Address(buf, 0)); // load byte with sign extension 73.119 + update_byte_crc32(crc, rax, table); 73.120 + increment(buf); 73.121 + incrementl(tmp); 73.122 + jccb(Assembler::less, L_align_loop); 73.123 + 73.124 + BIND(L_aligned); 73.125 + movl(tmp, len); // save 73.126 + shrl(len, 4); 73.127 + jcc(Assembler::zero, L_tail_restore); 73.128 + 73.129 + // Fold crc into first bytes of vector 73.130 + movdqa(xmm1, Address(buf, 0)); 73.131 + movdl(rax, xmm1); 73.132 + xorl(crc, rax); 73.133 + pinsrd(xmm1, crc, 0); 73.134 + addptr(buf, 16); 73.135 + subl(len, 4); // len > 0 73.136 + jcc(Assembler::less, L_fold_tail); 73.137 + 73.138 + movdqa(xmm2, Address(buf, 0)); 73.139 + movdqa(xmm3, Address(buf, 16)); 73.140 + movdqa(xmm4, Address(buf, 32)); 73.141 + addptr(buf, 48); 73.142 + subl(len, 3); 73.143 + jcc(Assembler::lessEqual, L_fold_512b); 73.144 + 73.145 + // Fold total 512 bits of polynomial on each iteration, 73.146 + // 128 bits per each of 4 parallel streams. 
73.147 + movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32)); 73.148 + 73.149 + align(32); 73.150 + BIND(L_fold_512b_loop); 73.151 + fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0); 73.152 + fold_128bit_crc32(xmm2, xmm0, xmm5, buf, 16); 73.153 + fold_128bit_crc32(xmm3, xmm0, xmm5, buf, 32); 73.154 + fold_128bit_crc32(xmm4, xmm0, xmm5, buf, 48); 73.155 + addptr(buf, 64); 73.156 + subl(len, 4); 73.157 + jcc(Assembler::greater, L_fold_512b_loop); 73.158 + 73.159 + // Fold 512 bits to 128 bits. 73.160 + BIND(L_fold_512b); 73.161 + movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16)); 73.162 + fold_128bit_crc32(xmm1, xmm0, xmm5, xmm2); 73.163 + fold_128bit_crc32(xmm1, xmm0, xmm5, xmm3); 73.164 + fold_128bit_crc32(xmm1, xmm0, xmm5, xmm4); 73.165 + 73.166 + // Fold the rest of 128 bits data chunks 73.167 + BIND(L_fold_tail); 73.168 + addl(len, 3); 73.169 + jccb(Assembler::lessEqual, L_fold_128b); 73.170 + movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16)); 73.171 + 73.172 + BIND(L_fold_tail_loop); 73.173 + fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0); 73.174 + addptr(buf, 16); 73.175 + decrementl(len); 73.176 + jccb(Assembler::greater, L_fold_tail_loop); 73.177 + 73.178 + // Fold 128 bits in xmm1 down into 32 bits in crc register. 73.179 + BIND(L_fold_128b); 73.180 + movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr())); 73.181 + vpclmulqdq(xmm2, xmm0, xmm1, 0x1); 73.182 + vpand(xmm3, xmm0, xmm2, false /* vector256 */); 73.183 + vpclmulqdq(xmm0, xmm0, xmm3, 0x1); 73.184 + psrldq(xmm1, 8); 73.185 + psrldq(xmm2, 4); 73.186 + pxor(xmm0, xmm1); 73.187 + pxor(xmm0, xmm2); 73.188 + 73.189 + // 8 8-bit folds to compute 32-bit CRC. 
73.190 + for (int j = 0; j < 4; j++) { 73.191 + fold_8bit_crc32(xmm0, table, xmm1, rax); 73.192 + } 73.193 + movdl(crc, xmm0); // mov 32 bits to general register 73.194 + for (int j = 0; j < 4; j++) { 73.195 + fold_8bit_crc32(crc, table, rax); 73.196 + } 73.197 + 73.198 + BIND(L_tail_restore); 73.199 + movl(len, tmp); // restore 73.200 + BIND(L_tail); 73.201 + andl(len, 0xf); 73.202 + jccb(Assembler::zero, L_exit); 73.203 + 73.204 + // Fold the rest of bytes 73.205 + align(4); 73.206 + BIND(L_tail_loop); 73.207 + movsbl(rax, Address(buf, 0)); // load byte with sign extension 73.208 + update_byte_crc32(crc, rax, table); 73.209 + increment(buf); 73.210 + decrementl(len); 73.211 + jccb(Assembler::greater, L_tail_loop); 73.212 + 73.213 + BIND(L_exit); 73.214 + notl(crc); // ~c 73.215 +} 73.216 + 73.217 #undef BIND 73.218 #undef BLOCK_COMMENT 73.219
74.1 --- a/src/cpu/x86/vm/macroAssembler_x86.hpp Thu Jul 11 12:59:03 2013 -0400 74.2 +++ b/src/cpu/x86/vm/macroAssembler_x86.hpp Mon Jul 15 11:07:03 2013 +0100 74.3 @@ -1,5 +1,5 @@ 74.4 /* 74.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. 74.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. 74.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 74.8 * 74.9 * This code is free software; you can redistribute it and/or modify it 74.10 @@ -899,6 +899,11 @@ 74.11 void movdqu(XMMRegister dst, XMMRegister src) { Assembler::movdqu(dst, src); } 74.12 void movdqu(XMMRegister dst, AddressLiteral src); 74.13 74.14 + // Move Aligned Double Quadword 74.15 + void movdqa(XMMRegister dst, Address src) { Assembler::movdqa(dst, src); } 74.16 + void movdqa(XMMRegister dst, XMMRegister src) { Assembler::movdqa(dst, src); } 74.17 + void movdqa(XMMRegister dst, AddressLiteral src); 74.18 + 74.19 void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); } 74.20 void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); } 74.21 void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); } 74.22 @@ -1027,6 +1032,16 @@ 74.23 Assembler::vinsertf128h(dst, nds, src); 74.24 } 74.25 74.26 + // Carry-Less Multiplication Quadword 74.27 + void vpclmulldq(XMMRegister dst, XMMRegister nds, XMMRegister src) { 74.28 + // 0x00 - multiply lower 64 bits [0:63] 74.29 + Assembler::vpclmulqdq(dst, nds, src, 0x00); 74.30 + } 74.31 + void vpclmulhdq(XMMRegister dst, XMMRegister nds, XMMRegister src) { 74.32 + // 0x11 - multiply upper 64 bits [64:127] 74.33 + Assembler::vpclmulqdq(dst, nds, src, 0x11); 74.34 + } 74.35 + 74.36 // Data 74.37 74.38 void cmov32( Condition cc, Register dst, Address src); 74.39 @@ -1143,6 +1158,16 @@ 74.40 XMMRegister tmp1, XMMRegister tmp2, XMMRegister tmp3, 74.41 XMMRegister tmp4, Register tmp5, Register result); 74.42 74.43 + // CRC32 code for 
java.util.zip.CRC32::updateBytes() instrinsic. 74.44 + void update_byte_crc32(Register crc, Register val, Register table); 74.45 + void kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp); 74.46 + // Fold 128-bit data chunk 74.47 + void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset); 74.48 + void fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf); 74.49 + // Fold 8-bit data 74.50 + void fold_8bit_crc32(Register crc, Register table, Register tmp); 74.51 + void fold_8bit_crc32(XMMRegister crc, Register table, XMMRegister xtmp, Register tmp); 74.52 + 74.53 #undef VIRTUAL 74.54 74.55 };
75.1 --- a/src/cpu/x86/vm/relocInfo_x86.cpp Thu Jul 11 12:59:03 2013 -0400 75.2 +++ b/src/cpu/x86/vm/relocInfo_x86.cpp Mon Jul 15 11:07:03 2013 +0100 75.3 @@ -177,30 +177,6 @@ 75.4 return *pd_address_in_code(); 75.5 } 75.6 75.7 -int Relocation::pd_breakpoint_size() { 75.8 - // minimum breakpoint size, in short words 75.9 - return NativeIllegalInstruction::instruction_size / sizeof(short); 75.10 -} 75.11 - 75.12 -void Relocation::pd_swap_in_breakpoint(address x, short* instrs, int instrlen) { 75.13 - Untested("pd_swap_in_breakpoint"); 75.14 - if (instrs != NULL) { 75.15 - assert(instrlen * sizeof(short) == NativeIllegalInstruction::instruction_size, "enough instrlen in reloc. data"); 75.16 - for (int i = 0; i < instrlen; i++) { 75.17 - instrs[i] = ((short*)x)[i]; 75.18 - } 75.19 - } 75.20 - NativeIllegalInstruction::insert(x); 75.21 -} 75.22 - 75.23 - 75.24 -void Relocation::pd_swap_out_breakpoint(address x, short* instrs, int instrlen) { 75.25 - Untested("pd_swap_out_breakpoint"); 75.26 - assert(NativeIllegalInstruction::instruction_size == sizeof(short), "right address unit for update"); 75.27 - NativeInstruction* ni = nativeInstruction_at(x); 75.28 - *(short*)ni->addr_at(0) = instrs[0]; 75.29 -} 75.30 - 75.31 void poll_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) { 75.32 #ifdef _LP64 75.33 if (!Assembler::is_polling_page_far()) {
76.1 --- a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp Thu Jul 11 12:59:03 2013 -0400 76.2 +++ b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp Mon Jul 15 11:07:03 2013 +0100 76.3 @@ -1429,6 +1429,8 @@ 76.4 assert(!length_arg.first()->is_Register() || length_arg.first()->as_Register() != tmp_reg, 76.5 "possible collision"); 76.6 76.7 + __ block_comment("unpack_array_argument {"); 76.8 + 76.9 // Pass the length, ptr pair 76.10 Label is_null, done; 76.11 VMRegPair tmp; 76.12 @@ -1453,6 +1455,8 @@ 76.13 move_ptr(masm, tmp, body_arg); 76.14 move32_64(masm, tmp, length_arg); 76.15 __ bind(done); 76.16 + 76.17 + __ block_comment("} unpack_array_argument"); 76.18 } 76.19 76.20 76.21 @@ -2170,27 +2174,34 @@ 76.22 } 76.23 } 76.24 76.25 - // point c_arg at the first arg that is already loaded in case we 76.26 - // need to spill before we call out 76.27 - int c_arg = total_c_args - total_in_args; 76.28 + int c_arg; 76.29 76.30 // Pre-load a static method's oop into r14. Used both by locking code and 76.31 // the normal JNI call code. 76.32 - if (method->is_static() && !is_critical_native) { 76.33 - 76.34 - // load oop into a register 76.35 - __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror())); 76.36 - 76.37 - // Now handlize the static class mirror it's known not-null. 
76.38 - __ movptr(Address(rsp, klass_offset), oop_handle_reg); 76.39 - map->set_oop(VMRegImpl::stack2reg(klass_slot_offset)); 76.40 - 76.41 - // Now get the handle 76.42 - __ lea(oop_handle_reg, Address(rsp, klass_offset)); 76.43 - // store the klass handle as second argument 76.44 - __ movptr(c_rarg1, oop_handle_reg); 76.45 - // and protect the arg if we must spill 76.46 - c_arg--; 76.47 + if (!is_critical_native) { 76.48 + // point c_arg at the first arg that is already loaded in case we 76.49 + // need to spill before we call out 76.50 + c_arg = total_c_args - total_in_args; 76.51 + 76.52 + if (method->is_static()) { 76.53 + 76.54 + // load oop into a register 76.55 + __ movoop(oop_handle_reg, JNIHandles::make_local(method->method_holder()->java_mirror())); 76.56 + 76.57 + // Now handlize the static class mirror it's known not-null. 76.58 + __ movptr(Address(rsp, klass_offset), oop_handle_reg); 76.59 + map->set_oop(VMRegImpl::stack2reg(klass_slot_offset)); 76.60 + 76.61 + // Now get the handle 76.62 + __ lea(oop_handle_reg, Address(rsp, klass_offset)); 76.63 + // store the klass handle as second argument 76.64 + __ movptr(c_rarg1, oop_handle_reg); 76.65 + // and protect the arg if we must spill 76.66 + c_arg--; 76.67 + } 76.68 + } else { 76.69 + // For JNI critical methods we need to save all registers in save_args. 76.70 + c_arg = 0; 76.71 } 76.72 76.73 // Change state to native (we save the return address in the thread, since it might not
77.1 --- a/src/cpu/x86/vm/stubGenerator_x86_32.cpp Thu Jul 11 12:59:03 2013 -0400 77.2 +++ b/src/cpu/x86/vm/stubGenerator_x86_32.cpp Mon Jul 15 11:07:03 2013 +0100 77.3 @@ -1,5 +1,5 @@ 77.4 /* 77.5 - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. 77.6 + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. 77.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 77.8 * 77.9 * This code is free software; you can redistribute it and/or modify it 77.10 @@ -83,7 +83,7 @@ 77.11 private: 77.12 77.13 #ifdef PRODUCT 77.14 -#define inc_counter_np(counter) (0) 77.15 +#define inc_counter_np(counter) ((void)0) 77.16 #else 77.17 void inc_counter_np_(int& counter) { 77.18 __ incrementl(ExternalAddress((address)&counter)); 77.19 @@ -2713,6 +2713,59 @@ 77.20 return start; 77.21 } 77.22 77.23 + /** 77.24 + * Arguments: 77.25 + * 77.26 + * Inputs: 77.27 + * rsp(4) - int crc 77.28 + * rsp(8) - byte* buf 77.29 + * rsp(12) - int length 77.30 + * 77.31 + * Ouput: 77.32 + * rax - int crc result 77.33 + */ 77.34 + address generate_updateBytesCRC32() { 77.35 + assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions"); 77.36 + 77.37 + __ align(CodeEntryAlignment); 77.38 + StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32"); 77.39 + 77.40 + address start = __ pc(); 77.41 + 77.42 + const Register crc = rdx; // crc 77.43 + const Register buf = rsi; // source java byte array address 77.44 + const Register len = rcx; // length 77.45 + const Register table = rdi; // crc_table address (reuse register) 77.46 + const Register tmp = rbx; 77.47 + assert_different_registers(crc, buf, len, table, tmp, rax); 77.48 + 77.49 + BLOCK_COMMENT("Entry:"); 77.50 + __ enter(); // required for proper stackwalking of RuntimeStub frame 77.51 + __ push(rsi); 77.52 + __ push(rdi); 77.53 + __ push(rbx); 77.54 + 77.55 + Address crc_arg(rbp, 8 + 0); 77.56 + Address buf_arg(rbp, 8 + 4); 77.57 + Address len_arg(rbp, 8 + 8); 77.58 + 77.59 + // 
Load up: 77.60 + __ movl(crc, crc_arg); 77.61 + __ movptr(buf, buf_arg); 77.62 + __ movl(len, len_arg); 77.63 + 77.64 + __ kernel_crc32(crc, buf, len, table, tmp); 77.65 + 77.66 + __ movl(rax, crc); 77.67 + __ pop(rbx); 77.68 + __ pop(rdi); 77.69 + __ pop(rsi); 77.70 + __ leave(); // required for proper stackwalking of RuntimeStub frame 77.71 + __ ret(0); 77.72 + 77.73 + return start; 77.74 + } 77.75 + 77.76 77.77 public: 77.78 // Information about frame layout at time of blocking runtime call. 77.79 @@ -2887,6 +2940,12 @@ 77.80 77.81 // Build this early so it's available for the interpreter 77.82 StubRoutines::_throw_StackOverflowError_entry = generate_throw_exception("StackOverflowError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_StackOverflowError)); 77.83 + 77.84 + if (UseCRC32Intrinsics) { 77.85 + // set table address before stub generation which use it 77.86 + StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table; 77.87 + StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32(); 77.88 + } 77.89 } 77.90 77.91
78.1 --- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp Thu Jul 11 12:59:03 2013 -0400 78.2 +++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp Mon Jul 15 11:07:03 2013 +0100 78.3 @@ -1,5 +1,5 @@ 78.4 /* 78.5 - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. 78.6 + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. 78.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 78.8 * 78.9 * This code is free software; you can redistribute it and/or modify it 78.10 @@ -81,7 +81,7 @@ 78.11 private: 78.12 78.13 #ifdef PRODUCT 78.14 -#define inc_counter_np(counter) (0) 78.15 +#define inc_counter_np(counter) ((void)0) 78.16 #else 78.17 void inc_counter_np_(int& counter) { 78.18 // This can destroy rscratch1 if counter is far from the code cache 78.19 @@ -3584,7 +3584,45 @@ 78.20 return start; 78.21 } 78.22 78.23 - 78.24 + /** 78.25 + * Arguments: 78.26 + * 78.27 + * Inputs: 78.28 + * c_rarg0 - int crc 78.29 + * c_rarg1 - byte* buf 78.30 + * c_rarg2 - int length 78.31 + * 78.32 + * Ouput: 78.33 + * rax - int crc result 78.34 + */ 78.35 + address generate_updateBytesCRC32() { 78.36 + assert(UseCRC32Intrinsics, "need AVX and CLMUL instructions"); 78.37 + 78.38 + __ align(CodeEntryAlignment); 78.39 + StubCodeMark mark(this, "StubRoutines", "updateBytesCRC32"); 78.40 + 78.41 + address start = __ pc(); 78.42 + // Win64: rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...) 78.43 + // Unix: rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...) 
78.44 + // rscratch1: r10 78.45 + const Register crc = c_rarg0; // crc 78.46 + const Register buf = c_rarg1; // source java byte array address 78.47 + const Register len = c_rarg2; // length 78.48 + const Register table = c_rarg3; // crc_table address (reuse register) 78.49 + const Register tmp = r11; 78.50 + assert_different_registers(crc, buf, len, table, tmp, rax); 78.51 + 78.52 + BLOCK_COMMENT("Entry:"); 78.53 + __ enter(); // required for proper stackwalking of RuntimeStub frame 78.54 + 78.55 + __ kernel_crc32(crc, buf, len, table, tmp); 78.56 + 78.57 + __ movl(rax, crc); 78.58 + __ leave(); // required for proper stackwalking of RuntimeStub frame 78.59 + __ ret(0); 78.60 + 78.61 + return start; 78.62 + } 78.63 78.64 #undef __ 78.65 #define __ masm-> 78.66 @@ -3736,6 +3774,11 @@ 78.67 CAST_FROM_FN_PTR(address, 78.68 SharedRuntime:: 78.69 throw_StackOverflowError)); 78.70 + if (UseCRC32Intrinsics) { 78.71 + // set table address before stub generation which use it 78.72 + StubRoutines::_crc_table_adr = (address)StubRoutines::x86::_crc_table; 78.73 + StubRoutines::_updateBytesCRC32 = generate_updateBytesCRC32(); 78.74 + } 78.75 } 78.76 78.77 void generate_all() {
79.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 79.2 +++ b/src/cpu/x86/vm/stubRoutines_x86.cpp Mon Jul 15 11:07:03 2013 +0100 79.3 @@ -0,0 +1,130 @@ 79.4 +/* 79.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. 79.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 79.7 + * 79.8 + * This code is free software; you can redistribute it and/or modify it 79.9 + * under the terms of the GNU General Public License version 2 only, as 79.10 + * published by the Free Software Foundation. 79.11 + * 79.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 79.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 79.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 79.15 + * version 2 for more details (a copy is included in the LICENSE file that 79.16 + * accompanied this code). 79.17 + * 79.18 + * You should have received a copy of the GNU General Public License version 79.19 + * 2 along with this work; if not, write to the Free Software Foundation, 79.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 79.21 + * 79.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 79.23 + * or visit www.oracle.com if you need additional information or have any 79.24 + * questions. 79.25 + * 79.26 + */ 79.27 + 79.28 +#include "precompiled.hpp" 79.29 +#include "runtime/deoptimization.hpp" 79.30 +#include "runtime/frame.inline.hpp" 79.31 +#include "runtime/stubRoutines.hpp" 79.32 +#include "runtime/thread.inline.hpp" 79.33 + 79.34 +// Implementation of the platform-specific part of StubRoutines - for 79.35 +// a description of how to extend it, see the stubRoutines.hpp file. 
79.36 + 79.37 +address StubRoutines::x86::_verify_mxcsr_entry = NULL; 79.38 +address StubRoutines::x86::_key_shuffle_mask_addr = NULL; 79.39 + 79.40 +uint64_t StubRoutines::x86::_crc_by128_masks[] = 79.41 +{ 79.42 + /* The fields in this structure are arranged so that they can be 79.43 + * picked up two at a time with 128-bit loads. 79.44 + * 79.45 + * Because of flipped bit order for this CRC polynomials 79.46 + * the constant for X**N is left-shifted by 1. This is because 79.47 + * a 64 x 64 polynomial multiply produces a 127-bit result 79.48 + * but the highest term is always aligned to bit 0 in the container. 79.49 + * Pre-shifting by one fixes this, at the cost of potentially making 79.50 + * the 32-bit constant no longer fit in a 32-bit container (thus the 79.51 + * use of uint64_t, though this is also the size used by the carry- 79.52 + * less multiply instruction. 79.53 + * 79.54 + * In addition, the flipped bit order and highest-term-at-least-bit 79.55 + * multiply changes the constants used. The 96-bit result will be 79.56 + * aligned to the high-term end of the target 128-bit container, 79.57 + * not the low-term end; that is, instead of a 512-bit or 576-bit fold, 79.58 + * instead it is a 480 (=512-32) or 544 (=512+64-32) bit fold. 79.59 + * 79.60 + * This cause additional problems in the 128-to-64-bit reduction; see the 79.61 + * code for details. By storing a mask in the otherwise unused half of 79.62 + * a 128-bit constant, bits can be cleared before multiplication without 79.63 + * storing and reloading. Note that staying on a 128-bit datapath means 79.64 + * that some data is uselessly stored and some unused data is intersected 79.65 + * with an irrelevant constant. 
79.66 + */ 79.67 + 79.68 + ((uint64_t) 0xffffffffUL), /* low of K_M_64 */ 79.69 + ((uint64_t) 0xb1e6b092U << 1), /* high of K_M_64 */ 79.70 + ((uint64_t) 0xba8ccbe8U << 1), /* low of K_160_96 */ 79.71 + ((uint64_t) 0x6655004fU << 1), /* high of K_160_96 */ 79.72 + ((uint64_t) 0xaa2215eaU << 1), /* low of K_544_480 */ 79.73 + ((uint64_t) 0xe3720acbU << 1) /* high of K_544_480 */ 79.74 +}; 79.75 + 79.76 +/** 79.77 + * crc_table[] from jdk/src/share/native/java/util/zip/zlib-1.2.5/crc32.h 79.78 + */ 79.79 +juint StubRoutines::x86::_crc_table[] = 79.80 +{ 79.81 + 0x00000000UL, 0x77073096UL, 0xee0e612cUL, 0x990951baUL, 0x076dc419UL, 79.82 + 0x706af48fUL, 0xe963a535UL, 0x9e6495a3UL, 0x0edb8832UL, 0x79dcb8a4UL, 79.83 + 0xe0d5e91eUL, 0x97d2d988UL, 0x09b64c2bUL, 0x7eb17cbdUL, 0xe7b82d07UL, 79.84 + 0x90bf1d91UL, 0x1db71064UL, 0x6ab020f2UL, 0xf3b97148UL, 0x84be41deUL, 79.85 + 0x1adad47dUL, 0x6ddde4ebUL, 0xf4d4b551UL, 0x83d385c7UL, 0x136c9856UL, 79.86 + 0x646ba8c0UL, 0xfd62f97aUL, 0x8a65c9ecUL, 0x14015c4fUL, 0x63066cd9UL, 79.87 + 0xfa0f3d63UL, 0x8d080df5UL, 0x3b6e20c8UL, 0x4c69105eUL, 0xd56041e4UL, 79.88 + 0xa2677172UL, 0x3c03e4d1UL, 0x4b04d447UL, 0xd20d85fdUL, 0xa50ab56bUL, 79.89 + 0x35b5a8faUL, 0x42b2986cUL, 0xdbbbc9d6UL, 0xacbcf940UL, 0x32d86ce3UL, 79.90 + 0x45df5c75UL, 0xdcd60dcfUL, 0xabd13d59UL, 0x26d930acUL, 0x51de003aUL, 79.91 + 0xc8d75180UL, 0xbfd06116UL, 0x21b4f4b5UL, 0x56b3c423UL, 0xcfba9599UL, 79.92 + 0xb8bda50fUL, 0x2802b89eUL, 0x5f058808UL, 0xc60cd9b2UL, 0xb10be924UL, 79.93 + 0x2f6f7c87UL, 0x58684c11UL, 0xc1611dabUL, 0xb6662d3dUL, 0x76dc4190UL, 79.94 + 0x01db7106UL, 0x98d220bcUL, 0xefd5102aUL, 0x71b18589UL, 0x06b6b51fUL, 79.95 + 0x9fbfe4a5UL, 0xe8b8d433UL, 0x7807c9a2UL, 0x0f00f934UL, 0x9609a88eUL, 79.96 + 0xe10e9818UL, 0x7f6a0dbbUL, 0x086d3d2dUL, 0x91646c97UL, 0xe6635c01UL, 79.97 + 0x6b6b51f4UL, 0x1c6c6162UL, 0x856530d8UL, 0xf262004eUL, 0x6c0695edUL, 79.98 + 0x1b01a57bUL, 0x8208f4c1UL, 0xf50fc457UL, 0x65b0d9c6UL, 0x12b7e950UL, 79.99 + 0x8bbeb8eaUL, 0xfcb9887cUL, 
0x62dd1ddfUL, 0x15da2d49UL, 0x8cd37cf3UL, 79.100 + 0xfbd44c65UL, 0x4db26158UL, 0x3ab551ceUL, 0xa3bc0074UL, 0xd4bb30e2UL, 79.101 + 0x4adfa541UL, 0x3dd895d7UL, 0xa4d1c46dUL, 0xd3d6f4fbUL, 0x4369e96aUL, 79.102 + 0x346ed9fcUL, 0xad678846UL, 0xda60b8d0UL, 0x44042d73UL, 0x33031de5UL, 79.103 + 0xaa0a4c5fUL, 0xdd0d7cc9UL, 0x5005713cUL, 0x270241aaUL, 0xbe0b1010UL, 79.104 + 0xc90c2086UL, 0x5768b525UL, 0x206f85b3UL, 0xb966d409UL, 0xce61e49fUL, 79.105 + 0x5edef90eUL, 0x29d9c998UL, 0xb0d09822UL, 0xc7d7a8b4UL, 0x59b33d17UL, 79.106 + 0x2eb40d81UL, 0xb7bd5c3bUL, 0xc0ba6cadUL, 0xedb88320UL, 0x9abfb3b6UL, 79.107 + 0x03b6e20cUL, 0x74b1d29aUL, 0xead54739UL, 0x9dd277afUL, 0x04db2615UL, 79.108 + 0x73dc1683UL, 0xe3630b12UL, 0x94643b84UL, 0x0d6d6a3eUL, 0x7a6a5aa8UL, 79.109 + 0xe40ecf0bUL, 0x9309ff9dUL, 0x0a00ae27UL, 0x7d079eb1UL, 0xf00f9344UL, 79.110 + 0x8708a3d2UL, 0x1e01f268UL, 0x6906c2feUL, 0xf762575dUL, 0x806567cbUL, 79.111 + 0x196c3671UL, 0x6e6b06e7UL, 0xfed41b76UL, 0x89d32be0UL, 0x10da7a5aUL, 79.112 + 0x67dd4accUL, 0xf9b9df6fUL, 0x8ebeeff9UL, 0x17b7be43UL, 0x60b08ed5UL, 79.113 + 0xd6d6a3e8UL, 0xa1d1937eUL, 0x38d8c2c4UL, 0x4fdff252UL, 0xd1bb67f1UL, 79.114 + 0xa6bc5767UL, 0x3fb506ddUL, 0x48b2364bUL, 0xd80d2bdaUL, 0xaf0a1b4cUL, 79.115 + 0x36034af6UL, 0x41047a60UL, 0xdf60efc3UL, 0xa867df55UL, 0x316e8eefUL, 79.116 + 0x4669be79UL, 0xcb61b38cUL, 0xbc66831aUL, 0x256fd2a0UL, 0x5268e236UL, 79.117 + 0xcc0c7795UL, 0xbb0b4703UL, 0x220216b9UL, 0x5505262fUL, 0xc5ba3bbeUL, 79.118 + 0xb2bd0b28UL, 0x2bb45a92UL, 0x5cb36a04UL, 0xc2d7ffa7UL, 0xb5d0cf31UL, 79.119 + 0x2cd99e8bUL, 0x5bdeae1dUL, 0x9b64c2b0UL, 0xec63f226UL, 0x756aa39cUL, 79.120 + 0x026d930aUL, 0x9c0906a9UL, 0xeb0e363fUL, 0x72076785UL, 0x05005713UL, 79.121 + 0x95bf4a82UL, 0xe2b87a14UL, 0x7bb12baeUL, 0x0cb61b38UL, 0x92d28e9bUL, 79.122 + 0xe5d5be0dUL, 0x7cdcefb7UL, 0x0bdbdf21UL, 0x86d3d2d4UL, 0xf1d4e242UL, 79.123 + 0x68ddb3f8UL, 0x1fda836eUL, 0x81be16cdUL, 0xf6b9265bUL, 0x6fb077e1UL, 79.124 + 0x18b74777UL, 0x88085ae6UL, 0xff0f6a70UL, 
0x66063bcaUL, 0x11010b5cUL, 79.125 + 0x8f659effUL, 0xf862ae69UL, 0x616bffd3UL, 0x166ccf45UL, 0xa00ae278UL, 79.126 + 0xd70dd2eeUL, 0x4e048354UL, 0x3903b3c2UL, 0xa7672661UL, 0xd06016f7UL, 79.127 + 0x4969474dUL, 0x3e6e77dbUL, 0xaed16a4aUL, 0xd9d65adcUL, 0x40df0b66UL, 79.128 + 0x37d83bf0UL, 0xa9bcae53UL, 0xdebb9ec5UL, 0x47b2cf7fUL, 0x30b5ffe9UL, 79.129 + 0xbdbdf21cUL, 0xcabac28aUL, 0x53b39330UL, 0x24b4a3a6UL, 0xbad03605UL, 79.130 + 0xcdd70693UL, 0x54de5729UL, 0x23d967bfUL, 0xb3667a2eUL, 0xc4614ab8UL, 79.131 + 0x5d681b02UL, 0x2a6f2b94UL, 0xb40bbe37UL, 0xc30c8ea1UL, 0x5a05df1bUL, 79.132 + 0x2d02ef8dUL 79.133 +};
80.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 80.2 +++ b/src/cpu/x86/vm/stubRoutines_x86.hpp Mon Jul 15 11:07:03 2013 +0100 80.3 @@ -0,0 +1,45 @@ 80.4 +/* 80.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. 80.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 80.7 + * 80.8 + * This code is free software; you can redistribute it and/or modify it 80.9 + * under the terms of the GNU General Public License version 2 only, as 80.10 + * published by the Free Software Foundation. 80.11 + * 80.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 80.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 80.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 80.15 + * version 2 for more details (a copy is included in the LICENSE file that 80.16 + * accompanied this code). 80.17 + * 80.18 + * You should have received a copy of the GNU General Public License version 80.19 + * 2 along with this work; if not, write to the Free Software Foundation, 80.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 80.21 + * 80.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 80.23 + * or visit www.oracle.com if you need additional information or have any 80.24 + * questions. 80.25 + * 80.26 + */ 80.27 + 80.28 +#ifndef CPU_X86_VM_STUBROUTINES_X86_HPP 80.29 +#define CPU_X86_VM_STUBROUTINES_X86_HPP 80.30 + 80.31 +// This file holds the platform specific parts of the StubRoutines 80.32 +// definition. See stubRoutines.hpp for a description on how to 80.33 +// extend it. 
80.34 + 80.35 + private: 80.36 + static address _verify_mxcsr_entry; 80.37 + // shuffle mask for fixing up 128-bit words consisting of big-endian 32-bit integers 80.38 + static address _key_shuffle_mask_addr; 80.39 + // masks and table for CRC32 80.40 + static uint64_t _crc_by128_masks[]; 80.41 + static juint _crc_table[]; 80.42 + 80.43 + public: 80.44 + static address verify_mxcsr_entry() { return _verify_mxcsr_entry; } 80.45 + static address key_shuffle_mask_addr() { return _key_shuffle_mask_addr; } 80.46 + static address crc_by128_masks_addr() { return (address)_crc_by128_masks; } 80.47 + 80.48 +#endif // CPU_X86_VM_STUBROUTINES_X86_32_HPP
81.1 --- a/src/cpu/x86/vm/stubRoutines_x86_32.cpp Thu Jul 11 12:59:03 2013 -0400 81.2 +++ b/src/cpu/x86/vm/stubRoutines_x86_32.cpp Mon Jul 15 11:07:03 2013 +0100 81.3 @@ -1,5 +1,5 @@ 81.4 /* 81.5 - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. 81.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. 81.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 81.8 * 81.9 * This code is free software; you can redistribute it and/or modify it 81.10 @@ -31,6 +31,4 @@ 81.11 // Implementation of the platform-specific part of StubRoutines - for 81.12 // a description of how to extend it, see the stubRoutines.hpp file. 81.13 81.14 -address StubRoutines::x86::_verify_mxcsr_entry = NULL; 81.15 address StubRoutines::x86::_verify_fpu_cntrl_wrd_entry = NULL; 81.16 -address StubRoutines::x86::_key_shuffle_mask_addr = NULL;
82.1 --- a/src/cpu/x86/vm/stubRoutines_x86_32.hpp Thu Jul 11 12:59:03 2013 -0400 82.2 +++ b/src/cpu/x86/vm/stubRoutines_x86_32.hpp Mon Jul 15 11:07:03 2013 +0100 82.3 @@ -1,5 +1,5 @@ 82.4 /* 82.5 - * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved. 82.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. 82.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 82.8 * 82.9 * This code is free software; you can redistribute it and/or modify it 82.10 @@ -39,15 +39,12 @@ 82.11 friend class VMStructs; 82.12 82.13 private: 82.14 - static address _verify_mxcsr_entry; 82.15 static address _verify_fpu_cntrl_wrd_entry; 82.16 - // shuffle mask for fixing up 128-bit words consisting of big-endian 32-bit integers 82.17 - static address _key_shuffle_mask_addr; 82.18 82.19 public: 82.20 - static address verify_mxcsr_entry() { return _verify_mxcsr_entry; } 82.21 static address verify_fpu_cntrl_wrd_entry() { return _verify_fpu_cntrl_wrd_entry; } 82.22 - static address key_shuffle_mask_addr() { return _key_shuffle_mask_addr; } 82.23 + 82.24 +# include "stubRoutines_x86.hpp" 82.25 82.26 }; 82.27
83.1 --- a/src/cpu/x86/vm/stubRoutines_x86_64.cpp Thu Jul 11 12:59:03 2013 -0400 83.2 +++ b/src/cpu/x86/vm/stubRoutines_x86_64.cpp Mon Jul 15 11:07:03 2013 +0100 83.3 @@ -1,5 +1,5 @@ 83.4 /* 83.5 - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. 83.6 + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. 83.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 83.8 * 83.9 * This code is free software; you can redistribute it and/or modify it 83.10 @@ -34,8 +34,6 @@ 83.11 address StubRoutines::x86::_get_previous_fp_entry = NULL; 83.12 address StubRoutines::x86::_get_previous_sp_entry = NULL; 83.13 83.14 -address StubRoutines::x86::_verify_mxcsr_entry = NULL; 83.15 - 83.16 address StubRoutines::x86::_f2i_fixup = NULL; 83.17 address StubRoutines::x86::_f2l_fixup = NULL; 83.18 address StubRoutines::x86::_d2i_fixup = NULL; 83.19 @@ -45,4 +43,3 @@ 83.20 address StubRoutines::x86::_double_sign_mask = NULL; 83.21 address StubRoutines::x86::_double_sign_flip = NULL; 83.22 address StubRoutines::x86::_mxcsr_std = NULL; 83.23 -address StubRoutines::x86::_key_shuffle_mask_addr = NULL;
84.1 --- a/src/cpu/x86/vm/stubRoutines_x86_64.hpp Thu Jul 11 12:59:03 2013 -0400 84.2 +++ b/src/cpu/x86/vm/stubRoutines_x86_64.hpp Mon Jul 15 11:07:03 2013 +0100 84.3 @@ -1,5 +1,5 @@ 84.4 /* 84.5 - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. 84.6 + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. 84.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 84.8 * 84.9 * This code is free software; you can redistribute it and/or modify it 84.10 @@ -42,7 +42,6 @@ 84.11 private: 84.12 static address _get_previous_fp_entry; 84.13 static address _get_previous_sp_entry; 84.14 - static address _verify_mxcsr_entry; 84.15 84.16 static address _f2i_fixup; 84.17 static address _f2l_fixup; 84.18 @@ -54,8 +53,6 @@ 84.19 static address _double_sign_mask; 84.20 static address _double_sign_flip; 84.21 static address _mxcsr_std; 84.22 - // shuffle mask for fixing up 128-bit words consisting of big-endian 32-bit integers 84.23 - static address _key_shuffle_mask_addr; 84.24 84.25 public: 84.26 84.27 @@ -69,11 +66,6 @@ 84.28 return _get_previous_sp_entry; 84.29 } 84.30 84.31 - static address verify_mxcsr_entry() 84.32 - { 84.33 - return _verify_mxcsr_entry; 84.34 - } 84.35 - 84.36 static address f2i_fixup() 84.37 { 84.38 return _f2i_fixup; 84.39 @@ -119,7 +111,7 @@ 84.40 return _mxcsr_std; 84.41 } 84.42 84.43 - static address key_shuffle_mask_addr() { return _key_shuffle_mask_addr; } 84.44 +# include "stubRoutines_x86.hpp" 84.45 84.46 }; 84.47
85.1 --- a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp Thu Jul 11 12:59:03 2013 -0400 85.2 +++ b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp Mon Jul 15 11:07:03 2013 +0100 85.3 @@ -868,6 +868,120 @@ 85.4 return generate_accessor_entry(); 85.5 } 85.6 85.7 +/** 85.8 + * Method entry for static native methods: 85.9 + * int java.util.zip.CRC32.update(int crc, int b) 85.10 + */ 85.11 +address InterpreterGenerator::generate_CRC32_update_entry() { 85.12 + if (UseCRC32Intrinsics) { 85.13 + address entry = __ pc(); 85.14 + 85.15 + // rbx,: Method* 85.16 + // rsi: senderSP must preserved for slow path, set SP to it on fast path 85.17 + // rdx: scratch 85.18 + // rdi: scratch 85.19 + 85.20 + Label slow_path; 85.21 + // If we need a safepoint check, generate full interpreter entry. 85.22 + ExternalAddress state(SafepointSynchronize::address_of_state()); 85.23 + __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()), 85.24 + SafepointSynchronize::_not_synchronized); 85.25 + __ jcc(Assembler::notEqual, slow_path); 85.26 + 85.27 + // We don't generate local frame and don't align stack because 85.28 + // we call stub code and there is no safepoint on this path. 
85.29 + 85.30 + // Load parameters 85.31 + const Register crc = rax; // crc 85.32 + const Register val = rdx; // source java byte value 85.33 + const Register tbl = rdi; // scratch 85.34 + 85.35 + // Arguments are reversed on java expression stack 85.36 + __ movl(val, Address(rsp, wordSize)); // byte value 85.37 + __ movl(crc, Address(rsp, 2*wordSize)); // Initial CRC 85.38 + 85.39 + __ lea(tbl, ExternalAddress(StubRoutines::crc_table_addr())); 85.40 + __ notl(crc); // ~crc 85.41 + __ update_byte_crc32(crc, val, tbl); 85.42 + __ notl(crc); // ~crc 85.43 + // result in rax 85.44 + 85.45 + // _areturn 85.46 + __ pop(rdi); // get return address 85.47 + __ mov(rsp, rsi); // set sp to sender sp 85.48 + __ jmp(rdi); 85.49 + 85.50 + // generate a vanilla native entry as the slow path 85.51 + __ bind(slow_path); 85.52 + 85.53 + (void) generate_native_entry(false); 85.54 + 85.55 + return entry; 85.56 + } 85.57 + return generate_native_entry(false); 85.58 +} 85.59 + 85.60 +/** 85.61 + * Method entry for static native methods: 85.62 + * int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len) 85.63 + * int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len) 85.64 + */ 85.65 +address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { 85.66 + if (UseCRC32Intrinsics) { 85.67 + address entry = __ pc(); 85.68 + 85.69 + // rbx,: Method* 85.70 + // rsi: senderSP must preserved for slow path, set SP to it on fast path 85.71 + // rdx: scratch 85.72 + // rdi: scratch 85.73 + 85.74 + Label slow_path; 85.75 + // If we need a safepoint check, generate full interpreter entry. 
85.76 + ExternalAddress state(SafepointSynchronize::address_of_state()); 85.77 + __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()), 85.78 + SafepointSynchronize::_not_synchronized); 85.79 + __ jcc(Assembler::notEqual, slow_path); 85.80 + 85.81 + // We don't generate local frame and don't align stack because 85.82 + // we call stub code and there is no safepoint on this path. 85.83 + 85.84 + // Load parameters 85.85 + const Register crc = rax; // crc 85.86 + const Register buf = rdx; // source java byte array address 85.87 + const Register len = rdi; // length 85.88 + 85.89 + // Arguments are reversed on java expression stack 85.90 + __ movl(len, Address(rsp, wordSize)); // Length 85.91 + // Calculate address of start element 85.92 + if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) { 85.93 + __ movptr(buf, Address(rsp, 3*wordSize)); // long buf 85.94 + __ addptr(buf, Address(rsp, 2*wordSize)); // + offset 85.95 + __ movl(crc, Address(rsp, 5*wordSize)); // Initial CRC 85.96 + } else { 85.97 + __ movptr(buf, Address(rsp, 3*wordSize)); // byte[] array 85.98 + __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size 85.99 + __ addptr(buf, Address(rsp, 2*wordSize)); // + offset 85.100 + __ movl(crc, Address(rsp, 4*wordSize)); // Initial CRC 85.101 + } 85.102 + 85.103 + __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len); 85.104 + // result in rax 85.105 + 85.106 + // _areturn 85.107 + __ pop(rdi); // get return address 85.108 + __ mov(rsp, rsi); // set sp to sender sp 85.109 + __ jmp(rdi); 85.110 + 85.111 + // generate a vanilla native entry as the slow path 85.112 + __ bind(slow_path); 85.113 + 85.114 + (void) generate_native_entry(false); 85.115 + 85.116 + return entry; 85.117 + } 85.118 + return generate_native_entry(false); 85.119 +} 85.120 + 85.121 // 85.122 // Interpreter stub for calling a native method. 
(asm interpreter) 85.123 // This sets up a somewhat different looking stack for calling the native method 85.124 @@ -1501,15 +1615,16 @@ 85.125 // determine code generation flags 85.126 bool synchronized = false; 85.127 address entry_point = NULL; 85.128 + InterpreterGenerator* ig_this = (InterpreterGenerator*)this; 85.129 85.130 switch (kind) { 85.131 - case Interpreter::zerolocals : break; 85.132 - case Interpreter::zerolocals_synchronized: synchronized = true; break; 85.133 - case Interpreter::native : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false); break; 85.134 - case Interpreter::native_synchronized : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(true); break; 85.135 - case Interpreter::empty : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry(); break; 85.136 - case Interpreter::accessor : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break; 85.137 - case Interpreter::abstract : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break; 85.138 + case Interpreter::zerolocals : break; 85.139 + case Interpreter::zerolocals_synchronized: synchronized = true; break; 85.140 + case Interpreter::native : entry_point = ig_this->generate_native_entry(false); break; 85.141 + case Interpreter::native_synchronized : entry_point = ig_this->generate_native_entry(true); break; 85.142 + case Interpreter::empty : entry_point = ig_this->generate_empty_entry(); break; 85.143 + case Interpreter::accessor : entry_point = ig_this->generate_accessor_entry(); break; 85.144 + case Interpreter::abstract : entry_point = ig_this->generate_abstract_entry(); break; 85.145 85.146 case Interpreter::java_lang_math_sin : // fall thru 85.147 case Interpreter::java_lang_math_cos : // fall thru 85.148 @@ -1519,9 +1634,15 @@ 85.149 case Interpreter::java_lang_math_log10 : // fall thru 85.150 case Interpreter::java_lang_math_sqrt : // fall thru 85.151 case Interpreter::java_lang_math_pow : 
// fall thru 85.152 - case Interpreter::java_lang_math_exp : entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind); break; 85.153 + case Interpreter::java_lang_math_exp : entry_point = ig_this->generate_math_entry(kind); break; 85.154 case Interpreter::java_lang_ref_reference_get 85.155 - : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break; 85.156 + : entry_point = ig_this->generate_Reference_get_entry(); break; 85.157 + case Interpreter::java_util_zip_CRC32_update 85.158 + : entry_point = ig_this->generate_CRC32_update_entry(); break; 85.159 + case Interpreter::java_util_zip_CRC32_updateBytes 85.160 + : // fall thru 85.161 + case Interpreter::java_util_zip_CRC32_updateByteBuffer 85.162 + : entry_point = ig_this->generate_CRC32_updateBytes_entry(kind); break; 85.163 default: 85.164 fatal(err_msg("unexpected method kind: %d", kind)); 85.165 break; 85.166 @@ -1529,7 +1650,7 @@ 85.167 85.168 if (entry_point) return entry_point; 85.169 85.170 - return ((InterpreterGenerator*)this)->generate_normal_entry(synchronized); 85.171 + return ig_this->generate_normal_entry(synchronized); 85.172 85.173 } 85.174
86.1 --- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Thu Jul 11 12:59:03 2013 -0400 86.2 +++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Mon Jul 15 11:07:03 2013 +0100 86.3 @@ -840,6 +840,117 @@ 86.4 return generate_accessor_entry(); 86.5 } 86.6 86.7 +/** 86.8 + * Method entry for static native methods: 86.9 + * int java.util.zip.CRC32.update(int crc, int b) 86.10 + */ 86.11 +address InterpreterGenerator::generate_CRC32_update_entry() { 86.12 + if (UseCRC32Intrinsics) { 86.13 + address entry = __ pc(); 86.14 + 86.15 + // rbx,: Method* 86.16 + // rsi: senderSP must preserved for slow path, set SP to it on fast path 86.17 + // rdx: scratch 86.18 + // rdi: scratch 86.19 + 86.20 + Label slow_path; 86.21 + // If we need a safepoint check, generate full interpreter entry. 86.22 + ExternalAddress state(SafepointSynchronize::address_of_state()); 86.23 + __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()), 86.24 + SafepointSynchronize::_not_synchronized); 86.25 + __ jcc(Assembler::notEqual, slow_path); 86.26 + 86.27 + // We don't generate local frame and don't align stack because 86.28 + // we call stub code and there is no safepoint on this path. 
86.29 + 86.30 + // Load parameters 86.31 + const Register crc = rax; // crc 86.32 + const Register val = rdx; // source java byte value 86.33 + const Register tbl = rdi; // scratch 86.34 + 86.35 + // Arguments are reversed on java expression stack 86.36 + __ movl(val, Address(rsp, wordSize)); // byte value 86.37 + __ movl(crc, Address(rsp, 2*wordSize)); // Initial CRC 86.38 + 86.39 + __ lea(tbl, ExternalAddress(StubRoutines::crc_table_addr())); 86.40 + __ notl(crc); // ~crc 86.41 + __ update_byte_crc32(crc, val, tbl); 86.42 + __ notl(crc); // ~crc 86.43 + // result in rax 86.44 + 86.45 + // _areturn 86.46 + __ pop(rdi); // get return address 86.47 + __ mov(rsp, rsi); // set sp to sender sp 86.48 + __ jmp(rdi); 86.49 + 86.50 + // generate a vanilla native entry as the slow path 86.51 + __ bind(slow_path); 86.52 + 86.53 + (void) generate_native_entry(false); 86.54 + 86.55 + return entry; 86.56 + } 86.57 + return generate_native_entry(false); 86.58 +} 86.59 + 86.60 +/** 86.61 + * Method entry for static native methods: 86.62 + * int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len) 86.63 + * int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len) 86.64 + */ 86.65 +address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) { 86.66 + if (UseCRC32Intrinsics) { 86.67 + address entry = __ pc(); 86.68 + 86.69 + // rbx,: Method* 86.70 + // r13: senderSP must preserved for slow path, set SP to it on fast path 86.71 + 86.72 + Label slow_path; 86.73 + // If we need a safepoint check, generate full interpreter entry. 
86.74 + ExternalAddress state(SafepointSynchronize::address_of_state()); 86.75 + __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()), 86.76 + SafepointSynchronize::_not_synchronized); 86.77 + __ jcc(Assembler::notEqual, slow_path); 86.78 + 86.79 + // We don't generate local frame and don't align stack because 86.80 + // we call stub code and there is no safepoint on this path. 86.81 + 86.82 + // Load parameters 86.83 + const Register crc = c_rarg0; // crc 86.84 + const Register buf = c_rarg1; // source java byte array address 86.85 + const Register len = c_rarg2; // length 86.86 + 86.87 + // Arguments are reversed on java expression stack 86.88 + __ movl(len, Address(rsp, wordSize)); // Length 86.89 + // Calculate address of start element 86.90 + if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) { 86.91 + __ movptr(buf, Address(rsp, 3*wordSize)); // long buf 86.92 + __ addptr(buf, Address(rsp, 2*wordSize)); // + offset 86.93 + __ movl(crc, Address(rsp, 5*wordSize)); // Initial CRC 86.94 + } else { 86.95 + __ movptr(buf, Address(rsp, 3*wordSize)); // byte[] array 86.96 + __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size 86.97 + __ addptr(buf, Address(rsp, 2*wordSize)); // + offset 86.98 + __ movl(crc, Address(rsp, 4*wordSize)); // Initial CRC 86.99 + } 86.100 + 86.101 + __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len); 86.102 + // result in rax 86.103 + 86.104 + // _areturn 86.105 + __ pop(rdi); // get return address 86.106 + __ mov(rsp, r13); // set sp to sender sp 86.107 + __ jmp(rdi); 86.108 + 86.109 + // generate a vanilla native entry as the slow path 86.110 + __ bind(slow_path); 86.111 + 86.112 + (void) generate_native_entry(false); 86.113 + 86.114 + return entry; 86.115 + } 86.116 + return generate_native_entry(false); 86.117 +} 86.118 86.119 // Interpreter stub for calling a native method. 
(asm interpreter) 86.120 // This sets up a somewhat different looking stack for calling the 86.121 @@ -1510,15 +1621,16 @@ 86.122 // determine code generation flags 86.123 bool synchronized = false; 86.124 address entry_point = NULL; 86.125 + InterpreterGenerator* ig_this = (InterpreterGenerator*)this; 86.126 86.127 switch (kind) { 86.128 - case Interpreter::zerolocals : break; 86.129 - case Interpreter::zerolocals_synchronized: synchronized = true; break; 86.130 - case Interpreter::native : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(false); break; 86.131 - case Interpreter::native_synchronized : entry_point = ((InterpreterGenerator*)this)->generate_native_entry(true); break; 86.132 - case Interpreter::empty : entry_point = ((InterpreterGenerator*)this)->generate_empty_entry(); break; 86.133 - case Interpreter::accessor : entry_point = ((InterpreterGenerator*)this)->generate_accessor_entry(); break; 86.134 - case Interpreter::abstract : entry_point = ((InterpreterGenerator*)this)->generate_abstract_entry(); break; 86.135 + case Interpreter::zerolocals : break; 86.136 + case Interpreter::zerolocals_synchronized: synchronized = true; break; 86.137 + case Interpreter::native : entry_point = ig_this->generate_native_entry(false); break; 86.138 + case Interpreter::native_synchronized : entry_point = ig_this->generate_native_entry(true); break; 86.139 + case Interpreter::empty : entry_point = ig_this->generate_empty_entry(); break; 86.140 + case Interpreter::accessor : entry_point = ig_this->generate_accessor_entry(); break; 86.141 + case Interpreter::abstract : entry_point = ig_this->generate_abstract_entry(); break; 86.142 86.143 case Interpreter::java_lang_math_sin : // fall thru 86.144 case Interpreter::java_lang_math_cos : // fall thru 86.145 @@ -1528,9 +1640,15 @@ 86.146 case Interpreter::java_lang_math_log10 : // fall thru 86.147 case Interpreter::java_lang_math_sqrt : // fall thru 86.148 case Interpreter::java_lang_math_pow : // fall thru 
86.149 - case Interpreter::java_lang_math_exp : entry_point = ((InterpreterGenerator*)this)->generate_math_entry(kind); break; 86.150 + case Interpreter::java_lang_math_exp : entry_point = ig_this->generate_math_entry(kind); break; 86.151 case Interpreter::java_lang_ref_reference_get 86.152 - : entry_point = ((InterpreterGenerator*)this)->generate_Reference_get_entry(); break; 86.153 + : entry_point = ig_this->generate_Reference_get_entry(); break; 86.154 + case Interpreter::java_util_zip_CRC32_update 86.155 + : entry_point = ig_this->generate_CRC32_update_entry(); break; 86.156 + case Interpreter::java_util_zip_CRC32_updateBytes 86.157 + : // fall thru 86.158 + case Interpreter::java_util_zip_CRC32_updateByteBuffer 86.159 + : entry_point = ig_this->generate_CRC32_updateBytes_entry(kind); break; 86.160 default: 86.161 fatal(err_msg("unexpected method kind: %d", kind)); 86.162 break; 86.163 @@ -1540,8 +1658,7 @@ 86.164 return entry_point; 86.165 } 86.166 86.167 - return ((InterpreterGenerator*) this)-> 86.168 - generate_normal_entry(synchronized); 86.169 + return ig_this->generate_normal_entry(synchronized); 86.170 } 86.171 86.172 // These should never be compiled since the interpreter will prefer
87.1 --- a/src/cpu/x86/vm/vm_version_x86.cpp Thu Jul 11 12:59:03 2013 -0400 87.2 +++ b/src/cpu/x86/vm/vm_version_x86.cpp Mon Jul 15 11:07:03 2013 +0100 87.3 @@ -1,5 +1,5 @@ 87.4 /* 87.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. 87.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. 87.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 87.8 * 87.9 * This code is free software; you can redistribute it and/or modify it 87.10 @@ -446,6 +446,7 @@ 87.11 (supports_avx() ? ", avx" : ""), 87.12 (supports_avx2() ? ", avx2" : ""), 87.13 (supports_aes() ? ", aes" : ""), 87.14 + (supports_clmul() ? ", clmul" : ""), 87.15 (supports_erms() ? ", erms" : ""), 87.16 (supports_mmx_ext() ? ", mmxext" : ""), 87.17 (supports_3dnow_prefetch() ? ", 3dnowpref" : ""), 87.18 @@ -489,6 +490,27 @@ 87.19 FLAG_SET_DEFAULT(UseAES, false); 87.20 } 87.21 87.22 + // Use CLMUL instructions if available. 87.23 + if (supports_clmul()) { 87.24 + if (FLAG_IS_DEFAULT(UseCLMUL)) { 87.25 + UseCLMUL = true; 87.26 + } 87.27 + } else if (UseCLMUL) { 87.28 + if (!FLAG_IS_DEFAULT(UseCLMUL)) 87.29 + warning("CLMUL instructions not available on this CPU (AVX may also be required)"); 87.30 + FLAG_SET_DEFAULT(UseCLMUL, false); 87.31 + } 87.32 + 87.33 + if (UseCLMUL && (UseAVX > 0) && (UseSSE > 2)) { 87.34 + if (FLAG_IS_DEFAULT(UseCRC32Intrinsics)) { 87.35 + UseCRC32Intrinsics = true; 87.36 + } 87.37 + } else if (UseCRC32Intrinsics) { 87.38 + if (!FLAG_IS_DEFAULT(UseCRC32Intrinsics)) 87.39 + warning("CRC32 Intrinsics requires AVX and CLMUL instructions (not available on this CPU)"); 87.40 + FLAG_SET_DEFAULT(UseCRC32Intrinsics, false); 87.41 + } 87.42 + 87.43 // The AES intrinsic stubs require AES instruction support (of course) 87.44 // but also require sse3 mode for instructions it use. 87.45 if (UseAES && (UseSSE > 2)) {
88.1 --- a/src/cpu/x86/vm/vm_version_x86.hpp Thu Jul 11 12:59:03 2013 -0400 88.2 +++ b/src/cpu/x86/vm/vm_version_x86.hpp Mon Jul 15 11:07:03 2013 +0100 88.3 @@ -1,5 +1,5 @@ 88.4 /* 88.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. 88.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. 88.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 88.8 * 88.9 * This code is free software; you can redistribute it and/or modify it 88.10 @@ -61,7 +61,8 @@ 88.11 uint32_t value; 88.12 struct { 88.13 uint32_t sse3 : 1, 88.14 - : 2, 88.15 + clmul : 1, 88.16 + : 1, 88.17 monitor : 1, 88.18 : 1, 88.19 vmx : 1, 88.20 @@ -249,7 +250,8 @@ 88.21 CPU_AVX = (1 << 17), 88.22 CPU_AVX2 = (1 << 18), 88.23 CPU_AES = (1 << 19), 88.24 - CPU_ERMS = (1 << 20) // enhanced 'rep movsb/stosb' instructions 88.25 + CPU_ERMS = (1 << 20), // enhanced 'rep movsb/stosb' instructions 88.26 + CPU_CLMUL = (1 << 21) // carryless multiply for CRC 88.27 } cpuFeatureFlags; 88.28 88.29 enum { 88.30 @@ -429,6 +431,8 @@ 88.31 result |= CPU_AES; 88.32 if (_cpuid_info.sef_cpuid7_ebx.bits.erms != 0) 88.33 result |= CPU_ERMS; 88.34 + if (_cpuid_info.std_cpuid1_ecx.bits.clmul != 0) 88.35 + result |= CPU_CLMUL; 88.36 88.37 // AMD features. 88.38 if (is_amd()) { 88.39 @@ -555,6 +559,7 @@ 88.40 static bool supports_tsc() { return (_cpuFeatures & CPU_TSC) != 0; } 88.41 static bool supports_aes() { return (_cpuFeatures & CPU_AES) != 0; } 88.42 static bool supports_erms() { return (_cpuFeatures & CPU_ERMS) != 0; } 88.43 + static bool supports_clmul() { return (_cpuFeatures & CPU_CLMUL) != 0; } 88.44 88.45 // Intel features 88.46 static bool is_intel_family_core() { return is_intel() &&
89.1 --- a/src/cpu/zero/vm/relocInfo_zero.cpp Thu Jul 11 12:59:03 2013 -0400 89.2 +++ b/src/cpu/zero/vm/relocInfo_zero.cpp Mon Jul 15 11:07:03 2013 +0100 89.3 @@ -52,22 +52,6 @@ 89.4 return (address *) addr(); 89.5 } 89.6 89.7 -int Relocation::pd_breakpoint_size() { 89.8 - ShouldNotCallThis(); 89.9 -} 89.10 - 89.11 -void Relocation::pd_swap_in_breakpoint(address x, 89.12 - short* instrs, 89.13 - int instrlen) { 89.14 - ShouldNotCallThis(); 89.15 -} 89.16 - 89.17 -void Relocation::pd_swap_out_breakpoint(address x, 89.18 - short* instrs, 89.19 - int instrlen) { 89.20 - ShouldNotCallThis(); 89.21 -} 89.22 - 89.23 void poll_Relocation::fix_relocation_after_move(const CodeBuffer* src, 89.24 CodeBuffer* dst) { 89.25 ShouldNotCallThis();
90.1 --- a/src/cpu/zero/vm/shark_globals_zero.hpp Thu Jul 11 12:59:03 2013 -0400 90.2 +++ b/src/cpu/zero/vm/shark_globals_zero.hpp Mon Jul 15 11:07:03 2013 +0100 90.3 @@ -58,7 +58,9 @@ 90.4 define_pd_global(bool, ProfileInterpreter, false); 90.5 define_pd_global(intx, CodeCacheExpansionSize, 32*K ); 90.6 define_pd_global(uintx, CodeCacheMinBlockLength, 1 ); 90.7 -define_pd_global(uintx, MetaspaceSize, 12*M ); 90.8 +define_pd_global(uintx, CodeCacheMinimumUseSpace, 200*K); 90.9 + 90.10 +define_pd_global(uintx, MetaspaceSize, 12*M ); 90.11 define_pd_global(bool, NeverActAsServerClassMachine, true ); 90.12 define_pd_global(uint64_t, MaxRAM, 1ULL*G); 90.13 define_pd_global(bool, CICompileOSR, true );
91.1 --- a/src/os/bsd/dtrace/jvm_dtrace.c Thu Jul 11 12:59:03 2013 -0400 91.2 +++ b/src/os/bsd/dtrace/jvm_dtrace.c Mon Jul 15 11:07:03 2013 +0100 91.3 @@ -122,9 +122,7 @@ 91.4 } 91.5 91.6 static int file_close(int fd) { 91.7 - int ret; 91.8 - RESTARTABLE(close(fd), ret); 91.9 - return ret; 91.10 + return close(fd); 91.11 } 91.12 91.13 static int file_read(int fd, char* buf, int len) {
92.1 --- a/src/os/bsd/vm/attachListener_bsd.cpp Thu Jul 11 12:59:03 2013 -0400 92.2 +++ b/src/os/bsd/vm/attachListener_bsd.cpp Mon Jul 15 11:07:03 2013 +0100 92.3 @@ -199,7 +199,7 @@ 92.4 ::unlink(initial_path); 92.5 int res = ::bind(listener, (struct sockaddr*)&addr, sizeof(addr)); 92.6 if (res == -1) { 92.7 - RESTARTABLE(::close(listener), res); 92.8 + ::close(listener); 92.9 return -1; 92.10 } 92.11 92.12 @@ -217,7 +217,7 @@ 92.13 } 92.14 } 92.15 if (res == -1) { 92.16 - RESTARTABLE(::close(listener), res); 92.17 + ::close(listener); 92.18 ::unlink(initial_path); 92.19 return -1; 92.20 } 92.21 @@ -345,24 +345,21 @@ 92.22 uid_t puid; 92.23 gid_t pgid; 92.24 if (::getpeereid(s, &puid, &pgid) != 0) { 92.25 - int res; 92.26 - RESTARTABLE(::close(s), res); 92.27 + ::close(s); 92.28 continue; 92.29 } 92.30 uid_t euid = geteuid(); 92.31 gid_t egid = getegid(); 92.32 92.33 if (puid != euid || pgid != egid) { 92.34 - int res; 92.35 - RESTARTABLE(::close(s), res); 92.36 + ::close(s); 92.37 continue; 92.38 } 92.39 92.40 // peer credential look okay so we read the request 92.41 BsdAttachOperation* op = read_request(s); 92.42 if (op == NULL) { 92.43 - int res; 92.44 - RESTARTABLE(::close(s), res); 92.45 + ::close(s); 92.46 continue; 92.47 } else { 92.48 return op; 92.49 @@ -413,7 +410,7 @@ 92.50 } 92.51 92.52 // done 92.53 - RESTARTABLE(::close(this->socket()), rc); 92.54 + ::close(this->socket()); 92.55 92.56 // were we externally suspended while we were waiting? 92.57 thread->check_and_wait_while_suspended();
93.1 --- a/src/os/bsd/vm/os_bsd.cpp Thu Jul 11 12:59:03 2013 -0400 93.2 +++ b/src/os/bsd/vm/os_bsd.cpp Mon Jul 15 11:07:03 2013 +0100 93.3 @@ -2074,6 +2074,13 @@ 93.4 } 93.5 } 93.6 93.7 +static void warn_fail_commit_memory(char* addr, size_t size, bool exec, 93.8 + int err) { 93.9 + warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT 93.10 + ", %d) failed; error='%s' (errno=%d)", addr, size, exec, 93.11 + strerror(err), err); 93.12 +} 93.13 + 93.14 // NOTE: Bsd kernel does not really reserve the pages for us. 93.15 // All it does is to check if there are enough free pages 93.16 // left at the time of mmap(). This could be a potential 93.17 @@ -2082,18 +2089,45 @@ 93.18 int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE; 93.19 #ifdef __OpenBSD__ 93.20 // XXX: Work-around mmap/MAP_FIXED bug temporarily on OpenBSD 93.21 - return ::mprotect(addr, size, prot) == 0; 93.22 + if (::mprotect(addr, size, prot) == 0) { 93.23 + return true; 93.24 + } 93.25 #else 93.26 uintptr_t res = (uintptr_t) ::mmap(addr, size, prot, 93.27 MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0); 93.28 - return res != (uintptr_t) MAP_FAILED; 93.29 + if (res != (uintptr_t) MAP_FAILED) { 93.30 + return true; 93.31 + } 93.32 #endif 93.33 + 93.34 + // Warn about any commit errors we see in non-product builds just 93.35 + // in case mmap() doesn't work as described on the man page. 
93.36 + NOT_PRODUCT(warn_fail_commit_memory(addr, size, exec, errno);) 93.37 + 93.38 + return false; 93.39 } 93.40 93.41 - 93.42 bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, 93.43 bool exec) { 93.44 - return commit_memory(addr, size, exec); 93.45 + // alignment_hint is ignored on this OS 93.46 + return pd_commit_memory(addr, size, exec); 93.47 +} 93.48 + 93.49 +void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec, 93.50 + const char* mesg) { 93.51 + assert(mesg != NULL, "mesg must be specified"); 93.52 + if (!pd_commit_memory(addr, size, exec)) { 93.53 + // add extra info in product mode for vm_exit_out_of_memory(): 93.54 + PRODUCT_ONLY(warn_fail_commit_memory(addr, size, exec, errno);) 93.55 + vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg); 93.56 + } 93.57 +} 93.58 + 93.59 +void os::pd_commit_memory_or_exit(char* addr, size_t size, 93.60 + size_t alignment_hint, bool exec, 93.61 + const char* mesg) { 93.62 + // alignment_hint is ignored on this OS 93.63 + pd_commit_memory_or_exit(addr, size, exec, mesg); 93.64 } 93.65 93.66 void os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { 93.67 @@ -2148,7 +2182,7 @@ 93.68 } 93.69 93.70 bool os::pd_create_stack_guard_pages(char* addr, size_t size) { 93.71 - return os::commit_memory(addr, size); 93.72 + return os::commit_memory(addr, size, !ExecMem); 93.73 } 93.74 93.75 // If this is a growable mapping, remove the guard pages entirely by 93.76 @@ -2320,21 +2354,20 @@ 93.77 } 93.78 93.79 // The memory is committed 93.80 - address pc = CALLER_PC; 93.81 - MemTracker::record_virtual_memory_reserve((address)addr, bytes, pc); 93.82 - MemTracker::record_virtual_memory_commit((address)addr, bytes, pc); 93.83 + MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC); 93.84 93.85 return addr; 93.86 } 93.87 93.88 bool os::release_memory_special(char* base, size_t bytes) { 93.89 + MemTracker::Tracker tkr = 
MemTracker::get_virtual_memory_release_tracker(); 93.90 // detaching the SHM segment will also delete it, see reserve_memory_special() 93.91 int rslt = shmdt(base); 93.92 if (rslt == 0) { 93.93 - MemTracker::record_virtual_memory_uncommit((address)base, bytes); 93.94 - MemTracker::record_virtual_memory_release((address)base, bytes); 93.95 + tkr.record((address)base, bytes); 93.96 return true; 93.97 } else { 93.98 + tkr.discard(); 93.99 return false; 93.100 } 93.101 93.102 @@ -3512,7 +3545,7 @@ 93.103 93.104 if (!UseMembar) { 93.105 address mem_serialize_page = (address) ::mmap(NULL, Bsd::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); 93.106 - guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page"); 93.107 + guarantee( mem_serialize_page != MAP_FAILED, "mmap Failed for memory serialize page"); 93.108 os::set_memory_serialize_page( mem_serialize_page ); 93.109 93.110 #ifndef PRODUCT
94.1 --- a/src/os/bsd/vm/os_bsd.inline.hpp Thu Jul 11 12:59:03 2013 -0400 94.2 +++ b/src/os/bsd/vm/os_bsd.inline.hpp Mon Jul 15 11:07:03 2013 +0100 94.3 @@ -178,11 +178,11 @@ 94.4 } 94.5 94.6 inline int os::close(int fd) { 94.7 - RESTARTABLE_RETURN_INT(::close(fd)); 94.8 + return ::close(fd); 94.9 } 94.10 94.11 inline int os::socket_close(int fd) { 94.12 - RESTARTABLE_RETURN_INT(::close(fd)); 94.13 + return ::close(fd); 94.14 } 94.15 94.16 inline int os::socket(int domain, int type, int protocol) {
95.1 --- a/src/os/bsd/vm/perfMemory_bsd.cpp Thu Jul 11 12:59:03 2013 -0400 95.2 +++ b/src/os/bsd/vm/perfMemory_bsd.cpp Mon Jul 15 11:07:03 2013 +0100 95.3 @@ -1,5 +1,5 @@ 95.4 /* 95.5 - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. 95.6 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. 95.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 95.8 * 95.9 * This code is free software; you can redistribute it and/or modify it 95.10 @@ -60,7 +60,7 @@ 95.11 } 95.12 95.13 // commit memory 95.14 - if (!os::commit_memory(mapAddress, size)) { 95.15 + if (!os::commit_memory(mapAddress, size, !ExecMem)) { 95.16 if (PrintMiscellaneous && Verbose) { 95.17 warning("Could not commit PerfData memory\n"); 95.18 } 95.19 @@ -120,7 +120,7 @@ 95.20 addr += result; 95.21 } 95.22 95.23 - RESTARTABLE(::close(fd), result); 95.24 + result = ::close(fd); 95.25 if (PrintMiscellaneous && Verbose) { 95.26 if (result == OS_ERR) { 95.27 warning("Could not close %s: %s\n", destfile, strerror(errno)); 95.28 @@ -632,7 +632,7 @@ 95.29 if (PrintMiscellaneous && Verbose) { 95.30 warning("could not set shared memory file size: %s\n", strerror(errno)); 95.31 } 95.32 - RESTARTABLE(::close(fd), result); 95.33 + ::close(fd); 95.34 return -1; 95.35 } 95.36 95.37 @@ -656,7 +656,7 @@ 95.38 if (result != -1) { 95.39 return fd; 95.40 } else { 95.41 - RESTARTABLE(::close(fd), result); 95.42 + ::close(fd); 95.43 return -1; 95.44 } 95.45 } 95.46 @@ -734,9 +734,7 @@ 95.47 95.48 mapAddress = (char*)::mmap((char*)0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); 95.49 95.50 - // attempt to close the file - restart it if it was interrupted, 95.51 - // but ignore other failures 95.52 - RESTARTABLE(::close(fd), result); 95.53 + result = ::close(fd); 95.54 assert(result != OS_ERR, "could not close file"); 95.55 95.56 if (mapAddress == MAP_FAILED) { 95.57 @@ -755,8 +753,7 @@ 95.58 (void)::memset((void*) mapAddress, 0, size); 95.59 95.60 // it does 
not go through os api, the operation has to record from here 95.61 - MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC); 95.62 - MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal); 95.63 + MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC); 95.64 95.65 return mapAddress; 95.66 } 95.67 @@ -909,7 +906,7 @@ 95.68 95.69 // attempt to close the file - restart if it gets interrupted, 95.70 // but ignore other failures 95.71 - RESTARTABLE(::close(fd), result); 95.72 + result = ::close(fd); 95.73 assert(result != OS_ERR, "could not close file"); 95.74 95.75 if (mapAddress == MAP_FAILED) { 95.76 @@ -921,8 +918,7 @@ 95.77 } 95.78 95.79 // it does not go through os api, the operation has to record from here 95.80 - MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC); 95.81 - MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal); 95.82 + MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC); 95.83 95.84 *addr = mapAddress; 95.85 *sizep = size;
96.1 --- a/src/os/linux/vm/attachListener_linux.cpp Thu Jul 11 12:59:03 2013 -0400 96.2 +++ b/src/os/linux/vm/attachListener_linux.cpp Mon Jul 15 11:07:03 2013 +0100 96.3 @@ -199,7 +199,7 @@ 96.4 ::unlink(initial_path); 96.5 int res = ::bind(listener, (struct sockaddr*)&addr, sizeof(addr)); 96.6 if (res == -1) { 96.7 - RESTARTABLE(::close(listener), res); 96.8 + ::close(listener); 96.9 return -1; 96.10 } 96.11 96.12 @@ -212,7 +212,7 @@ 96.13 } 96.14 } 96.15 if (res == -1) { 96.16 - RESTARTABLE(::close(listener), res); 96.17 + ::close(listener); 96.18 ::unlink(initial_path); 96.19 return -1; 96.20 } 96.21 @@ -340,24 +340,21 @@ 96.22 struct ucred cred_info; 96.23 socklen_t optlen = sizeof(cred_info); 96.24 if (::getsockopt(s, SOL_SOCKET, SO_PEERCRED, (void*)&cred_info, &optlen) == -1) { 96.25 - int res; 96.26 - RESTARTABLE(::close(s), res); 96.27 + ::close(s); 96.28 continue; 96.29 } 96.30 uid_t euid = geteuid(); 96.31 gid_t egid = getegid(); 96.32 96.33 if (cred_info.uid != euid || cred_info.gid != egid) { 96.34 - int res; 96.35 - RESTARTABLE(::close(s), res); 96.36 + ::close(s); 96.37 continue; 96.38 } 96.39 96.40 // peer credential look okay so we read the request 96.41 LinuxAttachOperation* op = read_request(s); 96.42 if (op == NULL) { 96.43 - int res; 96.44 - RESTARTABLE(::close(s), res); 96.45 + ::close(s); 96.46 continue; 96.47 } else { 96.48 return op; 96.49 @@ -408,7 +405,7 @@ 96.50 } 96.51 96.52 // done 96.53 - RESTARTABLE(::close(this->socket()), rc); 96.54 + ::close(this->socket()); 96.55 96.56 // were we externally suspended while we were waiting? 96.57 thread->check_and_wait_while_suspended();
97.1 --- a/src/os/linux/vm/os_linux.cpp Thu Jul 11 12:59:03 2013 -0400 97.2 +++ b/src/os/linux/vm/os_linux.cpp Mon Jul 15 11:07:03 2013 +0100 97.3 @@ -2612,11 +2612,49 @@ 97.4 } 97.5 } 97.6 97.7 +static bool recoverable_mmap_error(int err) { 97.8 + // See if the error is one we can let the caller handle. This 97.9 + // list of errno values comes from JBS-6843484. I can't find a 97.10 + // Linux man page that documents this specific set of errno 97.11 + // values so while this list currently matches Solaris, it may 97.12 + // change as we gain experience with this failure mode. 97.13 + switch (err) { 97.14 + case EBADF: 97.15 + case EINVAL: 97.16 + case ENOTSUP: 97.17 + // let the caller deal with these errors 97.18 + return true; 97.19 + 97.20 + default: 97.21 + // Any remaining errors on this OS can cause our reserved mapping 97.22 + // to be lost. That can cause confusion where different data 97.23 + // structures think they have the same memory mapped. The worst 97.24 + // scenario is if both the VM and a library think they have the 97.25 + // same memory mapped. 97.26 + return false; 97.27 + } 97.28 +} 97.29 + 97.30 +static void warn_fail_commit_memory(char* addr, size_t size, bool exec, 97.31 + int err) { 97.32 + warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT 97.33 + ", %d) failed; error='%s' (errno=%d)", addr, size, exec, 97.34 + strerror(err), err); 97.35 +} 97.36 + 97.37 +static void warn_fail_commit_memory(char* addr, size_t size, 97.38 + size_t alignment_hint, bool exec, 97.39 + int err) { 97.40 + warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT 97.41 + ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, size, 97.42 + alignment_hint, exec, strerror(err), err); 97.43 +} 97.44 + 97.45 // NOTE: Linux kernel does not really reserve the pages for us. 97.46 // All it does is to check if there are enough free pages 97.47 // left at the time of mmap(). This could be a potential 97.48 // problem. 
97.49 -bool os::pd_commit_memory(char* addr, size_t size, bool exec) { 97.50 +int os::Linux::commit_memory_impl(char* addr, size_t size, bool exec) { 97.51 int prot = exec ? PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE; 97.52 uintptr_t res = (uintptr_t) ::mmap(addr, size, prot, 97.53 MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0); 97.54 @@ -2624,9 +2662,32 @@ 97.55 if (UseNUMAInterleaving) { 97.56 numa_make_global(addr, size); 97.57 } 97.58 - return true; 97.59 - } 97.60 - return false; 97.61 + return 0; 97.62 + } 97.63 + 97.64 + int err = errno; // save errno from mmap() call above 97.65 + 97.66 + if (!recoverable_mmap_error(err)) { 97.67 + warn_fail_commit_memory(addr, size, exec, err); 97.68 + vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "committing reserved memory."); 97.69 + } 97.70 + 97.71 + return err; 97.72 +} 97.73 + 97.74 +bool os::pd_commit_memory(char* addr, size_t size, bool exec) { 97.75 + return os::Linux::commit_memory_impl(addr, size, exec) == 0; 97.76 +} 97.77 + 97.78 +void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec, 97.79 + const char* mesg) { 97.80 + assert(mesg != NULL, "mesg must be specified"); 97.81 + int err = os::Linux::commit_memory_impl(addr, size, exec); 97.82 + if (err != 0) { 97.83 + // the caller wants all commit errors to exit with the specified mesg: 97.84 + warn_fail_commit_memory(addr, size, exec, err); 97.85 + vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg); 97.86 + } 97.87 } 97.88 97.89 // Define MAP_HUGETLB here so we can build HotSpot on old systems. 97.90 @@ -2639,8 +2700,9 @@ 97.91 #define MADV_HUGEPAGE 14 97.92 #endif 97.93 97.94 -bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, 97.95 - bool exec) { 97.96 +int os::Linux::commit_memory_impl(char* addr, size_t size, 97.97 + size_t alignment_hint, bool exec) { 97.98 + int err; 97.99 if (UseHugeTLBFS && alignment_hint > (size_t)vm_page_size()) { 97.100 int prot = exec ? 
PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE; 97.101 uintptr_t res = 97.102 @@ -2651,16 +2713,46 @@ 97.103 if (UseNUMAInterleaving) { 97.104 numa_make_global(addr, size); 97.105 } 97.106 - return true; 97.107 + return 0; 97.108 + } 97.109 + 97.110 + err = errno; // save errno from mmap() call above 97.111 + 97.112 + if (!recoverable_mmap_error(err)) { 97.113 + // However, it is not clear that this loss of our reserved mapping 97.114 + // happens with large pages on Linux or that we cannot recover 97.115 + // from the loss. For now, we just issue a warning and we don't 97.116 + // call vm_exit_out_of_memory(). This issue is being tracked by 97.117 + // JBS-8007074. 97.118 + warn_fail_commit_memory(addr, size, alignment_hint, exec, err); 97.119 +// vm_exit_out_of_memory(size, OOM_MMAP_ERROR, 97.120 +// "committing reserved memory."); 97.121 } 97.122 // Fall through and try to use small pages 97.123 } 97.124 97.125 - if (commit_memory(addr, size, exec)) { 97.126 + err = os::Linux::commit_memory_impl(addr, size, exec); 97.127 + if (err == 0) { 97.128 realign_memory(addr, size, alignment_hint); 97.129 - return true; 97.130 - } 97.131 - return false; 97.132 + } 97.133 + return err; 97.134 +} 97.135 + 97.136 +bool os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, 97.137 + bool exec) { 97.138 + return os::Linux::commit_memory_impl(addr, size, alignment_hint, exec) == 0; 97.139 +} 97.140 + 97.141 +void os::pd_commit_memory_or_exit(char* addr, size_t size, 97.142 + size_t alignment_hint, bool exec, 97.143 + const char* mesg) { 97.144 + assert(mesg != NULL, "mesg must be specified"); 97.145 + int err = os::Linux::commit_memory_impl(addr, size, alignment_hint, exec); 97.146 + if (err != 0) { 97.147 + // the caller wants all commit errors to exit with the specified mesg: 97.148 + warn_fail_commit_memory(addr, size, alignment_hint, exec, err); 97.149 + vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg); 97.150 + } 97.151 } 97.152 97.153 void 
os::pd_realign_memory(char *addr, size_t bytes, size_t alignment_hint) { 97.154 @@ -2678,7 +2770,7 @@ 97.155 // small pages on top of the SHM segment. This method always works for small pages, so we 97.156 // allow that in any case. 97.157 if (alignment_hint <= (size_t)os::vm_page_size() || !UseSHM) { 97.158 - commit_memory(addr, bytes, alignment_hint, false); 97.159 + commit_memory(addr, bytes, alignment_hint, !ExecMem); 97.160 } 97.161 } 97.162 97.163 @@ -2931,7 +3023,7 @@ 97.164 ::munmap((void*)stack_extent, (uintptr_t)addr - stack_extent); 97.165 } 97.166 97.167 - return os::commit_memory(addr, size); 97.168 + return os::commit_memory(addr, size, !ExecMem); 97.169 } 97.170 97.171 // If this is a growable mapping, remove the guard pages entirely by 97.172 @@ -3053,7 +3145,7 @@ 97.173 MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB, 97.174 -1, 0); 97.175 97.176 - if (p != (void *) -1) { 97.177 + if (p != MAP_FAILED) { 97.178 // We don't know if this really is a huge page or not. 97.179 FILE *fp = fopen("/proc/self/maps", "r"); 97.180 if (fp) { 97.181 @@ -3271,22 +3363,21 @@ 97.182 } 97.183 97.184 // The memory is committed 97.185 - address pc = CALLER_PC; 97.186 - MemTracker::record_virtual_memory_reserve((address)addr, bytes, pc); 97.187 - MemTracker::record_virtual_memory_commit((address)addr, bytes, pc); 97.188 + MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, mtNone, CALLER_PC); 97.189 97.190 return addr; 97.191 } 97.192 97.193 bool os::release_memory_special(char* base, size_t bytes) { 97.194 + MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker(); 97.195 // detaching the SHM segment will also delete it, see reserve_memory_special() 97.196 int rslt = shmdt(base); 97.197 if (rslt == 0) { 97.198 - MemTracker::record_virtual_memory_uncommit((address)base, bytes); 97.199 - MemTracker::record_virtual_memory_release((address)base, bytes); 97.200 + tkr.record((address)base, bytes); 97.201 return true; 97.202 } else { 97.203 - 
return false; 97.204 + tkr.discard(); 97.205 + return false; 97.206 } 97.207 } 97.208 97.209 @@ -4393,7 +4484,7 @@ 97.210 97.211 if (!UseMembar) { 97.212 address mem_serialize_page = (address) ::mmap(NULL, Linux::page_size(), PROT_READ | PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); 97.213 - guarantee( mem_serialize_page != NULL, "mmap Failed for memory serialize page"); 97.214 + guarantee( mem_serialize_page != MAP_FAILED, "mmap Failed for memory serialize page"); 97.215 os::set_memory_serialize_page( mem_serialize_page ); 97.216 97.217 #ifndef PRODUCT
98.1 --- a/src/os/linux/vm/os_linux.hpp Thu Jul 11 12:59:03 2013 -0400 98.2 +++ b/src/os/linux/vm/os_linux.hpp Mon Jul 15 11:07:03 2013 +0100 98.3 @@ -76,6 +76,10 @@ 98.4 static julong physical_memory() { return _physical_memory; } 98.5 static void initialize_system_info(); 98.6 98.7 + static int commit_memory_impl(char* addr, size_t bytes, bool exec); 98.8 + static int commit_memory_impl(char* addr, size_t bytes, 98.9 + size_t alignment_hint, bool exec); 98.10 + 98.11 static void set_glibc_version(const char *s) { _glibc_version = s; } 98.12 static void set_libpthread_version(const char *s) { _libpthread_version = s; } 98.13
99.1 --- a/src/os/linux/vm/perfMemory_linux.cpp Thu Jul 11 12:59:03 2013 -0400 99.2 +++ b/src/os/linux/vm/perfMemory_linux.cpp Mon Jul 15 11:07:03 2013 +0100 99.3 @@ -1,5 +1,5 @@ 99.4 /* 99.5 - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. 99.6 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. 99.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 99.8 * 99.9 * This code is free software; you can redistribute it and/or modify it 99.10 @@ -60,7 +60,7 @@ 99.11 } 99.12 99.13 // commit memory 99.14 - if (!os::commit_memory(mapAddress, size)) { 99.15 + if (!os::commit_memory(mapAddress, size, !ExecMem)) { 99.16 if (PrintMiscellaneous && Verbose) { 99.17 warning("Could not commit PerfData memory\n"); 99.18 } 99.19 @@ -120,7 +120,7 @@ 99.20 addr += result; 99.21 } 99.22 99.23 - RESTARTABLE(::close(fd), result); 99.24 + result = ::close(fd); 99.25 if (PrintMiscellaneous && Verbose) { 99.26 if (result == OS_ERR) { 99.27 warning("Could not close %s: %s\n", destfile, strerror(errno)); 99.28 @@ -632,7 +632,7 @@ 99.29 if (PrintMiscellaneous && Verbose) { 99.30 warning("could not set shared memory file size: %s\n", strerror(errno)); 99.31 } 99.32 - RESTARTABLE(::close(fd), result); 99.33 + ::close(fd); 99.34 return -1; 99.35 } 99.36 99.37 @@ -656,7 +656,7 @@ 99.38 if (result != -1) { 99.39 return fd; 99.40 } else { 99.41 - RESTARTABLE(::close(fd), result); 99.42 + ::close(fd); 99.43 return -1; 99.44 } 99.45 } 99.46 @@ -734,9 +734,7 @@ 99.47 99.48 mapAddress = (char*)::mmap((char*)0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); 99.49 99.50 - // attempt to close the file - restart it if it was interrupted, 99.51 - // but ignore other failures 99.52 - RESTARTABLE(::close(fd), result); 99.53 + result = ::close(fd); 99.54 assert(result != OS_ERR, "could not close file"); 99.55 99.56 if (mapAddress == MAP_FAILED) { 99.57 @@ -755,8 +753,7 @@ 99.58 (void)::memset((void*) mapAddress, 0, size); 99.59 99.60 // 
it does not go through os api, the operation has to record from here 99.61 - MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC); 99.62 - MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal); 99.63 + MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC); 99.64 99.65 return mapAddress; 99.66 } 99.67 @@ -907,9 +904,7 @@ 99.68 99.69 mapAddress = (char*)::mmap((char*)0, size, mmap_prot, MAP_SHARED, fd, 0); 99.70 99.71 - // attempt to close the file - restart if it gets interrupted, 99.72 - // but ignore other failures 99.73 - RESTARTABLE(::close(fd), result); 99.74 + result = ::close(fd); 99.75 assert(result != OS_ERR, "could not close file"); 99.76 99.77 if (mapAddress == MAP_FAILED) { 99.78 @@ -921,8 +916,7 @@ 99.79 } 99.80 99.81 // it does not go through os api, the operation has to record from here 99.82 - MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC); 99.83 - MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal); 99.84 + MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC); 99.85 99.86 *addr = mapAddress; 99.87 *sizep = size;
100.1 --- a/src/os/solaris/dtrace/jhelper.d Thu Jul 11 12:59:03 2013 -0400 100.2 +++ b/src/os/solaris/dtrace/jhelper.d Mon Jul 15 11:07:03 2013 +0100 100.3 @@ -1,5 +1,5 @@ 100.4 /* 100.5 - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. 100.6 + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. 100.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 100.8 * 100.9 * This code is free software; you can redistribute it and/or modify it 100.10 @@ -332,12 +332,15 @@ 100.11 100.12 this->nameSymbol = copyin_ptr(this->constantPool + 100.13 this->nameIndex * sizeof (pointer) + SIZE_ConstantPool); 100.14 + /* The symbol is a CPSlot and has lower bit set to indicate metadata */ 100.15 + this->nameSymbol &= (~1); /* remove metadata lsb */ 100.16 100.17 this->nameSymbolLength = copyin_uint16(this->nameSymbol + 100.18 OFFSET_Symbol_length); 100.19 100.20 this->signatureSymbol = copyin_ptr(this->constantPool + 100.21 this->signatureIndex * sizeof (pointer) + SIZE_ConstantPool); 100.22 + this->signatureSymbol &= (~1); /* remove metadata lsb */ 100.23 100.24 this->signatureSymbolLength = copyin_uint16(this->signatureSymbol + 100.25 OFFSET_Symbol_length);
101.1 --- a/src/os/solaris/dtrace/jvm_dtrace.c Thu Jul 11 12:59:03 2013 -0400 101.2 +++ b/src/os/solaris/dtrace/jvm_dtrace.c Mon Jul 15 11:07:03 2013 +0100 101.3 @@ -122,9 +122,7 @@ 101.4 } 101.5 101.6 static int file_close(int fd) { 101.7 - int ret; 101.8 - RESTARTABLE(close(fd), ret); 101.9 - return ret; 101.10 + return close(fd); 101.11 } 101.12 101.13 static int file_read(int fd, char* buf, int len) {
102.1 --- a/src/os/solaris/vm/attachListener_solaris.cpp Thu Jul 11 12:59:03 2013 -0400 102.2 +++ b/src/os/solaris/vm/attachListener_solaris.cpp Mon Jul 15 11:07:03 2013 +0100 102.3 @@ -392,7 +392,7 @@ 102.4 return -1; 102.5 } 102.6 assert(fd >= 0, "bad file descriptor"); 102.7 - RESTARTABLE(::close(fd), res); 102.8 + ::close(fd); 102.9 102.10 // attach the door descriptor to the file 102.11 if ((res = ::fattach(dd, initial_path)) == -1) { 102.12 @@ -410,7 +410,7 @@ 102.13 // rename file so that clients can attach 102.14 if (dd >= 0) { 102.15 if (::rename(initial_path, door_path) == -1) { 102.16 - RESTARTABLE(::close(dd), res); 102.17 + ::close(dd); 102.18 ::fdetach(initial_path); 102.19 dd = -1; 102.20 } 102.21 @@ -549,7 +549,7 @@ 102.22 } 102.23 102.24 // close socket and we're done 102.25 - RESTARTABLE(::close(this->socket()), rc); 102.26 + ::close(this->socket()); 102.27 102.28 // were we externally suspended while we were waiting? 102.29 thread->check_and_wait_while_suspended();
103.1 --- a/src/os/solaris/vm/os_solaris.cpp Thu Jul 11 12:59:03 2013 -0400 103.2 +++ b/src/os/solaris/vm/os_solaris.cpp Mon Jul 15 11:07:03 2013 +0100 103.3 @@ -2784,7 +2784,42 @@ 103.4 return page_size; 103.5 } 103.6 103.7 -bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) { 103.8 +static bool recoverable_mmap_error(int err) { 103.9 + // See if the error is one we can let the caller handle. This 103.10 + // list of errno values comes from the Solaris mmap(2) man page. 103.11 + switch (err) { 103.12 + case EBADF: 103.13 + case EINVAL: 103.14 + case ENOTSUP: 103.15 + // let the caller deal with these errors 103.16 + return true; 103.17 + 103.18 + default: 103.19 + // Any remaining errors on this OS can cause our reserved mapping 103.20 + // to be lost. That can cause confusion where different data 103.21 + // structures think they have the same memory mapped. The worst 103.22 + // scenario is if both the VM and a library think they have the 103.23 + // same memory mapped. 103.24 + return false; 103.25 + } 103.26 +} 103.27 + 103.28 +static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec, 103.29 + int err) { 103.30 + warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT 103.31 + ", %d) failed; error='%s' (errno=%d)", addr, bytes, exec, 103.32 + strerror(err), err); 103.33 +} 103.34 + 103.35 +static void warn_fail_commit_memory(char* addr, size_t bytes, 103.36 + size_t alignment_hint, bool exec, 103.37 + int err) { 103.38 + warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT 103.39 + ", " SIZE_FORMAT ", %d) failed; error='%s' (errno=%d)", addr, bytes, 103.40 + alignment_hint, exec, strerror(err), err); 103.41 +} 103.42 + 103.43 +int os::Solaris::commit_memory_impl(char* addr, size_t bytes, bool exec) { 103.44 int prot = exec ? 
PROT_READ|PROT_WRITE|PROT_EXEC : PROT_READ|PROT_WRITE; 103.45 size_t size = bytes; 103.46 char *res = Solaris::mmap_chunk(addr, size, MAP_PRIVATE|MAP_FIXED, prot); 103.47 @@ -2792,14 +2827,38 @@ 103.48 if (UseNUMAInterleaving) { 103.49 numa_make_global(addr, bytes); 103.50 } 103.51 - return true; 103.52 - } 103.53 - return false; 103.54 -} 103.55 - 103.56 -bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint, 103.57 - bool exec) { 103.58 - if (commit_memory(addr, bytes, exec)) { 103.59 + return 0; 103.60 + } 103.61 + 103.62 + int err = errno; // save errno from mmap() call in mmap_chunk() 103.63 + 103.64 + if (!recoverable_mmap_error(err)) { 103.65 + warn_fail_commit_memory(addr, bytes, exec, err); 103.66 + vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, "committing reserved memory."); 103.67 + } 103.68 + 103.69 + return err; 103.70 +} 103.71 + 103.72 +bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) { 103.73 + return Solaris::commit_memory_impl(addr, bytes, exec) == 0; 103.74 +} 103.75 + 103.76 +void os::pd_commit_memory_or_exit(char* addr, size_t bytes, bool exec, 103.77 + const char* mesg) { 103.78 + assert(mesg != NULL, "mesg must be specified"); 103.79 + int err = os::Solaris::commit_memory_impl(addr, bytes, exec); 103.80 + if (err != 0) { 103.81 + // the caller wants all commit errors to exit with the specified mesg: 103.82 + warn_fail_commit_memory(addr, bytes, exec, err); 103.83 + vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg); 103.84 + } 103.85 +} 103.86 + 103.87 +int os::Solaris::commit_memory_impl(char* addr, size_t bytes, 103.88 + size_t alignment_hint, bool exec) { 103.89 + int err = Solaris::commit_memory_impl(addr, bytes, exec); 103.90 + if (err == 0) { 103.91 if (UseMPSS && alignment_hint > (size_t)vm_page_size()) { 103.92 // If the large page size has been set and the VM 103.93 // is using large pages, use the large page size 103.94 @@ -2821,9 +2880,25 @@ 103.95 // Since this is a hint, ignore any failures. 
103.96 (void)Solaris::set_mpss_range(addr, bytes, page_size); 103.97 } 103.98 - return true; 103.99 - } 103.100 - return false; 103.101 + } 103.102 + return err; 103.103 +} 103.104 + 103.105 +bool os::pd_commit_memory(char* addr, size_t bytes, size_t alignment_hint, 103.106 + bool exec) { 103.107 + return Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec) == 0; 103.108 +} 103.109 + 103.110 +void os::pd_commit_memory_or_exit(char* addr, size_t bytes, 103.111 + size_t alignment_hint, bool exec, 103.112 + const char* mesg) { 103.113 + assert(mesg != NULL, "mesg must be specified"); 103.114 + int err = os::Solaris::commit_memory_impl(addr, bytes, alignment_hint, exec); 103.115 + if (err != 0) { 103.116 + // the caller wants all commit errors to exit with the specified mesg: 103.117 + warn_fail_commit_memory(addr, bytes, alignment_hint, exec, err); 103.118 + vm_exit_out_of_memory(bytes, OOM_MMAP_ERROR, mesg); 103.119 + } 103.120 } 103.121 103.122 // Uncommit the pages in a specified region. 
103.123 @@ -2835,7 +2910,7 @@ 103.124 } 103.125 103.126 bool os::pd_create_stack_guard_pages(char* addr, size_t size) { 103.127 - return os::commit_memory(addr, size); 103.128 + return os::commit_memory(addr, size, !ExecMem); 103.129 } 103.130 103.131 bool os::remove_stack_guard_pages(char* addr, size_t size) { 103.132 @@ -3457,22 +3532,21 @@ 103.133 } 103.134 103.135 // The memory is committed 103.136 - address pc = CALLER_PC; 103.137 - MemTracker::record_virtual_memory_reserve((address)retAddr, size, pc); 103.138 - MemTracker::record_virtual_memory_commit((address)retAddr, size, pc); 103.139 + MemTracker::record_virtual_memory_reserve_and_commit((address)retAddr, size, mtNone, CURRENT_PC); 103.140 103.141 return retAddr; 103.142 } 103.143 103.144 bool os::release_memory_special(char* base, size_t bytes) { 103.145 + MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker(); 103.146 // detaching the SHM segment will also delete it, see reserve_memory_special() 103.147 int rslt = shmdt(base); 103.148 if (rslt == 0) { 103.149 - MemTracker::record_virtual_memory_uncommit((address)base, bytes); 103.150 - MemTracker::record_virtual_memory_release((address)base, bytes); 103.151 + tkr.record((address)base, bytes); 103.152 return true; 103.153 } else { 103.154 - return false; 103.155 + tkr.discard(); 103.156 + return false; 103.157 } 103.158 } 103.159 103.160 @@ -6604,11 +6678,11 @@ 103.161 } 103.162 103.163 int os::close(int fd) { 103.164 - RESTARTABLE_RETURN_INT(::close(fd)); 103.165 + return ::close(fd); 103.166 } 103.167 103.168 int os::socket_close(int fd) { 103.169 - RESTARTABLE_RETURN_INT(::close(fd)); 103.170 + return ::close(fd); 103.171 } 103.172 103.173 int os::recv(int fd, char* buf, size_t nBytes, uint flags) {
104.1 --- a/src/os/solaris/vm/os_solaris.hpp Thu Jul 11 12:59:03 2013 -0400 104.2 +++ b/src/os/solaris/vm/os_solaris.hpp Mon Jul 15 11:07:03 2013 +0100 104.3 @@ -168,6 +168,9 @@ 104.4 static int _dev_zero_fd; 104.5 static int get_dev_zero_fd() { return _dev_zero_fd; } 104.6 static void set_dev_zero_fd(int fd) { _dev_zero_fd = fd; } 104.7 + static int commit_memory_impl(char* addr, size_t bytes, bool exec); 104.8 + static int commit_memory_impl(char* addr, size_t bytes, 104.9 + size_t alignment_hint, bool exec); 104.10 static char* mmap_chunk(char *addr, size_t size, int flags, int prot); 104.11 static char* anon_mmap(char* requested_addr, size_t bytes, size_t alignment_hint, bool fixed); 104.12 static bool mpss_sanity_check(bool warn, size_t * page_size);
105.1 --- a/src/os/solaris/vm/os_solaris.inline.hpp Thu Jul 11 12:59:03 2013 -0400 105.2 +++ b/src/os/solaris/vm/os_solaris.inline.hpp Mon Jul 15 11:07:03 2013 +0100 105.3 @@ -89,7 +89,7 @@ 105.4 105.5 inline struct dirent* os::readdir(DIR* dirp, dirent* dbuf) { 105.6 assert(dirp != NULL, "just checking"); 105.7 -#if defined(_LP64) || defined(_GNU_SOURCE) 105.8 +#if defined(_LP64) || defined(_GNU_SOURCE) || _FILE_OFFSET_BITS==64 105.9 dirent* p; 105.10 int status; 105.11 105.12 @@ -98,9 +98,9 @@ 105.13 return NULL; 105.14 } else 105.15 return p; 105.16 -#else // defined(_LP64) || defined(_GNU_SOURCE) 105.17 +#else // defined(_LP64) || defined(_GNU_SOURCE) || _FILE_OFFSET_BITS==64 105.18 return ::readdir_r(dirp, dbuf); 105.19 -#endif // defined(_LP64) || defined(_GNU_SOURCE) 105.20 +#endif // defined(_LP64) || defined(_GNU_SOURCE) || _FILE_OFFSET_BITS==64 105.21 } 105.22 105.23 inline int os::closedir(DIR *dirp) {
106.1 --- a/src/os/solaris/vm/perfMemory_solaris.cpp Thu Jul 11 12:59:03 2013 -0400 106.2 +++ b/src/os/solaris/vm/perfMemory_solaris.cpp Mon Jul 15 11:07:03 2013 +0100 106.3 @@ -1,5 +1,5 @@ 106.4 /* 106.5 - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. 106.6 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. 106.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 106.8 * 106.9 * This code is free software; you can redistribute it and/or modify it 106.10 @@ -62,7 +62,7 @@ 106.11 } 106.12 106.13 // commit memory 106.14 - if (!os::commit_memory(mapAddress, size)) { 106.15 + if (!os::commit_memory(mapAddress, size, !ExecMem)) { 106.16 if (PrintMiscellaneous && Verbose) { 106.17 warning("Could not commit PerfData memory\n"); 106.18 } 106.19 @@ -122,7 +122,7 @@ 106.20 addr += result; 106.21 } 106.22 106.23 - RESTARTABLE(::close(fd), result); 106.24 + result = ::close(fd); 106.25 if (PrintMiscellaneous && Verbose) { 106.26 if (result == OS_ERR) { 106.27 warning("Could not close %s: %s\n", destfile, strerror(errno)); 106.28 @@ -437,7 +437,7 @@ 106.29 addr+=result; 106.30 } 106.31 106.32 - RESTARTABLE(::close(fd), result); 106.33 + ::close(fd); 106.34 106.35 // get the user name for the effective user id of the process 106.36 char* user_name = get_user_name(psinfo.pr_euid); 106.37 @@ -669,7 +669,7 @@ 106.38 if (PrintMiscellaneous && Verbose) { 106.39 warning("could not set shared memory file size: %s\n", strerror(errno)); 106.40 } 106.41 - RESTARTABLE(::close(fd), result); 106.42 + ::close(fd); 106.43 return -1; 106.44 } 106.45 106.46 @@ -749,9 +749,7 @@ 106.47 106.48 mapAddress = (char*)::mmap((char*)0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); 106.49 106.50 - // attempt to close the file - restart it if it was interrupted, 106.51 - // but ignore other failures 106.52 - RESTARTABLE(::close(fd), result); 106.53 + result = ::close(fd); 106.54 assert(result != OS_ERR, "could not close file"); 
106.55 106.56 if (mapAddress == MAP_FAILED) { 106.57 @@ -770,8 +768,7 @@ 106.58 (void)::memset((void*) mapAddress, 0, size); 106.59 106.60 // it does not go through os api, the operation has to record from here 106.61 - MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC); 106.62 - MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal); 106.63 + MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC); 106.64 106.65 return mapAddress; 106.66 } 106.67 @@ -922,9 +919,7 @@ 106.68 106.69 mapAddress = (char*)::mmap((char*)0, size, mmap_prot, MAP_SHARED, fd, 0); 106.70 106.71 - // attempt to close the file - restart if it gets interrupted, 106.72 - // but ignore other failures 106.73 - RESTARTABLE(::close(fd), result); 106.74 + result = ::close(fd); 106.75 assert(result != OS_ERR, "could not close file"); 106.76 106.77 if (mapAddress == MAP_FAILED) { 106.78 @@ -936,8 +931,7 @@ 106.79 } 106.80 106.81 // it does not go through os api, the operation has to record from here 106.82 - MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC); 106.83 - MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal); 106.84 + MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC); 106.85 106.86 *addr = mapAddress; 106.87 *sizep = size;
107.1 --- a/src/os/windows/vm/os_windows.cpp Thu Jul 11 12:59:03 2013 -0400 107.2 +++ b/src/os/windows/vm/os_windows.cpp Mon Jul 15 11:07:03 2013 +0100 107.3 @@ -2524,7 +2524,7 @@ 107.4 addr = (address)((uintptr_t)addr & 107.5 (~((uintptr_t)os::vm_page_size() - (uintptr_t)1))); 107.6 os::commit_memory((char *)addr, thread->stack_base() - addr, 107.7 - false ); 107.8 + !ExecMem); 107.9 return EXCEPTION_CONTINUE_EXECUTION; 107.10 } 107.11 else 107.12 @@ -2875,7 +2875,7 @@ 107.13 PAGE_READWRITE); 107.14 // If reservation failed, return NULL 107.15 if (p_buf == NULL) return NULL; 107.16 - MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC); 107.17 + MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, mtNone, CALLER_PC); 107.18 os::release_memory(p_buf, bytes + chunk_size); 107.19 107.20 // we still need to round up to a page boundary (in case we are using large pages) 107.21 @@ -2941,7 +2941,7 @@ 107.22 // need to create a dummy 'reserve' record to match 107.23 // the release. 107.24 MemTracker::record_virtual_memory_reserve((address)p_buf, 107.25 - bytes_to_release, CALLER_PC); 107.26 + bytes_to_release, mtNone, CALLER_PC); 107.27 os::release_memory(p_buf, bytes_to_release); 107.28 } 107.29 #ifdef ASSERT 107.30 @@ -2961,9 +2961,10 @@ 107.31 // Although the memory is allocated individually, it is returned as one. 107.32 // NMT records it as one block. 
107.33 address pc = CALLER_PC; 107.34 - MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, pc); 107.35 if ((flags & MEM_COMMIT) != 0) { 107.36 - MemTracker::record_virtual_memory_commit((address)p_buf, bytes, pc); 107.37 + MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, mtNone, pc); 107.38 + } else { 107.39 + MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, mtNone, pc); 107.40 } 107.41 107.42 // made it this far, success 107.43 @@ -3154,8 +3155,7 @@ 107.44 char * res = (char *)VirtualAlloc(NULL, bytes, flag, prot); 107.45 if (res != NULL) { 107.46 address pc = CALLER_PC; 107.47 - MemTracker::record_virtual_memory_reserve((address)res, bytes, pc); 107.48 - MemTracker::record_virtual_memory_commit((address)res, bytes, pc); 107.49 + MemTracker::record_virtual_memory_reserve_and_commit((address)res, bytes, mtNone, pc); 107.50 } 107.51 107.52 return res; 107.53 @@ -3164,14 +3164,21 @@ 107.54 107.55 bool os::release_memory_special(char* base, size_t bytes) { 107.56 assert(base != NULL, "Sanity check"); 107.57 - // Memory allocated via reserve_memory_special() is committed 107.58 - MemTracker::record_virtual_memory_uncommit((address)base, bytes); 107.59 return release_memory(base, bytes); 107.60 } 107.61 107.62 void os::print_statistics() { 107.63 } 107.64 107.65 +static void warn_fail_commit_memory(char* addr, size_t bytes, bool exec) { 107.66 + int err = os::get_last_error(); 107.67 + char buf[256]; 107.68 + size_t buf_len = os::lasterror(buf, sizeof(buf)); 107.69 + warning("INFO: os::commit_memory(" PTR_FORMAT ", " SIZE_FORMAT 107.70 + ", %d) failed; error='%s' (DOS error/errno=%d)", addr, bytes, 107.71 + exec, buf_len != 0 ? buf : "<no_error_string>", err); 107.72 +} 107.73 + 107.74 bool os::pd_commit_memory(char* addr, size_t bytes, bool exec) { 107.75 if (bytes == 0) { 107.76 // Don't bother the OS with noops. 
107.77 @@ -3186,11 +3193,17 @@ 107.78 // is always within a reserve covered by a single VirtualAlloc 107.79 // in that case we can just do a single commit for the requested size 107.80 if (!UseNUMAInterleaving) { 107.81 - if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) return false; 107.82 + if (VirtualAlloc(addr, bytes, MEM_COMMIT, PAGE_READWRITE) == NULL) { 107.83 + NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);) 107.84 + return false; 107.85 + } 107.86 if (exec) { 107.87 DWORD oldprot; 107.88 // Windows doc says to use VirtualProtect to get execute permissions 107.89 - if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) return false; 107.90 + if (!VirtualProtect(addr, bytes, PAGE_EXECUTE_READWRITE, &oldprot)) { 107.91 + NOT_PRODUCT(warn_fail_commit_memory(addr, bytes, exec);) 107.92 + return false; 107.93 + } 107.94 } 107.95 return true; 107.96 } else { 107.97 @@ -3205,12 +3218,20 @@ 107.98 MEMORY_BASIC_INFORMATION alloc_info; 107.99 VirtualQuery(next_alloc_addr, &alloc_info, sizeof(alloc_info)); 107.100 size_t bytes_to_rq = MIN2(bytes_remaining, (size_t)alloc_info.RegionSize); 107.101 - if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT, PAGE_READWRITE) == NULL) 107.102 + if (VirtualAlloc(next_alloc_addr, bytes_to_rq, MEM_COMMIT, 107.103 + PAGE_READWRITE) == NULL) { 107.104 + NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq, 107.105 + exec);) 107.106 return false; 107.107 + } 107.108 if (exec) { 107.109 DWORD oldprot; 107.110 - if (!VirtualProtect(next_alloc_addr, bytes_to_rq, PAGE_EXECUTE_READWRITE, &oldprot)) 107.111 + if (!VirtualProtect(next_alloc_addr, bytes_to_rq, 107.112 + PAGE_EXECUTE_READWRITE, &oldprot)) { 107.113 + NOT_PRODUCT(warn_fail_commit_memory(next_alloc_addr, bytes_to_rq, 107.114 + exec);) 107.115 return false; 107.116 + } 107.117 } 107.118 bytes_remaining -= bytes_to_rq; 107.119 next_alloc_addr += bytes_to_rq; 107.120 @@ -3222,7 +3243,24 @@ 107.121 107.122 bool 
os::pd_commit_memory(char* addr, size_t size, size_t alignment_hint, 107.123 bool exec) { 107.124 - return commit_memory(addr, size, exec); 107.125 + // alignment_hint is ignored on this OS 107.126 + return pd_commit_memory(addr, size, exec); 107.127 +} 107.128 + 107.129 +void os::pd_commit_memory_or_exit(char* addr, size_t size, bool exec, 107.130 + const char* mesg) { 107.131 + assert(mesg != NULL, "mesg must be specified"); 107.132 + if (!pd_commit_memory(addr, size, exec)) { 107.133 + warn_fail_commit_memory(addr, size, exec); 107.134 + vm_exit_out_of_memory(size, OOM_MMAP_ERROR, mesg); 107.135 + } 107.136 +} 107.137 + 107.138 +void os::pd_commit_memory_or_exit(char* addr, size_t size, 107.139 + size_t alignment_hint, bool exec, 107.140 + const char* mesg) { 107.141 + // alignment_hint is ignored on this OS 107.142 + pd_commit_memory_or_exit(addr, size, exec, mesg); 107.143 } 107.144 107.145 bool os::pd_uncommit_memory(char* addr, size_t bytes) { 107.146 @@ -3240,7 +3278,7 @@ 107.147 } 107.148 107.149 bool os::pd_create_stack_guard_pages(char* addr, size_t size) { 107.150 - return os::commit_memory(addr, size); 107.151 + return os::commit_memory(addr, size, !ExecMem); 107.152 } 107.153 107.154 bool os::remove_stack_guard_pages(char* addr, size_t size) { 107.155 @@ -3264,8 +3302,9 @@ 107.156 107.157 // Strange enough, but on Win32 one can change protection only for committed 107.158 // memory, not a big deal anyway, as bytes less or equal than 64K 107.159 - if (!is_committed && !commit_memory(addr, bytes, prot == MEM_PROT_RWX)) { 107.160 - fatal("cannot commit protection page"); 107.161 + if (!is_committed) { 107.162 + commit_memory_or_exit(addr, bytes, prot == MEM_PROT_RWX, 107.163 + "cannot commit protection page"); 107.164 } 107.165 // One cannot use os::guard_memory() here, as on Win32 guard page 107.166 // have different (one-shot) semantics, from MSDN on PAGE_GUARD:
108.1 --- a/src/os/windows/vm/perfMemory_windows.cpp Thu Jul 11 12:59:03 2013 -0400 108.2 +++ b/src/os/windows/vm/perfMemory_windows.cpp Mon Jul 15 11:07:03 2013 +0100 108.3 @@ -1,5 +1,5 @@ 108.4 /* 108.5 - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. 108.6 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. 108.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 108.8 * 108.9 * This code is free software; you can redistribute it and/or modify it 108.10 @@ -58,7 +58,7 @@ 108.11 } 108.12 108.13 // commit memory 108.14 - if (!os::commit_memory(mapAddress, size)) { 108.15 + if (!os::commit_memory(mapAddress, size, !ExecMem)) { 108.16 if (PrintMiscellaneous && Verbose) { 108.17 warning("Could not commit PerfData memory\n"); 108.18 } 108.19 @@ -1498,8 +1498,7 @@ 108.20 (void)memset(mapAddress, '\0', size); 108.21 108.22 // it does not go through os api, the operation has to record from here 108.23 - MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC); 108.24 - MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal); 108.25 + MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC); 108.26 108.27 return (char*) mapAddress; 108.28 } 108.29 @@ -1681,8 +1680,7 @@ 108.30 } 108.31 108.32 // it does not go through os api, the operation has to record from here 108.33 - MemTracker::record_virtual_memory_reserve((address)mapAddress, size, CURRENT_PC); 108.34 - MemTracker::record_virtual_memory_type((address)mapAddress, mtInternal); 108.35 + MemTracker::record_virtual_memory_reserve((address)mapAddress, size, mtInternal, CURRENT_PC); 108.36 108.37 108.38 *addrp = (char*)mapAddress; 108.39 @@ -1836,9 +1834,10 @@ 108.40 return; 108.41 } 108.42 108.43 + MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker(); 108.44 remove_file_mapping(addr); 108.45 // it does not go through os api, the operation has to record 
from here 108.46 - MemTracker::record_virtual_memory_release((address)addr, bytes); 108.47 + tkr.record((address)addr, bytes); 108.48 } 108.49 108.50 char* PerfMemory::backing_store_filename() {
109.1 --- a/src/os_cpu/linux_sparc/vm/assembler_linux_sparc.cpp Thu Jul 11 12:59:03 2013 -0400 109.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 109.3 @@ -1,47 +0,0 @@ 109.4 -/* 109.5 - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. 109.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 109.7 - * 109.8 - * This code is free software; you can redistribute it and/or modify it 109.9 - * under the terms of the GNU General Public License version 2 only, as 109.10 - * published by the Free Software Foundation. 109.11 - * 109.12 - * This code is distributed in the hope that it will be useful, but WITHOUT 109.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 109.14 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 109.15 - * version 2 for more details (a copy is included in the LICENSE file that 109.16 - * accompanied this code). 109.17 - * 109.18 - * You should have received a copy of the GNU General Public License version 109.19 - * 2 along with this work; if not, write to the Free Software Foundation, 109.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 109.21 - * 109.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 109.23 - * or visit www.oracle.com if you need additional information or have any 109.24 - * questions. 
109.25 - * 109.26 - */ 109.27 - 109.28 -#include "precompiled.hpp" 109.29 -#include "asm/macroAssembler.hpp" 109.30 -#include "runtime/os.hpp" 109.31 -#include "runtime/threadLocalStorage.hpp" 109.32 - 109.33 -#include <asm-sparc/traps.h> 109.34 - 109.35 -void MacroAssembler::read_ccr_trap(Register ccr_save) { 109.36 - // No implementation 109.37 - breakpoint_trap(); 109.38 -} 109.39 - 109.40 -void MacroAssembler::write_ccr_trap(Register ccr_save, Register scratch1, Register scratch2) { 109.41 - // No implementation 109.42 - breakpoint_trap(); 109.43 -} 109.44 - 109.45 -void MacroAssembler::flush_windows_trap() { trap(SP_TRAP_FWIN); } 109.46 -void MacroAssembler::clean_windows_trap() { trap(SP_TRAP_CWIN); } 109.47 - 109.48 -// Use software breakpoint trap until we figure out how to do this on Linux 109.49 -void MacroAssembler::get_psr_trap() { trap(SP_TRAP_SBPT); } 109.50 -void MacroAssembler::set_psr_trap() { trap(SP_TRAP_SBPT); }
110.1 --- a/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.inline.hpp Thu Jul 11 12:59:03 2013 -0400 110.2 +++ b/src/os_cpu/linux_sparc/vm/atomic_linux_sparc.inline.hpp Mon Jul 15 11:07:03 2013 +0100 110.3 @@ -169,7 +169,6 @@ 110.4 : "memory"); 110.5 return rv; 110.6 #else 110.7 - assert(VM_Version::v9_instructions_work(), "cas only supported on v9"); 110.8 volatile jlong_accessor evl, cvl, rv; 110.9 evl.long_value = exchange_value; 110.10 cvl.long_value = compare_value;
111.1 --- a/src/os_cpu/linux_x86/vm/os_linux_x86.cpp Thu Jul 11 12:59:03 2013 -0400 111.2 +++ b/src/os_cpu/linux_x86/vm/os_linux_x86.cpp Mon Jul 15 11:07:03 2013 +0100 111.3 @@ -289,6 +289,16 @@ 111.4 } 111.5 #endif // AMD64 111.6 111.7 +#ifndef AMD64 111.8 + // Halt if SI_KERNEL before more crashes get misdiagnosed as Java bugs 111.9 + // This can happen in any running code (currently more frequently in 111.10 + // interpreter code but has been seen in compiled code) 111.11 + if (sig == SIGSEGV && info->si_addr == 0 && info->si_code == SI_KERNEL) { 111.12 + fatal("An irrecoverable SI_KERNEL SIGSEGV has occurred due " 111.13 + "to unstable signal handling in this distribution."); 111.14 + } 111.15 +#endif // AMD64 111.16 + 111.17 // Handle ALL stack overflow variations here 111.18 if (sig == SIGSEGV) { 111.19 address addr = (address) info->si_addr;
112.1 --- a/src/os_cpu/solaris_sparc/vm/assembler_solaris_sparc.cpp Thu Jul 11 12:59:03 2013 -0400 112.2 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 112.3 @@ -1,61 +0,0 @@ 112.4 -/* 112.5 - * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved. 112.6 - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 112.7 - * 112.8 - * This code is free software; you can redistribute it and/or modify it 112.9 - * under the terms of the GNU General Public License version 2 only, as 112.10 - * published by the Free Software Foundation. 112.11 - * 112.12 - * This code is distributed in the hope that it will be useful, but WITHOUT 112.13 - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 112.14 - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 112.15 - * version 2 for more details (a copy is included in the LICENSE file that 112.16 - * accompanied this code). 112.17 - * 112.18 - * You should have received a copy of the GNU General Public License version 112.19 - * 2 along with this work; if not, write to the Free Software Foundation, 112.20 - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 112.21 - * 112.22 - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 112.23 - * or visit www.oracle.com if you need additional information or have any 112.24 - * questions. 112.25 - * 112.26 - */ 112.27 - 112.28 -#include "precompiled.hpp" 112.29 -#include "asm/macroAssembler.inline.hpp" 112.30 -#include "runtime/os.hpp" 112.31 -#include "runtime/threadLocalStorage.hpp" 112.32 - 112.33 -#include <sys/trap.h> // For trap numbers 112.34 -#include <v9/sys/psr_compat.h> // For V8 compatibility 112.35 - 112.36 -void MacroAssembler::read_ccr_trap(Register ccr_save) { 112.37 - // Execute a trap to get the PSR, mask and shift 112.38 - // to get the condition codes. 
112.39 - get_psr_trap(); 112.40 - nop(); 112.41 - set(PSR_ICC, ccr_save); 112.42 - and3(O0, ccr_save, ccr_save); 112.43 - srl(ccr_save, PSR_ICC_SHIFT, ccr_save); 112.44 -} 112.45 - 112.46 -void MacroAssembler::write_ccr_trap(Register ccr_save, Register scratch1, Register scratch2) { 112.47 - // Execute a trap to get the PSR, shift back 112.48 - // the condition codes, mask the condition codes 112.49 - // back into and PSR and trap to write back the 112.50 - // PSR. 112.51 - sll(ccr_save, PSR_ICC_SHIFT, scratch2); 112.52 - get_psr_trap(); 112.53 - nop(); 112.54 - set(~PSR_ICC, scratch1); 112.55 - and3(O0, scratch1, O0); 112.56 - or3(O0, scratch2, O0); 112.57 - set_psr_trap(); 112.58 - nop(); 112.59 -} 112.60 - 112.61 -void MacroAssembler::flush_windows_trap() { trap(ST_FLUSH_WINDOWS); } 112.62 -void MacroAssembler::clean_windows_trap() { trap(ST_CLEAN_WINDOWS); } 112.63 -void MacroAssembler::get_psr_trap() { trap(ST_GETPSR); } 112.64 -void MacroAssembler::set_psr_trap() { trap(ST_SETPSR); }
113.1 --- a/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.inline.hpp Thu Jul 11 12:59:03 2013 -0400 113.2 +++ b/src/os_cpu/solaris_sparc/vm/atomic_solaris_sparc.inline.hpp Mon Jul 15 11:07:03 2013 +0100 113.3 @@ -60,21 +60,10 @@ 113.4 113.5 #else 113.6 113.7 -extern "C" void _Atomic_move_long_v8(volatile jlong* src, volatile jlong* dst); 113.8 extern "C" void _Atomic_move_long_v9(volatile jlong* src, volatile jlong* dst); 113.9 113.10 inline void Atomic_move_long(volatile jlong* src, volatile jlong* dst) { 113.11 -#ifdef COMPILER2 113.12 - // Compiler2 does not support v8, it is used only for v9. 113.13 _Atomic_move_long_v9(src, dst); 113.14 -#else 113.15 - // The branch is cheaper then emulated LDD. 113.16 - if (VM_Version::v9_instructions_work()) { 113.17 - _Atomic_move_long_v9(src, dst); 113.18 - } else { 113.19 - _Atomic_move_long_v8(src, dst); 113.20 - } 113.21 -#endif 113.22 } 113.23 113.24 inline jlong Atomic::load(volatile jlong* src) { 113.25 @@ -209,7 +198,6 @@ 113.26 : "memory"); 113.27 return rv; 113.28 #else //_LP64 113.29 - assert(VM_Version::v9_instructions_work(), "cas only supported on v9"); 113.30 volatile jlong_accessor evl, cvl, rv; 113.31 evl.long_value = exchange_value; 113.32 cvl.long_value = compare_value; 113.33 @@ -318,7 +306,6 @@ 113.34 // Return 64 bit value in %o0 113.35 return _Atomic_cas64((intptr_t)exchange_value, (intptr_t *)dest, (intptr_t)compare_value); 113.36 #else // _LP64 113.37 - assert (VM_Version::v9_instructions_work(), "only supported on v9"); 113.38 // Return 64 bit value in %o0,%o1 by hand 113.39 return _Atomic_casl(exchange_value, dest, compare_value); 113.40 #endif // _LP64
114.1 --- a/src/os_cpu/solaris_sparc/vm/solaris_sparc.il Thu Jul 11 12:59:03 2013 -0400 114.2 +++ b/src/os_cpu/solaris_sparc/vm/solaris_sparc.il Mon Jul 15 11:07:03 2013 +0100 114.3 @@ -152,23 +152,6 @@ 114.4 .nonvolatile 114.5 .end 114.6 114.7 - // Support for jlong Atomic::load and Atomic::store on v8. 114.8 - // 114.9 - // void _Atomic_move_long_v8(volatile jlong* src, volatile jlong* dst) 114.10 - // 114.11 - // Arguments: 114.12 - // src: O0 114.13 - // dest: O1 114.14 - // 114.15 - // Overwrites O2 and O3 114.16 - 114.17 - .inline _Atomic_move_long_v8,2 114.18 - .volatile 114.19 - ldd [%o0], %o2 114.20 - std %o2, [%o1] 114.21 - .nonvolatile 114.22 - .end 114.23 - 114.24 // Support for jlong Atomic::load and Atomic::store on v9. 114.25 // 114.26 // void _Atomic_move_long_v9(volatile jlong* src, volatile jlong* dst)
115.1 --- a/src/share/vm/adlc/formssel.cpp Thu Jul 11 12:59:03 2013 -0400 115.2 +++ b/src/share/vm/adlc/formssel.cpp Mon Jul 15 11:07:03 2013 +0100 115.3 @@ -235,6 +235,9 @@ 115.4 return false; 115.5 } 115.6 115.7 +bool InstructForm::is_ideal_negD() const { 115.8 + return (_matrule && _matrule->_rChild && strcmp(_matrule->_rChild->_opType, "NegD") == 0); 115.9 +} 115.10 115.11 // Return 'true' if this instruction matches an ideal 'Copy*' node 115.12 int InstructForm::is_ideal_copy() const { 115.13 @@ -533,6 +536,12 @@ 115.14 if( data_type != Form::none ) 115.15 rematerialize = true; 115.16 115.17 + // Ugly: until a better fix is implemented, disable rematerialization for 115.18 + // negD nodes because they are proved to be problematic. 115.19 + if (is_ideal_negD()) { 115.20 + return false; 115.21 + } 115.22 + 115.23 // Constants 115.24 if( _components.count() == 1 && _components[0]->is(Component::USE_DEF) ) 115.25 rematerialize = true;
116.1 --- a/src/share/vm/adlc/formssel.hpp Thu Jul 11 12:59:03 2013 -0400 116.2 +++ b/src/share/vm/adlc/formssel.hpp Mon Jul 15 11:07:03 2013 +0100 116.3 @@ -147,6 +147,7 @@ 116.4 virtual int is_empty_encoding() const; // _size=0 and/or _insencode empty 116.5 virtual int is_tls_instruction() const; // tlsLoadP rule or ideal ThreadLocal 116.6 virtual int is_ideal_copy() const; // node matches ideal 'Copy*' 116.7 + virtual bool is_ideal_negD() const; // node matches ideal 'NegD' 116.8 virtual bool is_ideal_if() const; // node matches ideal 'If' 116.9 virtual bool is_ideal_fastlock() const; // node matches 'FastLock' 116.10 virtual bool is_ideal_membar() const; // node matches ideal 'MemBarXXX'
117.1 --- a/src/share/vm/c1/c1_GraphBuilder.cpp Thu Jul 11 12:59:03 2013 -0400 117.2 +++ b/src/share/vm/c1/c1_GraphBuilder.cpp Mon Jul 15 11:07:03 2013 +0100 117.3 @@ -1,5 +1,5 @@ 117.4 /* 117.5 - * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved. 117.6 + * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved. 117.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 117.8 * 117.9 * This code is free software; you can redistribute it and/or modify it 117.10 @@ -3461,6 +3461,14 @@ 117.11 preserves_state = true; 117.12 break; 117.13 117.14 + case vmIntrinsics::_updateCRC32: 117.15 + case vmIntrinsics::_updateBytesCRC32: 117.16 + case vmIntrinsics::_updateByteBufferCRC32: 117.17 + if (!UseCRC32Intrinsics) return false; 117.18 + cantrap = false; 117.19 + preserves_state = true; 117.20 + break; 117.21 + 117.22 case vmIntrinsics::_loadFence : 117.23 case vmIntrinsics::_storeFence: 117.24 case vmIntrinsics::_fullFence :
118.1 --- a/src/share/vm/c1/c1_IR.cpp Thu Jul 11 12:59:03 2013 -0400 118.2 +++ b/src/share/vm/c1/c1_IR.cpp Mon Jul 15 11:07:03 2013 +0100 118.3 @@ -506,7 +506,7 @@ 118.4 _loop_map(0, 0), // initialized later with correct size 118.5 _compilation(c) 118.6 { 118.7 - TRACE_LINEAR_SCAN(2, "***** computing linear-scan block order"); 118.8 + TRACE_LINEAR_SCAN(2, tty->print_cr("***** computing linear-scan block order")); 118.9 118.10 init_visited(); 118.11 count_edges(start_block, NULL); 118.12 @@ -683,7 +683,7 @@ 118.13 } 118.14 118.15 void ComputeLinearScanOrder::assign_loop_depth(BlockBegin* start_block) { 118.16 - TRACE_LINEAR_SCAN(3, "----- computing loop-depth and weight"); 118.17 + TRACE_LINEAR_SCAN(3, tty->print_cr("----- computing loop-depth and weight")); 118.18 init_visited(); 118.19 118.20 assert(_work_list.is_empty(), "work list must be empty before processing"); 118.21 @@ -868,7 +868,7 @@ 118.22 } 118.23 118.24 void ComputeLinearScanOrder::compute_order(BlockBegin* start_block) { 118.25 - TRACE_LINEAR_SCAN(3, "----- computing final block order"); 118.26 + TRACE_LINEAR_SCAN(3, tty->print_cr("----- computing final block order")); 118.27 118.28 // the start block is always the first block in the linear scan order 118.29 _linear_scan_order = new BlockList(_num_blocks);
119.1 --- a/src/share/vm/c1/c1_LIR.cpp Thu Jul 11 12:59:03 2013 -0400 119.2 +++ b/src/share/vm/c1/c1_LIR.cpp Mon Jul 15 11:07:03 2013 +0100 119.3 @@ -201,23 +201,24 @@ 119.4 119.5 #ifdef ASSERT 119.6 if (!is_pointer() && !is_illegal()) { 119.7 + OprKind kindfield = kind_field(); // Factored out because of compiler bug, see 8002160 119.8 switch (as_BasicType(type_field())) { 119.9 case T_LONG: 119.10 - assert((kind_field() == cpu_register || kind_field() == stack_value) && 119.11 + assert((kindfield == cpu_register || kindfield == stack_value) && 119.12 size_field() == double_size, "must match"); 119.13 break; 119.14 case T_FLOAT: 119.15 // FP return values can be also in CPU registers on ARM and PPC (softfp ABI) 119.16 - assert((kind_field() == fpu_register || kind_field() == stack_value 119.17 - ARM_ONLY(|| kind_field() == cpu_register) 119.18 - PPC_ONLY(|| kind_field() == cpu_register) ) && 119.19 + assert((kindfield == fpu_register || kindfield == stack_value 119.20 + ARM_ONLY(|| kindfield == cpu_register) 119.21 + PPC_ONLY(|| kindfield == cpu_register) ) && 119.22 size_field() == single_size, "must match"); 119.23 break; 119.24 case T_DOUBLE: 119.25 // FP return values can be also in CPU registers on ARM and PPC (softfp ABI) 119.26 - assert((kind_field() == fpu_register || kind_field() == stack_value 119.27 - ARM_ONLY(|| kind_field() == cpu_register) 119.28 - PPC_ONLY(|| kind_field() == cpu_register) ) && 119.29 + assert((kindfield == fpu_register || kindfield == stack_value 119.30 + ARM_ONLY(|| kindfield == cpu_register) 119.31 + PPC_ONLY(|| kindfield == cpu_register) ) && 119.32 size_field() == double_size, "must match"); 119.33 break; 119.34 case T_BOOLEAN: 119.35 @@ -229,7 +230,7 @@ 119.36 case T_OBJECT: 119.37 case T_METADATA: 119.38 case T_ARRAY: 119.39 - assert((kind_field() == cpu_register || kind_field() == stack_value) && 119.40 + assert((kindfield == cpu_register || kindfield == stack_value) && 119.41 size_field() == single_size, "must match"); 
119.42 break; 119.43 119.44 @@ -429,6 +430,11 @@ 119.45 _stub = new ArrayCopyStub(this); 119.46 } 119.47 119.48 +LIR_OpUpdateCRC32::LIR_OpUpdateCRC32(LIR_Opr crc, LIR_Opr val, LIR_Opr res) 119.49 + : LIR_Op(lir_updatecrc32, res, NULL) 119.50 + , _crc(crc) 119.51 + , _val(val) { 119.52 +} 119.53 119.54 //-------------------verify-------------------------- 119.55 119.56 @@ -875,6 +881,20 @@ 119.57 } 119.58 119.59 119.60 +// LIR_OpUpdateCRC32 119.61 + case lir_updatecrc32: { 119.62 + assert(op->as_OpUpdateCRC32() != NULL, "must be"); 119.63 + LIR_OpUpdateCRC32* opUp = (LIR_OpUpdateCRC32*)op; 119.64 + 119.65 + assert(opUp->_crc->is_valid(), "used"); do_input(opUp->_crc); do_temp(opUp->_crc); 119.66 + assert(opUp->_val->is_valid(), "used"); do_input(opUp->_val); do_temp(opUp->_val); 119.67 + assert(opUp->_result->is_valid(), "used"); do_output(opUp->_result); 119.68 + assert(opUp->_info == NULL, "no info for LIR_OpUpdateCRC32"); 119.69 + 119.70 + break; 119.71 + } 119.72 + 119.73 + 119.74 // LIR_OpLock 119.75 case lir_lock: 119.76 case lir_unlock: { 119.77 @@ -1055,6 +1075,10 @@ 119.78 masm->emit_code_stub(stub()); 119.79 } 119.80 119.81 +void LIR_OpUpdateCRC32::emit_code(LIR_Assembler* masm) { 119.82 + masm->emit_updatecrc32(this); 119.83 +} 119.84 + 119.85 void LIR_Op0::emit_code(LIR_Assembler* masm) { 119.86 masm->emit_op0(this); 119.87 } 119.88 @@ -1762,6 +1786,8 @@ 119.89 case lir_dynamic_call: s = "dynamic"; break; 119.90 // LIR_OpArrayCopy 119.91 case lir_arraycopy: s = "arraycopy"; break; 119.92 + // LIR_OpUpdateCRC32 119.93 + case lir_updatecrc32: s = "updatecrc32"; break; 119.94 // LIR_OpLock 119.95 case lir_lock: s = "lock"; break; 119.96 case lir_unlock: s = "unlock"; break; 119.97 @@ -1814,6 +1840,13 @@ 119.98 tmp()->print(out); out->print(" "); 119.99 } 119.100 119.101 +// LIR_OpUpdateCRC32 119.102 +void LIR_OpUpdateCRC32::print_instr(outputStream* out) const { 119.103 + crc()->print(out); out->print(" "); 119.104 + val()->print(out); out->print(" "); 
119.105 + result_opr()->print(out); out->print(" "); 119.106 +} 119.107 + 119.108 // LIR_OpCompareAndSwap 119.109 void LIR_OpCompareAndSwap::print_instr(outputStream* out) const { 119.110 addr()->print(out); out->print(" ");
120.1 --- a/src/share/vm/c1/c1_LIR.hpp Thu Jul 11 12:59:03 2013 -0400 120.2 +++ b/src/share/vm/c1/c1_LIR.hpp Mon Jul 15 11:07:03 2013 +0100 120.3 @@ -1,5 +1,5 @@ 120.4 /* 120.5 - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. 120.6 + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. 120.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 120.8 * 120.9 * This code is free software; you can redistribute it and/or modify it 120.10 @@ -877,6 +877,7 @@ 120.11 class LIR_OpJavaCall; 120.12 class LIR_OpRTCall; 120.13 class LIR_OpArrayCopy; 120.14 +class LIR_OpUpdateCRC32; 120.15 class LIR_OpLock; 120.16 class LIR_OpTypeCheck; 120.17 class LIR_OpCompareAndSwap; 120.18 @@ -982,6 +983,9 @@ 120.19 , begin_opArrayCopy 120.20 , lir_arraycopy 120.21 , end_opArrayCopy 120.22 + , begin_opUpdateCRC32 120.23 + , lir_updatecrc32 120.24 + , end_opUpdateCRC32 120.25 , begin_opLock 120.26 , lir_lock 120.27 , lir_unlock 120.28 @@ -1137,6 +1141,7 @@ 120.29 virtual LIR_Op2* as_Op2() { return NULL; } 120.30 virtual LIR_Op3* as_Op3() { return NULL; } 120.31 virtual LIR_OpArrayCopy* as_OpArrayCopy() { return NULL; } 120.32 + virtual LIR_OpUpdateCRC32* as_OpUpdateCRC32() { return NULL; } 120.33 virtual LIR_OpTypeCheck* as_OpTypeCheck() { return NULL; } 120.34 virtual LIR_OpCompareAndSwap* as_OpCompareAndSwap() { return NULL; } 120.35 virtual LIR_OpProfileCall* as_OpProfileCall() { return NULL; } 120.36 @@ -1293,6 +1298,25 @@ 120.37 void print_instr(outputStream* out) const PRODUCT_RETURN; 120.38 }; 120.39 120.40 +// LIR_OpUpdateCRC32 120.41 +class LIR_OpUpdateCRC32: public LIR_Op { 120.42 + friend class LIR_OpVisitState; 120.43 + 120.44 +private: 120.45 + LIR_Opr _crc; 120.46 + LIR_Opr _val; 120.47 + 120.48 +public: 120.49 + 120.50 + LIR_OpUpdateCRC32(LIR_Opr crc, LIR_Opr val, LIR_Opr res); 120.51 + 120.52 + LIR_Opr crc() const { return _crc; } 120.53 + LIR_Opr val() const { return _val; } 120.54 + 120.55 + virtual void 
emit_code(LIR_Assembler* masm); 120.56 + virtual LIR_OpUpdateCRC32* as_OpUpdateCRC32() { return this; } 120.57 + void print_instr(outputStream* out) const PRODUCT_RETURN; 120.58 +}; 120.59 120.60 // -------------------------------------------------- 120.61 // LIR_Op0 120.62 @@ -2212,6 +2236,8 @@ 120.63 120.64 void arraycopy(LIR_Opr src, LIR_Opr src_pos, LIR_Opr dst, LIR_Opr dst_pos, LIR_Opr length, LIR_Opr tmp, ciArrayKlass* expected_type, int flags, CodeEmitInfo* info) { append(new LIR_OpArrayCopy(src, src_pos, dst, dst_pos, length, tmp, expected_type, flags, info)); } 120.65 120.66 + void update_crc32(LIR_Opr crc, LIR_Opr val, LIR_Opr res) { append(new LIR_OpUpdateCRC32(crc, val, res)); } 120.67 + 120.68 void fpop_raw() { append(new LIR_Op0(lir_fpop_raw)); } 120.69 120.70 void instanceof(LIR_Opr result, LIR_Opr object, ciKlass* klass, LIR_Opr tmp1, LIR_Opr tmp2, LIR_Opr tmp3, bool fast_check, CodeEmitInfo* info_for_patch, ciMethod* profiled_method, int profiled_bci);
121.1 --- a/src/share/vm/c1/c1_LIRAssembler.hpp Thu Jul 11 12:59:03 2013 -0400 121.2 +++ b/src/share/vm/c1/c1_LIRAssembler.hpp Mon Jul 15 11:07:03 2013 +0100 121.3 @@ -1,5 +1,5 @@ 121.4 /* 121.5 - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. 121.6 + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. 121.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 121.8 * 121.9 * This code is free software; you can redistribute it and/or modify it 121.10 @@ -195,6 +195,7 @@ 121.11 void emit_opBranch(LIR_OpBranch* op); 121.12 void emit_opLabel(LIR_OpLabel* op); 121.13 void emit_arraycopy(LIR_OpArrayCopy* op); 121.14 + void emit_updatecrc32(LIR_OpUpdateCRC32* op); 121.15 void emit_opConvert(LIR_OpConvert* op); 121.16 void emit_alloc_obj(LIR_OpAllocObj* op); 121.17 void emit_alloc_array(LIR_OpAllocArray* op);
122.1 --- a/src/share/vm/c1/c1_LIRGenerator.cpp Thu Jul 11 12:59:03 2013 -0400 122.2 +++ b/src/share/vm/c1/c1_LIRGenerator.cpp Mon Jul 15 11:07:03 2013 +0100 122.3 @@ -2994,6 +2994,12 @@ 122.4 do_Reference_get(x); 122.5 break; 122.6 122.7 + case vmIntrinsics::_updateCRC32: 122.8 + case vmIntrinsics::_updateBytesCRC32: 122.9 + case vmIntrinsics::_updateByteBufferCRC32: 122.10 + do_update_CRC32(x); 122.11 + break; 122.12 + 122.13 default: ShouldNotReachHere(); break; 122.14 } 122.15 }
123.1 --- a/src/share/vm/c1/c1_LIRGenerator.hpp Thu Jul 11 12:59:03 2013 -0400 123.2 +++ b/src/share/vm/c1/c1_LIRGenerator.hpp Mon Jul 15 11:07:03 2013 +0100 123.3 @@ -1,5 +1,5 @@ 123.4 /* 123.5 - * Copyright (c) 2005, 2012, Oracle and/or its affiliates. All rights reserved. 123.6 + * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved. 123.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 123.8 * 123.9 * This code is free software; you can redistribute it and/or modify it 123.10 @@ -247,6 +247,7 @@ 123.11 void do_NIOCheckIndex(Intrinsic* x); 123.12 void do_FPIntrinsics(Intrinsic* x); 123.13 void do_Reference_get(Intrinsic* x); 123.14 + void do_update_CRC32(Intrinsic* x); 123.15 123.16 void do_UnsafePrefetch(UnsafePrefetch* x, bool is_store); 123.17
124.1 --- a/src/share/vm/c1/c1_Runtime1.cpp Thu Jul 11 12:59:03 2013 -0400 124.2 +++ b/src/share/vm/c1/c1_Runtime1.cpp Mon Jul 15 11:07:03 2013 +0100 124.3 @@ -299,6 +299,7 @@ 124.4 #ifdef TRACE_HAVE_INTRINSICS 124.5 FUNCTION_CASE(entry, TRACE_TIME_METHOD); 124.6 #endif 124.7 + FUNCTION_CASE(entry, StubRoutines::updateBytesCRC32()); 124.8 124.9 #undef FUNCTION_CASE 124.10
125.1 --- a/src/share/vm/ci/ciObjectFactory.cpp Thu Jul 11 12:59:03 2013 -0400 125.2 +++ b/src/share/vm/ci/ciObjectFactory.cpp Mon Jul 15 11:07:03 2013 +0100 125.3 @@ -265,8 +265,6 @@ 125.4 ciMetadata* ciObjectFactory::get_metadata(Metadata* key) { 125.5 ASSERT_IN_VM; 125.6 125.7 - assert(key == NULL || key->is_metadata(), "must be"); 125.8 - 125.9 #ifdef ASSERT 125.10 if (CIObjectFactoryVerify) { 125.11 Metadata* last = NULL;
126.1 --- a/src/share/vm/ci/ciUtilities.hpp Thu Jul 11 12:59:03 2013 -0400 126.2 +++ b/src/share/vm/ci/ciUtilities.hpp Mon Jul 15 11:07:03 2013 +0100 126.3 @@ -96,7 +96,7 @@ 126.4 CLEAR_PENDING_EXCEPTION; \ 126.5 return (result); \ 126.6 } \ 126.7 - (0 126.8 + (void)(0 126.9 126.10 #define KILL_COMPILE_ON_ANY \ 126.11 THREAD); \ 126.12 @@ -104,7 +104,7 @@ 126.13 fatal("unhandled ci exception"); \ 126.14 CLEAR_PENDING_EXCEPTION; \ 126.15 } \ 126.16 -(0 126.17 +(void)(0 126.18 126.19 126.20 inline const char* bool_to_str(bool b) {
127.1 --- a/src/share/vm/classfile/dictionary.cpp Thu Jul 11 12:59:03 2013 -0400 127.2 +++ b/src/share/vm/classfile/dictionary.cpp Mon Jul 15 11:07:03 2013 +0100 127.3 @@ -555,7 +555,7 @@ 127.4 loader_data->class_loader() == NULL || 127.5 loader_data->class_loader()->is_instance(), 127.6 "checking type of class_loader"); 127.7 - e->verify(); 127.8 + e->verify(/*check_dictionary*/false); 127.9 probe->verify_protection_domain_set(); 127.10 element_count++; 127.11 }
128.1 --- a/src/share/vm/classfile/genericSignatures.cpp Thu Jul 11 12:59:03 2013 -0400 128.2 +++ b/src/share/vm/classfile/genericSignatures.cpp Mon Jul 15 11:07:03 2013 +0100 128.3 @@ -124,7 +124,7 @@ 128.4 fatal(STREAM->parse_error()); \ 128.5 } \ 128.6 return NULL; \ 128.7 - } 0 128.8 + } (void)0 128.9 128.10 #define READ() STREAM->read(); CHECK_FOR_PARSE_ERROR() 128.11 #define PEEK() STREAM->peek(); CHECK_FOR_PARSE_ERROR() 128.12 @@ -133,7 +133,7 @@ 128.13 #define EXPECTED(c, ch) STREAM->assert_char(c, ch); CHECK_FOR_PARSE_ERROR() 128.14 #define EXPECT_END() STREAM->expect_end(); CHECK_FOR_PARSE_ERROR() 128.15 128.16 -#define CHECK_STREAM STREAM); CHECK_FOR_PARSE_ERROR(); (0 128.17 +#define CHECK_STREAM STREAM); CHECK_FOR_PARSE_ERROR(); ((void)0 128.18 128.19 #ifndef PRODUCT 128.20 void Identifier::print_on(outputStream* str) const {
129.1 --- a/src/share/vm/classfile/symbolTable.cpp Thu Jul 11 12:59:03 2013 -0400 129.2 +++ b/src/share/vm/classfile/symbolTable.cpp Mon Jul 15 11:07:03 2013 +0100 129.3 @@ -598,6 +598,8 @@ 129.4 129.5 bool StringTable::_needs_rehashing = false; 129.6 129.7 +volatile int StringTable::_parallel_claimed_idx = 0; 129.8 + 129.9 // Pick hashing algorithm 129.10 unsigned int StringTable::hash_string(const jchar* s, int len) { 129.11 return use_alternate_hashcode() ? AltHashing::murmur3_32(seed(), s, len) : 129.12 @@ -761,8 +763,18 @@ 129.13 } 129.14 } 129.15 129.16 -void StringTable::oops_do(OopClosure* f) { 129.17 - for (int i = 0; i < the_table()->table_size(); ++i) { 129.18 +void StringTable::buckets_do(OopClosure* f, int start_idx, int end_idx) { 129.19 + const int limit = the_table()->table_size(); 129.20 + 129.21 + assert(0 <= start_idx && start_idx <= limit, 129.22 + err_msg("start_idx (" INT32_FORMAT ") oob?", start_idx)); 129.23 + assert(0 <= end_idx && end_idx <= limit, 129.24 + err_msg("end_idx (" INT32_FORMAT ") oob?", end_idx)); 129.25 + assert(start_idx <= end_idx, 129.26 + err_msg("Ordering: start_idx=" INT32_FORMAT", end_idx=" INT32_FORMAT, 129.27 + start_idx, end_idx)); 129.28 + 129.29 + for (int i = start_idx; i < end_idx; i += 1) { 129.30 HashtableEntry<oop, mtSymbol>* entry = the_table()->bucket(i); 129.31 while (entry != NULL) { 129.32 assert(!entry->is_shared(), "CDS not used for the StringTable"); 129.33 @@ -774,6 +786,27 @@ 129.34 } 129.35 } 129.36 129.37 +void StringTable::oops_do(OopClosure* f) { 129.38 + buckets_do(f, 0, the_table()->table_size()); 129.39 +} 129.40 + 129.41 +void StringTable::possibly_parallel_oops_do(OopClosure* f) { 129.42 + const int ClaimChunkSize = 32; 129.43 + const int limit = the_table()->table_size(); 129.44 + 129.45 + for (;;) { 129.46 + // Grab next set of buckets to scan 129.47 + int start_idx = Atomic::add(ClaimChunkSize, &_parallel_claimed_idx) - ClaimChunkSize; 129.48 + if (start_idx >= limit) { 129.49 + // End 
of table 129.50 + break; 129.51 + } 129.52 + 129.53 + int end_idx = MIN2(limit, start_idx + ClaimChunkSize); 129.54 + buckets_do(f, start_idx, end_idx); 129.55 + } 129.56 +} 129.57 + 129.58 void StringTable::verify() { 129.59 for (int i = 0; i < the_table()->table_size(); ++i) { 129.60 HashtableEntry<oop, mtSymbol>* p = the_table()->bucket(i);
130.1 --- a/src/share/vm/classfile/symbolTable.hpp Thu Jul 11 12:59:03 2013 -0400 130.2 +++ b/src/share/vm/classfile/symbolTable.hpp Mon Jul 15 11:07:03 2013 +0100 130.3 @@ -1,5 +1,5 @@ 130.4 /* 130.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. 130.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. 130.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 130.8 * 130.9 * This code is free software; you can redistribute it and/or modify it 130.10 @@ -246,12 +246,19 @@ 130.11 // Set if one bucket is out of balance due to hash algorithm deficiency 130.12 static bool _needs_rehashing; 130.13 130.14 + // Claimed high water mark for parallel chunked scanning 130.15 + static volatile int _parallel_claimed_idx; 130.16 + 130.17 static oop intern(Handle string_or_null, jchar* chars, int length, TRAPS); 130.18 oop basic_add(int index, Handle string_or_null, jchar* name, int len, 130.19 unsigned int hashValue, TRAPS); 130.20 130.21 oop lookup(int index, jchar* chars, int length, unsigned int hashValue); 130.22 130.23 + // Apply the give oop closure to the entries to the buckets 130.24 + // in the range [start_idx, end_idx). 130.25 + static void buckets_do(OopClosure* f, int start_idx, int end_idx); 130.26 + 130.27 StringTable() : Hashtable<oop, mtSymbol>((int)StringTableSize, 130.28 sizeof (HashtableEntry<oop, mtSymbol>)) {} 130.29 130.30 @@ -277,9 +284,12 @@ 130.31 unlink_or_oops_do(cl, NULL); 130.32 } 130.33 130.34 - // Invoke "f->do_oop" on the locations of all oops in the table. 130.35 + // Serially invoke "f->do_oop" on the locations of all oops in the table. 
130.36 static void oops_do(OopClosure* f); 130.37 130.38 + // Possibly parallel version of the above 130.39 + static void possibly_parallel_oops_do(OopClosure* f); 130.40 + 130.41 // Hashing algorithm, used as the hash value used by the 130.42 // StringTable for bucket selection and comparison (stored in the 130.43 // HashtableEntry structures). This is used in the String.intern() method. 130.44 @@ -315,5 +325,8 @@ 130.45 // Rehash the symbol table if it gets out of balance 130.46 static void rehash_table(); 130.47 static bool needs_rehashing() { return _needs_rehashing; } 130.48 + 130.49 + // Parallel chunked scanning 130.50 + static void clear_parallel_claimed_index() { _parallel_claimed_idx = 0; } 130.51 }; 130.52 #endif // SHARE_VM_CLASSFILE_SYMBOLTABLE_HPP
131.1 --- a/src/share/vm/classfile/verifier.hpp Thu Jul 11 12:59:03 2013 -0400 131.2 +++ b/src/share/vm/classfile/verifier.hpp Mon Jul 15 11:07:03 2013 +0100 131.3 @@ -86,9 +86,9 @@ 131.4 // These macros are used similarly to CHECK macros but also check 131.5 // the status of the verifier and return if that has an error. 131.6 #define CHECK_VERIFY(verifier) \ 131.7 - CHECK); if ((verifier)->has_error()) return; (0 131.8 + CHECK); if ((verifier)->has_error()) return; ((void)0 131.9 #define CHECK_VERIFY_(verifier, result) \ 131.10 - CHECK_(result)); if ((verifier)->has_error()) return (result); (0 131.11 + CHECK_(result)); if ((verifier)->has_error()) return (result); ((void)0 131.12 131.13 class TypeOrigin VALUE_OBJ_CLASS_SPEC { 131.14 private:
132.1 --- a/src/share/vm/classfile/vmSymbols.hpp Thu Jul 11 12:59:03 2013 -0400 132.2 +++ b/src/share/vm/classfile/vmSymbols.hpp Mon Jul 15 11:07:03 2013 +0100 132.3 @@ -771,6 +771,17 @@ 132.4 do_name( decrypt_name, "decrypt") \ 132.5 do_signature(byteArray_int_int_byteArray_int_signature, "([BII[BI)V") \ 132.6 \ 132.7 + /* support for java.util.zip */ \ 132.8 + do_class(java_util_zip_CRC32, "java/util/zip/CRC32") \ 132.9 + do_intrinsic(_updateCRC32, java_util_zip_CRC32, update_name, int2_int_signature, F_SN) \ 132.10 + do_name( update_name, "update") \ 132.11 + do_intrinsic(_updateBytesCRC32, java_util_zip_CRC32, updateBytes_name, updateBytes_signature, F_SN) \ 132.12 + do_name( updateBytes_name, "updateBytes") \ 132.13 + do_signature(updateBytes_signature, "(I[BII)I") \ 132.14 + do_intrinsic(_updateByteBufferCRC32, java_util_zip_CRC32, updateByteBuffer_name, updateByteBuffer_signature, F_SN) \ 132.15 + do_name( updateByteBuffer_name, "updateByteBuffer") \ 132.16 + do_signature(updateByteBuffer_signature, "(IJII)I") \ 132.17 + \ 132.18 /* support for sun.misc.Unsafe */ \ 132.19 do_class(sun_misc_Unsafe, "sun/misc/Unsafe") \ 132.20 \
133.1 --- a/src/share/vm/code/debugInfo.hpp Thu Jul 11 12:59:03 2013 -0400 133.2 +++ b/src/share/vm/code/debugInfo.hpp Mon Jul 15 11:07:03 2013 +0100 133.3 @@ -274,7 +274,7 @@ 133.4 Method* read_method() { 133.5 Method* o = (Method*)(code()->metadata_at(read_int())); 133.6 assert(o == NULL || 133.7 - o->is_metadata(), "meta data only"); 133.8 + o->is_metaspace_object(), "meta data only"); 133.9 return o; 133.10 } 133.11 ScopeValue* read_object_value();
134.1 --- a/src/share/vm/code/dependencies.cpp Thu Jul 11 12:59:03 2013 -0400 134.2 +++ b/src/share/vm/code/dependencies.cpp Mon Jul 15 11:07:03 2013 +0100 134.3 @@ -655,8 +655,8 @@ 134.4 } else { 134.5 o = _deps->oop_recorder()->metadata_at(i); 134.6 } 134.7 - assert(o == NULL || o->is_metadata(), 134.8 - err_msg("Should be perm " PTR_FORMAT, o)); 134.9 + assert(o == NULL || o->is_metaspace_object(), 134.10 + err_msg("Should be metadata " PTR_FORMAT, o)); 134.11 return o; 134.12 } 134.13 134.14 @@ -989,7 +989,7 @@ 134.15 assert(changes.involves_context(context_type), "irrelevant dependency"); 134.16 Klass* new_type = changes.new_type(); 134.17 134.18 - count_find_witness_calls(); 134.19 + (void)count_find_witness_calls(); 134.20 NOT_PRODUCT(deps_find_witness_singles++); 134.21 134.22 // Current thread must be in VM (not native mode, as in CI):
135.1 --- a/src/share/vm/code/nmethod.cpp Thu Jul 11 12:59:03 2013 -0400 135.2 +++ b/src/share/vm/code/nmethod.cpp Mon Jul 15 11:07:03 2013 +0100 135.3 @@ -1081,11 +1081,6 @@ 135.4 metadata_Relocation* reloc = iter.metadata_reloc(); 135.5 reloc->fix_metadata_relocation(); 135.6 } 135.7 - 135.8 - // There must not be any interfering patches or breakpoints. 135.9 - assert(!(iter.type() == relocInfo::breakpoint_type 135.10 - && iter.breakpoint_reloc()->active()), 135.11 - "no active breakpoint"); 135.12 } 135.13 } 135.14 135.15 @@ -2615,7 +2610,8 @@ 135.16 relocation_begin()-1+ip[1]); 135.17 for (; ip < index_end; ip++) 135.18 tty->print_cr(" (%d ?)", ip[0]); 135.19 - tty->print_cr(" @" INTPTR_FORMAT ": index_size=%d", ip, *ip++); 135.20 + tty->print_cr(" @" INTPTR_FORMAT ": index_size=%d", ip, *ip); 135.21 + ip++; 135.22 tty->print_cr("reloc_end @" INTPTR_FORMAT ":", ip); 135.23 } 135.24 }
136.1 --- a/src/share/vm/code/relocInfo.cpp Thu Jul 11 12:59:03 2013 -0400 136.2 +++ b/src/share/vm/code/relocInfo.cpp Mon Jul 15 11:07:03 2013 +0100 136.3 @@ -338,31 +338,6 @@ 136.4 _limit = limit; 136.5 } 136.6 136.7 - 136.8 -void PatchingRelocIterator:: prepass() { 136.9 - // turn breakpoints off during patching 136.10 - _init_state = (*this); // save cursor 136.11 - while (next()) { 136.12 - if (type() == relocInfo::breakpoint_type) { 136.13 - breakpoint_reloc()->set_active(false); 136.14 - } 136.15 - } 136.16 - (RelocIterator&)(*this) = _init_state; // reset cursor for client 136.17 -} 136.18 - 136.19 - 136.20 -void PatchingRelocIterator:: postpass() { 136.21 - // turn breakpoints back on after patching 136.22 - (RelocIterator&)(*this) = _init_state; // reset cursor again 136.23 - while (next()) { 136.24 - if (type() == relocInfo::breakpoint_type) { 136.25 - breakpoint_Relocation* bpt = breakpoint_reloc(); 136.26 - bpt->set_active(bpt->enabled()); 136.27 - } 136.28 - } 136.29 -} 136.30 - 136.31 - 136.32 // All the strange bit-encodings are in here. 136.33 // The idea is to encode relocation data which are small integers 136.34 // very efficiently (a single extra halfword). Larger chunks of 136.35 @@ -704,51 +679,6 @@ 136.36 _target = address_from_scaled_offset(offset, base); 136.37 } 136.38 136.39 - 136.40 -void breakpoint_Relocation::pack_data_to(CodeSection* dest) { 136.41 - short* p = (short*) dest->locs_end(); 136.42 - address point = dest->locs_point(); 136.43 - 136.44 - *p++ = _bits; 136.45 - 136.46 - assert(_target != NULL, "sanity"); 136.47 - 136.48 - if (internal()) normalize_address(_target, dest); 136.49 - 136.50 - jint target_bits = 136.51 - (jint)( internal() ? 
scaled_offset (_target, point) 136.52 - : runtime_address_to_index(_target) ); 136.53 - if (settable()) { 136.54 - // save space for set_target later 136.55 - p = add_jint(p, target_bits); 136.56 - } else { 136.57 - p = add_var_int(p, target_bits); 136.58 - } 136.59 - 136.60 - for (int i = 0; i < instrlen(); i++) { 136.61 - // put placeholder words until bytes can be saved 136.62 - p = add_short(p, (short)0x7777); 136.63 - } 136.64 - 136.65 - dest->set_locs_end((relocInfo*) p); 136.66 -} 136.67 - 136.68 - 136.69 -void breakpoint_Relocation::unpack_data() { 136.70 - _bits = live_bits(); 136.71 - 136.72 - int targetlen = datalen() - 1 - instrlen(); 136.73 - jint target_bits = 0; 136.74 - if (targetlen == 0) target_bits = 0; 136.75 - else if (targetlen == 1) target_bits = *(data()+1); 136.76 - else if (targetlen == 2) target_bits = relocInfo::jint_from_data(data()+1); 136.77 - else { ShouldNotReachHere(); } 136.78 - 136.79 - _target = internal() ? address_from_scaled_offset(target_bits, addr()) 136.80 - : index_to_runtime_address (target_bits); 136.81 -} 136.82 - 136.83 - 136.84 //// miscellaneous methods 136.85 oop* oop_Relocation::oop_addr() { 136.86 int n = _oop_index; 136.87 @@ -933,81 +863,6 @@ 136.88 return target; 136.89 } 136.90 136.91 - 136.92 -breakpoint_Relocation::breakpoint_Relocation(int kind, address target, bool internal) { 136.93 - bool active = false; 136.94 - bool enabled = (kind == initialization); 136.95 - bool removable = (kind != safepoint); 136.96 - bool settable = (target == NULL); 136.97 - 136.98 - int bits = kind; 136.99 - if (enabled) bits |= enabled_state; 136.100 - if (internal) bits |= internal_attr; 136.101 - if (removable) bits |= removable_attr; 136.102 - if (settable) bits |= settable_attr; 136.103 - 136.104 - _bits = bits | high_bit; 136.105 - _target = target; 136.106 - 136.107 - assert(this->kind() == kind, "kind encoded"); 136.108 - assert(this->enabled() == enabled, "enabled encoded"); 136.109 - assert(this->active() == active, 
"active encoded"); 136.110 - assert(this->internal() == internal, "internal encoded"); 136.111 - assert(this->removable() == removable, "removable encoded"); 136.112 - assert(this->settable() == settable, "settable encoded"); 136.113 -} 136.114 - 136.115 - 136.116 -address breakpoint_Relocation::target() const { 136.117 - return _target; 136.118 -} 136.119 - 136.120 - 136.121 -void breakpoint_Relocation::set_target(address x) { 136.122 - assert(settable(), "must be settable"); 136.123 - jint target_bits = 136.124 - (jint)(internal() ? scaled_offset (x, addr()) 136.125 - : runtime_address_to_index(x)); 136.126 - short* p = &live_bits() + 1; 136.127 - p = add_jint(p, target_bits); 136.128 - assert(p == instrs(), "new target must fit"); 136.129 - _target = x; 136.130 -} 136.131 - 136.132 - 136.133 -void breakpoint_Relocation::set_enabled(bool b) { 136.134 - if (enabled() == b) return; 136.135 - 136.136 - if (b) { 136.137 - set_bits(bits() | enabled_state); 136.138 - } else { 136.139 - set_active(false); // remove the actual breakpoint insn, if any 136.140 - set_bits(bits() & ~enabled_state); 136.141 - } 136.142 -} 136.143 - 136.144 - 136.145 -void breakpoint_Relocation::set_active(bool b) { 136.146 - assert(!b || enabled(), "cannot activate a disabled breakpoint"); 136.147 - 136.148 - if (active() == b) return; 136.149 - 136.150 - // %%% should probably seize a lock here (might not be the right lock) 136.151 - //MutexLockerEx ml_patch(Patching_lock, true); 136.152 - //if (active() == b) return; // recheck state after locking 136.153 - 136.154 - if (b) { 136.155 - set_bits(bits() | active_state); 136.156 - if (instrlen() == 0) 136.157 - fatal("breakpoints in original code must be undoable"); 136.158 - pd_swap_in_breakpoint (addr(), instrs(), instrlen()); 136.159 - } else { 136.160 - set_bits(bits() & ~active_state); 136.161 - pd_swap_out_breakpoint(addr(), instrs(), instrlen()); 136.162 - } 136.163 -} 136.164 - 136.165 - 136.166 
//--------------------------------------------------------------------------------- 136.167 // Non-product code 136.168
137.1 --- a/src/share/vm/code/relocInfo.hpp Thu Jul 11 12:59:03 2013 -0400 137.2 +++ b/src/share/vm/code/relocInfo.hpp Mon Jul 15 11:07:03 2013 +0100 137.3 @@ -49,9 +49,6 @@ 137.4 // RelocIterator 137.5 // A StackObj which iterates over the relocations associated with 137.6 // a range of code addresses. Can be used to operate a copy of code. 137.7 -// PatchingRelocIterator 137.8 -// Specialized subtype of RelocIterator which removes breakpoints 137.9 -// temporarily during iteration, then restores them. 137.10 // BoundRelocation 137.11 // An _internal_ type shared by packers and unpackers of relocations. 137.12 // It pastes together a RelocationHolder with some pointers into 137.13 @@ -204,15 +201,6 @@ 137.14 // immediate field must not straddle a unit of memory coherence. 137.15 // //%note reloc_3 137.16 // 137.17 -// relocInfo::breakpoint_type -- a conditional breakpoint in the code 137.18 -// Value: none 137.19 -// Instruction types: any whatsoever 137.20 -// Data: [b [T]t i...] 137.21 -// The b is a bit-packed word representing the breakpoint's attributes. 137.22 -// The t is a target address which the breakpoint calls (when it is enabled). 137.23 -// The i... is a place to store one or two instruction words overwritten 137.24 -// by a trap, so that the breakpoint may be subsequently removed. 
137.25 -// 137.26 // relocInfo::static_stub_type -- an extra stub for each static_call_type 137.27 // Value: none 137.28 // Instruction types: a virtual call: { set_oop; jump; } 137.29 @@ -271,8 +259,8 @@ 137.30 section_word_type = 9, // internal, but a cross-section reference 137.31 poll_type = 10, // polling instruction for safepoints 137.32 poll_return_type = 11, // polling instruction for safepoints at return 137.33 - breakpoint_type = 12, // an initialization barrier or safepoint 137.34 - metadata_type = 13, // metadata that used to be oops 137.35 + metadata_type = 12, // metadata that used to be oops 137.36 + yet_unused_type_1 = 13, // Still unused 137.37 yet_unused_type_2 = 14, // Still unused 137.38 data_prefix_tag = 15, // tag for a prefix (carries data arguments) 137.39 type_mask = 15 // A mask which selects only the above values 137.40 @@ -312,7 +300,6 @@ 137.41 visitor(internal_word) \ 137.42 visitor(poll) \ 137.43 visitor(poll_return) \ 137.44 - visitor(breakpoint) \ 137.45 visitor(section_word) \ 137.46 137.47 137.48 @@ -454,7 +441,7 @@ 137.49 public: 137.50 enum { 137.51 // Conservatively large estimate of maximum length (in shorts) 137.52 - // of any relocation record (probably breakpoints are largest). 137.53 + // of any relocation record. 137.54 // Extended format is length prefix, data words, and tag/offset suffix. 
137.55 length_limit = 1 + 1 + (3*BytesPerWord/BytesPerShort) + 1, 137.56 have_format = format_width > 0 137.57 @@ -571,8 +558,6 @@ 137.58 137.59 void initialize(nmethod* nm, address begin, address limit); 137.60 137.61 - friend class PatchingRelocIterator; 137.62 - // make an uninitialized one, for PatchingRelocIterator: 137.63 RelocIterator() { initialize_misc(); } 137.64 137.65 public: 137.66 @@ -779,9 +764,6 @@ 137.67 void pd_verify_data_value (address x, intptr_t off) { pd_set_data_value(x, off, true); } 137.68 address pd_call_destination (address orig_addr = NULL); 137.69 void pd_set_call_destination (address x); 137.70 - void pd_swap_in_breakpoint (address x, short* instrs, int instrlen); 137.71 - void pd_swap_out_breakpoint (address x, short* instrs, int instrlen); 137.72 - static int pd_breakpoint_size (); 137.73 137.74 // this extracts the address of an address in the code stream instead of the reloc data 137.75 address* pd_address_in_code (); 137.76 @@ -1302,87 +1284,6 @@ 137.77 void fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest); 137.78 }; 137.79 137.80 - 137.81 -class breakpoint_Relocation : public Relocation { 137.82 - relocInfo::relocType type() { return relocInfo::breakpoint_type; } 137.83 - 137.84 - enum { 137.85 - // attributes which affect the interpretation of the data: 137.86 - removable_attr = 0x0010, // buffer [i...] 
allows for undoing the trap 137.87 - internal_attr = 0x0020, // the target is an internal addr (local stub) 137.88 - settable_attr = 0x0040, // the target is settable 137.89 - 137.90 - // states which can change over time: 137.91 - enabled_state = 0x0100, // breakpoint must be active in running code 137.92 - active_state = 0x0200, // breakpoint instruction actually in code 137.93 - 137.94 - kind_mask = 0x000F, // mask for extracting kind 137.95 - high_bit = 0x4000 // extra bit which is always set 137.96 - }; 137.97 - 137.98 - public: 137.99 - enum { 137.100 - // kinds: 137.101 - initialization = 1, 137.102 - safepoint = 2 137.103 - }; 137.104 - 137.105 - // If target is NULL, 32 bits are reserved for a later set_target(). 137.106 - static RelocationHolder spec(int kind, address target = NULL, bool internal_target = false) { 137.107 - RelocationHolder rh = newHolder(); 137.108 - new(rh) breakpoint_Relocation(kind, target, internal_target); 137.109 - return rh; 137.110 - } 137.111 - 137.112 - private: 137.113 - // We require every bits value to NOT to fit into relocInfo::datalen_width, 137.114 - // because we are going to actually store state in the reloc, and so 137.115 - // cannot allow it to be compressed (and hence copied by the iterator). 137.116 - 137.117 - short _bits; // bit-encoded kind, attrs, & state 137.118 - address _target; 137.119 - 137.120 - breakpoint_Relocation(int kind, address target, bool internal_target); 137.121 - 137.122 - friend class RelocIterator; 137.123 - breakpoint_Relocation() { } 137.124 - 137.125 - short bits() const { return _bits; } 137.126 - short& live_bits() const { return data()[0]; } 137.127 - short* instrs() const { return data() + datalen() - instrlen(); } 137.128 - int instrlen() const { return removable() ? 
pd_breakpoint_size() : 0; } 137.129 - 137.130 - void set_bits(short x) { 137.131 - assert(live_bits() == _bits, "must be the only mutator of reloc info"); 137.132 - live_bits() = _bits = x; 137.133 - } 137.134 - 137.135 - public: 137.136 - address target() const; 137.137 - void set_target(address x); 137.138 - 137.139 - int kind() const { return bits() & kind_mask; } 137.140 - bool enabled() const { return (bits() & enabled_state) != 0; } 137.141 - bool active() const { return (bits() & active_state) != 0; } 137.142 - bool internal() const { return (bits() & internal_attr) != 0; } 137.143 - bool removable() const { return (bits() & removable_attr) != 0; } 137.144 - bool settable() const { return (bits() & settable_attr) != 0; } 137.145 - 137.146 - void set_enabled(bool b); // to activate, you must also say set_active 137.147 - void set_active(bool b); // actually inserts bpt (must be enabled 1st) 137.148 - 137.149 - // data is packed as 16 bits, followed by the target (1 or 2 words), followed 137.150 - // if necessary by empty storage for saving away original instruction bytes. 
137.151 - void pack_data_to(CodeSection* dest); 137.152 - void unpack_data(); 137.153 - 137.154 - // during certain operations, breakpoints must be out of the way: 137.155 - void fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) { 137.156 - assert(!active(), "cannot perform relocation on enabled breakpoints"); 137.157 - } 137.158 -}; 137.159 - 137.160 - 137.161 // We know all the xxx_Relocation classes, so now we can define these: 137.162 #define EACH_CASE(name) \ 137.163 inline name##_Relocation* RelocIterator::name##_reloc() { \ 137.164 @@ -1401,25 +1302,4 @@ 137.165 initialize(nm, begin, limit); 137.166 } 137.167 137.168 -// if you are going to patch code, you should use this subclass of 137.169 -// RelocIterator 137.170 -class PatchingRelocIterator : public RelocIterator { 137.171 - private: 137.172 - RelocIterator _init_state; 137.173 - 137.174 - void prepass(); // deactivates all breakpoints 137.175 - void postpass(); // reactivates all enabled breakpoints 137.176 - 137.177 - // do not copy these puppies; it would have unpredictable side effects 137.178 - // these are private and have no bodies defined because they should not be called 137.179 - PatchingRelocIterator(const RelocIterator&); 137.180 - void operator=(const RelocIterator&); 137.181 - 137.182 - public: 137.183 - PatchingRelocIterator(nmethod* nm, address begin = NULL, address limit = NULL) 137.184 - : RelocIterator(nm, begin, limit) { prepass(); } 137.185 - 137.186 - ~PatchingRelocIterator() { postpass(); } 137.187 -}; 137.188 - 137.189 #endif // SHARE_VM_CODE_RELOCINFO_HPP
138.1 --- a/src/share/vm/gc_implementation/g1/g1CardCounts.cpp Thu Jul 11 12:59:03 2013 -0400 138.2 +++ b/src/share/vm/gc_implementation/g1/g1CardCounts.cpp Mon Jul 15 11:07:03 2013 +0100 138.3 @@ -152,12 +152,9 @@ 138.4 if (card_num < _committed_max_card_num) { 138.5 count = (uint) _card_counts[card_num]; 138.6 if (count < G1ConcRSHotCardLimit) { 138.7 - _card_counts[card_num] += 1; 138.8 + _card_counts[card_num] = 138.9 + (jubyte)(MIN2((uintx)(_card_counts[card_num] + 1), G1ConcRSHotCardLimit)); 138.10 } 138.11 - assert(_card_counts[card_num] <= G1ConcRSHotCardLimit, 138.12 - err_msg("Refinement count overflow? " 138.13 - "new count: "UINT32_FORMAT, 138.14 - (uint) _card_counts[card_num])); 138.15 } 138.16 } 138.17 return count;
139.1 --- a/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp Thu Jul 11 12:59:03 2013 -0400 139.2 +++ b/src/share/vm/gc_implementation/g1/g1SATBCardTableModRefBS.cpp Mon Jul 15 11:07:03 2013 +0100 139.3 @@ -47,7 +47,7 @@ 139.4 JavaThread* jt = (JavaThread*)thr; 139.5 jt->satb_mark_queue().enqueue(pre_val); 139.6 } else { 139.7 - MutexLocker x(Shared_SATB_Q_lock); 139.8 + MutexLockerEx x(Shared_SATB_Q_lock, Mutex::_no_safepoint_check_flag); 139.9 JavaThread::satb_mark_queue_set().shared_satb_queue()->enqueue(pre_val); 139.10 } 139.11 }
140.1 --- a/src/share/vm/gc_implementation/g1/heapRegion.cpp Thu Jul 11 12:59:03 2013 -0400 140.2 +++ b/src/share/vm/gc_implementation/g1/heapRegion.cpp Mon Jul 15 11:07:03 2013 +0100 140.3 @@ -798,7 +798,7 @@ 140.4 if (!g1->is_obj_dead_cond(obj, this, vo)) { 140.5 if (obj->is_oop()) { 140.6 Klass* klass = obj->klass(); 140.7 - if (!klass->is_metadata()) { 140.8 + if (!klass->is_metaspace_object()) { 140.9 gclog_or_tty->print_cr("klass "PTR_FORMAT" of object "PTR_FORMAT" " 140.10 "not metadata", klass, obj); 140.11 *failures = true;
141.1 --- a/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.cpp Thu Jul 11 12:59:03 2013 -0400 141.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.cpp Mon Jul 15 11:07:03 2013 +0100 141.3 @@ -70,6 +70,17 @@ 141.4 _virtual_space = vs; 141.5 } 141.6 141.7 +void ASPSOldGen::initialize_work(const char* perf_data_name, int level) { 141.8 + 141.9 + PSOldGen::initialize_work(perf_data_name, level); 141.10 + 141.11 + // The old gen can grow to gen_size_limit(). _reserve reflects only 141.12 + // the current maximum that can be committed. 141.13 + assert(_reserved.byte_size() <= gen_size_limit(), "Consistency check"); 141.14 + 141.15 + initialize_performance_counters(perf_data_name, level); 141.16 +} 141.17 + 141.18 void ASPSOldGen::reset_after_change() { 141.19 _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(), 141.20 (HeapWord*)virtual_space()->high_boundary());
142.1 --- a/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.hpp Thu Jul 11 12:59:03 2013 -0400 142.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/asPSOldGen.hpp Mon Jul 15 11:07:03 2013 +0100 142.3 @@ -50,6 +50,8 @@ 142.4 size_t max_gen_size() { return _reserved.byte_size(); } 142.5 void set_gen_size_limit(size_t v) { _gen_size_limit = v; } 142.6 142.7 + virtual void initialize_work(const char* perf_data_name, int level); 142.8 + 142.9 // After a shrink or expand reset the generation 142.10 void reset_after_change(); 142.11
143.1 --- a/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp Thu Jul 11 12:59:03 2013 -0400 143.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp Mon Jul 15 11:07:03 2013 +0100 143.3 @@ -1,5 +1,5 @@ 143.4 /* 143.5 - * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved. 143.6 + * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved. 143.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 143.8 * 143.9 * This code is free software; you can redistribute it and/or modify it 143.10 @@ -565,11 +565,9 @@ 143.11 if(new_start_aligned < new_end_for_commit) { 143.12 MemRegion new_committed = 143.13 MemRegion(new_start_aligned, new_end_for_commit); 143.14 - if (!os::commit_memory((char*)new_committed.start(), 143.15 - new_committed.byte_size())) { 143.16 - vm_exit_out_of_memory(new_committed.byte_size(), OOM_MMAP_ERROR, 143.17 - "card table expansion"); 143.18 - } 143.19 + os::commit_memory_or_exit((char*)new_committed.start(), 143.20 + new_committed.byte_size(), !ExecMem, 143.21 + "card table expansion"); 143.22 } 143.23 result = true; 143.24 } else if (new_start_aligned > cur_committed.start()) {
144.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp Thu Jul 11 12:59:03 2013 -0400 144.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psAdaptiveSizePolicy.cpp Mon Jul 15 11:07:03 2013 +0100 144.3 @@ -1250,14 +1250,13 @@ 144.4 avg_promoted()->deviation()); 144.5 } 144.6 144.7 - gclog_or_tty->print( " avg_promoted_padded_avg: %f" 144.8 + gclog_or_tty->print_cr( " avg_promoted_padded_avg: %f" 144.9 " avg_pretenured_padded_avg: %f" 144.10 " tenuring_thresh: %d" 144.11 " target_size: " SIZE_FORMAT, 144.12 avg_promoted()->padded_average(), 144.13 _avg_pretenured->padded_average(), 144.14 tenuring_threshold, target_size); 144.15 - tty->cr(); 144.16 } 144.17 144.18 set_survivor_size(target_size); 144.19 @@ -1279,7 +1278,7 @@ 144.20 avg_promoted()->sample(promoted + _avg_pretenured->padded_average()); 144.21 144.22 if (PrintAdaptiveSizePolicy) { 144.23 - gclog_or_tty->print( 144.24 + gclog_or_tty->print_cr( 144.25 "AdaptiveSizePolicy::update_averages:" 144.26 " survived: " SIZE_FORMAT 144.27 " promoted: " SIZE_FORMAT
145.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp Thu Jul 11 12:59:03 2013 -0400 145.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psOldGen.hpp Mon Jul 15 11:07:03 2013 +0100 145.3 @@ -110,7 +110,7 @@ 145.4 virtual void initialize(ReservedSpace rs, size_t alignment, 145.5 const char* perf_data_name, int level); 145.6 void initialize_virtual_space(ReservedSpace rs, size_t alignment); 145.7 - void initialize_work(const char* perf_data_name, int level); 145.8 + virtual void initialize_work(const char* perf_data_name, int level); 145.9 virtual void initialize_performance_counters(const char* perf_data_name, int level); 145.10 145.11 MemRegion reserved() const { return _reserved; }
146.1 --- a/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.cpp Thu Jul 11 12:59:03 2013 -0400 146.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/psVirtualspace.cpp Mon Jul 15 11:07:03 2013 +0100 146.3 @@ -1,5 +1,5 @@ 146.4 /* 146.5 - * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. 146.6 + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. 146.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 146.8 * 146.9 * This code is free software; you can redistribute it and/or modify it 146.10 @@ -101,7 +101,8 @@ 146.11 } 146.12 146.13 char* const base_addr = committed_high_addr(); 146.14 - bool result = special() || os::commit_memory(base_addr, bytes, alignment()); 146.15 + bool result = special() || 146.16 + os::commit_memory(base_addr, bytes, alignment(), !ExecMem); 146.17 if (result) { 146.18 _committed_high_addr += bytes; 146.19 } 146.20 @@ -154,7 +155,7 @@ 146.21 if (tmp_bytes > 0) { 146.22 char* const commit_base = committed_high_addr(); 146.23 if (other_space->special() || 146.24 - os::commit_memory(commit_base, tmp_bytes, alignment())) { 146.25 + os::commit_memory(commit_base, tmp_bytes, alignment(), !ExecMem)) { 146.26 // Reduce the reserved region in the other space. 
146.27 other_space->set_reserved(other_space->reserved_low_addr() + tmp_bytes, 146.28 other_space->reserved_high_addr(), 146.29 @@ -269,7 +270,8 @@ 146.30 } 146.31 146.32 char* const base_addr = committed_low_addr() - bytes; 146.33 - bool result = special() || os::commit_memory(base_addr, bytes, alignment()); 146.34 + bool result = special() || 146.35 + os::commit_memory(base_addr, bytes, alignment(), !ExecMem); 146.36 if (result) { 146.37 _committed_low_addr -= bytes; 146.38 } 146.39 @@ -322,7 +324,7 @@ 146.40 if (tmp_bytes > 0) { 146.41 char* const commit_base = committed_low_addr() - tmp_bytes; 146.42 if (other_space->special() || 146.43 - os::commit_memory(commit_base, tmp_bytes, alignment())) { 146.44 + os::commit_memory(commit_base, tmp_bytes, alignment(), !ExecMem)) { 146.45 // Reduce the reserved region in the other space. 146.46 other_space->set_reserved(other_space->reserved_low_addr(), 146.47 other_space->reserved_high_addr() - tmp_bytes,
147.1 --- a/src/share/vm/interpreter/abstractInterpreter.hpp Thu Jul 11 12:59:03 2013 -0400 147.2 +++ b/src/share/vm/interpreter/abstractInterpreter.hpp Mon Jul 15 11:07:03 2013 +0100 147.3 @@ -1,5 +1,5 @@ 147.4 /* 147.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. 147.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. 147.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 147.8 * 147.9 * This code is free software; you can redistribute it and/or modify it 147.10 @@ -102,6 +102,9 @@ 147.11 java_lang_math_pow, // implementation of java.lang.Math.pow (x,y) 147.12 java_lang_math_exp, // implementation of java.lang.Math.exp (x) 147.13 java_lang_ref_reference_get, // implementation of java.lang.ref.Reference.get() 147.14 + java_util_zip_CRC32_update, // implementation of java.util.zip.CRC32.update() 147.15 + java_util_zip_CRC32_updateBytes, // implementation of java.util.zip.CRC32.updateBytes() 147.16 + java_util_zip_CRC32_updateByteBuffer, // implementation of java.util.zip.CRC32.updateByteBuffer() 147.17 number_of_method_entries, 147.18 invalid = -1 147.19 };
148.1 --- a/src/share/vm/interpreter/bytecodeInterpreter.cpp Thu Jul 11 12:59:03 2013 -0400 148.2 +++ b/src/share/vm/interpreter/bytecodeInterpreter.cpp Mon Jul 15 11:07:03 2013 +0100 148.3 @@ -481,9 +481,9 @@ 148.4 // So we have a second version of the assertion which handles the case where EnableInvokeDynamic was 148.5 // switched off because of the wrong classes. 148.6 if (EnableInvokeDynamic || FLAG_IS_CMDLINE(EnableInvokeDynamic)) { 148.7 - assert(abs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit"); 148.8 + assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit"); 148.9 } else { 148.10 - const int extra_stack_entries = Method::extra_stack_entries_for_indy; 148.11 + const int extra_stack_entries = Method::extra_stack_entries_for_jsr292; 148.12 assert(labs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + extra_stack_entries 148.13 + 1), "bad stack limit"); 148.14 } 148.15 @@ -1581,7 +1581,7 @@ 148.16 #define ARRAY_LOADTO32(T, T2, format, stackRes, extra) \ 148.17 { \ 148.18 ARRAY_INTRO(-2); \ 148.19 - extra; \ 148.20 + (void)extra; \ 148.21 SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), \ 148.22 -2); \ 148.23 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1); \ 148.24 @@ -1592,8 +1592,8 @@ 148.25 { \ 148.26 ARRAY_INTRO(-2); \ 148.27 SET_ ## stackRes(*(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)), -1); \ 148.28 - extra; \ 148.29 - UPDATE_PC_AND_CONTINUE(1); \ 148.30 + (void)extra; \ 148.31 + UPDATE_PC_AND_CONTINUE(1); \ 148.32 } 148.33 148.34 CASE(_iaload): 148.35 @@ -1617,7 +1617,7 @@ 148.36 #define ARRAY_STOREFROM32(T, T2, format, stackSrc, extra) \ 148.37 { \ 148.38 ARRAY_INTRO(-3); \ 148.39 - extra; \ 148.40 + (void)extra; \ 148.41 *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \ 148.42 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -3); \ 148.43 } 148.44 @@ -1626,7 +1626,7 @@ 
148.45 #define ARRAY_STOREFROM64(T, T2, stackSrc, extra) \ 148.46 { \ 148.47 ARRAY_INTRO(-4); \ 148.48 - extra; \ 148.49 + (void)extra; \ 148.50 *(T2 *)(((address) arrObj->base(T)) + index * sizeof(T2)) = stackSrc( -1); \ 148.51 UPDATE_PC_AND_TOS_AND_CONTINUE(1, -4); \ 148.52 } 148.53 @@ -2233,7 +2233,7 @@ 148.54 } 148.55 148.56 Method* method = cache->f1_as_method(); 148.57 - VERIFY_OOP(method); 148.58 + if (VerifyOops) method->verify(); 148.59 148.60 if (cache->has_appendix()) { 148.61 ConstantPool* constants = METHOD->constants(); 148.62 @@ -2265,8 +2265,7 @@ 148.63 } 148.64 148.65 Method* method = cache->f1_as_method(); 148.66 - 148.67 - VERIFY_OOP(method); 148.68 + if (VerifyOops) method->verify(); 148.69 148.70 if (cache->has_appendix()) { 148.71 ConstantPool* constants = METHOD->constants();
149.1 --- a/src/share/vm/interpreter/interpreter.cpp Thu Jul 11 12:59:03 2013 -0400 149.2 +++ b/src/share/vm/interpreter/interpreter.cpp Mon Jul 15 11:07:03 2013 +0100 149.3 @@ -1,5 +1,5 @@ 149.4 /* 149.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. 149.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. 149.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 149.8 * 149.9 * This code is free software; you can redistribute it and/or modify it 149.10 @@ -195,6 +195,17 @@ 149.11 return kind; 149.12 } 149.13 149.14 +#ifndef CC_INTERP 149.15 + if (UseCRC32Intrinsics && m->is_native()) { 149.16 + // Use optimized stub code for CRC32 native methods. 149.17 + switch (m->intrinsic_id()) { 149.18 + case vmIntrinsics::_updateCRC32 : return java_util_zip_CRC32_update; 149.19 + case vmIntrinsics::_updateBytesCRC32 : return java_util_zip_CRC32_updateBytes; 149.20 + case vmIntrinsics::_updateByteBufferCRC32 : return java_util_zip_CRC32_updateByteBuffer; 149.21 + } 149.22 + } 149.23 +#endif 149.24 + 149.25 // Native method? 149.26 // Note: This test must come _before_ the test for intrinsic 149.27 // methods. See also comments below. 149.28 @@ -297,6 +308,9 @@ 149.29 case java_lang_math_sqrt : tty->print("java_lang_math_sqrt" ); break; 149.30 case java_lang_math_log : tty->print("java_lang_math_log" ); break; 149.31 case java_lang_math_log10 : tty->print("java_lang_math_log10" ); break; 149.32 + case java_util_zip_CRC32_update : tty->print("java_util_zip_CRC32_update"); break; 149.33 + case java_util_zip_CRC32_updateBytes : tty->print("java_util_zip_CRC32_updateBytes"); break; 149.34 + case java_util_zip_CRC32_updateByteBuffer : tty->print("java_util_zip_CRC32_updateByteBuffer"); break; 149.35 default: 149.36 if (kind >= method_handle_invoke_FIRST && 149.37 kind <= method_handle_invoke_LAST) {
150.1 --- a/src/share/vm/interpreter/templateInterpreter.cpp Thu Jul 11 12:59:03 2013 -0400 150.2 +++ b/src/share/vm/interpreter/templateInterpreter.cpp Mon Jul 15 11:07:03 2013 +0100 150.3 @@ -1,5 +1,5 @@ 150.4 /* 150.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. 150.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. 150.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 150.8 * 150.9 * This code is free software; you can redistribute it and/or modify it 150.10 @@ -373,6 +373,12 @@ 150.11 method_entry(java_lang_math_pow ) 150.12 method_entry(java_lang_ref_reference_get) 150.13 150.14 + if (UseCRC32Intrinsics) { 150.15 + method_entry(java_util_zip_CRC32_update) 150.16 + method_entry(java_util_zip_CRC32_updateBytes) 150.17 + method_entry(java_util_zip_CRC32_updateByteBuffer) 150.18 + } 150.19 + 150.20 initialize_method_handle_entries(); 150.21 150.22 // all native method kinds (must be one contiguous block)
151.1 --- a/src/share/vm/memory/allocation.cpp Thu Jul 11 12:59:03 2013 -0400 151.2 +++ b/src/share/vm/memory/allocation.cpp Mon Jul 15 11:07:03 2013 +0100 151.3 @@ -71,13 +71,6 @@ 151.4 return MetaspaceShared::is_in_shared_space(this); 151.5 } 151.6 151.7 -bool MetaspaceObj::is_metadata() const { 151.8 - // GC Verify checks use this in guarantees. 151.9 - // TODO: either replace them with is_metaspace_object() or remove them. 151.10 - // is_metaspace_object() is slower than this test. This test doesn't 151.11 - // seem very useful for metaspace objects anymore though. 151.12 - return !Universe::heap()->is_in_reserved(this); 151.13 -} 151.14 151.15 bool MetaspaceObj::is_metaspace_object() const { 151.16 return Metaspace::contains((void*)this);
152.1 --- a/src/share/vm/memory/allocation.hpp Thu Jul 11 12:59:03 2013 -0400 152.2 +++ b/src/share/vm/memory/allocation.hpp Mon Jul 15 11:07:03 2013 +0100 152.3 @@ -264,7 +264,6 @@ 152.4 152.5 class MetaspaceObj { 152.6 public: 152.7 - bool is_metadata() const; 152.8 bool is_metaspace_object() const; // more specific test but slower 152.9 bool is_shared() const; 152.10 void print_address_on(outputStream* st) const; // nonvirtual address printing 152.11 @@ -643,8 +642,15 @@ 152.12 #define NEW_RESOURCE_ARRAY_IN_THREAD(thread, type, size)\ 152.13 (type*) resource_allocate_bytes(thread, (size) * sizeof(type)) 152.14 152.15 +#define NEW_RESOURCE_ARRAY_IN_THREAD_RETURN_NULL(thread, type, size)\ 152.16 + (type*) resource_allocate_bytes(thread, (size) * sizeof(type), AllocFailStrategy::RETURN_NULL) 152.17 + 152.18 #define REALLOC_RESOURCE_ARRAY(type, old, old_size, new_size)\ 152.19 - (type*) resource_reallocate_bytes((char*)(old), (old_size) * sizeof(type), (new_size) * sizeof(type) ) 152.20 + (type*) resource_reallocate_bytes((char*)(old), (old_size) * sizeof(type), (new_size) * sizeof(type)) 152.21 + 152.22 +#define REALLOC_RESOURCE_ARRAY_RETURN_NULL(type, old, old_size, new_size)\ 152.23 + (type*) resource_reallocate_bytes((char*)(old), (old_size) * sizeof(type),\ 152.24 + (new_size) * sizeof(type), AllocFailStrategy::RETURN_NULL) 152.25 152.26 #define FREE_RESOURCE_ARRAY(type, old, size)\ 152.27 resource_free_bytes((char*)(old), (size) * sizeof(type)) 152.28 @@ -655,28 +661,40 @@ 152.29 #define NEW_RESOURCE_OBJ(type)\ 152.30 NEW_RESOURCE_ARRAY(type, 1) 152.31 152.32 +#define NEW_RESOURCE_OBJ_RETURN_NULL(type)\ 152.33 + NEW_RESOURCE_ARRAY_RETURN_NULL(type, 1) 152.34 + 152.35 +#define NEW_C_HEAP_ARRAY3(type, size, memflags, pc, allocfail)\ 152.36 + (type*) AllocateHeap(size * sizeof(type), memflags, pc, allocfail) 152.37 + 152.38 +#define NEW_C_HEAP_ARRAY2(type, size, memflags, pc)\ 152.39 + (type*) (AllocateHeap((size) * sizeof(type), memflags, pc)) 152.40 + 152.41 
#define NEW_C_HEAP_ARRAY(type, size, memflags)\ 152.42 (type*) (AllocateHeap((size) * sizeof(type), memflags)) 152.43 152.44 +#define NEW_C_HEAP_ARRAY2_RETURN_NULL(type, size, memflags, pc)\ 152.45 + NEW_C_HEAP_ARRAY3(type, size, memflags, pc, AllocFailStrategy::RETURN_NULL) 152.46 + 152.47 +#define NEW_C_HEAP_ARRAY_RETURN_NULL(type, size, memflags)\ 152.48 + NEW_C_HEAP_ARRAY3(type, size, memflags, (address)0, AllocFailStrategy::RETURN_NULL) 152.49 + 152.50 #define REALLOC_C_HEAP_ARRAY(type, old, size, memflags)\ 152.51 (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags)) 152.52 152.53 +#define REALLOC_C_HEAP_ARRAY_RETURN_NULL(type, old, size, memflags)\ 152.54 + (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags, AllocFailStrategy::RETURN_NULL)) 152.55 + 152.56 #define FREE_C_HEAP_ARRAY(type, old, memflags) \ 152.57 FreeHeap((char*)(old), memflags) 152.58 152.59 -#define NEW_C_HEAP_ARRAY2(type, size, memflags, pc)\ 152.60 - (type*) (AllocateHeap((size) * sizeof(type), memflags, pc)) 152.61 - 152.62 -#define REALLOC_C_HEAP_ARRAY2(type, old, size, memflags, pc)\ 152.63 - (type*) (ReallocateHeap((char*)old, (size) * sizeof(type), memflags, pc)) 152.64 - 152.65 -#define NEW_C_HEAP_ARRAY3(type, size, memflags, pc, allocfail) \ 152.66 - (type*) AllocateHeap(size * sizeof(type), memflags, pc, allocfail) 152.67 - 152.68 // allocate type in heap without calling ctor 152.69 #define NEW_C_HEAP_OBJ(type, memflags)\ 152.70 NEW_C_HEAP_ARRAY(type, 1, memflags) 152.71 152.72 +#define NEW_C_HEAP_OBJ_RETURN_NULL(type, memflags)\ 152.73 + NEW_C_HEAP_ARRAY_RETURN_NULL(type, 1, memflags) 152.74 + 152.75 // deallocate obj of type in heap without calling dtor 152.76 #define FREE_C_HEAP_OBJ(objname, memflags)\ 152.77 FreeHeap((char*)objname, memflags); 152.78 @@ -721,13 +739,21 @@ 152.79 // is set so that we always use malloc except for Solaris where we set the 152.80 // limit to get mapped memory. 
152.81 template <class E, MEMFLAGS F> 152.82 -class ArrayAllocator : StackObj { 152.83 +class ArrayAllocator VALUE_OBJ_CLASS_SPEC { 152.84 char* _addr; 152.85 bool _use_malloc; 152.86 size_t _size; 152.87 + bool _free_in_destructor; 152.88 public: 152.89 - ArrayAllocator() : _addr(NULL), _use_malloc(false), _size(0) { } 152.90 - ~ArrayAllocator() { free(); } 152.91 + ArrayAllocator(bool free_in_destructor = true) : 152.92 + _addr(NULL), _use_malloc(false), _size(0), _free_in_destructor(free_in_destructor) { } 152.93 + 152.94 + ~ArrayAllocator() { 152.95 + if (_free_in_destructor) { 152.96 + free(); 152.97 + } 152.98 + } 152.99 + 152.100 E* allocate(size_t length); 152.101 void free(); 152.102 };
153.1 --- a/src/share/vm/memory/allocation.inline.hpp Thu Jul 11 12:59:03 2013 -0400 153.2 +++ b/src/share/vm/memory/allocation.inline.hpp Mon Jul 15 11:07:03 2013 +0100 153.3 @@ -146,10 +146,7 @@ 153.4 vm_exit_out_of_memory(_size, OOM_MMAP_ERROR, "Allocator (reserve)"); 153.5 } 153.6 153.7 - bool success = os::commit_memory(_addr, _size, false /* executable */); 153.8 - if (!success) { 153.9 - vm_exit_out_of_memory(_size, OOM_MMAP_ERROR, "Allocator (commit)"); 153.10 - } 153.11 + os::commit_memory_or_exit(_addr, _size, !ExecMem, "Allocator (commit)"); 153.12 153.13 return (E*)_addr; 153.14 }
154.1 --- a/src/share/vm/memory/cardTableModRefBS.cpp Thu Jul 11 12:59:03 2013 -0400 154.2 +++ b/src/share/vm/memory/cardTableModRefBS.cpp Mon Jul 15 11:07:03 2013 +0100 154.3 @@ -110,11 +110,8 @@ 154.4 jbyte* guard_card = &_byte_map[_guard_index]; 154.5 uintptr_t guard_page = align_size_down((uintptr_t)guard_card, _page_size); 154.6 _guard_region = MemRegion((HeapWord*)guard_page, _page_size); 154.7 - if (!os::commit_memory((char*)guard_page, _page_size, _page_size)) { 154.8 - // Do better than this for Merlin 154.9 - vm_exit_out_of_memory(_page_size, OOM_MMAP_ERROR, "card table last card"); 154.10 - } 154.11 - 154.12 + os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size, 154.13 + !ExecMem, "card table last card"); 154.14 *guard_card = last_card; 154.15 154.16 _lowest_non_clean = 154.17 @@ -312,12 +309,9 @@ 154.18 MemRegion(cur_committed.end(), new_end_for_commit); 154.19 154.20 assert(!new_committed.is_empty(), "Region should not be empty here"); 154.21 - if (!os::commit_memory((char*)new_committed.start(), 154.22 - new_committed.byte_size(), _page_size)) { 154.23 - // Do better than this for Merlin 154.24 - vm_exit_out_of_memory(new_committed.byte_size(), OOM_MMAP_ERROR, 154.25 - "card table expansion"); 154.26 - } 154.27 + os::commit_memory_or_exit((char*)new_committed.start(), 154.28 + new_committed.byte_size(), _page_size, 154.29 + !ExecMem, "card table expansion"); 154.30 // Use new_end_aligned (as opposed to new_end_for_commit) because 154.31 // the cur_committed region may include the guard region. 154.32 } else if (new_end_aligned < cur_committed.end()) { 154.33 @@ -418,7 +412,7 @@ 154.34 } 154.35 // Touch the last card of the covered region to show that it 154.36 // is committed (or SEGV). 154.37 - debug_only(*byte_for(_covered[ind].last());) 154.38 + debug_only((void) (*byte_for(_covered[ind].last()));) 154.39 debug_only(verify_guard();) 154.40 } 154.41
155.1 --- a/src/share/vm/memory/filemap.cpp Thu Jul 11 12:59:03 2013 -0400 155.2 +++ b/src/share/vm/memory/filemap.cpp Mon Jul 15 11:07:03 2013 +0100 155.3 @@ -549,3 +549,13 @@ 155.4 155.5 return false; 155.6 } 155.7 + 155.8 +void FileMapInfo::print_shared_spaces() { 155.9 + gclog_or_tty->print_cr("Shared Spaces:"); 155.10 + for (int i = 0; i < MetaspaceShared::n_regions; i++) { 155.11 + struct FileMapInfo::FileMapHeader::space_info* si = &_header._space[i]; 155.12 + gclog_or_tty->print(" %s " INTPTR_FORMAT "-" INTPTR_FORMAT, 155.13 + shared_region_name[i], 155.14 + si->_base, si->_base + si->_used); 155.15 + } 155.16 +}
156.1 --- a/src/share/vm/memory/filemap.hpp Thu Jul 11 12:59:03 2013 -0400 156.2 +++ b/src/share/vm/memory/filemap.hpp Mon Jul 15 11:07:03 2013 +0100 156.3 @@ -149,6 +149,7 @@ 156.4 156.5 // Return true if given address is in the mapped shared space. 156.6 bool is_in_shared_space(const void* p) NOT_CDS_RETURN_(false); 156.7 + void print_shared_spaces() NOT_CDS_RETURN; 156.8 }; 156.9 156.10 #endif // SHARE_VM_MEMORY_FILEMAP_HPP
157.1 --- a/src/share/vm/memory/heapInspection.cpp Thu Jul 11 12:59:03 2013 -0400 157.2 +++ b/src/share/vm/memory/heapInspection.cpp Mon Jul 15 11:07:03 2013 +0100 157.3 @@ -157,7 +157,6 @@ 157.4 } 157.5 157.6 uint KlassInfoTable::hash(const Klass* p) { 157.7 - assert(p->is_metadata(), "all klasses are metadata"); 157.8 return (uint)(((uintptr_t)p - (uintptr_t)_ref) >> 2); 157.9 } 157.10
158.1 --- a/src/share/vm/memory/metaspace.cpp Thu Jul 11 12:59:03 2013 -0400 158.2 +++ b/src/share/vm/memory/metaspace.cpp Mon Jul 15 11:07:03 2013 +0100 158.3 @@ -70,7 +70,7 @@ 158.4 SpecializedChunk = 128, 158.5 ClassSmallChunk = 256, 158.6 SmallChunk = 512, 158.7 - ClassMediumChunk = 1 * K, 158.8 + ClassMediumChunk = 4 * K, 158.9 MediumChunk = 8 * K, 158.10 HumongousChunkGranularity = 8 158.11 }; 158.12 @@ -580,7 +580,6 @@ 158.13 // Number of small chunks to allocate to a manager 158.14 // If class space manager, small chunks are unlimited 158.15 static uint const _small_chunk_limit; 158.16 - bool has_small_chunk_limit() { return !vs_list()->is_class(); } 158.17 158.18 // Sum of all space in allocated chunks 158.19 size_t _allocated_blocks_words; 158.20 @@ -1298,13 +1297,18 @@ 158.21 158.22 bool MetaspaceGC::should_expand(VirtualSpaceList* vsl, size_t word_size) { 158.23 158.24 - size_t committed_capacity_bytes = MetaspaceAux::allocated_capacity_bytes(); 158.25 // If the user wants a limit, impose one. 158.26 - size_t max_metaspace_size_bytes = MaxMetaspaceSize; 158.27 - size_t metaspace_size_bytes = MetaspaceSize; 158.28 - if (!FLAG_IS_DEFAULT(MaxMetaspaceSize) && 158.29 - MetaspaceAux::reserved_in_bytes() >= MaxMetaspaceSize) { 158.30 - return false; 158.31 + // The reason for someone using this flag is to limit reserved space. So 158.32 + // for non-class virtual space, compare against virtual spaces that are reserved. 158.33 + // For class virtual space, we only compare against the committed space, not 158.34 + // reserved space, because this is a larger space prereserved for compressed 158.35 + // class pointers. 
158.36 + if (!FLAG_IS_DEFAULT(MaxMetaspaceSize)) { 158.37 + size_t real_allocated = Metaspace::space_list()->virtual_space_total() + 158.38 + MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType); 158.39 + if (real_allocated >= MaxMetaspaceSize) { 158.40 + return false; 158.41 + } 158.42 } 158.43 158.44 // Class virtual space should always be expanded. Call GC for the other 158.45 @@ -1318,11 +1322,12 @@ 158.46 } 158.47 158.48 158.49 - 158.50 // If the capacity is below the minimum capacity, allow the 158.51 // expansion. Also set the high-water-mark (capacity_until_GC) 158.52 // to that minimum capacity so that a GC will not be induced 158.53 // until that minimum capacity is exceeded. 158.54 + size_t committed_capacity_bytes = MetaspaceAux::allocated_capacity_bytes(); 158.55 + size_t metaspace_size_bytes = MetaspaceSize; 158.56 if (committed_capacity_bytes < metaspace_size_bytes || 158.57 capacity_until_GC() == 0) { 158.58 set_capacity_until_GC(metaspace_size_bytes); 158.59 @@ -1556,19 +1561,7 @@ 158.60 158.61 // ChunkManager methods 158.62 158.63 -// Verification of _free_chunks_total and _free_chunks_count does not 158.64 -// work with the CMS collector because its use of additional locks 158.65 -// complicate the mutex deadlock detection but it can still be useful 158.66 -// for detecting errors in the chunk accounting with other collectors. 158.67 - 158.68 size_t ChunkManager::free_chunks_total() { 158.69 -#ifdef ASSERT 158.70 - if (!UseConcMarkSweepGC && !SpaceManager::expand_lock()->is_locked()) { 158.71 - MutexLockerEx cl(SpaceManager::expand_lock(), 158.72 - Mutex::_no_safepoint_check_flag); 158.73 - slow_locked_verify_free_chunks_total(); 158.74 - } 158.75 -#endif 158.76 return _free_chunks_total; 158.77 } 158.78 158.79 @@ -1866,13 +1859,11 @@ 158.80 Metachunk* chunk = chunks_in_use(index); 158.81 // Count the free space in all the chunk but not the 158.82 // current chunk from which allocations are still being done. 
158.83 - if (chunk != NULL) { 158.84 - Metachunk* prev = chunk; 158.85 - while (chunk != NULL && chunk != current_chunk()) { 158.86 + while (chunk != NULL) { 158.87 + if (chunk != current_chunk()) { 158.88 result += chunk->free_word_size(); 158.89 - prev = chunk; 158.90 - chunk = chunk->next(); 158.91 } 158.92 + chunk = chunk->next(); 158.93 } 158.94 return result; 158.95 } 158.96 @@ -1961,8 +1952,7 @@ 158.97 // chunks will be allocated. 158.98 size_t chunk_word_size; 158.99 if (chunks_in_use(MediumIndex) == NULL && 158.100 - (!has_small_chunk_limit() || 158.101 - sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit)) { 158.102 + sum_count_in_chunks_in_use(SmallIndex) < _small_chunk_limit) { 158.103 chunk_word_size = (size_t) small_chunk_size(); 158.104 if (word_size + Metachunk::overhead() > small_chunk_size()) { 158.105 chunk_word_size = medium_chunk_size(); 158.106 @@ -2608,14 +2598,14 @@ 158.107 "->" SIZE_FORMAT 158.108 "(" SIZE_FORMAT ")", 158.109 prev_metadata_used, 158.110 - allocated_capacity_bytes(), 158.111 + allocated_used_bytes(), 158.112 reserved_in_bytes()); 158.113 } else { 158.114 gclog_or_tty->print(" " SIZE_FORMAT "K" 158.115 "->" SIZE_FORMAT "K" 158.116 "(" SIZE_FORMAT "K)", 158.117 prev_metadata_used / K, 158.118 - allocated_capacity_bytes() / K, 158.119 + allocated_used_bytes() / K, 158.120 reserved_in_bytes()/ K); 158.121 } 158.122 158.123 @@ -2671,10 +2661,10 @@ 158.124 // Print total fragmentation for class and data metaspaces separately 158.125 void MetaspaceAux::print_waste(outputStream* out) { 158.126 158.127 - size_t specialized_waste = 0, small_waste = 0, medium_waste = 0, large_waste = 0; 158.128 - size_t specialized_count = 0, small_count = 0, medium_count = 0, large_count = 0; 158.129 - size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0, cls_large_waste = 0; 158.130 - size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_large_count = 0; 158.131 + size_t specialized_waste = 
0, small_waste = 0, medium_waste = 0; 158.132 + size_t specialized_count = 0, small_count = 0, medium_count = 0, humongous_count = 0; 158.133 + size_t cls_specialized_waste = 0, cls_small_waste = 0, cls_medium_waste = 0; 158.134 + size_t cls_specialized_count = 0, cls_small_count = 0, cls_medium_count = 0, cls_humongous_count = 0; 158.135 158.136 ClassLoaderDataGraphMetaspaceIterator iter; 158.137 while (iter.repeat()) { 158.138 @@ -2686,8 +2676,7 @@ 158.139 small_count += msp->vsm()->sum_count_in_chunks_in_use(SmallIndex); 158.140 medium_waste += msp->vsm()->sum_waste_in_chunks_in_use(MediumIndex); 158.141 medium_count += msp->vsm()->sum_count_in_chunks_in_use(MediumIndex); 158.142 - large_waste += msp->vsm()->sum_waste_in_chunks_in_use(HumongousIndex); 158.143 - large_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex); 158.144 + humongous_count += msp->vsm()->sum_count_in_chunks_in_use(HumongousIndex); 158.145 158.146 cls_specialized_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(SpecializedIndex); 158.147 cls_specialized_count += msp->class_vsm()->sum_count_in_chunks_in_use(SpecializedIndex); 158.148 @@ -2695,20 +2684,23 @@ 158.149 cls_small_count += msp->class_vsm()->sum_count_in_chunks_in_use(SmallIndex); 158.150 cls_medium_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(MediumIndex); 158.151 cls_medium_count += msp->class_vsm()->sum_count_in_chunks_in_use(MediumIndex); 158.152 - cls_large_waste += msp->class_vsm()->sum_waste_in_chunks_in_use(HumongousIndex); 158.153 - cls_large_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex); 158.154 + cls_humongous_count += msp->class_vsm()->sum_count_in_chunks_in_use(HumongousIndex); 158.155 } 158.156 } 158.157 out->print_cr("Total fragmentation waste (words) doesn't count free space"); 158.158 out->print_cr(" data: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", " 158.159 SIZE_FORMAT " small(s) " SIZE_FORMAT ", " 158.160 - SIZE_FORMAT " medium(s) " SIZE_FORMAT, 158.161 + 
SIZE_FORMAT " medium(s) " SIZE_FORMAT ", " 158.162 + "large count " SIZE_FORMAT, 158.163 specialized_count, specialized_waste, small_count, 158.164 - small_waste, medium_count, medium_waste); 158.165 + small_waste, medium_count, medium_waste, humongous_count); 158.166 out->print_cr(" class: " SIZE_FORMAT " specialized(s) " SIZE_FORMAT ", " 158.167 - SIZE_FORMAT " small(s) " SIZE_FORMAT, 158.168 + SIZE_FORMAT " small(s) " SIZE_FORMAT ", " 158.169 + SIZE_FORMAT " medium(s) " SIZE_FORMAT ", " 158.170 + "large count " SIZE_FORMAT, 158.171 cls_specialized_count, cls_specialized_waste, 158.172 - cls_small_count, cls_small_waste); 158.173 + cls_small_count, cls_small_waste, 158.174 + cls_medium_count, cls_medium_waste, cls_humongous_count); 158.175 } 158.176 158.177 // Dump global metaspace things from the end of ClassLoaderDataGraph 158.178 @@ -3049,18 +3041,24 @@ 158.179 if (Verbose && TraceMetadataChunkAllocation) { 158.180 gclog_or_tty->print_cr("Metaspace allocation failed for size " 158.181 SIZE_FORMAT, word_size); 158.182 - if (loader_data->metaspace_or_null() != NULL) loader_data->metaspace_or_null()->dump(gclog_or_tty); 158.183 + if (loader_data->metaspace_or_null() != NULL) loader_data->dump(gclog_or_tty); 158.184 MetaspaceAux::dump(gclog_or_tty); 158.185 } 158.186 // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support 158.187 - report_java_out_of_memory("Metadata space"); 158.188 + const char* space_string = (mdtype == ClassType) ? 
"Class Metadata space" : 158.189 + "Metadata space"; 158.190 + report_java_out_of_memory(space_string); 158.191 158.192 if (JvmtiExport::should_post_resource_exhausted()) { 158.193 JvmtiExport::post_resource_exhausted( 158.194 JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR, 158.195 - "Metadata space"); 158.196 + space_string); 158.197 } 158.198 - THROW_OOP_0(Universe::out_of_memory_error_perm_gen()); 158.199 + if (mdtype == ClassType) { 158.200 + THROW_OOP_0(Universe::out_of_memory_error_class_metaspace()); 158.201 + } else { 158.202 + THROW_OOP_0(Universe::out_of_memory_error_metaspace()); 158.203 + } 158.204 } 158.205 } 158.206 return Metablock::initialize(result, word_size);
159.1 --- a/src/share/vm/memory/metaspaceShared.cpp Thu Jul 11 12:59:03 2013 -0400 159.2 +++ b/src/share/vm/memory/metaspaceShared.cpp Mon Jul 15 11:07:03 2013 +0100 159.3 @@ -826,35 +826,15 @@ 159.4 bool reading() const { return true; } 159.5 }; 159.6 159.7 - 159.8 -// Save bounds of shared spaces mapped in. 159.9 -static char* _ro_base = NULL; 159.10 -static char* _rw_base = NULL; 159.11 -static char* _md_base = NULL; 159.12 -static char* _mc_base = NULL; 159.13 - 159.14 // Return true if given address is in the mapped shared space. 159.15 bool MetaspaceShared::is_in_shared_space(const void* p) { 159.16 - if (_ro_base == NULL || _rw_base == NULL) { 159.17 - return false; 159.18 - } else { 159.19 - return ((p >= _ro_base && p < (_ro_base + SharedReadOnlySize)) || 159.20 - (p >= _rw_base && p < (_rw_base + SharedReadWriteSize))); 159.21 - } 159.22 + return UseSharedSpaces && FileMapInfo::current_info()->is_in_shared_space(p); 159.23 } 159.24 159.25 void MetaspaceShared::print_shared_spaces() { 159.26 - gclog_or_tty->print_cr("Shared Spaces:"); 159.27 - gclog_or_tty->print(" read-only " INTPTR_FORMAT "-" INTPTR_FORMAT, 159.28 - _ro_base, _ro_base + SharedReadOnlySize); 159.29 - gclog_or_tty->print(" read-write " INTPTR_FORMAT "-" INTPTR_FORMAT, 159.30 - _rw_base, _rw_base + SharedReadWriteSize); 159.31 - gclog_or_tty->cr(); 159.32 - gclog_or_tty->print(" misc-data " INTPTR_FORMAT "-" INTPTR_FORMAT, 159.33 - _md_base, _md_base + SharedMiscDataSize); 159.34 - gclog_or_tty->print(" misc-code " INTPTR_FORMAT "-" INTPTR_FORMAT, 159.35 - _mc_base, _mc_base + SharedMiscCodeSize); 159.36 - gclog_or_tty->cr(); 159.37 + if (UseSharedSpaces) { 159.38 + FileMapInfo::current_info()->print_shared_spaces(); 159.39 + } 159.40 } 159.41 159.42 159.43 @@ -874,6 +854,11 @@ 159.44 159.45 assert(!DumpSharedSpaces, "Should not be called with DumpSharedSpaces"); 159.46 159.47 + char* _ro_base = NULL; 159.48 + char* _rw_base = NULL; 159.49 + char* _md_base = NULL; 159.50 + char* _mc_base = 
NULL; 159.51 + 159.52 // Map each shared region 159.53 if ((_ro_base = mapinfo->map_region(ro)) != NULL && 159.54 (_rw_base = mapinfo->map_region(rw)) != NULL &&
160.1 --- a/src/share/vm/memory/referenceProcessorStats.hpp Thu Jul 11 12:59:03 2013 -0400 160.2 +++ b/src/share/vm/memory/referenceProcessorStats.hpp Mon Jul 15 11:07:03 2013 +0100 160.3 @@ -1,5 +1,5 @@ 160.4 /* 160.5 - * Copyright (c) 2012 Oracle and/or its affiliates. All rights reserved. 160.6 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. 160.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 160.8 * 160.9 * This code is free software; you can redistribute it and/or modify it
161.1 --- a/src/share/vm/memory/sharedHeap.cpp Thu Jul 11 12:59:03 2013 -0400 161.2 +++ b/src/share/vm/memory/sharedHeap.cpp Mon Jul 15 11:07:03 2013 +0100 161.3 @@ -1,5 +1,5 @@ 161.4 /* 161.5 - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. 161.6 + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. 161.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 161.8 * 161.9 * This code is free software; you can redistribute it and/or modify it 161.10 @@ -47,7 +47,6 @@ 161.11 SH_PS_SystemDictionary_oops_do, 161.12 SH_PS_ClassLoaderDataGraph_oops_do, 161.13 SH_PS_jvmti_oops_do, 161.14 - SH_PS_StringTable_oops_do, 161.15 SH_PS_CodeCache_oops_do, 161.16 // Leave this one last. 161.17 SH_PS_NumElements 161.18 @@ -127,6 +126,8 @@ 161.19 { 161.20 if (_active) { 161.21 outer->change_strong_roots_parity(); 161.22 + // Zero the claimed high water mark in the StringTable 161.23 + StringTable::clear_parallel_claimed_index(); 161.24 } 161.25 } 161.26 161.27 @@ -154,14 +155,16 @@ 161.28 // Global (strong) JNI handles 161.29 if (!_process_strong_tasks->is_task_claimed(SH_PS_JNIHandles_oops_do)) 161.30 JNIHandles::oops_do(roots); 161.31 + 161.32 // All threads execute this; the individual threads are task groups. 161.33 CLDToOopClosure roots_from_clds(roots); 161.34 CLDToOopClosure* roots_from_clds_p = (is_scavenging ? 
NULL : &roots_from_clds); 161.35 - if (ParallelGCThreads > 0) { 161.36 - Threads::possibly_parallel_oops_do(roots, roots_from_clds_p ,code_roots); 161.37 + if (CollectedHeap::use_parallel_gc_threads()) { 161.38 + Threads::possibly_parallel_oops_do(roots, roots_from_clds_p, code_roots); 161.39 } else { 161.40 Threads::oops_do(roots, roots_from_clds_p, code_roots); 161.41 } 161.42 + 161.43 if (!_process_strong_tasks-> is_task_claimed(SH_PS_ObjectSynchronizer_oops_do)) 161.44 ObjectSynchronizer::oops_do(roots); 161.45 if (!_process_strong_tasks->is_task_claimed(SH_PS_FlatProfiler_oops_do)) 161.46 @@ -189,8 +192,12 @@ 161.47 } 161.48 } 161.49 161.50 - if (!_process_strong_tasks->is_task_claimed(SH_PS_StringTable_oops_do)) { 161.51 - if (so & SO_Strings) { 161.52 + // All threads execute the following. A specific chunk of buckets 161.53 + // from the StringTable are the individual tasks. 161.54 + if (so & SO_Strings) { 161.55 + if (CollectedHeap::use_parallel_gc_threads()) { 161.56 + StringTable::possibly_parallel_oops_do(roots); 161.57 + } else { 161.58 StringTable::oops_do(roots); 161.59 } 161.60 }
162.1 --- a/src/share/vm/memory/universe.cpp Thu Jul 11 12:59:03 2013 -0400 162.2 +++ b/src/share/vm/memory/universe.cpp Mon Jul 15 11:07:03 2013 +0100 162.3 @@ -111,7 +111,8 @@ 162.4 LatestMethodOopCache* Universe::_pd_implies_cache = NULL; 162.5 ActiveMethodOopsCache* Universe::_reflect_invoke_cache = NULL; 162.6 oop Universe::_out_of_memory_error_java_heap = NULL; 162.7 -oop Universe::_out_of_memory_error_perm_gen = NULL; 162.8 +oop Universe::_out_of_memory_error_metaspace = NULL; 162.9 +oop Universe::_out_of_memory_error_class_metaspace = NULL; 162.10 oop Universe::_out_of_memory_error_array_size = NULL; 162.11 oop Universe::_out_of_memory_error_gc_overhead_limit = NULL; 162.12 objArrayOop Universe::_preallocated_out_of_memory_error_array = NULL; 162.13 @@ -180,7 +181,8 @@ 162.14 f->do_oop((oop*)&_the_null_string); 162.15 f->do_oop((oop*)&_the_min_jint_string); 162.16 f->do_oop((oop*)&_out_of_memory_error_java_heap); 162.17 - f->do_oop((oop*)&_out_of_memory_error_perm_gen); 162.18 + f->do_oop((oop*)&_out_of_memory_error_metaspace); 162.19 + f->do_oop((oop*)&_out_of_memory_error_class_metaspace); 162.20 f->do_oop((oop*)&_out_of_memory_error_array_size); 162.21 f->do_oop((oop*)&_out_of_memory_error_gc_overhead_limit); 162.22 f->do_oop((oop*)&_preallocated_out_of_memory_error_array); 162.23 @@ -531,7 +533,9 @@ 162.24 if (vt) vt->initialize_vtable(false, CHECK); 162.25 if (ko->oop_is_instance()) { 162.26 InstanceKlass* ik = (InstanceKlass*)ko; 162.27 - for (KlassHandle s_h(THREAD, ik->subklass()); s_h() != NULL; s_h = (THREAD, s_h()->next_sibling())) { 162.28 + for (KlassHandle s_h(THREAD, ik->subklass()); 162.29 + s_h() != NULL; 162.30 + s_h = KlassHandle(THREAD, s_h()->next_sibling())) { 162.31 reinitialize_vtable_of(s_h, CHECK); 162.32 } 162.33 } 162.34 @@ -561,7 +565,8 @@ 162.35 // a potential loop which could happen if an out of memory occurs when attempting 162.36 // to allocate the backtrace. 
162.37 return ((throwable() != Universe::_out_of_memory_error_java_heap) && 162.38 - (throwable() != Universe::_out_of_memory_error_perm_gen) && 162.39 + (throwable() != Universe::_out_of_memory_error_metaspace) && 162.40 + (throwable() != Universe::_out_of_memory_error_class_metaspace) && 162.41 (throwable() != Universe::_out_of_memory_error_array_size) && 162.42 (throwable() != Universe::_out_of_memory_error_gc_overhead_limit)); 162.43 } 162.44 @@ -1012,7 +1017,8 @@ 162.45 k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_OutOfMemoryError(), true, CHECK_false); 162.46 k_h = instanceKlassHandle(THREAD, k); 162.47 Universe::_out_of_memory_error_java_heap = k_h->allocate_instance(CHECK_false); 162.48 - Universe::_out_of_memory_error_perm_gen = k_h->allocate_instance(CHECK_false); 162.49 + Universe::_out_of_memory_error_metaspace = k_h->allocate_instance(CHECK_false); 162.50 + Universe::_out_of_memory_error_class_metaspace = k_h->allocate_instance(CHECK_false); 162.51 Universe::_out_of_memory_error_array_size = k_h->allocate_instance(CHECK_false); 162.52 Universe::_out_of_memory_error_gc_overhead_limit = 162.53 k_h->allocate_instance(CHECK_false); 162.54 @@ -1045,7 +1051,9 @@ 162.55 java_lang_Throwable::set_message(Universe::_out_of_memory_error_java_heap, msg()); 162.56 162.57 msg = java_lang_String::create_from_str("Metadata space", CHECK_false); 162.58 - java_lang_Throwable::set_message(Universe::_out_of_memory_error_perm_gen, msg()); 162.59 + java_lang_Throwable::set_message(Universe::_out_of_memory_error_metaspace, msg()); 162.60 + msg = java_lang_String::create_from_str("Class Metadata space", CHECK_false); 162.61 + java_lang_Throwable::set_message(Universe::_out_of_memory_error_class_metaspace, msg()); 162.62 162.63 msg = java_lang_String::create_from_str("Requested array size exceeds VM limit", CHECK_false); 162.64 java_lang_Throwable::set_message(Universe::_out_of_memory_error_array_size, msg()); 162.65 @@ -1145,6 +1153,7 @@ 162.66 162.67 // 
Initialize performance counters for metaspaces 162.68 MetaspaceCounters::initialize_performance_counters(); 162.69 + MemoryService::add_metaspace_memory_pools(); 162.70 162.71 GC_locker::unlock(); // allow gc after bootstrapping 162.72
163.1 --- a/src/share/vm/memory/universe.hpp Thu Jul 11 12:59:03 2013 -0400 163.2 +++ b/src/share/vm/memory/universe.hpp Mon Jul 15 11:07:03 2013 +0100 163.3 @@ -178,10 +178,12 @@ 163.4 static LatestMethodOopCache* _loader_addClass_cache; // method for registering loaded classes in class loader vector 163.5 static LatestMethodOopCache* _pd_implies_cache; // method for checking protection domain attributes 163.6 static ActiveMethodOopsCache* _reflect_invoke_cache; // method for security checks 163.7 - static oop _out_of_memory_error_java_heap; // preallocated error object (no backtrace) 163.8 - static oop _out_of_memory_error_perm_gen; // preallocated error object (no backtrace) 163.9 - static oop _out_of_memory_error_array_size;// preallocated error object (no backtrace) 163.10 - static oop _out_of_memory_error_gc_overhead_limit; // preallocated error object (no backtrace) 163.11 + // preallocated error objects (no backtrace) 163.12 + static oop _out_of_memory_error_java_heap; 163.13 + static oop _out_of_memory_error_metaspace; 163.14 + static oop _out_of_memory_error_class_metaspace; 163.15 + static oop _out_of_memory_error_array_size; 163.16 + static oop _out_of_memory_error_gc_overhead_limit; 163.17 163.18 static Array<int>* _the_empty_int_array; // Canonicalized int array 163.19 static Array<u2>* _the_empty_short_array; // Canonicalized short array 163.20 @@ -352,7 +354,8 @@ 163.21 // may or may not have a backtrace. If error has a backtrace then the stack trace is already 163.22 // filled in. 
163.23 static oop out_of_memory_error_java_heap() { return gen_out_of_memory_error(_out_of_memory_error_java_heap); } 163.24 - static oop out_of_memory_error_perm_gen() { return gen_out_of_memory_error(_out_of_memory_error_perm_gen); } 163.25 + static oop out_of_memory_error_metaspace() { return gen_out_of_memory_error(_out_of_memory_error_metaspace); } 163.26 + static oop out_of_memory_error_class_metaspace() { return gen_out_of_memory_error(_out_of_memory_error_class_metaspace); } 163.27 static oop out_of_memory_error_array_size() { return gen_out_of_memory_error(_out_of_memory_error_array_size); } 163.28 static oop out_of_memory_error_gc_overhead_limit() { return gen_out_of_memory_error(_out_of_memory_error_gc_overhead_limit); } 163.29
164.1 --- a/src/share/vm/oops/arrayKlass.cpp Thu Jul 11 12:59:03 2013 -0400 164.2 +++ b/src/share/vm/oops/arrayKlass.cpp Mon Jul 15 11:07:03 2013 +0100 164.3 @@ -221,8 +221,8 @@ 164.4 164.5 // Verification 164.6 164.7 -void ArrayKlass::verify_on(outputStream* st) { 164.8 - Klass::verify_on(st); 164.9 +void ArrayKlass::verify_on(outputStream* st, bool check_dictionary) { 164.10 + Klass::verify_on(st, check_dictionary); 164.11 164.12 if (component_mirror() != NULL) { 164.13 guarantee(component_mirror()->klass() != NULL, "should have a class");
165.1 --- a/src/share/vm/oops/arrayKlass.hpp Thu Jul 11 12:59:03 2013 -0400 165.2 +++ b/src/share/vm/oops/arrayKlass.hpp Mon Jul 15 11:07:03 2013 +0100 165.3 @@ -152,7 +152,7 @@ 165.4 void oop_print_on(oop obj, outputStream* st); 165.5 165.6 // Verification 165.7 - void verify_on(outputStream* st); 165.8 + void verify_on(outputStream* st, bool check_dictionary); 165.9 165.10 void oop_verify_on(oop obj, outputStream* st); 165.11 };
166.1 --- a/src/share/vm/oops/compiledICHolder.cpp Thu Jul 11 12:59:03 2013 -0400 166.2 +++ b/src/share/vm/oops/compiledICHolder.cpp Mon Jul 15 11:07:03 2013 +0100 166.3 @@ -48,8 +48,6 @@ 166.4 // Verification 166.5 166.6 void CompiledICHolder::verify_on(outputStream* st) { 166.7 - guarantee(holder_method()->is_metadata(), "should be in metaspace"); 166.8 guarantee(holder_method()->is_method(), "should be method"); 166.9 - guarantee(holder_klass()->is_metadata(), "should be in metaspace"); 166.10 guarantee(holder_klass()->is_klass(), "should be klass"); 166.11 }
167.1 --- a/src/share/vm/oops/constMethod.cpp Thu Jul 11 12:59:03 2013 -0400 167.2 +++ b/src/share/vm/oops/constMethod.cpp Mon Jul 15 11:07:03 2013 +0100 167.3 @@ -440,7 +440,6 @@ 167.4 167.5 void ConstMethod::verify_on(outputStream* st) { 167.6 guarantee(is_constMethod(), "object must be constMethod"); 167.7 - guarantee(is_metadata(), err_msg("Should be metadata " PTR_FORMAT, this)); 167.8 167.9 // Verification can occur during oop construction before the method or 167.10 // other fields have been initialized.
168.1 --- a/src/share/vm/oops/constantPool.cpp Thu Jul 11 12:59:03 2013 -0400 168.2 +++ b/src/share/vm/oops/constantPool.cpp Mon Jul 15 11:07:03 2013 +0100 168.3 @@ -2095,12 +2095,10 @@ 168.4 CPSlot entry = slot_at(i); 168.5 if (tag.is_klass()) { 168.6 if (entry.is_resolved()) { 168.7 - guarantee(entry.get_klass()->is_metadata(), "should be metadata"); 168.8 guarantee(entry.get_klass()->is_klass(), "should be klass"); 168.9 } 168.10 } else if (tag.is_unresolved_klass()) { 168.11 if (entry.is_resolved()) { 168.12 - guarantee(entry.get_klass()->is_metadata(), "should be metadata"); 168.13 guarantee(entry.get_klass()->is_klass(), "should be klass"); 168.14 } 168.15 } else if (tag.is_symbol()) { 168.16 @@ -2112,13 +2110,11 @@ 168.17 if (cache() != NULL) { 168.18 // Note: cache() can be NULL before a class is completely setup or 168.19 // in temporary constant pools used during constant pool merging 168.20 - guarantee(cache()->is_metadata(), "should be metadata"); 168.21 guarantee(cache()->is_constantPoolCache(), "should be constant pool cache"); 168.22 } 168.23 if (pool_holder() != NULL) { 168.24 // Note: pool_holder() can be NULL in temporary constant pools 168.25 // used during constant pool merging 168.26 - guarantee(pool_holder()->is_metadata(), "should be metadata"); 168.27 guarantee(pool_holder()->is_klass(), "should be klass"); 168.28 } 168.29 }
169.1 --- a/src/share/vm/oops/instanceKlass.cpp Thu Jul 11 12:59:03 2013 -0400 169.2 +++ b/src/share/vm/oops/instanceKlass.cpp Mon Jul 15 11:07:03 2013 +0100 169.3 @@ -3088,27 +3088,26 @@ 169.4 virtual void do_oop(narrowOop* p) { VerifyFieldClosure::do_oop_work(p); } 169.5 }; 169.6 169.7 -void InstanceKlass::verify_on(outputStream* st) { 169.8 - Klass::verify_on(st); 169.9 - Thread *thread = Thread::current(); 169.10 - 169.11 +void InstanceKlass::verify_on(outputStream* st, bool check_dictionary) { 169.12 #ifndef PRODUCT 169.13 - // Avoid redundant verifies 169.14 + // Avoid redundant verifies, this really should be in product. 169.15 if (_verify_count == Universe::verify_count()) return; 169.16 _verify_count = Universe::verify_count(); 169.17 #endif 169.18 - // Verify that klass is present in SystemDictionary 169.19 - if (is_loaded() && !is_anonymous()) { 169.20 + 169.21 + // Verify Klass 169.22 + Klass::verify_on(st, check_dictionary); 169.23 + 169.24 + // Verify that klass is present in SystemDictionary if not already 169.25 + // verifying the SystemDictionary. 169.26 + if (is_loaded() && !is_anonymous() && check_dictionary) { 169.27 Symbol* h_name = name(); 169.28 SystemDictionary::verify_obj_klass_present(h_name, class_loader_data()); 169.29 } 169.30 169.31 - // Verify static fields 169.32 - VerifyFieldClosure blk; 169.33 - 169.34 // Verify vtables 169.35 if (is_linked()) { 169.36 - ResourceMark rm(thread); 169.37 + ResourceMark rm; 169.38 // $$$ This used to be done only for m/s collections. Doing it 169.39 // always seemed a valid generalization. 
(DLD -- 6/00) 169.40 vtable()->verify(st); 169.41 @@ -3116,7 +3115,6 @@ 169.42 169.43 // Verify first subklass 169.44 if (subklass_oop() != NULL) { 169.45 - guarantee(subklass_oop()->is_metadata(), "should be in metaspace"); 169.46 guarantee(subklass_oop()->is_klass(), "should be klass"); 169.47 } 169.48 169.49 @@ -3128,7 +3126,6 @@ 169.50 fatal(err_msg("subclass points to itself " PTR_FORMAT, sib)); 169.51 } 169.52 169.53 - guarantee(sib->is_metadata(), "should be in metaspace"); 169.54 guarantee(sib->is_klass(), "should be klass"); 169.55 guarantee(sib->super() == super, "siblings should have same superklass"); 169.56 } 169.57 @@ -3164,7 +3161,6 @@ 169.58 if (methods() != NULL) { 169.59 Array<Method*>* methods = this->methods(); 169.60 for (int j = 0; j < methods->length(); j++) { 169.61 - guarantee(methods->at(j)->is_metadata(), "should be in metaspace"); 169.62 guarantee(methods->at(j)->is_method(), "non-method in methods array"); 169.63 } 169.64 for (int j = 0; j < methods->length() - 1; j++) { 169.65 @@ -3202,16 +3198,13 @@ 169.66 169.67 // Verify other fields 169.68 if (array_klasses() != NULL) { 169.69 - guarantee(array_klasses()->is_metadata(), "should be in metaspace"); 169.70 guarantee(array_klasses()->is_klass(), "should be klass"); 169.71 } 169.72 if (constants() != NULL) { 169.73 - guarantee(constants()->is_metadata(), "should be in metaspace"); 169.74 guarantee(constants()->is_constantPool(), "should be constant pool"); 169.75 } 169.76 const Klass* host = host_klass(); 169.77 if (host != NULL) { 169.78 - guarantee(host->is_metadata(), "should be in metaspace"); 169.79 guarantee(host->is_klass(), "should be klass"); 169.80 } 169.81 }
170.1 --- a/src/share/vm/oops/instanceKlass.hpp Thu Jul 11 12:59:03 2013 -0400 170.2 +++ b/src/share/vm/oops/instanceKlass.hpp Mon Jul 15 11:07:03 2013 +0100 170.3 @@ -1050,7 +1050,7 @@ 170.4 const char* internal_name() const; 170.5 170.6 // Verification 170.7 - void verify_on(outputStream* st); 170.8 + void verify_on(outputStream* st, bool check_dictionary); 170.9 170.10 void oop_verify_on(oop obj, outputStream* st); 170.11 };
171.1 --- a/src/share/vm/oops/klass.cpp Thu Jul 11 12:59:03 2013 -0400 171.2 +++ b/src/share/vm/oops/klass.cpp Mon Jul 15 11:07:03 2013 +0100 171.3 @@ -377,7 +377,6 @@ 171.4 } 171.5 171.6 bool Klass::is_loader_alive(BoolObjectClosure* is_alive) { 171.7 - assert(is_metadata(), "p is not meta-data"); 171.8 assert(ClassLoaderDataGraph::contains((address)this), "is in the metaspace"); 171.9 171.10 #ifdef ASSERT 171.11 @@ -648,27 +647,24 @@ 171.12 171.13 // Verification 171.14 171.15 -void Klass::verify_on(outputStream* st) { 171.16 - guarantee(!Universe::heap()->is_in_reserved(this), "Shouldn't be"); 171.17 - guarantee(this->is_metadata(), "should be in metaspace"); 171.18 +void Klass::verify_on(outputStream* st, bool check_dictionary) { 171.19 171.20 + // This can be expensive, but it is worth checking that this klass is actually 171.21 + // in the CLD graph but not in production. 171.22 assert(ClassLoaderDataGraph::contains((address)this), "Should be"); 171.23 171.24 guarantee(this->is_klass(),"should be klass"); 171.25 171.26 if (super() != NULL) { 171.27 - guarantee(super()->is_metadata(), "should be in metaspace"); 171.28 guarantee(super()->is_klass(), "should be klass"); 171.29 } 171.30 if (secondary_super_cache() != NULL) { 171.31 Klass* ko = secondary_super_cache(); 171.32 - guarantee(ko->is_metadata(), "should be in metaspace"); 171.33 guarantee(ko->is_klass(), "should be klass"); 171.34 } 171.35 for ( uint i = 0; i < primary_super_limit(); i++ ) { 171.36 Klass* ko = _primary_supers[i]; 171.37 if (ko != NULL) { 171.38 - guarantee(ko->is_metadata(), "should be in metaspace"); 171.39 guarantee(ko->is_klass(), "should be klass"); 171.40 } 171.41 } 171.42 @@ -680,7 +676,6 @@ 171.43 171.44 void Klass::oop_verify_on(oop obj, outputStream* st) { 171.45 guarantee(obj->is_oop(), "should be oop"); 171.46 - guarantee(obj->klass()->is_metadata(), "should not be in Java heap"); 171.47 guarantee(obj->klass()->is_klass(), "klass field is not a klass"); 171.48 } 171.49
172.1 --- a/src/share/vm/oops/klass.hpp Thu Jul 11 12:59:03 2013 -0400 172.2 +++ b/src/share/vm/oops/klass.hpp Mon Jul 15 11:07:03 2013 +0100 172.3 @@ -703,8 +703,8 @@ 172.4 virtual const char* internal_name() const = 0; 172.5 172.6 // Verification 172.7 - virtual void verify_on(outputStream* st); 172.8 - void verify() { verify_on(tty); } 172.9 + virtual void verify_on(outputStream* st, bool check_dictionary); 172.10 + void verify(bool check_dictionary = true) { verify_on(tty, check_dictionary); } 172.11 172.12 #ifndef PRODUCT 172.13 void verify_vtable_index(int index);
173.1 --- a/src/share/vm/oops/method.cpp Thu Jul 11 12:59:03 2013 -0400 173.2 +++ b/src/share/vm/oops/method.cpp Mon Jul 15 11:07:03 2013 +0100 173.3 @@ -1969,14 +1969,9 @@ 173.4 173.5 void Method::verify_on(outputStream* st) { 173.6 guarantee(is_method(), "object must be method"); 173.7 - guarantee(is_metadata(), "should be metadata"); 173.8 guarantee(constants()->is_constantPool(), "should be constant pool"); 173.9 - guarantee(constants()->is_metadata(), "should be metadata"); 173.10 guarantee(constMethod()->is_constMethod(), "should be ConstMethod*"); 173.11 - guarantee(constMethod()->is_metadata(), "should be metadata"); 173.12 MethodData* md = method_data(); 173.13 guarantee(md == NULL || 173.14 - md->is_metadata(), "should be metadata"); 173.15 - guarantee(md == NULL || 173.16 md->is_methodData(), "should be method data"); 173.17 }
174.1 --- a/src/share/vm/oops/objArrayKlass.cpp Thu Jul 11 12:59:03 2013 -0400 174.2 +++ b/src/share/vm/oops/objArrayKlass.cpp Mon Jul 15 11:07:03 2013 +0100 174.3 @@ -676,11 +676,9 @@ 174.4 174.5 // Verification 174.6 174.7 -void ObjArrayKlass::verify_on(outputStream* st) { 174.8 - ArrayKlass::verify_on(st); 174.9 - guarantee(element_klass()->is_metadata(), "should be in metaspace"); 174.10 +void ObjArrayKlass::verify_on(outputStream* st, bool check_dictionary) { 174.11 + ArrayKlass::verify_on(st, check_dictionary); 174.12 guarantee(element_klass()->is_klass(), "should be klass"); 174.13 - guarantee(bottom_klass()->is_metadata(), "should be in metaspace"); 174.14 guarantee(bottom_klass()->is_klass(), "should be klass"); 174.15 Klass* bk = bottom_klass(); 174.16 guarantee(bk->oop_is_instance() || bk->oop_is_typeArray(), "invalid bottom klass");
175.1 --- a/src/share/vm/oops/objArrayKlass.hpp Thu Jul 11 12:59:03 2013 -0400 175.2 +++ b/src/share/vm/oops/objArrayKlass.hpp Mon Jul 15 11:07:03 2013 +0100 175.3 @@ -151,7 +151,7 @@ 175.4 const char* internal_name() const; 175.5 175.6 // Verification 175.7 - void verify_on(outputStream* st); 175.8 + void verify_on(outputStream* st, bool check_dictionary); 175.9 175.10 void oop_verify_on(oop obj, outputStream* st); 175.11 };
176.1 --- a/src/share/vm/oops/symbol.cpp Thu Jul 11 12:59:03 2013 -0400 176.2 +++ b/src/share/vm/oops/symbol.cpp Mon Jul 15 11:07:03 2013 +0100 176.3 @@ -32,7 +32,9 @@ 176.4 #include "memory/allocation.inline.hpp" 176.5 #include "memory/resourceArea.hpp" 176.6 176.7 -Symbol::Symbol(const u1* name, int length, int refcount) : _refcount(refcount), _length(length) { 176.8 +Symbol::Symbol(const u1* name, int length, int refcount) { 176.9 + _refcount = refcount; 176.10 + _length = length; 176.11 _identity_hash = os::random(); 176.12 for (int i = 0; i < _length; i++) { 176.13 byte_at_put(i, name[i]);
177.1 --- a/src/share/vm/oops/symbol.hpp Thu Jul 11 12:59:03 2013 -0400 177.2 +++ b/src/share/vm/oops/symbol.hpp Mon Jul 15 11:07:03 2013 +0100 177.3 @@ -27,6 +27,7 @@ 177.4 177.5 #include "utilities/utf8.hpp" 177.6 #include "memory/allocation.hpp" 177.7 +#include "runtime/atomic.hpp" 177.8 177.9 // A Symbol is a canonicalized string. 177.10 // All Symbols reside in global SymbolTable and are reference counted. 177.11 @@ -101,14 +102,22 @@ 177.12 // type without virtual functions. 177.13 class ClassLoaderData; 177.14 177.15 -class Symbol : public MetaspaceObj { 177.16 +// We separate the fields in SymbolBase from Symbol::_body so that 177.17 +// Symbol::size(int) can correctly calculate the space needed. 177.18 +class SymbolBase : public MetaspaceObj { 177.19 + public: 177.20 + ATOMIC_SHORT_PAIR( 177.21 + volatile short _refcount, // needs atomic operation 177.22 + unsigned short _length // number of UTF8 characters in the symbol (does not need atomic op) 177.23 + ); 177.24 + int _identity_hash; 177.25 +}; 177.26 + 177.27 +class Symbol : private SymbolBase { 177.28 friend class VMStructs; 177.29 friend class SymbolTable; 177.30 friend class MoveSymbols; 177.31 private: 177.32 - volatile int _refcount; 177.33 - int _identity_hash; 177.34 - unsigned short _length; // number of UTF8 characters in the symbol 177.35 jbyte _body[1]; 177.36 177.37 enum { 177.38 @@ -117,7 +126,7 @@ 177.39 }; 177.40 177.41 static int size(int length) { 177.42 - size_t sz = heap_word_size(sizeof(Symbol) + (length > 0 ? length - 1 : 0)); 177.43 + size_t sz = heap_word_size(sizeof(SymbolBase) + (length > 0 ? length : 0)); 177.44 return align_object_size(sz); 177.45 } 177.46
178.1 --- a/src/share/vm/opto/c2_globals.hpp Thu Jul 11 12:59:03 2013 -0400 178.2 +++ b/src/share/vm/opto/c2_globals.hpp Mon Jul 15 11:07:03 2013 +0100 178.3 @@ -1,5 +1,5 @@ 178.4 /* 178.5 - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. 178.6 + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. 178.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 178.8 * 178.9 * This code is free software; you can redistribute it and/or modify it 178.10 @@ -406,10 +406,10 @@ 178.11 develop(intx, WarmCallMaxSize, 999999, \ 178.12 "size of the largest inlinable method") \ 178.13 \ 178.14 - product(intx, MaxNodeLimit, 65000, \ 178.15 + product(intx, MaxNodeLimit, 80000, \ 178.16 "Maximum number of nodes") \ 178.17 \ 178.18 - product(intx, NodeLimitFudgeFactor, 1000, \ 178.19 + product(intx, NodeLimitFudgeFactor, 2000, \ 178.20 "Fudge Factor for certain optimizations") \ 178.21 \ 178.22 product(bool, UseJumpTables, true, \
179.1 --- a/src/share/vm/opto/chaitin.cpp Thu Jul 11 12:59:03 2013 -0400 179.2 +++ b/src/share/vm/opto/chaitin.cpp Mon Jul 15 11:07:03 2013 +0100 179.3 @@ -1,5 +1,5 @@ 179.4 /* 179.5 - * Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved. 179.6 + * Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. 179.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 179.8 * 179.9 * This code is free software; you can redistribute it and/or modify it 179.10 @@ -435,6 +435,9 @@ 179.11 // Insert un-coalesced copies. Visit all Phis. Where inputs to a Phi do 179.12 // not match the Phi itself, insert a copy. 179.13 coalesce.insert_copies(_matcher); 179.14 + if (C->failing()) { 179.15 + return; 179.16 + } 179.17 } 179.18 179.19 // After aggressive coalesce, attempt a first cut at coloring.
180.1 --- a/src/share/vm/opto/coalesce.cpp Thu Jul 11 12:59:03 2013 -0400 180.2 +++ b/src/share/vm/opto/coalesce.cpp Mon Jul 15 11:07:03 2013 +0100 180.3 @@ -1,5 +1,5 @@ 180.4 /* 180.5 - * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved. 180.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. 180.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 180.8 * 180.9 * This code is free software; you can redistribute it and/or modify it 180.10 @@ -240,6 +240,8 @@ 180.11 _unique = C->unique(); 180.12 180.13 for( uint i=0; i<_phc._cfg._num_blocks; i++ ) { 180.14 + C->check_node_count(NodeLimitFudgeFactor, "out of nodes in coalesce"); 180.15 + if (C->failing()) return; 180.16 Block *b = _phc._cfg._blocks[i]; 180.17 uint cnt = b->num_preds(); // Number of inputs to the Phi 180.18
181.1 --- a/src/share/vm/opto/escape.cpp Thu Jul 11 12:59:03 2013 -0400 181.2 +++ b/src/share/vm/opto/escape.cpp Mon Jul 15 11:07:03 2013 +0100 181.3 @@ -933,6 +933,7 @@ 181.4 (call->as_CallLeaf()->_name != NULL && 181.5 (strcmp(call->as_CallLeaf()->_name, "g1_wb_pre") == 0 || 181.6 strcmp(call->as_CallLeaf()->_name, "g1_wb_post") == 0 || 181.7 + strcmp(call->as_CallLeaf()->_name, "updateBytesCRC32") == 0 || 181.8 strcmp(call->as_CallLeaf()->_name, "aescrypt_encryptBlock") == 0 || 181.9 strcmp(call->as_CallLeaf()->_name, "aescrypt_decryptBlock") == 0 || 181.10 strcmp(call->as_CallLeaf()->_name, "cipherBlockChaining_encryptAESCrypt") == 0 ||
182.1 --- a/src/share/vm/opto/graphKit.cpp Thu Jul 11 12:59:03 2013 -0400 182.2 +++ b/src/share/vm/opto/graphKit.cpp Mon Jul 15 11:07:03 2013 +0100 182.3 @@ -3332,9 +3332,14 @@ 182.4 if (ptr == NULL) { // reduce dumb test in callers 182.5 return NULL; 182.6 } 182.7 - ptr = ptr->uncast(); // strip a raw-to-oop cast 182.8 - if (ptr == NULL) return NULL; 182.9 - 182.10 + if (ptr->is_CheckCastPP()) { // strip only one raw-to-oop cast 182.11 + ptr = ptr->in(1); 182.12 + if (ptr == NULL) return NULL; 182.13 + } 182.14 + // Return NULL for allocations with several casts: 182.15 + // j.l.reflect.Array.newInstance(jobject, jint) 182.16 + // Object.clone() 182.17 + // to keep more precise type from last cast. 182.18 if (ptr->is_Proj()) { 182.19 Node* allo = ptr->in(0); 182.20 if (allo != NULL && allo->is_Allocate()) {
183.1 --- a/src/share/vm/opto/library_call.cpp Thu Jul 11 12:59:03 2013 -0400 183.2 +++ b/src/share/vm/opto/library_call.cpp Mon Jul 15 11:07:03 2013 +0100 183.3 @@ -291,6 +291,9 @@ 183.4 Node* inline_cipherBlockChaining_AESCrypt_predicate(bool decrypting); 183.5 Node* get_key_start_from_aescrypt_object(Node* aescrypt_object); 183.6 bool inline_encodeISOArray(); 183.7 + bool inline_updateCRC32(); 183.8 + bool inline_updateBytesCRC32(); 183.9 + bool inline_updateByteBufferCRC32(); 183.10 }; 183.11 183.12 183.13 @@ -488,6 +491,12 @@ 183.14 is_predicted = true; 183.15 break; 183.16 183.17 + case vmIntrinsics::_updateCRC32: 183.18 + case vmIntrinsics::_updateBytesCRC32: 183.19 + case vmIntrinsics::_updateByteBufferCRC32: 183.20 + if (!UseCRC32Intrinsics) return NULL; 183.21 + break; 183.22 + 183.23 default: 183.24 assert(id <= vmIntrinsics::LAST_COMPILER_INLINE, "caller responsibility"); 183.25 assert(id != vmIntrinsics::_Object_init && id != vmIntrinsics::_invoke, "enum out of order?"); 183.26 @@ -807,6 +816,13 @@ 183.27 case vmIntrinsics::_encodeISOArray: 183.28 return inline_encodeISOArray(); 183.29 183.30 + case vmIntrinsics::_updateCRC32: 183.31 + return inline_updateCRC32(); 183.32 + case vmIntrinsics::_updateBytesCRC32: 183.33 + return inline_updateBytesCRC32(); 183.34 + case vmIntrinsics::_updateByteBufferCRC32: 183.35 + return inline_updateByteBufferCRC32(); 183.36 + 183.37 default: 183.38 // If you get here, it may be that someone has added a new intrinsic 183.39 // to the list in vmSymbols.hpp without implementing it here. 183.40 @@ -884,7 +900,7 @@ 183.41 183.42 IfNode* iff = create_and_map_if(control(), test, true_prob, COUNT_UNKNOWN); 183.43 183.44 - Node* if_slow = _gvn.transform( new (C) IfTrueNode(iff) ); 183.45 + Node* if_slow = _gvn.transform(new (C) IfTrueNode(iff)); 183.46 if (if_slow == top()) { 183.47 // The slow branch is never taken. No need to build this guard. 
183.48 return NULL; 183.49 @@ -893,7 +909,7 @@ 183.50 if (region != NULL) 183.51 region->add_req(if_slow); 183.52 183.53 - Node* if_fast = _gvn.transform( new (C) IfFalseNode(iff) ); 183.54 + Node* if_fast = _gvn.transform(new (C) IfFalseNode(iff)); 183.55 set_control(if_fast); 183.56 183.57 return if_slow; 183.58 @@ -912,8 +928,8 @@ 183.59 return NULL; // already stopped 183.60 if (_gvn.type(index)->higher_equal(TypeInt::POS)) // [0,maxint] 183.61 return NULL; // index is already adequately typed 183.62 - Node* cmp_lt = _gvn.transform( new (C) CmpINode(index, intcon(0)) ); 183.63 - Node* bol_lt = _gvn.transform( new (C) BoolNode(cmp_lt, BoolTest::lt) ); 183.64 + Node* cmp_lt = _gvn.transform(new (C) CmpINode(index, intcon(0))); 183.65 + Node* bol_lt = _gvn.transform(new (C) BoolNode(cmp_lt, BoolTest::lt)); 183.66 Node* is_neg = generate_guard(bol_lt, region, PROB_MIN); 183.67 if (is_neg != NULL && pos_index != NULL) { 183.68 // Emulate effect of Parse::adjust_map_after_if. 183.69 @@ -930,9 +946,9 @@ 183.70 return NULL; // already stopped 183.71 if (_gvn.type(index)->higher_equal(TypeInt::POS1)) // [1,maxint] 183.72 return NULL; // index is already adequately typed 183.73 - Node* cmp_le = _gvn.transform( new (C) CmpINode(index, intcon(0)) ); 183.74 + Node* cmp_le = _gvn.transform(new (C) CmpINode(index, intcon(0))); 183.75 BoolTest::mask le_or_eq = (never_negative ? BoolTest::eq : BoolTest::le); 183.76 - Node* bol_le = _gvn.transform( new (C) BoolNode(cmp_le, le_or_eq) ); 183.77 + Node* bol_le = _gvn.transform(new (C) BoolNode(cmp_le, le_or_eq)); 183.78 Node* is_notp = generate_guard(bol_le, NULL, PROB_MIN); 183.79 if (is_notp != NULL && pos_index != NULL) { 183.80 // Emulate effect of Parse::adjust_map_after_if. 
183.81 @@ -968,9 +984,9 @@ 183.82 return NULL; // common case of whole-array copy 183.83 Node* last = subseq_length; 183.84 if (!zero_offset) // last += offset 183.85 - last = _gvn.transform( new (C) AddINode(last, offset)); 183.86 - Node* cmp_lt = _gvn.transform( new (C) CmpUNode(array_length, last) ); 183.87 - Node* bol_lt = _gvn.transform( new (C) BoolNode(cmp_lt, BoolTest::lt) ); 183.88 + last = _gvn.transform(new (C) AddINode(last, offset)); 183.89 + Node* cmp_lt = _gvn.transform(new (C) CmpUNode(array_length, last)); 183.90 + Node* bol_lt = _gvn.transform(new (C) BoolNode(cmp_lt, BoolTest::lt)); 183.91 Node* is_over = generate_guard(bol_lt, region, PROB_MIN); 183.92 return is_over; 183.93 } 183.94 @@ -1151,8 +1167,8 @@ 183.95 Node* argument_cnt = load_String_length(no_ctrl, argument); 183.96 183.97 // Check for receiver count != argument count 183.98 - Node* cmp = _gvn.transform( new(C) CmpINode(receiver_cnt, argument_cnt) ); 183.99 - Node* bol = _gvn.transform( new(C) BoolNode(cmp, BoolTest::ne) ); 183.100 + Node* cmp = _gvn.transform(new(C) CmpINode(receiver_cnt, argument_cnt)); 183.101 + Node* bol = _gvn.transform(new(C) BoolNode(cmp, BoolTest::ne)); 183.102 Node* if_ne = generate_slow_guard(bol, NULL); 183.103 if (if_ne != NULL) { 183.104 phi->init_req(4, intcon(0)); 183.105 @@ -1258,7 +1274,7 @@ 183.106 Node* sourceOffset = load_String_offset(no_ctrl, string_object); 183.107 Node* sourceCount = load_String_length(no_ctrl, string_object); 183.108 183.109 - Node* target = _gvn.transform( makecon(TypeOopPtr::make_from_constant(target_array, true)) ); 183.110 + Node* target = _gvn.transform( makecon(TypeOopPtr::make_from_constant(target_array, true))); 183.111 jint target_length = target_array->length(); 183.112 const TypeAry* target_array_type = TypeAry::make(TypeInt::CHAR, TypeInt::make(0, target_length, Type::WidenMin)); 183.113 const TypeAryPtr* target_type = TypeAryPtr::make(TypePtr::BotPTR, target_array_type, target_array->klass(), true, 
Type::OffsetBot); 183.114 @@ -1365,8 +1381,8 @@ 183.115 Node* substr_cnt = load_String_length(no_ctrl, arg); 183.116 183.117 // Check for substr count > string count 183.118 - Node* cmp = _gvn.transform( new(C) CmpINode(substr_cnt, source_cnt) ); 183.119 - Node* bol = _gvn.transform( new(C) BoolNode(cmp, BoolTest::gt) ); 183.120 + Node* cmp = _gvn.transform(new(C) CmpINode(substr_cnt, source_cnt)); 183.121 + Node* bol = _gvn.transform(new(C) BoolNode(cmp, BoolTest::gt)); 183.122 Node* if_gt = generate_slow_guard(bol, NULL); 183.123 if (if_gt != NULL) { 183.124 result_phi->init_req(2, intcon(-1)); 183.125 @@ -1375,8 +1391,8 @@ 183.126 183.127 if (!stopped()) { 183.128 // Check for substr count == 0 183.129 - cmp = _gvn.transform( new(C) CmpINode(substr_cnt, intcon(0)) ); 183.130 - bol = _gvn.transform( new(C) BoolNode(cmp, BoolTest::eq) ); 183.131 + cmp = _gvn.transform(new(C) CmpINode(substr_cnt, intcon(0))); 183.132 + bol = _gvn.transform(new(C) BoolNode(cmp, BoolTest::eq)); 183.133 Node* if_zero = generate_slow_guard(bol, NULL); 183.134 if (if_zero != NULL) { 183.135 result_phi->init_req(3, intcon(0)); 183.136 @@ -1552,7 +1568,7 @@ 183.137 // Check PI/4 : abs(arg) 183.138 Node *cmp = _gvn.transform(new (C) CmpDNode(pi4,abs)); 183.139 // Check: If PI/4 < abs(arg) then go slow 183.140 - Node *bol = _gvn.transform( new (C) BoolNode( cmp, BoolTest::lt ) ); 183.141 + Node *bol = _gvn.transform(new (C) BoolNode( cmp, BoolTest::lt )); 183.142 // Branch either way 183.143 IfNode *iff = create_and_xform_if(control(),bol, PROB_STATIC_FREQUENT, COUNT_UNKNOWN); 183.144 set_control(opt_iff(r,iff)); 183.145 @@ -1617,8 +1633,8 @@ 183.146 // to the runtime to properly handle corner cases 183.147 183.148 IfNode* iff = create_and_xform_if(control(), bolisnum, PROB_STATIC_FREQUENT, COUNT_UNKNOWN); 183.149 - Node* if_slow = _gvn.transform( new (C) IfFalseNode(iff) ); 183.150 - Node* if_fast = _gvn.transform( new (C) IfTrueNode(iff) ); 183.151 + Node* if_slow = _gvn.transform(new (C) 
IfFalseNode(iff)); 183.152 + Node* if_fast = _gvn.transform(new (C) IfTrueNode(iff)); 183.153 183.154 if (!if_slow->is_top()) { 183.155 RegionNode* result_region = new (C) RegionNode(3); 183.156 @@ -1704,42 +1720,42 @@ 183.157 // Check x:0 183.158 Node *cmp = _gvn.transform(new (C) CmpDNode(x, zeronode)); 183.159 // Check: If (x<=0) then go complex path 183.160 - Node *bol1 = _gvn.transform( new (C) BoolNode( cmp, BoolTest::le ) ); 183.161 + Node *bol1 = _gvn.transform(new (C) BoolNode( cmp, BoolTest::le )); 183.162 // Branch either way 183.163 IfNode *if1 = create_and_xform_if(control(),bol1, PROB_STATIC_INFREQUENT, COUNT_UNKNOWN); 183.164 // Fast path taken; set region slot 3 183.165 - Node *fast_taken = _gvn.transform( new (C) IfFalseNode(if1) ); 183.166 + Node *fast_taken = _gvn.transform(new (C) IfFalseNode(if1)); 183.167 r->init_req(3,fast_taken); // Capture fast-control 183.168 183.169 // Fast path not-taken, i.e. slow path 183.170 - Node *complex_path = _gvn.transform( new (C) IfTrueNode(if1) ); 183.171 + Node *complex_path = _gvn.transform(new (C) IfTrueNode(if1)); 183.172 183.173 // Set fast path result 183.174 - Node *fast_result = _gvn.transform( new (C) PowDNode(C, control(), x, y) ); 183.175 + Node *fast_result = _gvn.transform(new (C) PowDNode(C, control(), x, y)); 183.176 phi->init_req(3, fast_result); 183.177 183.178 // Complex path 183.179 // Build the second if node (if y is long) 183.180 // Node for (long)y 183.181 - Node *longy = _gvn.transform( new (C) ConvD2LNode(y)); 183.182 + Node *longy = _gvn.transform(new (C) ConvD2LNode(y)); 183.183 // Node for (double)((long) y) 183.184 - Node *doublelongy= _gvn.transform( new (C) ConvL2DNode(longy)); 183.185 + Node *doublelongy= _gvn.transform(new (C) ConvL2DNode(longy)); 183.186 // Check (double)((long) y) : y 183.187 Node *cmplongy= _gvn.transform(new (C) CmpDNode(doublelongy, y)); 183.188 // Check if (y isn't long) then go to slow path 183.189 183.190 - Node *bol2 = _gvn.transform( new (C) 
BoolNode( cmplongy, BoolTest::ne ) ); 183.191 + Node *bol2 = _gvn.transform(new (C) BoolNode( cmplongy, BoolTest::ne )); 183.192 // Branch either way 183.193 IfNode *if2 = create_and_xform_if(complex_path,bol2, PROB_STATIC_INFREQUENT, COUNT_UNKNOWN); 183.194 - Node* ylong_path = _gvn.transform( new (C) IfFalseNode(if2)); 183.195 - 183.196 - Node *slow_path = _gvn.transform( new (C) IfTrueNode(if2) ); 183.197 + Node* ylong_path = _gvn.transform(new (C) IfFalseNode(if2)); 183.198 + 183.199 + Node *slow_path = _gvn.transform(new (C) IfTrueNode(if2)); 183.200 183.201 // Calculate DPow(abs(x), y)*(1 & (long)y) 183.202 // Node for constant 1 183.203 Node *conone = longcon(1); 183.204 // 1& (long)y 183.205 - Node *signnode= _gvn.transform( new (C) AndLNode(conone, longy) ); 183.206 + Node *signnode= _gvn.transform(new (C) AndLNode(conone, longy)); 183.207 183.208 // A huge number is always even. Detect a huge number by checking 183.209 // if y + 1 == y and set integer to be tested for parity to 0. 
183.210 @@ -1747,9 +1763,9 @@ 183.211 // (long)9.223372036854776E18 = max_jlong 183.212 // (double)(long)9.223372036854776E18 = 9.223372036854776E18 183.213 // max_jlong is odd but 9.223372036854776E18 is even 183.214 - Node* yplus1 = _gvn.transform( new (C) AddDNode(y, makecon(TypeD::make(1)))); 183.215 + Node* yplus1 = _gvn.transform(new (C) AddDNode(y, makecon(TypeD::make(1)))); 183.216 Node *cmpyplus1= _gvn.transform(new (C) CmpDNode(yplus1, y)); 183.217 - Node *bolyplus1 = _gvn.transform( new (C) BoolNode( cmpyplus1, BoolTest::eq ) ); 183.218 + Node *bolyplus1 = _gvn.transform(new (C) BoolNode( cmpyplus1, BoolTest::eq )); 183.219 Node* correctedsign = NULL; 183.220 if (ConditionalMoveLimit != 0) { 183.221 correctedsign = _gvn.transform( CMoveNode::make(C, NULL, bolyplus1, signnode, longcon(0), TypeLong::LONG)); 183.222 @@ -1757,8 +1773,8 @@ 183.223 IfNode *ifyplus1 = create_and_xform_if(ylong_path,bolyplus1, PROB_FAIR, COUNT_UNKNOWN); 183.224 RegionNode *r = new (C) RegionNode(3); 183.225 Node *phi = new (C) PhiNode(r, TypeLong::LONG); 183.226 - r->init_req(1, _gvn.transform( new (C) IfFalseNode(ifyplus1))); 183.227 - r->init_req(2, _gvn.transform( new (C) IfTrueNode(ifyplus1))); 183.228 + r->init_req(1, _gvn.transform(new (C) IfFalseNode(ifyplus1))); 183.229 + r->init_req(2, _gvn.transform(new (C) IfTrueNode(ifyplus1))); 183.230 phi->init_req(1, signnode); 183.231 phi->init_req(2, longcon(0)); 183.232 correctedsign = _gvn.transform(phi); 183.233 @@ -1771,11 +1787,11 @@ 183.234 // Check (1&(long)y)==0? 
183.235 Node *cmpeq1 = _gvn.transform(new (C) CmpLNode(correctedsign, conzero)); 183.236 // Check if (1&(long)y)!=0?, if so the result is negative 183.237 - Node *bol3 = _gvn.transform( new (C) BoolNode( cmpeq1, BoolTest::ne ) ); 183.238 + Node *bol3 = _gvn.transform(new (C) BoolNode( cmpeq1, BoolTest::ne )); 183.239 // abs(x) 183.240 - Node *absx=_gvn.transform( new (C) AbsDNode(x)); 183.241 + Node *absx=_gvn.transform(new (C) AbsDNode(x)); 183.242 // abs(x)^y 183.243 - Node *absxpowy = _gvn.transform( new (C) PowDNode(C, control(), absx, y) ); 183.244 + Node *absxpowy = _gvn.transform(new (C) PowDNode(C, control(), absx, y)); 183.245 // -abs(x)^y 183.246 Node *negabsxpowy = _gvn.transform(new (C) NegDNode (absxpowy)); 183.247 // (1&(long)y)==1?-DPow(abs(x), y):DPow(abs(x), y) 183.248 @@ -1786,8 +1802,8 @@ 183.249 IfNode *ifyeven = create_and_xform_if(ylong_path,bol3, PROB_FAIR, COUNT_UNKNOWN); 183.250 RegionNode *r = new (C) RegionNode(3); 183.251 Node *phi = new (C) PhiNode(r, Type::DOUBLE); 183.252 - r->init_req(1, _gvn.transform( new (C) IfFalseNode(ifyeven))); 183.253 - r->init_req(2, _gvn.transform( new (C) IfTrueNode(ifyeven))); 183.254 + r->init_req(1, _gvn.transform(new (C) IfFalseNode(ifyeven))); 183.255 + r->init_req(2, _gvn.transform(new (C) IfTrueNode(ifyeven))); 183.256 phi->init_req(1, absxpowy); 183.257 phi->init_req(2, negabsxpowy); 183.258 signresult = _gvn.transform(phi); 183.259 @@ -1920,7 +1936,7 @@ 183.260 int cmp_op = Op_CmpI; 183.261 Node* xkey = xvalue; 183.262 Node* ykey = yvalue; 183.263 - Node* ideal_cmpxy = _gvn.transform( new(C) CmpINode(xkey, ykey) ); 183.264 + Node* ideal_cmpxy = _gvn.transform(new(C) CmpINode(xkey, ykey)); 183.265 if (ideal_cmpxy->is_Cmp()) { 183.266 // E.g., if we have CmpI(length - offset, count), 183.267 // it might idealize to CmpI(length, count + offset) 183.268 @@ -2013,7 +2029,7 @@ 183.269 default: 183.270 if (cmpxy == NULL) 183.271 cmpxy = ideal_cmpxy; 183.272 - best_bol = _gvn.transform( new(C) 
BoolNode(cmpxy, BoolTest::lt) ); 183.273 + best_bol = _gvn.transform(new(C) BoolNode(cmpxy, BoolTest::lt)); 183.274 // and fall through: 183.275 case BoolTest::lt: // x < y 183.276 case BoolTest::le: // x <= y 183.277 @@ -2073,7 +2089,7 @@ 183.278 return Type::AnyPtr; 183.279 } else if (base_type == TypePtr::NULL_PTR) { 183.280 // Since this is a NULL+long form, we have to switch to a rawptr. 183.281 - base = _gvn.transform( new (C) CastX2PNode(offset) ); 183.282 + base = _gvn.transform(new (C) CastX2PNode(offset)); 183.283 offset = MakeConX(0); 183.284 return Type::RawPtr; 183.285 } else if (base_type->base() == Type::RawPtr) { 183.286 @@ -2467,7 +2483,7 @@ 183.287 case T_ADDRESS: 183.288 // Repackage the long as a pointer. 183.289 val = ConvL2X(val); 183.290 - val = _gvn.transform( new (C) CastX2PNode(val) ); 183.291 + val = _gvn.transform(new (C) CastX2PNode(val)); 183.292 break; 183.293 } 183.294 183.295 @@ -2775,7 +2791,7 @@ 183.296 // SCMemProjNodes represent the memory state of a LoadStore. Their 183.297 // main role is to prevent LoadStore nodes from being optimized away 183.298 // when their results aren't used. 
183.299 - Node* proj = _gvn.transform( new (C) SCMemProjNode(load_store)); 183.300 + Node* proj = _gvn.transform(new (C) SCMemProjNode(load_store)); 183.301 set_memory(proj, alias_idx); 183.302 183.303 // Add the trailing membar surrounding the access 183.304 @@ -3010,8 +3026,8 @@ 183.305 Node* rec_thr = argument(0); 183.306 Node* tls_ptr = NULL; 183.307 Node* cur_thr = generate_current_thread(tls_ptr); 183.308 - Node* cmp_thr = _gvn.transform( new (C) CmpPNode(cur_thr, rec_thr) ); 183.309 - Node* bol_thr = _gvn.transform( new (C) BoolNode(cmp_thr, BoolTest::ne) ); 183.310 + Node* cmp_thr = _gvn.transform(new (C) CmpPNode(cur_thr, rec_thr)); 183.311 + Node* bol_thr = _gvn.transform(new (C) BoolNode(cmp_thr, BoolTest::ne)); 183.312 183.313 generate_slow_guard(bol_thr, slow_region); 183.314 183.315 @@ -3022,36 +3038,36 @@ 183.316 183.317 // Set the control input on the field _interrupted read to prevent it floating up. 183.318 Node* int_bit = make_load(control(), p, TypeInt::BOOL, T_INT); 183.319 - Node* cmp_bit = _gvn.transform( new (C) CmpINode(int_bit, intcon(0)) ); 183.320 - Node* bol_bit = _gvn.transform( new (C) BoolNode(cmp_bit, BoolTest::ne) ); 183.321 + Node* cmp_bit = _gvn.transform(new (C) CmpINode(int_bit, intcon(0))); 183.322 + Node* bol_bit = _gvn.transform(new (C) BoolNode(cmp_bit, BoolTest::ne)); 183.323 183.324 IfNode* iff_bit = create_and_map_if(control(), bol_bit, PROB_UNLIKELY_MAG(3), COUNT_UNKNOWN); 183.325 183.326 // First fast path: if (!TLS._interrupted) return false; 183.327 - Node* false_bit = _gvn.transform( new (C) IfFalseNode(iff_bit) ); 183.328 + Node* false_bit = _gvn.transform(new (C) IfFalseNode(iff_bit)); 183.329 result_rgn->init_req(no_int_result_path, false_bit); 183.330 result_val->init_req(no_int_result_path, intcon(0)); 183.331 183.332 // drop through to next case 183.333 - set_control( _gvn.transform(new (C) IfTrueNode(iff_bit)) ); 183.334 + set_control( _gvn.transform(new (C) IfTrueNode(iff_bit))); 183.335 183.336 // (c) Or, 
if interrupt bit is set and clear_int is false, use 2nd fast path. 183.337 Node* clr_arg = argument(1); 183.338 - Node* cmp_arg = _gvn.transform( new (C) CmpINode(clr_arg, intcon(0)) ); 183.339 - Node* bol_arg = _gvn.transform( new (C) BoolNode(cmp_arg, BoolTest::ne) ); 183.340 + Node* cmp_arg = _gvn.transform(new (C) CmpINode(clr_arg, intcon(0))); 183.341 + Node* bol_arg = _gvn.transform(new (C) BoolNode(cmp_arg, BoolTest::ne)); 183.342 IfNode* iff_arg = create_and_map_if(control(), bol_arg, PROB_FAIR, COUNT_UNKNOWN); 183.343 183.344 // Second fast path: ... else if (!clear_int) return true; 183.345 - Node* false_arg = _gvn.transform( new (C) IfFalseNode(iff_arg) ); 183.346 + Node* false_arg = _gvn.transform(new (C) IfFalseNode(iff_arg)); 183.347 result_rgn->init_req(no_clear_result_path, false_arg); 183.348 result_val->init_req(no_clear_result_path, intcon(1)); 183.349 183.350 // drop through to next case 183.351 - set_control( _gvn.transform(new (C) IfTrueNode(iff_arg)) ); 183.352 + set_control( _gvn.transform(new (C) IfTrueNode(iff_arg))); 183.353 183.354 // (d) Otherwise, go to the slow path. 183.355 slow_region->add_req(control()); 183.356 - set_control( _gvn.transform(slow_region) ); 183.357 + set_control( _gvn.transform(slow_region)); 183.358 183.359 if (stopped()) { 183.360 // There is no slow path. 
183.361 @@ -3107,7 +3123,7 @@ 183.362 if (region == NULL) never_see_null = true; 183.363 Node* p = basic_plus_adr(mirror, offset); 183.364 const TypeKlassPtr* kls_type = TypeKlassPtr::OBJECT_OR_NULL; 183.365 - Node* kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type) ); 183.366 + Node* kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, kls_type)); 183.367 Node* null_ctl = top(); 183.368 kls = null_check_oop(kls, &null_ctl, never_see_null); 183.369 if (region != NULL) { 183.370 @@ -3129,9 +3145,9 @@ 183.371 Node* mods = make_load(NULL, modp, TypeInt::INT, T_INT); 183.372 Node* mask = intcon(modifier_mask); 183.373 Node* bits = intcon(modifier_bits); 183.374 - Node* mbit = _gvn.transform( new (C) AndINode(mods, mask) ); 183.375 - Node* cmp = _gvn.transform( new (C) CmpINode(mbit, bits) ); 183.376 - Node* bol = _gvn.transform( new (C) BoolNode(cmp, BoolTest::ne) ); 183.377 + Node* mbit = _gvn.transform(new (C) AndINode(mods, mask)); 183.378 + Node* cmp = _gvn.transform(new (C) CmpINode(mbit, bits)); 183.379 + Node* bol = _gvn.transform(new (C) BoolNode(cmp, BoolTest::ne)); 183.380 return generate_fair_guard(bol, region); 183.381 } 183.382 Node* LibraryCallKit::generate_interface_guard(Node* kls, RegionNode* region) { 183.383 @@ -3282,7 +3298,7 @@ 183.384 phi->add_req(makecon(TypeInstPtr::make(env()->Object_klass()->java_mirror()))); 183.385 // If we fall through, it's a plain class. Get its _super. 
183.386 p = basic_plus_adr(kls, in_bytes(Klass::super_offset())); 183.387 - kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeKlassPtr::OBJECT_OR_NULL) ); 183.388 + kls = _gvn.transform( LoadKlassNode::make(_gvn, immutable_memory(), p, TypeRawPtr::BOTTOM, TypeKlassPtr::OBJECT_OR_NULL)); 183.389 null_ctl = top(); 183.390 kls = null_check_oop(kls, &null_ctl); 183.391 if (null_ctl != top()) { 183.392 @@ -3395,8 +3411,8 @@ 183.393 set_control(region->in(_prim_0_path)); // go back to first null check 183.394 if (!stopped()) { 183.395 // Since superc is primitive, make a guard for the superc==subc case. 183.396 - Node* cmp_eq = _gvn.transform( new (C) CmpPNode(args[0], args[1]) ); 183.397 - Node* bol_eq = _gvn.transform( new (C) BoolNode(cmp_eq, BoolTest::eq) ); 183.398 + Node* cmp_eq = _gvn.transform(new (C) CmpPNode(args[0], args[1])); 183.399 + Node* bol_eq = _gvn.transform(new (C) BoolNode(cmp_eq, BoolTest::eq)); 183.400 generate_guard(bol_eq, region, PROB_FAIR); 183.401 if (region->req() == PATH_LIMIT+1) { 183.402 // A guard was added. If the added guard is taken, superc==subc. 183.403 @@ -3461,11 +3477,11 @@ 183.404 ? ((jint)Klass::_lh_array_tag_type_value 183.405 << Klass::_lh_array_tag_shift) 183.406 : Klass::_lh_neutral_value); 183.407 - Node* cmp = _gvn.transform( new(C) CmpINode(layout_val, intcon(nval)) ); 183.408 + Node* cmp = _gvn.transform(new(C) CmpINode(layout_val, intcon(nval))); 183.409 BoolTest::mask btest = BoolTest::lt; // correct for testing is_[obj]array 183.410 // invert the test if we are looking for a non-array 183.411 if (not_array) btest = BoolTest(btest).negate(); 183.412 - Node* bol = _gvn.transform( new(C) BoolNode(cmp, btest) ); 183.413 + Node* bol = _gvn.transform(new(C) BoolNode(cmp, btest)); 183.414 return generate_fair_guard(bol, region); 183.415 } 183.416 183.417 @@ -3525,7 +3541,7 @@ 183.418 183.419 // Return the combined state. 
183.420 set_i_o( _gvn.transform(result_io) ); 183.421 - set_all_memory( _gvn.transform(result_mem) ); 183.422 + set_all_memory( _gvn.transform(result_mem)); 183.423 183.424 C->set_has_split_ifs(true); // Has chance for split-if optimization 183.425 set_result(result_reg, result_val); 183.426 @@ -3678,8 +3694,8 @@ 183.427 const TypePtr* native_call_addr = TypeMetadataPtr::make(method); 183.428 183.429 Node* native_call = makecon(native_call_addr); 183.430 - Node* chk_native = _gvn.transform( new(C) CmpPNode(target_call, native_call) ); 183.431 - Node* test_native = _gvn.transform( new(C) BoolNode(chk_native, BoolTest::ne) ); 183.432 + Node* chk_native = _gvn.transform(new(C) CmpPNode(target_call, native_call)); 183.433 + Node* test_native = _gvn.transform(new(C) BoolNode(chk_native, BoolTest::ne)); 183.434 183.435 return generate_slow_guard(test_native, slow_region); 183.436 } 183.437 @@ -3800,10 +3816,10 @@ 183.438 183.439 // Test the header to see if it is unlocked. 183.440 Node *lock_mask = _gvn.MakeConX(markOopDesc::biased_lock_mask_in_place); 183.441 - Node *lmasked_header = _gvn.transform( new (C) AndXNode(header, lock_mask) ); 183.442 + Node *lmasked_header = _gvn.transform(new (C) AndXNode(header, lock_mask)); 183.443 Node *unlocked_val = _gvn.MakeConX(markOopDesc::unlocked_value); 183.444 - Node *chk_unlocked = _gvn.transform( new (C) CmpXNode( lmasked_header, unlocked_val)); 183.445 - Node *test_unlocked = _gvn.transform( new (C) BoolNode( chk_unlocked, BoolTest::ne) ); 183.446 + Node *chk_unlocked = _gvn.transform(new (C) CmpXNode( lmasked_header, unlocked_val)); 183.447 + Node *test_unlocked = _gvn.transform(new (C) BoolNode( chk_unlocked, BoolTest::ne)); 183.448 183.449 generate_slow_guard(test_unlocked, slow_region); 183.450 183.451 @@ -3813,17 +3829,17 @@ 183.452 // vm: see markOop.hpp. 
183.453 Node *hash_mask = _gvn.intcon(markOopDesc::hash_mask); 183.454 Node *hash_shift = _gvn.intcon(markOopDesc::hash_shift); 183.455 - Node *hshifted_header= _gvn.transform( new (C) URShiftXNode(header, hash_shift) ); 183.456 + Node *hshifted_header= _gvn.transform(new (C) URShiftXNode(header, hash_shift)); 183.457 // This hack lets the hash bits live anywhere in the mark object now, as long 183.458 // as the shift drops the relevant bits into the low 32 bits. Note that 183.459 // Java spec says that HashCode is an int so there's no point in capturing 183.460 // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build). 183.461 hshifted_header = ConvX2I(hshifted_header); 183.462 - Node *hash_val = _gvn.transform( new (C) AndINode(hshifted_header, hash_mask) ); 183.463 + Node *hash_val = _gvn.transform(new (C) AndINode(hshifted_header, hash_mask)); 183.464 183.465 Node *no_hash_val = _gvn.intcon(markOopDesc::no_hash); 183.466 - Node *chk_assigned = _gvn.transform( new (C) CmpINode( hash_val, no_hash_val)); 183.467 - Node *test_assigned = _gvn.transform( new (C) BoolNode( chk_assigned, BoolTest::eq) ); 183.468 + Node *chk_assigned = _gvn.transform(new (C) CmpINode( hash_val, no_hash_val)); 183.469 + Node *test_assigned = _gvn.transform(new (C) BoolNode( chk_assigned, BoolTest::eq)); 183.470 183.471 generate_slow_guard(test_assigned, slow_region); 183.472 183.473 @@ -3854,7 +3870,7 @@ 183.474 183.475 // Return the combined state. 
183.476 set_i_o( _gvn.transform(result_io) ); 183.477 - set_all_memory( _gvn.transform(result_mem) ); 183.478 + set_all_memory( _gvn.transform(result_mem)); 183.479 183.480 set_result(result_reg, result_val); 183.481 return true; 183.482 @@ -3982,7 +3998,7 @@ 183.483 Node *opt_isnan = _gvn.transform(ifisnan); 183.484 assert( opt_isnan->is_If(), "Expect an IfNode"); 183.485 IfNode *opt_ifisnan = (IfNode*)opt_isnan; 183.486 - Node *iftrue = _gvn.transform( new (C) IfTrueNode(opt_ifisnan) ); 183.487 + Node *iftrue = _gvn.transform(new (C) IfTrueNode(opt_ifisnan)); 183.488 183.489 set_control(iftrue); 183.490 183.491 @@ -4023,7 +4039,7 @@ 183.492 Node *opt_isnan = _gvn.transform(ifisnan); 183.493 assert( opt_isnan->is_If(), "Expect an IfNode"); 183.494 IfNode *opt_ifisnan = (IfNode*)opt_isnan; 183.495 - Node *iftrue = _gvn.transform( new (C) IfTrueNode(opt_ifisnan) ); 183.496 + Node *iftrue = _gvn.transform(new (C) IfTrueNode(opt_ifisnan)); 183.497 183.498 set_control(iftrue); 183.499 183.500 @@ -4152,8 +4168,8 @@ 183.501 183.502 // Compute the length also, if needed: 183.503 Node* countx = size; 183.504 - countx = _gvn.transform( new (C) SubXNode(countx, MakeConX(base_off)) ); 183.505 - countx = _gvn.transform( new (C) URShiftXNode(countx, intcon(LogBytesPerLong) )); 183.506 + countx = _gvn.transform(new (C) SubXNode(countx, MakeConX(base_off))); 183.507 + countx = _gvn.transform(new (C) URShiftXNode(countx, intcon(LogBytesPerLong) )); 183.508 183.509 const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM; 183.510 bool disjoint_bases = true; 183.511 @@ -4357,9 +4373,9 @@ 183.512 } 183.513 183.514 // Return the combined state. 
183.515 - set_control( _gvn.transform(result_reg) ); 183.516 - set_i_o( _gvn.transform(result_i_o) ); 183.517 - set_all_memory( _gvn.transform(result_mem) ); 183.518 + set_control( _gvn.transform(result_reg)); 183.519 + set_i_o( _gvn.transform(result_i_o)); 183.520 + set_all_memory( _gvn.transform(result_mem)); 183.521 } // original reexecute is set back here 183.522 183.523 set_result(_gvn.transform(result_val)); 183.524 @@ -4684,8 +4700,8 @@ 183.525 // are dest_head = dest[0..off] and dest_tail = dest[off+len..dest.length]. 183.526 Node* dest_size = alloc->in(AllocateNode::AllocSize); 183.527 Node* dest_length = alloc->in(AllocateNode::ALength); 183.528 - Node* dest_tail = _gvn.transform( new(C) AddINode(dest_offset, 183.529 - copy_length) ); 183.530 + Node* dest_tail = _gvn.transform(new(C) AddINode(dest_offset, 183.531 + copy_length)); 183.532 183.533 // If there is a head section that needs zeroing, do it now. 183.534 if (find_int_con(dest_offset, -1) != 0) { 183.535 @@ -4701,8 +4717,8 @@ 183.536 // the copy to a more hardware-friendly word size of 64 bits. 
183.537 Node* tail_ctl = NULL; 183.538 if (!stopped() && !dest_tail->eqv_uncast(dest_length)) { 183.539 - Node* cmp_lt = _gvn.transform( new(C) CmpINode(dest_tail, dest_length) ); 183.540 - Node* bol_lt = _gvn.transform( new(C) BoolNode(cmp_lt, BoolTest::lt) ); 183.541 + Node* cmp_lt = _gvn.transform(new(C) CmpINode(dest_tail, dest_length)); 183.542 + Node* bol_lt = _gvn.transform(new(C) BoolNode(cmp_lt, BoolTest::lt)); 183.543 tail_ctl = generate_slow_guard(bol_lt, NULL); 183.544 assert(tail_ctl != NULL || !stopped(), "must be an outcome"); 183.545 } 183.546 @@ -4745,7 +4761,7 @@ 183.547 dest_size); 183.548 done_ctl->init_req(2, control()); 183.549 done_mem->init_req(2, memory(adr_type)); 183.550 - set_control( _gvn.transform(done_ctl) ); 183.551 + set_control( _gvn.transform(done_ctl)); 183.552 set_memory( _gvn.transform(done_mem), adr_type ); 183.553 } 183.554 } 183.555 @@ -4832,18 +4848,18 @@ 183.556 // Clean up after the checked call. 183.557 // The returned value is either 0 or -1^K, 183.558 // where K = number of partially transferred array elements. 183.559 - Node* cmp = _gvn.transform( new(C) CmpINode(checked_value, intcon(0)) ); 183.560 - Node* bol = _gvn.transform( new(C) BoolNode(cmp, BoolTest::eq) ); 183.561 + Node* cmp = _gvn.transform(new(C) CmpINode(checked_value, intcon(0))); 183.562 + Node* bol = _gvn.transform(new(C) BoolNode(cmp, BoolTest::eq)); 183.563 IfNode* iff = create_and_map_if(control(), bol, PROB_MAX, COUNT_UNKNOWN); 183.564 183.565 // If it is 0, we are done, so transfer to the end. 183.566 - Node* checks_done = _gvn.transform( new(C) IfTrueNode(iff) ); 183.567 + Node* checks_done = _gvn.transform(new(C) IfTrueNode(iff)); 183.568 result_region->init_req(checked_path, checks_done); 183.569 result_i_o ->init_req(checked_path, checked_i_o); 183.570 result_memory->init_req(checked_path, checked_mem); 183.571 183.572 // If it is not zero, merge into the slow call. 
183.573 - set_control( _gvn.transform( new(C) IfFalseNode(iff) )); 183.574 + set_control( _gvn.transform(new(C) IfFalseNode(iff) )); 183.575 RegionNode* slow_reg2 = new(C) RegionNode(3); 183.576 PhiNode* slow_i_o2 = new(C) PhiNode(slow_reg2, Type::ABIO); 183.577 PhiNode* slow_mem2 = new(C) PhiNode(slow_reg2, Type::MEMORY, adr_type); 183.578 @@ -4866,16 +4882,16 @@ 183.579 } else { 183.580 // We must continue the copy exactly where it failed, or else 183.581 // another thread might see the wrong number of writes to dest. 183.582 - Node* checked_offset = _gvn.transform( new(C) XorINode(checked_value, intcon(-1)) ); 183.583 + Node* checked_offset = _gvn.transform(new(C) XorINode(checked_value, intcon(-1))); 183.584 Node* slow_offset = new(C) PhiNode(slow_reg2, TypeInt::INT); 183.585 slow_offset->init_req(1, intcon(0)); 183.586 slow_offset->init_req(2, checked_offset); 183.587 slow_offset = _gvn.transform(slow_offset); 183.588 183.589 // Adjust the arguments by the conditionally incoming offset. 183.590 - Node* src_off_plus = _gvn.transform( new(C) AddINode(src_offset, slow_offset) ); 183.591 - Node* dest_off_plus = _gvn.transform( new(C) AddINode(dest_offset, slow_offset) ); 183.592 - Node* length_minus = _gvn.transform( new(C) SubINode(copy_length, slow_offset) ); 183.593 + Node* src_off_plus = _gvn.transform(new(C) AddINode(src_offset, slow_offset)); 183.594 + Node* dest_off_plus = _gvn.transform(new(C) AddINode(dest_offset, slow_offset)); 183.595 + Node* length_minus = _gvn.transform(new(C) SubINode(copy_length, slow_offset)); 183.596 183.597 // Tweak the node variables to adjust the code produced below: 183.598 src_offset = src_off_plus; 183.599 @@ -4914,7 +4930,7 @@ 183.600 } 183.601 183.602 // Finished; return the combined state. 
183.603 - set_control( _gvn.transform(result_region) ); 183.604 + set_control( _gvn.transform(result_region)); 183.605 set_i_o( _gvn.transform(result_i_o) ); 183.606 set_memory( _gvn.transform(result_memory), adr_type ); 183.607 183.608 @@ -5096,10 +5112,10 @@ 183.609 int end_round = (-1 << scale) & (BytesPerLong - 1); 183.610 Node* end = ConvI2X(slice_len); 183.611 if (scale != 0) 183.612 - end = _gvn.transform( new(C) LShiftXNode(end, intcon(scale) )); 183.613 + end = _gvn.transform(new(C) LShiftXNode(end, intcon(scale) )); 183.614 end_base += end_round; 183.615 - end = _gvn.transform( new(C) AddXNode(end, MakeConX(end_base)) ); 183.616 - end = _gvn.transform( new(C) AndXNode(end, MakeConX(~end_round)) ); 183.617 + end = _gvn.transform(new(C) AddXNode(end, MakeConX(end_base))); 183.618 + end = _gvn.transform(new(C) AndXNode(end, MakeConX(~end_round))); 183.619 mem = ClearArrayNode::clear_memory(control(), mem, dest, 183.620 start_con, end, &_gvn); 183.621 } else if (start_con < 0 && dest_size != top()) { 183.622 @@ -5108,8 +5124,8 @@ 183.623 Node* start = slice_idx; 183.624 start = ConvI2X(start); 183.625 if (scale != 0) 183.626 - start = _gvn.transform( new(C) LShiftXNode( start, intcon(scale) )); 183.627 - start = _gvn.transform( new(C) AddXNode(start, MakeConX(abase)) ); 183.628 + start = _gvn.transform(new(C) LShiftXNode( start, intcon(scale) )); 183.629 + start = _gvn.transform(new(C) AddXNode(start, MakeConX(abase))); 183.630 if ((bump_bit | clear_low) != 0) { 183.631 int to_clear = (bump_bit | clear_low); 183.632 // Align up mod 8, then store a jint zero unconditionally 183.633 @@ -5120,14 +5136,14 @@ 183.634 assert((abase & to_clear) == 0, "array base must be long-aligned"); 183.635 } else { 183.636 // Bump 'start' up to (or past) the next jint boundary: 183.637 - start = _gvn.transform( new(C) AddXNode(start, MakeConX(bump_bit)) ); 183.638 + start = _gvn.transform(new(C) AddXNode(start, MakeConX(bump_bit))); 183.639 assert((abase & clear_low) == 0, 
"array base must be int-aligned"); 183.640 } 183.641 // Round bumped 'start' down to jlong boundary in body of array. 183.642 - start = _gvn.transform( new(C) AndXNode(start, MakeConX(~to_clear)) ); 183.643 + start = _gvn.transform(new(C) AndXNode(start, MakeConX(~to_clear))); 183.644 if (bump_bit != 0) { 183.645 // Store a zero to the immediately preceding jint: 183.646 - Node* x1 = _gvn.transform( new(C) AddXNode(start, MakeConX(-bump_bit)) ); 183.647 + Node* x1 = _gvn.transform(new(C) AddXNode(start, MakeConX(-bump_bit))); 183.648 Node* p1 = basic_plus_adr(dest, x1); 183.649 mem = StoreNode::make(_gvn, control(), mem, p1, adr_type, intcon(0), T_INT); 183.650 mem = _gvn.transform(mem); 183.651 @@ -5194,8 +5210,8 @@ 183.652 Node* sptr = basic_plus_adr(src, src_off); 183.653 Node* dptr = basic_plus_adr(dest, dest_off); 183.654 Node* countx = dest_size; 183.655 - countx = _gvn.transform( new (C) SubXNode(countx, MakeConX(dest_off)) ); 183.656 - countx = _gvn.transform( new (C) URShiftXNode(countx, intcon(LogBytesPerLong)) ); 183.657 + countx = _gvn.transform(new (C) SubXNode(countx, MakeConX(dest_off))); 183.658 + countx = _gvn.transform(new (C) URShiftXNode(countx, intcon(LogBytesPerLong))); 183.659 183.660 bool disjoint_bases = true; // since alloc != NULL 183.661 generate_unchecked_arraycopy(adr_type, T_LONG, disjoint_bases, 183.662 @@ -5360,6 +5376,117 @@ 183.663 return true; 183.664 } 183.665 183.666 +/** 183.667 + * Calculate CRC32 for byte. 
183.668 + * int java.util.zip.CRC32.update(int crc, int b) 183.669 + */ 183.670 +bool LibraryCallKit::inline_updateCRC32() { 183.671 + assert(UseCRC32Intrinsics, "need AVX and LCMUL instructions support"); 183.672 + assert(callee()->signature()->size() == 2, "update has 2 parameters"); 183.673 + // no receiver since it is static method 183.674 + Node* crc = argument(0); // type: int 183.675 + Node* b = argument(1); // type: int 183.676 + 183.677 + /* 183.678 + * int c = ~ crc; 183.679 + * b = timesXtoThe32[(b ^ c) & 0xFF]; 183.680 + * b = b ^ (c >>> 8); 183.681 + * crc = ~b; 183.682 + */ 183.683 + 183.684 + Node* M1 = intcon(-1); 183.685 + crc = _gvn.transform(new (C) XorINode(crc, M1)); 183.686 + Node* result = _gvn.transform(new (C) XorINode(crc, b)); 183.687 + result = _gvn.transform(new (C) AndINode(result, intcon(0xFF))); 183.688 + 183.689 + Node* base = makecon(TypeRawPtr::make(StubRoutines::crc_table_addr())); 183.690 + Node* offset = _gvn.transform(new (C) LShiftINode(result, intcon(0x2))); 183.691 + Node* adr = basic_plus_adr(top(), base, ConvI2X(offset)); 183.692 + result = make_load(control(), adr, TypeInt::INT, T_INT); 183.693 + 183.694 + crc = _gvn.transform(new (C) URShiftINode(crc, intcon(8))); 183.695 + result = _gvn.transform(new (C) XorINode(crc, result)); 183.696 + result = _gvn.transform(new (C) XorINode(result, M1)); 183.697 + set_result(result); 183.698 + return true; 183.699 +} 183.700 + 183.701 +/** 183.702 + * Calculate CRC32 for byte[] array. 
183.703 + * int java.util.zip.CRC32.updateBytes(int crc, byte[] buf, int off, int len) 183.704 + */ 183.705 +bool LibraryCallKit::inline_updateBytesCRC32() { 183.706 + assert(UseCRC32Intrinsics, "need AVX and LCMUL instructions support"); 183.707 + assert(callee()->signature()->size() == 4, "updateBytes has 4 parameters"); 183.708 + // no receiver since it is static method 183.709 + Node* crc = argument(0); // type: int 183.710 + Node* src = argument(1); // type: oop 183.711 + Node* offset = argument(2); // type: int 183.712 + Node* length = argument(3); // type: int 183.713 + 183.714 + const Type* src_type = src->Value(&_gvn); 183.715 + const TypeAryPtr* top_src = src_type->isa_aryptr(); 183.716 + if (top_src == NULL || top_src->klass() == NULL) { 183.717 + // failed array check 183.718 + return false; 183.719 + } 183.720 + 183.721 + // Figure out the size and type of the elements we will be copying. 183.722 + BasicType src_elem = src_type->isa_aryptr()->klass()->as_array_klass()->element_type()->basic_type(); 183.723 + if (src_elem != T_BYTE) { 183.724 + return false; 183.725 + } 183.726 + 183.727 + // 'src_start' points to src array + scaled offset 183.728 + Node* src_start = array_element_address(src, offset, src_elem); 183.729 + 183.730 + // We assume that range check is done by caller. 183.731 + // TODO: generate range check (offset+length < src.length) in debug VM. 183.732 + 183.733 + // Call the stub. 183.734 + address stubAddr = StubRoutines::updateBytesCRC32(); 183.735 + const char *stubName = "updateBytesCRC32"; 183.736 + 183.737 + Node* call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::updateBytesCRC32_Type(), 183.738 + stubAddr, stubName, TypePtr::BOTTOM, 183.739 + crc, src_start, length); 183.740 + Node* result = _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms)); 183.741 + set_result(result); 183.742 + return true; 183.743 +} 183.744 + 183.745 +/** 183.746 + * Calculate CRC32 for ByteBuffer. 
183.747 + * int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len) 183.748 + */ 183.749 +bool LibraryCallKit::inline_updateByteBufferCRC32() { 183.750 + assert(UseCRC32Intrinsics, "need AVX and LCMUL instructions support"); 183.751 + assert(callee()->signature()->size() == 5, "updateByteBuffer has 4 parameters and one is long"); 183.752 + // no receiver since it is static method 183.753 + Node* crc = argument(0); // type: int 183.754 + Node* src = argument(1); // type: long 183.755 + Node* offset = argument(3); // type: int 183.756 + Node* length = argument(4); // type: int 183.757 + 183.758 + src = ConvL2X(src); // adjust Java long to machine word 183.759 + Node* base = _gvn.transform(new (C) CastX2PNode(src)); 183.760 + offset = ConvI2X(offset); 183.761 + 183.762 + // 'src_start' points to src array + scaled offset 183.763 + Node* src_start = basic_plus_adr(top(), base, offset); 183.764 + 183.765 + // Call the stub. 183.766 + address stubAddr = StubRoutines::updateBytesCRC32(); 183.767 + const char *stubName = "updateBytesCRC32"; 183.768 + 183.769 + Node* call = make_runtime_call(RC_LEAF|RC_NO_FP, OptoRuntime::updateBytesCRC32_Type(), 183.770 + stubAddr, stubName, TypePtr::BOTTOM, 183.771 + crc, src_start, length); 183.772 + Node* result = _gvn.transform(new (C) ProjNode(call, TypeFunc::Parms)); 183.773 + set_result(result); 183.774 + return true; 183.775 +} 183.776 + 183.777 //----------------------------inline_reference_get---------------------------- 183.778 // public T java.lang.ref.Reference.get(); 183.779 bool LibraryCallKit::inline_reference_get() {
184.1 --- a/src/share/vm/opto/matcher.cpp Thu Jul 11 12:59:03 2013 -0400 184.2 +++ b/src/share/vm/opto/matcher.cpp Mon Jul 15 11:07:03 2013 +0100 184.3 @@ -985,6 +985,8 @@ 184.4 mstack.push(n, Visit, NULL, -1); // set NULL as parent to indicate root 184.5 184.6 while (mstack.is_nonempty()) { 184.7 + C->check_node_count(NodeLimitFudgeFactor, "too many nodes matching instructions"); 184.8 + if (C->failing()) return NULL; 184.9 n = mstack.node(); // Leave node on stack 184.10 Node_State nstate = mstack.state(); 184.11 if (nstate == Visit) {
185.1 --- a/src/share/vm/opto/memnode.cpp Thu Jul 11 12:59:03 2013 -0400 185.2 +++ b/src/share/vm/opto/memnode.cpp Mon Jul 15 11:07:03 2013 +0100 185.3 @@ -2930,7 +2930,9 @@ 185.4 Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) { 185.5 if (remove_dead_region(phase, can_reshape)) return this; 185.6 // Don't bother trying to transform a dead node 185.7 - if (in(0) && in(0)->is_top()) return NULL; 185.8 + if (in(0) && in(0)->is_top()) { 185.9 + return NULL; 185.10 + } 185.11 185.12 // Eliminate volatile MemBars for scalar replaced objects. 185.13 if (can_reshape && req() == (Precedent+1)) { 185.14 @@ -2939,6 +2941,22 @@ 185.15 if ((opc == Op_MemBarAcquire || opc == Op_MemBarVolatile)) { 185.16 // Volatile field loads and stores. 185.17 Node* my_mem = in(MemBarNode::Precedent); 185.18 + // The MembarAquire may keep an unused LoadNode alive through the Precedent edge 185.19 + if ((my_mem != NULL) && (opc == Op_MemBarAcquire) && (my_mem->outcnt() == 1)) { 185.20 + // if the Precedent is a decodeN and its input (a Load) is used at more than one place, 185.21 + // replace this Precedent (decodeN) with the Load instead. 185.22 + if ((my_mem->Opcode() == Op_DecodeN) && (my_mem->in(1)->outcnt() > 1)) { 185.23 + Node* load_node = my_mem->in(1); 185.24 + set_req(MemBarNode::Precedent, load_node); 185.25 + phase->is_IterGVN()->_worklist.push(my_mem); 185.26 + my_mem = load_node; 185.27 + } else { 185.28 + assert(my_mem->unique_out() == this, "sanity"); 185.29 + del_req(Precedent); 185.30 + phase->is_IterGVN()->_worklist.push(my_mem); // remove dead node later 185.31 + my_mem = NULL; 185.32 + } 185.33 + } 185.34 if (my_mem != NULL && my_mem->is_Mem()) { 185.35 const TypeOopPtr* t_oop = my_mem->in(MemNode::Address)->bottom_type()->isa_oopptr(); 185.36 // Check for scalar replaced object reference. 
185.37 @@ -4384,7 +4402,7 @@ 185.38 } 185.39 } 185.40 #else // !ASSERT 185.41 -#define verify_memory_slice(m,i,n) (0) // PRODUCT version is no-op 185.42 +#define verify_memory_slice(m,i,n) (void)(0) // PRODUCT version is no-op 185.43 #endif 185.44 185.45
186.1 --- a/src/share/vm/opto/runtime.cpp Thu Jul 11 12:59:03 2013 -0400 186.2 +++ b/src/share/vm/opto/runtime.cpp Mon Jul 15 11:07:03 2013 +0100 186.3 @@ -1,5 +1,5 @@ 186.4 /* 186.5 - * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. 186.6 + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. 186.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 186.8 * 186.9 * This code is free software; you can redistribute it and/or modify it 186.10 @@ -829,6 +829,28 @@ 186.11 return TypeFunc::make(domain, range); 186.12 } 186.13 186.14 +/** 186.15 + * int updateBytesCRC32(int crc, byte* b, int len) 186.16 + */ 186.17 +const TypeFunc* OptoRuntime::updateBytesCRC32_Type() { 186.18 + // create input type (domain) 186.19 + int num_args = 3; 186.20 + int argcnt = num_args; 186.21 + const Type** fields = TypeTuple::fields(argcnt); 186.22 + int argp = TypeFunc::Parms; 186.23 + fields[argp++] = TypeInt::INT; // crc 186.24 + fields[argp++] = TypePtr::NOTNULL; // src 186.25 + fields[argp++] = TypeInt::INT; // len 186.26 + assert(argp == TypeFunc::Parms+argcnt, "correct decoding"); 186.27 + const TypeTuple* domain = TypeTuple::make(TypeFunc::Parms+argcnt, fields); 186.28 + 186.29 + // result type needed 186.30 + fields = TypeTuple::fields(1); 186.31 + fields[TypeFunc::Parms+0] = TypeInt::INT; // crc result 186.32 + const TypeTuple* range = TypeTuple::make(TypeFunc::Parms+1, fields); 186.33 + return TypeFunc::make(domain, range); 186.34 +} 186.35 + 186.36 // for cipherBlockChaining calls of aescrypt encrypt/decrypt, four pointers and a length, returning void 186.37 const TypeFunc* OptoRuntime::cipherBlockChaining_aescrypt_Type() { 186.38 // create input type (domain)
187.1 --- a/src/share/vm/opto/runtime.hpp Thu Jul 11 12:59:03 2013 -0400 187.2 +++ b/src/share/vm/opto/runtime.hpp Mon Jul 15 11:07:03 2013 +0100 187.3 @@ -1,5 +1,5 @@ 187.4 /* 187.5 - * Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved. 187.6 + * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved. 187.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 187.8 * 187.9 * This code is free software; you can redistribute it and/or modify it 187.10 @@ -284,6 +284,8 @@ 187.11 static const TypeFunc* aescrypt_block_Type(); 187.12 static const TypeFunc* cipherBlockChaining_aescrypt_Type(); 187.13 187.14 + static const TypeFunc* updateBytesCRC32_Type(); 187.15 + 187.16 // leaf on stack replacement interpreter accessor types 187.17 static const TypeFunc* osr_end_Type(); 187.18
188.1 --- a/src/share/vm/prims/forte.cpp Thu Jul 11 12:59:03 2013 -0400 188.2 +++ b/src/share/vm/prims/forte.cpp Mon Jul 15 11:07:03 2013 +0100 188.3 @@ -619,7 +619,7 @@ 188.4 void* null_argument_3); 188.5 #pragma weak collector_func_load 188.6 #define collector_func_load(x0,x1,x2,x3,x4,x5,x6) \ 188.7 - ( collector_func_load ? collector_func_load(x0,x1,x2,x3,x4,x5,x6),0 : 0 ) 188.8 + ( collector_func_load ? collector_func_load(x0,x1,x2,x3,x4,x5,x6),(void)0 : (void)0 ) 188.9 #endif // __APPLE__ 188.10 #endif // !_WINDOWS 188.11
189.1 --- a/src/share/vm/prims/jni.cpp Thu Jul 11 12:59:03 2013 -0400 189.2 +++ b/src/share/vm/prims/jni.cpp Mon Jul 15 11:07:03 2013 +0100 189.3 @@ -879,7 +879,7 @@ 189.4 env, capacity); 189.5 #endif /* USDT2 */ 189.6 //%note jni_11 189.7 - if (capacity < 0 && capacity > MAX_REASONABLE_LOCAL_CAPACITY) { 189.8 + if (capacity < 0 || capacity > MAX_REASONABLE_LOCAL_CAPACITY) { 189.9 #ifndef USDT2 189.10 DTRACE_PROBE1(hotspot_jni, PushLocalFrame__return, JNI_ERR); 189.11 #else /* USDT2 */
190.1 --- a/src/share/vm/prims/jvm.cpp Thu Jul 11 12:59:03 2013 -0400 190.2 +++ b/src/share/vm/prims/jvm.cpp Mon Jul 15 11:07:03 2013 +0100 190.3 @@ -3310,24 +3310,10 @@ 190.4 JVM_END 190.5 190.6 190.7 -// Utility object for collecting method holders walking down the stack 190.8 -class KlassLink: public ResourceObj { 190.9 - public: 190.10 - KlassHandle klass; 190.11 - KlassLink* next; 190.12 - 190.13 - KlassLink(KlassHandle k) { klass = k; next = NULL; } 190.14 -}; 190.15 - 190.16 - 190.17 JVM_ENTRY(jobjectArray, JVM_GetClassContext(JNIEnv *env)) 190.18 JVMWrapper("JVM_GetClassContext"); 190.19 ResourceMark rm(THREAD); 190.20 JvmtiVMObjectAllocEventCollector oam; 190.21 - // Collect linked list of (handles to) method holders 190.22 - KlassLink* first = NULL; 190.23 - KlassLink* last = NULL; 190.24 - int depth = 0; 190.25 vframeStream vfst(thread); 190.26 190.27 if (SystemDictionary::reflect_CallerSensitive_klass() != NULL) { 190.28 @@ -3341,32 +3327,23 @@ 190.29 } 190.30 190.31 // Collect method holders 190.32 + GrowableArray<KlassHandle>* klass_array = new GrowableArray<KlassHandle>(); 190.33 for (; !vfst.at_end(); vfst.security_next()) { 190.34 Method* m = vfst.method(); 190.35 // Native frames are not returned 190.36 if (!m->is_ignored_by_security_stack_walk() && !m->is_native()) { 190.37 Klass* holder = m->method_holder(); 190.38 assert(holder->is_klass(), "just checking"); 190.39 - depth++; 190.40 - KlassLink* l = new KlassLink(KlassHandle(thread, holder)); 190.41 - if (first == NULL) { 190.42 - first = last = l; 190.43 - } else { 190.44 - last->next = l; 190.45 - last = l; 190.46 - } 190.47 + klass_array->append(holder); 190.48 } 190.49 } 190.50 190.51 // Create result array of type [Ljava/lang/Class; 190.52 - objArrayOop result = oopFactory::new_objArray(SystemDictionary::Class_klass(), depth, CHECK_NULL); 190.53 + objArrayOop result = oopFactory::new_objArray(SystemDictionary::Class_klass(), klass_array->length(), CHECK_NULL); 190.54 // Fill in mirrors 
corresponding to method holders 190.55 - int index = 0; 190.56 - while (first != NULL) { 190.57 - result->obj_at_put(index++, first->klass()->java_mirror()); 190.58 - first = first->next; 190.59 + for (int i = 0; i < klass_array->length(); i++) { 190.60 + result->obj_at_put(i, klass_array->at(i)->java_mirror()); 190.61 } 190.62 - assert(index == depth, "just checking"); 190.63 190.64 return (jobjectArray) JNIHandles::make_local(env, result); 190.65 JVM_END
191.1 --- a/src/share/vm/prims/jvmti.xml Thu Jul 11 12:59:03 2013 -0400 191.2 +++ b/src/share/vm/prims/jvmti.xml Mon Jul 15 11:07:03 2013 +0100 191.3 @@ -1897,7 +1897,7 @@ 191.4 </description> 191.5 </param> 191.6 <param id="monitor_info_ptr"> 191.7 - <allocbuf outcount="owned_monitor_depth_count_ptr"> 191.8 + <allocbuf outcount="monitor_info_count_ptr"> 191.9 <struct>jvmtiMonitorStackDepthInfo</struct> 191.10 </allocbuf> 191.11 <description>
192.1 --- a/src/share/vm/prims/methodHandles.cpp Thu Jul 11 12:59:03 2013 -0400 192.2 +++ b/src/share/vm/prims/methodHandles.cpp Mon Jul 15 11:07:03 2013 +0100 192.3 @@ -1137,7 +1137,12 @@ 192.4 if (VerifyMethodHandles && caller_jh != NULL && 192.5 java_lang_invoke_MemberName::clazz(mname()) != NULL) { 192.6 Klass* reference_klass = java_lang_Class::as_Klass(java_lang_invoke_MemberName::clazz(mname())); 192.7 - if (reference_klass != NULL) { 192.8 + if (reference_klass != NULL && reference_klass->oop_is_objArray()) { 192.9 + reference_klass = ObjArrayKlass::cast(reference_klass)->bottom_klass(); 192.10 + } 192.11 + 192.12 + // Reflection::verify_class_access can only handle instance classes. 192.13 + if (reference_klass != NULL && reference_klass->oop_is_instance()) { 192.14 // Emulate LinkResolver::check_klass_accessability. 192.15 Klass* caller = java_lang_Class::as_Klass(JNIHandles::resolve_non_null(caller_jh)); 192.16 if (!Reflection::verify_class_access(caller,
193.1 --- a/src/share/vm/prims/whitebox.cpp Thu Jul 11 12:59:03 2013 -0400 193.2 +++ b/src/share/vm/prims/whitebox.cpp Mon Jul 15 11:07:03 2013 +0100 193.3 @@ -159,7 +159,7 @@ 193.4 193.5 193.6 WB_ENTRY(void, WB_NMTCommitMemory(JNIEnv* env, jobject o, jlong addr, jlong size)) 193.7 - os::commit_memory((char *)(uintptr_t)addr, size); 193.8 + os::commit_memory((char *)(uintptr_t)addr, size, !ExecMem); 193.9 MemTracker::record_virtual_memory_type((address)(uintptr_t)addr, mtTest); 193.10 WB_END 193.11
194.1 --- a/src/share/vm/runtime/arguments.cpp Thu Jul 11 12:59:03 2013 -0400 194.2 +++ b/src/share/vm/runtime/arguments.cpp Mon Jul 15 11:07:03 2013 +0100 194.3 @@ -849,7 +849,7 @@ 194.4 arg_len = equal_sign - argname; 194.5 } 194.6 194.7 - Flag* found_flag = Flag::find_flag((char*)argname, arg_len, true); 194.8 + Flag* found_flag = Flag::find_flag((const char*)argname, arg_len, true); 194.9 if (found_flag != NULL) { 194.10 char locked_message_buf[BUFLEN]; 194.11 found_flag->get_locked_message(locked_message_buf, BUFLEN); 194.12 @@ -870,6 +870,14 @@ 194.13 } else { 194.14 jio_fprintf(defaultStream::error_stream(), 194.15 "Unrecognized VM option '%s'\n", argname); 194.16 + Flag* fuzzy_matched = Flag::fuzzy_match((const char*)argname, arg_len, true); 194.17 + if (fuzzy_matched != NULL) { 194.18 + jio_fprintf(defaultStream::error_stream(), 194.19 + "Did you mean '%s%s%s'?\n", 194.20 + (fuzzy_matched->is_bool()) ? "(+/-)" : "", 194.21 + fuzzy_matched->name, 194.22 + (fuzzy_matched->is_bool()) ? "" : "=<value>"); 194.23 + } 194.24 } 194.25 194.26 // allow for commandline "commenting out" options like -XX:#+Verbose 194.27 @@ -1566,6 +1574,17 @@ 194.28 return result; 194.29 } 194.30 194.31 +void Arguments::set_heap_base_min_address() { 194.32 + if (FLAG_IS_DEFAULT(HeapBaseMinAddress) && UseG1GC && HeapBaseMinAddress < 1*G) { 194.33 + // By default HeapBaseMinAddress is 2G on all platforms except Solaris x86. 194.34 + // G1 currently needs a lot of C-heap, so on Solaris we have to give G1 194.35 + // some extra space for the C-heap compared to other collectors. 194.36 + // Use FLAG_SET_DEFAULT here rather than FLAG_SET_ERGO to make sure that 194.37 + // code that checks for default values work correctly. 
194.38 + FLAG_SET_DEFAULT(HeapBaseMinAddress, 1*G); 194.39 + } 194.40 +} 194.41 + 194.42 void Arguments::set_heap_size() { 194.43 if (!FLAG_IS_DEFAULT(DefaultMaxRAMFraction)) { 194.44 // Deprecated flag 194.45 @@ -1885,21 +1904,6 @@ 194.46 // Note: Needs platform-dependent factoring. 194.47 bool status = true; 194.48 194.49 -#if ( (defined(COMPILER2) && defined(SPARC))) 194.50 - // NOTE: The call to VM_Version_init depends on the fact that VM_Version_init 194.51 - // on sparc doesn't require generation of a stub as is the case on, e.g., 194.52 - // x86. Normally, VM_Version_init must be called from init_globals in 194.53 - // init.cpp, which is called by the initial java thread *after* arguments 194.54 - // have been parsed. VM_Version_init gets called twice on sparc. 194.55 - extern void VM_Version_init(); 194.56 - VM_Version_init(); 194.57 - if (!VM_Version::has_v9()) { 194.58 - jio_fprintf(defaultStream::error_stream(), 194.59 - "V8 Machine detected, Server requires V9\n"); 194.60 - status = false; 194.61 - } 194.62 -#endif /* COMPILER2 && SPARC */ 194.63 - 194.64 // Allow both -XX:-UseStackBanging and -XX:-UseBoundThreads in non-product 194.65 // builds so the cost of stack banging can be measured. 194.66 #if (defined(PRODUCT) && defined(SOLARIS)) 194.67 @@ -2217,11 +2221,24 @@ 194.68 status = false; 194.69 } 194.70 194.71 - if (ReservedCodeCacheSize < InitialCodeCacheSize) { 194.72 + // Check lower bounds of the code cache 194.73 + // Template Interpreter code is approximately 3X larger in debug builds. 194.74 + uint min_code_cache_size = (CodeCacheMinimumUseSpace DEBUG_ONLY(* 3)) + CodeCacheMinimumFreeSpace; 194.75 + if (InitialCodeCacheSize < (uintx)os::vm_page_size()) { 194.76 jio_fprintf(defaultStream::error_stream(), 194.77 - "Invalid ReservedCodeCacheSize: %dK. Should be greater than InitialCodeCacheSize=%dK\n", 194.78 + "Invalid InitialCodeCacheSize=%dK. 
Must be at least %dK.\n", InitialCodeCacheSize/K, 194.79 + os::vm_page_size()/K); 194.80 + status = false; 194.81 + } else if (ReservedCodeCacheSize < InitialCodeCacheSize) { 194.82 + jio_fprintf(defaultStream::error_stream(), 194.83 + "Invalid ReservedCodeCacheSize: %dK. Must be at least InitialCodeCacheSize=%dK.\n", 194.84 ReservedCodeCacheSize/K, InitialCodeCacheSize/K); 194.85 status = false; 194.86 + } else if (ReservedCodeCacheSize < min_code_cache_size) { 194.87 + jio_fprintf(defaultStream::error_stream(), 194.88 + "Invalid ReservedCodeCacheSize=%dK. Must be at least %uK.\n", ReservedCodeCacheSize/K, 194.89 + min_code_cache_size/K); 194.90 + status = false; 194.91 } 194.92 194.93 return status; 194.94 @@ -2622,10 +2639,20 @@ 194.95 // -Xoss 194.96 } else if (match_option(option, "-Xoss", &tail)) { 194.97 // HotSpot does not have separate native and Java stacks, ignore silently for compatibility 194.98 - // -Xmaxjitcodesize 194.99 + } else if (match_option(option, "-XX:CodeCacheExpansionSize=", &tail)) { 194.100 + julong long_CodeCacheExpansionSize = 0; 194.101 + ArgsRange errcode = parse_memory_size(tail, &long_CodeCacheExpansionSize, os::vm_page_size()); 194.102 + if (errcode != arg_in_range) { 194.103 + jio_fprintf(defaultStream::error_stream(), 194.104 + "Invalid argument: %s. 
Must be at least %luK.\n", option->optionString, 194.105 + os::vm_page_size()/K); 194.106 + return JNI_EINVAL; 194.107 + } 194.108 + FLAG_SET_CMDLINE(uintx, CodeCacheExpansionSize, (uintx)long_CodeCacheExpansionSize); 194.109 } else if (match_option(option, "-Xmaxjitcodesize", &tail) || 194.110 match_option(option, "-XX:ReservedCodeCacheSize=", &tail)) { 194.111 julong long_ReservedCodeCacheSize = 0; 194.112 + 194.113 ArgsRange errcode = parse_memory_size(tail, &long_ReservedCodeCacheSize, 1); 194.114 if (errcode != arg_in_range) { 194.115 jio_fprintf(defaultStream::error_stream(), 194.116 @@ -3525,6 +3552,8 @@ 194.117 } 194.118 } 194.119 194.120 + set_heap_base_min_address(); 194.121 + 194.122 // Set heap size based on available physical memory 194.123 set_heap_size(); 194.124
195.1 --- a/src/share/vm/runtime/arguments.hpp Thu Jul 11 12:59:03 2013 -0400 195.2 +++ b/src/share/vm/runtime/arguments.hpp Mon Jul 15 11:07:03 2013 +0100 195.3 @@ -315,6 +315,8 @@ 195.4 // limits the given memory size by the maximum amount of memory this process is 195.5 // currently allowed to allocate or reserve. 195.6 static julong limit_by_allocatable_memory(julong size); 195.7 + // Setup HeapBaseMinAddress 195.8 + static void set_heap_base_min_address(); 195.9 // Setup heap size 195.10 static void set_heap_size(); 195.11 // Based on automatic selection criteria, should the
196.1 --- a/src/share/vm/runtime/atomic.cpp Thu Jul 11 12:59:03 2013 -0400 196.2 +++ b/src/share/vm/runtime/atomic.cpp Mon Jul 15 11:07:03 2013 +0100 196.3 @@ -80,3 +80,32 @@ 196.4 } 196.5 return old; 196.6 } 196.7 + 196.8 +void Atomic::inc(volatile short* dest) { 196.9 + // Most platforms do not support atomic increment on a 2-byte value. However, 196.10 + // if the value occupies the most significant 16 bits of an aligned 32-bit 196.11 + // word, then we can do this with an atomic add of 0x10000 to the 32-bit word. 196.12 + // 196.13 + // The least significant parts of this 32-bit word will never be affected, even 196.14 + // in case of overflow/underflow. 196.15 + // 196.16 + // Use the ATOMIC_SHORT_PAIR macro to get the desired alignment. 196.17 +#ifdef VM_LITTLE_ENDIAN 196.18 + assert((intx(dest) & 0x03) == 0x02, "wrong alignment"); 196.19 + (void)Atomic::add(0x10000, (volatile int*)(dest-1)); 196.20 +#else 196.21 + assert((intx(dest) & 0x03) == 0x00, "wrong alignment"); 196.22 + (void)Atomic::add(0x10000, (volatile int*)(dest)); 196.23 +#endif 196.24 +} 196.25 + 196.26 +void Atomic::dec(volatile short* dest) { 196.27 +#ifdef VM_LITTLE_ENDIAN 196.28 + assert((intx(dest) & 0x03) == 0x02, "wrong alignment"); 196.29 + (void)Atomic::add(-0x10000, (volatile int*)(dest-1)); 196.30 +#else 196.31 + assert((intx(dest) & 0x03) == 0x00, "wrong alignment"); 196.32 + (void)Atomic::add(-0x10000, (volatile int*)(dest)); 196.33 +#endif 196.34 +} 196.35 +
197.1 --- a/src/share/vm/runtime/atomic.hpp Thu Jul 11 12:59:03 2013 -0400 197.2 +++ b/src/share/vm/runtime/atomic.hpp Mon Jul 15 11:07:03 2013 +0100 197.3 @@ -64,11 +64,13 @@ 197.4 197.5 // Atomically increment location 197.6 inline static void inc (volatile jint* dest); 197.7 + static void inc (volatile jshort* dest); 197.8 inline static void inc_ptr(volatile intptr_t* dest); 197.9 inline static void inc_ptr(volatile void* dest); 197.10 197.11 // Atomically decrement a location 197.12 inline static void dec (volatile jint* dest); 197.13 + static void dec (volatile jshort* dest); 197.14 inline static void dec_ptr(volatile intptr_t* dest); 197.15 inline static void dec_ptr(volatile void* dest); 197.16 197.17 @@ -95,4 +97,24 @@ 197.18 inline static void* cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value); 197.19 }; 197.20 197.21 +// To use Atomic::inc(jshort* dest) and Atomic::dec(jshort* dest), the address must be specially 197.22 +// aligned, such that (*dest) occupies the upper 16 bits of an aligned 32-bit word. The best way to 197.23 +// achieve is to place your short value next to another short value, which doesn't need atomic ops. 197.24 +// 197.25 +// Example 197.26 +// ATOMIC_SHORT_PAIR( 197.27 +// volatile short _refcount, // needs atomic operation 197.28 +// unsigned short _length // number of UTF8 characters in the symbol (does not need atomic op) 197.29 +// ); 197.30 + 197.31 +#ifdef VM_LITTLE_ENDIAN 197.32 +#define ATOMIC_SHORT_PAIR(atomic_decl, non_atomic_decl) \ 197.33 + non_atomic_decl; \ 197.34 + atomic_decl 197.35 +#else 197.36 +#define ATOMIC_SHORT_PAIR(atomic_decl, non_atomic_decl) \ 197.37 + atomic_decl ; \ 197.38 + non_atomic_decl 197.39 +#endif 197.40 + 197.41 #endif // SHARE_VM_RUNTIME_ATOMIC_HPP
198.1 --- a/src/share/vm/runtime/frame.cpp Thu Jul 11 12:59:03 2013 -0400 198.2 +++ b/src/share/vm/runtime/frame.cpp Mon Jul 15 11:07:03 2013 +0100 198.3 @@ -387,7 +387,6 @@ 198.4 Method* frame::interpreter_frame_method() const { 198.5 assert(is_interpreted_frame(), "interpreted frame expected"); 198.6 Method* m = *interpreter_frame_method_addr(); 198.7 - assert(m->is_metadata(), "bad Method* in interpreter frame"); 198.8 assert(m->is_method(), "not a Method*"); 198.9 return m; 198.10 } 198.11 @@ -713,7 +712,8 @@ 198.12 Method* m = ((nmethod *)_cb)->method(); 198.13 if (m != NULL) { 198.14 m->name_and_sig_as_C_string(buf, buflen); 198.15 - st->print("J %s", buf); 198.16 + st->print("J %s @ " PTR_FORMAT " [" PTR_FORMAT "+" SIZE_FORMAT "]", 198.17 + buf, _pc, _cb->code_begin(), _pc - _cb->code_begin()); 198.18 } else { 198.19 st->print("J " PTR_FORMAT, pc()); 198.20 }
199.1 --- a/src/share/vm/runtime/globals.cpp Thu Jul 11 12:59:03 2013 -0400 199.2 +++ b/src/share/vm/runtime/globals.cpp Mon Jul 15 11:07:03 2013 +0100 199.3 @@ -73,12 +73,6 @@ 199.4 strcmp(kind, "{C2 diagnostic}") == 0 || 199.5 strcmp(kind, "{ARCH diagnostic}") == 0 || 199.6 strcmp(kind, "{Shark diagnostic}") == 0) { 199.7 - if (strcmp(name, "EnableInvokeDynamic") == 0 && UnlockExperimentalVMOptions && !UnlockDiagnosticVMOptions) { 199.8 - // transitional logic to allow tests to run until they are changed 199.9 - static int warned; 199.10 - if (++warned == 1) warning("Use -XX:+UnlockDiagnosticVMOptions before EnableInvokeDynamic flag"); 199.11 - return true; 199.12 - } 199.13 return UnlockDiagnosticVMOptions; 199.14 } else if (strcmp(kind, "{experimental}") == 0 || 199.15 strcmp(kind, "{C2 experimental}") == 0 || 199.16 @@ -282,14 +276,14 @@ 199.17 Flag* Flag::flags = flagTable; 199.18 size_t Flag::numFlags = (sizeof(flagTable) / sizeof(Flag)); 199.19 199.20 -inline bool str_equal(const char* s, char* q, size_t len) { 199.21 +inline bool str_equal(const char* s, const char* q, size_t len) { 199.22 // s is null terminated, q is not! 199.23 if (strlen(s) != (unsigned int) len) return false; 199.24 return strncmp(s, q, len) == 0; 199.25 } 199.26 199.27 // Search the flag table for a named flag 199.28 -Flag* Flag::find_flag(char* name, size_t length, bool allow_locked) { 199.29 +Flag* Flag::find_flag(const char* name, size_t length, bool allow_locked) { 199.30 for (Flag* current = &flagTable[0]; current->name != NULL; current++) { 199.31 if (str_equal(current->name, name, length)) { 199.32 // Found a matching entry. Report locked flags only if allowed. 
199.33 @@ -307,6 +301,52 @@ 199.34 return NULL; 199.35 } 199.36 199.37 +// Compute string similarity based on Dice's coefficient 199.38 +static float str_similar(const char* str1, const char* str2, size_t len2) { 199.39 + int len1 = (int) strlen(str1); 199.40 + int total = len1 + (int) len2; 199.41 + 199.42 + int hit = 0; 199.43 + 199.44 + for (int i = 0; i < len1 -1; ++i) { 199.45 + for (int j = 0; j < (int) len2 -1; ++j) { 199.46 + if ((str1[i] == str2[j]) && (str1[i+1] == str2[j+1])) { 199.47 + ++hit; 199.48 + break; 199.49 + } 199.50 + } 199.51 + } 199.52 + 199.53 + return 2.0f * (float) hit / (float) total; 199.54 +} 199.55 + 199.56 +Flag* Flag::fuzzy_match(const char* name, size_t length, bool allow_locked) { 199.57 + float VMOptionsFuzzyMatchSimilarity = 0.7f; 199.58 + Flag* match = NULL; 199.59 + float score; 199.60 + float max_score = -1; 199.61 + 199.62 + for (Flag* current = &flagTable[0]; current->name != NULL; current++) { 199.63 + score = str_similar(current->name, name, length); 199.64 + if (score > max_score) { 199.65 + max_score = score; 199.66 + match = current; 199.67 + } 199.68 + } 199.69 + 199.70 + if (!(match->is_unlocked() || match->is_unlocker())) { 199.71 + if (!allow_locked) { 199.72 + return NULL; 199.73 + } 199.74 + } 199.75 + 199.76 + if (max_score < VMOptionsFuzzyMatchSimilarity) { 199.77 + return NULL; 199.78 + } 199.79 + 199.80 + return match; 199.81 +} 199.82 + 199.83 // Returns the address of the index'th element 199.84 static Flag* address_of_flag(CommandLineFlagWithType flag) { 199.85 assert((size_t)flag < Flag::numFlags, "bad command line flag index");
200.1 --- a/src/share/vm/runtime/globals.hpp Thu Jul 11 12:59:03 2013 -0400 200.2 +++ b/src/share/vm/runtime/globals.hpp Mon Jul 15 11:07:03 2013 +0100 200.3 @@ -220,7 +220,8 @@ 200.4 // number of flags 200.5 static size_t numFlags; 200.6 200.7 - static Flag* find_flag(char* name, size_t length, bool allow_locked = false); 200.8 + static Flag* find_flag(const char* name, size_t length, bool allow_locked = false); 200.9 + static Flag* fuzzy_match(const char* name, size_t length, bool allow_locked = false); 200.10 200.11 bool is_bool() const { return strcmp(type, "bool") == 0; } 200.12 bool get_bool() const { return *((bool*) addr); } 200.13 @@ -644,6 +645,9 @@ 200.14 product(bool, UseAESIntrinsics, false, \ 200.15 "use intrinsics for AES versions of crypto") \ 200.16 \ 200.17 + product(bool, UseCRC32Intrinsics, false, \ 200.18 + "use intrinsics for java.util.zip.CRC32") \ 200.19 + \ 200.20 develop(bool, TraceCallFixup, false, \ 200.21 "traces all call fixups") \ 200.22 \ 200.23 @@ -3160,6 +3164,9 @@ 200.24 product_pd(uintx, InitialCodeCacheSize, \ 200.25 "Initial code cache size (in bytes)") \ 200.26 \ 200.27 + develop_pd(uintx, CodeCacheMinimumUseSpace, \ 200.28 + "Minimum code cache size (in bytes) required to start VM.") \ 200.29 + \ 200.30 product_pd(uintx, ReservedCodeCacheSize, \ 200.31 "Reserved code cache size (in bytes) - maximum code cache size") \ 200.32 \
201.1 --- a/src/share/vm/runtime/os.cpp Thu Jul 11 12:59:03 2013 -0400 201.2 +++ b/src/share/vm/runtime/os.cpp Mon Jul 15 11:07:03 2013 +0100 201.3 @@ -647,10 +647,13 @@ 201.4 #ifndef ASSERT 201.5 NOT_PRODUCT(inc_stat_counter(&num_mallocs, 1)); 201.6 NOT_PRODUCT(inc_stat_counter(&alloc_bytes, size)); 201.7 + MemTracker::Tracker tkr = MemTracker::get_realloc_tracker(); 201.8 void* ptr = ::realloc(memblock, size); 201.9 if (ptr != NULL) { 201.10 - MemTracker::record_realloc((address)memblock, (address)ptr, size, memflags, 201.11 + tkr.record((address)memblock, (address)ptr, size, memflags, 201.12 caller == 0 ? CALLER_PC : caller); 201.13 + } else { 201.14 + tkr.discard(); 201.15 } 201.16 return ptr; 201.17 #else 201.18 @@ -1456,7 +1459,7 @@ 201.19 char* os::reserve_memory(size_t bytes, char* addr, size_t alignment_hint) { 201.20 char* result = pd_reserve_memory(bytes, addr, alignment_hint); 201.21 if (result != NULL) { 201.22 - MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC); 201.23 + MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC); 201.24 } 201.25 201.26 return result; 201.27 @@ -1466,7 +1469,7 @@ 201.28 MEMFLAGS flags) { 201.29 char* result = pd_reserve_memory(bytes, addr, alignment_hint); 201.30 if (result != NULL) { 201.31 - MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC); 201.32 + MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC); 201.33 MemTracker::record_virtual_memory_type((address)result, flags); 201.34 } 201.35 201.36 @@ -1476,7 +1479,7 @@ 201.37 char* os::attempt_reserve_memory_at(size_t bytes, char* addr) { 201.38 char* result = pd_attempt_reserve_memory_at(bytes, addr); 201.39 if (result != NULL) { 201.40 - MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC); 201.41 + MemTracker::record_virtual_memory_reserve((address)result, bytes, mtNone, CALLER_PC); 201.42 } 201.43 return result; 201.44 } 201.45 @@ 
-1503,18 +1506,36 @@ 201.46 return res; 201.47 } 201.48 201.49 +void os::commit_memory_or_exit(char* addr, size_t bytes, bool executable, 201.50 + const char* mesg) { 201.51 + pd_commit_memory_or_exit(addr, bytes, executable, mesg); 201.52 + MemTracker::record_virtual_memory_commit((address)addr, bytes, CALLER_PC); 201.53 +} 201.54 + 201.55 +void os::commit_memory_or_exit(char* addr, size_t size, size_t alignment_hint, 201.56 + bool executable, const char* mesg) { 201.57 + os::pd_commit_memory_or_exit(addr, size, alignment_hint, executable, mesg); 201.58 + MemTracker::record_virtual_memory_commit((address)addr, size, CALLER_PC); 201.59 +} 201.60 + 201.61 bool os::uncommit_memory(char* addr, size_t bytes) { 201.62 + MemTracker::Tracker tkr = MemTracker::get_virtual_memory_uncommit_tracker(); 201.63 bool res = pd_uncommit_memory(addr, bytes); 201.64 if (res) { 201.65 - MemTracker::record_virtual_memory_uncommit((address)addr, bytes); 201.66 + tkr.record((address)addr, bytes); 201.67 + } else { 201.68 + tkr.discard(); 201.69 } 201.70 return res; 201.71 } 201.72 201.73 bool os::release_memory(char* addr, size_t bytes) { 201.74 + MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker(); 201.75 bool res = pd_release_memory(addr, bytes); 201.76 if (res) { 201.77 - MemTracker::record_virtual_memory_release((address)addr, bytes); 201.78 + tkr.record((address)addr, bytes); 201.79 + } else { 201.80 + tkr.discard(); 201.81 } 201.82 return res; 201.83 } 201.84 @@ -1525,8 +1546,7 @@ 201.85 bool allow_exec) { 201.86 char* result = pd_map_memory(fd, file_name, file_offset, addr, bytes, read_only, allow_exec); 201.87 if (result != NULL) { 201.88 - MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC); 201.89 - MemTracker::record_virtual_memory_commit((address)result, bytes, CALLER_PC); 201.90 + MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, mtNone, CALLER_PC); 201.91 } 201.92 return result; 201.93 } 201.94 @@ 
-1539,10 +1559,12 @@ 201.95 } 201.96 201.97 bool os::unmap_memory(char *addr, size_t bytes) { 201.98 + MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker(); 201.99 bool result = pd_unmap_memory(addr, bytes); 201.100 if (result) { 201.101 - MemTracker::record_virtual_memory_uncommit((address)addr, bytes); 201.102 - MemTracker::record_virtual_memory_release((address)addr, bytes); 201.103 + tkr.record((address)addr, bytes); 201.104 + } else { 201.105 + tkr.discard(); 201.106 } 201.107 return result; 201.108 }
202.1 --- a/src/share/vm/runtime/os.hpp Thu Jul 11 12:59:03 2013 -0400 202.2 +++ b/src/share/vm/runtime/os.hpp Mon Jul 15 11:07:03 2013 +0100 202.3 @@ -78,6 +78,10 @@ 202.4 CriticalPriority = 11 // Critical thread priority 202.5 }; 202.6 202.7 +// Executable parameter flag for os::commit_memory() and 202.8 +// os::commit_memory_or_exit(). 202.9 +const bool ExecMem = true; 202.10 + 202.11 // Typedef for structured exception handling support 202.12 typedef void (*java_call_t)(JavaValue* value, methodHandle* method, JavaCallArguments* args, Thread* thread); 202.13 202.14 @@ -104,9 +108,16 @@ 202.15 static char* pd_attempt_reserve_memory_at(size_t bytes, char* addr); 202.16 static void pd_split_reserved_memory(char *base, size_t size, 202.17 size_t split, bool realloc); 202.18 - static bool pd_commit_memory(char* addr, size_t bytes, bool executable = false); 202.19 + static bool pd_commit_memory(char* addr, size_t bytes, bool executable); 202.20 static bool pd_commit_memory(char* addr, size_t size, size_t alignment_hint, 202.21 - bool executable = false); 202.22 + bool executable); 202.23 + // Same as pd_commit_memory() that either succeeds or calls 202.24 + // vm_exit_out_of_memory() with the specified mesg. 
202.25 + static void pd_commit_memory_or_exit(char* addr, size_t bytes, 202.26 + bool executable, const char* mesg); 202.27 + static void pd_commit_memory_or_exit(char* addr, size_t size, 202.28 + size_t alignment_hint, 202.29 + bool executable, const char* mesg); 202.30 static bool pd_uncommit_memory(char* addr, size_t bytes); 202.31 static bool pd_release_memory(char* addr, size_t bytes); 202.32 202.33 @@ -261,9 +272,16 @@ 202.34 static char* attempt_reserve_memory_at(size_t bytes, char* addr); 202.35 static void split_reserved_memory(char *base, size_t size, 202.36 size_t split, bool realloc); 202.37 - static bool commit_memory(char* addr, size_t bytes, bool executable = false); 202.38 + static bool commit_memory(char* addr, size_t bytes, bool executable); 202.39 static bool commit_memory(char* addr, size_t size, size_t alignment_hint, 202.40 - bool executable = false); 202.41 + bool executable); 202.42 + // Same as commit_memory() that either succeeds or calls 202.43 + // vm_exit_out_of_memory() with the specified mesg. 202.44 + static void commit_memory_or_exit(char* addr, size_t bytes, 202.45 + bool executable, const char* mesg); 202.46 + static void commit_memory_or_exit(char* addr, size_t size, 202.47 + size_t alignment_hint, 202.48 + bool executable, const char* mesg); 202.49 static bool uncommit_memory(char* addr, size_t bytes); 202.50 static bool release_memory(char* addr, size_t bytes); 202.51
203.1 --- a/src/share/vm/runtime/reflection.cpp Thu Jul 11 12:59:03 2013 -0400 203.2 +++ b/src/share/vm/runtime/reflection.cpp Mon Jul 15 11:07:03 2013 +0100 203.3 @@ -458,7 +458,7 @@ 203.4 // doesn't have a classloader. 203.5 if ((current_class == NULL) || 203.6 (current_class == new_class) || 203.7 - (InstanceKlass::cast(new_class)->is_public()) || 203.8 + (new_class->is_public()) || 203.9 is_same_class_package(current_class, new_class)) { 203.10 return true; 203.11 }
204.1 --- a/src/share/vm/runtime/sharedRuntime.cpp Thu Jul 11 12:59:03 2013 -0400 204.2 +++ b/src/share/vm/runtime/sharedRuntime.cpp Mon Jul 15 11:07:03 2013 +0100 204.3 @@ -813,8 +813,11 @@ 204.4 // 3. Implict null exception in nmethod 204.5 204.6 if (!cb->is_nmethod()) { 204.7 - guarantee(cb->is_adapter_blob() || cb->is_method_handles_adapter_blob(), 204.8 - "exception happened outside interpreter, nmethods and vtable stubs (1)"); 204.9 + bool is_in_blob = cb->is_adapter_blob() || cb->is_method_handles_adapter_blob(); 204.10 + if (!is_in_blob) { 204.11 + cb->print(); 204.12 + fatal(err_msg("exception happened outside interpreter, nmethods and vtable stubs at pc " INTPTR_FORMAT, pc)); 204.13 + } 204.14 Events::log_exception(thread, "NullPointerException in code blob at " INTPTR_FORMAT, pc); 204.15 // There is no handler here, so we will simply unwind. 204.16 return StubRoutines::throw_NullPointerException_at_call_entry(); 204.17 @@ -2731,7 +2734,7 @@ 204.18 // ResourceObject, so do not put any ResourceMarks in here. 204.19 char *s = sig->as_C_string(); 204.20 int len = (int)strlen(s); 204.21 - *s++; len--; // Skip opening paren 204.22 + s++; len--; // Skip opening paren 204.23 char *t = s+len; 204.24 while( *(--t) != ')' ) ; // Find close paren 204.25
205.1 --- a/src/share/vm/runtime/stubRoutines.cpp Thu Jul 11 12:59:03 2013 -0400 205.2 +++ b/src/share/vm/runtime/stubRoutines.cpp Mon Jul 15 11:07:03 2013 +0100 205.3 @@ -1,5 +1,5 @@ 205.4 /* 205.5 - * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved. 205.6 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved. 205.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 205.8 * 205.9 * This code is free software; you can redistribute it and/or modify it 205.10 @@ -125,6 +125,9 @@ 205.11 address StubRoutines::_cipherBlockChaining_encryptAESCrypt = NULL; 205.12 address StubRoutines::_cipherBlockChaining_decryptAESCrypt = NULL; 205.13 205.14 +address StubRoutines::_updateBytesCRC32 = NULL; 205.15 +address StubRoutines::_crc_table_adr = NULL; 205.16 + 205.17 double (* StubRoutines::_intrinsic_log )(double) = NULL; 205.18 double (* StubRoutines::_intrinsic_log10 )(double) = NULL; 205.19 double (* StubRoutines::_intrinsic_exp )(double) = NULL;
206.1 --- a/src/share/vm/runtime/stubRoutines.hpp Thu Jul 11 12:59:03 2013 -0400 206.2 +++ b/src/share/vm/runtime/stubRoutines.hpp Mon Jul 15 11:07:03 2013 +0100 206.3 @@ -204,6 +204,9 @@ 206.4 static address _cipherBlockChaining_encryptAESCrypt; 206.5 static address _cipherBlockChaining_decryptAESCrypt; 206.6 206.7 + static address _updateBytesCRC32; 206.8 + static address _crc_table_adr; 206.9 + 206.10 // These are versions of the java.lang.Math methods which perform 206.11 // the same operations as the intrinsic version. They are used for 206.12 // constant folding in the compiler to ensure equivalence. If the 206.13 @@ -342,6 +345,9 @@ 206.14 static address cipherBlockChaining_encryptAESCrypt() { return _cipherBlockChaining_encryptAESCrypt; } 206.15 static address cipherBlockChaining_decryptAESCrypt() { return _cipherBlockChaining_decryptAESCrypt; } 206.16 206.17 + static address updateBytesCRC32() { return _updateBytesCRC32; } 206.18 + static address crc_table_addr() { return _crc_table_adr; } 206.19 + 206.20 static address select_fill_function(BasicType t, bool aligned, const char* &name); 206.21 206.22 static address zero_aligned_words() { return _zero_aligned_words; }
207.1 --- a/src/share/vm/runtime/thread.cpp Thu Jul 11 12:59:03 2013 -0400 207.2 +++ b/src/share/vm/runtime/thread.cpp Mon Jul 15 11:07:03 2013 +0100 207.3 @@ -220,7 +220,7 @@ 207.4 set_osthread(NULL); 207.5 set_resource_area(new (mtThread)ResourceArea()); 207.6 set_handle_area(new (mtThread) HandleArea(NULL)); 207.7 - set_metadata_handles(new (ResourceObj::C_HEAP, mtClass) GrowableArray<Metadata*>(300, true)); 207.8 + set_metadata_handles(new (ResourceObj::C_HEAP, mtClass) GrowableArray<Metadata*>(30, true)); 207.9 set_active_handles(NULL); 207.10 set_free_handle_block(NULL); 207.11 set_last_handle_mark(NULL);
208.1 --- a/src/share/vm/runtime/virtualspace.cpp Thu Jul 11 12:59:03 2013 -0400 208.2 +++ b/src/share/vm/runtime/virtualspace.cpp Mon Jul 15 11:07:03 2013 +0100 208.3 @@ -533,11 +533,13 @@ 208.4 lower_high() + lower_needs <= lower_high_boundary(), 208.5 "must not expand beyond region"); 208.6 if (!os::commit_memory(lower_high(), lower_needs, _executable)) { 208.7 - debug_only(warning("os::commit_memory failed")); 208.8 + debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT 208.9 + ", lower_needs=" SIZE_FORMAT ", %d) failed", 208.10 + lower_high(), lower_needs, _executable);) 208.11 return false; 208.12 } else { 208.13 _lower_high += lower_needs; 208.14 - } 208.15 + } 208.16 } 208.17 if (middle_needs > 0) { 208.18 assert(lower_high_boundary() <= middle_high() && 208.19 @@ -545,7 +547,10 @@ 208.20 "must not expand beyond region"); 208.21 if (!os::commit_memory(middle_high(), middle_needs, middle_alignment(), 208.22 _executable)) { 208.23 - debug_only(warning("os::commit_memory failed")); 208.24 + debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT 208.25 + ", middle_needs=" SIZE_FORMAT ", " SIZE_FORMAT 208.26 + ", %d) failed", middle_high(), middle_needs, 208.27 + middle_alignment(), _executable);) 208.28 return false; 208.29 } 208.30 _middle_high += middle_needs; 208.31 @@ -555,7 +560,9 @@ 208.32 upper_high() + upper_needs <= upper_high_boundary(), 208.33 "must not expand beyond region"); 208.34 if (!os::commit_memory(upper_high(), upper_needs, _executable)) { 208.35 - debug_only(warning("os::commit_memory failed")); 208.36 + debug_only(warning("INFO: os::commit_memory(" PTR_FORMAT 208.37 + ", upper_needs=" SIZE_FORMAT ", %d) failed", 208.38 + upper_high(), upper_needs, _executable);) 208.39 return false; 208.40 } else { 208.41 _upper_high += upper_needs;
209.1 --- a/src/share/vm/runtime/vmStructs.cpp Thu Jul 11 12:59:03 2013 -0400 209.2 +++ b/src/share/vm/runtime/vmStructs.cpp Mon Jul 15 11:07:03 2013 +0100 209.3 @@ -379,7 +379,7 @@ 209.4 nonstatic_field(ConstMethod, _size_of_parameters, u2) \ 209.5 nonstatic_field(ObjArrayKlass, _element_klass, Klass*) \ 209.6 nonstatic_field(ObjArrayKlass, _bottom_klass, Klass*) \ 209.7 - volatile_nonstatic_field(Symbol, _refcount, int) \ 209.8 + volatile_nonstatic_field(Symbol, _refcount, short) \ 209.9 nonstatic_field(Symbol, _identity_hash, int) \ 209.10 nonstatic_field(Symbol, _length, unsigned short) \ 209.11 unchecked_nonstatic_field(Symbol, _body, sizeof(jbyte)) /* NOTE: no type */ \ 209.12 @@ -437,10 +437,6 @@ 209.13 static_field(Universe, _main_thread_group, oop) \ 209.14 static_field(Universe, _system_thread_group, oop) \ 209.15 static_field(Universe, _the_empty_class_klass_array, objArrayOop) \ 209.16 - static_field(Universe, _out_of_memory_error_java_heap, oop) \ 209.17 - static_field(Universe, _out_of_memory_error_perm_gen, oop) \ 209.18 - static_field(Universe, _out_of_memory_error_array_size, oop) \ 209.19 - static_field(Universe, _out_of_memory_error_gc_overhead_limit, oop) \ 209.20 static_field(Universe, _null_ptr_exception_instance, oop) \ 209.21 static_field(Universe, _arithmetic_exception_instance, oop) \ 209.22 static_field(Universe, _vm_exception, oop) \
210.1 --- a/src/share/vm/services/diagnosticArgument.cpp Thu Jul 11 12:59:03 2013 -0400 210.2 +++ b/src/share/vm/services/diagnosticArgument.cpp Mon Jul 15 11:07:03 2013 +0100 210.3 @@ -247,7 +247,7 @@ 210.4 } else { 210.5 _value._time = 0; 210.6 _value._nanotime = 0; 210.7 - strcmp(_value._unit, "ns"); 210.8 + strcpy(_value._unit, "ns"); 210.9 } 210.10 } 210.11
211.1 --- a/src/share/vm/services/management.cpp Thu Jul 11 12:59:03 2013 -0400 211.2 +++ b/src/share/vm/services/management.cpp Mon Jul 15 11:07:03 2013 +0100 211.3 @@ -894,12 +894,6 @@ 211.4 } 211.5 } 211.6 211.7 - // In our current implementation, we make sure that all non-heap 211.8 - // pools have defined init and max sizes. Heap pools do not matter, 211.9 - // as we never use total_init and total_max for them. 211.10 - assert(heap || !has_undefined_init_size, "Undefined init size"); 211.11 - assert(heap || !has_undefined_max_size, "Undefined max size"); 211.12 - 211.13 MemoryUsage usage((heap ? InitialHeapSize : total_init), 211.14 total_used, 211.15 total_committed,
212.1 --- a/src/share/vm/services/memBaseline.cpp Thu Jul 11 12:59:03 2013 -0400 212.2 +++ b/src/share/vm/services/memBaseline.cpp Mon Jul 15 11:07:03 2013 +0100 212.3 @@ -130,7 +130,7 @@ 212.4 if (malloc_ptr->is_arena_record()) { 212.5 // see if arena memory record present 212.6 MemPointerRecord* next_malloc_ptr = (MemPointerRecordEx*)malloc_itr.peek_next(); 212.7 - if (next_malloc_ptr->is_arena_memory_record()) { 212.8 + if (next_malloc_ptr != NULL && next_malloc_ptr->is_arena_memory_record()) { 212.9 assert(next_malloc_ptr->is_memory_record_of_arena(malloc_ptr), 212.10 "Arena records do not match"); 212.11 size = next_malloc_ptr->size();
213.1 --- a/src/share/vm/services/memPtr.hpp Thu Jul 11 12:59:03 2013 -0400 213.2 +++ b/src/share/vm/services/memPtr.hpp Mon Jul 15 11:07:03 2013 +0100 213.3 @@ -1,5 +1,5 @@ 213.4 /* 213.5 - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. 213.6 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. 213.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 213.8 * 213.9 * This code is free software; you can redistribute it and/or modify it 213.10 @@ -457,9 +457,8 @@ 213.11 public: 213.12 SeqMemPointerRecord(): _seq(0){ } 213.13 213.14 - SeqMemPointerRecord(address addr, MEMFLAGS flags, size_t size) 213.15 - : MemPointerRecord(addr, flags, size) { 213.16 - _seq = SequenceGenerator::next(); 213.17 + SeqMemPointerRecord(address addr, MEMFLAGS flags, size_t size, jint seq) 213.18 + : MemPointerRecord(addr, flags, size), _seq(seq) { 213.19 } 213.20 213.21 SeqMemPointerRecord(const SeqMemPointerRecord& copy_from) 213.22 @@ -488,8 +487,8 @@ 213.23 SeqMemPointerRecordEx(): _seq(0) { } 213.24 213.25 SeqMemPointerRecordEx(address addr, MEMFLAGS flags, size_t size, 213.26 - address pc): MemPointerRecordEx(addr, flags, size, pc) { 213.27 - _seq = SequenceGenerator::next(); 213.28 + jint seq, address pc): 213.29 + MemPointerRecordEx(addr, flags, size, pc), _seq(seq) { 213.30 } 213.31 213.32 SeqMemPointerRecordEx(const SeqMemPointerRecordEx& copy_from)
214.1 --- a/src/share/vm/services/memRecorder.cpp Thu Jul 11 12:59:03 2013 -0400 214.2 +++ b/src/share/vm/services/memRecorder.cpp Mon Jul 15 11:07:03 2013 +0100 214.3 @@ -1,5 +1,5 @@ 214.4 /* 214.5 - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. 214.6 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. 214.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 214.8 * 214.9 * This code is free software; you can redistribute it and/or modify it 214.10 @@ -69,10 +69,11 @@ 214.11 214.12 if (_pointer_records != NULL) { 214.13 // recode itself 214.14 + address pc = CURRENT_PC; 214.15 record((address)this, (MemPointerRecord::malloc_tag()|mtNMT|otNMTRecorder), 214.16 - sizeof(MemRecorder), CALLER_PC); 214.17 + sizeof(MemRecorder), SequenceGenerator::next(), pc); 214.18 record((address)_pointer_records, (MemPointerRecord::malloc_tag()|mtNMT|otNMTRecorder), 214.19 - _pointer_records->instance_size(),CURRENT_PC); 214.20 + _pointer_records->instance_size(), SequenceGenerator::next(), pc); 214.21 } 214.22 } 214.23 214.24 @@ -116,7 +117,8 @@ 214.25 } 214.26 } 214.27 214.28 -bool MemRecorder::record(address p, MEMFLAGS flags, size_t size, address pc) { 214.29 +bool MemRecorder::record(address p, MEMFLAGS flags, size_t size, jint seq, address pc) { 214.30 + assert(seq > 0, "No sequence number"); 214.31 #ifdef ASSERT 214.32 if (MemPointerRecord::is_virtual_memory_record(flags)) { 214.33 assert((flags & MemPointerRecord::tag_masks) != 0, "bad virtual memory record"); 214.34 @@ -133,11 +135,11 @@ 214.35 #endif 214.36 214.37 if (MemTracker::track_callsite()) { 214.38 - SeqMemPointerRecordEx ap(p, flags, size, pc); 214.39 + SeqMemPointerRecordEx ap(p, flags, size, seq, pc); 214.40 debug_only(check_dup_seq(ap.seq());) 214.41 return _pointer_records->append(&ap); 214.42 } else { 214.43 - SeqMemPointerRecord ap(p, flags, size); 214.44 + SeqMemPointerRecord ap(p, flags, size, seq); 214.45 
debug_only(check_dup_seq(ap.seq());) 214.46 return _pointer_records->append(&ap); 214.47 }
215.1 --- a/src/share/vm/services/memRecorder.hpp Thu Jul 11 12:59:03 2013 -0400 215.2 +++ b/src/share/vm/services/memRecorder.hpp Mon Jul 15 11:07:03 2013 +0100 215.3 @@ -1,5 +1,5 @@ 215.4 /* 215.5 - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. 215.6 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. 215.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 215.8 * 215.9 * This code is free software; you can redistribute it and/or modify it 215.10 @@ -220,7 +220,7 @@ 215.11 ~MemRecorder(); 215.12 215.13 // record a memory operation 215.14 - bool record(address addr, MEMFLAGS flags, size_t size, address caller_pc = 0); 215.15 + bool record(address addr, MEMFLAGS flags, size_t size, jint seq, address caller_pc = 0); 215.16 215.17 // linked list support 215.18 inline void set_next(MemRecorder* rec) {
216.1 --- a/src/share/vm/services/memReporter.cpp Thu Jul 11 12:59:03 2013 -0400 216.2 +++ b/src/share/vm/services/memReporter.cpp Mon Jul 15 11:07:03 2013 +0100 216.3 @@ -188,30 +188,51 @@ 216.4 (MallocCallsitePointer*)prev_malloc_itr.current(); 216.5 216.6 while (cur_malloc_callsite != NULL || prev_malloc_callsite != NULL) { 216.7 - if (prev_malloc_callsite == NULL || 216.8 - cur_malloc_callsite->addr() < prev_malloc_callsite->addr()) { 216.9 + if (prev_malloc_callsite == NULL) { 216.10 + assert(cur_malloc_callsite != NULL, "sanity check"); 216.11 + // this is a new callsite 216.12 _outputer.diff_malloc_callsite(cur_malloc_callsite->addr(), 216.13 amount_in_current_scale(cur_malloc_callsite->amount()), 216.14 cur_malloc_callsite->count(), 216.15 diff_in_current_scale(cur_malloc_callsite->amount(), 0), 216.16 diff(cur_malloc_callsite->count(), 0)); 216.17 cur_malloc_callsite = (MallocCallsitePointer*)cur_malloc_itr.next(); 216.18 - } else if (prev_malloc_callsite == NULL || 216.19 - cur_malloc_callsite->addr() > prev_malloc_callsite->addr()) { 216.20 - _outputer.diff_malloc_callsite(cur_malloc_callsite->addr(), 216.21 - amount_in_current_scale(prev_malloc_callsite->amount()), 216.22 - prev_malloc_callsite->count(), 216.23 + } else if (cur_malloc_callsite == NULL) { 216.24 + assert(prev_malloc_callsite != NULL, "Sanity check"); 216.25 + // this callsite is already gone 216.26 + _outputer.diff_malloc_callsite(prev_malloc_callsite->addr(), 216.27 + 0, 0, 216.28 diff_in_current_scale(0, prev_malloc_callsite->amount()), 216.29 diff(0, prev_malloc_callsite->count())); 216.30 prev_malloc_callsite = (MallocCallsitePointer*)prev_malloc_itr.next(); 216.31 - } else { // the same callsite 216.32 - _outputer.diff_malloc_callsite(cur_malloc_callsite->addr(), 216.33 - amount_in_current_scale(cur_malloc_callsite->amount()), 216.34 - cur_malloc_callsite->count(), 216.35 - diff_in_current_scale(cur_malloc_callsite->amount(), prev_malloc_callsite->amount()), 216.36 - 
diff(cur_malloc_callsite->count(), prev_malloc_callsite->count())); 216.37 - cur_malloc_callsite = (MallocCallsitePointer*)cur_malloc_itr.next(); 216.38 - prev_malloc_callsite = (MallocCallsitePointer*)prev_malloc_itr.next(); 216.39 + } else { 216.40 + assert(cur_malloc_callsite != NULL, "Sanity check"); 216.41 + assert(prev_malloc_callsite != NULL, "Sanity check"); 216.42 + if (cur_malloc_callsite->addr() < prev_malloc_callsite->addr()) { 216.43 + // this is a new callsite 216.44 + _outputer.diff_malloc_callsite(cur_malloc_callsite->addr(), 216.45 + amount_in_current_scale(cur_malloc_callsite->amount()), 216.46 + cur_malloc_callsite->count(), 216.47 + diff_in_current_scale(cur_malloc_callsite->amount(), 0), 216.48 + diff(cur_malloc_callsite->count(), 0)); 216.49 + cur_malloc_callsite = (MallocCallsitePointer*)cur_malloc_itr.next(); 216.50 + } else if (cur_malloc_callsite->addr() > prev_malloc_callsite->addr()) { 216.51 + // this callsite is already gone 216.52 + _outputer.diff_malloc_callsite(prev_malloc_callsite->addr(), 216.53 + 0, 0, 216.54 + diff_in_current_scale(0, prev_malloc_callsite->amount()), 216.55 + diff(0, prev_malloc_callsite->count())); 216.56 + prev_malloc_callsite = (MallocCallsitePointer*)prev_malloc_itr.next(); 216.57 + } else { 216.58 + // the same callsite 216.59 + _outputer.diff_malloc_callsite(cur_malloc_callsite->addr(), 216.60 + amount_in_current_scale(cur_malloc_callsite->amount()), 216.61 + cur_malloc_callsite->count(), 216.62 + diff_in_current_scale(cur_malloc_callsite->amount(), prev_malloc_callsite->amount()), 216.63 + diff(cur_malloc_callsite->count(), prev_malloc_callsite->count())); 216.64 + cur_malloc_callsite = (MallocCallsitePointer*)cur_malloc_itr.next(); 216.65 + prev_malloc_callsite = (MallocCallsitePointer*)prev_malloc_itr.next(); 216.66 + } 216.67 } 216.68 } 216.69 216.70 @@ -222,6 +243,7 @@ 216.71 VMCallsitePointer* prev_vm_callsite = (VMCallsitePointer*)prev_vm_itr.current(); 216.72 while (cur_vm_callsite != NULL || 
prev_vm_callsite != NULL) { 216.73 if (prev_vm_callsite == NULL || cur_vm_callsite->addr() < prev_vm_callsite->addr()) { 216.74 + // this is a new callsite 216.75 _outputer.diff_virtual_memory_callsite(cur_vm_callsite->addr(), 216.76 amount_in_current_scale(cur_vm_callsite->reserved_amount()), 216.77 amount_in_current_scale(cur_vm_callsite->committed_amount()), 216.78 @@ -229,9 +251,10 @@ 216.79 diff_in_current_scale(cur_vm_callsite->committed_amount(), 0)); 216.80 cur_vm_callsite = (VMCallsitePointer*)cur_vm_itr.next(); 216.81 } else if (cur_vm_callsite == NULL || cur_vm_callsite->addr() > prev_vm_callsite->addr()) { 216.82 + // this callsite is already gone 216.83 _outputer.diff_virtual_memory_callsite(prev_vm_callsite->addr(), 216.84 - amount_in_current_scale(prev_vm_callsite->reserved_amount()), 216.85 - amount_in_current_scale(prev_vm_callsite->committed_amount()), 216.86 + amount_in_current_scale(0), 216.87 + amount_in_current_scale(0), 216.88 diff_in_current_scale(0, prev_vm_callsite->reserved_amount()), 216.89 diff_in_current_scale(0, prev_vm_callsite->committed_amount())); 216.90 prev_vm_callsite = (VMCallsitePointer*)prev_vm_itr.next();
217.1 --- a/src/share/vm/services/memTracker.cpp Thu Jul 11 12:59:03 2013 -0400 217.2 +++ b/src/share/vm/services/memTracker.cpp Mon Jul 15 11:07:03 2013 +0100 217.3 @@ -69,6 +69,7 @@ 217.4 volatile jint MemTracker::_pooled_recorder_count = 0; 217.5 volatile unsigned long MemTracker::_processing_generation = 0; 217.6 volatile bool MemTracker::_worker_thread_idle = false; 217.7 +volatile jint MemTracker::_pending_op_count = 0; 217.8 volatile bool MemTracker::_slowdown_calling_thread = false; 217.9 debug_only(intx MemTracker::_main_thread_tid = 0;) 217.10 NOT_PRODUCT(volatile jint MemTracker::_pending_recorder_count = 0;) 217.11 @@ -337,92 +338,14 @@ 217.12 Atomic::inc(&_pooled_recorder_count); 217.13 } 217.14 217.15 -/* 217.16 - * This is the most important method in whole nmt implementation. 217.17 - * 217.18 - * Create a memory record. 217.19 - * 1. When nmt is in single-threaded bootstrapping mode, no lock is needed as VM 217.20 - * still in single thread mode. 217.21 - * 2. For all threads other than JavaThread, ThreadCritical is needed 217.22 - * to write to recorders to global recorder. 217.23 - * 3. For JavaThreads that are not longer visible by safepoint, also 217.24 - * need to take ThreadCritical and records are written to global 217.25 - * recorders, since these threads are NOT walked by Threads.do_thread(). 217.26 - * 4. JavaThreads that are running in native state, have to transition 217.27 - * to VM state before writing to per-thread recorders. 217.28 - * 5. JavaThreads that are running in VM state do not need any lock and 217.29 - * records are written to per-thread recorders. 217.30 - * 6. For a thread has yet to attach VM 'Thread', they need to take 217.31 - * ThreadCritical to write to global recorder. 217.32 - * 217.33 - * Important note: 217.34 - * NO LOCK should be taken inside ThreadCritical lock !!! 
217.35 - */ 217.36 -void MemTracker::create_memory_record(address addr, MEMFLAGS flags, 217.37 - size_t size, address pc, Thread* thread) { 217.38 - assert(addr != NULL, "Sanity check"); 217.39 - if (!shutdown_in_progress()) { 217.40 - // single thread, we just write records direct to global recorder,' 217.41 - // with any lock 217.42 - if (_state == NMT_bootstrapping_single_thread) { 217.43 - assert(_main_thread_tid == os::current_thread_id(), "wrong thread"); 217.44 - thread = NULL; 217.45 - } else { 217.46 - if (thread == NULL) { 217.47 - // don't use Thread::current(), since it is possible that 217.48 - // the calling thread has yet to attach to VM 'Thread', 217.49 - // which will result assertion failure 217.50 - thread = ThreadLocalStorage::thread(); 217.51 - } 217.52 - } 217.53 - 217.54 - if (thread != NULL) { 217.55 - // slow down all calling threads except NMT worker thread, so it 217.56 - // can catch up. 217.57 - if (_slowdown_calling_thread && thread != _worker_thread) { 217.58 - os::yield_all(); 217.59 - } 217.60 - 217.61 - if (thread->is_Java_thread() && ((JavaThread*)thread)->is_safepoint_visible()) { 217.62 - JavaThread* java_thread = (JavaThread*)thread; 217.63 - JavaThreadState state = java_thread->thread_state(); 217.64 - if (SafepointSynchronize::safepoint_safe(java_thread, state)) { 217.65 - // JavaThreads that are safepoint safe, can run through safepoint, 217.66 - // so ThreadCritical is needed to ensure no threads at safepoint create 217.67 - // new records while the records are being gathered and the sequence number is changing 217.68 - ThreadCritical tc; 217.69 - create_record_in_recorder(addr, flags, size, pc, java_thread); 217.70 - } else { 217.71 - create_record_in_recorder(addr, flags, size, pc, java_thread); 217.72 - } 217.73 - } else { 217.74 - // other threads, such as worker and watcher threads, etc. 
need to 217.75 - // take ThreadCritical to write to global recorder 217.76 - ThreadCritical tc; 217.77 - create_record_in_recorder(addr, flags, size, pc, NULL); 217.78 - } 217.79 - } else { 217.80 - if (_state == NMT_bootstrapping_single_thread) { 217.81 - // single thread, no lock needed 217.82 - create_record_in_recorder(addr, flags, size, pc, NULL); 217.83 - } else { 217.84 - // for thread has yet to attach VM 'Thread', we can not use VM mutex. 217.85 - // use native thread critical instead 217.86 - ThreadCritical tc; 217.87 - create_record_in_recorder(addr, flags, size, pc, NULL); 217.88 - } 217.89 - } 217.90 - } 217.91 -} 217.92 - 217.93 // write a record to proper recorder. No lock can be taken from this method 217.94 // down. 217.95 -void MemTracker::create_record_in_recorder(address addr, MEMFLAGS flags, 217.96 - size_t size, address pc, JavaThread* thread) { 217.97 +void MemTracker::write_tracking_record(address addr, MEMFLAGS flags, 217.98 + size_t size, jint seq, address pc, JavaThread* thread) { 217.99 217.100 MemRecorder* rc = get_thread_recorder(thread); 217.101 if (rc != NULL) { 217.102 - rc->record(addr, flags, size, pc); 217.103 + rc->record(addr, flags, size, seq, pc); 217.104 } 217.105 } 217.106 217.107 @@ -487,39 +410,43 @@ 217.108 return; 217.109 } 217.110 } 217.111 - _sync_point_skip_count = 0; 217.112 { 217.113 // This method is running at safepoint, with ThreadCritical lock, 217.114 // it should guarantee that NMT is fully sync-ed. 217.115 ThreadCritical tc; 217.116 217.117 - SequenceGenerator::reset(); 217.118 + // We can NOT execute NMT sync-point if there are pending tracking ops. 
217.119 + if (_pending_op_count == 0) { 217.120 + SequenceGenerator::reset(); 217.121 + _sync_point_skip_count = 0; 217.122 217.123 - // walk all JavaThreads to collect recorders 217.124 - SyncThreadRecorderClosure stc; 217.125 - Threads::threads_do(&stc); 217.126 + // walk all JavaThreads to collect recorders 217.127 + SyncThreadRecorderClosure stc; 217.128 + Threads::threads_do(&stc); 217.129 217.130 - _thread_count = stc.get_thread_count(); 217.131 - MemRecorder* pending_recorders = get_pending_recorders(); 217.132 + _thread_count = stc.get_thread_count(); 217.133 + MemRecorder* pending_recorders = get_pending_recorders(); 217.134 217.135 - if (_global_recorder != NULL) { 217.136 - _global_recorder->set_next(pending_recorders); 217.137 - pending_recorders = _global_recorder; 217.138 - _global_recorder = NULL; 217.139 + if (_global_recorder != NULL) { 217.140 + _global_recorder->set_next(pending_recorders); 217.141 + pending_recorders = _global_recorder; 217.142 + _global_recorder = NULL; 217.143 + } 217.144 + 217.145 + // see if NMT has too many outstanding recorder instances, it usually 217.146 + // means that worker thread is lagging behind in processing them. 217.147 + if (!AutoShutdownNMT) { 217.148 + _slowdown_calling_thread = (MemRecorder::_instance_count > MAX_RECORDER_THREAD_RATIO * _thread_count); 217.149 + } 217.150 + 217.151 + // check _worker_thread with lock to avoid racing condition 217.152 + if (_worker_thread != NULL) { 217.153 + _worker_thread->at_sync_point(pending_recorders, InstanceKlass::number_of_instance_classes()); 217.154 + } 217.155 + assert(SequenceGenerator::peek() == 1, "Should not have memory activities during sync-point"); 217.156 + } else { 217.157 + _sync_point_skip_count ++; 217.158 } 217.159 - 217.160 - // see if NMT has too many outstanding recorder instances, it usually 217.161 - // means that worker thread is lagging behind in processing them. 
217.162 - if (!AutoShutdownNMT) { 217.163 - _slowdown_calling_thread = (MemRecorder::_instance_count > MAX_RECORDER_THREAD_RATIO * _thread_count); 217.164 - } 217.165 - 217.166 - // check _worker_thread with lock to avoid racing condition 217.167 - if (_worker_thread != NULL) { 217.168 - _worker_thread->at_sync_point(pending_recorders, InstanceKlass::number_of_instance_classes()); 217.169 - } 217.170 - 217.171 - assert(SequenceGenerator::peek() == 1, "Should not have memory activities during sync-point"); 217.172 } 217.173 } 217.174 217.175 @@ -708,3 +635,243 @@ 217.176 } 217.177 #endif 217.178 217.179 + 217.180 +// Tracker Implementation 217.181 + 217.182 +/* 217.183 + * Create a tracker. 217.184 + * This is a fairly complicated constructor, as it has to make two important decisions: 217.185 + * 1) Does it need to take ThreadCritical lock to write tracking record 217.186 + * 2) Does it need to pre-reserve a sequence number for the tracking record 217.187 + * 217.188 + * The rules to determine if ThreadCritical is needed: 217.189 + * 1. When nmt is in single-threaded bootstrapping mode, no lock is needed as VM 217.190 + * still in single thread mode. 217.191 + * 2. For all threads other than JavaThread, ThreadCritical is needed 217.192 + * to write to recorders to global recorder. 217.193 + * 3. For JavaThreads that are no longer visible by safepoint, also 217.194 + * need to take ThreadCritical and records are written to global 217.195 + * recorders, since these threads are NOT walked by Threads.do_thread(). 217.196 + * 4. JavaThreads that are running in safepoint-safe states do not stop 217.197 + * for safepoints, ThreadCritical lock should be taken to write 217.198 + * memory records. 217.199 + * 5. JavaThreads that are running in VM state do not need any lock and 217.200 + * records are written to per-thread recorders. 217.201 + * 6. For a thread has yet to attach VM 'Thread', they need to take 217.202 + * ThreadCritical to write to global recorder. 
217.203 + * 217.204 + * The memory operations that need pre-reserve sequence numbers: 217.205 + * The memory operations that "release" memory blocks and the 217.206 + * operations can fail, need to pre-reserve sequence number. They 217.207 + * are realloc, uncommit and release. 217.208 + * 217.209 + * The reason for pre-reserve sequence number, is to prevent race condition: 217.210 + * Thread 1 Thread 2 217.211 + * <release> 217.212 + * <allocate> 217.213 + * <write allocate record> 217.214 + * <write release record> 217.215 + * if Thread 2 happens to obtain the memory address Thread 1 just released, 217.216 + * then NMT can mistakenly report the memory is free. 217.217 + * 217.218 + * Noticeably, free() does not need pre-reserve sequence number, because the call 217.219 + * does not fail, so we can alway write "release" record before the memory is actaully 217.220 + * freed. 217.221 + * 217.222 + * For realloc, uncommit and release, following coding pattern should be used: 217.223 + * 217.224 + * MemTracker::Tracker tkr = MemTracker::get_realloc_tracker(); 217.225 + * ptr = ::realloc(...); 217.226 + * if (ptr == NULL) { 217.227 + * tkr.record(...) 217.228 + * } else { 217.229 + * tkr.discard(); 217.230 + * } 217.231 + * 217.232 + * MemTracker::Tracker tkr = MemTracker::get_virtual_memory_uncommit_tracker(); 217.233 + * if (uncommit(...)) { 217.234 + * tkr.record(...); 217.235 + * } else { 217.236 + * tkr.discard(); 217.237 + * } 217.238 + * 217.239 + * MemTracker::Tracker tkr = MemTracker::get_virtual_memory_release_tracker(); 217.240 + * if (release(...)) { 217.241 + * tkr.record(...); 217.242 + * } else { 217.243 + * tkr.discard(); 217.244 + * } 217.245 + * 217.246 + * Since pre-reserved sequence number is only good for the generation that it is acquired, 217.247 + * when there is pending Tracker that reserved sequence number, NMT sync-point has 217.248 + * to be skipped to prevent from advancing generation. 
This is done by inc and dec 217.249 + * MemTracker::_pending_op_count, when MemTracker::_pending_op_count > 0, NMT sync-point is skipped. 217.250 + * Not all pre-reservation of sequence number will increment pending op count. For JavaThreads 217.251 + * that honor safepoints, safepoint can not occur during the memory operations, so the 217.252 + * pre-reserved sequence number won't cross the generation boundry. 217.253 + */ 217.254 +MemTracker::Tracker::Tracker(MemoryOperation op, Thread* thr) { 217.255 + _op = NoOp; 217.256 + _seq = 0; 217.257 + if (MemTracker::is_on()) { 217.258 + _java_thread = NULL; 217.259 + _op = op; 217.260 + 217.261 + // figure out if ThreadCritical lock is needed to write this operation 217.262 + // to MemTracker 217.263 + if (MemTracker::is_single_threaded_bootstrap()) { 217.264 + thr = NULL; 217.265 + } else if (thr == NULL) { 217.266 + // don't use Thread::current(), since it is possible that 217.267 + // the calling thread has yet to attach to VM 'Thread', 217.268 + // which will result assertion failure 217.269 + thr = ThreadLocalStorage::thread(); 217.270 + } 217.271 + 217.272 + if (thr != NULL) { 217.273 + // Check NMT load 217.274 + MemTracker::check_NMT_load(thr); 217.275 + 217.276 + if (thr->is_Java_thread() && ((JavaThread*)thr)->is_safepoint_visible()) { 217.277 + _java_thread = (JavaThread*)thr; 217.278 + JavaThreadState state = _java_thread->thread_state(); 217.279 + // JavaThreads that are safepoint safe, can run through safepoint, 217.280 + // so ThreadCritical is needed to ensure no threads at safepoint create 217.281 + // new records while the records are being gathered and the sequence number is changing 217.282 + _need_thread_critical_lock = 217.283 + SafepointSynchronize::safepoint_safe(_java_thread, state); 217.284 + } else { 217.285 + _need_thread_critical_lock = true; 217.286 + } 217.287 + } else { 217.288 + _need_thread_critical_lock 217.289 + = !MemTracker::is_single_threaded_bootstrap(); 217.290 + } 217.291 + 
217.292 + // see if we need to pre-reserve sequence number for this operation 217.293 + if (_op == Realloc || _op == Uncommit || _op == Release) { 217.294 + if (_need_thread_critical_lock) { 217.295 + ThreadCritical tc; 217.296 + MemTracker::inc_pending_op_count(); 217.297 + _seq = SequenceGenerator::next(); 217.298 + } else { 217.299 + // for the threads that honor safepoints, no safepoint can occur 217.300 + // during the lifespan of tracker, so we don't need to increase 217.301 + // pending op count. 217.302 + _seq = SequenceGenerator::next(); 217.303 + } 217.304 + } 217.305 + } 217.306 +} 217.307 + 217.308 +void MemTracker::Tracker::discard() { 217.309 + if (MemTracker::is_on() && _seq != 0) { 217.310 + if (_need_thread_critical_lock) { 217.311 + ThreadCritical tc; 217.312 + MemTracker::dec_pending_op_count(); 217.313 + } 217.314 + _seq = 0; 217.315 + } 217.316 +} 217.317 + 217.318 + 217.319 +void MemTracker::Tracker::record(address old_addr, address new_addr, size_t size, 217.320 + MEMFLAGS flags, address pc) { 217.321 + assert(old_addr != NULL && new_addr != NULL, "Sanity check"); 217.322 + assert(_op == Realloc || _op == NoOp, "Wrong call"); 217.323 + if (MemTracker::is_on() && NMT_CAN_TRACK(flags) && _op != NoOp) { 217.324 + assert(_seq > 0, "Need pre-reserve sequence number"); 217.325 + if (_need_thread_critical_lock) { 217.326 + ThreadCritical tc; 217.327 + // free old address, use pre-reserved sequence number 217.328 + MemTracker::write_tracking_record(old_addr, MemPointerRecord::free_tag(), 217.329 + 0, _seq, pc, _java_thread); 217.330 + MemTracker::write_tracking_record(new_addr, flags | MemPointerRecord::malloc_tag(), 217.331 + size, SequenceGenerator::next(), pc, _java_thread); 217.332 + // decrement MemTracker pending_op_count 217.333 + MemTracker::dec_pending_op_count(); 217.334 + } else { 217.335 + // free old address, use pre-reserved sequence number 217.336 + MemTracker::write_tracking_record(old_addr, MemPointerRecord::free_tag(), 217.337 + 0, 
_seq, pc, _java_thread); 217.338 + MemTracker::write_tracking_record(new_addr, flags | MemPointerRecord::malloc_tag(), 217.339 + size, SequenceGenerator::next(), pc, _java_thread); 217.340 + } 217.341 + _seq = 0; 217.342 + } 217.343 +} 217.344 + 217.345 +void MemTracker::Tracker::record(address addr, size_t size, MEMFLAGS flags, address pc) { 217.346 + // OOM already? 217.347 + if (addr == NULL) return; 217.348 + 217.349 + if (MemTracker::is_on() && NMT_CAN_TRACK(flags) && _op != NoOp) { 217.350 + bool pre_reserved_seq = (_seq != 0); 217.351 + address pc = CALLER_CALLER_PC; 217.352 + MEMFLAGS orig_flags = flags; 217.353 + 217.354 + // or the tagging flags 217.355 + switch(_op) { 217.356 + case Malloc: 217.357 + flags |= MemPointerRecord::malloc_tag(); 217.358 + break; 217.359 + case Free: 217.360 + flags = MemPointerRecord::free_tag(); 217.361 + break; 217.362 + case Realloc: 217.363 + fatal("Use the other Tracker::record()"); 217.364 + break; 217.365 + case Reserve: 217.366 + case ReserveAndCommit: 217.367 + flags |= MemPointerRecord::virtual_memory_reserve_tag(); 217.368 + break; 217.369 + case Commit: 217.370 + flags = MemPointerRecord::virtual_memory_commit_tag(); 217.371 + break; 217.372 + case Type: 217.373 + flags |= MemPointerRecord::virtual_memory_type_tag(); 217.374 + break; 217.375 + case Uncommit: 217.376 + assert(pre_reserved_seq, "Need pre-reserve sequence number"); 217.377 + flags = MemPointerRecord::virtual_memory_uncommit_tag(); 217.378 + break; 217.379 + case Release: 217.380 + assert(pre_reserved_seq, "Need pre-reserve sequence number"); 217.381 + flags = MemPointerRecord::virtual_memory_release_tag(); 217.382 + break; 217.383 + case ArenaSize: 217.384 + // a bit of hack here, add a small postive offset to arena 217.385 + // address for its size record, so the size record is sorted 217.386 + // right after arena record. 
217.387 + flags = MemPointerRecord::arena_size_tag(); 217.388 + addr += sizeof(void*); 217.389 + break; 217.390 + case StackRelease: 217.391 + flags = MemPointerRecord::virtual_memory_release_tag(); 217.392 + break; 217.393 + default: 217.394 + ShouldNotReachHere(); 217.395 + } 217.396 + 217.397 + // write memory tracking record 217.398 + if (_need_thread_critical_lock) { 217.399 + ThreadCritical tc; 217.400 + if (_seq == 0) _seq = SequenceGenerator::next(); 217.401 + MemTracker::write_tracking_record(addr, flags, size, _seq, pc, _java_thread); 217.402 + if (_op == ReserveAndCommit) { 217.403 + MemTracker::write_tracking_record(addr, orig_flags | MemPointerRecord::virtual_memory_commit_tag(), 217.404 + size, SequenceGenerator::next(), pc, _java_thread); 217.405 + } 217.406 + if (pre_reserved_seq) MemTracker::dec_pending_op_count(); 217.407 + } else { 217.408 + if (_seq == 0) _seq = SequenceGenerator::next(); 217.409 + MemTracker::write_tracking_record(addr, flags, size, _seq, pc, _java_thread); 217.410 + if (_op == ReserveAndCommit) { 217.411 + MemTracker::write_tracking_record(addr, orig_flags | MemPointerRecord::virtual_memory_commit_tag(), 217.412 + size, SequenceGenerator::next(), pc, _java_thread); 217.413 + } 217.414 + } 217.415 + _seq = 0; 217.416 + } 217.417 +} 217.418 +
218.1 --- a/src/share/vm/services/memTracker.hpp Thu Jul 11 12:59:03 2013 -0400 218.2 +++ b/src/share/vm/services/memTracker.hpp Mon Jul 15 11:07:03 2013 +0100 218.3 @@ -1,5 +1,5 @@ 218.4 /* 218.5 - * Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. 218.6 + * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved. 218.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 218.8 * 218.9 * This code is free software; you can redistribute it and/or modify it 218.10 @@ -54,6 +54,18 @@ 218.11 NMT_sequence_overflow // overflow the sequence number 218.12 }; 218.13 218.14 + class Tracker { 218.15 + public: 218.16 + void discard() { } 218.17 + 218.18 + void record(address addr, size_t size = 0, MEMFLAGS flags = mtNone, address pc = NULL) { } 218.19 + void record(address old_addr, address new_addr, size_t size, 218.20 + MEMFLAGS flags, address pc = NULL) { } 218.21 + }; 218.22 + 218.23 + private: 218.24 + static Tracker _tkr; 218.25 + 218.26 218.27 public: 218.28 static inline void init_tracking_options(const char* option_line) { } 218.29 @@ -68,19 +80,18 @@ 218.30 static inline void record_malloc(address addr, size_t size, MEMFLAGS flags, 218.31 address pc = 0, Thread* thread = NULL) { } 218.32 static inline void record_free(address addr, MEMFLAGS flags, Thread* thread = NULL) { } 218.33 - static inline void record_realloc(address old_addr, address new_addr, size_t size, 218.34 - MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { } 218.35 static inline void record_arena_size(address addr, size_t size) { } 218.36 static inline void record_virtual_memory_reserve(address addr, size_t size, 218.37 - address pc = 0, Thread* thread = NULL) { } 218.38 + MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { } 218.39 + static inline void record_virtual_memory_reserve_and_commit(address addr, size_t size, 218.40 + MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { } 218.41 static inline void 
record_virtual_memory_commit(address addr, size_t size, 218.42 address pc = 0, Thread* thread = NULL) { } 218.43 - static inline void record_virtual_memory_uncommit(address addr, size_t size, 218.44 - Thread* thread = NULL) { } 218.45 - static inline void record_virtual_memory_release(address addr, size_t size, 218.46 - Thread* thread = NULL) { } 218.47 static inline void record_virtual_memory_type(address base, MEMFLAGS flags, 218.48 Thread* thread = NULL) { } 218.49 + static inline Tracker get_realloc_tracker() { return _tkr; } 218.50 + static inline Tracker get_virtual_memory_uncommit_tracker() { return _tkr; } 218.51 + static inline Tracker get_virtual_memory_release_tracker() { return _tkr; } 218.52 static inline bool baseline() { return false; } 218.53 static inline bool has_baseline() { return false; } 218.54 218.55 @@ -165,6 +176,45 @@ 218.56 }; 218.57 218.58 public: 218.59 + class Tracker : public StackObj { 218.60 + friend class MemTracker; 218.61 + public: 218.62 + enum MemoryOperation { 218.63 + NoOp, // no op 218.64 + Malloc, // malloc 218.65 + Realloc, // realloc 218.66 + Free, // free 218.67 + Reserve, // virtual memory reserve 218.68 + Commit, // virtual memory commit 218.69 + ReserveAndCommit, // virtual memory reserve and commit 218.70 + StackAlloc = ReserveAndCommit, // allocate thread stack 218.71 + Type, // assign virtual memory type 218.72 + Uncommit, // virtual memory uncommit 218.73 + Release, // virtual memory release 218.74 + ArenaSize, // set arena size 218.75 + StackRelease // release thread stack 218.76 + }; 218.77 + 218.78 + 218.79 + protected: 218.80 + Tracker(MemoryOperation op, Thread* thr = NULL); 218.81 + 218.82 + public: 218.83 + void discard(); 218.84 + 218.85 + void record(address addr, size_t size = 0, MEMFLAGS flags = mtNone, address pc = NULL); 218.86 + void record(address old_addr, address new_addr, size_t size, 218.87 + MEMFLAGS flags, address pc = NULL); 218.88 + 218.89 + private: 218.90 + bool _need_thread_critical_lock; 
218.91 + JavaThread* _java_thread; 218.92 + MemoryOperation _op; // memory operation 218.93 + jint _seq; // reserved sequence number 218.94 + }; 218.95 + 218.96 + 218.97 + public: 218.98 // native memory tracking level 218.99 enum NMTLevel { 218.100 NMT_off, // native memory tracking is off 218.101 @@ -276,109 +326,74 @@ 218.102 // record a 'malloc' call 218.103 static inline void record_malloc(address addr, size_t size, MEMFLAGS flags, 218.104 address pc = 0, Thread* thread = NULL) { 218.105 - if (is_on() && NMT_CAN_TRACK(flags)) { 218.106 - assert(size > 0, "Sanity check"); 218.107 - create_memory_record(addr, (flags|MemPointerRecord::malloc_tag()), size, pc, thread); 218.108 - } 218.109 + Tracker tkr(Tracker::Malloc, thread); 218.110 + tkr.record(addr, size, flags, pc); 218.111 } 218.112 // record a 'free' call 218.113 static inline void record_free(address addr, MEMFLAGS flags, Thread* thread = NULL) { 218.114 - if (is_on() && NMT_CAN_TRACK(flags)) { 218.115 - create_memory_record(addr, MemPointerRecord::free_tag(), 0, 0, thread); 218.116 - } 218.117 - } 218.118 - // record a 'realloc' call 218.119 - static inline void record_realloc(address old_addr, address new_addr, size_t size, 218.120 - MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { 218.121 - if (is_on() && NMT_CAN_TRACK(flags)) { 218.122 - assert(size > 0, "Sanity check"); 218.123 - record_free(old_addr, flags, thread); 218.124 - record_malloc(new_addr, size, flags, pc, thread); 218.125 - } 218.126 + Tracker tkr(Tracker::Free, thread); 218.127 + tkr.record(addr, 0, flags, DEBUG_CALLER_PC); 218.128 } 218.129 218.130 - // record arena memory size 218.131 static inline void record_arena_size(address addr, size_t size) { 218.132 - // we add a positive offset to arena address, so we can have arena memory record 218.133 - // sorted after arena record 218.134 - if (is_on() && !UseMallocOnly) { 218.135 - assert(addr != NULL, "Sanity check"); 218.136 - create_memory_record((addr + sizeof(void*)), 
MemPointerRecord::arena_size_tag(), size, 218.137 - DEBUG_CALLER_PC, NULL); 218.138 - } 218.139 + Tracker tkr(Tracker::ArenaSize); 218.140 + tkr.record(addr, size); 218.141 } 218.142 218.143 // record a virtual memory 'reserve' call 218.144 static inline void record_virtual_memory_reserve(address addr, size_t size, 218.145 - address pc = 0, Thread* thread = NULL) { 218.146 - if (is_on()) { 218.147 - assert(size > 0, "Sanity check"); 218.148 - create_memory_record(addr, MemPointerRecord::virtual_memory_reserve_tag(), 218.149 - size, pc, thread); 218.150 - } 218.151 + MEMFLAGS flags, address pc = 0, Thread* thread = NULL) { 218.152 + assert(size > 0, "Sanity check"); 218.153 + Tracker tkr(Tracker::Reserve, thread); 218.154 + tkr.record(addr, size, flags, pc); 218.155 } 218.156 218.157 static inline void record_thread_stack(address addr, size_t size, Thread* thr, 218.158 address pc = 0) { 218.159 - if (is_on()) { 218.160 - assert(size > 0 && thr != NULL, "Sanity check"); 218.161 - create_memory_record(addr, MemPointerRecord::virtual_memory_reserve_tag() | mtThreadStack, 218.162 - size, pc, thr); 218.163 - create_memory_record(addr, MemPointerRecord::virtual_memory_commit_tag() | mtThreadStack, 218.164 - size, pc, thr); 218.165 - } 218.166 + Tracker tkr(Tracker::StackAlloc, thr); 218.167 + tkr.record(addr, size, mtThreadStack, pc); 218.168 } 218.169 218.170 static inline void release_thread_stack(address addr, size_t size, Thread* thr) { 218.171 - if (is_on()) { 218.172 - assert(size > 0 && thr != NULL, "Sanity check"); 218.173 - assert(!thr->is_Java_thread(), "too early"); 218.174 - create_memory_record(addr, MemPointerRecord::virtual_memory_uncommit_tag() | mtThreadStack, 218.175 - size, DEBUG_CALLER_PC, thr); 218.176 - create_memory_record(addr, MemPointerRecord::virtual_memory_release_tag() | mtThreadStack, 218.177 - size, DEBUG_CALLER_PC, thr); 218.178 - } 218.179 + Tracker tkr(Tracker::StackRelease, thr); 218.180 + tkr.record(addr, size, mtThreadStack, 
DEBUG_CALLER_PC); 218.181 } 218.182 218.183 // record a virtual memory 'commit' call 218.184 static inline void record_virtual_memory_commit(address addr, size_t size, 218.185 address pc, Thread* thread = NULL) { 218.186 - if (is_on()) { 218.187 - assert(size > 0, "Sanity check"); 218.188 - create_memory_record(addr, MemPointerRecord::virtual_memory_commit_tag(), 218.189 - size, pc, thread); 218.190 - } 218.191 + Tracker tkr(Tracker::Commit, thread); 218.192 + tkr.record(addr, size, mtNone, pc); 218.193 } 218.194 218.195 - // record a virtual memory 'uncommit' call 218.196 - static inline void record_virtual_memory_uncommit(address addr, size_t size, 218.197 - Thread* thread = NULL) { 218.198 - if (is_on()) { 218.199 - assert(size > 0, "Sanity check"); 218.200 - create_memory_record(addr, MemPointerRecord::virtual_memory_uncommit_tag(), 218.201 - size, DEBUG_CALLER_PC, thread); 218.202 - } 218.203 + static inline void record_virtual_memory_reserve_and_commit(address addr, size_t size, 218.204 + MEMFLAGS flags, address pc, Thread* thread = NULL) { 218.205 + Tracker tkr(Tracker::ReserveAndCommit, thread); 218.206 + tkr.record(addr, size, flags, pc); 218.207 } 218.208 218.209 - // record a virtual memory 'release' call 218.210 - static inline void record_virtual_memory_release(address addr, size_t size, 218.211 - Thread* thread = NULL) { 218.212 - if (is_on()) { 218.213 - assert(size > 0, "Sanity check"); 218.214 - create_memory_record(addr, MemPointerRecord::virtual_memory_release_tag(), 218.215 - size, DEBUG_CALLER_PC, thread); 218.216 - } 218.217 - } 218.218 218.219 // record memory type on virtual memory base address 218.220 static inline void record_virtual_memory_type(address base, MEMFLAGS flags, 218.221 Thread* thread = NULL) { 218.222 - if (is_on()) { 218.223 - assert(base > 0, "wrong base address"); 218.224 - assert((flags & (~mt_masks)) == 0, "memory type only"); 218.225 - create_memory_record(base, (flags | MemPointerRecord::virtual_memory_type_tag()), 
218.226 - 0, DEBUG_CALLER_PC, thread); 218.227 - } 218.228 + Tracker tkr(Tracker::Type); 218.229 + tkr.record(base, 0, flags); 218.230 + } 218.231 + 218.232 + // Get memory trackers for memory operations that can result race conditions. 218.233 + // The memory tracker has to be obtained before realloc, virtual memory uncommit 218.234 + // and virtual memory release, and call tracker.record() method if operation 218.235 + // succeeded, or tracker.discard() to abort the tracking. 218.236 + static inline Tracker get_realloc_tracker() { 218.237 + return Tracker(Tracker::Realloc); 218.238 + } 218.239 + 218.240 + static inline Tracker get_virtual_memory_uncommit_tracker() { 218.241 + return Tracker(Tracker::Uncommit); 218.242 + } 218.243 + 218.244 + static inline Tracker get_virtual_memory_release_tracker() { 218.245 + return Tracker(Tracker::Release); 218.246 } 218.247 218.248 218.249 @@ -444,6 +459,31 @@ 218.250 static MemRecorder* get_pending_recorders(); 218.251 static void delete_all_pending_recorders(); 218.252 218.253 + // write a memory tracking record in recorder 218.254 + static void write_tracking_record(address addr, MEMFLAGS type, 218.255 + size_t size, jint seq, address pc, JavaThread* thread); 218.256 + 218.257 + static bool is_single_threaded_bootstrap() { 218.258 + return _state == NMT_bootstrapping_single_thread; 218.259 + } 218.260 + 218.261 + static void check_NMT_load(Thread* thr) { 218.262 + assert(thr != NULL, "Sanity check"); 218.263 + if (_slowdown_calling_thread && thr != _worker_thread) { 218.264 + os::yield_all(); 218.265 + } 218.266 + } 218.267 + 218.268 + static void inc_pending_op_count() { 218.269 + Atomic::inc(&_pending_op_count); 218.270 + } 218.271 + 218.272 + static void dec_pending_op_count() { 218.273 + Atomic::dec(&_pending_op_count); 218.274 + assert(_pending_op_count >= 0, "Sanity check"); 218.275 + } 218.276 + 218.277 + 218.278 private: 218.279 // retrieve a pooled memory record or create new one if there is not 218.280 // one 
available 218.281 @@ -522,6 +562,12 @@ 218.282 // if NMT should slow down calling thread to allow 218.283 // worker thread to catch up 218.284 static volatile bool _slowdown_calling_thread; 218.285 + 218.286 + // pending memory op count. 218.287 + // Certain memory ops need to pre-reserve sequence number 218.288 + // before memory operation can happen to avoid race condition. 218.289 + // See MemTracker::Tracker for detail 218.290 + static volatile jint _pending_op_count; 218.291 }; 218.292 218.293 #endif // !INCLUDE_NMT
219.1 --- a/src/share/vm/services/memoryManager.cpp Thu Jul 11 12:59:03 2013 -0400 219.2 +++ b/src/share/vm/services/memoryManager.cpp Mon Jul 15 11:07:03 2013 +0100 219.3 @@ -61,6 +61,10 @@ 219.4 return (MemoryManager*) new CodeCacheMemoryManager(); 219.5 } 219.6 219.7 +MemoryManager* MemoryManager::get_metaspace_memory_manager() { 219.8 + return (MemoryManager*) new MetaspaceMemoryManager(); 219.9 +} 219.10 + 219.11 GCMemoryManager* MemoryManager::get_copy_memory_manager() { 219.12 return (GCMemoryManager*) new CopyMemoryManager(); 219.13 }
220.1 --- a/src/share/vm/services/memoryManager.hpp Thu Jul 11 12:59:03 2013 -0400 220.2 +++ b/src/share/vm/services/memoryManager.hpp Mon Jul 15 11:07:03 2013 +0100 220.3 @@ -56,6 +56,7 @@ 220.4 enum Name { 220.5 Abstract, 220.6 CodeCache, 220.7 + Metaspace, 220.8 Copy, 220.9 MarkSweepCompact, 220.10 ParNew, 220.11 @@ -88,6 +89,7 @@ 220.12 220.13 // Static factory methods to get a memory manager of a specific type 220.14 static MemoryManager* get_code_cache_memory_manager(); 220.15 + static MemoryManager* get_metaspace_memory_manager(); 220.16 static GCMemoryManager* get_copy_memory_manager(); 220.17 static GCMemoryManager* get_msc_memory_manager(); 220.18 static GCMemoryManager* get_parnew_memory_manager(); 220.19 @@ -108,6 +110,14 @@ 220.20 const char* name() { return "CodeCacheManager"; } 220.21 }; 220.22 220.23 +class MetaspaceMemoryManager : public MemoryManager { 220.24 +public: 220.25 + MetaspaceMemoryManager() : MemoryManager() {} 220.26 + 220.27 + MemoryManager::Name kind() { return MemoryManager::Metaspace; } 220.28 + const char *name() { return "Metaspace Manager"; } 220.29 +}; 220.30 + 220.31 class GCStatInfo : public ResourceObj { 220.32 private: 220.33 size_t _index;
221.1 --- a/src/share/vm/services/memoryPool.cpp Thu Jul 11 12:59:03 2013 -0400 221.2 +++ b/src/share/vm/services/memoryPool.cpp Mon Jul 15 11:07:03 2013 +0100 221.3 @@ -25,6 +25,7 @@ 221.4 #include "precompiled.hpp" 221.5 #include "classfile/systemDictionary.hpp" 221.6 #include "classfile/vmSymbols.hpp" 221.7 +#include "memory/metaspace.hpp" 221.8 #include "oops/oop.inline.hpp" 221.9 #include "runtime/handles.inline.hpp" 221.10 #include "runtime/javaCalls.hpp" 221.11 @@ -33,6 +34,7 @@ 221.12 #include "services/memoryManager.hpp" 221.13 #include "services/memoryPool.hpp" 221.14 #include "utilities/macros.hpp" 221.15 +#include "utilities/globalDefinitions.hpp" 221.16 221.17 MemoryPool::MemoryPool(const char* name, 221.18 PoolType type, 221.19 @@ -256,3 +258,39 @@ 221.20 221.21 return MemoryUsage(initial_size(), used, committed, maxSize); 221.22 } 221.23 + 221.24 +MetaspacePool::MetaspacePool() : 221.25 + MemoryPool("Metaspace", NonHeap, capacity_in_bytes(), calculate_max_size(), true, false) { } 221.26 + 221.27 +MemoryUsage MetaspacePool::get_memory_usage() { 221.28 + size_t committed = align_size_down_(capacity_in_bytes(), os::vm_page_size()); 221.29 + return MemoryUsage(initial_size(), used_in_bytes(), committed, max_size()); 221.30 +} 221.31 + 221.32 +size_t MetaspacePool::used_in_bytes() { 221.33 + return MetaspaceAux::allocated_used_bytes(Metaspace::NonClassType); 221.34 +} 221.35 + 221.36 +size_t MetaspacePool::capacity_in_bytes() const { 221.37 + return MetaspaceAux::allocated_capacity_bytes(Metaspace::NonClassType); 221.38 +} 221.39 + 221.40 +size_t MetaspacePool::calculate_max_size() const { 221.41 + return FLAG_IS_CMDLINE(MaxMetaspaceSize) ? 
MaxMetaspaceSize : max_uintx; 221.42 +} 221.43 + 221.44 +CompressedKlassSpacePool::CompressedKlassSpacePool() : 221.45 + MemoryPool("Compressed Class Space", NonHeap, capacity_in_bytes(), ClassMetaspaceSize, true, false) { } 221.46 + 221.47 +size_t CompressedKlassSpacePool::used_in_bytes() { 221.48 + return MetaspaceAux::allocated_used_bytes(Metaspace::ClassType); 221.49 +} 221.50 + 221.51 +size_t CompressedKlassSpacePool::capacity_in_bytes() const { 221.52 + return MetaspaceAux::allocated_capacity_bytes(Metaspace::ClassType); 221.53 +} 221.54 + 221.55 +MemoryUsage CompressedKlassSpacePool::get_memory_usage() { 221.56 + size_t committed = align_size_down_(capacity_in_bytes(), os::vm_page_size()); 221.57 + return MemoryUsage(initial_size(), used_in_bytes(), committed, max_size()); 221.58 +}
222.1 --- a/src/share/vm/services/memoryPool.hpp Thu Jul 11 12:59:03 2013 -0400 222.2 +++ b/src/share/vm/services/memoryPool.hpp Mon Jul 15 11:07:03 2013 +0100 222.3 @@ -222,4 +222,21 @@ 222.4 size_t used_in_bytes() { return _codeHeap->allocated_capacity(); } 222.5 }; 222.6 222.7 +class MetaspacePool : public MemoryPool { 222.8 + size_t calculate_max_size() const; 222.9 + size_t capacity_in_bytes() const; 222.10 + public: 222.11 + MetaspacePool(); 222.12 + MemoryUsage get_memory_usage(); 222.13 + size_t used_in_bytes(); 222.14 +}; 222.15 + 222.16 +class CompressedKlassSpacePool : public MemoryPool { 222.17 + size_t capacity_in_bytes() const; 222.18 + public: 222.19 + CompressedKlassSpacePool(); 222.20 + MemoryUsage get_memory_usage(); 222.21 + size_t used_in_bytes(); 222.22 +}; 222.23 + 222.24 #endif // SHARE_VM_SERVICES_MEMORYPOOL_HPP
223.1 --- a/src/share/vm/services/memoryService.cpp Thu Jul 11 12:59:03 2013 -0400 223.2 +++ b/src/share/vm/services/memoryService.cpp Mon Jul 15 11:07:03 2013 +0100 223.3 @@ -35,6 +35,7 @@ 223.4 #include "memory/memRegion.hpp" 223.5 #include "memory/tenuredGeneration.hpp" 223.6 #include "oops/oop.inline.hpp" 223.7 +#include "runtime/globals.hpp" 223.8 #include "runtime/javaCalls.hpp" 223.9 #include "services/classLoadingService.hpp" 223.10 #include "services/lowMemoryDetector.hpp" 223.11 @@ -60,9 +61,11 @@ 223.12 GrowableArray<MemoryManager*>* MemoryService::_managers_list = 223.13 new (ResourceObj::C_HEAP, mtInternal) GrowableArray<MemoryManager*>(init_managers_list_size, true); 223.14 223.15 -GCMemoryManager* MemoryService::_minor_gc_manager = NULL; 223.16 -GCMemoryManager* MemoryService::_major_gc_manager = NULL; 223.17 -MemoryPool* MemoryService::_code_heap_pool = NULL; 223.18 +GCMemoryManager* MemoryService::_minor_gc_manager = NULL; 223.19 +GCMemoryManager* MemoryService::_major_gc_manager = NULL; 223.20 +MemoryPool* MemoryService::_code_heap_pool = NULL; 223.21 +MemoryPool* MemoryService::_metaspace_pool = NULL; 223.22 +MemoryPool* MemoryService::_compressed_class_pool = NULL; 223.23 223.24 class GcThreadCountClosure: public ThreadClosure { 223.25 private: 223.26 @@ -399,6 +402,22 @@ 223.27 _managers_list->append(mgr); 223.28 } 223.29 223.30 +void MemoryService::add_metaspace_memory_pools() { 223.31 + MemoryManager* mgr = MemoryManager::get_metaspace_memory_manager(); 223.32 + 223.33 + _metaspace_pool = new MetaspacePool(); 223.34 + mgr->add_pool(_metaspace_pool); 223.35 + _pools_list->append(_metaspace_pool); 223.36 + 223.37 + if (UseCompressedKlassPointers) { 223.38 + _compressed_class_pool = new CompressedKlassSpacePool(); 223.39 + mgr->add_pool(_compressed_class_pool); 223.40 + _pools_list->append(_compressed_class_pool); 223.41 + } 223.42 + 223.43 + _managers_list->append(mgr); 223.44 +} 223.45 + 223.46 MemoryManager* 
MemoryService::get_memory_manager(instanceHandle mh) { 223.47 for (int i = 0; i < _managers_list->length(); i++) { 223.48 MemoryManager* mgr = _managers_list->at(i);
224.1 --- a/src/share/vm/services/memoryService.hpp Thu Jul 11 12:59:03 2013 -0400 224.2 +++ b/src/share/vm/services/memoryService.hpp Mon Jul 15 11:07:03 2013 +0100 224.3 @@ -73,6 +73,9 @@ 224.4 // Code heap memory pool 224.5 static MemoryPool* _code_heap_pool; 224.6 224.7 + static MemoryPool* _metaspace_pool; 224.8 + static MemoryPool* _compressed_class_pool; 224.9 + 224.10 static void add_generation_memory_pool(Generation* gen, 224.11 MemoryManager* major_mgr, 224.12 MemoryManager* minor_mgr); 224.13 @@ -121,6 +124,7 @@ 224.14 public: 224.15 static void set_universe_heap(CollectedHeap* heap); 224.16 static void add_code_heap_memory_pool(CodeHeap* heap); 224.17 + static void add_metaspace_memory_pools(); 224.18 224.19 static MemoryPool* get_memory_pool(instanceHandle pool); 224.20 static MemoryManager* get_memory_manager(instanceHandle mgr);
225.1 --- a/src/share/vm/services/threadService.cpp Thu Jul 11 12:59:03 2013 -0400 225.2 +++ b/src/share/vm/services/threadService.cpp Mon Jul 15 11:07:03 2013 +0100 225.3 @@ -327,27 +327,30 @@ 225.4 while (waitingToLockMonitor != NULL || waitingToLockBlocker != NULL) { 225.5 cycle->add_thread(currentThread); 225.6 if (waitingToLockMonitor != NULL) { 225.7 - currentThread = Threads::owning_thread_from_monitor_owner( 225.8 - (address)waitingToLockMonitor->owner(), 225.9 - false /* no locking needed */); 225.10 - if (currentThread == NULL) { 225.11 - // This function is called at a safepoint so the JavaThread 225.12 - // that owns waitingToLockMonitor should be findable, but 225.13 - // if it is not findable, then the previous currentThread is 225.14 - // blocked permanently. We record this as a deadlock. 225.15 - num_deadlocks++; 225.16 + address currentOwner = (address)waitingToLockMonitor->owner(); 225.17 + if (currentOwner != NULL) { 225.18 + currentThread = Threads::owning_thread_from_monitor_owner( 225.19 + currentOwner, 225.20 + false /* no locking needed */); 225.21 + if (currentThread == NULL) { 225.22 + // This function is called at a safepoint so the JavaThread 225.23 + // that owns waitingToLockMonitor should be findable, but 225.24 + // if it is not findable, then the previous currentThread is 225.25 + // blocked permanently. We record this as a deadlock. 
225.26 + num_deadlocks++; 225.27 225.28 - cycle->set_deadlock(true); 225.29 + cycle->set_deadlock(true); 225.30 225.31 - // add this cycle to the deadlocks list 225.32 - if (deadlocks == NULL) { 225.33 - deadlocks = cycle; 225.34 - } else { 225.35 - last->set_next(cycle); 225.36 + // add this cycle to the deadlocks list 225.37 + if (deadlocks == NULL) { 225.38 + deadlocks = cycle; 225.39 + } else { 225.40 + last->set_next(cycle); 225.41 + } 225.42 + last = cycle; 225.43 + cycle = new DeadlockCycle(); 225.44 + break; 225.45 } 225.46 - last = cycle; 225.47 - cycle = new DeadlockCycle(); 225.48 - break; 225.49 } 225.50 } else { 225.51 if (concurrent_locks) {
226.1 --- a/src/share/vm/shark/sharkBuilder.cpp Thu Jul 11 12:59:03 2013 -0400 226.2 +++ b/src/share/vm/shark/sharkBuilder.cpp Mon Jul 15 11:07:03 2013 +0100 226.3 @@ -471,7 +471,7 @@ 226.4 226.5 Value* SharkBuilder::CreateInlineMetadata(Metadata* metadata, llvm::PointerType* type, const char* name) { 226.6 assert(metadata != NULL, "inlined metadata must not be NULL"); 226.7 - assert(metadata->is_metadata(), "sanity check"); 226.8 + assert(metadata->is_metaspace_object(), "sanity check"); 226.9 return CreateLoad( 226.10 CreateIntToPtr( 226.11 code_buffer_address(code_buffer()->inline_Metadata(metadata)),
227.1 --- a/src/share/vm/trace/trace.xml Thu Jul 11 12:59:03 2013 -0400 227.2 +++ b/src/share/vm/trace/trace.xml Mon Jul 15 11:07:03 2013 +0100 227.3 @@ -158,7 +158,7 @@ 227.4 <structvalue type="MetaspaceSizes" field="classSpace" label="Class"/> 227.5 </event> 227.6 227.7 - <event id="PSHeapSummary" path="vm/gc/heap/ps_summary" label="ParallelScavengeHeap Summary" is_instant="true"> 227.8 + <event id="PSHeapSummary" path="vm/gc/heap/ps_summary" label="Parallel Scavenge Heap Summary" is_instant="true"> 227.9 <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/> 227.10 <value type="GCWHEN" field="when" label="When" /> 227.11 227.12 @@ -203,7 +203,7 @@ 227.13 <value type="G1YCTYPE" field="type" label="Type" /> 227.14 </event> 227.15 227.16 - <event id="EvacuationInfo" path="vm/gc/detailed/evacuation_info" label="Evacuation Info" is_instant="true"> 227.17 + <event id="EvacuationInfo" path="vm/gc/detailed/evacuation_info" label="Evacuation Information" is_instant="true"> 227.18 <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/> 227.19 <value type="UINT" field="cSetRegions" label="Collection Set Regions"/> 227.20 <value type="BYTES64" field="cSetUsedBefore" label="Collection Set Before" description="Memory usage before GC in the collection set regions"/> 227.21 @@ -211,7 +211,7 @@ 227.22 <value type="UINT" field="allocationRegions" label="Allocation Regions" description="Regions chosen as allocation regions during evacuation (includes survivors and old space regions)"/> 227.23 <value type="BYTES64" field="allocRegionsUsedBefore" label="Alloc Regions Before" description="Memory usage before GC in allocation regions"/> 227.24 <value type="BYTES64" field="allocRegionsUsedAfter" label="Alloc Regions After" description="Memory usage after GC in allocation regions"/> 227.25 - <value type="BYTES64" field="bytesCopied" label="BytesCopied"/> 227.26 + <value type="BYTES64" field="bytesCopied" label="Bytes Copied"/> 227.27 <value type="UINT" 
field="regionsFreed" label="Regions Freed"/> 227.28 </event> 227.29 227.30 @@ -240,14 +240,14 @@ 227.31 <event id="PromotionFailed" path="vm/gc/detailed/promotion_failed" label="Promotion Failed" is_instant="true" 227.32 description="Promotion of an object failed"> 227.33 <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/> 227.34 - <structvalue type="CopyFailed" field="data" label="data"/> 227.35 + <structvalue type="CopyFailed" field="data" label="Data"/> 227.36 <value type="OSTHREAD" field="thread" label="Running thread"/> 227.37 </event> 227.38 227.39 <event id="EvacuationFailed" path="vm/gc/detailed/evacuation_failed" label="Evacuation Failed" is_instant="true" 227.40 description="Evacuation of an object failed"> 227.41 <value type="UINT" field="gcId" label="GC ID" relation="GC_ID"/> 227.42 - <structvalue type="CopyFailed" field="data" label="data"/> 227.43 + <structvalue type="CopyFailed" field="data" label="Data"/> 227.44 </event> 227.45 227.46 <event id="ConcurrentModeFailure" path="vm/gc/detailed/concurrent_mode_failure" label="Concurrent Mode Failure" 227.47 @@ -309,7 +309,7 @@ 227.48 <value type="USHORT" field="sweepFractionIndex" label="Fraction Index"/> 227.49 <value type="UINT" field="sweptCount" label="Methods Swept"/> 227.50 <value type="UINT" field="flushedCount" label="Methods Flushed"/> 227.51 - <value type="UINT" field="markedCount" label="Methods Reclaim"/> 227.52 + <value type="UINT" field="markedCount" label="Methods Reclaimed"/> 227.53 <value type="UINT" field="zombifiedCount" label="Methods Zombified"/> 227.54 </event> 227.55
228.1 --- a/src/share/vm/utilities/bitMap.cpp Thu Jul 11 12:59:03 2013 -0400 228.2 +++ b/src/share/vm/utilities/bitMap.cpp Mon Jul 15 11:07:03 2013 +0100 228.3 @@ -41,7 +41,7 @@ 228.4 228.5 228.6 BitMap::BitMap(bm_word_t* map, idx_t size_in_bits) : 228.7 - _map(map), _size(size_in_bits) 228.8 + _map(map), _size(size_in_bits), _map_allocator(false) 228.9 { 228.10 assert(sizeof(bm_word_t) == BytesPerWord, "Implementation assumption."); 228.11 assert(size_in_bits >= 0, "just checking"); 228.12 @@ -49,7 +49,7 @@ 228.13 228.14 228.15 BitMap::BitMap(idx_t size_in_bits, bool in_resource_area) : 228.16 - _map(NULL), _size(0) 228.17 + _map(NULL), _size(0), _map_allocator(false) 228.18 { 228.19 assert(sizeof(bm_word_t) == BytesPerWord, "Implementation assumption."); 228.20 resize(size_in_bits, in_resource_area); 228.21 @@ -65,8 +65,10 @@ 228.22 if (in_resource_area) { 228.23 _map = NEW_RESOURCE_ARRAY(bm_word_t, new_size_in_words); 228.24 } else { 228.25 - if (old_map != NULL) FREE_C_HEAP_ARRAY(bm_word_t, _map, mtInternal); 228.26 - _map = NEW_C_HEAP_ARRAY(bm_word_t, new_size_in_words, mtInternal); 228.27 + if (old_map != NULL) { 228.28 + _map_allocator.free(); 228.29 + } 228.30 + _map = _map_allocator.allocate(new_size_in_words); 228.31 } 228.32 Copy::disjoint_words((HeapWord*)old_map, (HeapWord*) _map, 228.33 MIN2(old_size_in_words, new_size_in_words));
229.1 --- a/src/share/vm/utilities/bitMap.hpp Thu Jul 11 12:59:03 2013 -0400 229.2 +++ b/src/share/vm/utilities/bitMap.hpp Mon Jul 15 11:07:03 2013 +0100 229.3 @@ -48,6 +48,7 @@ 229.4 } RangeSizeHint; 229.5 229.6 private: 229.7 + ArrayAllocator<bm_word_t, mtInternal> _map_allocator; 229.8 bm_word_t* _map; // First word in bitmap 229.9 idx_t _size; // Size of bitmap (in bits) 229.10 229.11 @@ -113,7 +114,7 @@ 229.12 public: 229.13 229.14 // Constructs a bitmap with no map, and size 0. 229.15 - BitMap() : _map(NULL), _size(0) {} 229.16 + BitMap() : _map(NULL), _size(0), _map_allocator(false) {} 229.17 229.18 // Constructs a bitmap with the given map and size. 229.19 BitMap(bm_word_t* map, idx_t size_in_bits);
230.1 --- a/src/share/vm/utilities/exceptions.hpp Thu Jul 11 12:59:03 2013 -0400 230.2 +++ b/src/share/vm/utilities/exceptions.hpp Mon Jul 15 11:07:03 2013 +0100 230.3 @@ -194,15 +194,15 @@ 230.4 #define HAS_PENDING_EXCEPTION (((ThreadShadow*)THREAD)->has_pending_exception()) 230.5 #define CLEAR_PENDING_EXCEPTION (((ThreadShadow*)THREAD)->clear_pending_exception()) 230.6 230.7 -#define CHECK THREAD); if (HAS_PENDING_EXCEPTION) return ; (0 230.8 -#define CHECK_(result) THREAD); if (HAS_PENDING_EXCEPTION) return result; (0 230.9 +#define CHECK THREAD); if (HAS_PENDING_EXCEPTION) return ; (void)(0 230.10 +#define CHECK_(result) THREAD); if (HAS_PENDING_EXCEPTION) return result; (void)(0 230.11 #define CHECK_0 CHECK_(0) 230.12 #define CHECK_NH CHECK_(Handle()) 230.13 #define CHECK_NULL CHECK_(NULL) 230.14 #define CHECK_false CHECK_(false) 230.15 230.16 -#define CHECK_AND_CLEAR THREAD); if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; return; } (0 230.17 -#define CHECK_AND_CLEAR_(result) THREAD); if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; return result; } (0 230.18 +#define CHECK_AND_CLEAR THREAD); if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; return; } (void)(0 230.19 +#define CHECK_AND_CLEAR_(result) THREAD); if (HAS_PENDING_EXCEPTION) { CLEAR_PENDING_EXCEPTION; return result; } (void)(0 230.20 #define CHECK_AND_CLEAR_0 CHECK_AND_CLEAR_(0) 230.21 #define CHECK_AND_CLEAR_NH CHECK_AND_CLEAR_(Handle()) 230.22 #define CHECK_AND_CLEAR_NULL CHECK_AND_CLEAR_(NULL) 230.23 @@ -282,7 +282,7 @@ 230.24 CLEAR_PENDING_EXCEPTION; \ 230.25 ex->print(); \ 230.26 ShouldNotReachHere(); \ 230.27 - } (0 230.28 + } (void)(0 230.29 230.30 // ExceptionMark is a stack-allocated helper class for local exception handling. 230.31 // It is used with the EXCEPTION_MARK macro.
231.1 --- a/src/share/vm/utilities/taskqueue.hpp Thu Jul 11 12:59:03 2013 -0400 231.2 +++ b/src/share/vm/utilities/taskqueue.hpp Mon Jul 15 11:07:03 2013 +0100 231.3 @@ -340,8 +340,12 @@ 231.4 if (dirty_n_elems == N - 1) { 231.5 // Actually means 0, so do the push. 231.6 uint localBot = _bottom; 231.7 - // g++ complains if the volatile result of the assignment is unused. 231.8 - const_cast<E&>(_elems[localBot] = t); 231.9 + // g++ complains if the volatile result of the assignment is 231.10 + // unused, so we cast the volatile away. We cannot cast directly 231.11 + // to void, because gcc treats that as not using the result of the 231.12 + // assignment. However, casting to E& means that we trigger an 231.13 + // unused-value warning. So, we cast the E& to void. 231.14 + (void)const_cast<E&>(_elems[localBot] = t); 231.15 OrderAccess::release_store(&_bottom, increment_index(localBot)); 231.16 TASKQUEUE_STATS_ONLY(stats.record_push()); 231.17 return true; 231.18 @@ -397,7 +401,12 @@ 231.19 return false; 231.20 } 231.21 231.22 - const_cast<E&>(t = _elems[oldAge.top()]); 231.23 + // g++ complains if the volatile result of the assignment is 231.24 + // unused, so we cast the volatile away. We cannot cast directly 231.25 + // to void, because gcc treats that as not using the result of the 231.26 + // assignment. However, casting to E& means that we trigger an 231.27 + // unused-value warning. So, we cast the E& to void. 231.28 + (void) const_cast<E&>(t = _elems[oldAge.top()]); 231.29 Age newAge(oldAge); 231.30 newAge.increment(); 231.31 Age resAge = _age.cmpxchg(newAge, oldAge); 231.32 @@ -640,8 +649,12 @@ 231.33 uint dirty_n_elems = dirty_size(localBot, top); 231.34 assert(dirty_n_elems < N, "n_elems out of range."); 231.35 if (dirty_n_elems < max_elems()) { 231.36 - // g++ complains if the volatile result of the assignment is unused. 
231.37 - const_cast<E&>(_elems[localBot] = t); 231.38 + // g++ complains if the volatile result of the assignment is 231.39 + // unused, so we cast the volatile away. We cannot cast directly 231.40 + // to void, because gcc treats that as not using the result of the 231.41 + // assignment. However, casting to E& means that we trigger an 231.42 + // unused-value warning. So, we cast the E& to void. 231.43 + (void) const_cast<E&>(_elems[localBot] = t); 231.44 OrderAccess::release_store(&_bottom, increment_index(localBot)); 231.45 TASKQUEUE_STATS_ONLY(stats.record_push()); 231.46 return true; 231.47 @@ -665,7 +678,12 @@ 231.48 // This is necessary to prevent any read below from being reordered 231.49 // before the store just above. 231.50 OrderAccess::fence(); 231.51 - const_cast<E&>(t = _elems[localBot]); 231.52 + // g++ complains if the volatile result of the assignment is 231.53 + // unused, so we cast the volatile away. We cannot cast directly 231.54 + // to void, because gcc treats that as not using the result of the 231.55 + // assignment. However, casting to E& means that we trigger an 231.56 + // unused-value warning. So, we cast the E& to void. 231.57 + (void) const_cast<E&>(t = _elems[localBot]); 231.58 // This is a second read of "age"; the "size()" above is the first. 231.59 // If there's still at least one element in the queue, based on the 231.60 // "_bottom" and "age" we've read, then there can be no interference with
232.1 --- a/src/share/vm/utilities/vmError.cpp Thu Jul 11 12:59:03 2013 -0400 232.2 +++ b/src/share/vm/utilities/vmError.cpp Mon Jul 15 11:07:03 2013 +0100 232.3 @@ -1,5 +1,5 @@ 232.4 /* 232.5 - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. 232.6 + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. 232.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 232.8 * 232.9 * This code is free software; you can redistribute it and/or modify it 232.10 @@ -799,6 +799,14 @@ 232.11 VMError* volatile VMError::first_error = NULL; 232.12 volatile jlong VMError::first_error_tid = -1; 232.13 232.14 +// An error could happen before tty is initialized or after it has been 232.15 +// destroyed. Here we use a very simple unbuffered fdStream for printing. 232.16 +// Only out.print_raw() and out.print_raw_cr() should be used, as other 232.17 +// printing methods need to allocate large buffer on stack. To format a 232.18 +// string, use jio_snprintf() with a static buffer or use staticBufferStream. 232.19 +fdStream VMError::out(defaultStream::output_fd()); 232.20 +fdStream VMError::log; // error log used by VMError::report_and_die() 232.21 + 232.22 /** Expand a pattern into a buffer starting at pos and open a file using constructed path */ 232.23 static int expand_and_open(const char* pattern, char* buf, size_t buflen, size_t pos) { 232.24 int fd = -1; 232.25 @@ -853,13 +861,6 @@ 232.26 // Don't allocate large buffer on stack 232.27 static char buffer[O_BUFLEN]; 232.28 232.29 - // An error could happen before tty is initialized or after it has been 232.30 - // destroyed. Here we use a very simple unbuffered fdStream for printing. 232.31 - // Only out.print_raw() and out.print_raw_cr() should be used, as other 232.32 - // printing methods need to allocate large buffer on stack. To format a 232.33 - // string, use jio_snprintf() with a static buffer or use staticBufferStream. 
232.34 - static fdStream out(defaultStream::output_fd()); 232.35 - 232.36 // How many errors occurred in error handler when reporting first_error. 232.37 static int recursive_error_count; 232.38 232.39 @@ -868,7 +869,6 @@ 232.40 static bool out_done = false; // done printing to standard out 232.41 static bool log_done = false; // done saving error log 232.42 static bool transmit_report_done = false; // done error reporting 232.43 - static fdStream log; // error log 232.44 232.45 // disble NMT to avoid further exception 232.46 MemTracker::shutdown(MemTracker::NMT_error_reporting);
233.1 --- a/src/share/vm/utilities/vmError.hpp Thu Jul 11 12:59:03 2013 -0400 233.2 +++ b/src/share/vm/utilities/vmError.hpp Mon Jul 15 11:07:03 2013 +0100 233.3 @@ -1,5 +1,5 @@ 233.4 /* 233.5 - * Copyright (c) 2003, 2012, Oracle and/or its affiliates. All rights reserved. 233.6 + * Copyright (c) 2003, 2013, Oracle and/or its affiliates. All rights reserved. 233.7 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 233.8 * 233.9 * This code is free software; you can redistribute it and/or modify it 233.10 @@ -96,6 +96,9 @@ 233.11 return (id != OOM_MALLOC_ERROR) && (id != OOM_MMAP_ERROR); 233.12 } 233.13 233.14 + static fdStream out; 233.15 + static fdStream log; // error log used by VMError::report_and_die() 233.16 + 233.17 public: 233.18 233.19 // Constructor for crashes
234.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 234.2 +++ b/test/compiler/7088419/CRCTest.java Mon Jul 15 11:07:03 2013 +0100 234.3 @@ -0,0 +1,132 @@ 234.4 +/* 234.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. 234.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 234.7 + * 234.8 + * This code is free software; you can redistribute it and/or modify it 234.9 + * under the terms of the GNU General Public License version 2 only, as 234.10 + * published by the Free Software Foundation. 234.11 + * 234.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 234.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 234.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 234.15 + * version 2 for more details (a copy is included in the LICENSE file that 234.16 + * accompanied this code). 234.17 + * 234.18 + * You should have received a copy of the GNU General Public License version 234.19 + * 2 along with this work; if not, write to the Free Software Foundation, 234.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 234.21 + * 234.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 234.23 + * or visit www.oracle.com if you need additional information or have any 234.24 + * questions. 
234.25 + */ 234.26 + 234.27 +/* 234.28 + @test 234.29 + @bug 7088419 234.30 + @run main CRCTest 234.31 + @summary Use x86 Hardware CRC32 Instruction with java.util.zip.CRC32 and java.util.zip.Adler32 234.32 + */ 234.33 + 234.34 +import java.nio.ByteBuffer; 234.35 +import java.util.zip.CRC32; 234.36 +import java.util.zip.Checksum; 234.37 + 234.38 +public class CRCTest { 234.39 + 234.40 + public static void main(String[] args) throws Exception { 234.41 + 234.42 + byte[] b = initializedBytes(4096 * 4096); 234.43 + 234.44 + { 234.45 + CRC32 crc1 = new CRC32(); 234.46 + CRC32 crc2 = new CRC32(); 234.47 + CRC32 crc3 = new CRC32(); 234.48 + CRC32 crc4 = new CRC32(); 234.49 + 234.50 + crc1.update(b, 0, b.length); 234.51 + updateSerial(crc2, b, 0, b.length); 234.52 + updateDirect(crc3, b, 0, b.length); 234.53 + updateSerialSlow(crc4, b, 0, b.length); 234.54 + 234.55 + check(crc1, crc2); 234.56 + check(crc3, crc4); 234.57 + check(crc1, crc3); 234.58 + 234.59 + crc1.update(17); 234.60 + crc2.update(17); 234.61 + crc3.update(17); 234.62 + crc4.update(17); 234.63 + 234.64 + crc1.update(b, 1, b.length-2); 234.65 + updateSerial(crc2, b, 1, b.length-2); 234.66 + updateDirect(crc3, b, 1, b.length-2); 234.67 + updateSerialSlow(crc4, b, 1, b.length-2); 234.68 + 234.69 + check(crc1, crc2); 234.70 + check(crc3, crc4); 234.71 + check(crc1, crc3); 234.72 + 234.73 + report("finished huge crc", crc1, crc2, crc3, crc4); 234.74 + 234.75 + for (int i = 0; i < 256; i++) { 234.76 + for (int j = 0; j < 256; j += 1) { 234.77 + crc1.update(b, i, j); 234.78 + updateSerial(crc2, b, i, j); 234.79 + updateDirect(crc3, b, i, j); 234.80 + updateSerialSlow(crc4, b, i, j); 234.81 + 234.82 + check(crc1, crc2); 234.83 + check(crc3, crc4); 234.84 + check(crc1, crc3); 234.85 + 234.86 + } 234.87 + } 234.88 + 234.89 + report("finished small survey crc", crc1, crc2, crc3, crc4); 234.90 + } 234.91 + 234.92 + } 234.93 + 234.94 + private static void report(String s, Checksum crc1, Checksum crc2, 234.95 + Checksum 
crc3, Checksum crc4) { 234.96 + System.out.println(s + ", crc1 = " + crc1.getValue() + 234.97 + ", crc2 = " + crc2.getValue()+ 234.98 + ", crc3 = " + crc3.getValue()+ 234.99 + ", crc4 = " + crc4.getValue()); 234.100 + } 234.101 + 234.102 + private static void check(Checksum crc1, Checksum crc2) throws Exception { 234.103 + if (crc1.getValue() != crc2.getValue()) { 234.104 + String s = "value 1 = " + crc1.getValue() + ", value 2 = " + crc2.getValue(); 234.105 + System.err.println(s); 234.106 + throw new Exception(s); 234.107 + } 234.108 + } 234.109 + 234.110 + private static byte[] initializedBytes(int M) { 234.111 + byte[] bytes = new byte[M]; 234.112 + for (int i = 0; i < bytes.length; i++) { 234.113 + bytes[i] = (byte) i; 234.114 + } 234.115 + return bytes; 234.116 + } 234.117 + 234.118 + private static void updateSerial(Checksum crc, byte[] b, int start, int length) { 234.119 + for (int i = 0; i < length; i++) 234.120 + crc.update(b[i+start]); 234.121 + } 234.122 + 234.123 + private static void updateSerialSlow(Checksum crc, byte[] b, int start, int length) { 234.124 + for (int i = 0; i < length; i++) 234.125 + crc.update(b[i+start]); 234.126 + crc.getValue(); 234.127 + } 234.128 + 234.129 + private static void updateDirect(CRC32 crc3, byte[] b, int start, int length) { 234.130 + ByteBuffer buf = ByteBuffer.allocateDirect(length); 234.131 + buf.put(b, start, length); 234.132 + buf.flip(); 234.133 + crc3.update(buf); 234.134 + } 234.135 +}
235.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 235.2 +++ b/test/compiler/8005956/PolynomialRoot.java Mon Jul 15 11:07:03 2013 +0100 235.3 @@ -0,0 +1,776 @@ 235.4 +//package com.polytechnik.utils; 235.5 +/* 235.6 + * (C) Vladislav Malyshkin 2010 235.7 + * This file is under GPL version 3. 235.8 + * 235.9 + */ 235.10 + 235.11 +/** Polynomial root. 235.12 + * @version $Id: PolynomialRoot.java,v 1.105 2012/08/18 00:00:05 mal Exp $ 235.13 + * @author Vladislav Malyshkin mal@gromco.com 235.14 + */ 235.15 + 235.16 +/** 235.17 +* @test 235.18 +* @bug 8005956 235.19 +* @summary C2: assert(!def_outside->member(r)) failed: Use of external LRG overlaps the same LRG defined in this block 235.20 +* 235.21 +* @run main PolynomialRoot 235.22 +*/ 235.23 + 235.24 +public class PolynomialRoot { 235.25 + 235.26 + 235.27 +public static int findPolynomialRoots(final int n, 235.28 + final double [] p, 235.29 + final double [] re_root, 235.30 + final double [] im_root) 235.31 +{ 235.32 + if(n==4) 235.33 + { 235.34 + return root4(p,re_root,im_root); 235.35 + } 235.36 + else if(n==3) 235.37 + { 235.38 + return root3(p,re_root,im_root); 235.39 + } 235.40 + else if(n==2) 235.41 + { 235.42 + return root2(p,re_root,im_root); 235.43 + } 235.44 + else if(n==1) 235.45 + { 235.46 + return root1(p,re_root,im_root); 235.47 + } 235.48 + else 235.49 + { 235.50 + throw new RuntimeException("n="+n+" is not supported yet"); 235.51 + } 235.52 +} 235.53 + 235.54 + 235.55 + 235.56 +static final double SQRT3=Math.sqrt(3.0),SQRT2=Math.sqrt(2.0); 235.57 + 235.58 + 235.59 +private static final boolean PRINT_DEBUG=false; 235.60 + 235.61 +public static int root4(final double [] p,final double [] re_root,final double [] im_root) 235.62 +{ 235.63 + if(PRINT_DEBUG) System.err.println("=====================root4:p="+java.util.Arrays.toString(p)); 235.64 + final double vs=p[4]; 235.65 + if(PRINT_DEBUG) System.err.println("p[4]="+p[4]); 235.66 + if(!(Math.abs(vs)>EPS)) 235.67 + { 235.68 + 
re_root[0]=re_root[1]=re_root[2]=re_root[3]= 235.69 + im_root[0]=im_root[1]=im_root[2]=im_root[3]=Double.NaN; 235.70 + return -1; 235.71 + } 235.72 + 235.73 +/* zsolve_quartic.c - finds the complex roots of 235.74 + * x^4 + a x^3 + b x^2 + c x + d = 0 235.75 + */ 235.76 + final double a=p[3]/vs,b=p[2]/vs,c=p[1]/vs,d=p[0]/vs; 235.77 + if(PRINT_DEBUG) System.err.println("input a="+a+" b="+b+" c="+c+" d="+d); 235.78 + 235.79 + 235.80 + final double r4 = 1.0 / 4.0; 235.81 + final double q2 = 1.0 / 2.0, q4 = 1.0 / 4.0, q8 = 1.0 / 8.0; 235.82 + final double q1 = 3.0 / 8.0, q3 = 3.0 / 16.0; 235.83 + final int mt; 235.84 + 235.85 + /* Deal easily with the cases where the quartic is degenerate. The 235.86 + * ordering of solutions is done explicitly. */ 235.87 + if (0 == b && 0 == c) 235.88 + { 235.89 + if (0 == d) 235.90 + { 235.91 + re_root[0]=-a; 235.92 + im_root[0]=im_root[1]=im_root[2]=im_root[3]=0; 235.93 + re_root[1]=re_root[2]=re_root[3]=0; 235.94 + return 4; 235.95 + } 235.96 + else if (0 == a) 235.97 + { 235.98 + if (d > 0) 235.99 + { 235.100 + final double sq4 = Math.sqrt(Math.sqrt(d)); 235.101 + re_root[0]=sq4*SQRT2/2; 235.102 + im_root[0]=re_root[0]; 235.103 + re_root[1]=-re_root[0]; 235.104 + im_root[1]=re_root[0]; 235.105 + re_root[2]=-re_root[0]; 235.106 + im_root[2]=-re_root[0]; 235.107 + re_root[3]=re_root[0]; 235.108 + im_root[3]=-re_root[0]; 235.109 + if(PRINT_DEBUG) System.err.println("Path a=0 d>0"); 235.110 + } 235.111 + else 235.112 + { 235.113 + final double sq4 = Math.sqrt(Math.sqrt(-d)); 235.114 + re_root[0]=sq4; 235.115 + im_root[0]=0; 235.116 + re_root[1]=0; 235.117 + im_root[1]=sq4; 235.118 + re_root[2]=0; 235.119 + im_root[2]=-sq4; 235.120 + re_root[3]=-sq4; 235.121 + im_root[3]=0; 235.122 + if(PRINT_DEBUG) System.err.println("Path a=0 d<0"); 235.123 + } 235.124 + return 4; 235.125 + } 235.126 + } 235.127 + 235.128 + if (0.0 == c && 0.0 == d) 235.129 + { 235.130 + root2(new double []{p[2],p[3],p[4]},re_root,im_root); 235.131 + 
re_root[2]=im_root[2]=re_root[3]=im_root[3]=0; 235.132 + return 4; 235.133 + } 235.134 + 235.135 + if(PRINT_DEBUG) System.err.println("G Path c="+c+" d="+d); 235.136 + final double [] u=new double[3]; 235.137 + 235.138 + if(PRINT_DEBUG) System.err.println("Generic Path"); 235.139 + /* For non-degenerate solutions, proceed by constructing and 235.140 + * solving the resolvent cubic */ 235.141 + final double aa = a * a; 235.142 + final double pp = b - q1 * aa; 235.143 + final double qq = c - q2 * a * (b - q4 * aa); 235.144 + final double rr = d - q4 * a * (c - q4 * a * (b - q3 * aa)); 235.145 + final double rc = q2 * pp , rc3 = rc / 3; 235.146 + final double sc = q4 * (q4 * pp * pp - rr); 235.147 + final double tc = -(q8 * qq * q8 * qq); 235.148 + if(PRINT_DEBUG) System.err.println("aa="+aa+" pp="+pp+" qq="+qq+" rr="+rr+" rc="+rc+" sc="+sc+" tc="+tc); 235.149 + final boolean flag_realroots; 235.150 + 235.151 + /* This code solves the resolvent cubic in a convenient fashion 235.152 + * for this implementation of the quartic. If there are three real 235.153 + * roots, then they are placed directly into u[]. If two are 235.154 + * complex, then the real root is put into u[0] and the real 235.155 + * and imaginary part of the complex roots are placed into 235.156 + * u[1] and u[2], respectively. 
*/ 235.157 + { 235.158 + final double qcub = (rc * rc - 3 * sc); 235.159 + final double rcub = (rc*(2 * rc * rc - 9 * sc) + 27 * tc); 235.160 + 235.161 + final double Q = qcub / 9; 235.162 + final double R = rcub / 54; 235.163 + 235.164 + final double Q3 = Q * Q * Q; 235.165 + final double R2 = R * R; 235.166 + 235.167 + final double CR2 = 729 * rcub * rcub; 235.168 + final double CQ3 = 2916 * qcub * qcub * qcub; 235.169 + 235.170 + if(PRINT_DEBUG) System.err.println("CR2="+CR2+" CQ3="+CQ3+" R="+R+" Q="+Q); 235.171 + 235.172 + if (0 == R && 0 == Q) 235.173 + { 235.174 + flag_realroots=true; 235.175 + u[0] = -rc3; 235.176 + u[1] = -rc3; 235.177 + u[2] = -rc3; 235.178 + } 235.179 + else if (CR2 == CQ3) 235.180 + { 235.181 + flag_realroots=true; 235.182 + final double sqrtQ = Math.sqrt (Q); 235.183 + if (R > 0) 235.184 + { 235.185 + u[0] = -2 * sqrtQ - rc3; 235.186 + u[1] = sqrtQ - rc3; 235.187 + u[2] = sqrtQ - rc3; 235.188 + } 235.189 + else 235.190 + { 235.191 + u[0] = -sqrtQ - rc3; 235.192 + u[1] = -sqrtQ - rc3; 235.193 + u[2] = 2 * sqrtQ - rc3; 235.194 + } 235.195 + } 235.196 + else if (R2 < Q3) 235.197 + { 235.198 + flag_realroots=true; 235.199 + final double ratio = (R >= 0?1:-1) * Math.sqrt (R2 / Q3); 235.200 + final double theta = Math.acos (ratio); 235.201 + final double norm = -2 * Math.sqrt (Q); 235.202 + 235.203 + u[0] = norm * Math.cos (theta / 3) - rc3; 235.204 + u[1] = norm * Math.cos ((theta + 2.0 * Math.PI) / 3) - rc3; 235.205 + u[2] = norm * Math.cos ((theta - 2.0 * Math.PI) / 3) - rc3; 235.206 + } 235.207 + else 235.208 + { 235.209 + flag_realroots=false; 235.210 + final double A = -(R >= 0?1:-1)*Math.pow(Math.abs(R)+Math.sqrt(R2-Q3),1.0/3.0); 235.211 + final double B = Q / A; 235.212 + 235.213 + u[0] = A + B - rc3; 235.214 + u[1] = -0.5 * (A + B) - rc3; 235.215 + u[2] = -(SQRT3*0.5) * Math.abs (A - B); 235.216 + } 235.217 + if(PRINT_DEBUG) System.err.println("u[0]="+u[0]+" u[1]="+u[1]+" u[2]="+u[2]+" qq="+qq+" disc="+((CR2 - CQ3) / 2125764.0)); 
235.218 + } 235.219 + /* End of solution to resolvent cubic */ 235.220 + 235.221 + /* Combine the square roots of the roots of the cubic 235.222 + * resolvent appropriately. Also, calculate 'mt' which 235.223 + * designates the nature of the roots: 235.224 + * mt=1 : 4 real roots 235.225 + * mt=2 : 0 real roots 235.226 + * mt=3 : 2 real roots 235.227 + */ 235.228 + 235.229 + 235.230 + final double w1_re,w1_im,w2_re,w2_im,w3_re,w3_im,mod_w1w2,mod_w1w2_squared; 235.231 + if (flag_realroots) 235.232 + { 235.233 + mod_w1w2=-1; 235.234 + mt = 2; 235.235 + int jmin=0; 235.236 + double vmin=Math.abs(u[jmin]); 235.237 + for(int j=1;j<3;j++) 235.238 + { 235.239 + final double vx=Math.abs(u[j]); 235.240 + if(vx<vmin) 235.241 + { 235.242 + vmin=vx; 235.243 + jmin=j; 235.244 + } 235.245 + } 235.246 + final double u1=u[(jmin+1)%3],u2=u[(jmin+2)%3]; 235.247 + mod_w1w2_squared=Math.abs(u1*u2); 235.248 + if(u1>=0) 235.249 + { 235.250 + w1_re=Math.sqrt(u1); 235.251 + w1_im=0; 235.252 + } 235.253 + else 235.254 + { 235.255 + w1_re=0; 235.256 + w1_im=Math.sqrt(-u1); 235.257 + } 235.258 + if(u2>=0) 235.259 + { 235.260 + w2_re=Math.sqrt(u2); 235.261 + w2_im=0; 235.262 + } 235.263 + else 235.264 + { 235.265 + w2_re=0; 235.266 + w2_im=Math.sqrt(-u2); 235.267 + } 235.268 + if(PRINT_DEBUG) System.err.println("u1="+u1+" u2="+u2+" jmin="+jmin); 235.269 + } 235.270 + else 235.271 + { 235.272 + mt = 3; 235.273 + final double w_mod2_sq=u[1]*u[1]+u[2]*u[2],w_mod2=Math.sqrt(w_mod2_sq),w_mod=Math.sqrt(w_mod2); 235.274 + if(w_mod2_sq<=0) 235.275 + { 235.276 + w1_re=w1_im=0; 235.277 + } 235.278 + else 235.279 + { 235.280 + // calculate square root of a complex number (u[1],u[2]) 235.281 + // the result is in the (w1_re,w1_im) 235.282 + final double absu1=Math.abs(u[1]),absu2=Math.abs(u[2]),w; 235.283 + if(absu1>=absu2) 235.284 + { 235.285 + final double t=absu2/absu1; 235.286 + w=Math.sqrt(absu1*0.5 * (1.0 + Math.sqrt(1.0 + t * t))); 235.287 + if(PRINT_DEBUG) System.err.println(" Path1 "); 235.288 + 
} 235.289 + else 235.290 + { 235.291 + final double t=absu1/absu2; 235.292 + w=Math.sqrt(absu2*0.5 * (t + Math.sqrt(1.0 + t * t))); 235.293 + if(PRINT_DEBUG) System.err.println(" Path1a "); 235.294 + } 235.295 + if(u[1]>=0) 235.296 + { 235.297 + w1_re=w; 235.298 + w1_im=u[2]/(2*w); 235.299 + if(PRINT_DEBUG) System.err.println(" Path2 "); 235.300 + } 235.301 + else 235.302 + { 235.303 + final double vi = (u[2] >= 0) ? w : -w; 235.304 + w1_re=u[2]/(2*vi); 235.305 + w1_im=vi; 235.306 + if(PRINT_DEBUG) System.err.println(" Path2a "); 235.307 + } 235.308 + } 235.309 + final double absu0=Math.abs(u[0]); 235.310 + if(w_mod2>=absu0) 235.311 + { 235.312 + mod_w1w2=w_mod2; 235.313 + mod_w1w2_squared=w_mod2_sq; 235.314 + w2_re=w1_re; 235.315 + w2_im=-w1_im; 235.316 + } 235.317 + else 235.318 + { 235.319 + mod_w1w2=-1; 235.320 + mod_w1w2_squared=w_mod2*absu0; 235.321 + if(u[0]>=0) 235.322 + { 235.323 + w2_re=Math.sqrt(absu0); 235.324 + w2_im=0; 235.325 + } 235.326 + else 235.327 + { 235.328 + w2_re=0; 235.329 + w2_im=Math.sqrt(absu0); 235.330 + } 235.331 + } 235.332 + if(PRINT_DEBUG) System.err.println("u[0]="+u[0]+"u[1]="+u[1]+" u[2]="+u[2]+" absu0="+absu0+" w_mod="+w_mod+" w_mod2="+w_mod2); 235.333 + } 235.334 + 235.335 + /* Solve the quadratic in order to obtain the roots 235.336 + * to the quartic */ 235.337 + if(mod_w1w2>0) 235.338 + { 235.339 + // a shorcut to reduce rounding error 235.340 + w3_re=qq/(-8)/mod_w1w2; 235.341 + w3_im=0; 235.342 + } 235.343 + else if(mod_w1w2_squared>0) 235.344 + { 235.345 + // regular path 235.346 + final double mqq8n=qq/(-8)/mod_w1w2_squared; 235.347 + w3_re=mqq8n*(w1_re*w2_re-w1_im*w2_im); 235.348 + w3_im=-mqq8n*(w1_re*w2_im+w2_re*w1_im); 235.349 + } 235.350 + else 235.351 + { 235.352 + // typically occur when qq==0 235.353 + w3_re=w3_im=0; 235.354 + } 235.355 + 235.356 + final double h = r4 * a; 235.357 + if(PRINT_DEBUG) System.err.println("w1_re="+w1_re+" w1_im="+w1_im+" w2_re="+w2_re+" w2_im="+w2_im+" w3_re="+w3_re+" w3_im="+w3_im+" 
h="+h); 235.358 + 235.359 + re_root[0]=w1_re+w2_re+w3_re-h; 235.360 + im_root[0]=w1_im+w2_im+w3_im; 235.361 + re_root[1]=-(w1_re+w2_re)+w3_re-h; 235.362 + im_root[1]=-(w1_im+w2_im)+w3_im; 235.363 + re_root[2]=w2_re-w1_re-w3_re-h; 235.364 + im_root[2]=w2_im-w1_im-w3_im; 235.365 + re_root[3]=w1_re-w2_re-w3_re-h; 235.366 + im_root[3]=w1_im-w2_im-w3_im; 235.367 + 235.368 + return 4; 235.369 +} 235.370 + 235.371 + 235.372 + 235.373 + static void setRandomP(final double [] p,final int n,java.util.Random r) 235.374 + { 235.375 + if(r.nextDouble()<0.1) 235.376 + { 235.377 + // integer coefficiens 235.378 + for(int j=0;j<p.length;j++) 235.379 + { 235.380 + if(j<=n) 235.381 + { 235.382 + p[j]=(r.nextInt(2)<=0?-1:1)*r.nextInt(10); 235.383 + } 235.384 + else 235.385 + { 235.386 + p[j]=0; 235.387 + } 235.388 + } 235.389 + } 235.390 + else 235.391 + { 235.392 + // real coefficiens 235.393 + for(int j=0;j<p.length;j++) 235.394 + { 235.395 + if(j<=n) 235.396 + { 235.397 + p[j]=-1+2*r.nextDouble(); 235.398 + } 235.399 + else 235.400 + { 235.401 + p[j]=0; 235.402 + } 235.403 + } 235.404 + } 235.405 + if(Math.abs(p[n])<1e-2) 235.406 + { 235.407 + p[n]=(r.nextInt(2)<=0?-1:1)*(0.1+r.nextDouble()); 235.408 + } 235.409 + } 235.410 + 235.411 + 235.412 + static void checkValues(final double [] p, 235.413 + final int n, 235.414 + final double rex, 235.415 + final double imx, 235.416 + final double eps, 235.417 + final String txt) 235.418 + { 235.419 + double res=0,ims=0,sabs=0; 235.420 + final double xabs=Math.abs(rex)+Math.abs(imx); 235.421 + for(int k=n;k>=0;k--) 235.422 + { 235.423 + final double res1=(res*rex-ims*imx)+p[k]; 235.424 + final double ims1=(ims*rex+res*imx); 235.425 + res=res1; 235.426 + ims=ims1; 235.427 + sabs+=xabs*sabs+p[k]; 235.428 + } 235.429 + sabs=Math.abs(sabs); 235.430 + if(false && sabs>1/eps? 
235.431 + (!(Math.abs(res/sabs)<=eps)||!(Math.abs(ims/sabs)<=eps)) 235.432 + : 235.433 + (!(Math.abs(res)<=eps)||!(Math.abs(ims)<=eps))) 235.434 + { 235.435 + throw new RuntimeException( 235.436 + getPolinomTXT(p)+"\n"+ 235.437 + "\t x.r="+rex+" x.i="+imx+"\n"+ 235.438 + "res/sabs="+(res/sabs)+" ims/sabs="+(ims/sabs)+ 235.439 + " sabs="+sabs+ 235.440 + "\nres="+res+" ims="+ims+" n="+n+" eps="+eps+" "+ 235.441 + " sabs>1/eps="+(sabs>1/eps)+ 235.442 + " f1="+(!(Math.abs(res/sabs)<=eps)||!(Math.abs(ims/sabs)<=eps))+ 235.443 + " f2="+(!(Math.abs(res)<=eps)||!(Math.abs(ims)<=eps))+ 235.444 + " "+txt); 235.445 + } 235.446 + } 235.447 + 235.448 + static String getPolinomTXT(final double [] p) 235.449 + { 235.450 + final StringBuilder buf=new StringBuilder(); 235.451 + buf.append("order="+(p.length-1)+"\t"); 235.452 + for(int k=0;k<p.length;k++) 235.453 + { 235.454 + buf.append("p["+k+"]="+p[k]+";"); 235.455 + } 235.456 + return buf.toString(); 235.457 + } 235.458 + 235.459 + static String getRootsTXT(int nr,final double [] re,final double [] im) 235.460 + { 235.461 + final StringBuilder buf=new StringBuilder(); 235.462 + for(int k=0;k<nr;k++) 235.463 + { 235.464 + buf.append("x."+k+"("+re[k]+","+im[k]+")\n"); 235.465 + } 235.466 + return buf.toString(); 235.467 + } 235.468 + 235.469 + static void testRoots(final int n, 235.470 + final int n_tests, 235.471 + final java.util.Random rn, 235.472 + final double eps) 235.473 + { 235.474 + final double [] p=new double [n+1]; 235.475 + final double [] rex=new double [n],imx=new double [n]; 235.476 + for(int i=0;i<n_tests;i++) 235.477 + { 235.478 + for(int dg=n;dg-->-1;) 235.479 + { 235.480 + for(int dr=3;dr-->0;) 235.481 + { 235.482 + setRandomP(p,n,rn); 235.483 + for(int j=0;j<=dg;j++) 235.484 + { 235.485 + p[j]=0; 235.486 + } 235.487 + if(dr==0) 235.488 + { 235.489 + p[0]=-1+2.0*rn.nextDouble(); 235.490 + } 235.491 + else if(dr==1) 235.492 + { 235.493 + p[0]=p[1]=0; 235.494 + } 235.495 + 235.496 + 
findPolynomialRoots(n,p,rex,imx); 235.497 + 235.498 + for(int j=0;j<n;j++) 235.499 + { 235.500 + //System.err.println("j="+j); 235.501 + checkValues(p,n,rex[j],imx[j],eps," t="+i); 235.502 + } 235.503 + } 235.504 + } 235.505 + } 235.506 + System.err.println("testRoots(): n_tests="+n_tests+" OK, dim="+n); 235.507 + } 235.508 + 235.509 + 235.510 + 235.511 + 235.512 + static final double EPS=0; 235.513 + 235.514 + public static int root1(final double [] p,final double [] re_root,final double [] im_root) 235.515 + { 235.516 + if(!(Math.abs(p[1])>EPS)) 235.517 + { 235.518 + re_root[0]=im_root[0]=Double.NaN; 235.519 + return -1; 235.520 + } 235.521 + re_root[0]=-p[0]/p[1]; 235.522 + im_root[0]=0; 235.523 + return 1; 235.524 + } 235.525 + 235.526 + public static int root2(final double [] p,final double [] re_root,final double [] im_root) 235.527 + { 235.528 + if(!(Math.abs(p[2])>EPS)) 235.529 + { 235.530 + re_root[0]=re_root[1]=im_root[0]=im_root[1]=Double.NaN; 235.531 + return -1; 235.532 + } 235.533 + final double b2=0.5*(p[1]/p[2]),c=p[0]/p[2],d=b2*b2-c; 235.534 + if(d>=0) 235.535 + { 235.536 + final double sq=Math.sqrt(d); 235.537 + if(b2<0) 235.538 + { 235.539 + re_root[1]=-b2+sq; 235.540 + re_root[0]=c/re_root[1]; 235.541 + } 235.542 + else if(b2>0) 235.543 + { 235.544 + re_root[0]=-b2-sq; 235.545 + re_root[1]=c/re_root[0]; 235.546 + } 235.547 + else 235.548 + { 235.549 + re_root[0]=-b2-sq; 235.550 + re_root[1]=-b2+sq; 235.551 + } 235.552 + im_root[0]=im_root[1]=0; 235.553 + } 235.554 + else 235.555 + { 235.556 + final double sq=Math.sqrt(-d); 235.557 + re_root[0]=re_root[1]=-b2; 235.558 + im_root[0]=sq; 235.559 + im_root[1]=-sq; 235.560 + } 235.561 + return 2; 235.562 + } 235.563 + 235.564 + public static int root3(final double [] p,final double [] re_root,final double [] im_root) 235.565 + { 235.566 + final double vs=p[3]; 235.567 + if(!(Math.abs(vs)>EPS)) 235.568 + { 235.569 + re_root[0]=re_root[1]=re_root[2]= 235.570 + 
im_root[0]=im_root[1]=im_root[2]=Double.NaN; 235.571 + return -1; 235.572 + } 235.573 + final double a=p[2]/vs,b=p[1]/vs,c=p[0]/vs; 235.574 + /* zsolve_cubic.c - finds the complex roots of x^3 + a x^2 + b x + c = 0 235.575 + */ 235.576 + final double q = (a * a - 3 * b); 235.577 + final double r = (a*(2 * a * a - 9 * b) + 27 * c); 235.578 + 235.579 + final double Q = q / 9; 235.580 + final double R = r / 54; 235.581 + 235.582 + final double Q3 = Q * Q * Q; 235.583 + final double R2 = R * R; 235.584 + 235.585 + final double CR2 = 729 * r * r; 235.586 + final double CQ3 = 2916 * q * q * q; 235.587 + final double a3=a/3; 235.588 + 235.589 + if (R == 0 && Q == 0) 235.590 + { 235.591 + re_root[0]=re_root[1]=re_root[2]=-a3; 235.592 + im_root[0]=im_root[1]=im_root[2]=0; 235.593 + return 3; 235.594 + } 235.595 + else if (CR2 == CQ3) 235.596 + { 235.597 + /* this test is actually R2 == Q3, written in a form suitable 235.598 + for exact computation with integers */ 235.599 + 235.600 + /* Due to finite precision some double roots may be missed, and 235.601 + will be considered to be a pair of complex roots z = x +/- 235.602 + epsilon i close to the real axis. */ 235.603 + 235.604 + final double sqrtQ = Math.sqrt (Q); 235.605 + 235.606 + if (R > 0) 235.607 + { 235.608 + re_root[0] = -2 * sqrtQ - a3; 235.609 + re_root[1]=re_root[2]=sqrtQ - a3; 235.610 + im_root[0]=im_root[1]=im_root[2]=0; 235.611 + } 235.612 + else 235.613 + { 235.614 + re_root[0]=re_root[1] = -sqrtQ - a3; 235.615 + re_root[2]=2 * sqrtQ - a3; 235.616 + im_root[0]=im_root[1]=im_root[2]=0; 235.617 + } 235.618 + return 3; 235.619 + } 235.620 + else if (R2 < Q3) 235.621 + { 235.622 + final double sgnR = (R >= 0 ? 
1 : -1); 235.623 + final double ratio = sgnR * Math.sqrt (R2 / Q3); 235.624 + final double theta = Math.acos (ratio); 235.625 + final double norm = -2 * Math.sqrt (Q); 235.626 + final double r0 = norm * Math.cos (theta/3) - a3; 235.627 + final double r1 = norm * Math.cos ((theta + 2.0 * Math.PI) / 3) - a3; 235.628 + final double r2 = norm * Math.cos ((theta - 2.0 * Math.PI) / 3) - a3; 235.629 + 235.630 + re_root[0]=r0; 235.631 + re_root[1]=r1; 235.632 + re_root[2]=r2; 235.633 + im_root[0]=im_root[1]=im_root[2]=0; 235.634 + return 3; 235.635 + } 235.636 + else 235.637 + { 235.638 + final double sgnR = (R >= 0 ? 1 : -1); 235.639 + final double A = -sgnR * Math.pow (Math.abs (R) + Math.sqrt (R2 - Q3), 1.0 / 3.0); 235.640 + final double B = Q / A; 235.641 + 235.642 + re_root[0]=A + B - a3; 235.643 + im_root[0]=0; 235.644 + re_root[1]=-0.5 * (A + B) - a3; 235.645 + im_root[1]=-(SQRT3*0.5) * Math.abs(A - B); 235.646 + re_root[2]=re_root[1]; 235.647 + im_root[2]=-im_root[1]; 235.648 + return 3; 235.649 + } 235.650 + 235.651 + } 235.652 + 235.653 + 235.654 + static void root3a(final double [] p,final double [] re_root,final double [] im_root) 235.655 + { 235.656 + if(Math.abs(p[3])>EPS) 235.657 + { 235.658 + final double v=p[3], 235.659 + a=p[2]/v,b=p[1]/v,c=p[0]/v, 235.660 + a3=a/3,a3a=a3*a, 235.661 + pd3=(b-a3a)/3, 235.662 + qd2=a3*(a3a/3-0.5*b)+0.5*c, 235.663 + Q=pd3*pd3*pd3+qd2*qd2; 235.664 + if(Q<0) 235.665 + { 235.666 + // three real roots 235.667 + final double SQ=Math.sqrt(-Q); 235.668 + final double th=Math.atan2(SQ,-qd2); 235.669 + im_root[0]=im_root[1]=im_root[2]=0; 235.670 + final double f=2*Math.sqrt(-pd3); 235.671 + re_root[0]=f*Math.cos(th/3)-a3; 235.672 + re_root[1]=f*Math.cos((th+2*Math.PI)/3)-a3; 235.673 + re_root[2]=f*Math.cos((th+4*Math.PI)/3)-a3; 235.674 + //System.err.println("3r"); 235.675 + } 235.676 + else 235.677 + { 235.678 + // one real & two complex roots 235.679 + final double SQ=Math.sqrt(Q); 235.680 + final double r1=-qd2+SQ,r2=-qd2-SQ; 
235.681 + final double v1=Math.signum(r1)*Math.pow(Math.abs(r1),1.0/3), 235.682 + v2=Math.signum(r2)*Math.pow(Math.abs(r2),1.0/3), 235.683 + sv=v1+v2; 235.684 + // real root 235.685 + re_root[0]=sv-a3; 235.686 + im_root[0]=0; 235.687 + // complex roots 235.688 + re_root[1]=re_root[2]=-0.5*sv-a3; 235.689 + im_root[1]=(v1-v2)*(SQRT3*0.5); 235.690 + im_root[2]=-im_root[1]; 235.691 + //System.err.println("1r2c"); 235.692 + } 235.693 + } 235.694 + else 235.695 + { 235.696 + re_root[0]=re_root[1]=re_root[2]=im_root[0]=im_root[1]=im_root[2]=Double.NaN; 235.697 + } 235.698 + } 235.699 + 235.700 + 235.701 + static void printSpecialValues() 235.702 + { 235.703 + for(int st=0;st<6;st++) 235.704 + { 235.705 + //final double [] p=new double []{8,1,3,3.6,1}; 235.706 + final double [] re_root=new double [4],im_root=new double [4]; 235.707 + final double [] p; 235.708 + final int n; 235.709 + if(st<=3) 235.710 + { 235.711 + if(st<=0) 235.712 + { 235.713 + p=new double []{2,-4,6,-4,1}; 235.714 + //p=new double []{-6,6,-6,8,-2}; 235.715 + } 235.716 + else if(st==1) 235.717 + { 235.718 + p=new double []{0,-4,8,3,-9}; 235.719 + } 235.720 + else if(st==2) 235.721 + { 235.722 + p=new double []{-1,0,2,0,-1}; 235.723 + } 235.724 + else 235.725 + { 235.726 + p=new double []{-5,2,8,-2,-3}; 235.727 + } 235.728 + root4(p,re_root,im_root); 235.729 + n=4; 235.730 + } 235.731 + else 235.732 + { 235.733 + p=new double []{0,2,0,1}; 235.734 + if(st==4) 235.735 + { 235.736 + p[1]=-p[1]; 235.737 + } 235.738 + root3(p,re_root,im_root); 235.739 + n=3; 235.740 + } 235.741 + System.err.println("======== n="+n); 235.742 + for(int i=0;i<=n;i++) 235.743 + { 235.744 + if(i<n) 235.745 + { 235.746 + System.err.println(String.valueOf(i)+"\t"+ 235.747 + p[i]+"\t"+ 235.748 + re_root[i]+"\t"+ 235.749 + im_root[i]); 235.750 + } 235.751 + else 235.752 + { 235.753 + System.err.println(String.valueOf(i)+"\t"+p[i]+"\t"); 235.754 + } 235.755 + } 235.756 + } 235.757 + } 235.758 + 235.759 + 235.760 + 235.761 + public 
static void main(final String [] args) 235.762 + { 235.763 + final long t0=System.currentTimeMillis(); 235.764 + final double eps=1e-6; 235.765 + //checkRoots(); 235.766 + final java.util.Random r=new java.util.Random(-1381923); 235.767 + printSpecialValues(); 235.768 + 235.769 + final int n_tests=10000000; 235.770 + //testRoots(2,n_tests,r,eps); 235.771 + //testRoots(3,n_tests,r,eps); 235.772 + testRoots(4,n_tests,r,eps); 235.773 + final long t1=System.currentTimeMillis(); 235.774 + System.err.println("PolynomialRoot.main: "+n_tests+" tests OK done in "+(t1-t0)+" milliseconds. ver=$Id: PolynomialRoot.java,v 1.105 2012/08/18 00:00:05 mal Exp $"); 235.775 + } 235.776 + 235.777 + 235.778 + 235.779 +}
236.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 236.2 +++ b/test/gc/arguments/TestUnrecognizedVMOptionsHandling.java Mon Jul 15 11:07:03 2013 +0100 236.3 @@ -0,0 +1,69 @@ 236.4 +/* 236.5 +* Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. 236.6 +* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 236.7 +* 236.8 +* This code is free software; you can redistribute it and/or modify it 236.9 +* under the terms of the GNU General Public License version 2 only, as 236.10 +* published by the Free Software Foundation. 236.11 +* 236.12 +* This code is distributed in the hope that it will be useful, but WITHOUT 236.13 +* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 236.14 +* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 236.15 +* version 2 for more details (a copy is included in the LICENSE file that 236.16 +* accompanied this code). 236.17 +* 236.18 +* You should have received a copy of the GNU General Public License version 236.19 +* 2 along with this work; if not, write to the Free Software Foundation, 236.20 +* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 236.21 +* 236.22 +* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 236.23 +* or visit www.oracle.com if you need additional information or have any 236.24 +* questions. 
236.25 +*/ 236.26 + 236.27 +/* 236.28 + * @test TestUnrecognizedVMOptionsHandling 236.29 + * @key gc 236.30 + * @bug 8017611 236.31 + * @summary Tests handling unrecognized VM options 236.32 + * @library /testlibrary 236.33 + * @run main/othervm TestUnrecognizedVMOptionsHandling 236.34 + */ 236.35 + 236.36 +import com.oracle.java.testlibrary.*; 236.37 + 236.38 +public class TestUnrecognizedVMOptionsHandling { 236.39 + 236.40 + public static void main(String args[]) throws Exception { 236.41 + // The first two JAVA processes are expected to fail, but with a correct VM option suggestion 236.42 + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( 236.43 + "-XX:+PrintGc", 236.44 + "-version" 236.45 + ); 236.46 + OutputAnalyzer outputWithError = new OutputAnalyzer(pb.start()); 236.47 + outputWithError.shouldContain("Did you mean '(+/-)PrintGC'?"); 236.48 + if (outputWithError.getExitValue() == 0) { 236.49 + throw new RuntimeException("Not expected to get exit value 0"); 236.50 + } 236.51 + 236.52 + pb = ProcessTools.createJavaProcessBuilder( 236.53 + "-XX:MaxiumHeapSize=500m", 236.54 + "-version" 236.55 + ); 236.56 + outputWithError = new OutputAnalyzer(pb.start()); 236.57 + outputWithError.shouldContain("Did you mean 'MaxHeapSize=<value>'?"); 236.58 + if (outputWithError.getExitValue() == 0) { 236.59 + throw new RuntimeException("Not expected to get exit value 0"); 236.60 + } 236.61 + 236.62 + // The last JAVA process should run successfully for the purpose of sanity check 236.63 + pb = ProcessTools.createJavaProcessBuilder( 236.64 + "-XX:+PrintGC", 236.65 + "-version" 236.66 + ); 236.67 + OutputAnalyzer outputWithNoError = new OutputAnalyzer(pb.start()); 236.68 + outputWithNoError.shouldNotContain("Did you mean '(+/-)PrintGC'?"); 236.69 + outputWithNoError.shouldHaveExitValue(0); 236.70 + } 236.71 +} 236.72 +
237.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 237.2 +++ b/test/gc/metaspace/TestMetaspaceMemoryPool.java Mon Jul 15 11:07:03 2013 +0100 237.3 @@ -0,0 +1,126 @@ 237.4 +/* 237.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. 237.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 237.7 + * 237.8 + * This code is free software; you can redistribute it and/or modify it 237.9 + * under the terms of the GNU General Public License version 2 only, as 237.10 + * published by the Free Software Foundation. 237.11 + * 237.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 237.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 237.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 237.15 + * version 2 for more details (a copy is included in the LICENSE file that 237.16 + * accompanied this code). 237.17 + * 237.18 + * You should have received a copy of the GNU General Public License version 237.19 + * 2 along with this work; if not, write to the Free Software Foundation, 237.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 237.21 + * 237.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 237.23 + * or visit www.oracle.com if you need additional information or have any 237.24 + * questions. 237.25 + */ 237.26 + 237.27 +import java.util.List; 237.28 +import java.lang.management.ManagementFactory; 237.29 +import java.lang.management.MemoryManagerMXBean; 237.30 +import java.lang.management.MemoryPoolMXBean; 237.31 +import java.lang.management.MemoryUsage; 237.32 + 237.33 +import java.lang.management.RuntimeMXBean; 237.34 +import java.lang.management.ManagementFactory; 237.35 + 237.36 +/* @test TestMetaspaceMemoryPool 237.37 + * @bug 8000754 237.38 + * @summary Tests that a MemoryPoolMXBeans is created for metaspace and that a 237.39 + * MemoryManagerMXBean is created. 
237.40 + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops TestMetaspaceMemoryPool 237.41 + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-UseCompressedOops -XX:MaxMetaspaceSize=60m TestMetaspaceMemoryPool 237.42 + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedKlassPointers TestMetaspaceMemoryPool 237.43 + * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+UseCompressedOops -XX:+UseCompressedKlassPointers -XX:ClassMetaspaceSize=60m TestMetaspaceMemoryPool 237.44 + */ 237.45 +public class TestMetaspaceMemoryPool { 237.46 + public static void main(String[] args) { 237.47 + verifyThatMetaspaceMemoryManagerExists(); 237.48 + verifyMemoryPool(getMemoryPool("Metaspace"), isFlagDefined("MaxMetaspaceSize")); 237.49 + 237.50 + if (runsOn64bit()) { 237.51 + if (usesCompressedOops()) { 237.52 + MemoryPoolMXBean cksPool = getMemoryPool("Compressed Class Space"); 237.53 + verifyMemoryPool(cksPool, true); 237.54 + } 237.55 + } 237.56 + } 237.57 + 237.58 + private static boolean runsOn64bit() { 237.59 + return !System.getProperty("sun.arch.data.model").equals("32"); 237.60 + } 237.61 + 237.62 + private static boolean usesCompressedOops() { 237.63 + return isFlagDefined("+UseCompressedOops"); 237.64 + } 237.65 + 237.66 + private static boolean isFlagDefined(String name) { 237.67 + RuntimeMXBean runtimeMxBean = ManagementFactory.getRuntimeMXBean(); 237.68 + List<String> args = runtimeMxBean.getInputArguments(); 237.69 + for (String arg : args) { 237.70 + if (arg.startsWith("-XX:" + name)) { 237.71 + return true; 237.72 + } 237.73 + } 237.74 + return false; 237.75 + } 237.76 + 237.77 + private static void verifyThatMetaspaceMemoryManagerExists() { 237.78 + List<MemoryManagerMXBean> managers = ManagementFactory.getMemoryManagerMXBeans(); 237.79 + for (MemoryManagerMXBean manager : managers) { 237.80 + if (manager.getName().equals("Metaspace Manager")) { 237.81 + return; 237.82 + } 237.83 + } 
237.84 + 237.85 + throw new RuntimeException("Expected to find a metaspace memory manager"); 237.86 + } 237.87 + 237.88 + private static MemoryPoolMXBean getMemoryPool(String name) { 237.89 + List<MemoryPoolMXBean> pools = ManagementFactory.getMemoryPoolMXBeans(); 237.90 + for (MemoryPoolMXBean pool : pools) { 237.91 + if (pool.getName().equals(name)) { 237.92 + return pool; 237.93 + } 237.94 + } 237.95 + 237.96 + throw new RuntimeException("Expected to find a memory pool with name " + name); 237.97 + } 237.98 + 237.99 + private static void verifyMemoryPool(MemoryPoolMXBean pool, boolean isMaxDefined) { 237.100 + MemoryUsage mu = pool.getUsage(); 237.101 + assertDefined(mu.getInit(), "init"); 237.102 + assertDefined(mu.getUsed(), "used"); 237.103 + assertDefined(mu.getCommitted(), "committed"); 237.104 + 237.105 + if (isMaxDefined) { 237.106 + assertDefined(mu.getMax(), "max"); 237.107 + } else { 237.108 + assertUndefined(mu.getMax(), "max"); 237.109 + } 237.110 + } 237.111 + 237.112 + private static void assertDefined(long value, String name) { 237.113 + assertTrue(value != -1, "Expected " + name + " to be defined"); 237.114 + } 237.115 + 237.116 + private static void assertUndefined(long value, String name) { 237.117 + assertEquals(value, -1, "Expected " + name + " to be undefined"); 237.118 + } 237.119 + 237.120 + private static void assertEquals(long actual, long expected, String msg) { 237.121 + assertTrue(actual == expected, msg); 237.122 + } 237.123 + 237.124 + private static void assertTrue(boolean condition, String msg) { 237.125 + if (!condition) { 237.126 + throw new RuntimeException(msg); 237.127 + } 237.128 + } 237.129 +}
238.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 238.2 +++ b/test/gc/parallelScavenge/AdaptiveGCBoundary.java Mon Jul 15 11:07:03 2013 +0100 238.3 @@ -0,0 +1,61 @@ 238.4 +/* 238.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. 238.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 238.7 + * 238.8 + * This code is free software; you can redistribute it and/or modify it 238.9 + * under the terms of the GNU General Public License version 2 only, as 238.10 + * published by the Free Software Foundation. 238.11 + * 238.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 238.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 238.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 238.15 + * version 2 for more details (a copy is included in the LICENSE file that 238.16 + * accompanied this code). 238.17 + * 238.18 + * You should have received a copy of the GNU General Public License version 238.19 + * 2 along with this work; if not, write to the Free Software Foundation, 238.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 238.21 + * 238.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 238.23 + * or visit www.oracle.com if you need additional information or have any 238.24 + * questions. 
238.25 + */ 238.26 + 238.27 +/** 238.28 + * @test AdaptiveGCBoundary 238.29 + * @summary UseAdaptiveGCBoundary is broken 238.30 + * @bug 8014546 238.31 + * @key gc 238.32 + * @key regression 238.33 + * @library /testlibrary 238.34 + * @run main/othervm AdaptiveGCBoundary 238.35 + * @author jon.masamitsu@oracle.com 238.36 + */ 238.37 + 238.38 +import com.oracle.java.testlibrary.*; 238.39 + 238.40 +public class AdaptiveGCBoundary { 238.41 + public static void main(String args[]) throws Exception { 238.42 + 238.43 + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( 238.44 + "-showversion", 238.45 + "-XX:+UseParallelGC", 238.46 + "-XX:+UseAdaptiveGCBoundary", 238.47 + "-XX:+PrintCommandLineFlags", 238.48 + SystemGCCaller.class.getName() 238.49 + ); 238.50 + 238.51 + OutputAnalyzer output = new OutputAnalyzer(pb.start()); 238.52 + 238.53 + output.shouldContain("+UseAdaptiveGCBoundary"); 238.54 + 238.55 + output.shouldNotContain("error"); 238.56 + 238.57 + output.shouldHaveExitValue(0); 238.58 + } 238.59 + static class SystemGCCaller { 238.60 + public static void main(String [] args) { 238.61 + System.gc(); 238.62 + } 238.63 + } 238.64 +}
239.1 --- a/test/runtime/7196045/Test7196045.java Thu Jul 11 12:59:03 2013 -0400 239.2 +++ b/test/runtime/7196045/Test7196045.java Mon Jul 15 11:07:03 2013 +0100 239.3 @@ -26,7 +26,7 @@ 239.4 * @test 239.5 * @bug 7196045 239.6 * @summary Possible JVM deadlock in ThreadTimesClosure when using HotspotInternal non-public API. 239.7 - * @run main/othervm Test7196045 239.8 + * @run main/othervm -XX:+UsePerfData Test7196045 239.9 */ 239.10 239.11 import java.lang.management.ManagementFactory;
240.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 240.2 +++ b/test/runtime/8001071/Test8001071.java Mon Jul 15 11:07:03 2013 +0100 240.3 @@ -0,0 +1,45 @@ 240.4 +/* 240.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. 240.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 240.7 + * 240.8 + * This code is free software; you can redistribute it and/or modify it 240.9 + * under the terms of the GNU General Public License version 2 only, as 240.10 + * published by the Free Software Foundation. 240.11 + * 240.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 240.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 240.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 240.15 + * version 2 for more details (a copy is included in the LICENSE file that 240.16 + * accompanied this code). 240.17 + * 240.18 + * You should have received a copy of the GNU General Public License version 240.19 + * 2 along with this work; if not, write to the Free Software Foundation, 240.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 240.21 + * 240.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 240.23 + * or visit www.oracle.com if you need additional information or have any 240.24 + * questions. 240.25 + */ 240.26 + 240.27 +import sun.misc.Unsafe; 240.28 +import java.lang.reflect.Field; 240.29 + 240.30 +@SuppressWarnings("sunapi") 240.31 +public class Test8001071 { 240.32 + public static Unsafe unsafe; 240.33 + 240.34 + static { 240.35 + try { 240.36 + Field f = Unsafe.class.getDeclaredField("theUnsafe"); 240.37 + f.setAccessible(true); 240.38 + unsafe = (Unsafe) f.get(null); 240.39 + } catch ( Exception e ) { 240.40 + e.printStackTrace(); 240.41 + } 240.42 + } 240.43 + 240.44 + public static void main(String args[]) { 240.45 + unsafe.getObject(new Test8001071(), Short.MAX_VALUE); 240.46 + } 240.47 + 240.48 +}
241.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 241.2 +++ b/test/runtime/8001071/Test8001071.sh Mon Jul 15 11:07:03 2013 +0100 241.3 @@ -0,0 +1,63 @@ 241.4 +#!/bin/sh 241.5 + 241.6 +# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. 241.7 +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 241.8 + 241.9 +# This code is free software; you can redistribute it and/or modify it 241.10 +# under the terms of the GNU General Public License version 2 only, as 241.11 +# published by the Free Software Foundation. 241.12 + 241.13 +# This code is distributed in the hope that it will be useful, but WITHOUT 241.14 +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 241.15 +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 241.16 +# version 2 for more details (a copy is included in the LICENSE file that 241.17 +# accompanied this code). 241.18 + 241.19 +# You should have received a copy of the GNU General Public License version 241.20 +# 2 along with this work; if not, write to the Free Software Foundation, 241.21 +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 241.22 + 241.23 +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 241.24 +# or visit www.oracle.com if you need additional information or have any 241.25 +# questions. 
241.26 + 241.27 +## @test 241.28 +## @bug 8001071 241.29 +## @summary Add simple range check into VM implemenation of Unsafe access methods 241.30 +## @compile Test8001071.java 241.31 +## @run shell Test8001071.sh 241.32 +## @author filipp.zhinkin@oracle.com 241.33 + 241.34 +VERSION=`${TESTJAVA}/bin/java ${TESTVMOPTS} -version 2>&1` 241.35 + 241.36 +if [ -n "`echo $VERSION | grep debug`" -o -n "`echo $VERSION | grep jvmg`" ]; then 241.37 + echo "Build type check passed" 241.38 + echo "Continue testing" 241.39 +else 241.40 + echo "Fastdebug build is required for this test" 241.41 + exit 0 241.42 +fi 241.43 + 241.44 +${TESTJAVA}/bin/java -cp ${TESTCLASSES} ${TESTVMOPTS} Test8001071 2>&1 241.45 + 241.46 +HS_ERR_FILE=hs_err_pid*.log 241.47 + 241.48 +if [ ! -f $HS_ERR_FILE ] 241.49 +then 241.50 + echo "hs_err_pid log file was not found" 241.51 + echo "Test failed" 241.52 + exit 1 241.53 +fi 241.54 + 241.55 +grep "assert(byte_offset < p_size) failed: Unsafe access: offset.*> object's size.*" $HS_ERR_FILE 241.56 + 241.57 +if [ "0" = "$?" ]; 241.58 +then 241.59 + echo "Range check assertion failed as expected" 241.60 + echo "Test passed" 241.61 + exit 0 241.62 +else 241.63 + echo "Range check assertion was not failed" 241.64 + echo "Test failed" 241.65 + exit 1 241.66 +fi
242.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 242.2 +++ b/test/runtime/CommandLine/CompilerConfigFileWarning.java Mon Jul 15 11:07:03 2013 +0100 242.3 @@ -0,0 +1,50 @@ 242.4 +/* 242.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. 242.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 242.7 + * 242.8 + * This code is free software; you can redistribute it and/or modify it 242.9 + * under the terms of the GNU General Public License version 2 only, as 242.10 + * published by the Free Software Foundation. 242.11 + * 242.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 242.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 242.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 242.15 + * version 2 for more details (a copy is included in the LICENSE file that 242.16 + * accompanied this code). 242.17 + * 242.18 + * You should have received a copy of the GNU General Public License version 242.19 + * 2 along with this work; if not, write to the Free Software Foundation, 242.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 242.21 + * 242.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 242.23 + * or visit www.oracle.com if you need additional information or have any 242.24 + * questions. 
242.25 + */ 242.26 + 242.27 +/* 242.28 + * @test 242.29 + * @bug 7167142 242.30 + * @summary Warn if unused .hotspot_compiler file is present 242.31 + * @library /testlibrary 242.32 + */ 242.33 + 242.34 +import java.io.PrintWriter; 242.35 +import com.oracle.java.testlibrary.*; 242.36 + 242.37 +public class CompilerConfigFileWarning { 242.38 + public static void main(String[] args) throws Exception { 242.39 + String vmVersion = System.getProperty("java.vm.version"); 242.40 + if (vmVersion.toLowerCase().contains("debug") || vmVersion.toLowerCase().contains("jvmg")) { 242.41 + System.out.println("Skip on debug builds since we'll always read the file there"); 242.42 + return; 242.43 + } 242.44 + 242.45 + PrintWriter pw = new PrintWriter(".hotspot_compiler"); 242.46 + pw.println("aa"); 242.47 + pw.close(); 242.48 + 242.49 + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-version"); 242.50 + OutputAnalyzer output = new OutputAnalyzer(pb.start()); 242.51 + output.shouldContain("warning: .hotspot_compiler file is present but has been ignored. Run with -XX:CompileCommandFile=.hotspot_compiler to load the file."); 242.52 + } 242.53 +}
243.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 243.2 +++ b/test/runtime/CommandLine/ConfigFileWarning.java Mon Jul 15 11:07:03 2013 +0100 243.3 @@ -0,0 +1,50 @@ 243.4 +/* 243.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. 243.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 243.7 + * 243.8 + * This code is free software; you can redistribute it and/or modify it 243.9 + * under the terms of the GNU General Public License version 2 only, as 243.10 + * published by the Free Software Foundation. 243.11 + * 243.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 243.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 243.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 243.15 + * version 2 for more details (a copy is included in the LICENSE file that 243.16 + * accompanied this code). 243.17 + * 243.18 + * You should have received a copy of the GNU General Public License version 243.19 + * 2 along with this work; if not, write to the Free Software Foundation, 243.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 243.21 + * 243.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 243.23 + * or visit www.oracle.com if you need additional information or have any 243.24 + * questions. 
243.25 + */ 243.26 + 243.27 +/* 243.28 + * @test 243.29 + * @bug 7167142 243.30 + * @summary Warn if unused .hotspot_rc file is present 243.31 + * @library /testlibrary 243.32 + */ 243.33 + 243.34 +import java.io.PrintWriter; 243.35 +import com.oracle.java.testlibrary.*; 243.36 + 243.37 +public class ConfigFileWarning { 243.38 + public static void main(String[] args) throws Exception { 243.39 + String vmVersion = System.getProperty("java.vm.version"); 243.40 + if (vmVersion.toLowerCase().contains("debug") || vmVersion.toLowerCase().contains("jvmg")) { 243.41 + System.out.println("Skip on debug builds since we'll always read the file there"); 243.42 + return; 243.43 + } 243.44 + 243.45 + PrintWriter pw = new PrintWriter(".hotspotrc"); 243.46 + pw.println("aa"); 243.47 + pw.close(); 243.48 + 243.49 + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-version"); 243.50 + OutputAnalyzer output = new OutputAnalyzer(pb.start()); 243.51 + output.shouldContain("warning: .hotspotrc file is present but has been ignored. Run with -XX:Flags=.hotspotrc to load the file."); 243.52 + } 243.53 +}
244.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 244.2 +++ b/test/runtime/SharedArchiveFile/CdsDifferentObjectAlignment.java Mon Jul 15 11:07:03 2013 +0100 244.3 @@ -0,0 +1,88 @@ 244.4 +/* 244.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. 244.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 244.7 + * 244.8 + * This code is free software; you can redistribute it and/or modify it 244.9 + * under the terms of the GNU General Public License version 2 only, as 244.10 + * published by the Free Software Foundation. 244.11 + * 244.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 244.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 244.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 244.15 + * version 2 for more details (a copy is included in the LICENSE file that 244.16 + * accompanied this code). 244.17 + * 244.18 + * You should have received a copy of the GNU General Public License version 244.19 + * 2 along with this work; if not, write to the Free Software Foundation, 244.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 244.21 + * 244.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 244.23 + * or visit www.oracle.com if you need additional information or have any 244.24 + * questions. 244.25 + */ 244.26 + 244.27 +/* 244.28 + * @test CdsDifferentObjectAlignment 244.29 + * @summary Testing CDS (class data sharing) using varying object alignment. 244.30 + * Using different object alignment for each dump/load pair. 244.31 + * This is a negative test; using object alignment for loading that 244.32 + * is different from object alignment for creating a CDS file 244.33 + * should fail when loading. 
244.34 + * @library /testlibrary 244.35 + */ 244.36 + 244.37 +import com.oracle.java.testlibrary.*; 244.38 + 244.39 +public class CdsDifferentObjectAlignment { 244.40 + public static void main(String[] args) throws Exception { 244.41 + String nativeWordSize = System.getProperty("sun.arch.data.model"); 244.42 + if (!Platform.is64bit()) { 244.43 + System.out.println("ObjectAlignmentInBytes for CDS is only " + 244.44 + "supported on 64bit platforms; this plaform is " + 244.45 + nativeWordSize); 244.46 + System.out.println("Skipping the test"); 244.47 + } else { 244.48 + createAndLoadSharedArchive(16, 64); 244.49 + createAndLoadSharedArchive(64, 32); 244.50 + } 244.51 + } 244.52 + 244.53 + 244.54 + // Parameters are object alignment expressed in bytes 244.55 + private static void 244.56 + createAndLoadSharedArchive(int createAlignment, int loadAlignment) 244.57 + throws Exception { 244.58 + String createAlignmentArgument = "-XX:ObjectAlignmentInBytes=" + 244.59 + createAlignment; 244.60 + String loadAlignmentArgument = "-XX:ObjectAlignmentInBytes=" + 244.61 + loadAlignment; 244.62 + 244.63 + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( 244.64 + "-XX:+UnlockDiagnosticVMOptions", 244.65 + "-XX:SharedArchiveFile=./sample.jsa", 244.66 + "-Xshare:dump", 244.67 + createAlignmentArgument); 244.68 + 244.69 + OutputAnalyzer output = new OutputAnalyzer(pb.start()); 244.70 + output.shouldContain("Loading classes to share"); 244.71 + output.shouldHaveExitValue(0); 244.72 + 244.73 + pb = ProcessTools.createJavaProcessBuilder( 244.74 + "-XX:+UnlockDiagnosticVMOptions", 244.75 + "-XX:SharedArchiveFile=./sample.jsa", 244.76 + "-Xshare:on", 244.77 + loadAlignmentArgument, 244.78 + "-version"); 244.79 + 244.80 + output = new OutputAnalyzer(pb.start()); 244.81 + String expectedErrorMsg = 244.82 + String.format( 244.83 + "The shared archive file's ObjectAlignmentInBytes of %d " + 244.84 + "does not equal the current ObjectAlignmentInBytes of %d", 244.85 + createAlignment, 
244.86 + loadAlignment); 244.87 + 244.88 + output.shouldContain(expectedErrorMsg); 244.89 + output.shouldHaveExitValue(1); 244.90 + } 244.91 +}
245.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 245.2 +++ b/test/runtime/SharedArchiveFile/CdsSameObjectAlignment.java Mon Jul 15 11:07:03 2013 +0100 245.3 @@ -0,0 +1,92 @@ 245.4 +/* 245.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. 245.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 245.7 + * 245.8 + * This code is free software; you can redistribute it and/or modify it 245.9 + * under the terms of the GNU General Public License version 2 only, as 245.10 + * published by the Free Software Foundation. 245.11 + * 245.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 245.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 245.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 245.15 + * version 2 for more details (a copy is included in the LICENSE file that 245.16 + * accompanied this code). 245.17 + * 245.18 + * You should have received a copy of the GNU General Public License version 245.19 + * 2 along with this work; if not, write to the Free Software Foundation, 245.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 245.21 + * 245.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 245.23 + * or visit www.oracle.com if you need additional information or have any 245.24 + * questions. 245.25 + */ 245.26 + 245.27 +/* 245.28 + * @test CdsSameObjectAlignment 245.29 + * @summary Testing CDS (class data sharing) using varying object alignment. 
245.30 + * Using same object alignment for each dump/load pair 245.31 + * @library /testlibrary 245.32 + */ 245.33 + 245.34 +import com.oracle.java.testlibrary.*; 245.35 + 245.36 +public class CdsSameObjectAlignment { 245.37 + public static void main(String[] args) throws Exception { 245.38 + String nativeWordSize = System.getProperty("sun.arch.data.model"); 245.39 + if (!Platform.is64bit()) { 245.40 + System.out.println("ObjectAlignmentInBytes for CDS is only " + 245.41 + "supported on 64bit platforms; this plaform is " + 245.42 + nativeWordSize); 245.43 + System.out.println("Skipping the test"); 245.44 + } else { 245.45 + dumpAndLoadSharedArchive(8); 245.46 + dumpAndLoadSharedArchive(16); 245.47 + dumpAndLoadSharedArchive(32); 245.48 + dumpAndLoadSharedArchive(64); 245.49 + } 245.50 + } 245.51 + 245.52 + private static void 245.53 + dumpAndLoadSharedArchive(int objectAlignmentInBytes) throws Exception { 245.54 + String objectAlignmentArg = "-XX:ObjectAlignmentInBytes=" 245.55 + + objectAlignmentInBytes; 245.56 + System.out.println("dumpAndLoadSharedArchive(): objectAlignmentInBytes = " 245.57 + + objectAlignmentInBytes); 245.58 + 245.59 + // create shared archive 245.60 + ProcessBuilder pb = ProcessTools.createJavaProcessBuilder( 245.61 + "-XX:+UnlockDiagnosticVMOptions", 245.62 + "-XX:SharedArchiveFile=./sample.jsa", 245.63 + "-Xshare:dump", 245.64 + objectAlignmentArg); 245.65 + 245.66 + OutputAnalyzer output = new OutputAnalyzer(pb.start()); 245.67 + output.shouldContain("Loading classes to share"); 245.68 + output.shouldHaveExitValue(0); 245.69 + 245.70 + 245.71 + // run using the shared archive 245.72 + pb = ProcessTools.createJavaProcessBuilder( 245.73 + "-XX:+UnlockDiagnosticVMOptions", 245.74 + "-XX:SharedArchiveFile=./sample.jsa", 245.75 + "-Xshare:on", 245.76 + objectAlignmentArg, 245.77 + "-version"); 245.78 + 245.79 + output = new OutputAnalyzer(pb.start()); 245.80 + 245.81 + try { 245.82 + output.shouldContain("sharing"); 245.83 + 
output.shouldHaveExitValue(0); 245.84 + } catch (RuntimeException e) { 245.85 + // CDS uses absolute addresses for performance. 245.86 + // It will try to reserve memory at a specific address; 245.87 + // there is a chance such reservation will fail 245.88 + // If it does, it is NOT considered a failure of the feature, 245.89 + // rather a possible expected outcome, though not likely 245.90 + output.shouldContain( 245.91 + "Unable to reserve shared space at required address"); 245.92 + output.shouldHaveExitValue(1); 245.93 + } 245.94 + } 245.95 +}
/*
 * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadMXBean;
import java.util.Random;

/*
 * @test
 * @bug 8016304
 * @summary Make sure no deadlock is reported for this program which has no deadlocks.
 * @run main/othervm TestFalseDeadLock
 */

/*
 * This test will not provoke the bug every time it is run since the bug is intermittent.
 * The test has a fixed running time of 5 seconds.
 */

public class TestFalseDeadLock {
    private static ThreadMXBean bean;
    // Flag that bounds the stress phase; flipped by main after 5 seconds.
    private static volatile boolean running = true;
    // Set by any worker that sees a (spurious) deadlock report.
    private static volatile boolean found = false;

    public static void main(String[] args) throws Exception {
        bean = ManagementFactory.getThreadMXBean();

        // Spin up 500 workers that alternate timed waits with deep recursion.
        Thread[] workers = new Thread[500];
        for (int idx = 0; idx < workers.length; idx++) {
            workers[idx] = new Thread(new Test());
            workers[idx].start();
        }

        try {
            Thread.sleep(5000); // fixed duration of the stress phase
        } catch (InterruptedException ignored) {
            // Best effort: an early wake-up merely shortens the stress phase.
        }

        running = false;
        for (Thread worker : workers) {
            worker.join();
        }

        if (found) {
            throw new Exception("Deadlock reported, but there is no deadlock.");
        }
    }

    /** Worker: timed wait + recursion loop, then one deadlock-detector poll. */
    public static class Test implements Runnable {
        public void run() {
            Random rnd = new Random();
            while (running) {
                try {
                    synchronized (this) {
                        wait(rnd.nextInt(1000) + 1);
                    }
                } catch (InterruptedException ignored) {
                    // Interrupts are deliberately ignored; the loop is bounded
                    // by the volatile 'running' flag instead.
                }
                recurse(2000);
            }
            if (bean.findDeadlockedThreads() != null) {
                System.out.println("FOUND!");
                found = true;
            }
        }

        private void recurse(int depth) {
            if (!running) {
                // It is important for the test to call println here
                // since there are locks inside that path.
                System.out.println("Hullo");
                return;
            }
            if (depth > 0) {
                recurse(depth - 1);
            }
        }
    }
}
247.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000 247.2 +++ b/test/testlibrary/com/oracle/java/testlibrary/Platform.java Mon Jul 15 11:07:03 2013 +0100 247.3 @@ -0,0 +1,62 @@ 247.4 +/* 247.5 + * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved. 247.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 247.7 + * 247.8 + * This code is free software; you can redistribute it and/or modify it 247.9 + * under the terms of the GNU General Public License version 2 only, as 247.10 + * published by the Free Software Foundation. 247.11 + * 247.12 + * This code is distributed in the hope that it will be useful, but WITHOUT 247.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 247.14 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 247.15 + * version 2 for more details (a copy is included in the LICENSE file that 247.16 + * accompanied this code). 247.17 + * 247.18 + * You should have received a copy of the GNU General Public License version 247.19 + * 2 along with this work; if not, write to the Free Software Foundation, 247.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 247.21 + * 247.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 247.23 + * or visit www.oracle.com if you need additional information or have any 247.24 + * questions. 
247.25 + */ 247.26 + 247.27 +package com.oracle.java.testlibrary; 247.28 + 247.29 +public class Platform { 247.30 + private static final String osName = System.getProperty("os.name"); 247.31 + private static final String dataModel = System.getProperty("sun.arch.data.model"); 247.32 + private static final String vmVersion = System.getProperty("java.vm.version"); 247.33 + 247.34 + public static boolean is64bit() { 247.35 + return dataModel.equals("64"); 247.36 + } 247.37 + 247.38 + public static boolean isSolaris() { 247.39 + return osName.toLowerCase().startsWith("sunos"); 247.40 + } 247.41 + 247.42 + public static boolean isWindows() { 247.43 + return osName.toLowerCase().startsWith("win"); 247.44 + } 247.45 + 247.46 + public static boolean isOSX() { 247.47 + return osName.toLowerCase().startsWith("mac"); 247.48 + } 247.49 + 247.50 + public static boolean isLinux() { 247.51 + return osName.toLowerCase().startsWith("linux"); 247.52 + } 247.53 + 247.54 + public static String getOsName() { 247.55 + return osName; 247.56 + } 247.57 + 247.58 + public static boolean isDebugBuild() { 247.59 + return vmVersion.toLowerCase().contains("debug"); 247.60 + } 247.61 + 247.62 + public static String getVMVersion() { 247.63 + return vmVersion; 247.64 + } 247.65 +}
248.1 --- a/test/testlibrary/com/oracle/java/testlibrary/ProcessTools.java Thu Jul 11 12:59:03 2013 -0400 248.2 +++ b/test/testlibrary/com/oracle/java/testlibrary/ProcessTools.java Mon Jul 15 11:07:03 2013 +0100 248.3 @@ -112,10 +112,8 @@ 248.4 * @return String[] with platform specific arguments, empty if there are none 248.5 */ 248.6 public static String[] getPlatformSpecificVMArgs() { 248.7 - String osName = System.getProperty("os.name"); 248.8 - String dataModel = System.getProperty("sun.arch.data.model"); 248.9 248.10 - if (osName.equals("SunOS") && dataModel.equals("64")) { 248.11 + if (Platform.is64bit() && Platform.isSolaris()) { 248.12 return new String[] { "-d64" }; 248.13 } 248.14