Fri, 27 Feb 2009 13:27:09 -0800
6810672: Comment typos
Summary: I have collected some typos found while looking at the code.
Reviewed-by: kvn, never
1.1 --- a/src/cpu/sparc/vm/interp_masm_sparc.cpp Fri Feb 27 08:34:19 2009 -0800 1.2 +++ b/src/cpu/sparc/vm/interp_masm_sparc.cpp Fri Feb 27 13:27:09 2009 -0800 1.3 @@ -2465,7 +2465,7 @@ 1.4 // InterpreterRuntime::post_method_entry(); 1.5 // } 1.6 // if (DTraceMethodProbes) { 1.7 -// SharedRuntime::dtrace_method_entry(method, reciever); 1.8 +// SharedRuntime::dtrace_method_entry(method, receiver); 1.9 // } 1.10 1.11 void InterpreterMacroAssembler::notify_method_entry() {
2.1 --- a/src/cpu/sparc/vm/nativeInst_sparc.hpp Fri Feb 27 08:34:19 2009 -0800 2.2 +++ b/src/cpu/sparc/vm/nativeInst_sparc.hpp Fri Feb 27 13:27:09 2009 -0800 2.3 @@ -243,7 +243,7 @@ 2.4 2.5 // Regenerate the instruction sequence that performs the 64 bit 2.6 // sethi. This only does the sethi. The disp field (bottom 10 bits) 2.7 - // must be handled seperately. 2.8 + // must be handled separately. 2.9 static void set_data64_sethi(address instaddr, intptr_t x); 2.10 2.11 // combine the fields of a sethi/simm13 pair (simm13 = or, add, jmpl, ld/st)
3.1 --- a/src/cpu/sparc/vm/sparc.ad Fri Feb 27 08:34:19 2009 -0800 3.2 +++ b/src/cpu/sparc/vm/sparc.ad Fri Feb 27 13:27:09 2009 -0800 3.3 @@ -189,7 +189,7 @@ 3.4 // double fp register numbers. FloatRegisterImpl in register_sparc.hpp 3.5 // wants 0-63, so we have to convert every time we want to use fp regs 3.6 // with the macroassembler, using reg_to_DoubleFloatRegister_object(). 3.7 -// 255 is a flag meaning 'dont go here'. 3.8 +// 255 is a flag meaning "don't go here". 3.9 // I believe we can't handle callee-save doubles D32 and up until 3.10 // the place in the sparc stack crawler that asserts on the 255 is 3.11 // fixed up. 3.12 @@ -462,7 +462,7 @@ 3.13 3.14 // Macros to extract hi & lo halves from a long pair. 3.15 // G0 is not part of any long pair, so assert on that. 3.16 -// Prevents accidently using G1 instead of G0. 3.17 +// Prevents accidentally using G1 instead of G0. 3.18 #define LONG_HI_REG(x) (x) 3.19 #define LONG_LO_REG(x) (x) 3.20 3.21 @@ -1431,7 +1431,7 @@ 3.22 3.23 #ifndef _LP64 3.24 // In the LP64 build, all registers can be moved as aligned/adjacent 3.25 - // pairs, so there's never any need to move the high bits seperately. 3.26 + // pairs, so there's never any need to move the high bits separately. 3.27 // The 32-bit builds have to deal with the 32-bit ABI which can force 3.28 // all sorts of silly alignment problems. 
3.29 3.30 @@ -1624,7 +1624,7 @@ 3.31 Register temp_reg = G3; 3.32 assert( G5_ic_reg != temp_reg, "conflicting registers" ); 3.33 3.34 - // Load klass from reciever 3.35 + // Load klass from receiver 3.36 __ load_klass(O0, temp_reg); 3.37 // Compare against expected klass 3.38 __ cmp(temp_reg, G5_ic_reg); 3.39 @@ -4149,7 +4149,7 @@ 3.40 3.41 //----------OPERAND CLASSES---------------------------------------------------- 3.42 // Operand Classes are groups of operands that are used to simplify 3.43 -// instruction definitions by not requiring the AD writer to specify seperate 3.44 +// instruction definitions by not requiring the AD writer to specify separate 3.45 // instructions for every form of operand when the instruction accepts 3.46 // multiple operand types with the same basic encoding and format. The classic 3.47 // case of this is memory operands. 3.48 @@ -6847,7 +6847,7 @@ 3.49 ins_pipe(sdiv_reg_reg); 3.50 %} 3.51 3.52 -// Magic constant, reciprical of 10 3.53 +// Magic constant, reciprocal of 10 3.54 instruct loadConI_x66666667(iRegIsafe dst) %{ 3.55 effect( DEF dst ); 3.56 3.57 @@ -6857,7 +6857,7 @@ 3.58 ins_pipe(ialu_hi_lo_reg); 3.59 %} 3.60 3.61 -// Register Shift Right Arithmatic Long by 32-63 3.62 +// Register Shift Right Arithmetic Long by 32-63 3.63 instruct sra_31( iRegI dst, iRegI src ) %{ 3.64 effect( DEF dst, USE src ); 3.65 format %{ "SRA $src,31,$dst\t! Used in div-by-10" %} 3.66 @@ -9048,7 +9048,7 @@ 3.67 // These must follow all instruction definitions as they use the names 3.68 // defined in the instructions definitions. 3.69 // 3.70 -// peepmatch ( root_instr_name [preceeding_instruction]* ); 3.71 +// peepmatch ( root_instr_name [preceding_instruction]* ); 3.72 // 3.73 // peepconstraint %{ 3.74 // (instruction_number.operand_name relational_op instruction_number.operand_name
4.1 --- a/src/cpu/sparc/vm/templateTable_sparc.cpp Fri Feb 27 08:34:19 2009 -0800 4.2 +++ b/src/cpu/sparc/vm/templateTable_sparc.cpp Fri Feb 27 13:27:09 2009 -0800 4.3 @@ -1545,7 +1545,7 @@ 4.4 4.5 // Handle all the JSR stuff here, then exit. 4.6 // It's much shorter and cleaner than intermingling with the 4.7 - // non-JSR normal-branch stuff occuring below. 4.8 + // non-JSR normal-branch stuff occurring below. 4.9 if( is_jsr ) { 4.10 // compute return address as bci in Otos_i 4.11 __ ld_ptr(Address(Lmethod, 0, in_bytes(methodOopDesc::const_offset())), G3_scratch); 4.12 @@ -3079,7 +3079,7 @@ 4.13 Label ok; 4.14 4.15 // Check that entry is non-null. Null entries are probably a bytecode 4.16 - // problem. If the interface isn't implemented by the reciever class, 4.17 + // problem. If the interface isn't implemented by the receiver class, 4.18 // the VM should throw IncompatibleClassChangeError. linkResolver checks 4.19 // this too but that's only if the entry isn't already resolved, so we 4.20 // need to check again.
5.1 --- a/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp Fri Feb 27 08:34:19 2009 -0800 5.2 +++ b/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp Fri Feb 27 13:27:09 2009 -0800 5.3 @@ -501,7 +501,7 @@ 5.4 LIRItem right(x->y(), this); 5.5 5.6 left.load_item(); 5.7 - // dont load constants to save register 5.8 + // don't load constants to save register 5.9 right.load_nonconstant(); 5.10 rlock_result(x); 5.11 arithmetic_op_long(x->op(), x->operand(), left.result(), right.result(), NULL);
6.1 --- a/src/cpu/x86/vm/cppInterpreter_x86.cpp Fri Feb 27 08:34:19 2009 -0800 6.2 +++ b/src/cpu/x86/vm/cppInterpreter_x86.cpp Fri Feb 27 13:27:09 2009 -0800 6.3 @@ -523,7 +523,7 @@ 6.4 #ifdef _LP64 6.5 // Make sure stack is properly aligned and sized for the abi 6.6 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows 6.7 - __ andptr(rsp, -16); // must be 16 byte boundry (see amd64 ABI) 6.8 + __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI) 6.9 #endif // _LP64 6.10 6.11 6.12 @@ -970,7 +970,7 @@ 6.13 #ifdef _LP64 6.14 // duplicate the alignment rsp got after setting stack_base 6.15 __ subptr(rax, frame::arg_reg_save_area_bytes); // windows 6.16 - __ andptr(rax, -16); // must be 16 byte boundry (see amd64 ABI) 6.17 + __ andptr(rax, -16); // must be 16 byte boundary (see amd64 ABI) 6.18 #endif // _LP64 6.19 __ cmpptr(rax, rsp); 6.20 __ jcc(Assembler::equal, L); 6.21 @@ -1067,7 +1067,7 @@ 6.22 #ifdef _LP64 6.23 __ subptr(rsp, t); 6.24 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows 6.25 - __ andptr(rsp, -16); // must be 16 byte boundry (see amd64 ABI) 6.26 + __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI) 6.27 #else 6.28 __ addptr(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror 6.29 __ subptr(rsp, t);
7.1 --- a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp Fri Feb 27 08:34:19 2009 -0800 7.2 +++ b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp Fri Feb 27 13:27:09 2009 -0800 7.3 @@ -1350,7 +1350,7 @@ 7.4 { 7.5 Label L; 7.6 __ mov(rax, rsp); 7.7 - __ andptr(rax, -16); // must be 16 byte boundry (see amd64 ABI) 7.8 + __ andptr(rax, -16); // must be 16 byte boundary (see amd64 ABI) 7.9 __ cmpptr(rax, rsp); 7.10 __ jcc(Assembler::equal, L); 7.11 __ stop("improperly aligned stack");
8.1 --- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Fri Feb 27 08:34:19 2009 -0800 8.2 +++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp Fri Feb 27 13:27:09 2009 -0800 8.3 @@ -826,7 +826,7 @@ 8.4 8.5 __ subptr(rsp, t); 8.6 __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows 8.7 - __ andptr(rsp, -16); // must be 16 byte boundry (see amd64 ABI) 8.8 + __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI) 8.9 8.10 // get signature handler 8.11 {
9.1 --- a/src/cpu/x86/vm/templateTable_x86_32.cpp Fri Feb 27 08:34:19 2009 -0800 9.2 +++ b/src/cpu/x86/vm/templateTable_x86_32.cpp Fri Feb 27 13:27:09 2009 -0800 9.3 @@ -1586,7 +1586,7 @@ 9.4 9.5 // Handle all the JSR stuff here, then exit. 9.6 // It's much shorter and cleaner than intermingling with the 9.7 - // non-JSR normal-branch stuff occuring below. 9.8 + // non-JSR normal-branch stuff occurring below. 9.9 if (is_jsr) { 9.10 // Pre-load the next target bytecode into EBX 9.11 __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1, 0));
10.1 --- a/src/cpu/x86/vm/templateTable_x86_64.cpp Fri Feb 27 08:34:19 2009 -0800 10.2 +++ b/src/cpu/x86/vm/templateTable_x86_64.cpp Fri Feb 27 13:27:09 2009 -0800 10.3 @@ -1559,7 +1559,7 @@ 10.4 10.5 // Handle all the JSR stuff here, then exit. 10.6 // It's much shorter and cleaner than intermingling with the non-JSR 10.7 - // normal-branch stuff occuring below. 10.8 + // normal-branch stuff occurring below. 10.9 if (is_jsr) { 10.10 // Pre-load the next target bytecode into rbx 10.11 __ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1, 0));
11.1 --- a/src/cpu/x86/vm/x86_32.ad Fri Feb 27 08:34:19 2009 -0800 11.2 +++ b/src/cpu/x86/vm/x86_32.ad Fri Feb 27 13:27:09 2009 -0800 11.3 @@ -130,7 +130,7 @@ 11.4 // allocation. Highest priority is first. A useful heuristic is to 11.5 // give registers a low priority when they are required by machine 11.6 // instructions, like EAX and EDX. Registers which are used as 11.7 -// pairs must fall on an even boundry (witness the FPR#L's in this list). 11.8 +// pairs must fall on an even boundary (witness the FPR#L's in this list). 11.9 // For the Intel integer registers, the equivalent Long pairs are 11.10 // EDX:EAX, EBX:ECX, and EDI:EBP. 11.11 alloc_class chunk0( ECX, EBX, EBP, EDI, EAX, EDX, ESI, ESP, 11.12 @@ -5857,7 +5857,7 @@ 11.13 11.14 //----------OPERAND CLASSES---------------------------------------------------- 11.15 // Operand Classes are groups of operands that are used as to simplify 11.16 -// instruction definitions by not requiring the AD writer to specify seperate 11.17 +// instruction definitions by not requiring the AD writer to specify separate 11.18 // instructions for every form of operand when the instruction accepts 11.19 // multiple operand types with the same basic encoding and format. The classic 11.20 // case of this is memory operands. 11.21 @@ -13220,7 +13220,7 @@ 11.22 // These must follow all instruction definitions as they use the names 11.23 // defined in the instructions definitions. 11.24 // 11.25 -// peepmatch ( root_instr_name [preceeding_instruction]* ); 11.26 +// peepmatch ( root_instr_name [preceding_instruction]* ); 11.27 // 11.28 // peepconstraint %{ 11.29 // (instruction_number.operand_name relational_op instruction_number.operand_name
12.1 --- a/src/cpu/x86/vm/x86_64.ad Fri Feb 27 08:34:19 2009 -0800 12.2 +++ b/src/cpu/x86/vm/x86_64.ad Fri Feb 27 13:27:09 2009 -0800 12.3 @@ -5483,7 +5483,7 @@ 12.4 12.5 //----------OPERAND CLASSES---------------------------------------------------- 12.6 // Operand Classes are groups of operands that are used as to simplify 12.7 -// instruction definitions by not requiring the AD writer to specify seperate 12.8 +// instruction definitions by not requiring the AD writer to specify separate 12.9 // instructions for every form of operand when the instruction accepts 12.10 // multiple operand types with the same basic encoding and format. The classic 12.11 // case of this is memory operands. 12.12 @@ -8363,7 +8363,7 @@ 12.13 //----------- DivL-By-Constant-Expansions-------------------------------------- 12.14 // DivI cases are handled by the compiler 12.15 12.16 -// Magic constant, reciprical of 10 12.17 +// Magic constant, reciprocal of 10 12.18 instruct loadConL_0x6666666666666667(rRegL dst) 12.19 %{ 12.20 effect(DEF dst); 12.21 @@ -12082,7 +12082,7 @@ 12.22 // These must follow all instruction definitions as they use the names 12.23 // defined in the instructions definitions. 12.24 // 12.25 -// peepmatch ( root_instr_name [precerding_instruction]* ); 12.26 +// peepmatch ( root_instr_name [preceding_instruction]* ); 12.27 // 12.28 // peepconstraint %{ 12.29 // (instruction_number.operand_name relational_op instruction_number.operand_name
13.1 --- a/src/os/linux/launcher/java.c Fri Feb 27 08:34:19 2009 -0800 13.2 +++ b/src/os/linux/launcher/java.c Fri Feb 27 13:27:09 2009 -0800 13.3 @@ -419,7 +419,7 @@ 13.4 goto leave; 13.5 } 13.6 mainClass = LoadClass(env, classname); 13.7 - if(mainClass == NULL) { /* exception occured */ 13.8 + if(mainClass == NULL) { /* exception occurred */ 13.9 ReportExceptionDescription(env); 13.10 message = "Could not find the main class. Program will exit."; 13.11 goto leave; 13.12 @@ -441,7 +441,7 @@ 13.13 goto leave; 13.14 } 13.15 mainClass = LoadClass(env, classname); 13.16 - if(mainClass == NULL) { /* exception occured */ 13.17 + if(mainClass == NULL) { /* exception occurred */ 13.18 ReportExceptionDescription(env); 13.19 message = "Could not find the main class. Program will exit."; 13.20 goto leave;
14.1 --- a/src/os/linux/launcher/java_md.h Fri Feb 27 08:34:19 2009 -0800 14.2 +++ b/src/os/linux/launcher/java_md.h Fri Feb 27 13:27:09 2009 -0800 14.3 @@ -47,7 +47,7 @@ 14.4 #ifdef JAVA_ARGS 14.5 /* 14.6 * ApplicationHome is prepended to each of these entries; the resulting 14.7 - * strings are concatenated (seperated by PATH_SEPARATOR) and used as the 14.8 + * strings are concatenated (separated by PATH_SEPARATOR) and used as the 14.9 * value of -cp option to the launcher. 14.10 */ 14.11 #ifndef APP_CLASSPATH
15.1 --- a/src/os/linux/vm/perfMemory_linux.cpp Fri Feb 27 08:34:19 2009 -0800 15.2 +++ b/src/os/linux/vm/perfMemory_linux.cpp Fri Feb 27 13:27:09 2009 -0800 15.3 @@ -192,7 +192,7 @@ 15.4 // check if the given path is considered a secure directory for 15.5 // the backing store files. Returns true if the directory exists 15.6 // and is considered a secure location. Returns false if the path 15.7 -// is a symbolic link or if an error occured. 15.8 +// is a symbolic link or if an error occurred. 15.9 // 15.10 static bool is_directory_secure(const char* path) { 15.11 struct stat statbuf;
16.1 --- a/src/os/solaris/launcher/java.c Fri Feb 27 08:34:19 2009 -0800 16.2 +++ b/src/os/solaris/launcher/java.c Fri Feb 27 13:27:09 2009 -0800 16.3 @@ -419,7 +419,7 @@ 16.4 goto leave; 16.5 } 16.6 mainClass = LoadClass(env, classname); 16.7 - if(mainClass == NULL) { /* exception occured */ 16.8 + if(mainClass == NULL) { /* exception occurred */ 16.9 ReportExceptionDescription(env); 16.10 message = "Could not find the main class. Program will exit."; 16.11 goto leave; 16.12 @@ -441,7 +441,7 @@ 16.13 goto leave; 16.14 } 16.15 mainClass = LoadClass(env, classname); 16.16 - if(mainClass == NULL) { /* exception occured */ 16.17 + if(mainClass == NULL) { /* exception occurred */ 16.18 ReportExceptionDescription(env); 16.19 message = "Could not find the main class. Program will exit."; 16.20 goto leave;
17.1 --- a/src/os/solaris/launcher/java_md.h Fri Feb 27 08:34:19 2009 -0800 17.2 +++ b/src/os/solaris/launcher/java_md.h Fri Feb 27 13:27:09 2009 -0800 17.3 @@ -47,7 +47,7 @@ 17.4 #ifdef JAVA_ARGS 17.5 /* 17.6 * ApplicationHome is prepended to each of these entries; the resulting 17.7 - * strings are concatenated (seperated by PATH_SEPARATOR) and used as the 17.8 + * strings are concatenated (separated by PATH_SEPARATOR) and used as the 17.9 * value of -cp option to the launcher. 17.10 */ 17.11 #ifndef APP_CLASSPATH
18.1 --- a/src/os/solaris/vm/perfMemory_solaris.cpp Fri Feb 27 08:34:19 2009 -0800 18.2 +++ b/src/os/solaris/vm/perfMemory_solaris.cpp Fri Feb 27 13:27:09 2009 -0800 18.3 @@ -194,7 +194,7 @@ 18.4 // check if the given path is considered a secure directory for 18.5 // the backing store files. Returns true if the directory exists 18.6 // and is considered a secure location. Returns false if the path 18.7 -// is a symbolic link or if an error occured. 18.8 +// is a symbolic link or if an error occurred. 18.9 // 18.10 static bool is_directory_secure(const char* path) { 18.11 struct stat statbuf;
19.1 --- a/src/os/windows/vm/perfMemory_windows.cpp Fri Feb 27 08:34:19 2009 -0800 19.2 +++ b/src/os/windows/vm/perfMemory_windows.cpp Fri Feb 27 13:27:09 2009 -0800 19.3 @@ -195,7 +195,7 @@ 19.4 // check if the given path is considered a secure directory for 19.5 // the backing store files. Returns true if the directory exists 19.6 // and is considered a secure location. Returns false if the path 19.7 -// is a symbolic link or if an error occured. 19.8 +// is a symbolic link or if an error occurred. 19.9 // 19.10 static bool is_directory_secure(const char* path) { 19.11 19.12 @@ -994,7 +994,7 @@ 19.13 return false; 19.14 } 19.15 19.16 - // if running on windows 2000 or later, set the automatic inheritence 19.17 + // if running on windows 2000 or later, set the automatic inheritance 19.18 // control flags. 19.19 SetSecurityDescriptorControlFnPtr _SetSecurityDescriptorControl; 19.20 _SetSecurityDescriptorControl = (SetSecurityDescriptorControlFnPtr) 19.21 @@ -1002,7 +1002,7 @@ 19.22 "SetSecurityDescriptorControl"); 19.23 19.24 if (_SetSecurityDescriptorControl != NULL) { 19.25 - // We do not want to further propogate inherited DACLs, so making them 19.26 + // We do not want to further propagate inherited DACLs, so making them 19.27 // protected prevents that. 19.28 if (!_SetSecurityDescriptorControl(pSD, SE_DACL_PROTECTED, 19.29 SE_DACL_PROTECTED)) {
20.1 --- a/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp Fri Feb 27 08:34:19 2009 -0800 20.2 +++ b/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp Fri Feb 27 13:27:09 2009 -0800 20.3 @@ -532,7 +532,7 @@ 20.4 if (oldAct.sa_sigaction != signalHandler) { 20.5 void* sighand = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction) 20.6 : CAST_FROM_FN_PTR(void*, oldAct.sa_handler); 20.7 - warning("Unexpected Signal %d occured under user-defined signal handler " INTPTR_FORMAT, sig, (intptr_t)sighand); 20.8 + warning("Unexpected Signal %d occurred under user-defined signal handler " INTPTR_FORMAT, sig, (intptr_t)sighand); 20.9 } 20.10 } 20.11
21.1 --- a/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp Fri Feb 27 08:34:19 2009 -0800 21.2 +++ b/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp Fri Feb 27 13:27:09 2009 -0800 21.3 @@ -694,7 +694,7 @@ 21.4 if (oldAct.sa_sigaction != signalHandler) { 21.5 void* sighand = oldAct.sa_sigaction ? CAST_FROM_FN_PTR(void*, oldAct.sa_sigaction) 21.6 : CAST_FROM_FN_PTR(void*, oldAct.sa_handler); 21.7 - warning("Unexpected Signal %d occured under user-defined signal handler %#lx", sig, (long)sighand); 21.8 + warning("Unexpected Signal %d occurred under user-defined signal handler %#lx", sig, (long)sighand); 21.9 } 21.10 } 21.11
22.1 --- a/src/share/tools/MakeDeps/Database.java Fri Feb 27 08:34:19 2009 -0800 22.2 +++ b/src/share/tools/MakeDeps/Database.java Fri Feb 27 13:27:09 2009 -0800 22.3 @@ -365,7 +365,7 @@ 22.4 22.5 // HACK ALERT. The compilation of ad_<arch> files is very slow. 22.6 // We want to start compiling them as early as possible. The compilation 22.7 - // order on unix is dependant on the order we emit files here. 22.8 + // order on unix is dependent on the order we emit files here. 22.9 // By sorting the output before emitting it, we expect 22.10 // that ad_<arch> will be compiled early. 22.11 boolean shouldSortObjFiles = true;
23.1 --- a/src/share/vm/adlc/Doc/Syntax.doc Fri Feb 27 08:34:19 2009 -0800 23.2 +++ b/src/share/vm/adlc/Doc/Syntax.doc Fri Feb 27 13:27:09 2009 -0800 23.3 @@ -88,7 +88,7 @@ 23.4 // these are used for constraints, etc. 23.5 23.6 alloc_class class1(AX, BX); // form an allocation class of registers 23.7 - // used by the register allocator for seperate 23.8 + // used by the register allocator for separate 23.9 // allocation of target register classes 23.10 23.11 3. Pipeline Syntax for Scheduling 23.12 @@ -150,7 +150,7 @@ 23.13 b. %} (block terminator) 23.14 c. EOF (file terminator) 23.15 23.16 - 4. Each statement must start on a seperate line 23.17 + 4. Each statement must start on a separate line 23.18 23.19 5. Identifiers cannot contain: (){}%;,"/\ 23.20
24.1 --- a/src/share/vm/adlc/adlparse.cpp Fri Feb 27 08:34:19 2009 -0800 24.2 +++ b/src/share/vm/adlc/adlparse.cpp Fri Feb 27 13:27:09 2009 -0800 24.3 @@ -4555,7 +4555,7 @@ 24.4 24.5 //---------------------------ensure_start_of_line------------------------------ 24.6 // A preprocessor directive has been encountered. Be sure it has fallen at 24.7 -// the begining of a line, or else report an error. 24.8 +// the beginning of a line, or else report an error. 24.9 void ADLParser::ensure_start_of_line(void) { 24.10 if (_curchar == '\n') { next_line(); return; } 24.11 assert( _ptr >= _curline && _ptr < _curline+strlen(_curline),
25.1 --- a/src/share/vm/adlc/dict2.cpp Fri Feb 27 08:34:19 2009 -0800 25.2 +++ b/src/share/vm/adlc/dict2.cpp Fri Feb 27 13:27:09 2009 -0800 25.3 @@ -275,7 +275,7 @@ 25.4 // Convert string to hash key. This algorithm implements a universal hash 25.5 // function with the multipliers frozen (ok, so it's not universal). The 25.6 // multipliers (and allowable characters) are all odd, so the resultant sum 25.7 -// is odd - guarenteed not divisible by any power of two, so the hash tables 25.8 +// is odd - guaranteed not divisible by any power of two, so the hash tables 25.9 // can be any power of two with good results. Also, I choose multipliers 25.10 // that have only 2 bits set (the low is always set to be odd) so 25.11 // multiplication requires only shifts and adds. Characters are required to 25.12 @@ -296,7 +296,7 @@ 25.13 } 25.14 25.15 //------------------------------hashptr-------------------------------------- 25.16 -// Slimey cheap hash function; no guarenteed performance. Better than the 25.17 +// Slimey cheap hash function; no guaranteed performance. Better than the 25.18 // default for pointers, especially on MS-DOS machines. 25.19 int hashptr(const void *key) { 25.20 #ifdef __TURBOC__ 25.21 @@ -306,7 +306,7 @@ 25.22 #endif 25.23 } 25.24 25.25 -// Slimey cheap hash function; no guarenteed performance. 25.26 +// Slimey cheap hash function; no guaranteed performance. 25.27 int hashkey(const void *key) { 25.28 return (int)((intptr_t)key); 25.29 }
26.1 --- a/src/share/vm/adlc/dict2.hpp Fri Feb 27 08:34:19 2009 -0800 26.2 +++ b/src/share/vm/adlc/dict2.hpp Fri Feb 27 13:27:09 2009 -0800 26.3 @@ -89,10 +89,10 @@ 26.4 26.5 // Hashing functions 26.6 int hashstr(const void *s); // Nice string hash 26.7 -// Slimey cheap hash function; no guarenteed performance. Better than the 26.8 +// Slimey cheap hash function; no guaranteed performance. Better than the 26.9 // default for pointers, especially on MS-DOS machines. 26.10 int hashptr(const void *key); 26.11 -// Slimey cheap hash function; no guarenteed performance. 26.12 +// Slimey cheap hash function; no guaranteed performance. 26.13 int hashkey(const void *key); 26.14 26.15 // Key comparators
27.1 --- a/src/share/vm/adlc/filebuff.cpp Fri Feb 27 08:34:19 2009 -0800 27.2 +++ b/src/share/vm/adlc/filebuff.cpp Fri Feb 27 13:27:09 2009 -0800 27.3 @@ -50,10 +50,10 @@ 27.4 file_error(SEMERR, 0, "Buffer allocation failed\n"); 27.5 exit(1); // Exit on allocation failure 27.6 } 27.7 - *_bigbuf = '\n'; // Lead with a sentinal newline 27.8 - _buf = _bigbuf+1; // Skip sentinal 27.9 + *_bigbuf = '\n'; // Lead with a sentinel newline 27.10 + _buf = _bigbuf+1; // Skip sentinel 27.11 _bufmax = _buf; // Buffer is empty 27.12 - _bufeol = _bigbuf; // _bufeol points at sentinal 27.13 + _bufeol = _bigbuf; // _bufeol points at sentinel 27.14 _filepos = -1; // filepos is in sync with _bufeol 27.15 _bufoff = _offset = 0L; // Offset at file start 27.16 27.17 @@ -62,8 +62,8 @@ 27.18 file_error(SEMERR, 0, "File read error, no input read\n"); 27.19 exit(1); // Exit on read error 27.20 } 27.21 - *_bufmax = '\n'; // End with a sentinal new-line 27.22 - *(_bufmax+1) = '\0'; // Then end with a sentinal NULL 27.23 + *_bufmax = '\n'; // End with a sentinel new-line 27.24 + *(_bufmax+1) = '\0'; // Then end with a sentinel NULL 27.25 } 27.26 27.27 //------------------------------~FileBuff-------------------------------------- 27.28 @@ -81,7 +81,7 @@ 27.29 27.30 _linenum++; 27.31 retval = ++_bufeol; // return character following end of previous line 27.32 - if (*retval == '\0') return NULL; // Check for EOF sentinal 27.33 + if (*retval == '\0') return NULL; // Check for EOF sentinel 27.34 // Search for newline character which must end each line 27.35 for(_filepos++; *_bufeol != '\n'; _bufeol++) 27.36 _filepos++; // keep filepos in sync with _bufeol
28.1 --- a/src/share/vm/adlc/filebuff.hpp Fri Feb 27 08:34:19 2009 -0800 28.2 +++ b/src/share/vm/adlc/filebuff.hpp Fri Feb 27 13:27:09 2009 -0800 28.3 @@ -37,7 +37,7 @@ 28.4 28.5 //------------------------------FileBuff-------------------------------------- 28.6 // This class defines a nicely behaved buffer of text. Entire file of text 28.7 -// is read into buffer at creation, with sentinals at start and end. 28.8 +// is read into buffer at creation, with sentinels at start and end. 28.9 class FileBuff { 28.10 friend class FileBuffRegion; 28.11 private: 28.12 @@ -46,8 +46,8 @@ 28.13 long _bufoff; // Start of buffer file offset 28.14 28.15 char *_buf; // The buffer itself. 28.16 - char *_bigbuf; // The buffer plus sentinals; actual heap area 28.17 - char *_bufmax; // A pointer to the buffer end sentinal 28.18 + char *_bigbuf; // The buffer plus sentinels; actual heap area 28.19 + char *_bufmax; // A pointer to the buffer end sentinel 28.20 char *_bufeol; // A pointer to the last complete line end 28.21 28.22 int _err; // Error flag for file seek/read operations
29.1 --- a/src/share/vm/adlc/formssel.cpp Fri Feb 27 08:34:19 2009 -0800 29.2 +++ b/src/share/vm/adlc/formssel.cpp Fri Feb 27 13:27:09 2009 -0800 29.3 @@ -1281,7 +1281,7 @@ 29.4 _num_uniq = num_uniq; 29.5 } 29.6 29.7 -// Generate index values needed for determing the operand position 29.8 +// Generate index values needed for determining the operand position 29.9 void InstructForm::index_temps(FILE *fp, FormDict &globals, const char *prefix, const char *receiver) { 29.10 uint idx = 0; // position of operand in match rule 29.11 int cur_num_opnds = num_opnds(); 29.12 @@ -2197,7 +2197,7 @@ 29.13 // Return zero-based position in component list, only counting constants; 29.14 // Return -1 if not in list. 29.15 int OperandForm::constant_position(FormDict &globals, const Component *last) { 29.16 - // Iterate through components and count constants preceeding 'constant' 29.17 + // Iterate through components and count constants preceding 'constant' 29.18 int position = 0; 29.19 Component *comp; 29.20 _components.reset(); 29.21 @@ -2235,7 +2235,7 @@ 29.22 // Return zero-based position in component list, only counting constants; 29.23 // Return -1 if not in list. 29.24 int OperandForm::register_position(FormDict &globals, const char *reg_name) { 29.25 - // Iterate through components and count registers preceeding 'last' 29.26 + // Iterate through components and count registers preceding 'last' 29.27 uint position = 0; 29.28 Component *comp; 29.29 _components.reset();
30.1 --- a/src/share/vm/adlc/formssel.hpp Fri Feb 27 08:34:19 2009 -0800 30.2 +++ b/src/share/vm/adlc/formssel.hpp Fri Feb 27 13:27:09 2009 -0800 30.3 @@ -277,7 +277,7 @@ 30.4 // 30.5 // Generate the format call for the replacement variable 30.6 void rep_var_format(FILE *fp, const char *rep_var); 30.7 - // Generate index values needed for determing the operand position 30.8 + // Generate index values needed for determining the operand position 30.9 void index_temps (FILE *fp, FormDict &globals, const char *prefix = "", const char *receiver = ""); 30.10 // --------------------------- 30.11 30.12 @@ -344,7 +344,7 @@ 30.13 30.14 // --------------------------- Code Block 30.15 // Add code 30.16 - void add_code(const char *string_preceeding_replacement_var); 30.17 + void add_code(const char *string_preceding_replacement_var); 30.18 // Add a replacement variable or one of its subfields 30.19 // Subfields are stored with a leading '$' 30.20 void add_rep_var(char *replacement_var);
31.1 --- a/src/share/vm/adlc/output_h.cpp Fri Feb 27 08:34:19 2009 -0800 31.2 +++ b/src/share/vm/adlc/output_h.cpp Fri Feb 27 13:27:09 2009 -0800 31.3 @@ -574,7 +574,7 @@ 31.4 // Generate the user-defined portion of the format 31.5 if( inst._format ) { 31.6 // If there are replacement variables, 31.7 - // Generate index values needed for determing the operand position 31.8 + // Generate index values needed for determining the operand position 31.9 if( inst._format->_rep_vars.count() ) 31.10 inst.index_temps(fp, globals); 31.11
32.1 --- a/src/share/vm/asm/assembler.cpp Fri Feb 27 08:34:19 2009 -0800 32.2 +++ b/src/share/vm/asm/assembler.cpp Fri Feb 27 13:27:09 2009 -0800 32.3 @@ -31,7 +31,7 @@ 32.4 // The AbstractAssembler is generating code into a CodeBuffer. To make code generation faster, 32.5 // the assembler keeps a copy of the code buffers boundaries & modifies them when 32.6 // emitting bytes rather than using the code buffers accessor functions all the time. 32.7 -// The code buffer is updated via set_code_end(...) after emiting a whole instruction. 32.8 +// The code buffer is updated via set_code_end(...) after emitting a whole instruction. 32.9 32.10 AbstractAssembler::AbstractAssembler(CodeBuffer* code) { 32.11 if (code == NULL) return;
33.1 --- a/src/share/vm/asm/assembler.hpp Fri Feb 27 08:34:19 2009 -0800 33.2 +++ b/src/share/vm/asm/assembler.hpp Fri Feb 27 13:27:09 2009 -0800 33.3 @@ -22,7 +22,7 @@ 33.4 * 33.5 */ 33.6 33.7 -// This file contains platform-independant assembler declarations. 33.8 +// This file contains platform-independent assembler declarations. 33.9 33.10 class CodeBuffer; 33.11 class MacroAssembler;
34.1 --- a/src/share/vm/ci/ciTypeFlow.cpp Fri Feb 27 08:34:19 2009 -0800 34.2 +++ b/src/share/vm/ci/ciTypeFlow.cpp Fri Feb 27 13:27:09 2009 -0800 34.3 @@ -541,7 +541,7 @@ 34.4 // is report a value that will meet correctly with any downstream 34.5 // reference types on paths that will truly be executed. This null type 34.6 // meets with any reference type to yield that same reference type. 34.7 - // (The compiler will generate an unconditonal exception here.) 34.8 + // (The compiler will generate an unconditional exception here.) 34.9 push(null_type()); 34.10 return; 34.11 }
35.1 --- a/src/share/vm/classfile/symbolTable.cpp Fri Feb 27 08:34:19 2009 -0800 35.2 +++ b/src/share/vm/classfile/symbolTable.cpp Fri Feb 27 13:27:09 2009 -0800 35.3 @@ -156,7 +156,7 @@ 35.4 35.5 symbolOop test = lookup(index, (char*)name, len, hashValue); 35.6 if (test != NULL) { 35.7 - // A race occured and another thread introduced the symbol, this one 35.8 + // A race occurred and another thread introduced the symbol, this one 35.9 // will be dropped and collected. 35.10 return test; 35.11 } 35.12 @@ -193,7 +193,7 @@ 35.13 int index = hash_to_index(hashValues[i]); 35.14 symbolOop test = lookup(index, names[i], lengths[i], hashValues[i]); 35.15 if (test != NULL) { 35.16 - // A race occured and another thread introduced the symbol, this one 35.17 + // A race occurred and another thread introduced the symbol, this one 35.18 // will be dropped and collected. Use test instead. 35.19 cp->symbol_at_put(cp_indices[i], test); 35.20 } else {
36.1 --- a/src/share/vm/code/nmethod.cpp Fri Feb 27 08:34:19 2009 -0800 36.2 +++ b/src/share/vm/code/nmethod.cpp Fri Feb 27 13:27:09 2009 -0800 36.3 @@ -380,7 +380,7 @@ 36.4 void nmethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) { 36.5 // There are potential race conditions during exception cache updates, so we 36.6 // must own the ExceptionCache_lock before doing ANY modifications. Because 36.7 - // we dont lock during reads, it is possible to have several threads attempt 36.8 + // we don't lock during reads, it is possible to have several threads attempt 36.9 // to update the cache with the same data. We need to check for already inserted 36.10 // copies of the current data before adding it. 36.11
37.1 --- a/src/share/vm/code/nmethod.hpp Fri Feb 27 08:34:19 2009 -0800 37.2 +++ b/src/share/vm/code/nmethod.hpp Fri Feb 27 13:27:09 2009 -0800 37.3 @@ -167,7 +167,7 @@ 37.4 nmFlags flags; // various flags to keep track of nmethod state 37.5 bool _markedForDeoptimization; // Used for stack deoptimization 37.6 enum { alive = 0, 37.7 - not_entrant = 1, // uncommon trap has happend but activations may still exist 37.8 + not_entrant = 1, // uncommon trap has happened but activations may still exist 37.9 zombie = 2, 37.10 unloaded = 3 }; 37.11
38.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp Fri Feb 27 08:34:19 2009 -0800 38.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsAdaptiveSizePolicy.hpp Fri Feb 27 13:27:09 2009 -0800 38.3 @@ -393,7 +393,7 @@ 38.4 // Restarts the concurrent phases timer. 38.5 void concurrent_phases_resume(); 38.6 38.7 - // Time begining and end of the marking phase for 38.8 + // Time beginning and end of the marking phase for 38.9 // a synchronous MS collection. A MS collection 38.10 // that finishes in the foreground can have started 38.11 // in the background. These methods capture the
39.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp Fri Feb 27 08:34:19 2009 -0800 39.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsGCAdaptivePolicyCounters.hpp Fri Feb 27 13:27:09 2009 -0800 39.3 @@ -69,7 +69,7 @@ 39.4 // end of the sweep of the tenured generation. 39.5 PerfVariable* _avg_cms_free_counter; 39.6 // Average of the free space in the tenured generation at the 39.7 - // start of the sweep of the tenured genertion. 39.8 + // start of the sweep of the tenured generation. 39.9 PerfVariable* _avg_cms_free_at_sweep_counter; 39.10 // Average of the free space in the tenured generation at the 39.11 // after any resizing of the tenured generation at the end
40.1 --- a/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Fri Feb 27 08:34:19 2009 -0800 40.2 +++ b/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp Fri Feb 27 13:27:09 2009 -0800 40.3 @@ -4178,7 +4178,7 @@ 40.4 // and is deferred for now; see CR# TBF. 07252005YSR. XXX 40.5 assert(!CMSAbortSemantics || tsk.aborted(), "Inconsistency"); 40.6 // If _restart_addr is non-NULL, a marking stack overflow 40.7 - // occured; we need to do a fresh marking iteration from the 40.8 + // occurred; we need to do a fresh marking iteration from the 40.9 // indicated restart address. 40.10 if (_foregroundGCIsActive && asynch) { 40.11 // We may be running into repeated stack overflows, having 40.12 @@ -4221,7 +4221,7 @@ 40.13 // should be incremental with periodic yields. 40.14 _markBitMap.iterate(&markFromRootsClosure); 40.15 // If _restart_addr is non-NULL, a marking stack overflow 40.16 - // occured; we need to do a fresh iteration from the 40.17 + // occurred; we need to do a fresh iteration from the 40.18 // indicated restart address. 40.19 while (_restart_addr != NULL) { 40.20 if (_foregroundGCIsActive && asynch) {
41.1 --- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Fri Feb 27 08:34:19 2009 -0800 41.2 +++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp Fri Feb 27 13:27:09 2009 -0800 41.3 @@ -2513,7 +2513,7 @@ 41.4 } 41.5 save_marks(); 41.6 41.7 - // We must do this before any possible evacuation that should propogate 41.8 + // We must do this before any possible evacuation that should propagate 41.9 // marks, including evacuation of popular objects in a popular pause. 41.10 if (mark_in_progress()) { 41.11 double start_time_sec = os::elapsedTime();
42.1 --- a/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp Fri Feb 27 08:34:19 2009 -0800 42.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/cardTableExtension.cpp Fri Feb 27 13:27:09 2009 -0800 42.3 @@ -78,7 +78,7 @@ 42.4 } 42.5 42.6 // Card marks are not precise. The current system can leave us with 42.7 - // a mismash of precise marks and begining of object marks. This means 42.8 + // a mishmash of precise marks and beginning of object marks. This means 42.9 // we test for missing precise marks first. If any are found, we don't 42.10 // fail unless the object head is also unmarked. 42.11 virtual void do_object(oop obj) { 42.12 @@ -258,7 +258,7 @@ 42.13 if (!start_array->object_starts_in_range(slice_start, slice_end)) { 42.14 continue; 42.15 } 42.16 - // Update our begining addr 42.17 + // Update our beginning addr 42.18 HeapWord* first_object = start_array->object_start(slice_start); 42.19 debug_only(oop* first_object_within_slice = (oop*) first_object;) 42.20 if (first_object < slice_start) {
43.1 --- a/src/share/vm/gc_implementation/parallelScavenge/objectStartArray.hpp Fri Feb 27 08:34:19 2009 -0800 43.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/objectStartArray.hpp Fri Feb 27 13:27:09 2009 -0800 43.3 @@ -127,7 +127,7 @@ 43.4 // Optimized for finding the first object that crosses into 43.5 // a given block. The blocks contain the offset of the last 43.6 // object in that block. Scroll backwards by one, and the first 43.7 - // object hit should be at the begining of the block 43.8 + // object hit should be at the beginning of the block 43.9 HeapWord* object_start(HeapWord* addr) const { 43.10 assert(_covered_region.contains(addr), "Must be in covered region"); 43.11 jbyte* block = block_for_addr(addr);
44.1 --- a/src/share/vm/gc_implementation/parallelScavenge/prefetchQueue.hpp Fri Feb 27 08:34:19 2009 -0800 44.2 +++ b/src/share/vm/gc_implementation/parallelScavenge/prefetchQueue.hpp Fri Feb 27 13:27:09 2009 -0800 44.3 @@ -26,7 +26,7 @@ 44.4 // PrefetchQueue is a FIFO queue of variable length (currently 8). 44.5 // 44.6 // We need to examine the performance penalty of variable lengths. 44.7 -// We may also want to split this into cpu dependant bits. 44.8 +// We may also want to split this into cpu dependent bits. 44.9 // 44.10 44.11 const int PREFETCH_QUEUE_SIZE = 8;
45.1 --- a/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp Fri Feb 27 08:34:19 2009 -0800 45.2 +++ b/src/share/vm/gc_implementation/shared/mutableNUMASpace.cpp Fri Feb 27 13:27:09 2009 -0800 45.3 @@ -74,7 +74,7 @@ 45.4 for (int i = 0; i < lgrp_spaces()->length(); i++) { 45.5 LGRPSpace *ls = lgrp_spaces()->at(i); 45.6 MutableSpace *s = ls->space(); 45.7 - if (s->top() < top()) { // For all spaces preceeding the one containing top() 45.8 + if (s->top() < top()) { // For all spaces preceding the one containing top() 45.9 if (s->free_in_words() > 0) { 45.10 size_t area_touched_words = pointer_delta(s->end(), s->top()); 45.11 CollectedHeap::fill_with_object(s->top(), area_touched_words);
46.1 --- a/src/share/vm/interpreter/abstractInterpreter.hpp Fri Feb 27 08:34:19 2009 -0800 46.2 +++ b/src/share/vm/interpreter/abstractInterpreter.hpp Fri Feb 27 13:27:09 2009 -0800 46.3 @@ -22,7 +22,7 @@ 46.4 * 46.5 */ 46.6 46.7 -// This file contains the platform-independant parts 46.8 +// This file contains the platform-independent parts 46.9 // of the abstract interpreter and the abstract interpreter generator. 46.10 46.11 // Organization of the interpreter(s). There exists two different interpreters in hotpot
47.1 --- a/src/share/vm/interpreter/bytecodeInterpreter.cpp Fri Feb 27 08:34:19 2009 -0800 47.2 +++ b/src/share/vm/interpreter/bytecodeInterpreter.cpp Fri Feb 27 13:27:09 2009 -0800 47.3 @@ -2642,7 +2642,7 @@ 47.4 // two interpreted frames). We need to save the current arguments in C heap so that 47.5 // the deoptimized frame when it restarts can copy the arguments to its expression 47.6 // stack and re-execute the call. We also have to notify deoptimization that this 47.7 - // has occured and to pick the preerved args copy them to the deoptimized frame's 47.8 + // has occurred and to pick the preserved args copy them to the deoptimized frame's 47.9 // java expression stack. Yuck. 47.10 // 47.11 THREAD->popframe_preserve_args(in_ByteSize(METHOD->size_of_parameters() * wordSize),
48.1 --- a/src/share/vm/interpreter/bytecodeInterpreter.inline.hpp Fri Feb 27 08:34:19 2009 -0800 48.2 +++ b/src/share/vm/interpreter/bytecodeInterpreter.inline.hpp Fri Feb 27 13:27:09 2009 -0800 48.3 @@ -22,7 +22,7 @@ 48.4 * 48.5 */ 48.6 48.7 -// This file holds platform-independant bodies of inline functions for the C++ based interpreter 48.8 +// This file holds platform-independent bodies of inline functions for the C++ based interpreter 48.9 48.10 #ifdef CC_INTERP 48.11
49.1 --- a/src/share/vm/interpreter/cppInterpreter.hpp Fri Feb 27 08:34:19 2009 -0800 49.2 +++ b/src/share/vm/interpreter/cppInterpreter.hpp Fri Feb 27 13:27:09 2009 -0800 49.3 @@ -24,7 +24,7 @@ 49.4 49.5 #ifdef CC_INTERP 49.6 49.7 -// This file contains the platform-independant parts 49.8 +// This file contains the platform-independent parts 49.9 // of the c++ interpreter 49.10 49.11 class CppInterpreter: public AbstractInterpreter {
50.1 --- a/src/share/vm/interpreter/cppInterpreterGenerator.hpp Fri Feb 27 08:34:19 2009 -0800 50.2 +++ b/src/share/vm/interpreter/cppInterpreterGenerator.hpp Fri Feb 27 13:27:09 2009 -0800 50.3 @@ -22,7 +22,7 @@ 50.4 * 50.5 */ 50.6 50.7 -// This file contains the platform-independant parts 50.8 +// This file contains the platform-independent parts 50.9 // of the template interpreter generator. 50.10 50.11 #ifdef CC_INTERP
51.1 --- a/src/share/vm/interpreter/interpreter.hpp Fri Feb 27 08:34:19 2009 -0800 51.2 +++ b/src/share/vm/interpreter/interpreter.hpp Fri Feb 27 13:27:09 2009 -0800 51.3 @@ -22,7 +22,7 @@ 51.4 * 51.5 */ 51.6 51.7 -// This file contains the platform-independant parts 51.8 +// This file contains the platform-independent parts 51.9 // of the interpreter and the interpreter generator. 51.10 51.11 //------------------------------------------------------------------------------------------------------------------------
52.1 --- a/src/share/vm/interpreter/interpreterGenerator.hpp Fri Feb 27 08:34:19 2009 -0800 52.2 +++ b/src/share/vm/interpreter/interpreterGenerator.hpp Fri Feb 27 13:27:09 2009 -0800 52.3 @@ -22,7 +22,7 @@ 52.4 * 52.5 */ 52.6 52.7 -// This file contains the platform-independant parts 52.8 +// This file contains the platform-independent parts 52.9 // of the interpreter generator. 52.10 52.11
53.1 --- a/src/share/vm/interpreter/templateInterpreter.hpp Fri Feb 27 08:34:19 2009 -0800 53.2 +++ b/src/share/vm/interpreter/templateInterpreter.hpp Fri Feb 27 13:27:09 2009 -0800 53.3 @@ -22,7 +22,7 @@ 53.4 * 53.5 */ 53.6 53.7 -// This file contains the platform-independant parts 53.8 +// This file contains the platform-independent parts 53.9 // of the template interpreter and the template interpreter generator. 53.10 53.11 #ifndef CC_INTERP
54.1 --- a/src/share/vm/interpreter/templateInterpreterGenerator.hpp Fri Feb 27 08:34:19 2009 -0800 54.2 +++ b/src/share/vm/interpreter/templateInterpreterGenerator.hpp Fri Feb 27 13:27:09 2009 -0800 54.3 @@ -22,7 +22,7 @@ 54.4 * 54.5 */ 54.6 54.7 -// This file contains the platform-independant parts 54.8 +// This file contains the platform-independent parts 54.9 // of the template interpreter generator. 54.10 54.11 #ifndef CC_INTERP
55.1 --- a/src/share/vm/libadt/dict.cpp Fri Feb 27 08:34:19 2009 -0800 55.2 +++ b/src/share/vm/libadt/dict.cpp Fri Feb 27 13:27:09 2009 -0800 55.3 @@ -306,7 +306,7 @@ 55.4 // Convert string to hash key. This algorithm implements a universal hash 55.5 // function with the multipliers frozen (ok, so it's not universal). The 55.6 // multipliers (and allowable characters) are all odd, so the resultant sum 55.7 -// is odd - guarenteed not divisible by any power of two, so the hash tables 55.8 +// is odd - guaranteed not divisible by any power of two, so the hash tables 55.9 // can be any power of two with good results. Also, I choose multipliers 55.10 // that have only 2 bits set (the low is always set to be odd) so 55.11 // multiplication requires only shifts and adds. Characters are required to 55.12 @@ -326,7 +326,7 @@ 55.13 } 55.14 55.15 //------------------------------hashptr-------------------------------------- 55.16 -// Slimey cheap hash function; no guarenteed performance. Better than the 55.17 +// Slimey cheap hash function; no guaranteed performance. Better than the 55.18 // default for pointers, especially on MS-DOS machines. 55.19 int hashptr(const void *key) { 55.20 #ifdef __TURBOC__ 55.21 @@ -336,7 +336,7 @@ 55.22 #endif 55.23 } 55.24 55.25 -// Slimey cheap hash function; no guarenteed performance. 55.26 +// Slimey cheap hash function; no guaranteed performance. 55.27 int hashkey(const void *key) { 55.28 return (intptr_t)key; 55.29 }
56.1 --- a/src/share/vm/libadt/dict.hpp Fri Feb 27 08:34:19 2009 -0800 56.2 +++ b/src/share/vm/libadt/dict.hpp Fri Feb 27 13:27:09 2009 -0800 56.3 @@ -86,10 +86,10 @@ 56.4 56.5 // Hashing functions 56.6 int hashstr(const void *s); // Nice string hash 56.7 -// Slimey cheap hash function; no guarenteed performance. Better than the 56.8 +// Slimey cheap hash function; no guaranteed performance. Better than the 56.9 // default for pointers, especially on MS-DOS machines. 56.10 int hashptr(const void *key); 56.11 -// Slimey cheap hash function; no guarenteed performance. 56.12 +// Slimey cheap hash function; no guaranteed performance. 56.13 int hashkey(const void *key); 56.14 56.15 // Key comparators
57.1 --- a/src/share/vm/memory/filemap.cpp Fri Feb 27 08:34:19 2009 -0800 57.2 +++ b/src/share/vm/memory/filemap.cpp Fri Feb 27 13:27:09 2009 -0800 57.3 @@ -35,14 +35,14 @@ 57.4 extern address JVM_FunctionAtStart(); 57.5 extern address JVM_FunctionAtEnd(); 57.6 57.7 -// Complain and stop. All error conditions occuring during the writing of 57.8 +// Complain and stop. All error conditions occurring during the writing of 57.9 // an archive file should stop the process. Unrecoverable errors during 57.10 // the reading of the archive file should stop the process. 57.11 57.12 static void fail(const char *msg, va_list ap) { 57.13 // This occurs very early during initialization: tty is not initialized. 57.14 jio_fprintf(defaultStream::error_stream(), 57.15 - "An error has occured while processing the" 57.16 + "An error has occurred while processing the" 57.17 " shared archive file.\n"); 57.18 jio_vfprintf(defaultStream::error_stream(), msg, ap); 57.19 jio_fprintf(defaultStream::error_stream(), "\n");
58.1 --- a/src/share/vm/memory/permGen.hpp Fri Feb 27 08:34:19 2009 -0800 58.2 +++ b/src/share/vm/memory/permGen.hpp Fri Feb 27 13:27:09 2009 -0800 58.3 @@ -36,7 +36,7 @@ 58.4 friend class VMStructs; 58.5 protected: 58.6 size_t _capacity_expansion_limit; // maximum expansion allowed without a 58.7 - // full gc occuring 58.8 + // full gc occurring 58.9 58.10 HeapWord* mem_allocate_in_gen(size_t size, Generation* gen); 58.11
59.1 --- a/src/share/vm/oops/generateOopMap.cpp Fri Feb 27 08:34:19 2009 -0800 59.2 +++ b/src/share/vm/oops/generateOopMap.cpp Fri Feb 27 13:27:09 2009 -0800 59.3 @@ -2003,7 +2003,7 @@ 59.4 // ============ Main Entry Point =========== 59.5 // 59.6 GenerateOopMap::GenerateOopMap(methodHandle method) { 59.7 - // We have to initialize all variables here, that can be queried direcly 59.8 + // We have to initialize all variables here, that can be queried directly 59.9 _method = method; 59.10 _max_locals=0; 59.11 _init_vars = NULL;
60.1 --- a/src/share/vm/oops/generateOopMap.hpp Fri Feb 27 08:34:19 2009 -0800 60.2 +++ b/src/share/vm/oops/generateOopMap.hpp Fri Feb 27 13:27:09 2009 -0800 60.3 @@ -292,7 +292,7 @@ 60.4 int _max_stack; // Cached value of max. stack depth 60.5 int _max_monitors; // Cached value of max. monitor stack depth 60.6 int _has_exceptions; // True, if exceptions exist for method 60.7 - bool _got_error; // True, if an error occured during interpretation. 60.8 + bool _got_error; // True, if an error occurred during interpretation. 60.9 Handle _exception; // Exception if got_error is true. 60.10 bool _did_rewriting; // was bytecodes rewritten 60.11 bool _did_relocation; // was relocation neccessary 60.12 @@ -422,7 +422,7 @@ 60.13 void add_to_ref_init_set (int localNo); 60.14 60.15 // Conflicts rewrite logic 60.16 - bool _conflict; // True, if a conflict occured during interpretation 60.17 + bool _conflict; // True, if a conflict occurred during interpretation 60.18 int _nof_refval_conflicts; // No. of conflicts that require rewrites 60.19 int * _new_var_map; 60.20
61.1 --- a/src/share/vm/oops/instanceKlass.cpp Fri Feb 27 08:34:19 2009 -0800 61.2 +++ b/src/share/vm/oops/instanceKlass.cpp Fri Feb 27 13:27:09 2009 -0800 61.3 @@ -1917,7 +1917,7 @@ 61.4 / itableOffsetEntry::size(); 61.5 61.6 for (int cnt = 0 ; ; cnt ++, ioe ++) { 61.7 - // If the interface isn't implemented by the reciever class, 61.8 + // If the interface isn't implemented by the receiver class, 61.9 // the VM should throw IncompatibleClassChangeError. 61.10 if (cnt >= nof_interfaces) { 61.11 THROW_OOP_0(vmSymbols::java_lang_IncompatibleClassChangeError());
62.1 --- a/src/share/vm/oops/klass.cpp Fri Feb 27 08:34:19 2009 -0800 62.2 +++ b/src/share/vm/oops/klass.cpp Fri Feb 27 13:27:09 2009 -0800 62.3 @@ -71,7 +71,7 @@ 62.4 return r; // Return the 1 concrete class 62.5 } 62.6 62.7 -// Find LCA in class heirarchy 62.8 +// Find LCA in class hierarchy 62.9 Klass *Klass::LCA( Klass *k2 ) { 62.10 Klass *k1 = this; 62.11 while( 1 ) {
63.1 --- a/src/share/vm/oops/klass.hpp Fri Feb 27 08:34:19 2009 -0800 63.2 +++ b/src/share/vm/oops/klass.hpp Fri Feb 27 13:27:09 2009 -0800 63.3 @@ -471,7 +471,7 @@ 63.4 } 63.5 bool search_secondary_supers(klassOop k) const; 63.6 63.7 - // Find LCA in class heirarchy 63.8 + // Find LCA in class hierarchy 63.9 Klass *LCA( Klass *k ); 63.10 63.11 // Check whether reflection/jni/jvm code is allowed to instantiate this class;
64.1 --- a/src/share/vm/oops/methodOop.hpp Fri Feb 27 08:34:19 2009 -0800 64.2 +++ b/src/share/vm/oops/methodOop.hpp Fri Feb 27 13:27:09 2009 -0800 64.3 @@ -296,7 +296,7 @@ 64.4 void set_compiled_invocation_count(int count) { _compiled_invocation_count = count; } 64.5 #endif // not PRODUCT 64.6 64.7 - // Clear (non-shared space) pointers which could not be relevent 64.8 + // Clear (non-shared space) pointers which could not be relevant 64.9 // if this (shared) method were mapped into another JVM. 64.10 void remove_unshareable_info(); 64.11
65.1 --- a/src/share/vm/opto/block.cpp Fri Feb 27 08:34:19 2009 -0800 65.2 +++ b/src/share/vm/opto/block.cpp Fri Feb 27 13:27:09 2009 -0800 65.3 @@ -181,7 +181,7 @@ 65.4 } 65.5 65.6 //------------------------------has_uncommon_code------------------------------ 65.7 -// Return true if the block's code implies that it is not likely to be 65.8 +// Return true if the block's code implies that it is likely to be 65.9 // executed infrequently. Check to see if the block ends in a Halt or 65.10 // a low probability call. 65.11 bool Block::has_uncommon_code() const { 65.12 @@ -1311,7 +1311,7 @@ 65.13 } 65.14 } else if (e->state() == CFGEdge::open) { 65.15 // Append traces, even without a fall-thru connection. 65.16 - // But leave root entry at the begining of the block list. 65.17 + // But leave root entry at the beginning of the block list. 65.18 if (targ_trace != trace(_cfg._broot)) { 65.19 e->set_state(CFGEdge::connected); 65.20 src_trace->append(targ_trace); 65.21 @@ -1434,7 +1434,7 @@ 65.22 } 65.23 65.24 // Backbranch to the top of a trace 65.25 - // Scroll foward through the trace from the targ_block. If we find 65.26 + // Scroll forward through the trace from the targ_block. If we find 65.27 // a loop head before another loop top, use the the loop head alignment. 65.28 for (Block *b = targ_block; b != NULL; b = next(b)) { 65.29 if (b->has_loop_alignment()) {
66.1 --- a/src/share/vm/opto/block.hpp Fri Feb 27 08:34:19 2009 -0800 66.2 +++ b/src/share/vm/opto/block.hpp Fri Feb 27 13:27:09 2009 -0800 66.3 @@ -609,7 +609,7 @@ 66.4 Block * next(Block *b) const { return _next_list[b->_pre_order]; } 66.5 void set_next(Block *b, Block *n) const { _next_list[b->_pre_order] = n; } 66.6 66.7 - // Return the block that preceeds "b" in the trace. 66.8 + // Return the block that precedes "b" in the trace. 66.9 Block * prev(Block *b) const { return _prev_list[b->_pre_order]; } 66.10 void set_prev(Block *b, Block *p) const { _prev_list[b->_pre_order] = p; } 66.11
67.1 --- a/src/share/vm/opto/buildOopMap.cpp Fri Feb 27 08:34:19 2009 -0800 67.2 +++ b/src/share/vm/opto/buildOopMap.cpp Fri Feb 27 13:27:09 2009 -0800 67.3 @@ -55,7 +55,7 @@ 67.4 // breadth-first approach but it was worse (showed O(n^2) in the 67.5 // pick-next-block code). 67.6 // 67.7 -// The relevent data is kept in a struct of arrays (it could just as well be 67.8 +// The relevant data is kept in a struct of arrays (it could just as well be 67.9 // an array of structs, but the struct-of-arrays is generally a little more 67.10 // efficient). The arrays are indexed by register number (including 67.11 // stack-slots as registers) and so is bounded by 200 to 300 elements in
68.1 --- a/src/share/vm/opto/cfgnode.cpp Fri Feb 27 08:34:19 2009 -0800 68.2 +++ b/src/share/vm/opto/cfgnode.cpp Fri Feb 27 13:27:09 2009 -0800 68.3 @@ -1350,7 +1350,7 @@ 68.4 } 68.5 68.6 // Register the new node but do not transform it. Cannot transform until the 68.7 - // entire Region/Phi conglerate has been hacked as a single huge transform. 68.8 + // entire Region/Phi conglomerate has been hacked as a single huge transform. 68.9 igvn->register_new_node_with_optimizer( newn ); 68.10 // Now I can point to the new node. 68.11 n->add_req(newn); 68.12 @@ -1381,7 +1381,7 @@ 68.13 Node *val = phi->in(i); // Constant to split for 68.14 uint hit = 0; // Number of times it occurs 68.15 68.16 - for( ; i < phi->req(); i++ ){ // Count occurances of constant 68.17 + for( ; i < phi->req(); i++ ){ // Count occurrences of constant 68.18 Node *n = phi->in(i); 68.19 if( !n ) return NULL; 68.20 if( phase->type(n) == Type::TOP ) return NULL; 68.21 @@ -1423,7 +1423,7 @@ 68.22 68.23 //============================================================================= 68.24 //------------------------------simple_data_loop_check------------------------- 68.25 -// Try to determing if the phi node in a simple safe/unsafe data loop. 68.26 +// Try to determine if the phi node in a simple safe/unsafe data loop. 68.27 // Returns: 68.28 // enum LoopSafety { Safe = 0, Unsafe, UnsafeLoop }; 68.29 // Safe - safe case when the phi and it's inputs reference only safe data 68.30 @@ -1687,7 +1687,7 @@ 68.31 progress = phase->C->top(); 68.32 break; 68.33 } 68.34 - // If tranformed to a MergeMem, get the desired slice 68.35 + // If transformed to a MergeMem, get the desired slice 68.36 // Otherwise the returned node represents memory for every slice 68.37 Node *new_mem = (m->is_MergeMem()) ? 
68.38 m->as_MergeMem()->memory_at(alias_idx) : m; 68.39 @@ -1962,7 +1962,7 @@ 68.40 f[CatchProjNode::fall_through_index] = Type::TOP; 68.41 } else if( call->req() > TypeFunc::Parms ) { 68.42 const Type *arg0 = phase->type( call->in(TypeFunc::Parms) ); 68.43 - // Check for null reciever to virtual or interface calls 68.44 + // Check for null receiver to virtual or interface calls 68.45 if( call->is_CallDynamicJava() && 68.46 arg0->higher_equal(TypePtr::NULL_PTR) ) { 68.47 f[CatchProjNode::fall_through_index] = Type::TOP; 68.48 @@ -1995,7 +1995,7 @@ 68.49 // also remove any exception table entry. Thus we must know the call 68.50 // feeding the Catch will not really throw an exception. This is ok for 68.51 // the main fall-thru control (happens when we know a call can never throw 68.52 - // an exception) or for "rethrow", because a further optimnization will 68.53 + // an exception) or for "rethrow", because a further optimization will 68.54 // yank the rethrow (happens when we inline a function that can throw an 68.55 // exception and the caller has no handler). Not legal, e.g., for passing 68.56 // a NULL receiver to a v-call, or passing bad types to a slow-check-cast.
69.1 --- a/src/share/vm/opto/chaitin.cpp Fri Feb 27 08:34:19 2009 -0800 69.2 +++ b/src/share/vm/opto/chaitin.cpp Fri Feb 27 13:27:09 2009 -0800 69.3 @@ -1246,7 +1246,7 @@ 69.4 69.5 // If the live range is not bound, then we actually had some choices 69.6 // to make. In this case, the mask has more bits in it than the colors 69.7 - // choosen. Restrict the mask to just what was picked. 69.8 + // chosen. Restrict the mask to just what was picked. 69.9 if( lrg->num_regs() == 1 ) { // Size 1 live range 69.10 lrg->Clear(); // Clear the mask 69.11 lrg->Insert(reg); // Set regmask to match selected reg
70.1 --- a/src/share/vm/opto/chaitin.hpp Fri Feb 27 08:34:19 2009 -0800 70.2 +++ b/src/share/vm/opto/chaitin.hpp Fri Feb 27 13:27:09 2009 -0800 70.3 @@ -327,7 +327,7 @@ 70.4 // True if lidx is used before any real register is def'd in the block 70.5 bool prompt_use( Block *b, uint lidx ); 70.6 Node *get_spillcopy_wide( Node *def, Node *use, uint uidx ); 70.7 - // Insert the spill at chosen location. Skip over any interveneing Proj's or 70.8 + // Insert the spill at chosen location. Skip over any intervening Proj's or 70.9 // Phis. Skip over a CatchNode and projs, inserting in the fall-through block 70.10 // instead. Update high-pressure indices. Create a new live range. 70.11 void insert_proj( Block *b, uint i, Node *spill, uint maxlrg ); 70.12 @@ -431,7 +431,7 @@ 70.13 void Simplify(); 70.14 70.15 // Select colors by re-inserting edges into the IFG. 70.16 - // Return TRUE if any spills occured. 70.17 + // Return TRUE if any spills occurred. 70.18 uint Select( ); 70.19 // Helper function for select which allows biased coloring 70.20 OptoReg::Name choose_color( LRG &lrg, int chunk );
71.1 --- a/src/share/vm/opto/coalesce.cpp Fri Feb 27 08:34:19 2009 -0800 71.2 +++ b/src/share/vm/opto/coalesce.cpp Fri Feb 27 13:27:09 2009 -0800 71.3 @@ -123,7 +123,7 @@ 71.4 } 71.5 71.6 //------------------------------clone_projs------------------------------------ 71.7 -// After cloning some rematierialized instruction, clone any MachProj's that 71.8 +// After cloning some rematerialized instruction, clone any MachProj's that 71.9 // follow it. Example: Intel zero is XOR, kills flags. Sparc FP constants 71.10 // use G3 as an address temp. 71.11 int PhaseChaitin::clone_projs( Block *b, uint idx, Node *con, Node *copy, uint &maxlrg ) { 71.12 @@ -694,8 +694,8 @@ 71.13 } // End of if not infinite-stack neighbor 71.14 } // End of if actually inserted 71.15 } // End of if live range overlaps 71.16 - } // End of else collect intereferences for 1 node 71.17 - } // End of while forever, scan back for intereferences 71.18 + } // End of else collect interferences for 1 node 71.19 + } // End of while forever, scan back for interferences 71.20 return reg_degree; 71.21 } 71.22 71.23 @@ -786,7 +786,7 @@ 71.24 if( rm_size == 0 ) return false; 71.25 71.26 // Another early bail-out test is when we are double-coalescing and the 71.27 - // 2 copies are seperated by some control flow. 71.28 + // 2 copies are separated by some control flow. 71.29 if( dst_copy != src_copy ) { 71.30 Block *src_b = _phc._cfg._bbs[src_copy->_idx]; 71.31 Block *b2 = b;
72.1 --- a/src/share/vm/opto/compile.cpp Fri Feb 27 08:34:19 2009 -0800 72.2 +++ b/src/share/vm/opto/compile.cpp Fri Feb 27 13:27:09 2009 -0800 72.3 @@ -337,7 +337,7 @@ 72.4 tty->print_cr("*********************************************************"); 72.5 } 72.6 if (env()->break_at_compile()) { 72.7 - // Open the debugger when compiing this method. 72.8 + // Open the debugger when compiling this method. 72.9 tty->print("### Breaking when compiling: "); 72.10 method()->print_short_name(); 72.11 tty->cr(); 72.12 @@ -1191,8 +1191,8 @@ 72.13 default: ShouldNotReachHere(); 72.14 } 72.15 break; 72.16 - case 2: // No collasping at level 2; keep all splits 72.17 - case 3: // No collasping at level 3; keep all splits 72.18 + case 2: // No collapsing at level 2; keep all splits 72.19 + case 3: // No collapsing at level 3; keep all splits 72.20 break; 72.21 default: 72.22 Unimplemented(); 72.23 @@ -2102,7 +2102,7 @@ 72.24 // [base_reg + offset] 72.25 // NullCheck base_reg 72.26 // 72.27 - // Pin the new DecodeN node to non-null path on these patforms (Sparc) 72.28 + // Pin the new DecodeN node to non-null path on these platforms (Sparc) 72.29 // to keep the information to which NULL check the new DecodeN node 72.30 // corresponds to use it as value in implicit_null_check(). 72.31 //
73.1 --- a/src/share/vm/opto/connode.cpp Fri Feb 27 08:34:19 2009 -0800 73.2 +++ b/src/share/vm/opto/connode.cpp Fri Feb 27 13:27:09 2009 -0800 73.3 @@ -71,7 +71,7 @@ 73.4 to figure out which test post-dominates. The real problem is that it doesn't 73.5 matter which one you pick. After you pick up, the dominating-test elider in 73.6 IGVN can remove the test and allow you to hoist up to the dominating test on 73.7 -the choosen oop bypassing the test on the not-choosen oop. Seen in testing. 73.8 +the chosen oop bypassing the test on the not-chosen oop. Seen in testing. 73.9 Oops. 73.10 73.11 (3) Leave the CastPP's in. This makes the graph more accurate in some sense;
74.1 --- a/src/share/vm/opto/divnode.cpp Fri Feb 27 08:34:19 2009 -0800 74.2 +++ b/src/share/vm/opto/divnode.cpp Fri Feb 27 13:27:09 2009 -0800 74.3 @@ -35,7 +35,7 @@ 74.4 // by constant into a multiply/shift/add series. Return false if calculations 74.5 // fail. 74.6 // 74.7 -// Borrowed almost verbatum from Hacker's Delight by Henry S. Warren, Jr. with 74.8 +// Borrowed almost verbatim from Hacker's Delight by Henry S. Warren, Jr. with 74.9 // minor type name and parameter changes. 74.10 static bool magic_int_divide_constants(jint d, jint &M, jint &s) { 74.11 int32_t p; 74.12 @@ -202,7 +202,7 @@ 74.13 // by constant into a multiply/shift/add series. Return false if calculations 74.14 // fail. 74.15 // 74.16 -// Borrowed almost verbatum from Hacker's Delight by Henry S. Warren, Jr. with 74.17 +// Borrowed almost verbatim from Hacker's Delight by Henry S. Warren, Jr. with 74.18 // minor type name and parameter changes. Adjusted to 64 bit word width. 74.19 static bool magic_long_divide_constants(jlong d, jlong &M, jint &s) { 74.20 int64_t p; 74.21 @@ -1069,7 +1069,7 @@ 74.22 74.23 int log2_con = -1; 74.24 74.25 - // If this is a power of two, they maybe we can mask it 74.26 + // If this is a power of two, then maybe we can mask it 74.27 if( is_power_of_2_long(pos_con) ) { 74.28 log2_con = log2_long(pos_con); 74.29
75.1 --- a/src/share/vm/opto/domgraph.cpp Fri Feb 27 08:34:19 2009 -0800 75.2 +++ b/src/share/vm/opto/domgraph.cpp Fri Feb 27 13:27:09 2009 -0800 75.3 @@ -183,7 +183,7 @@ 75.4 if (pre_order == 1) 75.5 t->_parent = NULL; // first block doesn't have parent 75.6 else { 75.7 - // Save parent (currernt top block on stack) in DFS 75.8 + // Save parent (current top block on stack) in DFS 75.9 t->_parent = &_tarjan[_stack_top->block->_pre_order]; 75.10 } 75.11 // Now put this block on stack
76.1 --- a/src/share/vm/opto/escape.cpp Fri Feb 27 08:34:19 2009 -0800 76.2 +++ b/src/share/vm/opto/escape.cpp Fri Feb 27 13:27:09 2009 -0800 76.3 @@ -515,7 +515,7 @@ 76.4 // cause the failure in add_offset() with narrow oops since TypeOopPtr() 76.5 // constructor verifies correctness of the offset. 76.6 // 76.7 - // It could happend on subclass's branch (from the type profiling 76.8 + // It could happen on subclass's branch (from the type profiling 76.9 // inlining) which was not eliminated during parsing since the exactness 76.10 // of the allocation type was not propagated to the subclass type check. 76.11 // 76.12 @@ -703,7 +703,7 @@ 76.13 while (prev != result) { 76.14 prev = result; 76.15 if (result == start_mem) 76.16 - break; // hit one of our sentinals 76.17 + break; // hit one of our sentinels 76.18 if (result->is_Mem()) { 76.19 const Type *at = phase->type(result->in(MemNode::Address)); 76.20 if (at != Type::TOP) { 76.21 @@ -720,7 +720,7 @@ 76.22 if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) { 76.23 Node *proj_in = result->in(0); 76.24 if (proj_in->is_Allocate() && proj_in->_idx == (uint)tinst->instance_id()) { 76.25 - break; // hit one of our sentinals 76.26 + break; // hit one of our sentinels 76.27 } else if (proj_in->is_Call()) { 76.28 CallNode *call = proj_in->as_Call(); 76.29 if (!call->may_modify(tinst, phase)) { 76.30 @@ -804,7 +804,7 @@ 76.31 // Phase 2: Process MemNode's from memnode_worklist. compute new address type and 76.32 // search the Memory chain for a store with the appropriate type 76.33 // address type. If a Phi is found, create a new version with 76.34 -// the approriate memory slices from each of the Phi inputs. 76.35 +// the appropriate memory slices from each of the Phi inputs. 
76.36 // For stores, process the users as follows: 76.37 // MemNode: push on memnode_worklist 76.38 // MergeMem: push on mergemem_worklist 76.39 @@ -1558,7 +1558,7 @@ 76.40 has_non_escaping_obj = true; // Non GlobalEscape 76.41 Node* n = ptn->_node; 76.42 if (n->is_Allocate() && ptn->_scalar_replaceable ) { 76.43 - // Push scalar replaceable alocations on alloc_worklist 76.44 + // Push scalar replaceable allocations on alloc_worklist 76.45 // for processing in split_unique_types(). 76.46 alloc_worklist.append(n); 76.47 }
77.1 --- a/src/share/vm/opto/gcm.cpp Fri Feb 27 08:34:19 2009 -0800 77.2 +++ b/src/share/vm/opto/gcm.cpp Fri Feb 27 13:27:09 2009 -0800 77.3 @@ -606,7 +606,7 @@ 77.4 if (pred_block != early) { 77.5 // If any predecessor of the Phi matches the load's "early block", 77.6 // we do not need a precedence edge between the Phi and 'load' 77.7 - // since the load will be forced into a block preceeding the Phi. 77.8 + // since the load will be forced into a block preceding the Phi. 77.9 pred_block->set_raise_LCA_mark(load_index); 77.10 assert(!LCA_orig->dominates(pred_block) || 77.11 early->dominates(pred_block), "early is high enough"); 77.12 @@ -1399,7 +1399,7 @@ 77.13 #ifdef ASSERT 77.14 for (uint i = 0; i < _num_blocks; i++ ) { 77.15 Block *b = _blocks[i]; 77.16 - assert(b->_freq >= MIN_BLOCK_FREQUENCY, "Register Allocator requiers meaningful block frequency"); 77.17 + assert(b->_freq >= MIN_BLOCK_FREQUENCY, "Register Allocator requires meaningful block frequency"); 77.18 } 77.19 #endif 77.20 77.21 @@ -1652,7 +1652,7 @@ 77.22 // successor blocks. 77.23 assert(_num_succs == 2, "expecting 2 successors of a null check"); 77.24 // If either successor has only one predecessor, then the 77.25 - // probabiltity estimate can be derived using the 77.26 + // probability estimate can be derived using the 77.27 // relative frequency of the successor and this block. 77.28 if (_succs[i]->num_preds() == 2) { 77.29 return _succs[i]->_freq / _freq; 77.30 @@ -1854,7 +1854,7 @@ 77.31 } 77.32 77.33 //------------------------------update_succ_freq------------------------------- 77.34 -// Update the appropriate frequency associated with block 'b', a succesor of 77.35 +// Update the appropriate frequency associated with block 'b', a successor of 77.36 // a block in this loop. 77.37 void CFGLoop::update_succ_freq(Block* b, float freq) { 77.38 if (b->_loop == this) {
78.1 --- a/src/share/vm/opto/graphKit.cpp Fri Feb 27 08:34:19 2009 -0800 78.2 +++ b/src/share/vm/opto/graphKit.cpp Fri Feb 27 13:27:09 2009 -0800 78.3 @@ -1148,7 +1148,7 @@ 78.4 Node *tst = _gvn.transform( btst ); 78.5 78.6 //----------- 78.7 - // if peephole optimizations occured, a prior test existed. 78.8 + // if peephole optimizations occurred, a prior test existed. 78.9 // If a prior test existed, maybe it dominates as we can avoid this test. 78.10 if (tst != btst && type == T_OBJECT) { 78.11 // At this point we want to scan up the CFG to see if we can 78.12 @@ -1196,7 +1196,7 @@ 78.13 // Consider using 'Reason_class_check' instead? 78.14 78.15 // To cause an implicit null check, we set the not-null probability 78.16 - // to the maximum (PROB_MAX). For an explicit check the probablity 78.17 + // to the maximum (PROB_MAX). For an explicit check the probability 78.18 // is set to a smaller value. 78.19 if (null_control != NULL || too_many_traps(reason)) { 78.20 // probability is less likely
79.1 --- a/src/share/vm/opto/ifg.cpp Fri Feb 27 08:34:19 2009 -0800 79.2 +++ b/src/share/vm/opto/ifg.cpp Fri Feb 27 13:27:09 2009 -0800 79.3 @@ -292,7 +292,7 @@ 79.4 //------------------------------interfere_with_live---------------------------- 79.5 // Interfere this register with everything currently live. Use the RegMasks 79.6 // to trim the set of possible interferences. Return a count of register-only 79.7 -// inteferences as an estimate of register pressure. 79.8 +// interferences as an estimate of register pressure. 79.9 void PhaseChaitin::interfere_with_live( uint r, IndexSet *liveout ) { 79.10 uint retval = 0; 79.11 // Interfere with everything live.
80.1 --- a/src/share/vm/opto/ifnode.cpp Fri Feb 27 08:34:19 2009 -0800 80.2 +++ b/src/share/vm/opto/ifnode.cpp Fri Feb 27 13:27:09 2009 -0800 80.3 @@ -81,7 +81,7 @@ 80.4 uint i4; 80.5 for( i4 = 1; i4 < phi->req(); i4++ ) { 80.6 con1 = phi->in(i4); 80.7 - if( !con1 ) return NULL; // Do not optimize partially collaped merges 80.8 + if( !con1 ) return NULL; // Do not optimize partially collapsed merges 80.9 if( con1->is_Con() ) break; // Found a constant 80.10 // Also allow null-vs-not-null checks 80.11 const TypePtr *tp = igvn->type(con1)->isa_ptr(); 80.12 @@ -204,7 +204,7 @@ 80.13 // T F T F T F 80.14 // ..s.. ..t .. ..s.. ..t.. ..s.. ..t.. 80.15 // 80.16 - // Split the paths coming into the merge point into 2 seperate groups of 80.17 + // Split the paths coming into the merge point into 2 separate groups of 80.18 // merges. On the left will be all the paths feeding constants into the 80.19 // Cmp's Phi. On the right will be the remaining paths. The Cmp's Phi 80.20 // will fold up into a constant; this will let the Cmp fold up as well as 80.21 @@ -236,7 +236,7 @@ 80.22 } 80.23 80.24 // Register the new RegionNodes but do not transform them. Cannot 80.25 - // transform until the entire Region/Phi conglerate has been hacked 80.26 + // transform until the entire Region/Phi conglomerate has been hacked 80.27 // as a single huge transform. 80.28 igvn->register_new_node_with_optimizer( region_c ); 80.29 igvn->register_new_node_with_optimizer( region_x ); 80.30 @@ -599,7 +599,7 @@ 80.31 80.32 //------------------------------fold_compares---------------------------- 80.33 // See if a pair of CmpIs can be converted into a CmpU. In some cases 80.34 -// the direction of this if is determined by the preciding if so it 80.35 +// the direction of this if is determined by the preceding if so it 80.36 // can be eliminate entirely. 
Given an if testing (CmpI n c) check 80.37 // for an immediately control dependent if that is testing (CmpI n c2) 80.38 // and has one projection leading to this if and the other projection 80.39 @@ -811,7 +811,7 @@ 80.40 // Try to remove extra range checks. All 'up_one_dom' gives up at merges 80.41 // so all checks we inspect post-dominate the top-most check we find. 80.42 // If we are going to fail the current check and we reach the top check 80.43 - // then we are guarenteed to fail, so just start interpreting there. 80.44 + // then we are guaranteed to fail, so just start interpreting there. 80.45 // We 'expand' the top 2 range checks to include all post-dominating 80.46 // checks. 80.47
81.1 --- a/src/share/vm/opto/library_call.cpp Fri Feb 27 08:34:19 2009 -0800 81.2 +++ b/src/share/vm/opto/library_call.cpp Fri Feb 27 13:27:09 2009 -0800 81.3 @@ -992,7 +992,7 @@ 81.4 Node *argument = pop(); // pop non-receiver first: it was pushed second 81.5 Node *receiver = pop(); 81.6 81.7 - // don't intrinsify is argument isn't a constant string. 81.8 + // don't intrinsify if argument isn't a constant string. 81.9 if (!argument->is_Con()) { 81.10 return false; 81.11 } 81.12 @@ -1267,7 +1267,7 @@ 81.13 // result = DPow(x,y); 81.14 // } 81.15 // if (result != result)? { 81.16 - // ucommon_trap(); 81.17 + // uncommon_trap(); 81.18 // } 81.19 // return result; 81.20 81.21 @@ -1324,7 +1324,7 @@ 81.22 // Check if (y isn't int) then go to slow path 81.23 81.24 Node *bol2 = _gvn.transform( new (C, 2) BoolNode( cmpinty, BoolTest::ne ) ); 81.25 - // Branch eith way 81.26 + // Branch either way 81.27 IfNode *if2 = create_and_xform_if(complex_path,bol2, PROB_STATIC_INFREQUENT, COUNT_UNKNOWN); 81.28 Node *slow_path = opt_iff(r,if2); // Set region path 2 81.29 81.30 @@ -1715,8 +1715,8 @@ 81.31 } 81.32 81.33 //----------------------------inline_reverseBytes_int/long------------------- 81.34 -// inline Int.reverseBytes(int) 81.35 -// inline Long.reverseByes(long) 81.36 +// inline Integer.reverseBytes(int) 81.37 +// inline Long.reverseBytes(long) 81.38 bool LibraryCallKit::inline_reverseBytes(vmIntrinsics::ID id) { 81.39 assert(id == vmIntrinsics::_reverseBytes_i || id == vmIntrinsics::_reverseBytes_l, "not reverse Bytes"); 81.40 if (id == vmIntrinsics::_reverseBytes_i && !Matcher::has_match_rule(Op_ReverseBytesI)) return false; 81.41 @@ -1915,7 +1915,7 @@ 81.42 // addition to memory membars when is_volatile. 
This is a little 81.43 // too strong, but avoids the need to insert per-alias-type 81.44 // volatile membars (for stores; compare Parse::do_put_xxx), which 81.45 - // we cannot do effctively here because we probably only have a 81.46 + // we cannot do effectively here because we probably only have a 81.47 // rough approximation of type. 81.48 need_mem_bar = true; 81.49 // For Stores, place a memory ordering barrier now. 81.50 @@ -2099,7 +2099,7 @@ 81.51 // overly confusing. (This is a true fact! I originally combined 81.52 // them, but even I was confused by it!) As much code/comments as 81.53 // possible are retained from inline_unsafe_access though to make 81.54 - // the correspondances clearer. - dl 81.55 + // the correspondences clearer. - dl 81.56 81.57 if (callee()->is_static()) return false; // caller must have the capability! 81.58 81.59 @@ -2166,7 +2166,7 @@ 81.60 int alias_idx = C->get_alias_index(adr_type); 81.61 81.62 // Memory-model-wise, a CAS acts like a little synchronized block, 81.63 - // so needs barriers on each side. These don't't translate into 81.64 + // so needs barriers on each side. These don't translate into 81.65 // actual barriers on most machines, but we still need rest of 81.66 // compiler to respect ordering. 81.67 81.68 @@ -3208,7 +3208,7 @@ 81.69 Node *hash_shift = _gvn.intcon(markOopDesc::hash_shift); 81.70 Node *hshifted_header= _gvn.transform( new (C, 3) URShiftXNode(header, hash_shift) ); 81.71 // This hack lets the hash bits live anywhere in the mark object now, as long 81.72 - // as the shift drops the relevent bits into the low 32 bits. Note that 81.73 + // as the shift drops the relevant bits into the low 32 bits. Note that 81.74 // Java spec says that HashCode is an int so there's no point in capturing 81.75 // an 'X'-sized hashcode (32 in 32-bit build or 64 in 64-bit build). 
81.76 hshifted_header = ConvX2I(hshifted_header); 81.77 @@ -3255,7 +3255,7 @@ 81.78 } 81.79 81.80 //---------------------------inline_native_getClass---------------------------- 81.81 -// Build special case code for calls to hashCode on an object. 81.82 +// Build special case code for calls to getClass on an object. 81.83 bool LibraryCallKit::inline_native_getClass() { 81.84 Node* obj = null_check_receiver(callee()); 81.85 if (stopped()) return true; 81.86 @@ -4594,7 +4594,7 @@ 81.87 } 81.88 81.89 // The memory edges above are precise in order to model effects around 81.90 - // array copyies accurately to allow value numbering of field loads around 81.91 + // array copies accurately to allow value numbering of field loads around 81.92 // arraycopy. Such field loads, both before and after, are common in Java 81.93 // collections and similar classes involving header/array data structures. 81.94 //
82.1 --- a/src/share/vm/opto/live.cpp Fri Feb 27 08:34:19 2009 -0800 82.2 +++ b/src/share/vm/opto/live.cpp Fri Feb 27 13:27:09 2009 -0800 82.3 @@ -39,7 +39,7 @@ 82.4 // Leftover bits become the new live-in for the predecessor block, and the pred 82.5 // block is put on the worklist. 82.6 // The locally live-in stuff is computed once and added to predecessor 82.7 -// live-out sets. This seperate compilation is done in the outer loop below. 82.8 +// live-out sets. This separate compilation is done in the outer loop below. 82.9 PhaseLive::PhaseLive( const PhaseCFG &cfg, LRG_List &names, Arena *arena ) : Phase(LIVE), _cfg(cfg), _names(names), _arena(arena), _live(0) { 82.10 } 82.11
83.1 --- a/src/share/vm/opto/locknode.cpp Fri Feb 27 08:34:19 2009 -0800 83.2 +++ b/src/share/vm/opto/locknode.cpp Fri Feb 27 13:27:09 2009 -0800 83.3 @@ -121,7 +121,7 @@ 83.4 kill_dead_locals(); 83.5 83.6 pop(); // Pop oop to unlock 83.7 - // Because monitors are guarenteed paired (else we bail out), we know 83.8 + // Because monitors are guaranteed paired (else we bail out), we know 83.9 // the matching Lock for this Unlock. Hence we know there is no need 83.10 // for a null check on Unlock. 83.11 shared_unlock(map()->peek_monitor_box(), map()->peek_monitor_obj());
84.1 --- a/src/share/vm/opto/loopTransform.cpp Fri Feb 27 08:34:19 2009 -0800 84.2 +++ b/src/share/vm/opto/loopTransform.cpp Fri Feb 27 13:27:09 2009 -0800 84.3 @@ -119,7 +119,7 @@ 84.4 84.5 //---------------------is_invariant_addition----------------------------- 84.6 // Return nonzero index of invariant operand for an Add or Sub 84.7 -// of (nonconstant) invariant and variant values. Helper for reassoicate_invariants. 84.8 +// of (nonconstant) invariant and variant values. Helper for reassociate_invariants. 84.9 int IdealLoopTree::is_invariant_addition(Node* n, PhaseIdealLoop *phase) { 84.10 int op = n->Opcode(); 84.11 if (op == Op_AddI || op == Op_SubI) { 84.12 @@ -520,7 +520,7 @@ 84.13 //------------------------------policy_align----------------------------------- 84.14 // Return TRUE or FALSE if the loop should be cache-line aligned. Gather the 84.15 // expression that does the alignment. Note that only one array base can be 84.16 -// aligned in a loop (unless the VM guarentees mutual alignment). Note that 84.17 +// aligned in a loop (unless the VM guarantees mutual alignment). Note that 84.18 // if we vectorize short memory ops into longer memory ops, we may want to 84.19 // increase alignment. 84.20 bool IdealLoopTree::policy_align( PhaseIdealLoop *phase ) const {
85.1 --- a/src/share/vm/opto/loopUnswitch.cpp Fri Feb 27 08:34:19 2009 -0800 85.2 +++ b/src/share/vm/opto/loopUnswitch.cpp Fri Feb 27 13:27:09 2009 -0800 85.3 @@ -131,7 +131,7 @@ 85.4 85.5 ProjNode* proj_false = invar_iff->proj_out(0)->as_Proj(); 85.6 85.7 - // Hoist invariant casts out of each loop to the appropiate 85.8 + // Hoist invariant casts out of each loop to the appropriate 85.9 // control projection. 85.10 85.11 Node_List worklist;
86.1 --- a/src/share/vm/opto/loopnode.cpp Fri Feb 27 08:34:19 2009 -0800 86.2 +++ b/src/share/vm/opto/loopnode.cpp Fri Feb 27 13:27:09 2009 -0800 86.3 @@ -274,7 +274,7 @@ 86.4 // 86.5 // Canonicalize the condition on the test. If we can exactly determine 86.6 // the trip-counter exit value, then set limit to that value and use 86.7 - // a '!=' test. Otherwise use conditon '<' for count-up loops and 86.8 + // a '!=' test. Otherwise use condition '<' for count-up loops and 86.9 // '>' for count-down loops. If the condition is inverted and we will 86.10 // be rolling through MININT to MAXINT, then bail out. 86.11 86.12 @@ -290,7 +290,7 @@ 86.13 86.14 // If compare points to incr, we are ok. Otherwise the compare 86.15 // can directly point to the phi; in this case adjust the compare so that 86.16 - // it points to the incr by adusting the limit. 86.17 + // it points to the incr by adjusting the limit. 86.18 if( cmp->in(1) == phi || cmp->in(2) == phi ) 86.19 limit = gvn->transform(new (C, 3) AddINode(limit,stride)); 86.20 86.21 @@ -471,7 +471,7 @@ 86.22 lazy_replace( x, l ); 86.23 set_idom(l, init_control, dom_depth(x)); 86.24 86.25 - // Check for immediately preceeding SafePoint and remove 86.26 + // Check for immediately preceding SafePoint and remove 86.27 Node *sfpt2 = le->in(0); 86.28 if( sfpt2->Opcode() == Op_SafePoint && is_deleteable_safept(sfpt2)) 86.29 lazy_replace( sfpt2, sfpt2->in(TypeFunc::Control)); 86.30 @@ -1506,7 +1506,7 @@ 86.31 86.32 // Build Dominators for elision of NULL checks & loop finding. 86.33 // Since nodes do not have a slot for immediate dominator, make 86.34 - // a persistant side array for that info indexed on node->_idx. 86.35 + // a persistent side array for that info indexed on node->_idx. 
86.36 _idom_size = C->unique(); 86.37 _idom = NEW_RESOURCE_ARRAY( Node*, _idom_size ); 86.38 _dom_depth = NEW_RESOURCE_ARRAY( uint, _idom_size ); 86.39 @@ -1529,7 +1529,7 @@ 86.40 86.41 // Given dominators, try to find inner loops with calls that must 86.42 // always be executed (call dominates loop tail). These loops do 86.43 - // not need a seperate safepoint. 86.44 + // not need a separate safepoint. 86.45 Node_List cisstack(a); 86.46 _ltree_root->check_safepts(visited, cisstack); 86.47 86.48 @@ -2332,7 +2332,7 @@ 86.49 if (done) { 86.50 // All of n's inputs have been processed, complete post-processing. 86.51 86.52 - // Compute earilest point this Node can go. 86.53 + // Compute earliest point this Node can go. 86.54 // CFG, Phi, pinned nodes already know their controlling input. 86.55 if (!has_node(n)) { 86.56 // Record earliest legal location 86.57 @@ -2672,9 +2672,9 @@ 86.58 pinned = false; 86.59 } 86.60 if( pinned ) { 86.61 - IdealLoopTree *choosen_loop = get_loop(n->is_CFG() ? n : get_ctrl(n)); 86.62 - if( !choosen_loop->_child ) // Inner loop? 86.63 - choosen_loop->_body.push(n); // Collect inner loops 86.64 + IdealLoopTree *chosen_loop = get_loop(n->is_CFG() ? n : get_ctrl(n)); 86.65 + if( !chosen_loop->_child ) // Inner loop? 86.66 + chosen_loop->_body.push(n); // Collect inner loops 86.67 return; 86.68 } 86.69 } else { // No slot zero 86.70 @@ -2746,9 +2746,9 @@ 86.71 set_ctrl(n, least); 86.72 86.73 // Collect inner loop bodies 86.74 - IdealLoopTree *choosen_loop = get_loop(least); 86.75 - if( !choosen_loop->_child ) // Inner loop? 86.76 - choosen_loop->_body.push(n);// Collect inner loops 86.77 + IdealLoopTree *chosen_loop = get_loop(least); 86.78 + if( !chosen_loop->_child ) // Inner loop? 86.79 + chosen_loop->_body.push(n);// Collect inner loops 86.80 } 86.81 86.82 #ifndef PRODUCT
87.1 --- a/src/share/vm/opto/loopnode.hpp Fri Feb 27 08:34:19 2009 -0800 87.2 +++ b/src/share/vm/opto/loopnode.hpp Fri Feb 27 13:27:09 2009 -0800 87.3 @@ -390,7 +390,7 @@ 87.4 87.5 // Return TRUE or FALSE if the loop should be cache-line aligned. 87.6 // Gather the expression that does the alignment. Note that only 87.7 - // one array base can be aligned in a loop (unless the VM guarentees 87.8 + // one array base can be aligned in a loop (unless the VM guarantees 87.9 // mutual alignment). Note that if we vectorize short memory ops 87.10 // into longer memory ops, we may want to increase alignment. 87.11 bool policy_align( PhaseIdealLoop *phase ) const; 87.12 @@ -403,7 +403,7 @@ 87.13 // Reassociate invariant add and subtract expressions. 87.14 Node* reassociate_add_sub(Node* n1, PhaseIdealLoop *phase); 87.15 // Return nonzero index of invariant operand if invariant and variant 87.16 - // are combined with an Add or Sub. Helper for reassoicate_invariants. 87.17 + // are combined with an Add or Sub. Helper for reassociate_invariants. 87.18 int is_invariant_addition(Node* n, PhaseIdealLoop *phase); 87.19 87.20 // Return true if n is invariant
88.1 --- a/src/share/vm/opto/loopopts.cpp Fri Feb 27 08:34:19 2009 -0800 88.2 +++ b/src/share/vm/opto/loopopts.cpp Fri Feb 27 13:27:09 2009 -0800 88.3 @@ -97,7 +97,7 @@ 88.4 // (Note: This tweaking with igvn only works because x is a new node.) 88.5 _igvn.set_type(x, t); 88.6 // If x is a TypeNode, capture any more-precise type permanently into Node 88.7 - // othewise it will be not updated during igvn->transform since 88.8 + // otherwise it will be not updated during igvn->transform since 88.9 // igvn->type(x) is set to x->Value() already. 88.10 x->raise_bottom_type(t); 88.11 Node *y = x->Identity(&_igvn); 88.12 @@ -879,7 +879,7 @@ 88.13 Node *x_ctrl = NULL; 88.14 if( u->is_Phi() ) { 88.15 // Replace all uses of normal nodes. Replace Phi uses 88.16 - // individually, so the seperate Nodes can sink down 88.17 + // individually, so the separate Nodes can sink down 88.18 // different paths. 88.19 uint k = 1; 88.20 while( u->in(k) != n ) k++;
89.1 --- a/src/share/vm/opto/machnode.cpp Fri Feb 27 08:34:19 2009 -0800 89.2 +++ b/src/share/vm/opto/machnode.cpp Fri Feb 27 13:27:09 2009 -0800 89.3 @@ -136,7 +136,7 @@ 89.4 // Size of instruction in bytes 89.5 uint MachNode::size(PhaseRegAlloc *ra_) const { 89.6 // If a virtual was not defined for this specific instruction, 89.7 - // Call the helper which finds the size by emiting the bits. 89.8 + // Call the helper which finds the size by emitting the bits. 89.9 return MachNode::emit_size(ra_); 89.10 } 89.11
90.1 --- a/src/share/vm/opto/macro.cpp Fri Feb 27 08:34:19 2009 -0800 90.2 +++ b/src/share/vm/opto/macro.cpp Fri Feb 27 13:27:09 2009 -0800 90.3 @@ -216,7 +216,7 @@ 90.4 const TypeOopPtr *tinst = phase->C->get_adr_type(alias_idx)->isa_oopptr(); 90.5 while (true) { 90.6 if (mem == alloc_mem || mem == start_mem ) { 90.7 - return mem; // hit one of our sentinals 90.8 + return mem; // hit one of our sentinels 90.9 } else if (mem->is_MergeMem()) { 90.10 mem = mem->as_MergeMem()->memory_at(alias_idx); 90.11 } else if (mem->is_Proj() && mem->as_Proj()->_con == TypeFunc::Memory) { 90.12 @@ -1668,7 +1668,7 @@ 90.13 90.14 if (UseOptoBiasInlining) { 90.15 /* 90.16 - * See the full descrition in MacroAssembler::biased_locking_enter(). 90.17 + * See the full description in MacroAssembler::biased_locking_enter(). 90.18 * 90.19 * if( (mark_word & biased_lock_mask) == biased_lock_pattern ) { 90.20 * // The object is biased. 90.21 @@ -1904,7 +1904,7 @@ 90.22 90.23 if (UseOptoBiasInlining) { 90.24 // Check for biased locking unlock case, which is a no-op. 90.25 - // See the full descrition in MacroAssembler::biased_locking_exit(). 90.26 + // See the full description in MacroAssembler::biased_locking_exit(). 90.27 region = new (C, 4) RegionNode(4); 90.28 // create a Phi for the memory state 90.29 mem_phi = new (C, 4) PhiNode( region, Type::MEMORY, TypeRawPtr::BOTTOM);
91.1 --- a/src/share/vm/opto/matcher.cpp Fri Feb 27 08:34:19 2009 -0800 91.2 +++ b/src/share/vm/opto/matcher.cpp Fri Feb 27 13:27:09 2009 -0800 91.3 @@ -897,7 +897,7 @@ 91.4 #ifdef ASSERT 91.5 _new2old_map.map(m->_idx, n); 91.6 #endif 91.7 - mstack.push(m, Post_Visit, n, i); // Don't neet to visit 91.8 + mstack.push(m, Post_Visit, n, i); // Don't need to visit 91.9 mstack.push(m->in(0), Visit, m, 0); 91.10 } else { 91.11 mstack.push(m, Visit, n, i); 91.12 @@ -1267,7 +1267,7 @@ 91.13 } 91.14 } 91.15 91.16 - // Not forceably cloning. If shared, put it into a register. 91.17 + // Not forcibly cloning. If shared, put it into a register. 91.18 return shared; 91.19 } 91.20 91.21 @@ -1542,7 +1542,7 @@ 91.22 // This is what my child will give me. 91.23 int opnd_class_instance = s->_rule[op]; 91.24 // Choose between operand class or not. 91.25 - // This is what I will recieve. 91.26 + // This is what I will receive. 91.27 int catch_op = (FIRST_OPERAND_CLASS <= op && op < NUM_OPERANDS) ? opnd_class_instance : op; 91.28 // New rule for child. Chase operand classes to get the actual rule. 91.29 int newrule = s->_rule[catch_op]; 91.30 @@ -1966,7 +1966,7 @@ 91.31 // BoolNode::match_edge always returns a zero. 91.32 91.33 // We reorder the Op_If in a pre-order manner, so we can visit without 91.34 - // accidently sharing the Cmp (the Bool and the If make 2 users). 91.35 + // accidentally sharing the Cmp (the Bool and the If make 2 users). 91.36 n->add_req( n->in(1)->in(1) ); // Add the Cmp next to the Bool 91.37 } 91.38 else if (nstate == Post_Visit) {
92.1 --- a/src/share/vm/opto/memnode.cpp Fri Feb 27 08:34:19 2009 -0800 92.2 +++ b/src/share/vm/opto/memnode.cpp Fri Feb 27 13:27:09 2009 -0800 92.3 @@ -100,12 +100,12 @@ 92.4 while (prev != result) { 92.5 prev = result; 92.6 if (result == start_mem) 92.7 - break; // hit one of our sentinals 92.8 + break; // hit one of our sentinels 92.9 // skip over a call which does not affect this memory slice 92.10 if (result->is_Proj() && result->as_Proj()->_con == TypeFunc::Memory) { 92.11 Node *proj_in = result->in(0); 92.12 if (proj_in->is_Allocate() && proj_in->_idx == instance_id) { 92.13 - break; // hit one of our sentinals 92.14 + break; // hit one of our sentinels 92.15 } else if (proj_in->is_Call()) { 92.16 CallNode *call = proj_in->as_Call(); 92.17 if (!call->may_modify(t_adr, phase)) { 92.18 @@ -198,7 +198,7 @@ 92.19 // If not, we can update the input infinitely along a MergeMem cycle 92.20 // Equivalent code in PhiNode::Ideal 92.21 Node* m = phase->transform(mmem); 92.22 - // If tranformed to a MergeMem, get the desired slice 92.23 + // If transformed to a MergeMem, get the desired slice 92.24 // Otherwise the returned node represents memory for every slice 92.25 mem = (m->is_MergeMem())? m->as_MergeMem()->memory_at(alias_idx) : m; 92.26 // Update input if it is progress over what we have now 92.27 @@ -970,7 +970,7 @@ 92.28 } 92.29 92.30 // Search for an existing data phi which was generated before for the same 92.31 - // instance's field to avoid infinite genertion of phis in a loop. 92.32 + // instance's field to avoid infinite generation of phis in a loop. 92.33 Node *region = mem->in(0); 92.34 if (is_instance_field_load_with_local_phi(region)) { 92.35 const TypePtr *addr_t = in(MemNode::Address)->bottom_type()->isa_ptr(); 92.36 @@ -1254,7 +1254,7 @@ 92.37 // (This tweaking with igvn only works because x is a new node.) 
92.38 igvn->set_type(x, t); 92.39 // If x is a TypeNode, capture any more-precise type permanently into Node 92.40 - // othewise it will be not updated during igvn->transform since 92.41 + // otherwise it will be not updated during igvn->transform since 92.42 // igvn->type(x) is set to x->Value() already. 92.43 x->raise_bottom_type(t); 92.44 Node *y = x->Identity(igvn); 92.45 @@ -2591,7 +2591,7 @@ 92.46 // capturing of nearby memory operations. 92.47 // 92.48 // During macro-expansion, all captured initializations which store 92.49 -// constant values of 32 bits or smaller are coalesced (if advantagous) 92.50 +// constant values of 32 bits or smaller are coalesced (if advantageous) 92.51 // into larger 'tiles' 32 or 64 bits. This allows an object to be 92.52 // initialized in fewer memory operations. Memory words which are 92.53 // covered by neither tiles nor non-constant stores are pre-zeroed 92.54 @@ -3678,7 +3678,7 @@ 92.55 else if (old_mmem != NULL) { 92.56 new_mem = old_mmem->memory_at(i); 92.57 } 92.58 - // else preceeding memory was not a MergeMem 92.59 + // else preceding memory was not a MergeMem 92.60 92.61 // replace equivalent phis (unfortunately, they do not GVN together) 92.62 if (new_mem != NULL && new_mem != new_base &&
93.1 --- a/src/share/vm/opto/memnode.hpp Fri Feb 27 08:34:19 2009 -0800 93.2 +++ b/src/share/vm/opto/memnode.hpp Fri Feb 27 13:27:09 2009 -0800 93.3 @@ -757,10 +757,10 @@ 93.4 // Model. Monitor-enter and volatile-load act as Aquires: no following ref 93.5 // can be moved to before them. We insert a MemBar-Acquire after a FastLock or 93.6 // volatile-load. Monitor-exit and volatile-store act as Release: no 93.7 -// preceeding ref can be moved to after them. We insert a MemBar-Release 93.8 +// preceding ref can be moved to after them. We insert a MemBar-Release 93.9 // before a FastUnlock or volatile-store. All volatiles need to be 93.10 // serialized, so we follow all volatile-stores with a MemBar-Volatile to 93.11 -// seperate it from any following volatile-load. 93.12 +// separate it from any following volatile-load. 93.13 class MemBarNode: public MultiNode { 93.14 virtual uint hash() const ; // { return NO_HASH; } 93.15 virtual uint cmp( const Node &n ) const ; // Always fail, except on self
94.1 --- a/src/share/vm/opto/node.cpp Fri Feb 27 08:34:19 2009 -0800 94.2 +++ b/src/share/vm/opto/node.cpp Fri Feb 27 13:27:09 2009 -0800 94.3 @@ -968,22 +968,23 @@ 94.4 // Example: when reshape "(X+3)+4" into "X+7" you must leave the Node for 94.5 // "X+3" unchanged in case it is shared. 94.6 // 94.7 -// If you modify the 'this' pointer's inputs, you must use 'set_req' with 94.8 -// def-use info. If you are making a new Node (either as the new root or 94.9 -// some new internal piece) you must NOT use set_req with def-use info. 94.10 -// You can make a new Node with either 'new' or 'clone'. In either case, 94.11 -// def-use info is (correctly) not generated. 94.12 +// If you modify the 'this' pointer's inputs, you should use 94.13 +// 'set_req'. If you are making a new Node (either as the new root or 94.14 +// some new internal piece) you may use 'init_req' to set the initial 94.15 +// value. You can make a new Node with either 'new' or 'clone'. In 94.16 +// either case, def-use info is correctly maintained. 94.17 +// 94.18 // Example: reshape "(X+3)+4" into "X+7": 94.19 -// set_req(1,in(1)->in(1) /* grab X */, du /* must use DU on 'this' */); 94.20 -// set_req(2,phase->intcon(7),du); 94.21 +// set_req(1, in(1)->in(1)); 94.22 +// set_req(2, phase->intcon(7)); 94.23 // return this; 94.24 -// Example: reshape "X*4" into "X<<1" 94.25 -// return new (C,3) LShiftINode( in(1), phase->intcon(1) ); 94.26 +// Example: reshape "X*4" into "X<<2" 94.27 +// return new (C,3) LShiftINode(in(1), phase->intcon(2)); 94.28 // 94.29 // You must call 'phase->transform(X)' on any new Nodes X you make, except 94.30 -// for the returned root node. Example: reshape "X*31" with "(X<<5)-1". 94.31 +// for the returned root node. Example: reshape "X*31" with "(X<<5)-X". 
94.32 // Node *shift=phase->transform(new(C,3)LShiftINode(in(1),phase->intcon(5))); 94.33 -// return new (C,3) AddINode(shift, phase->intcon(-1)); 94.34 +// return new (C,3) AddINode(shift, in(1)); 94.35 // 94.36 // When making a Node for a constant use 'phase->makecon' or 'phase->intcon'. 94.37 // These forms are faster than 'phase->transform(new (C,1) ConNode())' and Do 94.38 @@ -1679,7 +1680,7 @@ 94.39 if (visited.member(this)) return; 94.40 visited.push(this); 94.41 94.42 - // Walk over all input edges, checking for correspondance 94.43 + // Walk over all input edges, checking for correspondence 94.44 for( i = 0; i < len(); i++ ) { 94.45 n = in(i); 94.46 if (n != NULL && !n->is_top()) { 94.47 @@ -1723,7 +1724,7 @@ 94.48 // Contained in new_space or old_space? 94.49 VectorSet *v = C->node_arena()->contains(n) ? &new_space : &old_space; 94.50 // Check for visited in the proper space. Numberings are not unique 94.51 - // across spaces so we need a seperate VectorSet for each space. 94.52 + // across spaces so we need a separate VectorSet for each space. 94.53 if( v->test_set(n->_idx) ) return; 94.54 94.55 if (n->is_Con() && n->bottom_type() == Type::TOP) {
95.1 --- a/src/share/vm/opto/node.hpp Fri Feb 27 08:34:19 2009 -0800 95.2 +++ b/src/share/vm/opto/node.hpp Fri Feb 27 13:27:09 2009 -0800 95.3 @@ -257,7 +257,7 @@ 95.4 Node **_in; // Array of use-def references to Nodes 95.5 Node **_out; // Array of def-use references to Nodes 95.6 95.7 - // Input edges are split into two catagories. Required edges are required 95.8 + // Input edges are split into two categories. Required edges are required 95.9 // for semantic correctness; order is important and NULLs are allowed. 95.10 // Precedence edges are used to help determine execution order and are 95.11 // added, e.g., for scheduling purposes. They are unordered and not 95.12 @@ -854,7 +854,7 @@ 95.13 95.14 // If the hash function returns the special sentinel value NO_HASH, 95.15 // the node is guaranteed never to compare equal to any other node. 95.16 - // If we accidently generate a hash with value NO_HASH the node 95.17 + // If we accidentally generate a hash with value NO_HASH the node 95.18 // won't go into the table and we'll lose a little optimization. 95.19 enum { NO_HASH = 0 }; 95.20 virtual uint hash() const;
96.1 --- a/src/share/vm/opto/output.cpp Fri Feb 27 08:34:19 2009 -0800 96.2 +++ b/src/share/vm/opto/output.cpp Fri Feb 27 13:27:09 2009 -0800 96.3 @@ -1171,7 +1171,7 @@ 96.4 cb->flush_bundle(false); 96.5 96.6 // The following logic is duplicated in the code ifdeffed for 96.7 - // ENABLE_ZAP_DEAD_LOCALS which apppears above in this file. It 96.8 + // ENABLE_ZAP_DEAD_LOCALS which appears above in this file. It 96.9 // should be factored out. Or maybe dispersed to the nodes? 96.10 96.11 // Special handling for SafePoint/Call Nodes 96.12 @@ -1275,7 +1275,7 @@ 96.13 } 96.14 96.15 #ifdef ASSERT 96.16 - // Check that oop-store preceeds the card-mark 96.17 + // Check that oop-store precedes the card-mark 96.18 else if( mach->ideal_Opcode() == Op_StoreCM ) { 96.19 uint storeCM_idx = j; 96.20 Node *oop_store = mach->in(mach->_cnt); // First precedence edge 96.21 @@ -1291,7 +1291,7 @@ 96.22 #endif 96.23 96.24 else if( !n->is_Proj() ) { 96.25 - // Remember the begining of the previous instruction, in case 96.26 + // Remember the beginning of the previous instruction, in case 96.27 // it's followed by a flag-kill and a null-check. Happens on 96.28 // Intel all the time, with add-to-memory kind of opcodes. 96.29 previous_offset = current_offset; 96.30 @@ -1567,7 +1567,7 @@ 96.31 96.32 compile.set_node_bundling_limit(_node_bundling_limit); 96.33 96.34 - // This one is persistant within the Compile class 96.35 + // This one is persistent within the Compile class 96.36 _node_bundling_base = NEW_ARENA_ARRAY(compile.comp_arena(), Bundle, node_max); 96.37 96.38 // Allocate space for fixed-size arrays 96.39 @@ -1666,7 +1666,7 @@ 96.40 // Compute the latency of all the instructions. This is fairly simple, 96.41 // because we already have a legal ordering. Walk over the instructions 96.42 // from first to last, and compute the latency of the instruction based 96.43 -// on the latency of the preceeding instruction(s). 96.44 +// on the latency of the preceding instruction(s). 
96.45 void Scheduling::ComputeLocalLatenciesForward(const Block *bb) { 96.46 #ifndef PRODUCT 96.47 if (_cfg->C->trace_opto_output()) 96.48 @@ -1931,7 +1931,7 @@ 96.49 uint siz = _available.size(); 96.50 96.51 // Conditional branches can support an instruction that 96.52 - // is unconditionally executed and not dependant by the 96.53 + // is unconditionally executed and not dependent by the 96.54 // branch, OR a conditionally executed instruction if 96.55 // the branch is taken. In practice, this means that 96.56 // the first instruction at the branch target is 96.57 @@ -1947,7 +1947,7 @@ 96.58 #endif 96.59 96.60 // At least 1 instruction is on the available list 96.61 - // that is not dependant on the branch 96.62 + // that is not dependent on the branch 96.63 for (uint i = 0; i < siz; i++) { 96.64 Node *d = _available[i]; 96.65 const Pipeline *avail_pipeline = d->pipeline();
97.1 --- a/src/share/vm/opto/parse.hpp Fri Feb 27 08:34:19 2009 -0800 97.2 +++ b/src/share/vm/opto/parse.hpp Fri Feb 27 13:27:09 2009 -0800 97.3 @@ -78,7 +78,7 @@ 97.4 }; 97.5 97.6 // See if it is OK to inline. 97.7 - // The reciever is the inline tree for the caller. 97.8 + // The receiver is the inline tree for the caller. 97.9 // 97.10 // The result is a temperature indication. If it is hot or cold, 97.11 // inlining is immediate or undesirable. Otherwise, the info block
98.1 --- a/src/share/vm/opto/parse1.cpp Fri Feb 27 08:34:19 2009 -0800 98.2 +++ b/src/share/vm/opto/parse1.cpp Fri Feb 27 13:27:09 2009 -0800 98.3 @@ -607,7 +607,7 @@ 98.4 if (control()->is_Region() && !block->is_loop_head() && !has_irreducible && !block->is_handler()) { 98.5 // In the absence of irreducible loops, the Region and Phis 98.6 // associated with a merge that doesn't involve a backedge can 98.7 - // be simplfied now since the RPO parsing order guarantees 98.8 + // be simplified now since the RPO parsing order guarantees 98.9 // that any path which was supposed to reach here has already 98.10 // been parsed or must be dead. 98.11 Node* c = control();
99.1 --- a/src/share/vm/opto/parse2.cpp Fri Feb 27 08:34:19 2009 -0800 99.2 +++ b/src/share/vm/opto/parse2.cpp Fri Feb 27 13:27:09 2009 -0800 99.3 @@ -32,7 +32,7 @@ 99.4 void Parse::array_load(BasicType elem_type) { 99.5 const Type* elem = Type::TOP; 99.6 Node* adr = array_addressing(elem_type, 0, &elem); 99.7 - if (stopped()) return; // guarenteed null or range check 99.8 + if (stopped()) return; // guaranteed null or range check 99.9 _sp -= 2; // Pop array and index 99.10 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type); 99.11 Node* ld = make_load(control(), adr, elem, elem_type, adr_type); 99.12 @@ -43,7 +43,7 @@ 99.13 //--------------------------------array_store---------------------------------- 99.14 void Parse::array_store(BasicType elem_type) { 99.15 Node* adr = array_addressing(elem_type, 1); 99.16 - if (stopped()) return; // guarenteed null or range check 99.17 + if (stopped()) return; // guaranteed null or range check 99.18 Node* val = pop(); 99.19 _sp -= 2; // Pop array and index 99.20 const TypeAryPtr* adr_type = TypeAryPtr::get_array_body_type(elem_type); 99.21 @@ -1541,14 +1541,14 @@ 99.22 case Bytecodes::_aaload: array_load(T_OBJECT); break; 99.23 case Bytecodes::_laload: { 99.24 a = array_addressing(T_LONG, 0); 99.25 - if (stopped()) return; // guarenteed null or range check 99.26 + if (stopped()) return; // guaranteed null or range check 99.27 _sp -= 2; // Pop array and index 99.28 push_pair( make_load(control(), a, TypeLong::LONG, T_LONG, TypeAryPtr::LONGS)); 99.29 break; 99.30 } 99.31 case Bytecodes::_daload: { 99.32 a = array_addressing(T_DOUBLE, 0); 99.33 - if (stopped()) return; // guarenteed null or range check 99.34 + if (stopped()) return; // guaranteed null or range check 99.35 _sp -= 2; // Pop array and index 99.36 push_pair( make_load(control(), a, Type::DOUBLE, T_DOUBLE, TypeAryPtr::DOUBLES)); 99.37 break; 99.38 @@ -1560,7 +1560,7 @@ 99.39 case Bytecodes::_fastore: array_store(T_FLOAT); break; 99.40 case 
Bytecodes::_aastore: { 99.41 d = array_addressing(T_OBJECT, 1); 99.42 - if (stopped()) return; // guarenteed null or range check 99.43 + if (stopped()) return; // guaranteed null or range check 99.44 array_store_check(); 99.45 c = pop(); // Oop to store 99.46 b = pop(); // index (already used) 99.47 @@ -1572,7 +1572,7 @@ 99.48 } 99.49 case Bytecodes::_lastore: { 99.50 a = array_addressing(T_LONG, 2); 99.51 - if (stopped()) return; // guarenteed null or range check 99.52 + if (stopped()) return; // guaranteed null or range check 99.53 c = pop_pair(); 99.54 _sp -= 2; // Pop array and index 99.55 store_to_memory(control(), a, c, T_LONG, TypeAryPtr::LONGS); 99.56 @@ -1580,7 +1580,7 @@ 99.57 } 99.58 case Bytecodes::_dastore: { 99.59 a = array_addressing(T_DOUBLE, 2); 99.60 - if (stopped()) return; // guarenteed null or range check 99.61 + if (stopped()) return; // guaranteed null or range check 99.62 c = pop_pair(); 99.63 _sp -= 2; // Pop array and index 99.64 c = dstore_rounding(c);
100.1 --- a/src/share/vm/opto/phase.cpp Fri Feb 27 08:34:19 2009 -0800 100.2 +++ b/src/share/vm/opto/phase.cpp Fri Feb 27 13:27:09 2009 -0800 100.3 @@ -73,7 +73,7 @@ 100.4 100.5 //------------------------------Phase------------------------------------------ 100.6 Phase::Phase( PhaseNumber pnum ) : _pnum(pnum), C( pnum == Compiler ? NULL : Compile::current()) { 100.7 - // Poll for requests from shutdown mechanism to quiesce comiler (4448539, 4448544). 100.8 + // Poll for requests from shutdown mechanism to quiesce compiler (4448539, 4448544). 100.9 // This is an effective place to poll, since the compiler is full of phases. 100.10 // In particular, every inlining site uses a recursively created Parse phase. 100.11 CompileBroker::maybe_block();
101.1 --- a/src/share/vm/opto/phaseX.cpp Fri Feb 27 08:34:19 2009 -0800 101.2 +++ b/src/share/vm/opto/phaseX.cpp Fri Feb 27 13:27:09 2009 -0800 101.3 @@ -196,7 +196,7 @@ 101.4 } 101.5 101.6 //------------------------------hash_delete------------------------------------ 101.7 -// Replace in hash table with sentinal 101.8 +// Replace in hash table with sentinel 101.9 bool NodeHash::hash_delete( const Node *n ) { 101.10 Node *k; 101.11 uint hash = n->hash(); 101.12 @@ -207,7 +207,7 @@ 101.13 uint key = hash & (_max-1); 101.14 uint stride = key | 0x01; 101.15 debug_only( uint counter = 0; ); 101.16 - for( ; /* (k != NULL) && (k != _sentinal) */; ) { 101.17 + for( ; /* (k != NULL) && (k != _sentinel) */; ) { 101.18 debug_only( counter++ ); 101.19 debug_only( _delete_probes++ ); 101.20 k = _table[key]; // Get hashed value 101.21 @@ -715,7 +715,7 @@ 101.22 101.23 #ifdef ASSERT 101.24 //------------------------------dead_loop_check-------------------------------- 101.25 -// Check for a simple dead loop when a data node references itself direcly 101.26 +// Check for a simple dead loop when a data node references itself directly 101.27 // or through an other data node excluding cons and phis. 101.28 void PhaseGVN::dead_loop_check( Node *n ) { 101.29 // Phi may reference itself in a loop 101.30 @@ -1359,7 +1359,7 @@ 101.31 worklist.push(p); // Propagate change to user 101.32 } 101.33 } 101.34 - // If we changed the reciever type to a call, we need to revisit 101.35 + // If we changed the receiver type to a call, we need to revisit 101.36 // the Catch following the call. It's looking for a non-NULL 101.37 // receiver to know when to enable the regular fall-through path 101.38 // in addition to the NullPtrException path
102.1 --- a/src/share/vm/opto/postaloc.cpp Fri Feb 27 08:34:19 2009 -0800 102.2 +++ b/src/share/vm/opto/postaloc.cpp Fri Feb 27 13:27:09 2009 -0800 102.3 @@ -46,7 +46,7 @@ 102.4 // be splitting live ranges for callee save registers to such 102.5 // an extent that in large methods the chains can be very long 102.6 // (50+). The conservative answer is to return true if we don't 102.7 - // know as this prevents optimizations from occuring. 102.8 + // know as this prevents optimizations from occurring. 102.9 102.10 const int limit = 60; 102.11 int i; 102.12 @@ -286,7 +286,7 @@ 102.13 // 102.14 // n will be replaced with the old value but n might have 102.15 // kills projections associated with it so remove them now so that 102.16 - // yank_if_dead will be able to elminate the copy once the uses 102.17 + // yank_if_dead will be able to eliminate the copy once the uses 102.18 // have been transferred to the old[value]. 102.19 for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) { 102.20 Node* use = n->fast_out(i);
103.1 --- a/src/share/vm/opto/reg_split.cpp Fri Feb 27 08:34:19 2009 -0800 103.2 +++ b/src/share/vm/opto/reg_split.cpp Fri Feb 27 13:27:09 2009 -0800 103.3 @@ -26,8 +26,8 @@ 103.4 #include "incls/_reg_split.cpp.incl" 103.5 103.6 //------------------------------Split-------------------------------------- 103.7 -// Walk the graph in RPO and for each lrg which spills, propogate reaching 103.8 -// definitions. During propogation, split the live range around regions of 103.9 +// Walk the graph in RPO and for each lrg which spills, propagate reaching 103.10 +// definitions. During propagation, split the live range around regions of 103.11 // High Register Pressure (HRP). If a Def is in a region of Low Register 103.12 // Pressure (LRP), it will not get spilled until we encounter a region of 103.13 // HRP between it and one of its uses. We will spill at the transition 103.14 @@ -88,7 +88,7 @@ 103.15 } 103.16 103.17 //------------------------------insert_proj------------------------------------ 103.18 -// Insert the spill at chosen location. Skip over any interveneing Proj's or 103.19 +// Insert the spill at chosen location. Skip over any intervening Proj's or 103.20 // Phis. Skip over a CatchNode and projs, inserting in the fall-through block 103.21 // instead. Update high-pressure indices. Create a new live range. 103.22 void PhaseChaitin::insert_proj( Block *b, uint i, Node *spill, uint maxlrg ) { 103.23 @@ -125,7 +125,7 @@ 103.24 } 103.25 103.26 //------------------------------split_DEF-------------------------------------- 103.27 -// There are four catagories of Split; UP/DOWN x DEF/USE 103.28 +// There are four categories of Split; UP/DOWN x DEF/USE 103.29 // Only three of these really occur as DOWN/USE will always color 103.30 // Any Split with a DEF cannot CISC-Spill now. 
Thus we need 103.31 // two helper routines, one for Split DEFS (insert after instruction), 103.32 @@ -726,7 +726,7 @@ 103.33 // ********** Handle Crossing HRP Boundry ********** 103.34 if( (insidx == b->_ihrp_index) || (insidx == b->_fhrp_index) ) { 103.35 for( slidx = 0; slidx < spill_cnt; slidx++ ) { 103.36 - // Check for need to split at HRP boundry - split if UP 103.37 + // Check for need to split at HRP boundary - split if UP 103.38 n1 = Reachblock[slidx]; 103.39 // bail out if no reaching DEF 103.40 if( n1 == NULL ) continue;
104.1 --- a/src/share/vm/opto/runtime.cpp Fri Feb 27 08:34:19 2009 -0800 104.2 +++ b/src/share/vm/opto/runtime.cpp Fri Feb 27 13:27:09 2009 -0800 104.3 @@ -1196,7 +1196,7 @@ 104.4 104.5 // The following does not work because for one thing, the 104.6 // thread state is wrong; it expects java, but it is native. 104.7 -// Also, the invarients in a native stub are different and 104.8 +// Also, the invariants in a native stub are different and 104.9 // I'm not sure it is safe to have a MachCalRuntimeDirectNode 104.10 // in there. 104.11 // So for now, we do not zap in native stubs.
105.1 --- a/src/share/vm/opto/split_if.cpp Fri Feb 27 08:34:19 2009 -0800 105.2 +++ b/src/share/vm/opto/split_if.cpp Fri Feb 27 13:27:09 2009 -0800 105.3 @@ -318,7 +318,7 @@ 105.4 105.5 if( use->is_Phi() ) { // Phi uses in prior block 105.6 // Grab the first Phi use; there may be many. 105.7 - // Each will be handled as a seperate iteration of 105.8 + // Each will be handled as a separate iteration of 105.9 // the "while( phi->outcnt() )" loop. 105.10 uint j; 105.11 for( j = 1; j < use->req(); j++ )
106.1 --- a/src/share/vm/opto/superword.cpp Fri Feb 27 08:34:19 2009 -0800 106.2 +++ b/src/share/vm/opto/superword.cpp Fri Feb 27 13:27:09 2009 -0800 106.3 @@ -470,7 +470,7 @@ 106.4 } 106.5 106.6 //------------------------------stmts_can_pack--------------------------- 106.7 -// Can s1 and s2 be in a pack with s1 immediately preceeding s2 and 106.8 +// Can s1 and s2 be in a pack with s1 immediately preceding s2 and 106.9 // s1 aligned at "align" 106.10 bool SuperWord::stmts_can_pack(Node* s1, Node* s2, int align) { 106.11 if (isomorphic(s1, s2)) { 106.12 @@ -869,7 +869,7 @@ 106.13 for (uint i = start; i < end; i++) { 106.14 if (!is_vector_use(p0, i)) { 106.15 // For now, return false if not scalar promotion case (inputs are the same.) 106.16 - // Later, implement PackNode and allow differring, non-vector inputs 106.17 + // Later, implement PackNode and allow differing, non-vector inputs 106.18 // (maybe just the ones from outside the block.) 106.19 Node* p0_def = p0->in(i); 106.20 for (uint j = 1; j < p->size(); j++) {
107.1 --- a/src/share/vm/opto/superword.hpp Fri Feb 27 08:34:19 2009 -0800 107.2 +++ b/src/share/vm/opto/superword.hpp Fri Feb 27 13:27:09 2009 -0800 107.3 @@ -308,7 +308,7 @@ 107.4 void dependence_graph(); 107.5 // Return a memory slice (node list) in predecessor order starting at "start" 107.6 void mem_slice_preds(Node* start, Node* stop, GrowableArray<Node*> &preds); 107.7 - // Can s1 and s2 be in a pack with s1 immediately preceeding s2 and s1 aligned at "align" 107.8 + // Can s1 and s2 be in a pack with s1 immediately preceding s2 and s1 aligned at "align" 107.9 bool stmts_can_pack(Node* s1, Node* s2, int align); 107.10 // Does s exist in a pack at position pos? 107.11 bool exists_at(Node* s, uint pos);
108.1 --- a/src/share/vm/opto/type.cpp Fri Feb 27 08:34:19 2009 -0800 108.2 +++ b/src/share/vm/opto/type.cpp Fri Feb 27 13:27:09 2009 -0800 108.3 @@ -2455,7 +2455,7 @@ 108.4 // code and dereferenced at the time the nmethod is made. Until that time, 108.5 // it is not reasonable to do arithmetic with the addresses of oops (we don't 108.6 // have access to the addresses!). This does not seem to currently happen, 108.7 - // but this assertion here is to help prevent its occurrance. 108.8 + // but this assertion here is to help prevent its occurrence. 108.9 tty->print_cr("Found oop constant with non-zero offset"); 108.10 ShouldNotReachHere(); 108.11 } 108.12 @@ -2761,7 +2761,7 @@ 108.13 // LCA is object_klass, but if we subclass from the top we can do better 108.14 if( above_centerline(_ptr) ) { // if( _ptr == TopPTR || _ptr == AnyNull ) 108.15 // If 'this' (InstPtr) is above the centerline and it is Object class 108.16 - // then we can subclass in the Java class heirarchy. 108.17 + // then we can subclass in the Java class hierarchy. 108.18 if (klass()->equals(ciEnv::current()->Object_klass())) { 108.19 // that is, tp's array type is a subtype of my klass 108.20 return TypeAryPtr::make(ptr, tp->ary(), tp->klass(), tp->klass_is_exact(), offset, instance_id); 108.21 @@ -3022,7 +3022,7 @@ 108.22 108.23 //------------------------------xdual------------------------------------------ 108.24 // Dual: do NOT dual on klasses. This means I do NOT understand the Java 108.25 -// inheritence mechanism. 108.26 +// inheritance mechanism. 
108.27 const Type *TypeInstPtr::xdual() const { 108.28 return new TypeInstPtr( dual_ptr(), klass(), klass_is_exact(), const_oop(), dual_offset(), dual_instance_id() ); 108.29 } 108.30 @@ -3176,7 +3176,7 @@ 108.31 bool chg = false; 108.32 if (lo < min_lo) { lo = min_lo; chg = true; } 108.33 if (hi > max_hi) { hi = max_hi; chg = true; } 108.34 - // Negative length arrays will produce weird intermediate dead fath-path code 108.35 + // Negative length arrays will produce weird intermediate dead fast-path code 108.36 if (lo > hi) 108.37 return TypeInt::ZERO; 108.38 if (!chg) 108.39 @@ -3358,7 +3358,7 @@ 108.40 // LCA is object_klass, but if we subclass from the top we can do better 108.41 if (above_centerline(tp->ptr())) { 108.42 // If 'tp' is above the centerline and it is Object class 108.43 - // then we can subclass in the Java class heirarchy. 108.44 + // then we can subclass in the Java class hierarchy. 108.45 if( tp->klass()->equals(ciEnv::current()->Object_klass()) ) { 108.46 // that is, my array type is a subtype of 'tp' klass 108.47 return make( ptr, _ary, _klass, _klass_is_exact, offset, instance_id );
109.1 --- a/src/share/vm/prims/jvmtiRedefineClasses.cpp Fri Feb 27 08:34:19 2009 -0800 109.2 +++ b/src/share/vm/prims/jvmtiRedefineClasses.cpp Fri Feb 27 13:27:09 2009 -0800 109.3 @@ -1349,39 +1349,39 @@ 109.4 109.5 // rewrite constant pool references in the methods: 109.6 if (!rewrite_cp_refs_in_methods(scratch_class, THREAD)) { 109.7 - // propogate failure back to caller 109.8 + // propagate failure back to caller 109.9 return false; 109.10 } 109.11 109.12 // rewrite constant pool references in the class_annotations: 109.13 if (!rewrite_cp_refs_in_class_annotations(scratch_class, THREAD)) { 109.14 - // propogate failure back to caller 109.15 + // propagate failure back to caller 109.16 return false; 109.17 } 109.18 109.19 // rewrite constant pool references in the fields_annotations: 109.20 if (!rewrite_cp_refs_in_fields_annotations(scratch_class, THREAD)) { 109.21 - // propogate failure back to caller 109.22 + // propagate failure back to caller 109.23 return false; 109.24 } 109.25 109.26 // rewrite constant pool references in the methods_annotations: 109.27 if (!rewrite_cp_refs_in_methods_annotations(scratch_class, THREAD)) { 109.28 - // propogate failure back to caller 109.29 + // propagate failure back to caller 109.30 return false; 109.31 } 109.32 109.33 // rewrite constant pool references in the methods_parameter_annotations: 109.34 if (!rewrite_cp_refs_in_methods_parameter_annotations(scratch_class, 109.35 THREAD)) { 109.36 - // propogate failure back to caller 109.37 + // propagate failure back to caller 109.38 return false; 109.39 } 109.40 109.41 // rewrite constant pool references in the methods_default_annotations: 109.42 if (!rewrite_cp_refs_in_methods_default_annotations(scratch_class, 109.43 THREAD)) { 109.44 - // propogate failure back to caller 109.45 + // propagate failure back to caller 109.46 return false; 109.47 } 109.48 109.49 @@ -1600,7 +1600,7 @@ 109.50 byte_i_ref, THREAD)) { 109.51 RC_TRACE_WITH_THREAD(0x02000000, THREAD, 109.52 ("bad 
annotation_struct at %d", calc_num_annotations)); 109.53 - // propogate failure back to caller 109.54 + // propagate failure back to caller 109.55 return false; 109.56 } 109.57 } 109.58 @@ -1666,7 +1666,7 @@ 109.59 byte_i_ref, THREAD)) { 109.60 RC_TRACE_WITH_THREAD(0x02000000, THREAD, 109.61 ("bad element_value at %d", calc_num_element_value_pairs)); 109.62 - // propogate failure back to caller 109.63 + // propagate failure back to caller 109.64 return false; 109.65 } 109.66 } // end for each component 109.67 @@ -1815,7 +1815,7 @@ 109.68 // field. This is a nested annotation. 109.69 if (!rewrite_cp_refs_in_annotation_struct(annotations_typeArray, 109.70 byte_i_ref, THREAD)) { 109.71 - // propogate failure back to caller 109.72 + // propagate failure back to caller 109.73 return false; 109.74 } 109.75 break; 109.76 @@ -1842,7 +1842,7 @@ 109.77 annotations_typeArray, byte_i_ref, THREAD)) { 109.78 RC_TRACE_WITH_THREAD(0x02000000, THREAD, 109.79 ("bad nested element_value at %d", calc_num_values)); 109.80 - // propogate failure back to caller 109.81 + // propagate failure back to caller 109.82 return false; 109.83 } 109.84 } 109.85 @@ -1886,7 +1886,7 @@ 109.86 THREAD)) { 109.87 RC_TRACE_WITH_THREAD(0x02000000, THREAD, 109.88 ("bad field_annotations at %d", i)); 109.89 - // propogate failure back to caller 109.90 + // propagate failure back to caller 109.91 return false; 109.92 } 109.93 } 109.94 @@ -1923,7 +1923,7 @@ 109.95 THREAD)) { 109.96 RC_TRACE_WITH_THREAD(0x02000000, THREAD, 109.97 ("bad method_annotations at %d", i)); 109.98 - // propogate failure back to caller 109.99 + // propagate failure back to caller 109.100 return false; 109.101 } 109.102 } 109.103 @@ -1991,7 +1991,7 @@ 109.104 method_parameter_annotations, byte_i, THREAD)) { 109.105 RC_TRACE_WITH_THREAD(0x02000000, THREAD, 109.106 ("bad method_parameter_annotations at %d", calc_num_parameters)); 109.107 - // propogate failure back to caller 109.108 + // propagate failure back to caller 109.109 return 
false; 109.110 } 109.111 } 109.112 @@ -2041,7 +2041,7 @@ 109.113 method_default_annotations, byte_i, THREAD)) { 109.114 RC_TRACE_WITH_THREAD(0x02000000, THREAD, 109.115 ("bad default element_value at %d", i)); 109.116 - // propogate failure back to caller 109.117 + // propagate failure back to caller 109.118 return false; 109.119 } 109.120 }
110.1 --- a/src/share/vm/runtime/extendedPC.hpp Fri Feb 27 08:34:19 2009 -0800 110.2 +++ b/src/share/vm/runtime/extendedPC.hpp Fri Feb 27 13:27:09 2009 -0800 110.3 @@ -23,7 +23,7 @@ 110.4 */ 110.5 110.6 // An ExtendedPC contains the _pc from a signal handler in a platform 110.7 -// independant way. 110.8 +// independent way. 110.9 110.10 class ExtendedPC VALUE_OBJ_CLASS_SPEC { 110.11 private:
111.1 --- a/src/share/vm/runtime/fprofiler.cpp Fri Feb 27 08:34:19 2009 -0800 111.2 +++ b/src/share/vm/runtime/fprofiler.cpp Fri Feb 27 13:27:09 2009 -0800 111.3 @@ -988,7 +988,7 @@ 111.4 111.5 111.6 void ThreadProfiler::record_tick_for_running_frame(JavaThread* thread, frame fr) { 111.7 - // The tick happend in real code -> non VM code 111.8 + // The tick happened in real code -> non VM code 111.9 if (fr.is_interpreted_frame()) { 111.10 interval_data_ref()->inc_interpreted(); 111.11 record_interpreted_tick(thread, fr, tp_code, FlatProfiler::bytecode_ticks); 111.12 @@ -1019,7 +1019,7 @@ 111.13 } 111.14 111.15 void ThreadProfiler::record_tick_for_calling_frame(JavaThread* thread, frame fr) { 111.16 - // The tick happend in VM code 111.17 + // The tick happened in VM code 111.18 interval_data_ref()->inc_native(); 111.19 if (fr.is_interpreted_frame()) { 111.20 record_interpreted_tick(thread, fr, tp_native, FlatProfiler::bytecode_ticks_stub);
112.1 --- a/src/share/vm/runtime/frame.cpp Fri Feb 27 08:34:19 2009 -0800 112.2 +++ b/src/share/vm/runtime/frame.cpp Fri Feb 27 13:27:09 2009 -0800 112.3 @@ -930,7 +930,7 @@ 112.4 // => process callee's arguments 112.5 // 112.6 // Note: The expression stack can be empty if an exception 112.7 - // occured during method resolution/execution. In all 112.8 + // occurred during method resolution/execution. In all 112.9 // cases we empty the expression stack completely be- 112.10 // fore handling the exception (the exception handling 112.11 // code in the interpreter calls a blocking runtime
113.1 --- a/src/share/vm/runtime/frame.inline.hpp Fri Feb 27 08:34:19 2009 -0800 113.2 +++ b/src/share/vm/runtime/frame.inline.hpp Fri Feb 27 13:27:09 2009 -0800 113.3 @@ -22,7 +22,7 @@ 113.4 * 113.5 */ 113.6 113.7 -// This file holds platform-independant bodies of inline functions for frames. 113.8 +// This file holds platform-independent bodies of inline functions for frames. 113.9 113.10 // Note: The bcx usually contains the bcp; however during GC it contains the bci 113.11 // (changed by gc_prologue() and gc_epilogue()) to be methodOop position
114.1 --- a/src/share/vm/runtime/mutex.hpp Fri Feb 27 08:34:19 2009 -0800 114.2 +++ b/src/share/vm/runtime/mutex.hpp Fri Feb 27 13:27:09 2009 -0800 114.3 @@ -82,7 +82,7 @@ 114.4 // *in that order*. If their implementations change such that these 114.5 // assumptions are violated, a whole lot of code will break. 114.6 114.7 -// The default length of monitor name is choosen to be 64 to avoid false sharing. 114.8 +// The default length of monitor name is chosen to be 64 to avoid false sharing. 114.9 static const int MONITOR_NAME_LEN = 64; 114.10 114.11 class Monitor : public CHeapObj {
115.1 --- a/src/share/vm/runtime/orderAccess.hpp Fri Feb 27 08:34:19 2009 -0800 115.2 +++ b/src/share/vm/runtime/orderAccess.hpp Fri Feb 27 13:27:09 2009 -0800 115.3 @@ -31,7 +31,7 @@ 115.4 // at runtime. 115.5 // 115.6 // In the following, the terms 'previous', 'subsequent', 'before', 115.7 -// 'after', 'preceeding' and 'succeeding' refer to program order. The 115.8 +// 'after', 'preceding' and 'succeeding' refer to program order. The 115.9 // terms 'down' and 'below' refer to forward load or store motion 115.10 // relative to program order, while 'up' and 'above' refer to backward 115.11 // motion.
116.1 --- a/src/share/vm/runtime/os.cpp Fri Feb 27 08:34:19 2009 -0800 116.2 +++ b/src/share/vm/runtime/os.cpp Fri Feb 27 13:27:09 2009 -0800 116.3 @@ -943,7 +943,7 @@ 116.4 assert(StackRedPages > 0 && StackYellowPages > 0,"Sanity check"); 116.5 address sp = current_stack_pointer(); 116.6 // Check if we have StackShadowPages above the yellow zone. This parameter 116.7 - // is dependant on the depth of the maximum VM call stack possible from 116.8 + // is dependent on the depth of the maximum VM call stack possible from 116.9 // the handler for stack overflow. 'instanceof' in the stack overflow 116.10 // handler or a println uses at least 8k stack of VM and native code 116.11 // respectively.
117.1 --- a/src/share/vm/runtime/safepoint.cpp Fri Feb 27 08:34:19 2009 -0800 117.2 +++ b/src/share/vm/runtime/safepoint.cpp Fri Feb 27 13:27:09 2009 -0800 117.3 @@ -369,7 +369,7 @@ 117.4 117.5 // Start suspended threads 117.6 for(JavaThread *current = Threads::first(); current; current = current->next()) { 117.7 - // A problem occuring on Solaris is when attempting to restart threads 117.8 + // A problem occurring on Solaris is when attempting to restart threads 117.9 // the first #cpus - 1 go well, but then the VMThread is preempted when we get 117.10 // to the next one (since it has been running the longest). We then have 117.11 // to wait for a cpu to become available before we can continue restarting
118.1 --- a/src/share/vm/runtime/signature.hpp Fri Feb 27 08:34:19 2009 -0800 118.2 +++ b/src/share/vm/runtime/signature.hpp Fri Feb 27 13:27:09 2009 -0800 118.3 @@ -266,7 +266,7 @@ 118.4 class NativeSignatureIterator: public SignatureIterator { 118.5 private: 118.6 methodHandle _method; 118.7 -// We need seperate JNI and Java offset values because in 64 bit mode, 118.8 +// We need separate JNI and Java offset values because in 64 bit mode, 118.9 // the argument offsets are not in sync with the Java stack. 118.10 // For example a long takes up 1 "C" stack entry but 2 Java stack entries. 118.11 int _offset; // The java stack offset
119.1 --- a/src/share/vm/runtime/threadCritical.hpp Fri Feb 27 08:34:19 2009 -0800 119.2 +++ b/src/share/vm/runtime/threadCritical.hpp Fri Feb 27 13:27:09 2009 -0800 119.3 @@ -29,7 +29,7 @@ 119.4 // 119.5 // Due to race conditions during vm exit, some of the os level 119.6 // synchronization primitives may not be deallocated at exit. It 119.7 -// is a good plan to implement the platform dependant sections of 119.8 +// is a good plan to implement the platform dependent sections of 119.9 // code with resources that are recoverable during process 119.10 // cleanup by the os. Calling the initialize method before use 119.11 // is also problematic, it is best to use preinitialized primitives
120.1 --- a/src/share/vm/utilities/globalDefinitions.hpp Fri Feb 27 08:34:19 2009 -0800 120.2 +++ b/src/share/vm/utilities/globalDefinitions.hpp Fri Feb 27 13:27:09 2009 -0800 120.3 @@ -881,7 +881,7 @@ 120.4 i++; p *= 2; 120.5 } 120.6 // p = 2^(i+1) && x < p (i.e., 2^i <= x < 2^(i+1)) 120.7 - // (if p = 0 then overflow occured and i = 31) 120.8 + // (if p = 0 then overflow occurred and i = 31) 120.9 return i; 120.10 } 120.11 120.12 @@ -895,7 +895,7 @@ 120.13 i++; p *= 2; 120.14 } 120.15 // p = 2^(i+1) && x < p (i.e., 2^i <= x < 2^(i+1)) 120.16 - // (if p = 0 then overflow occured and i = 63) 120.17 + // (if p = 0 then overflow occurred and i = 63) 120.18 return i; 120.19 } 120.20