--- a/src/cpu/x86/vm/c1_FrameMap_x86.cpp	Tue Nov 23 13:22:55 2010 -0800
+++ b/src/cpu/x86/vm/c1_FrameMap_x86.cpp	Tue Nov 30 23:23:40 2010 -0800
@@ -158,9 +158,11 @@
   map_register( 6, r8);   r8_opr = LIR_OprFact::single_cpu(6);
   map_register( 7, r9);   r9_opr = LIR_OprFact::single_cpu(7);
   map_register( 8, r11);  r11_opr = LIR_OprFact::single_cpu(8);
-  map_register( 9, r12);  r12_opr = LIR_OprFact::single_cpu(9);
-  map_register(10, r13);  r13_opr = LIR_OprFact::single_cpu(10);
-  map_register(11, r14);  r14_opr = LIR_OprFact::single_cpu(11);
+  map_register( 9, r13);  r13_opr = LIR_OprFact::single_cpu(9);
+  map_register(10, r14);  r14_opr = LIR_OprFact::single_cpu(10);
+  // r12 is allocated conditionally. With compressed oops it holds
+  // the heapbase value and is not visible to the allocator.
+  map_register(11, r12);  r12_opr = LIR_OprFact::single_cpu(11);
   // The unallocatable registers are at the end
   map_register(12, r10);  r10_opr = LIR_OprFact::single_cpu(12);
   map_register(13, r15);  r15_opr = LIR_OprFact::single_cpu(13);
@@ -191,9 +193,9 @@
   _caller_save_cpu_regs[6] = r8_opr;
   _caller_save_cpu_regs[7] = r9_opr;
   _caller_save_cpu_regs[8] = r11_opr;
-  _caller_save_cpu_regs[9] = r12_opr;
-  _caller_save_cpu_regs[10] = r13_opr;
-  _caller_save_cpu_regs[11] = r14_opr;
+  _caller_save_cpu_regs[9] = r13_opr;
+  _caller_save_cpu_regs[10] = r14_opr;
+  _caller_save_cpu_regs[11] = r12_opr;
 #endif // _LP64
 