Tue, 30 Nov 2010 23:23:40 -0800
6985015: C1 needs to support compressed oops
Summary: This change implements compressed oops support in C1 for x64 and SPARC. The changes are mostly at the codegen level, with a few exceptions where the IR accesses uncompressed oops that live outside the heap (handled via the new "wide" flag on moves). Compressed oops are now also enabled with tiered compilation.
Reviewed-by: twisti, kvn, never, phh
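
Background for readers new to the feature: a compressed oop stores a 64-bit heap pointer as a 32-bit offset from a fixed heap base, optionally shifted by the object-alignment bits. The encode_heap_oop/decode_heap_oop and load_klass assembler helpers this change leans on implement exactly that arithmetic. A minimal illustrative C++ model, not VM code; heap_base and oop_shift are hypothetical stand-ins for values HotSpot derives from the actual heap placement at startup:

    // Illustrative model of narrow-oop arithmetic; heap_base/oop_shift are
    // assumed values, not the VM's.
    #include <cassert>
    #include <cstdint>

    const uint64_t heap_base = 0x0000000800000000ULL; // hypothetical heap base
    const unsigned oop_shift = 3;                     // log2 of 8-byte alignment

    // encode_heap_oop: compress a full oop into the 32-bit field format.
    uint32_t encode_heap_oop(uint64_t oop) {
      if (oop == 0) return 0;                         // NULL stays NULL
      return (uint32_t)((oop - heap_base) >> oop_shift);
    }

    // decode_heap_oop: expand a narrow oop back into a full pointer.
    uint64_t decode_heap_oop(uint32_t narrow_oop) {
      if (narrow_oop == 0) return 0;
      return heap_base + ((uint64_t)narrow_oop << oop_shift);
    }

    int main() {
      uint64_t oop = heap_base + 0x1000;              // some aligned heap address
      assert(decode_heap_oop(encode_heap_oop(oop)) == oop);
      return 0;
    }

The later sketches in this changeset's annotations reuse these two hypothetical constants.
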
--- a/src/cpu/sparc/vm/assembler_sparc.cpp	Tue Nov 23 13:22:55 2010 -0800
+++ b/src/cpu/sparc/vm/assembler_sparc.cpp	Tue Nov 30 23:23:40 2010 -0800
@@ -909,10 +909,10 @@
 #if defined(COMPILER2) && !defined(_LP64)
   // Save & restore possible 64-bit Long arguments in G-regs
   sllx(L0,32,G2);             // Move old high G1 bits high in G2
-  sllx(G1, 0,G1);             // Clear current high G1 bits
+  srl(G1, 0,G1);              // Clear current high G1 bits
   or3 (G1,G2,G1);             // Recover 64-bit G1
   sllx(L6,32,G2);             // Move old high G4 bits high in G2
-  sllx(G4, 0,G4);             // Clear current high G4 bits
+  srl(G4, 0,G4);              // Clear current high G4 bits
   or3 (G4,G2,G4);             // Recover 64-bit G4
 #endif
   restore(O0, 0, G2_thread);
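
The hunk above also picks up a latent SPARC fix: sllx is a 64-bit shift, so shifting left by zero leaves the high word intact, while srl shifts the low 32 bits and zero-extends, which is what the "Clear current high bits" comments intend. A small C++ model of the two V9 semantics (assumed from the architecture manual, not taken from the patch):

    // Model of the two SPARC V9 instruction semantics involved.
    #include <cassert>
    #include <cstdint>

    // sllx: 64-bit shift left; a shift count of 0 is a no-op.
    uint64_t sllx(uint64_t reg, unsigned count) { return reg << count; }

    // srl: 32-bit shift right; the result is zero-extended to 64 bits,
    // so even a count of 0 clears bits 63..32.
    uint64_t srl(uint64_t reg, unsigned count) {
      return (uint64_t)((uint32_t)reg >> count);
    }

    int main() {
      uint64_t g1 = 0xdeadbeef00001234ULL;            // stale bits up high
      assert(sllx(g1, 0) == g1);                      // old code: stale bits survive
      assert(srl (g1, 0) == 0x0000000000001234ULL);   // new code: bits cleared
      return 0;
    }
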
--- a/src/cpu/sparc/vm/assembler_sparc.hpp	Tue Nov 23 13:22:55 2010 -0800
+++ b/src/cpu/sparc/vm/assembler_sparc.hpp	Tue Nov 30 23:23:40 2010 -0800
@@ -1798,6 +1798,7 @@
   // branches that use right instruction for v8 vs. v9
   inline void br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
   inline void br( Condition c, bool a, Predict p, Label& L );
+
   inline void fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
   inline void fb( Condition c, bool a, Predict p, Label& L );
 
--- a/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Tue Nov 23 13:22:55 2010 -0800
+++ b/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp	Tue Nov 30 23:23:40 2010 -0800
@@ -434,7 +434,7 @@
 
   Register pre_val_reg = pre_val()->as_register();
 
-  ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false);
+  ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
   if (__ is_in_wdisp16_range(_continuation)) {
     __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt,
                       pre_val_reg, _continuation);
--- a/src/cpu/sparc/vm/c1_FrameMap_sparc.hpp	Tue Nov 23 13:22:55 2010 -0800
+++ b/src/cpu/sparc/vm/c1_FrameMap_sparc.hpp	Tue Nov 30 23:23:40 2010 -0800
@@ -155,4 +155,7 @@
   static bool is_caller_save_register (LIR_Opr reg);
   static bool is_caller_save_register (Register r);
 
+  static int nof_caller_save_cpu_regs() { return pd_nof_caller_save_cpu_regs_frame_map; }
+  static int last_cpu_reg() { return pd_last_cpu_reg; }
+
 #endif // CPU_SPARC_VM_C1_FRAMEMAP_SPARC_HPP
--- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Tue Nov 23 13:22:55 2010 -0800
+++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	Tue Nov 30 23:23:40 2010 -0800
@@ -100,6 +100,11 @@
     return false;
   }
 
+  if (UseCompressedOops) {
+    if (dst->is_address() && !dst->is_stack() && (dst->type() == T_OBJECT || dst->type() == T_ARRAY)) return false;
+    if (src->is_address() && !src->is_stack() && (src->type() == T_OBJECT || src->type() == T_ARRAY)) return false;
+  }
+
   if (dst->is_register()) {
     if (src->is_address() && Assembler::is_simm13(src->as_address_ptr()->disp())) {
       return !PatchALot;
@@ -253,7 +258,7 @@
   int offset_offset = java_lang_String::offset_offset_in_bytes(); // first character position
   int count_offset  = java_lang_String:: count_offset_in_bytes();
 
-  __ ld_ptr(str0, value_offset, tmp0);
+  __ load_heap_oop(str0, value_offset, tmp0);
   __ ld(str0, offset_offset, tmp2);
   __ add(tmp0, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp0);
   __ ld(str0, count_offset, str0);
@@ -262,7 +267,7 @@
   // str1 may be null
   add_debug_info_for_null_check_here(info);
 
-  __ ld_ptr(str1, value_offset, tmp1);
+  __ load_heap_oop(str1, value_offset, tmp1);
   __ add(tmp0, tmp2, tmp0);
 
   __ ld(str1, offset_offset, tmp2);
@@ -766,7 +771,7 @@
 
 void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
   add_debug_info_for_null_check_here(op->info());
-  __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), G3_scratch);
+  __ load_klass(O0, G3_scratch);
   if (__ is_simm13(op->vtable_offset())) {
     __ ld_ptr(G3_scratch, op->vtable_offset(), G5_method);
   } else {
@@ -780,138 +785,17 @@
   // the peephole pass fills the delay slot
 }
 
-
-// load with 32-bit displacement
-int LIR_Assembler::load(Register s, int disp, Register d, BasicType ld_type, CodeEmitInfo *info) {
-  int load_offset = code_offset();
-  if (Assembler::is_simm13(disp)) {
-    if (info != NULL) add_debug_info_for_null_check_here(info);
-    switch(ld_type) {
-      case T_BOOLEAN: // fall through
-      case T_BYTE  : __ ldsb(s, disp, d); break;
-      case T_CHAR  : __ lduh(s, disp, d); break;
-      case T_SHORT : __ ldsh(s, disp, d); break;
-      case T_INT   : __ ld(s, disp, d); break;
-      case T_ADDRESS:// fall through
-      case T_ARRAY : // fall through
-      case T_OBJECT: __ ld_ptr(s, disp, d); break;
-      default      : ShouldNotReachHere();
-    }
-  } else {
-    __ set(disp, O7);
-    if (info != NULL) add_debug_info_for_null_check_here(info);
-    load_offset = code_offset();
-    switch(ld_type) {
-      case T_BOOLEAN: // fall through
-      case T_BYTE  : __ ldsb(s, O7, d); break;
-      case T_CHAR  : __ lduh(s, O7, d); break;
-      case T_SHORT : __ ldsh(s, O7, d); break;
-      case T_INT   : __ ld(s, O7, d); break;
-      case T_ADDRESS:// fall through
-      case T_ARRAY : // fall through
-      case T_OBJECT: __ ld_ptr(s, O7, d); break;
-      default      : ShouldNotReachHere();
-    }
-  }
-  if (ld_type == T_ARRAY || ld_type == T_OBJECT) __ verify_oop(d);
-  return load_offset;
-}
-
-
-// store with 32-bit displacement
-void LIR_Assembler::store(Register value, Register base, int offset, BasicType type, CodeEmitInfo *info) {
-  if (Assembler::is_simm13(offset)) {
-    if (info != NULL) add_debug_info_for_null_check_here(info);
-    switch (type) {
-      case T_BOOLEAN: // fall through
-      case T_BYTE  : __ stb(value, base, offset); break;
-      case T_CHAR  : __ sth(value, base, offset); break;
-      case T_SHORT : __ sth(value, base, offset); break;
-      case T_INT   : __ stw(value, base, offset); break;
-      case T_ADDRESS:// fall through
-      case T_ARRAY : // fall through
-      case T_OBJECT: __ st_ptr(value, base, offset); break;
-      default      : ShouldNotReachHere();
-    }
-  } else {
-    __ set(offset, O7);
-    if (info != NULL) add_debug_info_for_null_check_here(info);
-    switch (type) {
-      case T_BOOLEAN: // fall through
-      case T_BYTE  : __ stb(value, base, O7); break;
-      case T_CHAR  : __ sth(value, base, O7); break;
-      case T_SHORT : __ sth(value, base, O7); break;
-      case T_INT   : __ stw(value, base, O7); break;
-      case T_ADDRESS:// fall through
-      case T_ARRAY : //fall through
-      case T_OBJECT: __ st_ptr(value, base, O7); break;
-      default      : ShouldNotReachHere();
-    }
-  }
-  // Note: Do the store before verification as the code might be patched!
-  if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(value);
-}
-
-
-// load float with 32-bit displacement
-void LIR_Assembler::load(Register s, int disp, FloatRegister d, BasicType ld_type, CodeEmitInfo *info) {
-  FloatRegisterImpl::Width w;
-  switch(ld_type) {
-    case T_FLOAT : w = FloatRegisterImpl::S; break;
-    case T_DOUBLE: w = FloatRegisterImpl::D; break;
-    default      : ShouldNotReachHere();
-  }
-
-  if (Assembler::is_simm13(disp)) {
-    if (info != NULL) add_debug_info_for_null_check_here(info);
-    if (disp % BytesPerLong != 0 && w == FloatRegisterImpl::D) {
-      __ ldf(FloatRegisterImpl::S, s, disp + BytesPerWord, d->successor());
-      __ ldf(FloatRegisterImpl::S, s, disp               , d);
-    } else {
-      __ ldf(w, s, disp, d);
-    }
-  } else {
-    __ set(disp, O7);
-    if (info != NULL) add_debug_info_for_null_check_here(info);
-    __ ldf(w, s, O7, d);
-  }
-}
-
-
-// store float with 32-bit displacement
-void LIR_Assembler::store(FloatRegister value, Register base, int offset, BasicType type, CodeEmitInfo *info) {
-  FloatRegisterImpl::Width w;
-  switch(type) {
-    case T_FLOAT : w = FloatRegisterImpl::S; break;
-    case T_DOUBLE: w = FloatRegisterImpl::D; break;
-    default      : ShouldNotReachHere();
-  }
-
-  if (Assembler::is_simm13(offset)) {
-    if (info != NULL) add_debug_info_for_null_check_here(info);
-    if (w == FloatRegisterImpl::D && offset % BytesPerLong != 0) {
-      __ stf(FloatRegisterImpl::S, value->successor(), base, offset + BytesPerWord);
-      __ stf(FloatRegisterImpl::S, value             , base, offset);
-    } else {
-      __ stf(w, value, base, offset);
-    }
-  } else {
-    __ set(offset, O7);
-    if (info != NULL) add_debug_info_for_null_check_here(info);
-    __ stf(w, value, O7, base);
-  }
-}
-
-
-int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool unaligned) {
+int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool wide, bool unaligned) {
   int store_offset;
   if (!Assembler::is_simm13(offset + (type == T_LONG) ? wordSize : 0)) {
     assert(!unaligned, "can't handle this");
     // for offsets larger than a simm13 we setup the offset in O7
     __ set(offset, O7);
-    store_offset = store(from_reg, base, O7, type);
+    store_offset = store(from_reg, base, O7, type, wide);
   } else {
-    if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(from_reg->as_register());
+    if (type == T_ARRAY || type == T_OBJECT) {
+      __ verify_oop(from_reg->as_register());
+    }
     store_offset = code_offset();
     switch (type) {
       case T_BOOLEAN: // fall through
@@ -934,9 +818,22 @@
         __ stw(from_reg->as_register_hi(), base, offset + hi_word_offset_in_bytes);
 #endif
         break;
-      case T_ADDRESS:// fall through
+      case T_ADDRESS:
+        __ st_ptr(from_reg->as_register(), base, offset);
+        break;
       case T_ARRAY : // fall through
-      case T_OBJECT: __ st_ptr(from_reg->as_register(), base, offset); break;
+      case T_OBJECT:
+        {
+          if (UseCompressedOops && !wide) {
+            __ encode_heap_oop(from_reg->as_register(), G3_scratch);
+            store_offset = code_offset();
+            __ stw(G3_scratch, base, offset);
+          } else {
+            __ st_ptr(from_reg->as_register(), base, offset);
+          }
+          break;
+        }
+
       case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, offset); break;
       case T_DOUBLE:
         {
@@ -958,8 +855,10 @@
 }
 
 
-int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type) {
-  if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(from_reg->as_register());
+int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide) {
+  if (type == T_ARRAY || type == T_OBJECT) {
+    __ verify_oop(from_reg->as_register());
+  }
   int store_offset = code_offset();
   switch (type) {
     case T_BOOLEAN: // fall through
@@ -975,9 +874,21 @@
       __ std(from_reg->as_register_hi(), base, disp);
 #endif
       break;
-    case T_ADDRESS:// fall through
+    case T_ADDRESS:
+      __ st_ptr(from_reg->as_register(), base, disp);
+      break;
     case T_ARRAY : // fall through
-    case T_OBJECT: __ st_ptr(from_reg->as_register(), base, disp); break;
+    case T_OBJECT:
+      {
+        if (UseCompressedOops && !wide) {
+          __ encode_heap_oop(from_reg->as_register(), G3_scratch);
+          store_offset = code_offset();
+          __ stw(G3_scratch, base, disp);
+        } else {
+          __ st_ptr(from_reg->as_register(), base, disp);
+        }
+        break;
+      }
    case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, disp); break;
    case T_DOUBLE: __ stf(FloatRegisterImpl::D, from_reg->as_double_reg(), base, disp); break;
    default      : ShouldNotReachHere();
@@ -986,14 +897,14 @@
 }
 
 
-int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool unaligned) {
+int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide, bool unaligned) {
   int load_offset;
   if (!Assembler::is_simm13(offset + (type == T_LONG) ? wordSize : 0)) {
     assert(base != O7, "destroying register");
     assert(!unaligned, "can't handle this");
     // for offsets larger than a simm13 we setup the offset in O7
     __ set(offset, O7);
-    load_offset = load(base, O7, to_reg, type);
+    load_offset = load(base, O7, to_reg, type, wide);
   } else {
     load_offset = code_offset();
     switch(type) {
@@ -1030,9 +941,18 @@
 #endif
         }
         break;
-      case T_ADDRESS:// fall through
+      case T_ADDRESS: __ ld_ptr(base, offset, to_reg->as_register()); break;
       case T_ARRAY : // fall through
-      case T_OBJECT: __ ld_ptr(base, offset, to_reg->as_register()); break;
+      case T_OBJECT:
+        {
+          if (UseCompressedOops && !wide) {
+            __ lduw(base, offset, to_reg->as_register());
+            __ decode_heap_oop(to_reg->as_register());
+          } else {
+            __ ld_ptr(base, offset, to_reg->as_register());
+          }
+          break;
+        }
      case T_FLOAT:  __ ldf(FloatRegisterImpl::S, base, offset, to_reg->as_float_reg()); break;
      case T_DOUBLE:
        {
@@ -1048,23 +968,34 @@
        }
      default      : ShouldNotReachHere();
    }
-    if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(to_reg->as_register());
+    if (type == T_ARRAY || type == T_OBJECT) {
+      __ verify_oop(to_reg->as_register());
+    }
   }
   return load_offset;
 }
 
 
-int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type) {
+int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide) {
   int load_offset = code_offset();
   switch(type) {
     case T_BOOLEAN: // fall through
-    case T_BYTE  : __ ldsb(base, disp, to_reg->as_register()); break;
-    case T_CHAR  : __ lduh(base, disp, to_reg->as_register()); break;
-    case T_SHORT : __ ldsh(base, disp, to_reg->as_register()); break;
-    case T_INT   : __ ld(base, disp, to_reg->as_register()); break;
-    case T_ADDRESS:// fall through
+    case T_BYTE   : __ ldsb(base, disp, to_reg->as_register()); break;
+    case T_CHAR   : __ lduh(base, disp, to_reg->as_register()); break;
+    case T_SHORT  : __ ldsh(base, disp, to_reg->as_register()); break;
+    case T_INT    : __ ld(base, disp, to_reg->as_register()); break;
+    case T_ADDRESS: __ ld_ptr(base, disp, to_reg->as_register()); break;
    case T_ARRAY : // fall through
-    case T_OBJECT: __ ld_ptr(base, disp, to_reg->as_register()); break;
+    case T_OBJECT:
+      {
+        if (UseCompressedOops && !wide) {
+          __ lduw(base, disp, to_reg->as_register());
+          __ decode_heap_oop(to_reg->as_register());
+        } else {
+          __ ld_ptr(base, disp, to_reg->as_register());
+        }
+        break;
+      }
    case T_FLOAT:  __ ldf(FloatRegisterImpl::S, base, disp, to_reg->as_float_reg()); break;
    case T_DOUBLE: __ ldf(FloatRegisterImpl::D, base, disp, to_reg->as_double_reg()); break;
    case T_LONG  :
@@ -1078,60 +1009,28 @@
       break;
     default      : ShouldNotReachHere();
   }
-  if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(to_reg->as_register());
+  if (type == T_ARRAY || type == T_OBJECT) {
+    __ verify_oop(to_reg->as_register());
+  }
   return load_offset;
 }
 
-
-// load/store with an Address
-void LIR_Assembler::load(const Address& a, Register d, BasicType ld_type, CodeEmitInfo *info, int offset) {
-  load(a.base(), a.disp() + offset, d, ld_type, info);
-}
-
-
-void LIR_Assembler::store(Register value, const Address& dest, BasicType type, CodeEmitInfo *info, int offset) {
-  store(value, dest.base(), dest.disp() + offset, type, info);
-}
-
-
-// loadf/storef with an Address
-void LIR_Assembler::load(const Address& a, FloatRegister d, BasicType ld_type, CodeEmitInfo *info, int offset) {
-  load(a.base(), a.disp() + offset, d, ld_type, info);
-}
-
-
-void LIR_Assembler::store(FloatRegister value, const Address& dest, BasicType type, CodeEmitInfo *info, int offset) {
-  store(value, dest.base(), dest.disp() + offset, type, info);
-}
-
-
-// load/store with an Address
-void LIR_Assembler::load(LIR_Address* a, Register d, BasicType ld_type, CodeEmitInfo *info) {
-  load(as_Address(a), d, ld_type, info);
-}
-
-
-void LIR_Assembler::store(Register value, LIR_Address* dest, BasicType type, CodeEmitInfo *info) {
-  store(value, as_Address(dest), type, info);
-}
-
-
-// loadf/storef with an Address
-void LIR_Assembler::load(LIR_Address* a, FloatRegister d, BasicType ld_type, CodeEmitInfo *info) {
-  load(as_Address(a), d, ld_type, info);
-}
-
-
-void LIR_Assembler::store(FloatRegister value, LIR_Address* dest, BasicType type, CodeEmitInfo *info) {
-  store(value, as_Address(dest), type, info);
-}
-
-
 void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
   LIR_Const* c = src->as_constant_ptr();
   switch (c->type()) {
     case T_INT:
-    case T_FLOAT:
+    case T_FLOAT: {
+      Register src_reg = O7;
+      int value = c->as_jint_bits();
+      if (value == 0) {
+        src_reg = G0;
+      } else {
+        __ set(value, O7);
+      }
+      Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
+      __ stw(src_reg, addr.base(), addr.disp());
+      break;
+    }
    case T_ADDRESS: {
      Register src_reg = O7;
      int value = c->as_jint_bits();
@@ -1141,7 +1040,7 @@
         __ set(value, O7);
       }
       Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
-      __ stw(src_reg, addr.base(), addr.disp());
+      __ st_ptr(src_reg, addr.base(), addr.disp());
       break;
     }
     case T_OBJECT: {
@@ -1178,14 +1077,12 @@
 }
 
 
-void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info ) {
+void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
   LIR_Const* c = src->as_constant_ptr();
   LIR_Address* addr = dest->as_address_ptr();
   Register base = addr->base()->as_pointer_register();
-
-  if (info != NULL) {
-    add_debug_info_for_null_check_here(info);
-  }
+  int offset = -1;
+
   switch (c->type()) {
     case T_INT:
     case T_FLOAT:
@@ -1199,10 +1096,10 @@
       }
       if (addr->index()->is_valid()) {
         assert(addr->disp() == 0, "must be zero");
-        store(tmp, base, addr->index()->as_pointer_register(), type);
+        offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
       } else {
         assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
-        store(tmp, base, addr->disp(), type);
+        offset = store(tmp, base, addr->disp(), type, wide, false);
       }
       break;
     }
@@ -1212,21 +1109,21 @@
       assert(Assembler::is_simm13(addr->disp()) &&
              Assembler::is_simm13(addr->disp() + 4), "can't handle larger addresses");
 
-      Register tmp = O7;
+      LIR_Opr tmp = FrameMap::O7_opr;
       int value_lo = c->as_jint_lo_bits();
       if (value_lo == 0) {
-        tmp = G0;
+        tmp = FrameMap::G0_opr;
       } else {
         __ set(value_lo, O7);
       }
-      store(tmp, base, addr->disp() + lo_word_offset_in_bytes, T_INT);
+      offset = store(tmp, base, addr->disp() + lo_word_offset_in_bytes, T_INT, wide, false);
       int value_hi = c->as_jint_hi_bits();
       if (value_hi == 0) {
-        tmp = G0;
+        tmp = FrameMap::G0_opr;
       } else {
         __ set(value_hi, O7);
       }
-      store(tmp, base, addr->disp() + hi_word_offset_in_bytes, T_INT);
+      offset = store(tmp, base, addr->disp() + hi_word_offset_in_bytes, T_INT, wide, false);
       break;
     }
     case T_OBJECT: {
@@ -1241,10 +1138,10 @@
       // handle either reg+reg or reg+disp address
       if (addr->index()->is_valid()) {
         assert(addr->disp() == 0, "must be zero");
-        store(tmp, base, addr->index()->as_pointer_register(), type);
+        offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
       } else {
         assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
-        store(tmp, base, addr->disp(), type);
+        offset = store(tmp, base, addr->disp(), type, wide, false);
       }
 
       break;
@@ -1252,6 +1149,10 @@
     default:
       Unimplemented();
   }
+  if (info != NULL) {
+    assert(offset != -1, "offset should've been set");
+    add_debug_info_for_null_check(offset, info);
+  }
 }
 
 
@@ -1336,7 +1237,7 @@
         assert(to_reg->is_single_cpu(), "Must be a cpu register.");
 
         __ set(const_addrlit, O7);
-        load(O7, 0, to_reg->as_register(), T_INT);
+        __ ld(O7, 0, to_reg->as_register());
       }
     }
     break;
@@ -1429,7 +1330,7 @@
 
 
 void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,
-                            LIR_PatchCode patch_code, CodeEmitInfo* info, bool unaligned) {
+                            LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool unaligned) {
 
   LIR_Address* addr = src_opr->as_address_ptr();
   LIR_Opr to_reg = dest;
@@ -1475,16 +1376,15 @@
 
   assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
   if (disp_reg == noreg) {
-    offset = load(src, disp_value, to_reg, type, unaligned);
+    offset = load(src, disp_value, to_reg, type, wide, unaligned);
   } else {
     assert(!unaligned, "can't handle this");
-    offset = load(src, disp_reg, to_reg, type);
+    offset = load(src, disp_reg, to_reg, type, wide);
   }
 
   if (patch != NULL) {
     patching_epilog(patch, patch_code, src, info);
   }
-
   if (info != NULL) add_debug_info_for_null_check(offset, info);
 }
 
@@ -1518,7 +1418,7 @@
   }
 
   bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
-  load(addr.base(), addr.disp(), dest, dest->type(), unaligned);
+  load(addr.base(), addr.disp(), dest, dest->type(), true /*wide*/, unaligned);
 }
 
 
@@ -1530,7 +1430,7 @@
     addr = frame_map()->address_for_slot(dest->double_stack_ix());
   }
   bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
-  store(from_reg, addr.base(), addr.disp(), from_reg->type(), unaligned);
+  store(from_reg, addr.base(), addr.disp(), from_reg->type(), true /*wide*/, unaligned);
 }
 
 
@@ -1578,7 +1478,7 @@
 
 void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
                             LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
                             bool wide, bool unaligned) {
-                            bool unaligned) {
+                            bool wide, bool unaligned) {
   LIR_Address* addr = dest->as_address_ptr();
 
   Register src = addr->base()->as_pointer_register();
@@ -1622,10 +1522,10 @@
 
   assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
   if (disp_reg == noreg) {
-    offset = store(from_reg, src, disp_value, type, unaligned);
+    offset = store(from_reg, src, disp_value, type, wide, unaligned);
   } else {
     assert(!unaligned, "can't handle this");
-    offset = store(from_reg, src, disp_reg, type);
+    offset = store(from_reg, src, disp_reg, type, wide);
   }
 
   if (patch != NULL) {
@@ -2184,13 +2084,13 @@
   // make sure src and dst are non-null and load array length
   if (flags & LIR_OpArrayCopy::src_null_check) {
     __ tst(src);
-    __ br(Assembler::equal, false, Assembler::pn, *stub->entry());
+    __ brx(Assembler::equal, false, Assembler::pn, *stub->entry());
     __ delayed()->nop();
   }
 
   if (flags & LIR_OpArrayCopy::dst_null_check) {
     __ tst(dst);
-    __ br(Assembler::equal, false, Assembler::pn, *stub->entry());
+    __ brx(Assembler::equal, false, Assembler::pn, *stub->entry());
     __ delayed()->nop();
   }
 
@@ -2232,10 +2132,18 @@
   }
 
   if (flags & LIR_OpArrayCopy::type_check) {
-    __ ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp);
-    __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
-    __ cmp(tmp, tmp2);
-    __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
+    if (UseCompressedOops) {
+      // We don't need decode because we just need to compare
+      __ lduw(src, oopDesc::klass_offset_in_bytes(), tmp);
+      __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
+      __ cmp(tmp, tmp2);
+      __ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
+    } else {
+      __ ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp);
+      __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
+      __ cmp(tmp, tmp2);
+      __ brx(Assembler::notEqual, false, Assembler::pt, *stub->entry());
+    }
     __ delayed()->nop();
   }
 
@@ -2250,20 +2158,44 @@
     // but not necessarily exactly of type default_type.
     Label known_ok, halt;
     jobject2reg(op->expected_type()->constant_encoding(), tmp);
-    __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
-    if (basic_type != T_OBJECT) {
-      __ cmp(tmp, tmp2);
-      __ br(Assembler::notEqual, false, Assembler::pn, halt);
-      __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp2);
-      __ cmp(tmp, tmp2);
-      __ br(Assembler::equal, false, Assembler::pn, known_ok);
-      __ delayed()->nop();
+    if (UseCompressedOops) {
+      // tmp holds the default type. It currently comes uncompressed after the
+      // load of a constant, so encode it.
+      __ encode_heap_oop(tmp);
+      // load the raw value of the dst klass, since we will be comparing
+      // uncompressed values directly.
+      __ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
+      if (basic_type != T_OBJECT) {
+        __ cmp(tmp, tmp2);
+        __ br(Assembler::notEqual, false, Assembler::pn, halt);
+        // load the raw value of the src klass.
+        __ delayed()->lduw(src, oopDesc::klass_offset_in_bytes(), tmp2);
+        __ cmp(tmp, tmp2);
+        __ br(Assembler::equal, false, Assembler::pn, known_ok);
+        __ delayed()->nop();
+      } else {
+        __ cmp(tmp, tmp2);
+        __ br(Assembler::equal, false, Assembler::pn, known_ok);
+        __ delayed()->cmp(src, dst);
+        __ brx(Assembler::equal, false, Assembler::pn, known_ok);
+        __ delayed()->nop();
+      }
     } else {
-      __ cmp(tmp, tmp2);
-      __ br(Assembler::equal, false, Assembler::pn, known_ok);
-      __ delayed()->cmp(src, dst);
-      __ br(Assembler::equal, false, Assembler::pn, known_ok);
-      __ delayed()->nop();
+      __ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
+      if (basic_type != T_OBJECT) {
+        __ cmp(tmp, tmp2);
+        __ brx(Assembler::notEqual, false, Assembler::pn, halt);
+        __ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp2);
+        __ cmp(tmp, tmp2);
+        __ brx(Assembler::equal, false, Assembler::pn, known_ok);
+        __ delayed()->nop();
+      } else {
+        __ cmp(tmp, tmp2);
+        __ brx(Assembler::equal, false, Assembler::pn, known_ok);
+        __ delayed()->cmp(src, dst);
+        __ brx(Assembler::equal, false, Assembler::pn, known_ok);
+        __ delayed()->nop();
+      }
     }
     __ bind(halt);
     __ stop("incorrect type information in arraycopy");
@@ -2471,7 +2403,7 @@
     Label next_test;
     Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
                       mdo_offset_bias);
-    load(recv_addr, tmp1, T_OBJECT);
+    __ ld_ptr(recv_addr, tmp1);
     __ br_notnull(tmp1, false, Assembler::pt, next_test);
     __ delayed()->nop();
     __ st_ptr(recv, recv_addr);
@@ -2563,7 +2495,7 @@
 
     // get object class
    // not a safepoint as obj null check happens earlier
-    load(obj, oopDesc::klass_offset_in_bytes(), klass_RInfo, T_OBJECT, NULL);
+    __ load_klass(obj, klass_RInfo);
    if (op->fast_check()) {
      assert_different_registers(klass_RInfo, k_RInfo);
      __ cmp(k_RInfo, klass_RInfo);
@@ -2605,7 +2537,7 @@
         __ set(mdo_offset_bias, tmp1);
         __ add(mdo, tmp1, mdo);
       }
-      load(Address(obj, oopDesc::klass_offset_in_bytes()), recv, T_OBJECT);
+      __ load_klass(obj, recv);
       type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, success);
       // Jump over the failure case
       __ ba(false, *success);
@@ -2674,11 +2606,12 @@
       __ br_null(value, false, Assembler::pn, done);
       __ delayed()->nop();
     }
-    load(array, oopDesc::klass_offset_in_bytes(), k_RInfo, T_OBJECT, op->info_for_exception());
-    load(value, oopDesc::klass_offset_in_bytes(), klass_RInfo, T_OBJECT, NULL);
+    add_debug_info_for_null_check_here(op->info_for_exception());
+    __ load_klass(array, k_RInfo);
+    __ load_klass(value, klass_RInfo);
 
     // get instance klass
-    load(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc), k_RInfo, T_OBJECT, NULL);
+    __ ld_ptr(Address(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)), k_RInfo);
     // perform the fast part of the checking logic
     __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target, failure_target, NULL);
 
@@ -2700,7 +2633,7 @@
         __ set(mdo_offset_bias, tmp1);
         __ add(mdo, tmp1, mdo);
       }
-      load(Address(value, oopDesc::klass_offset_in_bytes()), recv, T_OBJECT);
+      __ load_klass(value, recv);
       type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &done);
       __ ba(false, done);
       __ delayed()->nop();
@@ -2781,14 +2714,17 @@
     Register t2 = op->tmp2()->as_register();
     __ mov(cmp_value, t1);
     __ mov(new_value, t2);
-#ifdef _LP64
     if (op->code() == lir_cas_obj) {
-      __ casx(addr, t1, t2);
-    } else
-#endif
-    {
+      if (UseCompressedOops) {
+        __ encode_heap_oop(t1);
+        __ encode_heap_oop(t2);
       __ cas(addr, t1, t2);
+      } else {
+        __ casx(addr, t1, t2);
     }
+    } else {
+      __ cas(addr, t1, t2);
+    }
     __ cmp(t1, t2);
   } else {
     Unimplemented();
@@ -2966,7 +2902,7 @@
       }
     }
   } else {
-    load(Address(recv, oopDesc::klass_offset_in_bytes()), recv, T_OBJECT);
+    __ load_klass(recv, recv);
     Label update_done;
     type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done);
     // Receiver did not match any saved receiver and there is no empty row for it.
@@ -3160,7 +3096,7 @@
   } else {
     // use normal move for all other volatiles since they don't need
     // special handling to remain atomic.
-    move_op(src, dest, type, lir_patch_none, info, false, false);
+    move_op(src, dest, type, lir_patch_none, info, false, false, false);
   }
 }
 
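
The pattern repeated through the new SPARC store/load variants above: when UseCompressedOops is on and the access is not wide, a T_OBJECT/T_ARRAY slot is a 32-bit field accessed with stw/lduw around an encode/decode, while wide forces the old full-width st_ptr/ld_ptr (used, for example, for stack slots and other off-heap homes, hence the true /*wide*/ call sites). A hedged standalone C++ sketch of that selection, reusing the hypothetical constants from the first sketch:

    // Standalone sketch of the wide/narrow selection in the new store()/load().
    #include <cstdint>
    #include <cstring>

    const uint64_t heap_base = 0x0000000800000000ULL; // hypothetical, as above
    const unsigned oop_shift = 3;

    uint32_t encode(uint64_t oop) { return oop ? (uint32_t)((oop - heap_base) >> oop_shift) : 0; }
    uint64_t decode(uint32_t n)   { return n ? heap_base + ((uint64_t)n << oop_shift) : 0; }

    // store(..., T_OBJECT, wide): st_ptr when wide, else encode_heap_oop + stw.
    void store_obj_field(void* slot, uint64_t oop, bool wide) {
      if (wide) {
        std::memcpy(slot, &oop, sizeof(oop));         // full 64-bit slot
      } else {
        uint32_t narrow = encode(oop);                // compressed 32-bit slot
        std::memcpy(slot, &narrow, sizeof(narrow));
      }
    }

    // load(..., T_OBJECT, wide): ld_ptr when wide, else lduw + decode_heap_oop.
    uint64_t load_obj_field(const void* slot, bool wide) {
      if (wide) {
        uint64_t oop;
        std::memcpy(&oop, slot, sizeof(oop));
        return oop;
      }
      uint32_t narrow;
      std::memcpy(&narrow, slot, sizeof(narrow));
      return decode(narrow);
    }

    int main() {
      uint64_t wide_slot = 0; uint32_t narrow_slot = 0;
      store_obj_field(&wide_slot,   heap_base + 0x40, true);
      store_obj_field(&narrow_slot, heap_base + 0x40, false);
      return load_obj_field(&wide_slot, true) == load_obj_field(&narrow_slot, false) ? 0 : 1;
    }
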
--- a/src/cpu/sparc/vm/c1_LIRAssembler_sparc.hpp	Tue Nov 23 13:22:55 2010 -0800
+++ b/src/cpu/sparc/vm/c1_LIRAssembler_sparc.hpp	Tue Nov 30 23:23:40 2010 -0800
@@ -40,33 +40,11 @@
   // and then a load or store is emitted with ([O7] + [d]).
   //
 
-  // some load/store variants return the code_offset for proper positioning of debug info for null checks
+  int store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool wide, bool unaligned);
+  int store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide);
 
-  // load/store with 32 bit displacement
-  int load(Register s, int disp, Register d, BasicType ld_type, CodeEmitInfo* info = NULL);
-  void store(Register value, Register base, int offset, BasicType type, CodeEmitInfo *info = NULL);
-
-  // loadf/storef with 32 bit displacement
-  void load(Register s, int disp, FloatRegister d, BasicType ld_type, CodeEmitInfo* info = NULL);
-  void store(FloatRegister d, Register s1, int disp, BasicType st_type, CodeEmitInfo* info = NULL);
-
-  // convienence methods for calling load/store with an Address
-  void load(const Address& a, Register d, BasicType ld_type, CodeEmitInfo* info = NULL, int offset = 0);
-  void store(Register d, const Address& a, BasicType st_type, CodeEmitInfo* info = NULL, int offset = 0);
-  void load(const Address& a, FloatRegister d, BasicType ld_type, CodeEmitInfo* info = NULL, int offset = 0);
-  void store(FloatRegister d, const Address& a, BasicType st_type, CodeEmitInfo* info = NULL, int offset = 0);
-
-  // convienence methods for calling load/store with an LIR_Address
-  void load(LIR_Address* a, Register d, BasicType ld_type, CodeEmitInfo* info = NULL);
-  void store(Register d, LIR_Address* a, BasicType st_type, CodeEmitInfo* info = NULL);
-  void load(LIR_Address* a, FloatRegister d, BasicType ld_type, CodeEmitInfo* info = NULL);
-  void store(FloatRegister d, LIR_Address* a, BasicType st_type, CodeEmitInfo* info = NULL);
-
-  int store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool unaligned = false);
-  int store(LIR_Opr from_reg, Register base, Register disp, BasicType type);
-
-  int load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool unaligned = false);
-  int load(Register base, Register disp, LIR_Opr to_reg, BasicType type);
+  int load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide, bool unaligned);
+  int load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide);
 
   void monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register hdr, int monitor_no);
 
--- a/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp	Tue Nov 23 13:22:55 2010 -0800
+++ b/src/cpu/sparc/vm/c1_MacroAssembler_sparc.cpp	Tue Nov 30 23:23:40 2010 -0800
@@ -40,7 +40,7 @@
   const Register temp_reg = G3_scratch;
   // Note: needs more testing of out-of-line vs. inline slow case
   verify_oop(receiver);
-  ld_ptr(receiver, oopDesc::klass_offset_in_bytes(), temp_reg);
+  load_klass(receiver, temp_reg);
   cmp(temp_reg, iCache);
   brx(Assembler::equal, true, Assembler::pt, L);
   delayed()->nop();
@@ -185,9 +185,19 @@
   } else {
     set((intx)markOopDesc::prototype(), t1);
   }
-  st_ptr(t1  , obj, oopDesc::mark_offset_in_bytes  ());
-  st_ptr(klass, obj, oopDesc::klass_offset_in_bytes ());
-  if (len->is_valid()) st(len  , obj, arrayOopDesc::length_offset_in_bytes());
+  st_ptr(t1, obj, oopDesc::mark_offset_in_bytes());
+  if (UseCompressedOops) {
+    // Save klass
+    mov(klass, t1);
+    encode_heap_oop_not_null(t1);
+    stw(t1, obj, oopDesc::klass_offset_in_bytes());
+  } else {
+    st_ptr(klass, obj, oopDesc::klass_offset_in_bytes());
+  }
+  if (len->is_valid()) st(len, obj, arrayOopDesc::length_offset_in_bytes());
+  else if (UseCompressedOops) {
+    store_klass_gap(G0, obj);
+  }
 }
 
 
@@ -235,7 +245,7 @@
                                      Register t1,       // temp register
                                      Register t2        // temp register
                                      ) {
-  const int hdr_size_in_bytes = instanceOopDesc::base_offset_in_bytes();
+  const int hdr_size_in_bytes = instanceOopDesc::header_size() * HeapWordSize;
 
   initialize_header(obj, klass, noreg, t1, t2);
 
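
The initialize_header hunk writes the object header in the compressed layout; the shape it assumes (mark word at offset 0, a 32-bit narrow klass at offset 8, and a 4-byte gap at offset 12 that store_klass_gap(G0, obj) zeroes unless an array length lands there instead) can be pictured as a plain struct. Illustrative only; the offsets are the usual 64-bit HotSpot values of this era, not taken from this diff:

    // Assumed 64-bit object header layout with UseCompressedOops (illustrative).
    #include <cstdint>

    struct CompressedObjectHeader {
      uint64_t mark;          // oopDesc::mark_offset_in_bytes() == 0
      uint32_t narrow_klass;  // oopDesc::klass_offset_in_bytes() == 8
      uint32_t gap_or_length; // klass gap (zeroed) for objects, length for arrays
    };

    static_assert(sizeof(CompressedObjectHeader) == 16,
                  "header occupies two heap words");

    int main() { return 0; }
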
--- a/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Tue Nov 23 13:22:55 2010 -0800
+++ b/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp	Tue Nov 30 23:23:40 2010 -0800
@@ -612,7 +612,7 @@
         // load the klass and check the has finalizer flag
         Label register_finalizer;
         Register t = O1;
-        __ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), t);
+        __ load_klass(O0, t);
         __ ld(t, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc), t);
         __ set(JVM_ACC_HAS_FINALIZER, G3);
         __ andcc(G3, t, G0);
--- a/src/cpu/x86/vm/assembler_x86.hpp	Tue Nov 23 13:22:55 2010 -0800
+++ b/src/cpu/x86/vm/assembler_x86.hpp	Tue Nov 30 23:23:40 2010 -0800
@@ -135,6 +135,7 @@
 // Using noreg ensures if the dead code is incorrectly live and executed it
 // will cause an assertion failure
 #define rscratch1 noreg
+#define rscratch2 noreg
 
 #endif // _LP64
 
--- a/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Tue Nov 23 13:22:55 2010 -0800
+++ b/src/cpu/x86/vm/c1_CodeStubs_x86.cpp	Tue Nov 30 23:23:40 2010 -0800
@@ -483,7 +483,7 @@
 
   Register pre_val_reg = pre_val()->as_register();
 
-  ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false);
+  ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
 
   __ cmpptr(pre_val_reg, (int32_t) NULL_WORD);
   __ jcc(Assembler::equal, _continuation);
--- a/src/cpu/x86/vm/c1_Defs_x86.hpp	Tue Nov 23 13:22:55 2010 -0800
+++ b/src/cpu/x86/vm/c1_Defs_x86.hpp	Tue Nov 30 23:23:40 2010 -0800
@@ -61,8 +61,8 @@
   pd_nof_xmm_regs_linearscan = pd_nof_xmm_regs_frame_map, // number of registers visible to linear scan
   pd_first_cpu_reg = 0,
   pd_last_cpu_reg = NOT_LP64(5) LP64_ONLY(11),
-  pd_first_byte_reg = 2,
-  pd_last_byte_reg = 5,
+  pd_first_byte_reg = NOT_LP64(2) LP64_ONLY(0),
+  pd_last_byte_reg = NOT_LP64(5) LP64_ONLY(11),
   pd_first_fpu_reg = pd_nof_cpu_regs_frame_map,
   pd_last_fpu_reg = pd_first_fpu_reg + 7,
   pd_first_xmm_reg = pd_nof_cpu_regs_frame_map + pd_nof_fpu_regs_frame_map,
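
NOT_LP64/LP64_ONLY are real HotSpot selection macros; the byte-register range widens on x86-64 because with a REX prefix every GPR has an addressable low byte, whereas in 32-bit mode only rax, rbx, rcx and rdx do (allocator indices 2..5 here). A simplified self-contained model of the selection:

    // Simplified stand-ins for HotSpot's NOT_LP64/LP64_ONLY selectors.
    #ifdef _LP64
    #define NOT_LP64(code)
    #define LP64_ONLY(code) code
    #else
    #define NOT_LP64(code) code
    #define LP64_ONLY(code)
    #endif

    enum {
      pd_first_byte_reg = NOT_LP64(2) LP64_ONLY(0),   // values from the hunk above
      pd_last_byte_reg  = NOT_LP64(5) LP64_ONLY(11)
    };

    int main() { return pd_last_byte_reg - pd_first_byte_reg; }
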
--- a/src/cpu/x86/vm/c1_FrameMap_x86.cpp	Tue Nov 23 13:22:55 2010 -0800
+++ b/src/cpu/x86/vm/c1_FrameMap_x86.cpp	Tue Nov 30 23:23:40 2010 -0800
@@ -158,9 +158,11 @@
   map_register( 6, r8);    r8_opr = LIR_OprFact::single_cpu(6);
   map_register( 7, r9);    r9_opr = LIR_OprFact::single_cpu(7);
   map_register( 8, r11);  r11_opr = LIR_OprFact::single_cpu(8);
-  map_register( 9, r12);  r12_opr = LIR_OprFact::single_cpu(9);
-  map_register(10, r13);  r13_opr = LIR_OprFact::single_cpu(10);
-  map_register(11, r14);  r14_opr = LIR_OprFact::single_cpu(11);
+  map_register( 9, r13);  r13_opr = LIR_OprFact::single_cpu(9);
+  map_register(10, r14);  r14_opr = LIR_OprFact::single_cpu(10);
+  // r12 is allocated conditionally. With compressed oops it holds
+  // the heapbase value and is not visible to the allocator.
+  map_register(11, r12);  r12_opr = LIR_OprFact::single_cpu(11);
   // The unallocatable registers are at the end
   map_register(12, r10);  r10_opr = LIR_OprFact::single_cpu(12);
   map_register(13, r15);  r15_opr = LIR_OprFact::single_cpu(13);
@@ -191,9 +193,9 @@
   _caller_save_cpu_regs[6] = r8_opr;
   _caller_save_cpu_regs[7] = r9_opr;
   _caller_save_cpu_regs[8] = r11_opr;
-  _caller_save_cpu_regs[9] = r12_opr;
-  _caller_save_cpu_regs[10] = r13_opr;
-  _caller_save_cpu_regs[11] = r14_opr;
+  _caller_save_cpu_regs[9] = r13_opr;
+  _caller_save_cpu_regs[10] = r14_opr;
+  _caller_save_cpu_regs[11] = r12_opr;
 #endif // _LP64
 
--- a/src/cpu/x86/vm/c1_FrameMap_x86.hpp	Tue Nov 23 13:22:55 2010 -0800
+++ b/src/cpu/x86/vm/c1_FrameMap_x86.hpp	Tue Nov 30 23:23:40 2010 -0800
@@ -130,4 +130,15 @@
     return _caller_save_xmm_regs[i];
   }
 
+  static int adjust_reg_range(int range) {
+    // Reduce the number of available regs (to free r12) in case of compressed oops
+    if (UseCompressedOops) return range - 1;
+    return range;
+  }
+
+  static int nof_caller_save_cpu_regs() { return adjust_reg_range(pd_nof_caller_save_cpu_regs_frame_map); }
+  static int last_cpu_reg()             { return adjust_reg_range(pd_last_cpu_reg); }
+  static int last_byte_reg()            { return adjust_reg_range(pd_last_byte_reg); }
+
 #endif // CPU_X86_VM_C1_FRAMEMAP_X86_HPP
+
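
Together with the remapping in c1_FrameMap_x86.cpp above, which moves r12 to the last allocator index, adjust_reg_range() can hide exactly r12 from the register allocator by shrinking any register range by one. A tiny standalone model (the flag and constant are stand-ins for the VM's):

    // Tiny model of adjust_reg_range(); flag and constant are stand-ins.
    #include <cassert>

    const int pd_last_cpu_reg = 11;    // r12 now sits at the last index
    bool UseCompressedOops    = true;  // stands in for the VM flag

    int adjust_reg_range(int range) {
      // Hide the last register (r12, reserved for the heap base) when
      // compressed oops are enabled; otherwise keep the full range.
      return UseCompressedOops ? range - 1 : range;
    }

    int main() {
      assert(adjust_reg_range(pd_last_cpu_reg) == 10); // r12 hidden
      UseCompressedOops = false;
      assert(adjust_reg_range(pd_last_cpu_reg) == 11); // r12 allocatable again
      return 0;
    }
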
--- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Tue Nov 23 13:22:55 2010 -0800
+++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	Tue Nov 30 23:23:40 2010 -0800
@@ -343,8 +343,8 @@
   Register receiver = FrameMap::receiver_opr->as_register();
   Register ic_klass = IC_Klass;
   const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
-
-  if (!VerifyOops) {
+  const bool do_post_padding = VerifyOops || UseCompressedOops;
+  if (!do_post_padding) {
     // insert some nops so that the verified entry point is aligned on CodeEntryAlignment
     while ((__ offset() + ic_cmp_size) % CodeEntryAlignment != 0) {
       __ nop();
@@ -352,8 +352,8 @@
   }
   int offset = __ offset();
   __ inline_cache_check(receiver, IC_Klass);
-  assert(__ offset() % CodeEntryAlignment == 0 || VerifyOops, "alignment must be correct");
-  if (VerifyOops) {
+  assert(__ offset() % CodeEntryAlignment == 0 || do_post_padding, "alignment must be correct");
+  if (do_post_padding) {
     // force alignment after the cache check.
     // It's been verified to be aligned if !VerifyOops
     __ align(CodeEntryAlignment);
@@ -559,16 +559,16 @@
   __ movptr (rax, arg1->as_register());
 
   // Get addresses of first characters from both Strings
-  __ movptr (rsi, Address(rax, java_lang_String::value_offset_in_bytes()));
-  __ movptr (rcx, Address(rax, java_lang_String::offset_offset_in_bytes()));
-  __ lea    (rsi, Address(rsi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
+  __ load_heap_oop(rsi, Address(rax, java_lang_String::value_offset_in_bytes()));
+  __ movptr       (rcx, Address(rax, java_lang_String::offset_offset_in_bytes()));
+  __ lea          (rsi, Address(rsi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
 
 
   // rbx, may be NULL
   add_debug_info_for_null_check_here(info);
-  __ movptr (rdi, Address(rbx, java_lang_String::value_offset_in_bytes()));
-  __ movptr (rcx, Address(rbx, java_lang_String::offset_offset_in_bytes()));
-  __ lea    (rdi, Address(rdi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
+  __ load_heap_oop(rdi, Address(rbx, java_lang_String::value_offset_in_bytes()));
+  __ movptr       (rcx, Address(rbx, java_lang_String::offset_offset_in_bytes()));
+  __ lea          (rdi, Address(rdi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
 
   // compute minimum length (in rax) and difference of lengths (on top of stack)
   if (VM_Version::supports_cmov()) {
@@ -696,10 +696,15 @@
   LIR_Const* c = src->as_constant_ptr();
 
   switch (c->type()) {
-    case T_INT:
+    case T_INT: {
+      assert(patch_code == lir_patch_none, "no patching handled here");
+      __ movl(dest->as_register(), c->as_jint());
+      break;
+    }
+
     case T_ADDRESS: {
       assert(patch_code == lir_patch_none, "no patching handled here");
-      __ movl(dest->as_register(), c->as_jint());
+      __ movptr(dest->as_register(), c->as_jint());
       break;
     }
 
@@ -780,8 +785,11 @@
   switch (c->type()) {
     case T_INT:  // fall through
     case T_FLOAT:
+      __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
+      break;
+
     case T_ADDRESS:
-      __ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
+      __ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
       break;
 
     case T_OBJECT:
@@ -806,7 +814,7 @@
   }
 }
 
-void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info ) {
+void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
   assert(src->is_constant(), "should not call otherwise");
   assert(dest->is_address(), "should not call otherwise");
   LIR_Const* c = src->as_constant_ptr();
@@ -816,14 +824,21 @@
   switch (type) {
     case T_INT:    // fall through
     case T_FLOAT:
+      __ movl(as_Address(addr), c->as_jint_bits());
+      break;
+
     case T_ADDRESS:
-      __ movl(as_Address(addr), c->as_jint_bits());
+      __ movptr(as_Address(addr), c->as_jint_bits());
       break;
 
     case T_OBJECT:  // fall through
     case T_ARRAY:
       if (c->as_jobject() == NULL) {
-        __ movptr(as_Address(addr), NULL_WORD);
+        if (UseCompressedOops && !wide) {
+          __ movl(as_Address(addr), (int32_t)NULL_WORD);
+        } else {
+          __ movptr(as_Address(addr), NULL_WORD);
+        }
       } else {
         if (is_literal_address(addr)) {
           ShouldNotReachHere();
@@ -831,8 +846,14 @@
         } else {
 #ifdef _LP64
           __ movoop(rscratch1, c->as_jobject());
-          null_check_here = code_offset();
-          __ movptr(as_Address_lo(addr), rscratch1);
+          if (UseCompressedOops && !wide) {
+            __ encode_heap_oop(rscratch1);
+            null_check_here = code_offset();
+            __ movl(as_Address_lo(addr), rscratch1);
+          } else {
+            null_check_here = code_offset();
+            __ movptr(as_Address_lo(addr), rscratch1);
+          }
 #else
           __ movoop(as_Address(addr), c->as_jobject());
 #endif
@@ -1009,22 +1030,28 @@
 }
 
 
-void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool /* unaligned */) {
+void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
   LIR_Address* to_addr = dest->as_address_ptr();
   PatchingStub* patch = NULL;
+  Register compressed_src = rscratch1;
 
   if (type == T_ARRAY || type == T_OBJECT) {
     __ verify_oop(src->as_register());
+#ifdef _LP64
+    if (UseCompressedOops && !wide) {
+      __ movptr(compressed_src, src->as_register());
+      __ encode_heap_oop(compressed_src);
+    }
+#endif
   }
+
   if (patch_code != lir_patch_none) {
     patch = new PatchingStub(_masm, PatchingStub::access_field_id);
     Address toa = as_Address(to_addr);
     assert(toa.disp() != 0, "must have");
   }
-  if (info != NULL) {
-    add_debug_info_for_null_check_here(info);
-  }
-
+
+  int null_check_here = code_offset();
   switch (type) {
     case T_FLOAT: {
       if (src->is_single_xmm()) {
@@ -1050,13 +1077,17 @@
       break;
     }
 
-    case T_ADDRESS: // fall through
     case T_ARRAY:   // fall through
     case T_OBJECT:  // fall through
-#ifdef _LP64
+      if (UseCompressedOops && !wide) {
+        __ movl(as_Address(to_addr), compressed_src);
+      } else {
+        __ movptr(as_Address(to_addr), src->as_register());
+      }
+      break;
+    case T_ADDRESS:
       __ movptr(as_Address(to_addr), src->as_register());
       break;
-#endif // _LP64
     case T_INT:
       __ movl(as_Address(to_addr), src->as_register());
       break;
@@ -1113,6 +1144,9 @@
     default:
       ShouldNotReachHere();
   }
+  if (info != NULL) {
+    add_debug_info_for_null_check(null_check_here, info);
+  }
 
   if (patch_code != lir_patch_none) {
     patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
@@ -1196,7 +1230,7 @@
 }
 
 
-void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool /* unaligned */) {
+void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {
   assert(src->is_address(), "should not call otherwise");
   assert(dest->is_register(), "should not call otherwise");
 
@@ -1250,13 +1284,18 @@
       break;
     }
 
-    case T_ADDRESS: // fall through
     case T_OBJECT:  // fall through
     case T_ARRAY:   // fall through
-#ifdef _LP64
+      if (UseCompressedOops && !wide) {
+        __ movl(dest->as_register(), from_addr);
+      } else {
+        __ movptr(dest->as_register(), from_addr);
+      }
+      break;
+
+    case T_ADDRESS:
       __ movptr(dest->as_register(), from_addr);
       break;
-#endif // _L64
     case T_INT:
       __ movl(dest->as_register(), from_addr);
       break;
@@ -1351,6 +1390,11 @@
   }
 
   if (type == T_ARRAY || type == T_OBJECT) {
+#ifdef _LP64
+    if (UseCompressedOops && !wide) {
+      __ decode_heap_oop(dest->as_register());
+    }
+#endif
     __ verify_oop(dest->as_register());
   }
 }
@@ -1690,7 +1734,7 @@
     } else if (obj == klass_RInfo) {
       klass_RInfo = dst;
     }
-    if (k->is_loaded()) {
+    if (k->is_loaded() && !UseCompressedOops) {
       select_different_registers(obj, dst, k_RInfo, klass_RInfo);
     } else {
       Rtmp1 = op->tmp3()->as_register();
@@ -1727,21 +1771,26 @@
   if (op->fast_check()) {
     // get object class
     // not a safepoint as obj null check happens earlier
-    if (k->is_loaded()) {
 #ifdef _LP64
-      __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
-#else
-      __ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding());
-#endif // _LP64
+    if (UseCompressedOops) {
+      __ load_klass(Rtmp1, obj);
+      __ cmpptr(k_RInfo, Rtmp1);
     } else {
       __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
     }
+#else
+    if (k->is_loaded()) {
+      __ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding());
+    } else {
+      __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
+    }
+#endif
     __ jcc(Assembler::notEqual, *failure_target);
     // successful cast, fall through to profile or jump
   } else {
     // get object class
     // not a safepoint as obj null check happens earlier
-    __ movptr(klass_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
+    __ load_klass(klass_RInfo, obj);
     if (k->is_loaded()) {
       // See if we get an immediate positive hit
 #ifdef _LP64
@@ -1796,7 +1845,7 @@
     Register mdo  = klass_RInfo, recv = k_RInfo;
     __ bind(profile_cast_success);
     __ movoop(mdo, md->constant_encoding());
-    __ movptr(recv, Address(obj, oopDesc::klass_offset_in_bytes()));
+    __ load_klass(recv, obj);
     Label update_done;
     type_profile_helper(mdo, md, data, recv, success);
     __ jmp(*success);
@@ -1860,10 +1909,10 @@
     }
 
     add_debug_info_for_null_check_here(op->info_for_exception());
-    __ movptr(k_RInfo, Address(array, oopDesc::klass_offset_in_bytes()));
-    __ movptr(klass_RInfo, Address(value, oopDesc::klass_offset_in_bytes()));
-
-    // get instance klass
+    __ load_klass(k_RInfo, array);
+    __ load_klass(klass_RInfo, value);
+
+    // get instance klass (it's already uncompressed)
     __ movptr(k_RInfo, Address(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)));
     // perform the fast part of the checking logic
     __ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
@@ -1882,7 +1931,7 @@
       Register mdo  = klass_RInfo, recv = k_RInfo;
       __ bind(profile_cast_success);
       __ movoop(mdo, md->constant_encoding());
-      __ movptr(recv, Address(value, oopDesc::klass_offset_in_bytes()));
+      __ load_klass(recv, value);
       Label update_done;
       type_profile_helper(mdo, md, data, recv, &done);
       __ jmpb(done);
@@ -1946,12 +1995,32 @@
   assert(cmpval != newval, "cmp and new values must be in different registers");
   assert(cmpval != addr, "cmp and addr must be in different registers");
   assert(newval != addr, "new value and addr must be in different registers");
-  if (os::is_MP()) {
-    __ lock();
-  }
+
   if ( op->code() == lir_cas_obj) {
-    __ cmpxchgptr(newval, Address(addr, 0));
-  } else if (op->code() == lir_cas_int) {
+#ifdef _LP64
+    if (UseCompressedOops) {
+      __ mov(rscratch1, cmpval);
+      __ encode_heap_oop(cmpval);
+      __ mov(rscratch2, newval);
+      __ encode_heap_oop(rscratch2);
+      if (os::is_MP()) {
+        __ lock();
+      }
+      __ cmpxchgl(rscratch2, Address(addr, 0));
+      __ mov(cmpval, rscratch1);
+    } else
+#endif
+    {
+      if (os::is_MP()) {
+        __ lock();
+      }
+      __ cmpxchgptr(newval, Address(addr, 0));
+    }
+  } else {
+    assert(op->code() == lir_cas_int, "lir_cas_int expected");
+    if (os::is_MP()) {
+      __ lock();
+    }
     __ cmpxchgl(newval, Address(addr, 0));
   }
 #ifdef _LP64
@@ -3193,8 +3262,13 @@
   }
 
   if (flags & LIR_OpArrayCopy::type_check) {
-    __ movptr(tmp, src_klass_addr);
-    __ cmpptr(tmp, dst_klass_addr);
+    if (UseCompressedOops) {
+      __ movl(tmp, src_klass_addr);
+      __ cmpl(tmp, dst_klass_addr);
+    } else {
+      __ movptr(tmp, src_klass_addr);
+      __ cmpptr(tmp, dst_klass_addr);
+    }
     __ jcc(Assembler::notEqual, *stub->entry());
   }
 
@@ -3209,13 +3283,23 @@
     // but not necessarily exactly of type default_type.
     Label known_ok, halt;
     __ movoop(tmp, default_type->constant_encoding());
+#ifdef _LP64
+    if (UseCompressedOops) {
+      __ encode_heap_oop(tmp);
+    }
+#endif
+
     if (basic_type != T_OBJECT) {
-      __ cmpptr(tmp, dst_klass_addr);
+
+      if (UseCompressedOops) __ cmpl(tmp, dst_klass_addr);
+      else                   __ cmpptr(tmp, dst_klass_addr);
       __ jcc(Assembler::notEqual, halt);
-      __ cmpptr(tmp, src_klass_addr);
+      if (UseCompressedOops) __ cmpl(tmp, src_klass_addr);
+      else                   __ cmpptr(tmp, src_klass_addr);
       __ jcc(Assembler::equal, known_ok);
     } else {
-      __ cmpptr(tmp, dst_klass_addr);
+      if (UseCompressedOops) __ cmpl(tmp, dst_klass_addr);
+      else                   __ cmpptr(tmp, dst_klass_addr);
      __ jcc(Assembler::equal, known_ok);
      __ cmpptr(src, dst);
      __ jcc(Assembler::equal, known_ok);
@@ -3344,7 +3428,7 @@
       }
     }
   } else {
-    __ movptr(recv, Address(recv, oopDesc::klass_offset_in_bytes()));
+    __ load_klass(recv, recv);
     Label update_done;
     type_profile_helper(mdo, md, data, recv, &update_done);
     // Receiver did not match any saved receiver and there is no empty row for it.
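
The lir_cas_obj path above is the one subtle piece: both the expected and the new value are encoded to 32 bits and a 32-bit lock cmpxchgl is issued against the narrow field, with rscratch1 preserving the caller's cmpval. A hedged C++ model using std::atomic in place of the heap slot and the hypothetical encoding from the earlier sketches:

    // Model of the compressed lir_cas_obj sequence; the atomic<uint32_t>
    // stands in for the narrow oop field, encode() for encode_heap_oop.
    #include <atomic>
    #include <cassert>
    #include <cstdint>

    const uint64_t heap_base = 0x0000000800000000ULL; // hypothetical
    const unsigned oop_shift = 3;

    uint32_t encode(uint64_t oop) {
      return oop ? (uint32_t)((oop - heap_base) >> oop_shift) : 0;
    }

    bool cas_obj(std::atomic<uint32_t>& field, uint64_t cmpval, uint64_t newval) {
      uint32_t expected = encode(cmpval);   // encode_heap_oop(cmpval)
      uint32_t desired  = encode(newval);   // encode_heap_oop(rscratch2)
      // lock cmpxchgl rscratch2, [addr]
      return field.compare_exchange_strong(expected, desired);
    }

    int main() {
      std::atomic<uint32_t> slot(encode(heap_base + 0x40));
      assert(cas_obj(slot, heap_base + 0x40, heap_base + 0x80));  // succeeds
      assert(!cas_obj(slot, heap_base + 0x40, heap_base + 0xc0)); // stale cmpval
      return 0;
    }
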
--- a/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Tue Nov 23 13:22:55 2010 -0800
+++ b/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp	Tue Nov 30 23:23:40 2010 -0800
@@ -1151,9 +1151,12 @@
     stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
   }
   LIR_Opr reg = rlock_result(x);
+  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
+  if (!x->klass()->is_loaded() || UseCompressedOops) {
+    tmp3 = new_register(objectType);
+  }
   __ checkcast(reg, obj.result(), x->klass(),
-               new_register(objectType), new_register(objectType),
-               !x->klass()->is_loaded() ? new_register(objectType) : LIR_OprFact::illegalOpr,
+               new_register(objectType), new_register(objectType), tmp3,
                x->direct_compare(), info_for_exception, patching_info, stub,
                x->profiled_method(), x->profiled_bci());
 }
@@ -1170,9 +1173,12 @@
     patching_info = state_for(x, x->state_before());
   }
   obj.load_item();
+  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
+  if (!x->klass()->is_loaded() || UseCompressedOops) {
+    tmp3 = new_register(objectType);
+  }
   __ instanceof(reg, obj.result(), x->klass(),
-                new_register(objectType), new_register(objectType),
-                !x->klass()->is_loaded() ? new_register(objectType) : LIR_OprFact::illegalOpr,
+                new_register(objectType), new_register(objectType), tmp3,
                 x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
 }
 
--- a/src/cpu/x86/vm/c1_LinearScan_x86.hpp Tue Nov 23 13:22:55 2010 -0800
+++ b/src/cpu/x86/vm/c1_LinearScan_x86.hpp Tue Nov 30 23:23:40 2010 -0800
@@ -31,18 +31,17 @@
   assert(FrameMap::rsp_opr->cpu_regnr() == 6, "wrong assumption below");
   assert(FrameMap::rbp_opr->cpu_regnr() == 7, "wrong assumption below");
   assert(reg_num >= 0, "invalid reg_num");
-
-  return reg_num < 6 || reg_num > 7;
 #else
-  // rsp and rbp, r10, r15 (numbers 6 ancd 7) are ignored
+  // rsp and rbp, r10, r15 (numbers [12,15]) are ignored
+  // r12 (number 11) is conditional on compressed oops.
+  assert(FrameMap::r12_opr->cpu_regnr() == 11, "wrong assumption below");
   assert(FrameMap::r10_opr->cpu_regnr() == 12, "wrong assumption below");
   assert(FrameMap::r15_opr->cpu_regnr() == 13, "wrong assumption below");
   assert(FrameMap::rsp_opr->cpu_regnrLo() == 14, "wrong assumption below");
   assert(FrameMap::rbp_opr->cpu_regnrLo() == 15, "wrong assumption below");
   assert(reg_num >= 0, "invalid reg_num");
-
-  return reg_num < 12 || reg_num > 15;
 #endif // _LP64
+  return reg_num <= FrameMap::last_cpu_reg() || reg_num >= pd_nof_cpu_regs_frame_map;
 }
 
 inline int LinearScan::num_physical_regs(BasicType type) {
@@ -104,7 +103,7 @@
   if (allocator()->gen()->is_vreg_flag_set(cur->reg_num(), LIRGenerator::byte_reg)) {
     assert(cur->type() != T_FLOAT && cur->type() != T_DOUBLE, "cpu regs only");
     _first_reg = pd_first_byte_reg;
-    _last_reg = pd_last_byte_reg;
+    _last_reg = FrameMap::last_byte_reg();
     return true;
   } else if ((UseSSE >= 1 && cur->type() == T_FLOAT) || (UseSSE >= 2 && cur->type() == T_DOUBLE)) {
     _first_reg = pd_first_xmm_reg;
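Routing the result through FrameMap::last_cpu_reg() matters because r12 stops being a general-purpose register once it is reserved to hold the heap base for compressed-oop decoding. A restatement of the new test with the x64 numbering asserted above plugged in (a paraphrase for clarity, not additional changeset code):

    // With r12 = 11, r10 = 12, r15 = 13, rsp = 14, rbp = 15, and stack
    // slots numbered from pd_nof_cpu_regs_frame_map upward:
    inline bool is_processed(int reg_num) {
      return reg_num <= FrameMap::last_cpu_reg()    // allocatable cpu regs
          || reg_num >= pd_nof_cpu_regs_frame_map;  // spill/stack slots
    }
    // Presumably last_cpu_reg() includes r12 (11) only while compressed
    // oops are off; with them on it drops to 10, excluding the heap base.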
--- a/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp Tue Nov 23 13:22:55 2010 -0800
+++ b/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp Tue Nov 30 23:23:40 2010 -0800
@@ -155,11 +155,26 @@
     // This assumes that all prototype bits fit in an int32_t
     movptr(Address(obj, oopDesc::mark_offset_in_bytes ()), (int32_t)(intptr_t)markOopDesc::prototype());
   }
+#ifdef _LP64
+  if (UseCompressedOops) { // Take care not to kill klass
+    movptr(t1, klass);
+    encode_heap_oop_not_null(t1);
+    movl(Address(obj, oopDesc::klass_offset_in_bytes()), t1);
+  } else
+#endif
+  {
+    movptr(Address(obj, oopDesc::klass_offset_in_bytes()), klass);
+  }
 
-  movptr(Address(obj, oopDesc::klass_offset_in_bytes()), klass);
   if (len->is_valid()) {
     movl(Address(obj, arrayOopDesc::length_offset_in_bytes()), len);
   }
+#ifdef _LP64
+  else if (UseCompressedOops) {
+    xorptr(t1, t1);
+    store_klass_gap(obj, t1);
+  }
+#endif
 }
 
 
@@ -230,7 +245,7 @@
 void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2) {
   assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0,
          "con_size_in_bytes is not multiple of alignment");
-  const int hdr_size_in_bytes = instanceOopDesc::base_offset_in_bytes();
+  const int hdr_size_in_bytes = instanceOopDesc::header_size() * HeapWordSize;
 
   initialize_header(obj, klass, noreg, t1, t2);
 
@@ -317,13 +332,19 @@
   // check against inline cache
   assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
   int start_offset = offset();
-  cmpptr(iCache, Address(receiver, oopDesc::klass_offset_in_bytes()));
+
+  if (UseCompressedOops) {
+    load_klass(rscratch1, receiver);
+    cmpptr(rscratch1, iCache);
+  } else {
+    cmpptr(iCache, Address(receiver, oopDesc::klass_offset_in_bytes()));
+  }
   // if icache check fails, then jump to runtime routine
   // Note: RECEIVER must still contain the receiver!
   jump_cc(Assembler::notEqual,
           RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
   const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
-  assert(offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
+  assert(UseCompressedOops || offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
 }
 
 
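The new store_klass_gap call exists because compressing the klass pointer halves the second header word: for arrays the upper four bytes hold the length, but for plain instances they become a gap that must be zeroed to keep the heap parsable. A rough, illustrative picture of the 64-bit header in both modes (offsets follow the usual HotSpot layout of this era; this is a sketch, not a normative definition):

    // Object header on x64, illustrative:
    //
    //   UseCompressedOops off          UseCompressedOops on
    //   +0  mark word  (8 bytes)       +0  mark word           (8 bytes)
    //   +8  klass ptr  (8 bytes)       +8  narrow klass        (4 bytes)
    //   +16 fields / array length      +12 length or klass gap (4 bytes)
    //                                  +16 fields / elements
    struct HeaderSketch {          // hypothetical mirror of the compressed case
      uintptr_t mark;              // mark word
      uint32_t  narrow_klass;      // compressed klass pointer
      uint32_t  length_or_gap;     // array length, or zeroed gap (see above)
    };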
--- a/src/cpu/x86/vm/c1_Runtime1_x86.cpp Tue Nov 23 13:22:55 2010 -0800
+++ b/src/cpu/x86/vm/c1_Runtime1_x86.cpp Tue Nov 30 23:23:40 2010 -0800
@@ -1261,7 +1261,7 @@
         // load the klass and check the has finalizer flag
         Label register_finalizer;
         Register t = rsi;
-        __ movptr(t, Address(rax, oopDesc::klass_offset_in_bytes()));
+        __ load_klass(t, rax);
         __ movl(t, Address(t, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
         __ testl(t, JVM_ACC_HAS_FINALIZER);
         __ jcc(Assembler::notZero, register_finalizer);
--- a/src/share/vm/c1/c1_FrameMap.hpp Tue Nov 23 13:22:55 2010 -0800
+++ b/src/share/vm/c1/c1_FrameMap.hpp Tue Nov 30 23:23:40 2010 -0800
@@ -76,8 +76,8 @@
     nof_cpu_regs_reg_alloc = pd_nof_cpu_regs_reg_alloc,
     nof_fpu_regs_reg_alloc = pd_nof_fpu_regs_reg_alloc,
 
-    nof_caller_save_cpu_regs = pd_nof_caller_save_cpu_regs_frame_map,
-    nof_caller_save_fpu_regs = pd_nof_caller_save_fpu_regs_frame_map,
+    max_nof_caller_save_cpu_regs = pd_nof_caller_save_cpu_regs_frame_map,
+    nof_caller_save_fpu_regs     = pd_nof_caller_save_fpu_regs_frame_map,
 
     spill_slot_size_in_bytes = 4
   };
@@ -97,7 +97,7 @@
   static Register _cpu_rnr2reg [nof_cpu_regs];
   static int      _cpu_reg2rnr [nof_cpu_regs];
 
-  static LIR_Opr _caller_save_cpu_regs [nof_caller_save_cpu_regs];
+  static LIR_Opr _caller_save_cpu_regs [max_nof_caller_save_cpu_regs];
   static LIR_Opr _caller_save_fpu_regs [nof_caller_save_fpu_regs];
 
   int _framesize;
@@ -243,7 +243,7 @@
   VMReg regname(LIR_Opr opr) const;
 
   static LIR_Opr caller_save_cpu_reg_at(int i) {
-    assert(i >= 0 && i < nof_caller_save_cpu_regs, "out of bounds");
+    assert(i >= 0 && i < max_nof_caller_save_cpu_regs, "out of bounds");
     return _caller_save_cpu_regs[i];
   }
 
--- a/src/share/vm/c1/c1_GraphBuilder.cpp Tue Nov 23 13:22:55 2010 -0800
+++ b/src/share/vm/c1/c1_GraphBuilder.cpp Tue Nov 30 23:23:40 2010 -0800
@@ -2795,7 +2795,7 @@
       get = append(new UnsafeGetRaw(as_BasicType(local->type()), e,
                                     append(new Constant(new IntConstant(offset))),
                                     0,
-                                    true));
+                                    true /*unaligned*/, true /*wide*/));
     }
     _state->store_local(index, get);
   }
--- a/src/share/vm/c1/c1_Instruction.hpp Tue Nov 23 13:22:55 2010 -0800
+++ b/src/share/vm/c1/c1_Instruction.hpp Tue Nov 30 23:23:40 2010 -0800
@@ -2110,20 +2110,23 @@
 
 LEAF(UnsafeGetRaw, UnsafeRawOp)
  private:
-  bool _may_be_unaligned;  // For OSREntry
+  bool _may_be_unaligned, _is_wide;  // For OSREntry
 
  public:
-  UnsafeGetRaw(BasicType basic_type, Value addr, bool may_be_unaligned)
+  UnsafeGetRaw(BasicType basic_type, Value addr, bool may_be_unaligned, bool is_wide = false)
   : UnsafeRawOp(basic_type, addr, false) {
     _may_be_unaligned = may_be_unaligned;
+    _is_wide = is_wide;
   }
 
-  UnsafeGetRaw(BasicType basic_type, Value base, Value index, int log2_scale, bool may_be_unaligned)
+  UnsafeGetRaw(BasicType basic_type, Value base, Value index, int log2_scale, bool may_be_unaligned, bool is_wide = false)
   : UnsafeRawOp(basic_type, base, index, log2_scale, false) {
     _may_be_unaligned = may_be_unaligned;
+    _is_wide = is_wide;
   }
 
-  bool may_be_unaligned()  { return _may_be_unaligned; }
+  bool may_be_unaligned()  { return _may_be_unaligned; }
+  bool is_wide()           { return _is_wide; }
 };
 
 
--- a/src/share/vm/c1/c1_LIR.cpp Tue Nov 23 13:22:55 2010 -0800
+++ b/src/share/vm/c1/c1_LIR.cpp Tue Nov 30 23:23:40 2010 -0800
@@ -1742,6 +1742,8 @@
      return "unaligned move";
    case lir_move_volatile:
      return "volatile_move";
+   case lir_move_wide:
+     return "wide_move";
    default:
      ShouldNotReachHere();
      return "illegal_op";
--- a/src/share/vm/c1/c1_LIR.hpp Tue Nov 23 13:22:55 2010 -0800
+++ b/src/share/vm/c1/c1_LIR.hpp Tue Nov 30 23:23:40 2010 -0800
@@ -985,6 +985,7 @@
     lir_move_normal,
     lir_move_volatile,
     lir_move_unaligned,
+    lir_move_wide,
     lir_move_max_flag
   };
 
@@ -1932,7 +1933,20 @@
   void move(LIR_Opr src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, info)); }
   void move(LIR_Address* src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, src->type(), lir_patch_none, info)); }
   void move(LIR_Opr src, LIR_Address* dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), dst->type(), lir_patch_none, info)); }
-
+  void move_wide(LIR_Address* src, LIR_Opr dst, CodeEmitInfo* info = NULL) {
+    if (UseCompressedOops) {
+      append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, src->type(), lir_patch_none, info, lir_move_wide));
+    } else {
+      move(src, dst, info);
+    }
+  }
+  void move_wide(LIR_Opr src, LIR_Address* dst, CodeEmitInfo* info = NULL) {
+    if (UseCompressedOops) {
+      append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), dst->type(), lir_patch_none, info, lir_move_wide));
+    } else {
+      move(src, dst, info);
+    }
+  }
   void volatile_move(LIR_Opr src, LIR_Opr dst, BasicType type, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none) { append(new LIR_Op1(lir_move, src, dst, type, patch_code, info, lir_move_volatile)); }
 
   void oop2reg  (jobject o, LIR_Opr reg) { append(new LIR_Op1(lir_move, LIR_OprFact::oopConst(o), reg)); }
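move_wide is the escape hatch for the handful of T_OBJECT locations that live outside the Java heap (the thread-local exception oop and pc, JavaThread::threadObj, oop arguments spilled for runtime calls): those are full-width oop* slots, so the access must not be narrowed even when compressed oops are on. Its use in the platform-independent generator looks like this (excerpted from the c1_LIRGenerator.cpp hunk further down in this change):

    // Loading the pending exception oop out of the JavaThread. The slot
    // is outside the heap and stored uncompressed, so move_wide() tags
    // the op lir_move_wide and the backend emits a full-width access;
    // with UseCompressedOops off it degenerates to a plain move().
    LIR_Opr thread_reg = getThreadPointer();
    __ move_wide(new LIR_Address(thread_reg,
                                 in_bytes(JavaThread::exception_oop_offset()),
                                 T_OBJECT),
                 exceptionOopOpr());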
--- a/src/share/vm/c1/c1_LIRAssembler.cpp Tue Nov 23 13:22:55 2010 -0800
+++ b/src/share/vm/c1/c1_LIRAssembler.cpp Tue Nov 30 23:23:40 2010 -0800
@@ -489,7 +489,9 @@
         volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
       } else {
         move_op(op->in_opr(), op->result_opr(), op->type(),
-                op->patch_code(), op->info(), op->pop_fpu_stack(), op->move_kind() == lir_move_unaligned);
+                op->patch_code(), op->info(), op->pop_fpu_stack(),
+                op->move_kind() == lir_move_unaligned,
+                op->move_kind() == lir_move_wide);
       }
       break;
 
@@ -758,7 +760,7 @@
 }
 
 
-void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned) {
+void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide) {
   if (src->is_register()) {
     if (dest->is_register()) {
       assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
@@ -767,7 +769,7 @@
       assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
       reg2stack(src, dest, type, pop_fpu_stack);
     } else if (dest->is_address()) {
-      reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, unaligned);
+      reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, wide, unaligned);
     } else {
       ShouldNotReachHere();
     }
@@ -790,13 +792,13 @@
       const2stack(src, dest);
     } else if (dest->is_address()) {
       assert(patch_code == lir_patch_none, "no patching allowed here");
-      const2mem(src, dest, type, info);
+      const2mem(src, dest, type, info, wide);
     } else {
       ShouldNotReachHere();
     }
 
   } else if (src->is_address()) {
-    mem2reg(src, dest, type, patch_code, info, unaligned);
+    mem2reg(src, dest, type, patch_code, info, wide, unaligned);
 
   } else {
     ShouldNotReachHere();
--- a/src/share/vm/c1/c1_LIRAssembler.hpp Tue Nov 23 13:22:55 2010 -0800
+++ b/src/share/vm/c1/c1_LIRAssembler.hpp Tue Nov 30 23:23:40 2010 -0800
@@ -165,15 +165,17 @@
 
   void const2reg  (LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info);
   void const2stack(LIR_Opr src, LIR_Opr dest);
-  void const2mem  (LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info);
+  void const2mem  (LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide);
   void reg2stack  (LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack);
   void reg2reg    (LIR_Opr src, LIR_Opr dest);
-  void reg2mem    (LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned);
+  void reg2mem    (LIR_Opr src, LIR_Opr dest, BasicType type,
+                   LIR_PatchCode patch_code, CodeEmitInfo* info,
+                   bool pop_fpu_stack, bool wide, bool unaligned);
   void stack2reg  (LIR_Opr src, LIR_Opr dest, BasicType type);
   void stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type);
   void mem2reg    (LIR_Opr src, LIR_Opr dest, BasicType type,
-                   LIR_PatchCode patch_code = lir_patch_none,
-                   CodeEmitInfo* info = NULL, bool unaligned = false);
+                   LIR_PatchCode patch_code,
+                   CodeEmitInfo* info, bool wide, bool unaligned);
 
   void prefetchr  (LIR_Opr src);
   void prefetchw  (LIR_Opr src);
@@ -211,7 +213,7 @@
 
   void roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack);
   void move_op(LIR_Opr src, LIR_Opr result, BasicType type,
-               LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned);
+               LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide);
   void volatile_move_op(LIR_Opr src, LIR_Opr result, BasicType type, CodeEmitInfo* info);
   void comp_mem_op(LIR_Opr src, LIR_Opr result, BasicType type, CodeEmitInfo* info);  // info set for null exceptions
   void comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr result, LIR_Op2* op);
--- a/src/share/vm/c1/c1_LIRGenerator.cpp Tue Nov 23 13:22:55 2010 -0800
+++ b/src/share/vm/c1/c1_LIRGenerator.cpp Tue Nov 30 23:23:40 2010 -0800
@@ -864,11 +864,11 @@
     // MDO cells are intptr_t, so the data_reg width is arch-dependent.
     LIR_Opr data_reg = new_pointer_register();
     LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
-    __ move(LIR_OprFact::address(data_addr), data_reg);
+    __ move(data_addr, data_reg);
     // Use leal instead of add to avoid destroying condition codes on x86
     LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
     __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
-    __ move(data_reg, LIR_OprFact::address(data_addr));
+    __ move(data_reg, data_addr);
   }
 }
 
@@ -1009,12 +1009,12 @@
                  operand_for_instruction(phi));
 
   LIR_Opr thread_reg = getThreadPointer();
-  __ move(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
-          exceptionOopOpr());
-  __ move(LIR_OprFact::oopConst(NULL),
-          new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
-  __ move(LIR_OprFact::oopConst(NULL),
-          new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));
+  __ move_wide(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
+               exceptionOopOpr());
+  __ move_wide(LIR_OprFact::oopConst(NULL),
+               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
+  __ move_wide(LIR_OprFact::oopConst(NULL),
+               new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));
 
   LIR_Opr result = new_register(T_OBJECT);
   __ move(exceptionOopOpr(), result);
@@ -1085,7 +1085,7 @@
 void LIRGenerator::do_Return(Return* x) {
   if (compilation()->env()->dtrace_method_probes()) {
     BasicTypeList signature;
-    signature.append(T_INT);    // thread
+    signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
     signature.append(T_OBJECT); // methodOop
     LIR_OprList* args = new LIR_OprList();
     args->append(getThreadPointer());
@@ -1122,8 +1122,8 @@
     info = state_for(x);
   }
   __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_OBJECT), result, info);
-  __ move(new LIR_Address(result, Klass::java_mirror_offset_in_bytes() +
-                          klassOopDesc::klass_part_offset_in_bytes(), T_OBJECT), result);
+  __ move_wide(new LIR_Address(result, Klass::java_mirror_offset_in_bytes() +
+                               klassOopDesc::klass_part_offset_in_bytes(), T_OBJECT), result);
 }
 
 
@@ -1131,7 +1131,7 @@
 void LIRGenerator::do_currentThread(Intrinsic* x) {
   assert(x->number_of_arguments() == 0, "wrong type");
   LIR_Opr reg = rlock_result(x);
-  __ load(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_OBJECT), reg);
+  __ move_wide(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_OBJECT), reg);
 }
 
 
@@ -1908,7 +1908,11 @@
   if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) {
     __ unaligned_move(addr, reg);
   } else {
-    __ move(addr, reg);
+    if (dst_type == T_OBJECT && x->is_wide()) {
+      __ move_wide(addr, reg);
+    } else {
+      __ move(addr, reg);
+    }
   }
 }
 
@@ -2287,7 +2291,7 @@
 
   if (compilation()->env()->dtrace_method_probes()) {
     BasicTypeList signature;
-    signature.append(T_INT);    // thread
+    signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
     signature.append(T_OBJECT); // methodOop
     LIR_OprList* args = new LIR_OprList();
     args->append(getThreadPointer());
@@ -2352,11 +2356,14 @@
     } else {
       LIR_Address* addr = loc->as_address_ptr();
       param->load_for_store(addr->type());
-      if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
-        __ unaligned_move(param->result(), addr);
-      } else {
-        __ move(param->result(), addr);
-      }
+      if (addr->type() == T_OBJECT) {
+        __ move_wide(param->result(), addr);
+      } else
+        if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
+          __ unaligned_move(param->result(), addr);
+        } else {
+          __ move(param->result(), addr);
+        }
     }
   }
 
@@ -2368,7 +2375,7 @@
   } else {
     assert(loc->is_address(), "just checking");
     receiver->load_for_store(T_OBJECT);
-    __ move(receiver->result(), loc);
+    __ move_wide(receiver->result(), loc->as_address_ptr());
  }
 }
--- a/src/share/vm/c1/c1_LinearScan.cpp Tue Nov 23 13:22:55 2010 -0800
+++ b/src/share/vm/c1/c1_LinearScan.cpp Tue Nov 30 23:23:40 2010 -0800
@@ -1273,7 +1273,7 @@
   int caller_save_registers[LinearScan::nof_regs];
 
   int i;
-  for (i = 0; i < FrameMap::nof_caller_save_cpu_regs; i++) {
+  for (i = 0; i < FrameMap::nof_caller_save_cpu_regs(); i++) {
     LIR_Opr opr = FrameMap::caller_save_cpu_reg_at(i);
     assert(opr->is_valid() && opr->is_register(), "FrameMap should not return invalid operands");
     assert(reg_numHi(opr) == -1, "missing addition of range for hi-register");
@@ -3557,7 +3557,7 @@
 
   // invalidate all caller save registers at calls
   if (visitor.has_call()) {
-    for (j = 0; j < FrameMap::nof_caller_save_cpu_regs; j++) {
+    for (j = 0; j < FrameMap::nof_caller_save_cpu_regs(); j++) {
       state_put(input_state, reg_num(FrameMap::caller_save_cpu_reg_at(j)), NULL);
     }
     for (j = 0; j < FrameMap::nof_caller_save_fpu_regs; j++) {
@@ -5596,7 +5596,7 @@
     _last_reg = pd_last_fpu_reg;
   } else {
     _first_reg = pd_first_cpu_reg;
-    _last_reg = pd_last_cpu_reg;
+    _last_reg = FrameMap::last_cpu_reg();
  }
 
   assert(0 <= _first_reg && _first_reg < LinearScan::nof_regs, "out of range");
--- a/src/share/vm/c1/c1_Runtime1.cpp Tue Nov 23 13:22:55 2010 -0800
+++ b/src/share/vm/c1/c1_Runtime1.cpp Tue Nov 30 23:23:40 2010 -0800
@@ -1174,7 +1174,7 @@
     memmove(dst_addr, src_addr, length << l2es);
     return ac_ok;
   } else if (src->is_objArray() && dst->is_objArray()) {
-    if (UseCompressedOops) {  // will need for tiered
+    if (UseCompressedOops) {
       narrowOop *src_addr = objArrayOop(src)->obj_at_addr<narrowOop>(src_pos);
       narrowOop *dst_addr = objArrayOop(dst)->obj_at_addr<narrowOop>(dst_pos);
       return obj_arraycopy_work(src, src_addr, dst, dst_addr, length);
@@ -1210,10 +1210,11 @@
   assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
   if (UseCompressedOops) {
     bs->write_ref_array_pre((narrowOop*)dst, num);
+    Copy::conjoint_oops_atomic((narrowOop*) src, (narrowOop*) dst, num);
   } else {
     bs->write_ref_array_pre((oop*)dst, num);
+    Copy::conjoint_oops_atomic((oop*) src, (oop*) dst, num);
   }
-  Copy::conjoint_oops_atomic((oop*) src, (oop*) dst, num);
   bs->write_ref_array(dst, num);
 JRT_END
 
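The second hunk is a genuine width fix, not just plumbing: with compressed oops the destination array holds 32-bit narrowOops, so copying at full oop width would move twice as many bytes as there are elements. A stand-alone sketch of the same dispatch, with simplified types (the real code goes through Copy::conjoint_oops_atomic and the collector's barrier set):

    #include <string.h>
    #include <stdint.h>

    typedef uint32_t  narrowOop;   // compressed reference, 4 bytes
    typedef uintptr_t oopWord;     // full-width reference, 8 bytes on LP64

    // Element width must follow the heap's reference format; dispatching
    // on the flag keeps the byte count equal to num * element_size.
    static void copy_ref_array(void* src, void* dst, size_t num, bool compressed) {
      if (compressed) {
        memmove(dst, src, num * sizeof(narrowOop));
      } else {
        memmove(dst, src, num * sizeof(oopWord));
      }
    }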
--- a/src/share/vm/code/relocInfo.cpp Tue Nov 23 13:22:55 2010 -0800
+++ b/src/share/vm/code/relocInfo.cpp Tue Nov 30 23:23:40 2010 -0800
@@ -1093,8 +1093,8 @@
     tty->print_cr("(no relocs)");
     return;
   }
-  tty->print("relocInfo@" INTPTR_FORMAT " [type=%d(%s) addr=" INTPTR_FORMAT,
-             _current, type(), reloc_type_string((relocInfo::relocType) type()), _addr);
+  tty->print("relocInfo@" INTPTR_FORMAT " [type=%d(%s) addr=" INTPTR_FORMAT " offset=%d",
+             _current, type(), reloc_type_string((relocInfo::relocType) type()), _addr, _current->addr_offset());
   if (current()->format() != 0)
     tty->print(" format=%d", current()->format());
   if (datalen() == 1) {
--- a/src/share/vm/runtime/arguments.cpp Tue Nov 23 13:22:55 2010 -0800
+++ b/src/share/vm/runtime/arguments.cpp Tue Nov 30 23:23:40 2010 -0800
@@ -1007,24 +1007,9 @@
 void Arguments::check_compressed_oops_compat() {
 #ifdef _LP64
   assert(UseCompressedOops, "Precondition");
-# if defined(COMPILER1) && !defined(TIERED)
-  // Until c1 supports compressed oops turn them off.
-  FLAG_SET_DEFAULT(UseCompressedOops, false);
-# else
   // Is it on by default or set on ergonomically
   bool is_on_by_default = FLAG_IS_DEFAULT(UseCompressedOops) || FLAG_IS_ERGO(UseCompressedOops);
 
-  // Tiered currently doesn't work with compressed oops
-  if (TieredCompilation) {
-    if (is_on_by_default) {
-      FLAG_SET_DEFAULT(UseCompressedOops, false);
-      return;
-    } else {
-      vm_exit_during_initialization(
-        "Tiered compilation is not supported with compressed oops yet", NULL);
-    }
-  }
-
   // If dumping an archive or forcing its use, disable compressed oops if possible
   if (DumpSharedSpaces || RequireSharedSpaces) {
     if (is_on_by_default) {
@@ -1038,9 +1023,7 @@
     // UseSharedSpaces is on by default.  With compressed oops, we turn it off.
     FLAG_SET_DEFAULT(UseSharedSpaces, false);
   }
-
-# endif // defined(COMPILER1) && !defined(TIERED)
-#endif // _LP64
+#endif
 }
 
 void Arguments::set_tiered_flags() {
@@ -3075,11 +3058,9 @@
   // Set flags based on ergonomics.
   set_ergonomics_flags();
 
-#ifdef _LP64
   if (UseCompressedOops) {
     check_compressed_oops_compat();
   }
-#endif
 
   // Check the GC selections again.
   if (!check_gc_consistency()) {