src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp
changeset 435:a61af66fc99e, child 777:37f87013dfd8 (new file, 407 lines)

/*
 * Copyright 1999-2007 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_c1_CodeStubs_sparc.cpp.incl"

#define __ ce->masm()->
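// The __ shorthand: every pseudo-instruction below expands to a call on the
// MacroAssembler of the LIR_Assembler (ce) currently emitting code.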

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
                               bool throw_index_out_of_bounds_exception)
  : _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
  , _index(index)
{
  _info = new CodeEmitInfo(info);
}
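// Note: the stub clones its CodeEmitInfo rather than sharing it, so that
// add_call_info_here can record debug info at the stub's own out-of-line
// call site (a reading of the idiom used by every stub in this file).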


void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  if (_index->is_register()) {
    __ mov(_index->as_register(), G4);
  } else {
    __ set(_index->as_jint(), G4);
  }
  if (_throw_index_out_of_bounds_exception) {
    __ call(Runtime1::entry_for(Runtime1::throw_index_exception_id), relocInfo::runtime_call_type);
  } else {
    __ call(Runtime1::entry_for(Runtime1::throw_range_check_failed_id), relocInfo::runtime_call_type);
  }
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}
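// The shape above recurs in the exception stubs throughout this file: the
// stub argument travels to the Runtime1 entry in G4, the call's delay slot
// is filled (with a nop when no useful move is available),
// add_call_info_here records debug info for the call, and in debug builds
// should_not_reach_here() guards the fall-through, since the runtime entry
// throws rather than returning here.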

#ifdef TIERED

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ set(_bci, G4);
  __ call(Runtime1::entry_for(Runtime1::counter_overflow_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);

  __ br(Assembler::always, true, Assembler::pt, _continuation);
  __ delayed()->nop();
}

#endif // TIERED

void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::throw_div0_exception_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}


void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id),
          relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}
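// Both stubs above register entries in the implicit exception table, which
// maps the code offset of the potentially faulting instruction (_offset)
// to the offset of the stub, so a hardware trap (division by zero, null
// dereference) can be redirected here instead of using an explicit check.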


// Implementation of SimpleExceptionStub
// Note: %g1 and %g3 are already in use
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(Runtime1::entry_for(_stub), relocInfo::runtime_call_type);

  if (_obj->is_valid()) {
    __ delayed()->mov(_obj->as_register(), G4); // _obj contains the optional argument to the stub
  } else {
    __ delayed()->mov(G0, G4);
  }
  ce->add_call_info_here(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}
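// A note on the delay-slot idiom above: on SPARC the instruction following
// a call executes before control transfers, so rather than wasting the
// slot on a nop, the move of the stub argument into G4 is emitted with
// delayed() and runs for free in the call's delay slot:
//
//   __ call(Runtime1::entry_for(_stub), relocInfo::runtime_call_type);
//   __ delayed()->mov(_obj->as_register(), G4);  // executes in the delay slot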


// Implementation of ArrayStoreExceptionStub

ArrayStoreExceptionStub::ArrayStoreExceptionStub(CodeEmitInfo* info):
  _info(info) {
}


void ArrayStoreExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::throw_array_store_exception_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id                 ||
         stub_id == Runtime1::fast_new_instance_id            ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id   = stub_id;
}


void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(Runtime1::entry_for(_stub_id), relocInfo::runtime_call_type);
  __ delayed()->mov_or_nop(_klass_reg->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->mov_or_nop(O0, _result->as_register());
}
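// mov_or_nop in the delay slots above is presumably a conditional form of
// the move: it emits the register-to-register move when source and
// destination differ and a plain nop when they are already the same
// register, keeping the delay slot valid either way (an assumption based
// on the helper's name and its use in the allocation stubs here).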


// Implementation of NewTypeArrayStub
NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  __ mov(_length->as_register(), G4);
  __ call(Runtime1::entry_for(Runtime1::new_type_array_id), relocInfo::runtime_call_type);
  __ delayed()->mov_or_nop(_klass_reg->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->mov_or_nop(O0, _result->as_register());
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}


void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  __ mov(_length->as_register(), G4);
  __ call(Runtime1::entry_for(Runtime1::new_object_array_id), relocInfo::runtime_call_type);
  __ delayed()->mov_or_nop(_klass_reg->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->mov_or_nop(O0, _result->as_register());
}


// Implementation of MonitorAccessStubs
MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
  : MonitorAccessStub(obj_reg, lock_reg) {
  _info = new CodeEmitInfo(info);
}


void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ mov(_obj_reg->as_register(), G4);
  if (ce->compilation()->has_fpu_code()) {
    __ call(Runtime1::entry_for(Runtime1::monitorenter_id), relocInfo::runtime_call_type);
  } else {
    __ call(Runtime1::entry_for(Runtime1::monitorenter_nofpu_id), relocInfo::runtime_call_type);
  }
  __ delayed()->mov_or_nop(_lock_reg->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ br(Assembler::always, true, Assembler::pt, _continuation);
  __ delayed()->nop();
}
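// The _nofpu runtime entries, here and in MonitorExitStub below, are used
// when the compiled method contains no FPU code, letting the runtime skip
// saving and restoring the floating point state around the monitor
// operation.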


void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  if (ce->compilation()->has_fpu_code()) {
    __ call(Runtime1::entry_for(Runtime1::monitorexit_id), relocInfo::runtime_call_type);
  } else {
    __ call(Runtime1::entry_for(Runtime1::monitorexit_nofpu_id), relocInfo::runtime_call_type);
  }

  __ delayed()->mov_or_nop(_lock_reg->as_register(), G4);
  __ br(Assembler::always, true, Assembler::pt, _continuation);
  __ delayed()->nop();
}

// Implementation of patching:
// - Copy the code at the given offset into an inlined buffer (first the bytes, then the number of bytes)
// - Replace the original code with a call to the stub
// At runtime:
// - call to the stub, which jumps into the runtime
// - in the runtime: preserve all registers (especially the object registers, i.e., source and destination objects)
// - in the runtime: after initializing the class, restore the original code and reexecute the instruction
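//
// A sketch of the stub layout emitted below (the patch record fields are
// single bytes, which is why _bytes_to_copy is asserted to fit in 0xFF):
//
//   <copy of the original code>                      _bytes_to_copy bytes
//   <being-initialized check, load_klass only>       skipped via bytes_to_skip
//   0x00 | being_initialized_entry_offset |
//        bytes_to_skip | _bytes_to_copy               4-byte patch record
//   call <Runtime1 patching stub>; nop                the call_patch entry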

int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;
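// The offset is negative because the patch record is emitted immediately
// before the call into the runtime: the assert in emit_code below checks
// that the record sits exactly one NativeGeneralJump (the call plus its
// delay slot) behind the end of the call sequence.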

void PatchingStub::align_patch_site(MacroAssembler* ) {
  // Patch sites on SPARC are always properly aligned.
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  // copy original code here
  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call");
  assert((_bytes_to_copy & 0x3) == 0, "must copy a multiple of four bytes");

  Label call_patch;

  int being_initialized_entry = __ offset();

  if (_id == load_klass_id) {
    // produce a copy of the load klass instruction for use by the being-initialized case
    address start = __ pc();
    Address addr = Address(_obj, address(NULL), oop_Relocation::spec(_oop_index));
    __ sethi(addr, true);
    __ add(addr, _obj, 0);

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // make a copy of the code which is going to be patched
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ a_byte(a_byte);
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_klass_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }

    // Static field accesses have special semantics while the class
    // initializer is being run, so we emit a test which can be used to
    // check that this code is being executed by the initializing
    // thread.
    assert(_obj != noreg, "must be a valid register");
    assert(_oop_index >= 0, "must have oop index");
    __ ld_ptr(_obj, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc), G3);
    __ cmp(G2_thread, G3);
    __ br(Assembler::notEqual, false, Assembler::pn, call_patch);
    __ delayed()->nop();

    // load_klass patches may execute the patched code before it's
    // copied back into place, so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ br(Assembler::always, false, Assembler::pt, _patch_site_continuation);
    __ delayed()->nop();

    // make sure this extra code gets skipped
    bytes_to_skip += __ offset() - offset;
  }

  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes but it has to be
  // aligned as an instruction so emit 4 bytes.
  int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;

  // emit the offsets needed to find the code to patch
  int being_initialized_entry_offset = __ offset() - being_initialized_entry + sizeof_patch_record;

  // Emit the patch record.  We need to emit a full word, so emit an extra empty byte.
  __ a_byte(0);
  __ a_byte(being_initialized_entry_offset);
  __ a_byte(bytes_to_skip);
  __ a_byte(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  __ call(target, relocInfo::runtime_call_type);
  __ delayed()->nop();
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  __ br(Assembler::always, false, Assembler::pt, _patch_site_entry);
  __ delayed()->nop();
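
  // For load_klass patches, the embedded-oop relocations at the patch site
  // are downgraded to relocInfo::none below: the sethi/add pair at _pc_start
  // has been overwritten by the jump to this stub, so relocation and GC
  // processing must not treat those words as holding an oop until the
  // runtime installs the real instructions (a reading of the intent; the
  // mechanics live in change_reloc_info_for_address).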
  if (_id == load_klass_id) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, (address) pc, relocInfo::oop_type, relocInfo::none);

    pc = (address)(_pc_start + NativeMovConstReg::add_offset);
    RelocIterator iter2(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter2, (address) pc, relocInfo::oop_type, relocInfo::none);
  }
}

void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  __ mov(src()->as_register(),     O0);
  __ mov(src_pos()->as_register(), O1);
  __ mov(dst()->as_register(),     O2);
  __ mov(dst_pos()->as_register(), O3);
  __ mov(length()->as_register(),  O4);

  ce->emit_static_call_stub();

  __ call(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(info());
  ce->verify_oop_map(info());

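  // The non-product block below bumps Runtime1::_arraycopy_slowcase_cnt with
  // a plain load/increment/store; the update is not atomic, which is
  // acceptable for a statistics-only counter.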
#ifndef PRODUCT
  __ set((intptr_t)&Runtime1::_arraycopy_slowcase_cnt, O0);
  __ ld(O0, 0, O1);
  __ inc(O1);
  __ st(O1, 0, O0);
#endif

  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->nop();
}


#undef __