Wed, 21 Aug 2013 13:34:45 +0200
7199175: JSR 292: C1 needs patching when invokedynamic/invokehandle call site is not linked
Summary: Do patching rather than bailing out for unlinked call with appendix
Reviewed-by: twisti, kvn

/*
 * Copyright (c) 1999, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "c1/c1_CodeStubs.hpp"
#include "c1/c1_FrameMap.hpp"
#include "c1/c1_LIRAssembler.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "nativeInst_sparc.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/macros.hpp"
#include "vmreg_sparc.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#endif // INCLUDE_ALL_GCS

#define __ ce->masm()->
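
// Shorthand used throughout this file: __ expands to the stub's
// LIR_Assembler's MacroAssembler.  Two SPARC-specific conventions recur
// in the stubs below: arguments to the Runtime1 entry points are passed
// in G4 (and G5 for a second argument), often by a move hoisted into the
// call's delay slot via __ delayed()->, and every call or branch is
// followed by an instruction that fills its delay slot.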

RangeCheckStub::RangeCheckStub(CodeEmitInfo* info, LIR_Opr index,
                               bool throw_index_out_of_bounds_exception)
  : _throw_index_out_of_bounds_exception(throw_index_out_of_bounds_exception)
  , _index(index)
{
  assert(info != NULL, "must have info");
  _info = new CodeEmitInfo(info);
}

void RangeCheckStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  if (_info->deoptimize_on_exception()) {
    address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
    __ call(a, relocInfo::runtime_call_type);
    __ delayed()->nop();
    ce->add_call_info_here(_info);
    ce->verify_oop_map(_info);
    debug_only(__ should_not_reach_here());
    return;
  }

  if (_index->is_register()) {
    __ mov(_index->as_register(), G4);
  } else {
    __ set(_index->as_jint(), G4);
  }
  if (_throw_index_out_of_bounds_exception) {
    __ call(Runtime1::entry_for(Runtime1::throw_index_exception_id), relocInfo::runtime_call_type);
  } else {
    __ call(Runtime1::entry_for(Runtime1::throw_range_check_failed_id), relocInfo::runtime_call_type);
  }
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

PredicateFailedStub::PredicateFailedStub(CodeEmitInfo* info) {
  _info = new CodeEmitInfo(info);
}

void PredicateFailedStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  address a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  __ call(a, relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  debug_only(__ should_not_reach_here());
}

void CounterOverflowStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ set(_bci, G4);
  __ call(Runtime1::entry_for(Runtime1::counter_overflow_id), relocInfo::runtime_call_type);
  __ delayed()->mov_or_nop(_method->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);

  __ br(Assembler::always, true, Assembler::pt, _continuation);
  __ delayed()->nop();
}
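
// DivByZeroStub: when _offset is valid it denotes the trapping division
// instruction, and the implicit exception table entry below maps that
// offset to this stub so execution is dispatched here when the fault occurs.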
void DivByZeroStub::emit_code(LIR_Assembler* ce) {
  if (_offset != -1) {
    ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  }
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::throw_div0_exception_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}

void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  address a;
  if (_info->deoptimize_on_exception()) {
    // Deoptimize, do not throw the exception, because it is probably wrong to do it here.
    a = Runtime1::entry_for(Runtime1::predicate_failed_trap_id);
  } else {
    a = Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);
  }

  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(a, relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}


// Implementation of SimpleExceptionStub
// Note: %g1 and %g3 are already in use
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(Runtime1::entry_for(_stub), relocInfo::runtime_call_type);

  if (_obj->is_valid()) {
    __ delayed()->mov(_obj->as_register(), G4); // _obj contains the optional argument to the stub
  } else {
    __ delayed()->mov(G0, G4);
  }
  ce->add_call_info_here(_info);
#ifdef ASSERT
  __ should_not_reach_here();
#endif
}


// Implementation of NewInstanceStub

NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKlass* klass, CodeEmitInfo* info, Runtime1::StubID stub_id) {
  _result = result;
  _klass = klass;
  _klass_reg = klass_reg;
  _info = new CodeEmitInfo(info);
  assert(stub_id == Runtime1::new_instance_id                 ||
         stub_id == Runtime1::fast_new_instance_id            ||
         stub_id == Runtime1::fast_new_instance_init_check_id,
         "need new_instance id");
  _stub_id = stub_id;
}

void NewInstanceStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(Runtime1::entry_for(_stub_id), relocInfo::runtime_call_type);
  __ delayed()->mov_or_nop(_klass_reg->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->mov_or_nop(O0, _result->as_register());
}


// Implementation of NewTypeArrayStub
NewTypeArrayStub::NewTypeArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

void NewTypeArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  __ mov(_length->as_register(), G4);
  __ call(Runtime1::entry_for(Runtime1::new_type_array_id), relocInfo::runtime_call_type);
  __ delayed()->mov_or_nop(_klass_reg->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->mov_or_nop(O0, _result->as_register());
}


// Implementation of NewObjectArrayStub

NewObjectArrayStub::NewObjectArrayStub(LIR_Opr klass_reg, LIR_Opr length, LIR_Opr result, CodeEmitInfo* info) {
  _klass_reg = klass_reg;
  _length = length;
  _result = result;
  _info = new CodeEmitInfo(info);
}

void NewObjectArrayStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  __ mov(_length->as_register(), G4);
  __ call(Runtime1::entry_for(Runtime1::new_object_array_id), relocInfo::runtime_call_type);
  __ delayed()->mov_or_nop(_klass_reg->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->mov_or_nop(O0, _result->as_register());
}


// Implementation of MonitorAccessStubs
MonitorEnterStub::MonitorEnterStub(LIR_Opr obj_reg, LIR_Opr lock_reg, CodeEmitInfo* info)
  : MonitorAccessStub(obj_reg, lock_reg) {
  _info = new CodeEmitInfo(info);
}

void MonitorEnterStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ mov(_obj_reg->as_register(), G4);
  if (ce->compilation()->has_fpu_code()) {
    __ call(Runtime1::entry_for(Runtime1::monitorenter_id), relocInfo::runtime_call_type);
  } else {
    __ call(Runtime1::entry_for(Runtime1::monitorenter_nofpu_id), relocInfo::runtime_call_type);
  }
  __ delayed()->mov_or_nop(_lock_reg->as_register(), G5);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  __ br(Assembler::always, true, Assembler::pt, _continuation);
  __ delayed()->nop();
}

void MonitorExitStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  if (_compute_lock) {
    ce->monitor_address(_monitor_ix, _lock_reg);
  }
  if (ce->compilation()->has_fpu_code()) {
    __ call(Runtime1::entry_for(Runtime1::monitorexit_id), relocInfo::runtime_call_type);
  } else {
    __ call(Runtime1::entry_for(Runtime1::monitorexit_nofpu_id), relocInfo::runtime_call_type);
  }

  __ delayed()->mov_or_nop(_lock_reg->as_register(), G4);
  __ br(Assembler::always, true, Assembler::pt, _continuation);
  __ delayed()->nop();
}

// Implementation of patching:
// - Copy the code at the given offset to an inlined buffer (first the bytes, then the number of bytes)
// - Replace original code with a call to the stub
// At Runtime:
// - call to stub, jump to runtime
// - in runtime: preserve all registers (especially objects, i.e., source and destination object)
// - in runtime: after initializing class, restore original code, reexecute instruction
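
// The patch-info record is emitted immediately before the call into the
// patching stub, so its offset from the call's return address is constant:
// minus the size of one NativeGeneralJump (the call plus its delay slot),
// as the assert in PatchingStub::emit_code verifies.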
int PatchingStub::_patch_info_offset = -NativeGeneralJump::instruction_size;

void PatchingStub::align_patch_site(MacroAssembler*) {
  // Patch sites on SPARC are always properly aligned.
}

void PatchingStub::emit_code(LIR_Assembler* ce) {
  // copy original code here
  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call");
  assert((_bytes_to_copy & 0x3) == 0, "must copy a multiple of four bytes");

  Label call_patch;

  int being_initialized_entry = __ offset();
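
  // Everything from here to the patch record is the "being initialized"
  // entry: the runtime can patch this copy and let the thread that is
  // running the class initializer execute it directly, while the original
  // patch site stays unpatched so all other threads still trap into here.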
  if (_id == load_klass_id) {
    // produce a copy of the load klass instruction for use by the being initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    AddressLiteral addrlit(NULL, metadata_Relocation::spec(_index));
    __ patchable_set(addrlit, _obj);

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else if (_id == load_mirror_id || _id == load_appendix_id) {
    // produce a copy of the load mirror instruction for use by the being initialized case
#ifdef ASSERT
    address start = __ pc();
#endif
    AddressLiteral addrlit(NULL, oop_Relocation::spec(_index));
    __ patchable_set(addrlit, _obj);

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // make a copy of the code which is going to be patched
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8(a_byte);
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }

    // static field accesses have special semantics while the class
    // initializer is being run so we emit a test which can be used to
    // check that this code is being executed by the initializing
    // thread.
    assert(_obj != noreg, "must be a valid register");
    assert(_index >= 0, "must have oop index");
    __ ld_ptr(_obj, java_lang_Class::klass_offset_in_bytes(), G3);
    __ ld_ptr(G3, in_bytes(InstanceKlass::init_thread_offset()), G3);
    __ cmp_and_brx_short(G2_thread, G3, Assembler::notEqual, Assembler::pn, call_patch);

    // load_klass patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ br(Assembler::always, false, Assembler::pt, _patch_site_continuation);
    __ delayed()->nop();

    // make sure this extra code gets skipped
    bytes_to_skip += __ offset() - offset;
  }

  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes but it has to be
  // aligned as an instruction so emit 4 bytes.
  int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;

  // emit the offsets needed to find the code to patch
  int being_initialized_entry_offset = __ offset() - being_initialized_entry + sizeof_patch_record;

  // Emit the patch record.  We need to emit a full word, so emit an extra empty byte
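  // Layout of the word: [0x00 pad][being_initialized_entry_offset][bytes_to_skip][_bytes_to_copy]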
  __ emit_int8(0);
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
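  // Redirect the original code at the patch site to this stub; the copy
  // emitted above preserves the instructions being overwritten.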
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for(Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for(Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  __ call(target, relocInfo::runtime_call_type);
  __ delayed()->nop();
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  __ br(Assembler::always, false, Assembler::pt, _patch_site_entry);
  __ delayed()->nop();
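
  // The constant at the patch site is still the dummy NULL value (and the
  // site itself now starts with a jump to this stub), so neutralize the
  // oop/metadata relocations covering it until patching installs the real
  // constant.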
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, (address) pc, reloc_type, relocInfo::none);

    pc = (address)(_pc_start + NativeMovConstReg::add_offset);
    RelocIterator iter2(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter2, (address) pc, reloc_type, relocInfo::none);
  }
}

void DeoptimizeStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  __ call(Runtime1::entry_for(Runtime1::deoptimize_id), relocInfo::runtime_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(_info);
  DEBUG_ONLY(__ should_not_reach_here());
}

void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
  //---------------slow case: call to native-----------------
  __ bind(_entry);
  __ mov(src()->as_register(), O0);
  __ mov(src_pos()->as_register(), O1);
  __ mov(dst()->as_register(), O2);
  __ mov(dst_pos()->as_register(), O3);
  __ mov(length()->as_register(), O4);

  ce->emit_static_call_stub();

  __ call(SharedRuntime::get_resolve_static_call_stub(), relocInfo::static_call_type);
  __ delayed()->nop();
  ce->add_call_info_here(info());
  ce->verify_oop_map(info());
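
  // Count slow-path entries in non-product builds.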
#ifndef PRODUCT
  __ set((intptr_t)&Runtime1::_arraycopy_slowcase_cnt, O0);
  __ ld(O0, 0, O1);
  __ inc(O1);
  __ st(O1, 0, O0);
#endif

  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->nop();
}

///////////////////////////////////////////////////////////////////////////////////
#if INCLUDE_ALL_GCS

void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(_entry);

  assert(pre_val()->is_register(), "Precondition.");
  Register pre_val_reg = pre_val()->as_register();

  if (do_load()) {
    ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
  }
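
  // A null previous value never needs to be logged for SATB marking,
  // so skip the runtime call in that case.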
  if (__ is_in_wdisp16_range(_continuation)) {
    __ br_null(pre_val_reg, /*annul*/false, Assembler::pt, _continuation);
  } else {
    __ cmp(pre_val_reg, G0);
    __ brx(Assembler::equal, false, Assembler::pn, _continuation);
  }
  __ delayed()->nop();

  __ call(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id));
  __ delayed()->mov(pre_val_reg, G4);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->nop();
}

jbyte* G1PostBarrierStub::_byte_map_base = NULL;

jbyte* G1PostBarrierStub::byte_map_base_slow() {
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->is_a(BarrierSet::G1SATBCTLogging),
         "Must be if we're using this.");
  return ((G1SATBCardTableModRefBS*)bs)->byte_map_base;
}

void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);

  assert(addr()->is_register(), "Precondition.");
  assert(new_val()->is_register(), "Precondition.");
  Register addr_reg = addr()->as_pointer_register();
  Register new_val_reg = new_val()->as_register();
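
  // Storing null can never create a cross-region reference, so no card
  // needs to be dirtied; skip the runtime call.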
  if (__ is_in_wdisp16_range(_continuation)) {
    __ br_null(new_val_reg, /*annul*/false, Assembler::pt, _continuation);
  } else {
    __ cmp(new_val_reg, G0);
    __ brx(Assembler::equal, false, Assembler::pn, _continuation);
  }
  __ delayed()->nop();

  __ call(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id));
  __ delayed()->mov(addr_reg, G4);
  __ br(Assembler::always, false, Assembler::pt, _continuation);
  __ delayed()->nop();
}

#endif // INCLUDE_ALL_GCS
///////////////////////////////////////////////////////////////////////////////////

#undef __