Fri, 07 Jun 2013 16:46:37 -0700
8008407: remove SPARC V8 support
Summary: Removed most of the SPARC V8 instructions
Reviewed-by: kvn, twisti
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.inline.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"

#ifndef CC_INTERP
#define __ _masm->

// Misc helpers

// Do an oop store like *(base + index + offset) = val.
// index can be noreg.
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Register base,
                         Register index,
                         int offset,
                         Register val,
                         Register tmp,
                         BarrierSet::Name barrier,
                         bool precise) {
  assert(tmp != val && tmp != base && tmp != index, "register collision");
  assert(index == noreg || offset == 0, "only one offset");
  switch (barrier) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      {
        // Load and record the previous value.
        __ g1_write_barrier_pre(base, index, offset,
                                noreg /* pre_val */,
                                tmp, true /*preserve_o_regs*/);

        // G1 barrier needs uncompressed oop for region cross check.
        Register new_val = val;
        if (UseCompressedOops && val != G0) {
          new_val = tmp;
          __ mov(val, new_val);
        }

        if (index == noreg) {
          assert(Assembler::is_simm13(offset), "fix this code");
          __ store_heap_oop(val, base, offset);
        } else {
          __ store_heap_oop(val, base, index);
        }

        // No need for post barrier if storing NULL
        if (val != G0) {
          if (precise) {
            if (index == noreg) {
              __ add(base, offset, base);
            } else {
              __ add(base, index, base);
            }
          }
          __ g1_write_barrier_post(base, new_val, tmp);
        }
      }
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      {
        if (index == noreg) {
          assert(Assembler::is_simm13(offset), "fix this code");
          __ store_heap_oop(val, base, offset);
        } else {
          __ store_heap_oop(val, base, index);
        }
        // No need for post barrier if storing NULL
        if (val != G0) {
          if (precise) {
            if (index == noreg) {
              __ add(base, offset, base);
            } else {
              __ add(base, index, base);
            }
          }
          __ card_write_barrier_post(base, val, tmp);
        }
      }
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      ShouldNotReachHere();
      break;
    default:
      ShouldNotReachHere();

  }
}
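
// Usage sketch (see aastore below): with the element address already formed
// in O1, an object-array store goes through the active collector's barriers:
//
//   do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT),
//                Otos_i, G3_scratch, _bs->kind(), true /* precise */);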

//----------------------------------------------------------------------------------------------------
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // (none)
}


//----------------------------------------------------------------------------------------------------
// Condition conversion
Assembler::Condition ccNot(TemplateTable::Condition cc) {
  switch (cc) {
    case TemplateTable::equal        : return Assembler::notEqual;
    case TemplateTable::not_equal    : return Assembler::equal;
    case TemplateTable::less         : return Assembler::greaterEqual;
    case TemplateTable::less_equal   : return Assembler::greater;
    case TemplateTable::greater      : return Assembler::lessEqual;
    case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}
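
// Note: the conversion is deliberately inverted. The branch templates below
// (if_0cmp, if_icmp, etc.) emit a branch that is taken when the Java
// condition does NOT hold, so the more frequent taken-branch path falls
// through; e.g. if_icmpeq branches on ccNot(equal) == Assembler::notEqual.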

//----------------------------------------------------------------------------------------------------
// Miscellaneous helper routines


Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(Lbcp, offset);
}


void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no) {
  // With sharing on, may need to test Method* flag.
  if (!RewriteBytecodes)  return;
  Label L_patch_done;

  switch (bc) {
  case Bytecodes::_fast_aputfield:
  case Bytecodes::_fast_bputfield:
  case Bytecodes::_fast_cputfield:
  case Bytecodes::_fast_dputfield:
  case Bytecodes::_fast_fputfield:
  case Bytecodes::_fast_iputfield:
  case Bytecodes::_fast_lputfield:
  case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1);
      __ set(bc, bc_reg);
      __ cmp_and_br_short(temp_reg, 0, Assembler::equal, Assembler::pn, L_patch_done);  // don't patch
    }
    break;
  default:
    assert(byte_no == -1, "sanity");
    if (load_bc_into_bc_reg) {
      __ set(bc, bc_reg);
    }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    __ ldub(at_bcp(0), temp_reg);
    __ cmp_and_br_short(temp_reg, Bytecodes::_breakpoint, Assembler::notEqual, Assembler::pt, L_fast_patch);
    // perform the quickening, slowly, in the bowels of the breakpoint table
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), Lmethod, Lbcp, bc_reg);
    __ ba_short(L_patch_done);
    __ bind(L_fast_patch);
  }

#ifdef ASSERT
  Bytecodes::Code orig_bytecode = Bytecodes::java_code(bc);
  Label L_okay;
  __ ldub(at_bcp(0), temp_reg);
  __ cmp(temp_reg, orig_bytecode);
  __ br(Assembler::equal, false, Assembler::pt, L_okay);
  __ delayed()->cmp(temp_reg, bc_reg);
  __ br(Assembler::equal, false, Assembler::pt, L_okay);
  __ delayed()->nop();
  __ stop("patching the wrong bytecode");
  __ bind(L_okay);
#endif

  // patch bytecode
  __ stb(bc_reg, at_bcp(0));
  __ bind(L_patch_done);
}
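
// Typical use (see iload and aload_0 below): the caller has already placed
// the fast bytecode to install in bc_reg and passes
// load_bc_into_bc_reg == false, e.g.
//
//   patch_bytecode(Bytecodes::_iload, G4_scratch, G3_scratch, false);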

//----------------------------------------------------------------------------------------------------
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ clr(Otos_i);
}


void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  __ set(value, Otos_i);
}


void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  assert(value >= 0, "check this code");
#ifdef _LP64
  __ set(value, Otos_l);
#else
  __ set(value, Otos_l2);
  __ clr( Otos_l1);
#endif
}


void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float zero = 0.0, one = 1.0, two = 2.0;
  float* p;
  switch( value ) {
   default: ShouldNotReachHere();
   case 0:  p = &zero;  break;
   case 1:  p = &one;   break;
   case 2:  p = &two;   break;
  }
  AddressLiteral a(p);
  __ sethi(a, G3_scratch);
  __ ldf(FloatRegisterImpl::S, G3_scratch, a.low10(), Ftos_f);
}
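
// Note: sethi/low10 is the standard SPARC absolute-addressing idiom: sethi
// materializes the upper 22 bits of the literal's address in G3_scratch and
// a.low10() supplies the remaining 10 low bits as the load's immediate
// displacement.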


void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double zero = 0.0, one = 1.0;
  double* p;
  switch( value ) {
   default: ShouldNotReachHere();
   case 0:  p = &zero;  break;
   case 1:  p = &one;   break;
  }
  AddressLiteral a(p);
  __ sethi(a, G3_scratch);
  __ ldf(FloatRegisterImpl::D, G3_scratch, a.low10(), Ftos_d);
}


// %%%%% Should factor most snippet templates across platforms

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ ldsb( at_bcp(1), Otos_i );
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ get_2_byte_integer_at_bcp(1, G3_scratch, Otos_i, InterpreterMacroAssembler::Signed);
}

void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Label call_ldc, notInt, isString, notString, notClass, exit;

  if (wide) {
    __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
  } else {
    __ ldub(Lbcp, 1, O1);
  }
  __ get_cpool_and_tags(O0, O2);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type from tags
  __ add(O2, tags_offset, O2);
  __ ldub(O2, O1, O2);

  // unresolved class? If so, must resolve
  __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedClass, Assembler::equal, Assembler::pt, call_ldc);

  // unresolved class in error state
  __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedClassInError, Assembler::equal, Assembler::pn, call_ldc);

  __ cmp(O2, JVM_CONSTANT_Class);      // need to call vm to get java mirror of the class
  __ brx(Assembler::notEqual, true, Assembler::pt, notClass);
  __ delayed()->add(O0, base_offset, O0);

  __ bind(call_ldc);
  __ set(wide, O1);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), O1);
  __ push(atos);
  __ ba_short(exit);

  __ bind(notClass);
  // __ add(O0, base_offset, O0);
  __ sll(O1, LogBytesPerWord, O1);
  __ cmp(O2, JVM_CONSTANT_Integer);
  __ brx(Assembler::notEqual, true, Assembler::pt, notInt);
  __ delayed()->cmp(O2, JVM_CONSTANT_String);
  __ ld(O0, O1, Otos_i);
  __ push(itos);
  __ ba_short(exit);

  __ bind(notInt);
  // __ cmp(O2, JVM_CONSTANT_String);
  __ brx(Assembler::notEqual, true, Assembler::pt, notString);
  __ delayed()->ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
  __ bind(isString);
  __ stop("string should be rewritten to fast_aldc");
  __ ba_short(exit);

  __ bind(notString);
  // __ ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
  __ push(ftos);

  __ bind(exit);
}

// Fast path for caching oop constants.
// %%% We should use this to handle Class and String constants also.
// %%% It will simplify the ldc/primitive path considerably.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  int index_size = wide ? sizeof(u2) : sizeof(u1);
  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (CallSite, etc.)
  assert_different_registers(Otos_i, G3_scratch);
  __ get_cache_index_at_bcp(Otos_i, G3_scratch, 1, index_size);  // load index => G3_scratch
  __ load_resolved_reference_at_index(Otos_i, G3_scratch);
  __ tst(Otos_i);
  __ br(Assembler::notEqual, false, Assembler::pt, resolved);
  __ delayed()->set((int)bytecode(), O1);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // first time invocation - must resolve first
  __ call_VM(Otos_i, entry, O1);
  __ bind(resolved);
  __ verify_oop(Otos_i);
}


void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Long, exit;

  __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
  __ get_cpool_and_tags(O0, O2);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();
  // get type from tags
  __ add(O2, tags_offset, O2);
  __ ldub(O2, O1, O2);

  __ sll(O1, LogBytesPerWord, O1);
  __ add(O0, O1, G3_scratch);

  __ cmp_and_brx_short(O2, JVM_CONSTANT_Double, Assembler::notEqual, Assembler::pt, Long);
  // A double can be placed at word-aligned locations in the constant pool.
  // Check out Conversions.java for an example.
  // Also ConstantPool::header_size() is 20, which makes it very difficult
  // to double-align a double in the constant pool.  SG, 11/7/97
#ifdef _LP64
  __ ldf(FloatRegisterImpl::D, G3_scratch, base_offset, Ftos_d);
#else
  FloatRegister f = Ftos_d;
  __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset, f);
  __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset + sizeof(jdouble)/2,
         f->successor());
#endif
  __ push(dtos);
  __ ba_short(exit);

  __ bind(Long);
#ifdef _LP64
  __ ldx(G3_scratch, base_offset, Otos_l);
#else
  __ ld(G3_scratch, base_offset, Otos_l);
  __ ld(G3_scratch, base_offset + sizeof(jlong)/2, Otos_l->successor());
#endif
  __ push(ltos);

  __ bind(exit);
}


void TemplateTable::locals_index(Register reg, int offset) {
  __ ldub( at_bcp(offset), reg );
}


void TemplateTable::locals_index_wide(Register reg) {
  // offset is 2, not 1, because Lbcp points to wide prefix code
  __ get_2_byte_integer_at_bcp(2, G4_scratch, reg, InterpreterMacroAssembler::Unsigned);
}

void TemplateTable::iload() {
  transition(vtos, itos);
  // Rewrite iload,iload pair into fast_iload2
  //         iload,caload pair into fast_icaload
  if (RewriteFrequentPairs) {
    Label rewrite, done;

    // get next byte
    __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_iload)), G3_scratch);

    // if _iload, wait to rewrite to iload2.  We only want to rewrite the
    // last two iloads in a pair.  Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // we have an iload pair.
    __ cmp_and_br_short(G3_scratch, (int)Bytecodes::_iload, Assembler::equal, Assembler::pn, done);

    __ cmp(G3_scratch, (int)Bytecodes::_fast_iload);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_iload2, G4_scratch);

    __ cmp(G3_scratch, (int)Bytecodes::_caload);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_icaload, G4_scratch);

    __ set(Bytecodes::_fast_iload, G4_scratch);  // don't check again
    // rewrite
    // G4_scratch: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, G4_scratch, G3_scratch, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
}

void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
  __ push_i();
  locals_index(G3_scratch, 3);  // get next bytecode's local index.
  __ access_local_int( G3_scratch, Otos_i );
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
}

void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(G3_scratch);
  __ access_local_long( G3_scratch, Otos_l );
}


void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(G3_scratch);
  __ access_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(G3_scratch);
  __ access_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(G3_scratch);
  __ access_local_ptr( G3_scratch, Otos_i);
}


void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
}


void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(G3_scratch);
  __ access_local_long( G3_scratch, Otos_l );
}


void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(G3_scratch);
  __ access_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(G3_scratch);
  __ access_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(G3_scratch);
  __ access_local_ptr( G3_scratch, Otos_i );
  __ verify_oop(Otos_i);
}


void TemplateTable::iaload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
  __ ld(O3, arrayOopDesc::base_offset_in_bytes(T_INT), Otos_i);
}


void TemplateTable::laload() {
  transition(itos, ltos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
  __ ld_long(O3, arrayOopDesc::base_offset_in_bytes(T_LONG), Otos_l);
}


void TemplateTable::faload() {
  transition(itos, ftos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
  __ ldf(FloatRegisterImpl::S, O3, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Ftos_f);
}


void TemplateTable::daload() {
  transition(itos, dtos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
  __ ldf(FloatRegisterImpl::D, O3, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Ftos_d);
}


void TemplateTable::aaload() {
  transition(itos, atos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O3);
  __ load_heap_oop(O3, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i);
  __ verify_oop(Otos_i);
}
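
// Note: with UseCompressedOops each element is a 4-byte narrow oop, hence
// the index-check scale of 2 (log2 of 4) instead of LogBytesPerWord above.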


void TemplateTable::baload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, 0, G3_scratch, O3);
  __ ldsb(O3, arrayOopDesc::base_offset_in_bytes(T_BYTE), Otos_i);
}


void TemplateTable::caload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
}

void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // Otos_i: index
  // tos: array
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
}


void TemplateTable::saload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ ldsh(O3, arrayOopDesc::base_offset_in_bytes(T_SHORT), Otos_i);
}


void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ ld( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
}


void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  assert(n+1 < Argument::n_register_parameters, "would need more code");
  __ load_unaligned_long(Llocals, Interpreter::local_offset_in_bytes(n+1), Otos_l);
}


void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  assert(n < Argument::n_register_parameters, "would need more code");
  __ ldf( FloatRegisterImpl::S, Llocals, Interpreter::local_offset_in_bytes(n), Ftos_f );
}


void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  FloatRegister dst = Ftos_d;
  __ load_unaligned_double(Llocals, Interpreter::local_offset_in_bytes(n+1), dst);
}


void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ ld_ptr( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
}


void TemplateTable::aload_0() {
  transition(vtos, atos);

  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield (itos)
  // _aload_0, _fast_agetfield (atos)
  // _aload_0, _fast_fgetfield (ftos)
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
  // bytecode checks the next bytecode and then rewrites the current
  // bytecode into a pair bytecode; otherwise it rewrites the current
  // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
  //
  if (RewriteFrequentPairs) {
    Label rewrite, done;

    // get next byte
    __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)), G3_scratch);

    // do actual aload_0
    aload(0);

    // if _getfield then wait with rewrite
    __ cmp_and_br_short(G3_scratch, (int)Bytecodes::_getfield, Assembler::equal, Assembler::pn, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_igetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_iaccess_0, G4_scratch);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_agetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_aaccess_0, G4_scratch);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_fgetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_faccess_0, G4_scratch);

    // else rewrite to _fast_aload0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ set(Bytecodes::_fast_aload_0, G4_scratch);

    // rewrite
    // G4_scratch: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, G4_scratch, G3_scratch, false);
    __ bind(done);
  } else {
    aload(0);
  }
}


void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(G3_scratch);
  __ store_local_int( G3_scratch, Otos_i );
}


void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(G3_scratch);
  __ store_local_long( G3_scratch, Otos_l );
}


void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(G3_scratch);
  __ store_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(G3_scratch);
  __ store_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::astore() {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  locals_index(G3_scratch);
  __ store_local_ptr(G3_scratch, Otos_i);
}


void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(G3_scratch);
  __ store_local_int( G3_scratch, Otos_i );
}


void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l();
  locals_index_wide(G3_scratch);
  __ store_local_long( G3_scratch, Otos_l );
}


void TemplateTable::wide_fstore() {
  transition(vtos, vtos);
  __ pop_f();
  locals_index_wide(G3_scratch);
  __ store_local_float( G3_scratch, Ftos_f );
}


void TemplateTable::wide_dstore() {
  transition(vtos, vtos);
  __ pop_d();
  locals_index_wide(G3_scratch);
  __ store_local_double( G3_scratch, Ftos_d );
}


void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  locals_index_wide(G3_scratch);
  __ store_local_ptr(G3_scratch, Otos_i);
}


void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(O2); // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
  __ st(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_INT));
}


void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(O2); // index
  // Otos_l: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
  __ st_long(Otos_l, O2, arrayOopDesc::base_offset_in_bytes(T_LONG));
}


void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(O2); // index
  // Ftos_f: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
  __ stf(FloatRegisterImpl::S, Ftos_f, O2, arrayOopDesc::base_offset_in_bytes(T_FLOAT));
}


void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(O2); // index
  // Ftos_d: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
  __ stf(FloatRegisterImpl::D, Ftos_d, O2, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
}


void TemplateTable::aastore() {
  Label store_ok, is_null, done;
  transition(vtos, vtos);
  __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
  __ ld(Lesp, Interpreter::expr_offset_in_bytes(1), O2);         // get index
  __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(2), O3);     // get array
  // Otos_i: val
  // O2: index
  // O3: array
  __ verify_oop(Otos_i);
  __ index_check_without_pop(O3, O2, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O1);

  // do array store check - check for NULL value first
  __ br_null_short( Otos_i, Assembler::pn, is_null );

  __ load_klass(O3, O4); // get array klass
  __ load_klass(Otos_i, O5); // get value klass

  // do fast instanceof cache test

  __ ld_ptr(O4, in_bytes(ObjArrayKlass::element_klass_offset()), O4);

  assert(Otos_i == O0, "just checking");

  // Otos_i:    value
  // O1:        addr - offset
  // O2:        index
  // O3:        array
  // O4:        array element klass
  // O5:        value klass

  // Address element(O1, 0, arrayOopDesc::base_offset_in_bytes(T_OBJECT));

  // Generate a fast subtype check.  Branch to store_ok if no
  // failure.  Throw if failure.
  __ gen_subtype_check( O5, O4, G3_scratch, G4_scratch, G1_scratch, store_ok );

  // Not a subtype; so must throw exception
  __ throw_if_not_x( Assembler::never, Interpreter::_throw_ArrayStoreException_entry, G3_scratch );

  // Store is OK.
  __ bind(store_ok);
  do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i, G3_scratch, _bs->kind(), true);

  __ ba(done);
  __ delayed()->inc(Lesp, 3* Interpreter::stackElementSize); // adj sp (pops array, index and value)

  __ bind(is_null);
  do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), G0, G4_scratch, _bs->kind(), true);

  __ profile_null_seen(G3_scratch);
  __ inc(Lesp, 3* Interpreter::stackElementSize); // adj sp (pops array, index and value)
  __ bind(done);
}


void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(O2); // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, 0, G3_scratch, O2);
  __ stb(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_BYTE));
}


void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(O2); // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerShort, G3_scratch, O2);
  __ sth(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_CHAR));
}


void TemplateTable::sastore() {
  // %%%%% Factor across platforms
  castore();
}


void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ st(Otos_i, Llocals, Interpreter::local_offset_in_bytes(n));
}


void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  assert(n+1 < Argument::n_register_parameters, "only handle register cases");
  __ store_unaligned_long(Otos_l, Llocals, Interpreter::local_offset_in_bytes(n+1));

}


void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  assert(n < Argument::n_register_parameters, "only handle register cases");
  __ stf(FloatRegisterImpl::S, Ftos_f, Llocals, Interpreter::local_offset_in_bytes(n));
}


void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  FloatRegister src = Ftos_d;
  __ store_unaligned_double(src, Llocals, Interpreter::local_offset_in_bytes(n+1));
}


void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  __ store_local_ptr(n, Otos_i);
}


void TemplateTable::pop() {
  transition(vtos, vtos);
  __ inc(Lesp, Interpreter::stackElementSize);
}


void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ inc(Lesp, 2 * Interpreter::stackElementSize);
}


void TemplateTable::dup() {
  transition(vtos, vtos);
  // stack: ..., a
  // load a and tag
  __ load_ptr(0, Otos_i);
  __ push_ptr(Otos_i);
  // stack: ..., a, a
}


void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, G3_scratch);  // get a
  __ load_ptr( 0, Otos_l1);     // get b
  __ store_ptr(1, Otos_l1);     // put b
  __ store_ptr(0, G3_scratch);  // put a - like swap
  __ push_ptr(Otos_l1);         // push b
  // stack: ..., b, a, b
}


void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  // get c and push on stack, reuse registers
  __ load_ptr( 0, G3_scratch);  // get c
  __ push_ptr(G3_scratch);      // push c with tag
  // stack: ..., a, b, c, c  (c in reg)  (Lesp - 4)
  // (stack offsets n+1 now)
  __ load_ptr( 3, Otos_l1);     // get a
  __ store_ptr(3, G3_scratch);  // put c at 3
  // stack: ..., c, b, c, c  (a in reg)
  __ load_ptr( 2, G3_scratch);  // get b
  __ store_ptr(2, Otos_l1);     // put a at 2
  // stack: ..., c, a, c, c  (b in reg)
  __ store_ptr(1, G3_scratch);  // put b at 1
  // stack: ..., c, a, b, c
}


void TemplateTable::dup2() {
  transition(vtos, vtos);
  __ load_ptr(1, G3_scratch);  // get a
  __ load_ptr(0, Otos_l1);     // get b
  __ push_ptr(G3_scratch);     // push a
  __ push_ptr(Otos_l1);        // push b
  // stack: ..., a, b, a, b
}


void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 1, Lscratch);    // get b
  __ load_ptr( 2, Otos_l1);     // get a
  __ store_ptr(2, Lscratch);    // put b at a
  // stack: ..., b, b, c
  __ load_ptr( 0, G3_scratch);  // get c
  __ store_ptr(1, G3_scratch);  // put c at b
  // stack: ..., b, c, c
  __ store_ptr(0, Otos_l1);     // put a at c
  // stack: ..., b, c, a
  __ push_ptr(Lscratch);        // push b
  __ push_ptr(G3_scratch);      // push c
  // stack: ..., b, c, a, b, c
}


// The spec says that these types can be a mixture of category 1 (1 word)
// types and/or category 2 types (long and doubles)
void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr( 1, Lscratch);    // get c
  __ load_ptr( 3, Otos_l1);     // get a
  __ store_ptr(3, Lscratch);    // put c at 3
  __ store_ptr(1, Otos_l1);     // put a at 1
  // stack: ..., c, b, a, d
  __ load_ptr( 2, G3_scratch);  // get b
  __ load_ptr( 0, Otos_l1);     // get d
  __ store_ptr(0, G3_scratch);  // put b at 0
  __ store_ptr(2, Otos_l1);     // put d at 2
  // stack: ..., c, d, a, b
  __ push_ptr(Lscratch);        // push c
  __ push_ptr(Otos_l1);         // push d
  // stack: ..., c, d, a, b, c, d
}


void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, G3_scratch);  // get a
  __ load_ptr( 0, Otos_l1);     // get b
  __ store_ptr(0, G3_scratch);  // put b
  __ store_ptr(1, Otos_l1);     // put a
  // stack: ..., b, a
}


void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  __ pop_i(O1);
  switch (op) {
   case  add:  __  add(O1, Otos_i, Otos_i);  break;
   case  sub:  __  sub(O1, Otos_i, Otos_i);  break;
     // %%%%% Mul may not exist: better to call .mul?
   case  mul:  __ smul(O1, Otos_i, Otos_i);  break;
   case _and:  __ and3(O1, Otos_i, Otos_i);  break;
   case  _or:  __  or3(O1, Otos_i, Otos_i);  break;
   case _xor:  __ xor3(O1, Otos_i, Otos_i);  break;
   case  shl:  __  sll(O1, Otos_i, Otos_i);  break;
   case  shr:  __  sra(O1, Otos_i, Otos_i);  break;
   case ushr:  __  srl(O1, Otos_i, Otos_i);  break;
   default: ShouldNotReachHere();
  }
}
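
// Note: no explicit "& 0x1f" is needed for shl/shr/ushr above: the SPARC
// 32-bit shift instructions use only the low 5 bits of the count register,
// which matches the masking the JVM spec prescribes for ishl/ishr/iushr.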


void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
  __ pop_l(O2);
  switch (op) {
#ifdef _LP64
   case  add:  __  add(O2, Otos_l, Otos_l);  break;
   case  sub:  __  sub(O2, Otos_l, Otos_l);  break;
   case _and:  __ and3(O2, Otos_l, Otos_l);  break;
   case  _or:  __  or3(O2, Otos_l, Otos_l);  break;
   case _xor:  __ xor3(O2, Otos_l, Otos_l);  break;
#else
   case  add:  __ addcc(O3, Otos_l2, Otos_l2);  __ addc(O2, Otos_l1, Otos_l1);  break;
   case  sub:  __ subcc(O3, Otos_l2, Otos_l2);  __ subc(O2, Otos_l1, Otos_l1);  break;
   case _and:  __  and3(O3, Otos_l2, Otos_l2);  __ and3(O2, Otos_l1, Otos_l1);  break;
   case  _or:  __   or3(O3, Otos_l2, Otos_l2);  __  or3(O2, Otos_l1, Otos_l1);  break;
   case _xor:  __  xor3(O3, Otos_l2, Otos_l2);  __ xor3(O2, Otos_l1, Otos_l1);  break;
#endif
   default: ShouldNotReachHere();
  }
}
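
// Note: in the 32-bit build a 64-bit add/sub is synthesized from a low-word
// addcc/subcc that sets the carry, followed by a high-word addc/subc that
// consumes it; the logical ops simply operate on both halves independently.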


void TemplateTable::idiv() {
  // %%%%% Later: for SPARC V7, call the .sdiv library routine;
  // %%%%% use ldsw...sdivx on pure V9 ABI.  64 bit safe.

  transition(itos, itos);
  __ pop_i(O1); // get 1st op

  // Y contains upper 32 bits of result, set it to 0 or all ones
  __ wry(G0);
  __ mov(~0, G3_scratch);

  __ tst(O1);
  Label neg;
  __ br(Assembler::negative, true, Assembler::pn, neg);
  __ delayed()->wry(G3_scratch);
  __ bind(neg);

  Label ok;
  __ tst(Otos_i);
  __ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch );

  const int min_int = 0x80000000;
  Label regular;
  __ cmp(Otos_i, -1);
  __ br(Assembler::notEqual, false, Assembler::pt, regular);
#ifdef _LP64
  // Don't put set in delay slot
  // Set will turn into multiple instructions in 64 bit mode
  __ delayed()->nop();
  __ set(min_int, G4_scratch);
#else
  __ delayed()->set(min_int, G4_scratch);
#endif
  Label done;
  __ cmp(O1, G4_scratch);
  __ br(Assembler::equal, true, Assembler::pt, done);
  __ delayed()->mov(O1, Otos_i);   // (mov only executed if branch taken)

  __ bind(regular);
  __ sdiv(O1, Otos_i, Otos_i); // note: irem uses O1 after this instruction!
  __ bind(done);
}
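
// Note: the min_int/-1 test above implements the JVM-specified wrap-around
// for integer division overflow: 0x80000000 / -1 must yield 0x80000000
// again, so that one case skips sdiv and returns the dividend unchanged.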


void TemplateTable::irem() {
  transition(itos, itos);
  __ mov(Otos_i, O2); // save divisor
  idiv();             // %%%% Hack: exploits fact that idiv leaves dividend in O1
  __ smul(Otos_i, O2, Otos_i);
  __ sub(O1, Otos_i, Otos_i);
}


void TemplateTable::lmul() {
  transition(ltos, ltos);
  __ pop_l(O2);
#ifdef _LP64
  __ mulx(Otos_l, O2, Otos_l);
#else
  __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lmul));
#endif

}


void TemplateTable::ldiv() {
  transition(ltos, ltos);

  // check for zero
  __ pop_l(O2);
#ifdef _LP64
  __ tst(Otos_l);
  __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ sdivx(O2, Otos_l, Otos_l);
#else
  __ orcc(Otos_l1, Otos_l2, G0);
  __ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
#endif
}


void TemplateTable::lrem() {
  transition(ltos, ltos);

  // check for zero
  __ pop_l(O2);
#ifdef _LP64
  __ tst(Otos_l);
  __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ sdivx(O2, Otos_l, Otos_l2);
  __ mulx (Otos_l2, Otos_l, Otos_l2);
  __ sub  (O2, Otos_l2, Otos_l);
#else
  __ orcc(Otos_l1, Otos_l2, G0);
  __ throw_if_not_icc(Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
#endif
}


void TemplateTable::lshl() {
  transition(itos, ltos); // %%%% could optimize, fill delay slot or opt for ultra

  __ pop_l(O2); // shift value in O2, O3
#ifdef _LP64
  __ sllx(O2, Otos_i, Otos_l);
#else
  __ lshl(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
}


void TemplateTable::lshr() {
  transition(itos, ltos); // %%%% see lshl comment

  __ pop_l(O2); // shift value in O2, O3
#ifdef _LP64
  __ srax(O2, Otos_i, Otos_l);
#else
  __ lshr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
}


void TemplateTable::lushr() {
  transition(itos, ltos); // %%%% see lshl comment

  __ pop_l(O2); // shift value in O2, O3
#ifdef _LP64
  __ srlx(O2, Otos_i, Otos_l);
#else
  __ lushr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
}


void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);
  switch (op) {
   case  add:  __  pop_f(F4); __ fadd(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  sub:  __  pop_f(F4); __ fsub(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  mul:  __  pop_f(F4); __ fmul(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  div:  __  pop_f(F4); __ fdiv(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  rem:
     assert(Ftos_f == F0, "just checking");
#ifdef _LP64
     // LP64 calling conventions use F1, F3 for passing 2 floats
     __ pop_f(F1);
     __ fmov(FloatRegisterImpl::S, Ftos_f, F3);
#else
     __ pop_i(O0);
     __ stf(FloatRegisterImpl::S, Ftos_f, __ d_tmp);
     __ ld( __ d_tmp, O1 );
#endif
     __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::frem));
     assert( Ftos_f == F0, "fix this code" );
     break;

   default: ShouldNotReachHere();
  }
}


void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);
  switch (op) {
   case  add:  __  pop_d(F4); __ fadd(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  sub:  __  pop_d(F4); __ fsub(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  mul:  __  pop_d(F4); __ fmul(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  div:  __  pop_d(F4); __ fdiv(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  rem:
#ifdef _LP64
     // Pass arguments in D0, D2
     __ fmov(FloatRegisterImpl::D, Ftos_f, F2 );
     __ pop_d( F0 );
#else
     // Pass arguments in O0O1, O2O3
     __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
     __ ldd( __ d_tmp, O2 );
     __ pop_d(Ftos_f);
     __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
     __ ldd( __ d_tmp, O0 );
#endif
     __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::drem));
     assert( Ftos_d == F0, "fix this code" );
     break;

   default: ShouldNotReachHere();
  }
}


void TemplateTable::ineg() {
  transition(itos, itos);
  __ neg(Otos_i);
}


void TemplateTable::lneg() {
  transition(ltos, ltos);
#ifdef _LP64
  __ sub(G0, Otos_l, Otos_l);
#else
  __ lneg(Otos_l1, Otos_l2);
#endif
}


void TemplateTable::fneg() {
  transition(ftos, ftos);
  __ fneg(FloatRegisterImpl::S, Ftos_f, Ftos_f);
}


void TemplateTable::dneg() {
  transition(dtos, dtos);
  __ fneg(FloatRegisterImpl::D, Ftos_f, Ftos_f);
}


void TemplateTable::iinc() {
  transition(vtos, vtos);
  locals_index(G3_scratch);
  __ ldsb(Lbcp, 2, O2);  // load constant
  __ access_local_int(G3_scratch, Otos_i);
  __ add(Otos_i, O2, Otos_i);
  __ st(Otos_i, G3_scratch, 0);  // access_local_int puts E.A. in G3_scratch
}


void TemplateTable::wide_iinc() {
  transition(vtos, vtos);
  locals_index_wide(G3_scratch);
  __ get_2_byte_integer_at_bcp( 4,  O2, O3, InterpreterMacroAssembler::Signed);
  __ access_local_int(G3_scratch, Otos_i);
  __ add(Otos_i, O3, Otos_i);
  __ st(Otos_i, G3_scratch, 0);  // access_local_int puts E.A. in G3_scratch
}


void TemplateTable::convert() {
  // %%%%% Factor this first part across platforms
#ifdef ASSERT
  TosState tos_in  = ilgl;
  TosState tos_out = ilgl;
  switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default             : ShouldNotReachHere();
  }
  switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default             : ShouldNotReachHere();
  }
  transition(tos_in, tos_out);
#endif

  // Conversion
  Label done;
  switch (bytecode()) {
   case Bytecodes::_i2l:
#ifdef _LP64
    // Sign extend the 32 bits
    __ sra ( Otos_i, 0, Otos_l );
#else
    __ addcc(Otos_i, 0, Otos_l2);
    __ br(Assembler::greaterEqual, true, Assembler::pt, done);
    __ delayed()->clr(Otos_l1);
    __ set(~0, Otos_l1);
#endif
    break;

   case Bytecodes::_i2f:
    __ st(Otos_i, __ d_tmp );
    __ ldf(FloatRegisterImpl::S,  __ d_tmp, F0);
    __ fitof(FloatRegisterImpl::S, F0, Ftos_f);
    break;

   case Bytecodes::_i2d:
    __ st(Otos_i, __ d_tmp);
    __ ldf(FloatRegisterImpl::S,  __ d_tmp, F0);
    __ fitof(FloatRegisterImpl::D, F0, Ftos_f);
    break;

   case Bytecodes::_i2b:
    __ sll(Otos_i, 24, Otos_i);
    __ sra(Otos_i, 24, Otos_i);
    break;
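
    // The sll/sra pair above sign-extends the low byte: e.g. 0x1ff << 24
    // is 0xff000000, and the arithmetic shift back down yields 0xffffffff
    // (-1), exactly the (jbyte) truncation i2b requires.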

   case Bytecodes::_i2c:
    __ sll(Otos_i, 16, Otos_i);
    __ srl(Otos_i, 16, Otos_i);
    break;

   case Bytecodes::_i2s:
    __ sll(Otos_i, 16, Otos_i);
    __ sra(Otos_i, 16, Otos_i);
    break;

   case Bytecodes::_l2i:
#ifndef _LP64
    __ mov(Otos_l2, Otos_i);
#else
    // Sign-extend into the high 32 bits
    __ sra(Otos_l, 0, Otos_i);
#endif
    break;

   case Bytecodes::_l2f:
   case Bytecodes::_l2d:
    __ st_long(Otos_l, __ d_tmp);
    __ ldf(FloatRegisterImpl::D, __ d_tmp, Ftos_d);

    if (bytecode() == Bytecodes::_l2f) {
      __ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f);
    } else {
      __ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d);
    }
    break;

   case Bytecodes::_f2i: {
      Label isNaN;
      // result must be 0 if value is NaN; test by comparing value to itself
      __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, Ftos_f, Ftos_f);
      __ fb(Assembler::f_unordered, true, Assembler::pn, isNaN);
      __ delayed()->clr(Otos_i);                                     // NaN
      __ ftoi(FloatRegisterImpl::S, Ftos_f, F30);
      __ stf(FloatRegisterImpl::S, F30, __ d_tmp);
      __ ld(__ d_tmp, Otos_i);
      __ bind(isNaN);
    }
    break;

   case Bytecodes::_f2l:
    // must uncache tos
    __ push_f();
#ifdef _LP64
    __ pop_f(F1);
#else
    __ pop_i(O0);
#endif
    __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
    break;

   case Bytecodes::_f2d:
    __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, Ftos_f, Ftos_f);
    break;

   case Bytecodes::_d2i:
   case Bytecodes::_d2l:
    // must uncache tos
    __ push_d();
#ifdef _LP64
    // LP64 calling conventions pass first double arg in D0
    __ pop_d( Ftos_d );
#else
    __ pop_i( O0 );
    __ pop_i( O1 );
#endif
    __ call_VM_leaf(Lscratch,
        bytecode() == Bytecodes::_d2i
          ? CAST_FROM_FN_PTR(address, SharedRuntime::d2i)
          : CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
    break;

   case Bytecodes::_d2f:
    __ ftof( FloatRegisterImpl::D, FloatRegisterImpl::S, Ftos_d, Ftos_f);
    break;

   default: ShouldNotReachHere();
  }
  __ bind(done);
}


void TemplateTable::lcmp() {
  transition(ltos, itos);

#ifdef _LP64
  __ pop_l(O1); // pop off value 1, value 2 is in O0
  __ lcmp( O1, Otos_l, Otos_i );
#else
  __ pop_l(O2); // cmp O2,3 to O0,1
  __ lcmp( O2, O3, Otos_l1, Otos_l2, Otos_i );
#endif
}


void TemplateTable::float_cmp(bool is_float, int unordered_result) {

  if (is_float) __ pop_f(F2);
  else          __ pop_d(F2);

  assert(Ftos_f == F0  &&  Ftos_d == F0,  "alias checking:");

  __ float_cmp( is_float, unordered_result, F2, F0, Otos_i );
}

void TemplateTable::branch(bool is_jsr, bool is_wide) {
  // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
  __ verify_thread();

  const Register O2_bumped_count = O2;
  __ profile_taken_branch(G3_scratch, O2_bumped_count);

  // get (wide) offset to O1_disp
  const Register O1_disp = O1;
  if (is_wide)  __ get_4_byte_integer_at_bcp( 1,  G4_scratch, O1_disp, InterpreterMacroAssembler::set_CC);
  else          __ get_2_byte_integer_at_bcp( 1,  G4_scratch, O1_disp, InterpreterMacroAssembler::Signed, InterpreterMacroAssembler::set_CC);

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the
  // non-JSR normal-branch stuff occurring below.
  if( is_jsr ) {
    // compute return address as bci in Otos_i
    __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
    __ sub(Lbcp, G3_scratch, G3_scratch);
    __ sub(G3_scratch, in_bytes(ConstMethod::codes_offset()) - (is_wide ? 5 : 3), Otos_i);

    // Bump Lbcp to target of JSR
    __ add(Lbcp, O1_disp, Lbcp);
    // Push returnAddress for "ret" on stack
    __ push_ptr(Otos_i);
    // And away we go!
    __ dispatch_next(vtos);
    return;
  }

  // Normal (non-jsr) branch handling

  // Save the current Lbcp
  const Register l_cur_bcp = Lscratch;
  __ mov( Lbcp, l_cur_bcp );

  bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
  if ( increment_invocation_counter_for_backward_branches ) {
    Label Lforward;
    // check branch direction
    __ br( Assembler::positive, false,  Assembler::pn, Lforward );
    // Bump bytecode pointer by displacement (take the branch)
    __ delayed()->add( O1_disp, Lbcp, Lbcp );     // add to bc addr

    const Register Rcounters = G3_scratch;
    __ get_method_counters(Lmethod, Rcounters, Lforward);

    if (TieredCompilation) {
      Label Lno_mdo, Loverflow;
      int increment = InvocationCounter::count_increment;
      int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
      if (ProfileInterpreter) {
        // If no method data exists, go to profile_continue.
        __ ld_ptr(Lmethod, Method::method_data_offset(), G4_scratch);
        __ br_null_short(G4_scratch, Assembler::pn, Lno_mdo);

        // Increment backedge counter in the MDO
        Address mdo_backedge_counter(G4_scratch, in_bytes(MethodData::backedge_counter_offset()) +
                                                 in_bytes(InvocationCounter::counter_offset()));
        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, O0,
                                   Assembler::notZero, &Lforward);
        __ ba_short(Loverflow);
      }

      // If there's no MDO, increment counter in MethodCounters*
      __ bind(Lno_mdo);
      Address backedge_counter(Rcounters,
              in_bytes(MethodCounters::backedge_counter_offset()) +
              in_bytes(InvocationCounter::counter_offset()));
      __ increment_mask_and_jump(backedge_counter, increment, mask, G4_scratch, O0,
                                 Assembler::notZero, &Lforward);
      __ bind(Loverflow);

      // notify point for loop, pass branch bytecode
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), l_cur_bcp);

      // Was an OSR adapter generated?
      // O0 = osr nmethod
      __ br_null_short(O0, Assembler::pn, Lforward);

      // Has the nmethod been invalidated already?
      __ ld(O0, nmethod::entry_bci_offset(), O2);
      __ cmp_and_br_short(O2, InvalidOSREntryBci, Assembler::equal, Assembler::pn, Lforward);

      // migrate the interpreter frame off of the stack

      __ mov(G2_thread, L7);
      // save nmethod
      __ mov(O0, L6);
      __ set_last_Java_frame(SP, noreg);
      __ call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
      __ reset_last_Java_frame();
      __ mov(L7, G2_thread);

      // move OSR nmethod to I1
      __ mov(L6, I1);

      // OSR buffer to I0
      __ mov(O0, I0);

      // remove the interpreter frame
      __ restore(I5_savedSP, 0, SP);

      // Jump to the osr code.
      __ ld_ptr(O1, nmethod::osr_entry_point_offset(), O2);
      __ jmp(O2, G0);
      __ delayed()->nop();

    } else {
      // Update Backedge branch separately from invocations
      const Register G4_invoke_ctr = G4;
      __ increment_backedge_counter(Rcounters, G4_invoke_ctr, G1_scratch);
      if (ProfileInterpreter) {
        __ test_invocation_counter_for_mdp(G4_invoke_ctr, G3_scratch, Lforward);
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(O2_bumped_count, l_cur_bcp, G3_scratch);
        }
      } else {
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(G4_invoke_ctr, l_cur_bcp, G3_scratch);
        }
      }
    }

    __ bind(Lforward);
  } else
    // Bump bytecode pointer by displacement (take the branch)
    __ add( O1_disp, Lbcp, Lbcp );// add to bc addr

  // continue with bytecode @ target
  // %%%%% Like Intel, could speed things up by moving bytecode fetch to code above,
  // %%%%% and changing dispatch_next to dispatch_only
  __ dispatch_next(vtos);
}


// Note Condition in argument is TemplateTable::Condition
// arg scope is within class scope

void TemplateTable::if_0cmp(Condition cc) {
  // no pointers, integer only!
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  __ cmp( Otos_i, 0);
  __ if_cmp(ccNot(cc), false);
}


void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);
  __ pop_i(O1);
  __ cmp(O1, Otos_i);
  __ if_cmp(ccNot(cc), false);
}


void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);
  __ tst(Otos_i);
  __ if_cmp(ccNot(cc), true);
}


void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);
  __ pop_ptr(O1);
  __ verify_oop(O1);
  __ verify_oop(Otos_i);
  __ cmp(O1, Otos_i);
  __ if_cmp(ccNot(cc), true);
}


void TemplateTable::ret() {
  transition(vtos, vtos);
  locals_index(G3_scratch);
  __ access_local_returnAddress(G3_scratch, Otos_i);
  // Otos_i contains the bci, compute the bcp from that

#ifdef _LP64
#ifdef ASSERT
  // jsr result was labeled as an 'itos' not an 'atos' because we cannot GC
  // the result.  The return address (really a BCI) was stored with an
  // 'astore' because JVM specs claim it's a pointer-sized thing.  Hence in
  // the 64-bit build the 32-bit BCI is actually in the low bits of a 64-bit
  // loaded value.
  { Label zzz ;
     __ set (65536, G3_scratch) ;
     __ cmp (Otos_i, G3_scratch) ;
     __ bp( Assembler::lessEqualUnsigned, false, Assembler::xcc, Assembler::pn, zzz);
     __ delayed()->nop();
     __ stop("BCI is in the wrong register half?");
     __ bind (zzz) ;
  }
#endif
#endif

  __ profile_ret(vtos, Otos_i, G4_scratch);

  __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
  __ add(G3_scratch, Otos_i, G3_scratch);
  __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
  __ dispatch_next(vtos);
}


void TemplateTable::wide_ret() {
  transition(vtos, vtos);
  locals_index_wide(G3_scratch);
  __ access_local_returnAddress(G3_scratch, Otos_i);
  // Otos_i contains the bci, compute the bcp from that

  __ profile_ret(vtos, Otos_i, G4_scratch);

  __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
  __ add(G3_scratch, Otos_i, G3_scratch);
  __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
  __ dispatch_next(vtos);
}


void TemplateTable::tableswitch() {
  transition(itos, vtos);
  Label default_case, continue_execution;

  // align bcp
  __ add(Lbcp, BytesPerInt, O1);
  __ and3(O1, -BytesPerInt, O1);
  // load lo, hi
  __ ld(O1, 1 * BytesPerInt, O2); // low bound
  __ ld(O1, 2 * BytesPerInt, O3); // high bound
#ifdef _LP64
  // Sign extend the 32 bits
  __ sra ( Otos_i, 0, Otos_i );
#endif /* _LP64 */

  // check against lo & hi
  __ cmp( Otos_i, O2);
  __ br( Assembler::less, false, Assembler::pn, default_case);
  __ delayed()->cmp( Otos_i, O3 );
  __ br( Assembler::greater, false, Assembler::pn, default_case);
  // lookup dispatch offset
  __ delayed()->sub(Otos_i, O2, O2);
  __ profile_switch_case(O2, O3, G3_scratch, G4_scratch);
  __ sll(O2, LogBytesPerInt, O2);
  __ add(O2, 3 * BytesPerInt, O2);
  __ ba(continue_execution);
  __ delayed()->ld(O1, O2, O2);
  // handle default
  __ bind(default_case);
  __ profile_switch_default(O3);
  __ ld(O1, 0, O2); // get default offset
  // continue execution
  __ bind(continue_execution);
  __ add(Lbcp, O2, Lbcp);
  __ dispatch_next(vtos);
}
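
// Note: the code above relies on the aligned tableswitch operand layout:
// default offset at [O1 + 0], low bound at [O1 + 4], high bound at [O1 + 8],
// then the jump offsets from [O1 + 12] on, indexed by (key - lo) words.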


void TemplateTable::lookupswitch() {
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}

void TemplateTable::fast_linearswitch() {
  transition(itos, vtos);
  Label loop_entry, loop, found, continue_execution;
  // align bcp
  __ add(Lbcp, BytesPerInt, O1);
  __ and3(O1, -BytesPerInt, O1);
  // set counter
  __ ld(O1, BytesPerInt, O2);
  __ sll(O2, LogBytesPerInt + 1, O2); // in word-pairs
  __ add(O1, 2 * BytesPerInt, O3); // set first pair addr
  __ ba(loop_entry);
  __ delayed()->add(O3, O2, O2); // counter now points past last pair

  // table search
  __ bind(loop);
  __ cmp(O4, Otos_i);
  __ br(Assembler::equal, true, Assembler::pn, found);
  __ delayed()->ld(O3, BytesPerInt, O4); // offset -> O4
  __ inc(O3, 2 * BytesPerInt);

  __ bind(loop_entry);
  __ cmp(O2, O3);
  __ brx(Assembler::greaterUnsigned, true, Assembler::pt, loop);
  __ delayed()->ld(O3, 0, O4);

  // default case
  __ ld(O1, 0, O4); // get default offset
  if (ProfileInterpreter) {
    __ profile_switch_default(O3);
    __ ba_short(continue_execution);
  }

  // entry found -> get offset
  __ bind(found);
  if (ProfileInterpreter) {
    __ sub(O3, O1, O3);
    __ sub(O3, 2*BytesPerInt, O3);
    __ srl(O3, LogBytesPerInt + 1, O3); // in word-pairs
    __ profile_switch_case(O3, O1, O2, G3_scratch);

    __ bind(continue_execution);
  }
  __ add(Lbcp, O4, Lbcp);
  __ dispatch_next(vtos);
}
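
// Note: fast_linearswitch scans the lookupswitch operands sequentially:
// after alignment the default offset is at [O1 + 0], the pair count at
// [O1 + 4], and the (match, offset) pairs start at [O1 + 8], two words each.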


void TemplateTable::fast_binaryswitch() {
  transition(itos, vtos);
  // Implementation using the following core algorithm: (copied from Intel)
  //
  // int binary_search(int key, LookupswitchPair* array, int n) {
  //   // Binary search according to "Methodik des Programmierens" by
  //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
  //   int i = 0;
  //   int j = n;
  //   while (i+1 < j) {
  //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
  //     // with      Q: for all i: 0 <= i < n: key < a[i]
1881 // where a stands for the array and assuming that the (non-existing)
1882 // // element a[n] is infinitely big.
1883 // int h = (i + j) >> 1;
1884 // // i < h < j
1885 // if (key < array[h].fast_match()) {
1886 // j = h;
1887 // } else {
1888 // i = h;
1889 // }
1890 // }
1891 // // R: a[i] <= key < a[i+1] or Q
1892 // // (i.e., if key is within array, i is the correct index)
1893 // return i;
1894 // }
1896 // register allocation
1897 assert(Otos_i == O0, "alias checking");
1898 const Register Rkey = Otos_i; // already set (tosca)
1899 const Register Rarray = O1;
1900 const Register Ri = O2;
1901 const Register Rj = O3;
1902 const Register Rh = O4;
1903 const Register Rscratch = O5;
1905 const int log_entry_size = 3;
1906 const int entry_size = 1 << log_entry_size;
1908 Label found;
1909 // Find Array start
1910 __ add(Lbcp, 3 * BytesPerInt, Rarray);
1911 __ and3(Rarray, -BytesPerInt, Rarray);
1912 // initialize i & j (in delay slot)
1913 __ clr( Ri );
1915 // and start
1916 Label entry;
1917 __ ba(entry);
1918 __ delayed()->ld( Rarray, -BytesPerInt, Rj);
1919 // (Rj is already in the native byte-ordering.)
1921 // binary search loop
1922 { Label loop;
1923 __ bind( loop );
1924 // int h = (i + j) >> 1;
1925 __ sra( Rh, 1, Rh );
1926 // if (key < array[h].fast_match()) {
1927 // j = h;
1928 // } else {
1929 // i = h;
1930 // }
1931 __ sll( Rh, log_entry_size, Rscratch );
1932 __ ld( Rarray, Rscratch, Rscratch );
1933 // (Rscratch is already in the native byte-ordering.)
1934 __ cmp( Rkey, Rscratch );
1935 __ movcc( Assembler::less, false, Assembler::icc, Rh, Rj ); // j = h if (key < array[h].fast_match())
1936 __ movcc( Assembler::greaterEqual, false, Assembler::icc, Rh, Ri ); // i = h if (key >= array[h].fast_match())
1938 // while (i+1 < j)
1939 __ bind( entry );
1940 __ add( Ri, 1, Rscratch );
1941 __ cmp(Rscratch, Rj);
1942 __ br( Assembler::less, true, Assembler::pt, loop );
1943 __ delayed()->add( Ri, Rj, Rh ); // start h = i + j; >> 1 done at loop head
1944 }
1946 // end of binary search, result index is i (must check again!)
1947 Label default_case;
1948 Label continue_execution;
1949 if (ProfileInterpreter) {
1950 __ mov( Ri, Rh ); // Save index in i for profiling
1951 }
1952 __ sll( Ri, log_entry_size, Ri );
1953 __ ld( Rarray, Ri, Rscratch );
1954 // (Rscratch is already in the native byte-ordering.)
1955 __ cmp( Rkey, Rscratch );
1956 __ br( Assembler::notEqual, true, Assembler::pn, default_case );
1957 __ delayed()->ld( Rarray, -2 * BytesPerInt, Rj ); // load default offset -> j
1959 // entry found -> j = offset
1960 __ inc( Ri, BytesPerInt );
1961 __ profile_switch_case(Rh, Rj, Rscratch, Rkey);
1962 __ ld( Rarray, Ri, Rj );
1963 // (Rj is already in the native byte-ordering.)
1965 if (ProfileInterpreter) {
1966 __ ba_short(continue_execution);
1967 }
1969 __ bind(default_case); // fall through (if not profiling)
1970 __ profile_switch_default(Ri);
1972 __ bind(continue_execution);
1973 __ add( Lbcp, Rj, Lbcp );
1974 __ dispatch_next( vtos );
1975 }
1978 void TemplateTable::_return(TosState state) {
1979 transition(state, state);
1980 assert(_desc->calls_vm(), "inconsistent calls_vm information");
1982 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
1983 assert(state == vtos, "only valid state");
1984 __ mov(G0, G3_scratch);
1985 __ access_local_ptr(G3_scratch, Otos_i);
1986 __ load_klass(Otos_i, O2);
1987 __ set(JVM_ACC_HAS_FINALIZER, G3);
1988 __ ld(O2, in_bytes(Klass::access_flags_offset()), O2);
1989 __ andcc(G3, O2, G0);
1990 Label skip_register_finalizer;
1991 __ br(Assembler::zero, false, Assembler::pn, skip_register_finalizer);
1992 __ delayed()->nop();
1994 // Call out to do finalizer registration
1995 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), Otos_i);
1997 __ bind(skip_register_finalizer);
1998 }
2000 __ remove_activation(state, /* throw_monitor_exception */ true);
2002 // The caller's SP was adjusted upon method entry to accommodate
2003 // the callee's non-argument locals. Undo that adjustment.
2004 __ ret(); // return to caller
2005 __ delayed()->restore(I5_savedSP, G0, SP);
2006 }
2009 // ----------------------------------------------------------------------------
2010 // Volatile variables demand their effects be made known to all CPUs in
2011 // order. Store buffers on most chips allow reads & writes to reorder; the
2012 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
2013 // memory barrier (i.e., it's not sufficient that the interpreter does not
2014 // reorder volatile references, the hardware also must not reorder them).
2015 //
2016 // According to the new Java Memory Model (JMM):
2017 // (1) All volatiles are serialized with respect to each other.
2018 // ALSO, reads & writes act as acquire & release, so:
2019 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
2020 // the read float up to before the read. It's OK for non-volatile memory refs
2021 // that happen before the volatile read to float down below it.
2022 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
2023 // that happen BEFORE the write float down to after the write. It's OK for
2024 // non-volatile memory refs that happen after the volatile write to float up
2025 // before it.
2026 //
2027 // We only put in barriers around volatile refs (they are expensive), not
2028 // _between_ memory refs (that would require us to track the flavor of the
2029 // previous memory refs). Requirements (2) and (3) require some barriers
2030 // before volatile stores and after volatile loads. These nearly cover
2031 // requirement (1) but miss the volatile-store-volatile-load case. This final
2032 // case is placed after volatile-stores although it could just as well go
2033 // before volatile-loads.
2034 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint) {
2035 // Helper function to insert an is-volatile test and memory barrier.
2036 // All current SPARC implementations run in TSO, needing only StoreLoad.
2037 if ((order_constraint & Assembler::StoreLoad) == 0) return;
2038 __ membar( order_constraint );
2039 }
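// For reference, where the field-access templates below place these barriers
// around a volatile reference (a sketch; on TSO only the StoreLoad barrier
// actually emits anything):
//
//   volatile load:   ld  [field]
//                    membar #LoadLoad | #LoadStore     // requirement (2)
//   volatile store:  membar #LoadStore | #StoreStore   // requirement (3)
//                    st  [field]
//                    membar #StoreLoad                 // covers (1)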
2041 // ----------------------------------------------------------------------------
2042 void TemplateTable::resolve_cache_and_index(int byte_no,
2043 Register Rcache,
2044 Register index,
2045 size_t index_size) {
2046 // Depends on cpCacheOop layout!
2047 Label resolved;
2049 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2050 __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, Lbyte_code, byte_no, 1, index_size);
2051 __ cmp(Lbyte_code, (int) bytecode()); // have we resolved this bytecode?
2052 __ br(Assembler::equal, false, Assembler::pt, resolved);
2053 __ delayed()->set((int)bytecode(), O1);
2055 address entry;
2056 switch (bytecode()) {
2057 case Bytecodes::_getstatic : // fall through
2058 case Bytecodes::_putstatic : // fall through
2059 case Bytecodes::_getfield : // fall through
2060 case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
2061 case Bytecodes::_invokevirtual : // fall through
2062 case Bytecodes::_invokespecial : // fall through
2063 case Bytecodes::_invokestatic : // fall through
2064 case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
2065 case Bytecodes::_invokehandle : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break;
2066 case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
2067 default:
2068 fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
2069 break;
2070 }
2071 // first time invocation - must resolve first
2072 __ call_VM(noreg, entry, O1);
2073 // Update registers with resolved info
2074 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2075 __ bind(resolved);
2076 }
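// For reference, a C-style sketch of the fast/slow path above (illustrative
// names, not generated code):
//
//   if (bytecode_at_bcp(cp_cache_entry, byte_no) == bytecode())
//     goto resolved;                            // entry already linked
//   call InterpreterRuntime::resolve_*(...);    // slow path into the VM
//   reload Rcache and index from the bcp;       // entry is resolved now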
2078 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2079 Register method,
2080 Register itable_index,
2081 Register flags,
2082 bool is_invokevirtual,
2083 bool is_invokevfinal,
2084 bool is_invokedynamic) {
2085 // Uses both G3_scratch and G4_scratch
2086 Register cache = G3_scratch;
2087 Register index = G4_scratch;
2088 assert_different_registers(cache, method, itable_index);
2090 // determine constant pool cache field offsets
2091 assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2092 const int method_offset = in_bytes(
2093 ConstantPoolCache::base_offset() +
2094 ((byte_no == f2_byte)
2095 ? ConstantPoolCacheEntry::f2_offset()
2096 : ConstantPoolCacheEntry::f1_offset()
2097 )
2098 );
2099 const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2100 ConstantPoolCacheEntry::flags_offset());
2101 // access constant pool cache fields
2102 const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
2103 ConstantPoolCacheEntry::f2_offset());
2105 if (is_invokevfinal) {
2106 __ get_cache_and_index_at_bcp(cache, index, 1);
2107 __ ld_ptr(Address(cache, method_offset), method);
2108 } else {
2109 size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
2110 resolve_cache_and_index(byte_no, cache, index, index_size);
2111 __ ld_ptr(Address(cache, method_offset), method);
2112 }
2114 if (itable_index != noreg) {
2115 // pick up itable or appendix index from f2 also:
2116 __ ld_ptr(Address(cache, index_offset), itable_index);
2117 }
2118 __ ld_ptr(Address(cache, flags_offset), flags);
2119 }
2121 // The Rcache register must be set before the call.
2122 void TemplateTable::load_field_cp_cache_entry(Register Robj,
2123 Register Rcache,
2124 Register index,
2125 Register Roffset,
2126 Register Rflags,
2127 bool is_static) {
2128 assert_different_registers(Rcache, Rflags, Roffset);
2130 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2132 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
2133 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
2134 if (is_static) {
2135 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f1_offset(), Robj);
2136 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2137 __ ld_ptr( Robj, mirror_offset, Robj);
2138 }
2139 }
2141 // The registers Rcache and index are expected to be set before the call.
2142 // The correct values of the Rcache and index registers are preserved.
2143 void TemplateTable::jvmti_post_field_access(Register Rcache,
2144 Register index,
2145 bool is_static,
2146 bool has_tos) {
2147 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2149 if (JvmtiExport::can_post_field_access()) {
2150 // Check to see if a field access watch has been set before we take
2151 // the time to call into the VM.
2152 Label Label1;
2153 assert_different_registers(Rcache, index, G1_scratch);
2154 AddressLiteral get_field_access_count_addr(JvmtiExport::get_field_access_count_addr());
2155 __ load_contents(get_field_access_count_addr, G1_scratch);
2156 __ cmp_and_br_short(G1_scratch, 0, Assembler::equal, Assembler::pt, Label1);
2158 __ add(Rcache, in_bytes(cp_base_offset), Rcache);
2160 if (is_static) {
2161 __ clr(Otos_i);
2162 } else {
2163 if (has_tos) {
2164 // save object pointer before call_VM() clobbers it
2165 __ push_ptr(Otos_i); // put object on tos where GC wants it.
2166 } else {
2167 // Load top of stack (do not pop the value off the stack);
2168 __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
2169 }
2170 __ verify_oop(Otos_i);
2171 }
2172 // Otos_i: object pointer or NULL if static
2173 // Rcache: cache entry pointer
2174 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2175 Otos_i, Rcache);
2176 if (!is_static && has_tos) {
2177 __ pop_ptr(Otos_i); // restore object pointer
2178 __ verify_oop(Otos_i);
2179 }
2180 __ get_cache_and_index_at_bcp(Rcache, index, 1);
2181 __ bind(Label1);
2182 }
2183 }
2185 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
2186 transition(vtos, vtos);
2188 Register Rcache = G3_scratch;
2189 Register index = G4_scratch;
2190 Register Rclass = Rcache;
2191 Register Roffset= G4_scratch;
2192 Register Rflags = G1_scratch;
2193 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2195 resolve_cache_and_index(byte_no, Rcache, index, sizeof(u2));
2196 jvmti_post_field_access(Rcache, index, is_static, false);
2197 load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);
2199 if (!is_static) {
2200 pop_and_check_object(Rclass);
2201 } else {
2202 __ verify_oop(Rclass);
2203 }
2205 Label exit;
2207 Assembler::Membar_mask_bits membar_bits =
2208 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
2210 if (__ membar_has_effect(membar_bits)) {
2211 // Get volatile flag
2212 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
2213 __ and3(Rflags, Lscratch, Lscratch);
2214 }
2216 Label checkVolatile;
2218 // compute field type
2219 Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj;
2220 __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
2221 // Make sure we don't need to mask Rflags after the above shift
2222 ConstantPoolCacheEntry::verify_tos_state_shift();
2224 // Check atos before itos for getstatic, more likely (in Queens at least)
2225 __ cmp(Rflags, atos);
2226 __ br(Assembler::notEqual, false, Assembler::pt, notObj);
2227 __ delayed() ->cmp(Rflags, itos);
2229 // atos
2230 __ load_heap_oop(Rclass, Roffset, Otos_i);
2231 __ verify_oop(Otos_i);
2232 __ push(atos);
2233 if (!is_static) {
2234 patch_bytecode(Bytecodes::_fast_agetfield, G3_scratch, G4_scratch);
2235 }
2236 __ ba(checkVolatile);
2237 __ delayed()->tst(Lscratch);
2239 __ bind(notObj);
2241 // cmp(Rflags, itos);
2242 __ br(Assembler::notEqual, false, Assembler::pt, notInt);
2243 __ delayed() ->cmp(Rflags, ltos);
2245 // itos
2246 __ ld(Rclass, Roffset, Otos_i);
2247 __ push(itos);
2248 if (!is_static) {
2249 patch_bytecode(Bytecodes::_fast_igetfield, G3_scratch, G4_scratch);
2250 }
2251 __ ba(checkVolatile);
2252 __ delayed()->tst(Lscratch);
2254 __ bind(notInt);
2256 // cmp(Rflags, ltos);
2257 __ br(Assembler::notEqual, false, Assembler::pt, notLong);
2258 __ delayed() ->cmp(Rflags, btos);
2260 // ltos
2261 // load must be atomic
2262 __ ld_long(Rclass, Roffset, Otos_l);
2263 __ push(ltos);
2264 if (!is_static) {
2265 patch_bytecode(Bytecodes::_fast_lgetfield, G3_scratch, G4_scratch);
2266 }
2267 __ ba(checkVolatile);
2268 __ delayed()->tst(Lscratch);
2270 __ bind(notLong);
2272 // cmp(Rflags, btos);
2273 __ br(Assembler::notEqual, false, Assembler::pt, notByte);
2274 __ delayed() ->cmp(Rflags, ctos);
2276 // btos
2277 __ ldsb(Rclass, Roffset, Otos_i);
2278 __ push(itos);
2279 if (!is_static) {
2280 patch_bytecode(Bytecodes::_fast_bgetfield, G3_scratch, G4_scratch);
2281 }
2282 __ ba(checkVolatile);
2283 __ delayed()->tst(Lscratch);
2285 __ bind(notByte);
2287 // cmp(Rflags, ctos);
2288 __ br(Assembler::notEqual, false, Assembler::pt, notChar);
2289 __ delayed() ->cmp(Rflags, stos);
2291 // ctos
2292 __ lduh(Rclass, Roffset, Otos_i);
2293 __ push(itos);
2294 if (!is_static) {
2295 patch_bytecode(Bytecodes::_fast_cgetfield, G3_scratch, G4_scratch);
2296 }
2297 __ ba(checkVolatile);
2298 __ delayed()->tst(Lscratch);
2300 __ bind(notChar);
2302 // cmp(Rflags, stos);
2303 __ br(Assembler::notEqual, false, Assembler::pt, notShort);
2304 __ delayed() ->cmp(Rflags, ftos);
2306 // stos
2307 __ ldsh(Rclass, Roffset, Otos_i);
2308 __ push(itos);
2309 if (!is_static) {
2310 patch_bytecode(Bytecodes::_fast_sgetfield, G3_scratch, G4_scratch);
2311 }
2312 __ ba(checkVolatile);
2313 __ delayed()->tst(Lscratch);
2315 __ bind(notShort);
2318 // cmp(Rflags, ftos);
2319 __ br(Assembler::notEqual, false, Assembler::pt, notFloat);
2320 __ delayed() ->tst(Lscratch);
2322 // ftos
2323 __ ldf(FloatRegisterImpl::S, Rclass, Roffset, Ftos_f);
2324 __ push(ftos);
2325 if (!is_static) {
2326 patch_bytecode(Bytecodes::_fast_fgetfield, G3_scratch, G4_scratch);
2327 }
2328 __ ba(checkVolatile);
2329 __ delayed()->tst(Lscratch);
2331 __ bind(notFloat);
2334 // dtos
2335 __ ldf(FloatRegisterImpl::D, Rclass, Roffset, Ftos_d);
2336 __ push(dtos);
2337 if (!is_static) {
2338 patch_bytecode(Bytecodes::_fast_dgetfield, G3_scratch, G4_scratch);
2339 }
2341 __ bind(checkVolatile);
2342 if (__ membar_has_effect(membar_bits)) {
2343 // __ tst(Lscratch); executed in delay slot
2344 __ br(Assembler::zero, false, Assembler::pt, exit);
2345 __ delayed()->nop();
2346 volatile_barrier(membar_bits);
2347 }
2349 __ bind(exit);
2350 }
2353 void TemplateTable::getfield(int byte_no) {
2354 getfield_or_static(byte_no, false);
2355 }
2357 void TemplateTable::getstatic(int byte_no) {
2358 getfield_or_static(byte_no, true);
2359 }
2362 void TemplateTable::fast_accessfield(TosState state) {
2363 transition(atos, state);
2364 Register Rcache = G3_scratch;
2365 Register index = G4_scratch;
2366 Register Roffset = G4_scratch;
2367 Register Rflags = Rcache;
2368 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2370 __ get_cache_and_index_at_bcp(Rcache, index, 1);
2371 jvmti_post_field_access(Rcache, index, /*is_static*/false, /*has_tos*/true);
2373 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
2375 __ null_check(Otos_i);
2376 __ verify_oop(Otos_i);
2378 Label exit;
2380 Assembler::Membar_mask_bits membar_bits =
2381 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
2382 if (__ membar_has_effect(membar_bits)) {
2383 // Get volatile flag
2384 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Rflags);
2385 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
2386 }
2388 switch (bytecode()) {
2389 case Bytecodes::_fast_bgetfield:
2390 __ ldsb(Otos_i, Roffset, Otos_i);
2391 break;
2392 case Bytecodes::_fast_cgetfield:
2393 __ lduh(Otos_i, Roffset, Otos_i);
2394 break;
2395 case Bytecodes::_fast_sgetfield:
2396 __ ldsh(Otos_i, Roffset, Otos_i);
2397 break;
2398 case Bytecodes::_fast_igetfield:
2399 __ ld(Otos_i, Roffset, Otos_i);
2400 break;
2401 case Bytecodes::_fast_lgetfield:
2402 __ ld_long(Otos_i, Roffset, Otos_l);
2403 break;
2404 case Bytecodes::_fast_fgetfield:
2405 __ ldf(FloatRegisterImpl::S, Otos_i, Roffset, Ftos_f);
2406 break;
2407 case Bytecodes::_fast_dgetfield:
2408 __ ldf(FloatRegisterImpl::D, Otos_i, Roffset, Ftos_d);
2409 break;
2410 case Bytecodes::_fast_agetfield:
2411 __ load_heap_oop(Otos_i, Roffset, Otos_i);
2412 break;
2413 default:
2414 ShouldNotReachHere();
2415 }
2417 if (__ membar_has_effect(membar_bits)) {
2418 __ btst(Lscratch, Rflags);
2419 __ br(Assembler::zero, false, Assembler::pt, exit);
2420 __ delayed()->nop();
2421 volatile_barrier(membar_bits);
2422 __ bind(exit);
2423 }
2425 if (state == atos) {
2426 __ verify_oop(Otos_i); // does not blow flags!
2427 }
2428 }
2430 void TemplateTable::jvmti_post_fast_field_mod() {
2431 if (JvmtiExport::can_post_field_modification()) {
2432 // Check to see if a field modification watch has been set before we take
2433 // the time to call into the VM.
2434 Label done;
2435 AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr());
2436 __ load_contents(get_field_modification_count_addr, G4_scratch);
2437 __ cmp_and_br_short(G4_scratch, 0, Assembler::equal, Assembler::pt, done);
2438 __ pop_ptr(G4_scratch); // copy the object pointer from tos
2439 __ verify_oop(G4_scratch);
2440 __ push_ptr(G4_scratch); // put the object pointer back on tos
2441 __ get_cache_entry_pointer_at_bcp(G1_scratch, G3_scratch, 1);
2442 // Save tos values before call_VM() clobbers them. Since we have
2443 // to do it for every data type, we use the saved values as the
2444 // jvalue object.
2445 switch (bytecode()) { // save tos values before call_VM() clobbers them
2446 case Bytecodes::_fast_aputfield: __ push_ptr(Otos_i); break;
2447 case Bytecodes::_fast_bputfield: // fall through
2448 case Bytecodes::_fast_sputfield: // fall through
2449 case Bytecodes::_fast_cputfield: // fall through
2450 case Bytecodes::_fast_iputfield: __ push_i(Otos_i); break;
2451 case Bytecodes::_fast_dputfield: __ push_d(Ftos_d); break;
2452 case Bytecodes::_fast_fputfield: __ push_f(Ftos_f); break;
2453 // get words in right order for use as jvalue object
2454 case Bytecodes::_fast_lputfield: __ push_l(Otos_l); break;
2455 }
2456 // setup pointer to jvalue object
2457 __ mov(Lesp, G3_scratch); __ inc(G3_scratch, wordSize);
2458 // G4_scratch: object pointer
2459 // G1_scratch: cache entry pointer
2460 // G3_scratch: jvalue object on the stack
2461 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), G4_scratch, G1_scratch, G3_scratch);
2462 switch (bytecode()) { // restore tos values
2463 case Bytecodes::_fast_aputfield: __ pop_ptr(Otos_i); break;
2464 case Bytecodes::_fast_bputfield: // fall through
2465 case Bytecodes::_fast_sputfield: // fall through
2466 case Bytecodes::_fast_cputfield: // fall through
2467 case Bytecodes::_fast_iputfield: __ pop_i(Otos_i); break;
2468 case Bytecodes::_fast_dputfield: __ pop_d(Ftos_d); break;
2469 case Bytecodes::_fast_fputfield: __ pop_f(Ftos_f); break;
2470 case Bytecodes::_fast_lputfield: __ pop_l(Otos_l); break;
2471 }
2472 __ bind(done);
2473 }
2474 }
2476 // The registers Rcache and index are expected to be set before the call.
2477 // The function may destroy various registers, just not the Rcache and index registers.
2478 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register index, bool is_static) {
2479 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2481 if (JvmtiExport::can_post_field_modification()) {
2482 // Check to see if a field modification watch has been set before we take
2483 // the time to call into the VM.
2484 Label Label1;
2485 assert_different_registers(Rcache, index, G1_scratch);
2486 AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr());
2487 __ load_contents(get_field_modification_count_addr, G1_scratch);
2488 __ cmp_and_br_short(G1_scratch, 0, Assembler::zero, Assembler::pt, Label1);
2490 // The Rcache and index registers have already been set.
2491 // That would allow us to eliminate this call, but the Rcache and index
2492 // registers would then have to be used consistently after this line.
2493 __ get_cache_and_index_at_bcp(G1_scratch, G4_scratch, 1);
2495 __ add(G1_scratch, in_bytes(cp_base_offset), G3_scratch);
2496 if (is_static) {
2497 // Life is simple. Null out the object pointer.
2498 __ clr(G4_scratch);
2499 } else {
2500 Register Rflags = G1_scratch;
2501 // Life is harder. The stack holds the value on top, followed by the
2502 // object. We don't know the size of the value, though; it could be
2503 // one or two words depending on its type. As a result, we must find
2504 // the type to determine where the object is.
2506 Label two_word, valsizeknown;
2507 __ ld_ptr(G1_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
2508 __ mov(Lesp, G4_scratch);
2509 __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
2510 // Make sure we don't need to mask Rflags after the above shift
2511 ConstantPoolCacheEntry::verify_tos_state_shift();
2512 __ cmp(Rflags, ltos);
2513 __ br(Assembler::equal, false, Assembler::pt, two_word);
2514 __ delayed()->cmp(Rflags, dtos);
2515 __ br(Assembler::equal, false, Assembler::pt, two_word);
2516 __ delayed()->nop();
2517 __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(1));
2518 __ ba_short(valsizeknown);
2519 __ bind(two_word);
2521 __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(2));
2523 __ bind(valsizeknown);
2524 // setup object pointer
2525 __ ld_ptr(G4_scratch, 0, G4_scratch);
2526 __ verify_oop(G4_scratch);
2527 }
2528 // setup pointer to jvalue object
2529 __ mov(Lesp, G1_scratch); __ inc(G1_scratch, wordSize);
2530 // G4_scratch: object pointer or NULL if static
2531 // G3_scratch: cache entry pointer
2532 // G1_scratch: jvalue object on the stack
2533 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
2534 G4_scratch, G3_scratch, G1_scratch);
2535 __ get_cache_and_index_at_bcp(Rcache, index, 1);
2536 __ bind(Label1);
2537 }
2538 }
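// For reference, the expression-stack probing done above (a sketch; Lesp
// points one word below the top-of-stack value):
//
//   one-word value:  obj    = *(intptr_t*)(Lesp + expr_offset_in_bytes(1));
//   two-word value:  obj    = *(intptr_t*)(Lesp + expr_offset_in_bytes(2));
//   jvalue object:   jvalue = (address)Lesp + wordSize;  // the saved tos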
2540 void TemplateTable::pop_and_check_object(Register r) {
2541 __ pop_ptr(r);
2542 __ null_check(r); // for field access must check obj.
2543 __ verify_oop(r);
2544 }
2546 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
2547 transition(vtos, vtos);
2548 Register Rcache = G3_scratch;
2549 Register index = G4_scratch;
2550 Register Rclass = Rcache;
2551 Register Roffset= G4_scratch;
2552 Register Rflags = G1_scratch;
2553 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2555 resolve_cache_and_index(byte_no, Rcache, index, sizeof(u2));
2556 jvmti_post_field_mod(Rcache, index, is_static);
2557 load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);
2559 Assembler::Membar_mask_bits read_bits =
2560 Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore);
2561 Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad;
2563 Label notVolatile, checkVolatile, exit;
2564 if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
2565 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
2566 __ and3(Rflags, Lscratch, Lscratch);
2568 if (__ membar_has_effect(read_bits)) {
2569 __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile);
2570 volatile_barrier(read_bits);
2571 __ bind(notVolatile);
2572 }
2573 }
2575 __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
2576 // Make sure we don't need to mask Rflags after the above shift
2577 ConstantPoolCacheEntry::verify_tos_state_shift();
2579 // compute field type
2580 Label notInt, notShort, notChar, notObj, notByte, notLong, notFloat;
2582 if (is_static) {
2583 // putstatic with object type most likely, check that first
2584 __ cmp(Rflags, atos);
2585 __ br(Assembler::notEqual, false, Assembler::pt, notObj);
2586 __ delayed()->cmp(Rflags, itos);
2588 // atos
2589 {
2590 __ pop_ptr();
2591 __ verify_oop(Otos_i);
2592 do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
2593 __ ba(checkVolatile);
2594 __ delayed()->tst(Lscratch);
2595 }
2597 __ bind(notObj);
2598 // cmp(Rflags, itos);
2599 __ br(Assembler::notEqual, false, Assembler::pt, notInt);
2600 __ delayed()->cmp(Rflags, btos);
2602 // itos
2603 {
2604 __ pop_i();
2605 __ st(Otos_i, Rclass, Roffset);
2606 __ ba(checkVolatile);
2607 __ delayed()->tst(Lscratch);
2608 }
2610 __ bind(notInt);
2611 } else {
2612 // putfield with int type most likely, check that first
2613 __ cmp(Rflags, itos);
2614 __ br(Assembler::notEqual, false, Assembler::pt, notInt);
2615 __ delayed()->cmp(Rflags, atos);
2617 // itos
2618 {
2619 __ pop_i();
2620 pop_and_check_object(Rclass);
2621 __ st(Otos_i, Rclass, Roffset);
2622 patch_bytecode(Bytecodes::_fast_iputfield, G3_scratch, G4_scratch, true, byte_no);
2623 __ ba(checkVolatile);
2624 __ delayed()->tst(Lscratch);
2625 }
2627 __ bind(notInt);
2628 // cmp(Rflags, atos);
2629 __ br(Assembler::notEqual, false, Assembler::pt, notObj);
2630 __ delayed()->cmp(Rflags, btos);
2632 // atos
2633 {
2634 __ pop_ptr();
2635 pop_and_check_object(Rclass);
2636 __ verify_oop(Otos_i);
2637 do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
2638 patch_bytecode(Bytecodes::_fast_aputfield, G3_scratch, G4_scratch, true, byte_no);
2639 __ ba(checkVolatile);
2640 __ delayed()->tst(Lscratch);
2641 }
2643 __ bind(notObj);
2644 }
2646 // cmp(Rflags, btos);
2647 __ br(Assembler::notEqual, false, Assembler::pt, notByte);
2648 __ delayed()->cmp(Rflags, ltos);
2650 // btos
2651 {
2652 __ pop_i();
2653 if (!is_static) pop_and_check_object(Rclass);
2654 __ stb(Otos_i, Rclass, Roffset);
2655 if (!is_static) {
2656 patch_bytecode(Bytecodes::_fast_bputfield, G3_scratch, G4_scratch, true, byte_no);
2657 }
2658 __ ba(checkVolatile);
2659 __ delayed()->tst(Lscratch);
2660 }
2662 __ bind(notByte);
2663 // cmp(Rflags, ltos);
2664 __ br(Assembler::notEqual, false, Assembler::pt, notLong);
2665 __ delayed()->cmp(Rflags, ctos);
2667 // ltos
2668 {
2669 __ pop_l();
2670 if (!is_static) pop_and_check_object(Rclass);
2671 __ st_long(Otos_l, Rclass, Roffset);
2672 if (!is_static) {
2673 patch_bytecode(Bytecodes::_fast_lputfield, G3_scratch, G4_scratch, true, byte_no);
2674 }
2675 __ ba(checkVolatile);
2676 __ delayed()->tst(Lscratch);
2677 }
2679 __ bind(notLong);
2680 // cmp(Rflags, ctos);
2681 __ br(Assembler::notEqual, false, Assembler::pt, notChar);
2682 __ delayed()->cmp(Rflags, stos);
2684 // ctos (char)
2685 {
2686 __ pop_i();
2687 if (!is_static) pop_and_check_object(Rclass);
2688 __ sth(Otos_i, Rclass, Roffset);
2689 if (!is_static) {
2690 patch_bytecode(Bytecodes::_fast_cputfield, G3_scratch, G4_scratch, true, byte_no);
2691 }
2692 __ ba(checkVolatile);
2693 __ delayed()->tst(Lscratch);
2694 }
2696 __ bind(notChar);
2697 // cmp(Rflags, stos);
2698 __ br(Assembler::notEqual, false, Assembler::pt, notShort);
2699 __ delayed()->cmp(Rflags, ftos);
2701 // stos (short)
2702 {
2703 __ pop_i();
2704 if (!is_static) pop_and_check_object(Rclass);
2705 __ sth(Otos_i, Rclass, Roffset);
2706 if (!is_static) {
2707 patch_bytecode(Bytecodes::_fast_sputfield, G3_scratch, G4_scratch, true, byte_no);
2708 }
2709 __ ba(checkVolatile);
2710 __ delayed()->tst(Lscratch);
2711 }
2713 __ bind(notShort);
2714 // cmp(Rflags, ftos);
2715 __ br(Assembler::notZero, false, Assembler::pt, notFloat);
2716 __ delayed()->nop();
2718 // ftos
2719 {
2720 __ pop_f();
2721 if (!is_static) pop_and_check_object(Rclass);
2722 __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
2723 if (!is_static) {
2724 patch_bytecode(Bytecodes::_fast_fputfield, G3_scratch, G4_scratch, true, byte_no);
2725 }
2726 __ ba(checkVolatile);
2727 __ delayed()->tst(Lscratch);
2728 }
2730 __ bind(notFloat);
2732 // dtos
2733 {
2734 __ pop_d();
2735 if (!is_static) pop_and_check_object(Rclass);
2736 __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
2737 if (!is_static) {
2738 patch_bytecode(Bytecodes::_fast_dputfield, G3_scratch, G4_scratch, true, byte_no);
2739 }
2740 }
2742 __ bind(checkVolatile);
2743 __ tst(Lscratch);
2745 if (__ membar_has_effect(write_bits)) {
2746 // condition codes were set by the tst(Lscratch) above
2747 __ br(Assembler::zero, false, Assembler::pt, exit);
2748 __ delayed()->nop();
2749 volatile_barrier(Assembler::StoreLoad);
2750 __ bind(exit);
2751 }
2752 }
2754 void TemplateTable::fast_storefield(TosState state) {
2755 transition(state, vtos);
2756 Register Rcache = G3_scratch;
2757 Register Rclass = Rcache;
2758 Register Roffset= G4_scratch;
2759 Register Rflags = G1_scratch;
2760 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2762 jvmti_post_fast_field_mod();
2764 __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 1);
2766 Assembler::Membar_mask_bits read_bits =
2767 Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore);
2768 Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad;
2770 Label notVolatile, checkVolatile, exit;
2771 if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
2772 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
2773 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
2774 __ and3(Rflags, Lscratch, Lscratch);
2775 if (__ membar_has_effect(read_bits)) {
2776 __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile);
2777 volatile_barrier(read_bits);
2778 __ bind(notVolatile);
2779 }
2780 }
2782 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
2783 pop_and_check_object(Rclass);
2785 switch (bytecode()) {
2786 case Bytecodes::_fast_bputfield: __ stb(Otos_i, Rclass, Roffset); break;
2787 case Bytecodes::_fast_cputfield: /* fall through */
2788 case Bytecodes::_fast_sputfield: __ sth(Otos_i, Rclass, Roffset); break;
2789 case Bytecodes::_fast_iputfield: __ st(Otos_i, Rclass, Roffset); break;
2790 case Bytecodes::_fast_lputfield: __ st_long(Otos_l, Rclass, Roffset); break;
2791 case Bytecodes::_fast_fputfield:
2792 __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
2793 break;
2794 case Bytecodes::_fast_dputfield:
2795 __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
2796 break;
2797 case Bytecodes::_fast_aputfield:
2798 do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
2799 break;
2800 default:
2801 ShouldNotReachHere();
2802 }
2804 if (__ membar_has_effect(write_bits)) {
2805 __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, exit);
2806 volatile_barrier(Assembler::StoreLoad);
2807 __ bind(exit);
2808 }
2809 }
2812 void TemplateTable::putfield(int byte_no) {
2813 putfield_or_static(byte_no, false);
2814 }
2816 void TemplateTable::putstatic(int byte_no) {
2817 putfield_or_static(byte_no, true);
2818 }
2821 void TemplateTable::fast_xaccess(TosState state) {
2822 transition(vtos, state);
2823 Register Rcache = G3_scratch;
2824 Register Roffset = G4_scratch;
2825 Register Rflags = G4_scratch;
2826 Register Rreceiver = Lscratch;
2828 __ ld_ptr(Llocals, 0, Rreceiver);
2830 // access constant pool cache (is resolved)
2831 __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 2);
2832 __ ld_ptr(Rcache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset(), Roffset);
2833 __ add(Lbcp, 1, Lbcp); // needed to report exception at the correct bcp
2835 __ verify_oop(Rreceiver);
2836 __ null_check(Rreceiver);
2837 if (state == atos) {
2838 __ load_heap_oop(Rreceiver, Roffset, Otos_i);
2839 } else if (state == itos) {
2840 __ ld (Rreceiver, Roffset, Otos_i) ;
2841 } else if (state == ftos) {
2842 __ ldf(FloatRegisterImpl::S, Rreceiver, Roffset, Ftos_f);
2843 } else {
2844 ShouldNotReachHere();
2845 }
2847 Assembler::Membar_mask_bits membar_bits =
2848 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
2849 if (__ membar_has_effect(membar_bits)) {
2851 // Get is_volatile value in Rflags and check if membar is needed
2852 __ ld_ptr(Rcache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset(), Rflags);
2854 // Test volatile
2855 Label notVolatile;
2856 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
2857 __ btst(Rflags, Lscratch);
2858 __ br(Assembler::zero, false, Assembler::pt, notVolatile);
2859 __ delayed()->nop();
2860 volatile_barrier(membar_bits);
2861 __ bind(notVolatile);
2862 }
2864 __ interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
2865 __ sub(Lbcp, 1, Lbcp);
2866 }
2868 //----------------------------------------------------------------------------------------------------
2869 // Calls
2871 void TemplateTable::count_calls(Register method, Register temp) {
2872 // implemented elsewhere
2873 ShouldNotReachHere();
2874 }
2876 void TemplateTable::prepare_invoke(int byte_no,
2877 Register method, // linked method (or i-klass)
2878 Register ra, // return address
2879 Register index, // itable index, MethodType, etc.
2880 Register recv, // if caller wants to see it
2881 Register flags // if caller wants to test it
2882 ) {
2883 // determine flags
2884 const Bytecodes::Code code = bytecode();
2885 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
2886 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
2887 const bool is_invokehandle = code == Bytecodes::_invokehandle;
2888 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
2889 const bool is_invokespecial = code == Bytecodes::_invokespecial;
2890 const bool load_receiver = (recv != noreg);
2891 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
2892 assert(recv == noreg || recv == O0, "");
2893 assert(flags == noreg || flags == O1, "");
2895 // setup registers & access constant pool cache
2896 if (recv == noreg) recv = O0;
2897 if (flags == noreg) flags = O1;
2898 const Register temp = O2;
2899 assert_different_registers(method, ra, index, recv, flags, temp);
2901 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
2903 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
2905 // maybe push appendix to arguments
2906 if (is_invokedynamic || is_invokehandle) {
2907 Label L_no_push;
2908 __ set((1 << ConstantPoolCacheEntry::has_appendix_shift), temp);
2909 __ btst(flags, temp);
2910 __ br(Assembler::zero, false, Assembler::pt, L_no_push);
2911 __ delayed()->nop();
2912 // Push the appendix as a trailing parameter.
2913 // This must be done before we get the receiver,
2914 // since the parameter_size includes it.
2915 assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
2916 __ load_resolved_reference_at_index(temp, index);
2917 __ verify_oop(temp);
2918 __ push_ptr(temp); // push appendix (MethodType, CallSite, etc.)
2919 __ bind(L_no_push);
2920 }
2922 // load receiver if needed (after appendix is pushed so parameter size is correct)
2923 if (load_receiver) {
2924 __ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, temp); // get parameter size
2925 __ load_receiver(temp, recv); // __ argument_address uses Gargs but we need Lesp
2926 __ verify_oop(recv);
2927 }
2929 // compute return type
2930 __ srl(flags, ConstantPoolCacheEntry::tos_state_shift, ra);
2931 // Make sure we don't need to mask flags after the above shift
2932 ConstantPoolCacheEntry::verify_tos_state_shift();
2933 // load return address
2934 {
2935 const address table_addr = (is_invokeinterface || is_invokedynamic) ?
2936 (address)Interpreter::return_5_addrs_by_index_table() :
2937 (address)Interpreter::return_3_addrs_by_index_table();
2938 AddressLiteral table(table_addr);
2939 __ set(table, temp);
2940 __ sll(ra, LogBytesPerWord, ra);
2941 __ ld_ptr(Address(temp, ra), ra);
2942 }
2943 }
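// For reference, the return-address computation above as a C-style sketch
// (illustrative): one table entry per result type, with the 5-byte variant
// used for invokeinterface/invokedynamic:
//
//   address* table = (is_invokeinterface || is_invokedynamic)
//       ? Interpreter::return_5_addrs_by_index_table()
//       : Interpreter::return_3_addrs_by_index_table();
//   ra = table[flags >> ConstantPoolCacheEntry::tos_state_shift];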
2946 void TemplateTable::generate_vtable_call(Register Rrecv, Register Rindex, Register Rret) {
2947 Register Rtemp = G4_scratch;
2948 Register Rcall = Rindex;
2949 assert_different_registers(Rcall, G5_method, Gargs, Rret);
2951 // get target Method* & entry point
2952 __ lookup_virtual_method(Rrecv, Rindex, G5_method);
2953 __ call_from_interpreter(Rcall, Gargs, Rret);
2954 }
2956 void TemplateTable::invokevirtual(int byte_no) {
2957 transition(vtos, vtos);
2958 assert(byte_no == f2_byte, "use this argument");
2960 Register Rscratch = G3_scratch;
2961 Register Rtemp = G4_scratch;
2962 Register Rret = Lscratch;
2963 Register O0_recv = O0;
2964 Label notFinal;
2966 load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, true, false, false);
2967 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
2969 // Check for vfinal
2970 __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), G4_scratch);
2971 __ btst(Rret, G4_scratch);
2972 __ br(Assembler::zero, false, Assembler::pt, notFinal);
2973 __ delayed()->and3(Rret, 0xFF, G4_scratch); // gets number of parameters
2975 patch_bytecode(Bytecodes::_fast_invokevfinal, Rscratch, Rtemp);
2977 invokevfinal_helper(Rscratch, Rret);
2979 __ bind(notFinal);
2981 __ mov(G5_method, Rscratch); // better scratch register
2982 __ load_receiver(G4_scratch, O0_recv); // gets receiverOop
2983 // receiver is in O0_recv
2984 __ verify_oop(O0_recv);
2986 // get return address
2987 AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
2988 __ set(table, Rtemp);
2989 __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret); // get return type
2990 // Make sure we don't need to mask Rret after the above shift
2991 ConstantPoolCacheEntry::verify_tos_state_shift();
2992 __ sll(Rret, LogBytesPerWord, Rret);
2993 __ ld_ptr(Rtemp, Rret, Rret); // get return address
2995 // get receiver klass
2996 __ null_check(O0_recv, oopDesc::klass_offset_in_bytes());
2997 __ load_klass(O0_recv, O0_recv);
2998 __ verify_klass_ptr(O0_recv);
3000 __ profile_virtual_call(O0_recv, O4);
3002 generate_vtable_call(O0_recv, Rscratch, Rret);
3003 }
3005 void TemplateTable::fast_invokevfinal(int byte_no) {
3006 transition(vtos, vtos);
3007 assert(byte_no == f2_byte, "use this argument");
3009 load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Lscratch, true,
3010 /*is_invokevfinal*/true, false);
3011 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
3012 invokevfinal_helper(G3_scratch, Lscratch);
3013 }
3015 void TemplateTable::invokevfinal_helper(Register Rscratch, Register Rret) {
3016 Register Rtemp = G4_scratch;
3018 // Load receiver from stack slot
3019 __ ld_ptr(G5_method, in_bytes(Method::const_offset()), G4_scratch);
3020 __ lduh(G4_scratch, in_bytes(ConstMethod::size_of_parameters_offset()), G4_scratch);
3021 __ load_receiver(G4_scratch, O0);
3023 // receiver NULL check
3024 __ null_check(O0);
3026 __ profile_final_call(O4);
3028 // get return address
3029 AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
3030 __ set(table, Rtemp);
3031 __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret); // get return type
3032 // Make sure we don't need to mask Rret after the above shift
3033 ConstantPoolCacheEntry::verify_tos_state_shift();
3034 __ sll(Rret, LogBytesPerWord, Rret);
3035 __ ld_ptr(Rtemp, Rret, Rret); // get return address
3038 // do the call
3039 __ call_from_interpreter(Rscratch, Gargs, Rret);
3040 }
3043 void TemplateTable::invokespecial(int byte_no) {
3044 transition(vtos, vtos);
3045 assert(byte_no == f1_byte, "use this argument");
3047 const Register Rret = Lscratch;
3048 const Register O0_recv = O0;
3049 const Register Rscratch = G3_scratch;
3051 prepare_invoke(byte_no, G5_method, Rret, noreg, O0_recv); // get receiver also for null check
3052 __ null_check(O0_recv);
3054 // do the call
3055 __ profile_call(O4);
3056 __ call_from_interpreter(Rscratch, Gargs, Rret);
3057 }
3060 void TemplateTable::invokestatic(int byte_no) {
3061 transition(vtos, vtos);
3062 assert(byte_no == f1_byte, "use this argument");
3064 const Register Rret = Lscratch;
3065 const Register Rscratch = G3_scratch;
3067 prepare_invoke(byte_no, G5_method, Rret); // get f1 Method*
3069 // do the call
3070 __ profile_call(O4);
3071 __ call_from_interpreter(Rscratch, Gargs, Rret);
3072 }
3074 void TemplateTable::invokeinterface_object_method(Register RKlass,
3075 Register Rcall,
3076 Register Rret,
3077 Register Rflags) {
3078 Register Rscratch = G4_scratch;
3079 Register Rindex = Lscratch;
3081 assert_different_registers(Rscratch, Rindex, Rret);
3083 Label notFinal;
3085 // Check for vfinal
3086 __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), Rscratch);
3087 __ btst(Rflags, Rscratch);
3088 __ br(Assembler::zero, false, Assembler::pt, notFinal);
3089 __ delayed()->nop();
3091 __ profile_final_call(O4);
3093 // do the call - the index (f2) contains the Method*
3094 assert_different_registers(G5_method, Gargs, Rcall);
3095 __ mov(Rindex, G5_method);
3096 __ call_from_interpreter(Rcall, Gargs, Rret);
3097 __ bind(notFinal);
3099 __ profile_virtual_call(RKlass, O4);
3100 generate_vtable_call(RKlass, Rindex, Rret);
3101 }
3104 void TemplateTable::invokeinterface(int byte_no) {
3105 transition(vtos, vtos);
3106 assert(byte_no == f1_byte, "use this argument");
3108 const Register Rinterface = G1_scratch;
3109 const Register Rret = G3_scratch;
3110 const Register Rindex = Lscratch;
3111 const Register O0_recv = O0;
3112 const Register O1_flags = O1;
3113 const Register O2_Klass = O2;
3114 const Register Rscratch = G4_scratch;
3115 assert_different_registers(Rscratch, G5_method);
3117 prepare_invoke(byte_no, Rinterface, Rret, Rindex, O0_recv, O1_flags);
3119 // get receiver klass
3120 __ null_check(O0_recv, oopDesc::klass_offset_in_bytes());
3121 __ load_klass(O0_recv, O2_Klass);
3123 // Special case of invokeinterface called for virtual method of
3124 // java.lang.Object. See cpCacheOop.cpp for details.
3125 // This code isn't produced by javac, but could be produced by
3126 // another compliant Java compiler.
3127 Label notMethod;
3128 __ set((1 << ConstantPoolCacheEntry::is_forced_virtual_shift), Rscratch);
3129 __ btst(O1_flags, Rscratch);
3130 __ br(Assembler::zero, false, Assembler::pt, notMethod);
3131 __ delayed()->nop();
3133 invokeinterface_object_method(O2_Klass, Rinterface, Rret, O1_flags);
3135 __ bind(notMethod);
3137 __ profile_virtual_call(O2_Klass, O4);
3139 //
3140 // find entry point to call
3141 //
3143 // compute start of first itableOffsetEntry (which is at end of vtable)
3144 const int base = InstanceKlass::vtable_start_offset() * wordSize;
3145 Label search;
3146 Register Rtemp = O1_flags;
3148 __ ld(O2_Klass, InstanceKlass::vtable_length_offset() * wordSize, Rtemp);
3149 if (align_object_offset(1) > 1) {
3150 __ round_to(Rtemp, align_object_offset(1));
3151 }
3152 __ sll(Rtemp, LogBytesPerWord, Rtemp); // Rtemp *= wordSize
3153 if (Assembler::is_simm13(base)) {
3154 __ add(Rtemp, base, Rtemp);
3155 } else {
3156 __ set(base, Rscratch);
3157 __ add(Rscratch, Rtemp, Rtemp);
3158 }
3159 __ add(O2_Klass, Rtemp, Rscratch);
3161 __ bind(search);
3163 __ ld_ptr(Rscratch, itableOffsetEntry::interface_offset_in_bytes(), Rtemp);
3164 {
3165 Label ok;
3167 // Check that entry is non-null. Null entries are probably a bytecode
3168 // problem. If the interface isn't implemented by the receiver class,
3169 // the VM should throw IncompatibleClassChangeError. linkResolver checks
3170 // this too but that's only if the entry isn't already resolved, so we
3171 // need to check again.
3172 __ br_notnull_short( Rtemp, Assembler::pt, ok);
3173 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
3174 __ should_not_reach_here();
3175 __ bind(ok);
3176 }
3178 __ cmp(Rinterface, Rtemp);
3179 __ brx(Assembler::notEqual, true, Assembler::pn, search);
3180 __ delayed()->add(Rscratch, itableOffsetEntry::size() * wordSize, Rscratch);
3182 // entry found and Rscratch points to it
3183 __ ld(Rscratch, itableOffsetEntry::offset_offset_in_bytes(), Rscratch);
3185 assert(itableMethodEntry::method_offset_in_bytes() == 0, "adjust instruction below");
3186 __ sll(Rindex, exact_log2(itableMethodEntry::size() * wordSize), Rindex); // Rindex *= itable entry size in bytes
3187 __ add(Rscratch, Rindex, Rscratch);
3188 __ ld_ptr(O2_Klass, Rscratch, G5_method);
3190 // Check for abstract method error.
3191 {
3192 Label ok;
3193 __ br_notnull_short(G5_method, Assembler::pt, ok);
3194 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3195 __ should_not_reach_here();
3196 __ bind(ok);
3197 }
3199 Register Rcall = Rinterface;
3200 assert_different_registers(Rcall, G5_method, Gargs, Rret);
3202 __ call_from_interpreter(Rcall, Gargs, Rret);
3203 }
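// For reference, a C-style sketch of the itable walk above (illustrative;
// it mirrors the assembly rather than the exact klassVtable.hpp layout):
//
//   oe = (itableOffsetEntry*) (klass + vtable_start + vtable_len * wordSize);
//   for (;;) {
//     Klass* k = oe->interface();
//     if (k == NULL) throw IncompatibleClassChangeError;
//     if (k == interface_klass) break;
//     oe++;
//   }
//   Method* m = *(Method**) (klass + oe->offset()
//                            + itable_index * itableMethodEntry_size);
//   if (m == NULL) throw AbstractMethodError;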
3205 void TemplateTable::invokehandle(int byte_no) {
3206 transition(vtos, vtos);
3207 assert(byte_no == f1_byte, "use this argument");
3209 if (!EnableInvokeDynamic) {
3210 // rewriter does not generate this bytecode
3211 __ should_not_reach_here();
3212 return;
3213 }
3215 const Register Rret = Lscratch;
3216 const Register G4_mtype = G4_scratch;
3217 const Register O0_recv = O0;
3218 const Register Rscratch = G3_scratch;
3220 prepare_invoke(byte_no, G5_method, Rret, G4_mtype, O0_recv);
3221 __ null_check(O0_recv);
3223 // G4: MethodType object (from cpool->resolved_references[f1], if necessary)
3224 // G5: MH.invokeExact_MT method (from f2)
3226 // Note: G4_mtype is already pushed (if necessary) by prepare_invoke
3228 // do the call
3229 __ verify_oop(G4_mtype);
3230 __ profile_final_call(O4); // FIXME: profile the LambdaForm also
3231 __ call_from_interpreter(Rscratch, Gargs, Rret);
3232 }
3235 void TemplateTable::invokedynamic(int byte_no) {
3236 transition(vtos, vtos);
3237 assert(byte_no == f1_byte, "use this argument");
3239 if (!EnableInvokeDynamic) {
3240 // We should not encounter this bytecode if !EnableInvokeDynamic.
3241 // The verifier will stop it. However, if we get past the verifier,
3242 // this will stop the thread in a reasonable way, without crashing the JVM.
3243 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3244 InterpreterRuntime::throw_IncompatibleClassChangeError));
3245 // the call_VM checks for exception, so we should never return here.
3246 __ should_not_reach_here();
3247 return;
3248 }
3250 const Register Rret = Lscratch;
3251 const Register G4_callsite = G4_scratch;
3252 const Register Rscratch = G3_scratch;
3254 prepare_invoke(byte_no, G5_method, Rret, G4_callsite);
3256 // G4: CallSite object (from cpool->resolved_references[f1])
3257 // G5: MH.linkToCallSite method (from f2)
3259 // Note: G4_callsite is already pushed by prepare_invoke
3261 // %%% should make a type profile for any invokedynamic that takes a ref argument
3262 // profile this call
3263 __ profile_call(O4);
3265 // do the call
3266 __ verify_oop(G4_callsite);
3267 __ call_from_interpreter(Rscratch, Gargs, Rret);
3268 }
3271 //----------------------------------------------------------------------------------------------------
3272 // Allocation
3274 void TemplateTable::_new() {
3275 transition(vtos, atos);
3277 Label slow_case;
3278 Label done;
3279 Label initialize_header;
3280 Label initialize_object; // including clearing the fields
3282 Register RallocatedObject = Otos_i;
3283 Register RinstanceKlass = O1;
3284 Register Roffset = O3;
3285 Register Rscratch = O4;
3287 __ get_2_byte_integer_at_bcp(1, Rscratch, Roffset, InterpreterMacroAssembler::Unsigned);
3288 __ get_cpool_and_tags(Rscratch, G3_scratch);
3289 // make sure the class we're about to instantiate has been resolved
3290 // This is done before loading InstanceKlass to be consistent with the order
3291 // in which the ConstantPool is updated (see ConstantPool::klass_at_put)
3292 __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
3293 __ ldub(G3_scratch, Roffset, G3_scratch);
3294 __ cmp(G3_scratch, JVM_CONSTANT_Class);
3295 __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
3296 __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
3297 // get InstanceKlass
3298 //__ sll(Roffset, LogBytesPerWord, Roffset); // executed in delay slot
3299 __ add(Roffset, sizeof(ConstantPool), Roffset);
3300 __ ld_ptr(Rscratch, Roffset, RinstanceKlass);
3302 // make sure klass is fully initialized:
3303 __ ldub(RinstanceKlass, in_bytes(InstanceKlass::init_state_offset()), G3_scratch);
3304 __ cmp(G3_scratch, InstanceKlass::fully_initialized);
3305 __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
3306 __ delayed()->ld(RinstanceKlass, in_bytes(Klass::layout_helper_offset()), Roffset);
3308 // get instance_size in InstanceKlass (already aligned)
3309 //__ ld(RinstanceKlass, in_bytes(Klass::layout_helper_offset()), Roffset);
3311 // make sure klass has no finalizer, is not abstract, not an interface, and not java/lang/Class
3312 __ btst(Klass::_lh_instance_slow_path_bit, Roffset);
3313 __ br(Assembler::notZero, false, Assembler::pn, slow_case);
3314 __ delayed()->nop();
3316 // allocate the instance
3317 // 1) Try to allocate in the TLAB
3318 // 2) if that fails, and the TLAB is not yet full enough to discard, allocate in the shared Eden
3319 // 3) if the above fails (or is not applicable), go to a slow case
3320 // (creates a new TLAB, etc.)
3322 const bool allow_shared_alloc =
3323 Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
3325 if (UseTLAB) {
3326 Register RoldTopValue = RallocatedObject;
3327 Register RtlabWasteLimitValue = G3_scratch;
3328 Register RnewTopValue = G1_scratch;
3329 Register RendValue = Rscratch;
3330 Register RfreeValue = RnewTopValue;
3332 // check if we can allocate in the TLAB
3333 __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), RoldTopValue); // sets up RallocatedObject
3334 __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), RendValue);
3335 __ add(RoldTopValue, Roffset, RnewTopValue);
3337 // if there is enough space, we do not CAS and do not clear
3338 __ cmp(RnewTopValue, RendValue);
3339 if (ZeroTLAB) {
3340 // the fields have already been cleared
3341 __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_header);
3342 } else {
3343 // initialize both the header and fields
3344 __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_object);
3345 }
3346 __ delayed()->st_ptr(RnewTopValue, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
3348 if (allow_shared_alloc) {
3349 // Check if tlab should be discarded (refill_waste_limit >= free)
3350 __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), RtlabWasteLimitValue);
3351 __ sub(RendValue, RoldTopValue, RfreeValue);
3352 #ifdef _LP64
3353 __ srlx(RfreeValue, LogHeapWordSize, RfreeValue);
3354 #else
3355 __ srl(RfreeValue, LogHeapWordSize, RfreeValue);
3356 #endif
3357 __ cmp_and_brx_short(RtlabWasteLimitValue, RfreeValue, Assembler::greaterEqualUnsigned, Assembler::pt, slow_case); // tlab waste is small
3359 // increment waste limit to prevent getting stuck on this slow path
3360 __ add(RtlabWasteLimitValue, ThreadLocalAllocBuffer::refill_waste_limit_increment(), RtlabWasteLimitValue);
3361 __ st_ptr(RtlabWasteLimitValue, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
3362 } else {
3363 // No allocation in the shared eden.
3364 __ ba_short(slow_case);
3365 }
3366 }
3368 // Allocation in the shared Eden
3369 if (allow_shared_alloc) {
3370 Register RoldTopValue = G1_scratch;
3371 Register RtopAddr = G3_scratch;
3372 Register RnewTopValue = RallocatedObject;
3373 Register RendValue = Rscratch;
3375 __ set((intptr_t)Universe::heap()->top_addr(), RtopAddr);
3377 Label retry;
3378 __ bind(retry);
3379 __ set((intptr_t)Universe::heap()->end_addr(), RendValue);
3380 __ ld_ptr(RendValue, 0, RendValue);
3381 __ ld_ptr(RtopAddr, 0, RoldTopValue);
3382 __ add(RoldTopValue, Roffset, RnewTopValue);
3384 // RnewTopValue contains the top address after the new object
3385 // has been allocated.
3386 __ cmp_and_brx_short(RnewTopValue, RendValue, Assembler::greaterUnsigned, Assembler::pn, slow_case);
3388 __ cas_ptr(RtopAddr, RoldTopValue, RnewTopValue);
3390 // if someone beat us on the allocation, try again, otherwise continue
3391 __ cmp_and_brx_short(RoldTopValue, RnewTopValue, Assembler::notEqual, Assembler::pn, retry);
3393 // bump total bytes allocated by this thread
3394 // RoldTopValue and RtopAddr are dead, so can use G1 and G3
3395 __ incr_allocated_bytes(Roffset, G1_scratch, G3_scratch);
3396 }
3398 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
3399 // clear object fields
3400 __ bind(initialize_object);
3401 __ deccc(Roffset, sizeof(oopDesc));
3402 __ br(Assembler::zero, false, Assembler::pt, initialize_header);
3403 __ delayed()->add(RallocatedObject, sizeof(oopDesc), G3_scratch);
3405 // initialize remaining object fields
3406 if (UseBlockZeroing) {
3407 // Use BIS for zeroing
3408 __ bis_zeroing(G3_scratch, Roffset, G1_scratch, initialize_header);
3409 } else {
3410 Label loop;
3411 __ subcc(Roffset, wordSize, Roffset);
3412 __ bind(loop);
3413 //__ subcc(Roffset, wordSize, Roffset); // executed above loop or in delay slot
3414 __ st_ptr(G0, G3_scratch, Roffset);
3415 __ br(Assembler::notEqual, false, Assembler::pt, loop);
3416 __ delayed()->subcc(Roffset, wordSize, Roffset);
3417 }
3418 __ ba_short(initialize_header);
3419 }
3421 // slow case
3422 __ bind(slow_case);
3423 __ get_2_byte_integer_at_bcp(1, G3_scratch, O2, InterpreterMacroAssembler::Unsigned);
3424 __ get_constant_pool(O1);
3426 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), O1, O2);
3428 __ ba_short(done);
3430 // Initialize the header: mark, klass
3431 __ bind(initialize_header);
3433 if (UseBiasedLocking) {
3434 __ ld_ptr(RinstanceKlass, in_bytes(Klass::prototype_header_offset()), G4_scratch);
3435 } else {
3436 __ set((intptr_t)markOopDesc::prototype(), G4_scratch);
3437 }
3438 __ st_ptr(G4_scratch, RallocatedObject, oopDesc::mark_offset_in_bytes()); // mark
3439 __ store_klass_gap(G0, RallocatedObject); // klass gap if compressed
3440 __ store_klass(RinstanceKlass, RallocatedObject); // klass (last for cms)
3442 {
3443 SkipIfEqual skip_if(
3444 _masm, G4_scratch, &DTraceAllocProbes, Assembler::zero);
3445 // Trigger dtrace event
3446 __ push(atos);
3447 __ call_VM_leaf(noreg,
3448 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), O0);
3449 __ pop(atos);
3450 }
3452 // continue
3453 __ bind(done);
3454 }
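// For reference, a C-style sketch of the TLAB fast path generated above
// (illustrative; the real accessors live in ThreadLocalAllocBuffer):
//
//   HeapWord* obj     = thread->tlab_top();
//   HeapWord* new_top = obj + instance_size;   // Roffset from layout_helper
//   if (new_top <= thread->tlab_end()) {
//     thread->set_tlab_top(new_top);           // no CAS needed inside a TLAB
//     clear the fields (unless ZeroTLAB), then install mark word and klass;
//   } else {
//     CAS loop on the shared eden top, else InterpreterRuntime::_new(...);
//   }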
3458 void TemplateTable::newarray() {
3459 transition(itos, atos);
3460 __ ldub(Lbcp, 1, O1);
3461 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), O1, Otos_i);
3462 }
3465 void TemplateTable::anewarray() {
3466 transition(itos, atos);
3467 __ get_constant_pool(O1);
3468 __ get_2_byte_integer_at_bcp(1, G4_scratch, O2, InterpreterMacroAssembler::Unsigned);
3469 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), O1, O2, Otos_i);
3470 }
3473 void TemplateTable::arraylength() {
3474 transition(atos, itos);
3475 Label ok;
3476 __ verify_oop(Otos_i);
3477 __ tst(Otos_i);
3478 __ throw_if_not_1_x( Assembler::notZero, ok );
3479 __ delayed()->ld(Otos_i, arrayOopDesc::length_offset_in_bytes(), Otos_i);
3480 __ throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
3481 }
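// Sketch of the semantics generated above; throw_NullPointerException()
// is an illustrative helper, and in the real code the length load sits in
// the null-check branch's delay slot:
//
//   int arraylength(arrayOop a) {
//     if (a == NULL) throw_NullPointerException();
//     return *(int*)((char*)a + arrayOopDesc::length_offset_in_bytes());
//   }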
3484 void TemplateTable::checkcast() {
3485 transition(atos, atos);
3486 Label done, is_null, quicked, cast_ok, resolved;
3487 Register Roffset = G1_scratch;
3488 Register RobjKlass = O5;
3489 Register RspecifiedKlass = O4;
3491 // Check for casting a NULL
3492 __ br_null_short(Otos_i, Assembler::pn, is_null);
3494 // Get value klass in RobjKlass
3495 __ load_klass(Otos_i, RobjKlass); // get value klass
3497 // Get constant pool tag
3498 __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);
3500 // See if the checkcast has been quickened
3501 __ get_cpool_and_tags(Lscratch, G3_scratch);
3502 __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
3503 __ ldub(G3_scratch, Roffset, G3_scratch);
3504 __ cmp(G3_scratch, JVM_CONSTANT_Class);
3505 __ br(Assembler::equal, true, Assembler::pt, quicked);
3506 __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
3508 __ push_ptr(); // save receiver for result, and for GC
3509 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3510 __ get_vm_result_2(RspecifiedKlass);
3511 __ pop_ptr(Otos_i, G3_scratch); // restore receiver
3513 __ ba_short(resolved);
3515 // Extract target class from constant pool
3516 __ bind(quicked);
3517 __ add(Roffset, sizeof(ConstantPool), Roffset);
3518 __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
3519 __ bind(resolved);
3520 __ load_klass(Otos_i, RobjKlass); // get value klass
3522 // Generate a fast subtype check. Branch to cast_ok if no
3523 // failure. Throw exception if failure.
3524 __ gen_subtype_check( RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, cast_ok );
3526 // Not a subtype; so must throw exception
3527 __ throw_if_not_x( Assembler::never, Interpreter::_throw_ClassCastException_entry, G3_scratch );
3529 __ bind(cast_ok);
3531 if (ProfileInterpreter) {
3532 __ ba_short(done);
3533 }
3534 __ bind(is_null);
3535 __ profile_null_seen(G3_scratch);
3536 __ bind(done);
3537 }
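// Overall flow of checkcast, as pseudo-code (a sketch; cpool_entry is an
// illustrative name for the quickened constant-pool load above):
//
//   if (obj != NULL) {
//     Klass* target = quickened ? cpool_entry(index)   // fast path
//                               : quicken_io_cc();     // resolve first
//     if (!obj_klass->is_subtype_of(target))
//       throw ClassCastException;                      // subtype check failed
//   }
//   // a NULL reference always passes checkcast unchanged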
3540 void TemplateTable::instanceof() {
3541 Label done, is_null, quicked, resolved;
3542 transition(atos, itos);
3543 Register Roffset = G1_scratch;
3544 Register RobjKlass = O5;
3545 Register RspecifiedKlass = O4;
3547 // Check for a NULL object
3548 __ br_null_short(Otos_i, Assembler::pt, is_null);
3550 // Get value klass in RobjKlass
3551 __ load_klass(Otos_i, RobjKlass); // get value klass
3553 // Get constant pool tag
3554 __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);
3556 // See if the instanceof has been quickened
3557 __ get_cpool_and_tags(Lscratch, G3_scratch);
3558 __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
3559 __ ldub(G3_scratch, Roffset, G3_scratch);
3560 __ cmp(G3_scratch, JVM_CONSTANT_Class);
3561 __ br(Assembler::equal, true, Assembler::pt, quicked);
3562 __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
3564 __ push_ptr(); // save receiver for result, and for GC
3565 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3566 __ get_vm_result_2(RspecifiedKlass);
3567 __ pop_ptr(Otos_i, G3_scratch); // restore receiver
3569 __ ba_short(resolved);
3571 // Extract target class from constant pool
3572 __ bind(quicked);
3573 __ add(Roffset, sizeof(ConstantPool), Roffset);
3574 __ get_constant_pool(Lscratch);
3575 __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
3576 __ bind(resolved);
3577 __ load_klass(Otos_i, RobjKlass); // get value klass
3579 // Generate a fast subtype check. Branch to done if no
3580 // failure. Return 0 if failure.
3581 __ or3(G0, 1, Otos_i); // set result assuming quick tests succeed
3582 __ gen_subtype_check( RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, done );
3583 // Not a subtype; return 0;
3584 __ clr( Otos_i );
3586 if (ProfileInterpreter) {
3587 __ ba_short(done);
3588 }
3589 __ bind(is_null);
3590 __ profile_null_seen(G3_scratch);
3591 __ bind(done);
3592 }
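// instanceof shares the checkcast structure; only the way the subtype
// check's outcome is consumed differs: Otos_i is preset to 1 and cleared
// on failure instead of throwing. Pseudo-code sketch:
//
//   int instanceof(oop obj, Klass* target) {
//     if (obj == NULL) return 0;                 // null is never an instance
//     return obj_klass->is_subtype_of(target) ? 1 : 0;
//   }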
3594 void TemplateTable::_breakpoint() {
3596 // Note: We get here even if we are single stepping.
3597 // jbug insists on setting breakpoints at every bytecode
3598 // even if we are in single step mode.
3600 transition(vtos, vtos);
3601 // get the unpatched byte code
3602 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), Lmethod, Lbcp);
3603 __ mov(O0, Lbyte_code);
3605 // post the breakpoint event
3606 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), Lmethod, Lbcp);
3608 // complete the execution of original bytecode
3609 __ dispatch_normal(vtos);
3610 }
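// Sketch of the breakpoint protocol above (simplified signatures, not the
// exact InterpreterRuntime entry points):
//
//   Bytecodes::Code orig = get_original_bytecode_at(method, bcp); // unpatched bc
//   post_breakpoint_event(method, bcp);                           // JVMTI event
//   dispatch(orig);                   // run the original bytecode as normal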
3613 //----------------------------------------------------------------------------------------------------
3614 // Exceptions
3616 void TemplateTable::athrow() {
3617 transition(atos, vtos);
3619 // This works because the exception is cached in Otos_i, which is the same
3620 // as O0, which is what throw_exception_entry expects.
3621 assert(Otos_i == Oexception, "see explanation above");
3623 __ verify_oop(Otos_i);
3624 __ null_check(Otos_i);
3625 __ throw_if_not_x(Assembler::never, Interpreter::throw_exception_entry(), G3_scratch);
3626 }
3629 //----------------------------------------------------------------------------------------------------
3630 // Synchronization
3633 // See frame_sparc.hpp for monitor block layout.
3634 // Monitor elements are dynamically allocated by growing stack as needed.
3636 void TemplateTable::monitorenter() {
3637 transition(atos, vtos);
3638 __ verify_oop(Otos_i);
3639 // Try to acquire a lock on the object.
3640 // Repeat until we succeed (i.e., until
3641 // monitorenter returns true).
3643 { Label ok;
3644 __ tst(Otos_i);
3645 __ throw_if_not_1_x( Assembler::notZero, ok);
3646 __ delayed()->mov(Otos_i, Lscratch); // save obj
3647 __ throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
3648 }
3650 assert(O0 == Otos_i, "Be sure where the object to lock is");
3652 // find a free slot in the monitor block
3655 // initialize entry pointer
3656 __ clr(O1); // points to free slot or NULL
3658 {
3659 Label entry, loop, exit;
3660 __ add( __ top_most_monitor(), O2 ); // last one to check
3661 __ ba( entry );
3662 __ delayed()->mov( Lmonitors, O3 ); // first one to check
3665 __ bind( loop );
3667 __ verify_oop(O4); // verify each monitor's oop
3668 __ tst(O4); // is this entry unused?
3669 __ movcc( Assembler::zero, false, Assembler::ptr_cc, O3, O1);
3671 __ cmp(O4, O0); // check if current entry is for same object
3672 __ brx( Assembler::equal, false, Assembler::pn, exit );
3673 __ delayed()->inc( O3, frame::interpreter_frame_monitor_size() * wordSize ); // check next one
3675 __ bind( entry );
3677 __ cmp( O3, O2 );
3678 __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, loop );
3679 __ delayed()->ld_ptr(O3, BasicObjectLock::obj_offset_in_bytes(), O4);
3681 __ bind( exit );
3682 }
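// The scan above walks the frame's monitor block once, remembering a free
// slot and stopping early if this object already has an entry. C++ sketch
// (BasicObjectLock* used loosely for the stack-allocated slots):
//
//   BasicObjectLock* free_slot = NULL;                         // O1
//   for (BasicObjectLock* m = monitors; m <= top_most; m++) {  // O3 .. O2
//     if (m->obj() == NULL) free_slot = m;     // conditional move above
//     if (m->obj() == lock_obj) break;         // entry for same object
//   }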
3684 { Label allocated;
3686 // found free slot?
3687 __ br_notnull_short(O1, Assembler::pn, allocated);
3689 __ add_monitor_to_stack( false, O2, O3 );
3690 __ mov(Lmonitors, O1);
3692 __ bind(allocated);
3693 }
3695 // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
3696 // The object has already been popped from the stack, so the expression stack looks correct.
3697 __ inc(Lbcp);
3699 __ st_ptr(O0, O1, BasicObjectLock::obj_offset_in_bytes()); // store object
3700 __ lock_object(O1, O0);
3702 // check if there's enough space on the stack for the monitors after locking
3703 __ generate_stack_overflow_check(0);
3705 // The bcp has already been incremented. Just need to dispatch to next instruction.
3706 __ dispatch_next(vtos);
3707 }
3710 void TemplateTable::monitorexit() {
3711 transition(atos, vtos);
3712 __ verify_oop(Otos_i);
3713 __ tst(Otos_i);
3714 __ throw_if_not_x( Assembler::notZero, Interpreter::_throw_NullPointerException_entry, G3_scratch );
3716 assert(O0 == Otos_i, "just checking");
3718 { Label entry, loop, found;
3719 __ add( __ top_most_monitor(), O2 ); // last one to check
3720 __ ba(entry);
3721 // Use Lscratch to hold the monitor element to check; start with the most recent monitor.
3722 // By using a local register it survives the call to the C routine.
3723 __ delayed()->mov( Lmonitors, Lscratch );
3725 __ bind( loop );
3727 __ verify_oop(O4); // verify each monitor's oop
3728 __ cmp(O4, O0); // check if current entry is for desired object
3729 __ brx( Assembler::equal, true, Assembler::pt, found );
3730 __ delayed()->mov(Lscratch, O1); // pass found entry as argument to monitorexit
3732 __ inc( Lscratch, frame::interpreter_frame_monitor_size() * wordSize ); // advance to next
3734 __ bind( entry );
3736 __ cmp( Lscratch, O2 );
3737 __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, loop );
3738 __ delayed()->ld_ptr(Lscratch, BasicObjectLock::obj_offset_in_bytes(), O4);
3740 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
3741 __ should_not_reach_here();
3743 __ bind(found);
3744 }
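// monitorexit runs the same scan shape as monitorenter, but a miss is an
// error rather than a reason to grow the monitor block. Sketch:
//
//   BasicObjectLock* found = NULL;
//   for (BasicObjectLock* m = monitors; m <= top_most; m++) {
//     if (m->obj() == lock_obj) { found = m; break; }
//   }
//   if (found == NULL) throw_illegal_monitor_state_exception();
//   unlock_object(found);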
3745 __ unlock_object(O1);
3746 }
3749 //----------------------------------------------------------------------------------------------------
3750 // Wide instructions
3752 void TemplateTable::wide() {
3753 transition(vtos, vtos);
3754 __ ldub(Lbcp, 1, G3_scratch); // get next bc
3755 __ sll(G3_scratch, LogBytesPerWord, G3_scratch);
3756 AddressLiteral ep(Interpreter::_wentry_point);
3757 __ set(ep, G4_scratch);
3758 __ ld_ptr(G4_scratch, G3_scratch, G3_scratch);
3759 __ jmp(G3_scratch, G0);
3760 __ delayed()->nop();
3761 // Note: the Lbcp increment step is part of the individual wide bytecode implementations
3762 }
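// Sketch of the wide dispatch: the next bytecode indexes a table of
// wide-variant entry points and control transfers there directly
// (goto_address is illustrative shorthand for the jmp above):
//
//   address target = Interpreter::_wentry_point[next_bc];  // ld_ptr above
//   goto_address(target);   // each handler does its own Lbcp increment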
3765 //----------------------------------------------------------------------------------------------------
3766 // Multi arrays
3768 void TemplateTable::multianewarray() {
3769 transition(vtos, atos);
3770 // put ndims * wordSize into Lscratch
3771 __ ldub( Lbcp, 3, Lscratch);
3772 __ sll( Lscratch, Interpreter::logStackElementSize, Lscratch);
3773 // Lesp points past last_dim, so set O1 to the first_dim address
3774 __ add( Lesp, Lscratch, O1);
3775 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), O1);
3776 __ add( Lesp, Lscratch, Lesp); // pop all dimensions off the stack
3777 }
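// Sketch of the operand-stack bookkeeping above (the expression stack
// grows downward, so Lesp plus ndims stack elements addresses the first
// dimension word):
//
//   int  ndims     = bcp[3];                        // operand byte
//   oop* first_dim = Lesp + ndims;                  // in stack elements
//   Otos_i = InterpreterRuntime::multianewarray(first_dim);
//   Lesp  += ndims;                                 // pop all dimensions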
3778 #endif /* !CC_INTERP */