Tue, 09 Apr 2013 17:17:41 -0400
8010862: The Method counter fields used for profiling can be allocated lazily.
Summary: Allocate the method's profiling-related metadata lazily, only when it is needed.
Reviewed-by: coleenp, roland
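
The heart of the change (in method.hpp/method.cpp, not this file) is that
MethodCounters is now created on first use rather than eagerly with every
Method. A minimal sketch of the pattern, assuming the real field and helper
names (_method_counters, build_method_counters) but with an illustrative,
not verbatim, body:

    // Sketch: return the counters, allocating them on first use.
    MethodCounters* Method::get_method_counters(TRAPS) {
      if (_method_counters == NULL) {
        // Allocates in metaspace; on failure an exception is pending and
        // NULL comes back, so callers must tolerate a NULL result.
        build_method_counters(this, THREAD);
      }
      return _method_counters;
    }

On the interpreter side (this file), counter updates go through the macro
assembler's get_method_counters(), which performs the same NULL check and
calls into the runtime to allocate before any counter is touched; see
branch() below.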
1 /*
2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "interpreter/interpreter.hpp"
27 #include "interpreter/interpreterRuntime.hpp"
28 #include "interpreter/templateTable.hpp"
29 #include "memory/universe.inline.hpp"
30 #include "oops/methodData.hpp"
31 #include "oops/objArrayKlass.hpp"
32 #include "oops/oop.inline.hpp"
33 #include "prims/methodHandles.hpp"
34 #include "runtime/sharedRuntime.hpp"
35 #include "runtime/stubRoutines.hpp"
36 #include "runtime/synchronizer.hpp"
37 #include "utilities/macros.hpp"
39 #ifndef CC_INTERP
40 #define __ _masm->
42 // Misc helpers
44 // Do an oop store like *(base + index + offset) = val
45 // index can be noreg,
46 static void do_oop_store(InterpreterMacroAssembler* _masm,
47 Register base,
48 Register index,
49 int offset,
50 Register val,
51 Register tmp,
52 BarrierSet::Name barrier,
53 bool precise) {
54 assert(tmp != val && tmp != base && tmp != index, "register collision");
55 assert(index == noreg || offset == 0, "only one offset");
56 switch (barrier) {
57 #if INCLUDE_ALL_GCS
58 case BarrierSet::G1SATBCT:
59 case BarrierSet::G1SATBCTLogging:
60 {
61 // Load and record the previous value.
62 __ g1_write_barrier_pre(base, index, offset,
63 noreg /* pre_val */,
64 tmp, true /*preserve_o_regs*/);
66 if (index == noreg ) {
67 assert(Assembler::is_simm13(offset), "fix this code");
68 __ store_heap_oop(val, base, offset);
69 } else {
70 __ store_heap_oop(val, base, index);
71 }
73 // No need for post barrier if storing NULL
74 if (val != G0) {
75 if (precise) {
76 if (index == noreg) {
77 __ add(base, offset, base);
78 } else {
79 __ add(base, index, base);
80 }
81 }
82 __ g1_write_barrier_post(base, val, tmp);
83 }
84 }
85 break;
86 #endif // INCLUDE_ALL_GCS
87 case BarrierSet::CardTableModRef:
88 case BarrierSet::CardTableExtension:
89 {
90 if (index == noreg ) {
91 assert(Assembler::is_simm13(offset), "fix this code");
92 __ store_heap_oop(val, base, offset);
93 } else {
94 __ store_heap_oop(val, base, index);
95 }
96 // No need for post barrier if storing NULL
97 if (val != G0) {
98 if (precise) {
99 if (index == noreg) {
100 __ add(base, offset, base);
101 } else {
102 __ add(base, index, base);
103 }
104 }
105 __ card_write_barrier_post(base, val, tmp);
106 }
107 }
108 break;
109 case BarrierSet::ModRef:
110 case BarrierSet::Other:
111 ShouldNotReachHere();
112 break;
113 default :
114 ShouldNotReachHere();
116 }
117 }
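// In C-like terms the card-table case above emits, roughly (a sketch of
// the semantics, not the exact instruction sequence; dirty_card_for is a
// placeholder name):
//
//   oop* slot = (oop*)(base + (index == noreg ? offset : index));
//   *slot = val;                          // store_heap_oop
//   if (val != NULL) {                    // null stores need no post barrier
//     // "precise" collectors need the exact slot address, hence the add
//     // of offset/index into base before the barrier call.
//     dirty_card_for(slot);               // card_write_barrier_post
//   }
//
// The G1 case additionally records the previous value in the slot before
// the store (the SATB pre-barrier).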
120 //----------------------------------------------------------------------------------------------------
121 // Platform-dependent initialization
123 void TemplateTable::pd_initialize() {
124 // (none)
125 }
128 //----------------------------------------------------------------------------------------------------
129 // Condition conversion
130 Assembler::Condition ccNot(TemplateTable::Condition cc) {
131 switch (cc) {
132 case TemplateTable::equal : return Assembler::notEqual;
133 case TemplateTable::not_equal : return Assembler::equal;
134 case TemplateTable::less : return Assembler::greaterEqual;
135 case TemplateTable::less_equal : return Assembler::greater;
136 case TemplateTable::greater : return Assembler::lessEqual;
137 case TemplateTable::greater_equal: return Assembler::less;
138 }
139 ShouldNotReachHere();
140 return Assembler::zero;
141 }
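// ccNot() deliberately returns the opposite condition: if_cmp() branches
// past the taken-branch code when the comparison fails, so e.g.
// TemplateTable::equal is emitted as a branch on Assembler::notEqual to
// the fall-through (not-taken) path.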
143 //----------------------------------------------------------------------------------------------------
144 // Miscellaneous helper routines
147 Address TemplateTable::at_bcp(int offset) {
148 assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
149 return Address(Lbcp, offset);
150 }
153 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
154 Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
155 int byte_no) {
156 // With sharing on, may need to test Method* flag.
157 if (!RewriteBytecodes) return;
158 Label L_patch_done;
160 switch (bc) {
161 case Bytecodes::_fast_aputfield:
162 case Bytecodes::_fast_bputfield:
163 case Bytecodes::_fast_cputfield:
164 case Bytecodes::_fast_dputfield:
165 case Bytecodes::_fast_fputfield:
166 case Bytecodes::_fast_iputfield:
167 case Bytecodes::_fast_lputfield:
168 case Bytecodes::_fast_sputfield:
169 {
170 // We skip bytecode quickening for putfield instructions when
171 // the put_code written to the constant pool cache is zero.
172 // This is required so that every execution of this instruction
173 // calls out to InterpreterRuntime::resolve_get_put to do
174 // additional, required work.
175 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
176 assert(load_bc_into_bc_reg, "we use bc_reg as temp");
177 __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1);
178 __ set(bc, bc_reg);
179 __ cmp_and_br_short(temp_reg, 0, Assembler::equal, Assembler::pn, L_patch_done); // don't patch
180 }
181 break;
182 default:
183 assert(byte_no == -1, "sanity");
184 if (load_bc_into_bc_reg) {
185 __ set(bc, bc_reg);
186 }
187 }
189 if (JvmtiExport::can_post_breakpoint()) {
190 Label L_fast_patch;
191 __ ldub(at_bcp(0), temp_reg);
192 __ cmp_and_br_short(temp_reg, Bytecodes::_breakpoint, Assembler::notEqual, Assembler::pt, L_fast_patch);
193 // perform the quickening, slowly, in the bowels of the breakpoint table
194 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), Lmethod, Lbcp, bc_reg);
195 __ ba_short(L_patch_done);
196 __ bind(L_fast_patch);
197 }
199 #ifdef ASSERT
200 Bytecodes::Code orig_bytecode = Bytecodes::java_code(bc);
201 Label L_okay;
202 __ ldub(at_bcp(0), temp_reg);
203 __ cmp(temp_reg, orig_bytecode);
204 __ br(Assembler::equal, false, Assembler::pt, L_okay);
205 __ delayed()->cmp(temp_reg, bc_reg);
206 __ br(Assembler::equal, false, Assembler::pt, L_okay);
207 __ delayed()->nop();
208 __ stop("patching the wrong bytecode");
209 __ bind(L_okay);
210 #endif
212 // patch bytecode
213 __ stb(bc_reg, at_bcp(0));
214 __ bind(L_patch_done);
215 }
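// Example of the effect: the first execution of
//   getfield #7
// runs the slow template, resolves the field, and then stb's the quickened
// opcode (e.g. fast_igetfield) over the original one, so every later
// execution dispatches straight to the fast template. For the putfield
// family the patch is skipped while the cached put_code is still zero, so
// resolution (and its required checks) reruns until it succeeds.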
217 //----------------------------------------------------------------------------------------------------
218 // Individual instructions
220 void TemplateTable::nop() {
221 transition(vtos, vtos);
222 // nothing to do
223 }
225 void TemplateTable::shouldnotreachhere() {
226 transition(vtos, vtos);
227 __ stop("shouldnotreachhere bytecode");
228 }
230 void TemplateTable::aconst_null() {
231 transition(vtos, atos);
232 __ clr(Otos_i);
233 }
236 void TemplateTable::iconst(int value) {
237 transition(vtos, itos);
238 __ set(value, Otos_i);
239 }
242 void TemplateTable::lconst(int value) {
243 transition(vtos, ltos);
244 assert(value >= 0, "check this code");
245 #ifdef _LP64
246 __ set(value, Otos_l);
247 #else
248 __ set(value, Otos_l2);
249 __ clr( Otos_l1);
250 #endif
251 }
254 void TemplateTable::fconst(int value) {
255 transition(vtos, ftos);
256 static float zero = 0.0, one = 1.0, two = 2.0;
257 float* p;
258 switch( value ) {
259 default: ShouldNotReachHere();
260 case 0: p = &zero; break;
261 case 1: p = &one; break;
262 case 2: p = &two; break;
263 }
264 AddressLiteral a(p);
265 __ sethi(a, G3_scratch);
266 __ ldf(FloatRegisterImpl::S, G3_scratch, a.low10(), Ftos_f);
267 }
270 void TemplateTable::dconst(int value) {
271 transition(vtos, dtos);
272 static double zero = 0.0, one = 1.0;
273 double* p;
274 switch( value ) {
275 default: ShouldNotReachHere();
276 case 0: p = &zero; break;
277 case 1: p = &one; break;
278 }
279 AddressLiteral a(p);
280 __ sethi(a, G3_scratch);
281 __ ldf(FloatRegisterImpl::D, G3_scratch, a.low10(), Ftos_d);
282 }
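// sethi/low10 is the standard SPARC idiom for materializing a 32-bit
// address in two instructions: sethi writes the upper 22 bits of the
// literal's address into G3_scratch, and the load supplies the remaining
// low 10 bits as its immediate displacement. Both fconst() and dconst()
// use it to reach the static constants above.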
285 // %%%%% Should factor most snippet templates across platforms
287 void TemplateTable::bipush() {
288 transition(vtos, itos);
289 __ ldsb( at_bcp(1), Otos_i );
290 }
292 void TemplateTable::sipush() {
293 transition(vtos, itos);
294 __ get_2_byte_integer_at_bcp(1, G3_scratch, Otos_i, InterpreterMacroAssembler::Signed);
295 }
297 void TemplateTable::ldc(bool wide) {
298 transition(vtos, vtos);
299 Label call_ldc, notInt, isString, notString, notClass, exit;
301 if (wide) {
302 __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
303 } else {
304 __ ldub(Lbcp, 1, O1);
305 }
306 __ get_cpool_and_tags(O0, O2);
308 const int base_offset = ConstantPool::header_size() * wordSize;
309 const int tags_offset = Array<u1>::base_offset_in_bytes();
311 // get type from tags
312 __ add(O2, tags_offset, O2);
313 __ ldub(O2, O1, O2);
315 // unresolved class? If so, must resolve
316 __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedClass, Assembler::equal, Assembler::pt, call_ldc);
318 // unresolved class in error state
319 __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedClassInError, Assembler::equal, Assembler::pn, call_ldc);
321 __ cmp(O2, JVM_CONSTANT_Class); // need to call vm to get java mirror of the class
322 __ brx(Assembler::notEqual, true, Assembler::pt, notClass);
323 __ delayed()->add(O0, base_offset, O0);
325 __ bind(call_ldc);
326 __ set(wide, O1);
327 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), O1);
328 __ push(atos);
329 __ ba_short(exit);
331 __ bind(notClass);
332 // __ add(O0, base_offset, O0);
333 __ sll(O1, LogBytesPerWord, O1);
334 __ cmp(O2, JVM_CONSTANT_Integer);
335 __ brx(Assembler::notEqual, true, Assembler::pt, notInt);
336 __ delayed()->cmp(O2, JVM_CONSTANT_String);
337 __ ld(O0, O1, Otos_i);
338 __ push(itos);
339 __ ba_short(exit);
341 __ bind(notInt);
342 // __ cmp(O2, JVM_CONSTANT_String);
343 __ brx(Assembler::notEqual, true, Assembler::pt, notString);
344 __ delayed()->ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
345 __ bind(isString);
346 __ stop("string should be rewritten to fast_aldc");
347 __ ba_short(exit);
349 __ bind(notString);
350 // __ ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
351 __ push(ftos);
353 __ bind(exit);
354 }
356 // Fast path for caching oop constants.
357 // %%% We should use this to handle Class and String constants also.
358 // %%% It will simplify the ldc/primitive path considerably.
359 void TemplateTable::fast_aldc(bool wide) {
360 transition(vtos, atos);
362 int index_size = wide ? sizeof(u2) : sizeof(u1);
363 Label resolved;
365 // We are resolved if the resolved reference cache entry contains a
366 // non-null object (CallSite, etc.)
367 assert_different_registers(Otos_i, G3_scratch);
368 __ get_cache_index_at_bcp(Otos_i, G3_scratch, 1, index_size); // load index => G3_scratch
369 __ load_resolved_reference_at_index(Otos_i, G3_scratch);
370 __ tst(Otos_i);
371 __ br(Assembler::notEqual, false, Assembler::pt, resolved);
372 __ delayed()->set((int)bytecode(), O1);
374 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
376 // first time invocation - must resolve first
377 __ call_VM(Otos_i, entry, O1);
378 __ bind(resolved);
379 __ verify_oop(Otos_i);
380 }
383 void TemplateTable::ldc2_w() {
384 transition(vtos, vtos);
385 Label Long, exit;
387 __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
388 __ get_cpool_and_tags(O0, O2);
390 const int base_offset = ConstantPool::header_size() * wordSize;
391 const int tags_offset = Array<u1>::base_offset_in_bytes();
392 // get type from tags
393 __ add(O2, tags_offset, O2);
394 __ ldub(O2, O1, O2);
396 __ sll(O1, LogBytesPerWord, O1);
397 __ add(O0, O1, G3_scratch);
399 __ cmp_and_brx_short(O2, JVM_CONSTANT_Double, Assembler::notEqual, Assembler::pt, Long);
400 // A double can be placed at word-aligned locations in the constant pool.
401 // Check out Conversions.java for an example.
402 // Also ConstantPool::header_size() is 20, which makes it very difficult
403 // to double-align doubles in the constant pool. SG, 11/7/97
404 #ifdef _LP64
405 __ ldf(FloatRegisterImpl::D, G3_scratch, base_offset, Ftos_d);
406 #else
407 FloatRegister f = Ftos_d;
408 __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset, f);
409 __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset + sizeof(jdouble)/2,
410 f->successor());
411 #endif
412 __ push(dtos);
413 __ ba_short(exit);
415 __ bind(Long);
416 #ifdef _LP64
417 __ ldx(G3_scratch, base_offset, Otos_l);
418 #else
419 __ ld(G3_scratch, base_offset, Otos_l);
420 __ ld(G3_scratch, base_offset + sizeof(jlong)/2, Otos_l->successor());
421 #endif
422 __ push(ltos);
424 __ bind(exit);
425 }
428 void TemplateTable::locals_index(Register reg, int offset) {
429 __ ldub( at_bcp(offset), reg );
430 }
433 void TemplateTable::locals_index_wide(Register reg) {
434 // offset is 2, not 1, because Lbcp points to wide prefix code
435 __ get_2_byte_integer_at_bcp(2, G4_scratch, reg, InterpreterMacroAssembler::Unsigned);
436 }
438 void TemplateTable::iload() {
439 transition(vtos, itos);
440 // Rewrite iload,iload pair into fast_iload2
441 // iload,caload pair into fast_icaload
442 if (RewriteFrequentPairs) {
443 Label rewrite, done;
445 // get next byte
446 __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_iload)), G3_scratch);
448 // if _iload, wait to rewrite to iload2. We only want to rewrite the
449 // last two iloads in a pair. Comparing against fast_iload means that
450 // the next bytecode is neither an iload nor a caload, and therefore
451 // an iload pair.
452 __ cmp_and_br_short(G3_scratch, (int)Bytecodes::_iload, Assembler::equal, Assembler::pn, done);
454 __ cmp(G3_scratch, (int)Bytecodes::_fast_iload);
455 __ br(Assembler::equal, false, Assembler::pn, rewrite);
456 __ delayed()->set(Bytecodes::_fast_iload2, G4_scratch);
458 __ cmp(G3_scratch, (int)Bytecodes::_caload);
459 __ br(Assembler::equal, false, Assembler::pn, rewrite);
460 __ delayed()->set(Bytecodes::_fast_icaload, G4_scratch);
462 __ set(Bytecodes::_fast_iload, G4_scratch); // don't check again
463 // rewrite
464 // G4_scratch: fast bytecode
465 __ bind(rewrite);
466 patch_bytecode(Bytecodes::_iload, G4_scratch, G3_scratch, false);
467 __ bind(done);
468 }
470 // Get the local value into tos
471 locals_index(G3_scratch);
472 __ access_local_int( G3_scratch, Otos_i );
473 }
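// Stream rewrites performed above (a sketch of the transformation): an
// iload whose successor was already rewritten to fast_iload becomes
// fast_iload2, so one dispatch pushes the first local and loads the second
// (via locals_index(G3_scratch, 3) in fast_iload2() below); iload followed
// by caload becomes fast_icaload, folding the index load into the array
// access; a lone iload becomes fast_iload, which performs no pair check.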
475 void TemplateTable::fast_iload2() {
476 transition(vtos, itos);
477 locals_index(G3_scratch);
478 __ access_local_int( G3_scratch, Otos_i );
479 __ push_i();
480 locals_index(G3_scratch, 3); // get next bytecode's local index.
481 __ access_local_int( G3_scratch, Otos_i );
482 }
484 void TemplateTable::fast_iload() {
485 transition(vtos, itos);
486 locals_index(G3_scratch);
487 __ access_local_int( G3_scratch, Otos_i );
488 }
490 void TemplateTable::lload() {
491 transition(vtos, ltos);
492 locals_index(G3_scratch);
493 __ access_local_long( G3_scratch, Otos_l );
494 }
497 void TemplateTable::fload() {
498 transition(vtos, ftos);
499 locals_index(G3_scratch);
500 __ access_local_float( G3_scratch, Ftos_f );
501 }
504 void TemplateTable::dload() {
505 transition(vtos, dtos);
506 locals_index(G3_scratch);
507 __ access_local_double( G3_scratch, Ftos_d );
508 }
511 void TemplateTable::aload() {
512 transition(vtos, atos);
513 locals_index(G3_scratch);
514 __ access_local_ptr( G3_scratch, Otos_i);
515 }
518 void TemplateTable::wide_iload() {
519 transition(vtos, itos);
520 locals_index_wide(G3_scratch);
521 __ access_local_int( G3_scratch, Otos_i );
522 }
525 void TemplateTable::wide_lload() {
526 transition(vtos, ltos);
527 locals_index_wide(G3_scratch);
528 __ access_local_long( G3_scratch, Otos_l );
529 }
532 void TemplateTable::wide_fload() {
533 transition(vtos, ftos);
534 locals_index_wide(G3_scratch);
535 __ access_local_float( G3_scratch, Ftos_f );
536 }
539 void TemplateTable::wide_dload() {
540 transition(vtos, dtos);
541 locals_index_wide(G3_scratch);
542 __ access_local_double( G3_scratch, Ftos_d );
543 }
546 void TemplateTable::wide_aload() {
547 transition(vtos, atos);
548 locals_index_wide(G3_scratch);
549 __ access_local_ptr( G3_scratch, Otos_i );
550 __ verify_oop(Otos_i);
551 }
554 void TemplateTable::iaload() {
555 transition(itos, itos);
556 // Otos_i: index
557 // tos: array
558 __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
559 __ ld(O3, arrayOopDesc::base_offset_in_bytes(T_INT), Otos_i);
560 }
563 void TemplateTable::laload() {
564 transition(itos, ltos);
565 // Otos_i: index
566 // O2: array
567 __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
568 __ ld_long(O3, arrayOopDesc::base_offset_in_bytes(T_LONG), Otos_l);
569 }
572 void TemplateTable::faload() {
573 transition(itos, ftos);
574 // Otos_i: index
575 // O2: array
576 __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
577 __ ldf(FloatRegisterImpl::S, O3, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Ftos_f);
578 }
581 void TemplateTable::daload() {
582 transition(itos, dtos);
583 // Otos_i: index
584 // O2: array
585 __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
586 __ ldf(FloatRegisterImpl::D, O3, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Ftos_d);
587 }
590 void TemplateTable::aaload() {
591 transition(itos, atos);
592 // Otos_i: index
593 // tos: array
594 __ index_check(O2, Otos_i, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O3);
595 __ load_heap_oop(O3, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i);
596 __ verify_oop(Otos_i);
597 }
600 void TemplateTable::baload() {
601 transition(itos, itos);
602 // Otos_i: index
603 // tos: array
604 __ index_check(O2, Otos_i, 0, G3_scratch, O3);
605 __ ldsb(O3, arrayOopDesc::base_offset_in_bytes(T_BYTE), Otos_i);
606 }
609 void TemplateTable::caload() {
610 transition(itos, itos);
611 // Otos_i: index
612 // tos: array
613 __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
614 __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
615 }
617 void TemplateTable::fast_icaload() {
618 transition(vtos, itos);
619 // Otos_i: index
620 // tos: array
621 locals_index(G3_scratch);
622 __ access_local_int( G3_scratch, Otos_i );
623 __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
624 __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
625 }
628 void TemplateTable::saload() {
629 transition(itos, itos);
630 // Otos_i: index
631 // tos: array
632 __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
633 __ ldsh(O3, arrayOopDesc::base_offset_in_bytes(T_SHORT), Otos_i);
634 }
637 void TemplateTable::iload(int n) {
638 transition(vtos, itos);
639 __ ld( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
640 }
643 void TemplateTable::lload(int n) {
644 transition(vtos, ltos);
645 assert(n+1 < Argument::n_register_parameters, "would need more code");
646 __ load_unaligned_long(Llocals, Interpreter::local_offset_in_bytes(n+1), Otos_l);
647 }
650 void TemplateTable::fload(int n) {
651 transition(vtos, ftos);
652 assert(n < Argument::n_register_parameters, "would need more code");
653 __ ldf( FloatRegisterImpl::S, Llocals, Interpreter::local_offset_in_bytes(n), Ftos_f );
654 }
657 void TemplateTable::dload(int n) {
658 transition(vtos, dtos);
659 FloatRegister dst = Ftos_d;
660 __ load_unaligned_double(Llocals, Interpreter::local_offset_in_bytes(n+1), dst);
661 }
664 void TemplateTable::aload(int n) {
665 transition(vtos, atos);
666 __ ld_ptr( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
667 }
670 void TemplateTable::aload_0() {
671 transition(vtos, atos);
673 // According to bytecode histograms, the pairs:
674 //
675 // _aload_0, _fast_igetfield (itos)
676 // _aload_0, _fast_agetfield (atos)
677 // _aload_0, _fast_fgetfield (ftos)
678 //
679 // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
680 // bytecode checks the next bytecode and then rewrites the current
681 // bytecode into a pair bytecode; otherwise it rewrites the current
682 // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
683 //
684 if (RewriteFrequentPairs) {
685 Label rewrite, done;
687 // get next byte
688 __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)), G3_scratch);
690 // do actual aload_0
691 aload(0);
693 // if _getfield then wait with rewrite
694 __ cmp_and_br_short(G3_scratch, (int)Bytecodes::_getfield, Assembler::equal, Assembler::pn, done);
696 // if _igetfield then rewrite to _fast_iaccess_0
697 assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
698 __ cmp(G3_scratch, (int)Bytecodes::_fast_igetfield);
699 __ br(Assembler::equal, false, Assembler::pn, rewrite);
700 __ delayed()->set(Bytecodes::_fast_iaccess_0, G4_scratch);
702 // if _agetfield then rewrite to _fast_aaccess_0
703 assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
704 __ cmp(G3_scratch, (int)Bytecodes::_fast_agetfield);
705 __ br(Assembler::equal, false, Assembler::pn, rewrite);
706 __ delayed()->set(Bytecodes::_fast_aaccess_0, G4_scratch);
708 // if _fgetfield then rewrite to _fast_faccess_0
709 assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
710 __ cmp(G3_scratch, (int)Bytecodes::_fast_fgetfield);
711 __ br(Assembler::equal, false, Assembler::pn, rewrite);
712 __ delayed()->set(Bytecodes::_fast_faccess_0, G4_scratch);
714 // else rewrite to _fast_aload0
715 assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
716 __ set(Bytecodes::_fast_aload_0, G4_scratch);
718 // rewrite
719 // G4_scratch: fast bytecode
720 __ bind(rewrite);
721 patch_bytecode(Bytecodes::_aload_0, G4_scratch, G3_scratch, false);
722 __ bind(done);
723 } else {
724 aload(0);
725 }
726 }
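// Rewrite examples (sketch): aload_0 followed by a quickened getfield of
// an int/object/float field becomes _fast_iaccess_0 / _fast_aaccess_0 /
// _fast_faccess_0, folding the receiver push into the field load. If the
// following getfield is still unquickened we must not rewrite yet
// (resolution may still be pending), hence the early exit to done; any
// other successor just yields _fast_aload_0.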
729 void TemplateTable::istore() {
730 transition(itos, vtos);
731 locals_index(G3_scratch);
732 __ store_local_int( G3_scratch, Otos_i );
733 }
736 void TemplateTable::lstore() {
737 transition(ltos, vtos);
738 locals_index(G3_scratch);
739 __ store_local_long( G3_scratch, Otos_l );
740 }
743 void TemplateTable::fstore() {
744 transition(ftos, vtos);
745 locals_index(G3_scratch);
746 __ store_local_float( G3_scratch, Ftos_f );
747 }
750 void TemplateTable::dstore() {
751 transition(dtos, vtos);
752 locals_index(G3_scratch);
753 __ store_local_double( G3_scratch, Ftos_d );
754 }
757 void TemplateTable::astore() {
758 transition(vtos, vtos);
759 __ load_ptr(0, Otos_i);
760 __ inc(Lesp, Interpreter::stackElementSize);
761 __ verify_oop_or_return_address(Otos_i, G3_scratch);
762 locals_index(G3_scratch);
763 __ store_local_ptr(G3_scratch, Otos_i);
764 }
767 void TemplateTable::wide_istore() {
768 transition(vtos, vtos);
769 __ pop_i();
770 locals_index_wide(G3_scratch);
771 __ store_local_int( G3_scratch, Otos_i );
772 }
775 void TemplateTable::wide_lstore() {
776 transition(vtos, vtos);
777 __ pop_l();
778 locals_index_wide(G3_scratch);
779 __ store_local_long( G3_scratch, Otos_l );
780 }
783 void TemplateTable::wide_fstore() {
784 transition(vtos, vtos);
785 __ pop_f();
786 locals_index_wide(G3_scratch);
787 __ store_local_float( G3_scratch, Ftos_f );
788 }
791 void TemplateTable::wide_dstore() {
792 transition(vtos, vtos);
793 __ pop_d();
794 locals_index_wide(G3_scratch);
795 __ store_local_double( G3_scratch, Ftos_d );
796 }
799 void TemplateTable::wide_astore() {
800 transition(vtos, vtos);
801 __ load_ptr(0, Otos_i);
802 __ inc(Lesp, Interpreter::stackElementSize);
803 __ verify_oop_or_return_address(Otos_i, G3_scratch);
804 locals_index_wide(G3_scratch);
805 __ store_local_ptr(G3_scratch, Otos_i);
806 }
809 void TemplateTable::iastore() {
810 transition(itos, vtos);
811 __ pop_i(O2); // index
812 // Otos_i: val
813 // O3: array
814 __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
815 __ st(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_INT));
816 }
819 void TemplateTable::lastore() {
820 transition(ltos, vtos);
821 __ pop_i(O2); // index
822 // Otos_l: val
823 // O3: array
824 __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
825 __ st_long(Otos_l, O2, arrayOopDesc::base_offset_in_bytes(T_LONG));
826 }
829 void TemplateTable::fastore() {
830 transition(ftos, vtos);
831 __ pop_i(O2); // index
832 // Ftos_f: val
833 // O3: array
834 __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
835 __ stf(FloatRegisterImpl::S, Ftos_f, O2, arrayOopDesc::base_offset_in_bytes(T_FLOAT));
836 }
839 void TemplateTable::dastore() {
840 transition(dtos, vtos);
841 __ pop_i(O2); // index
842 // Ftos_d: val
843 // O3: array
844 __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
845 __ stf(FloatRegisterImpl::D, Ftos_d, O2, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
846 }
849 void TemplateTable::aastore() {
850 Label store_ok, is_null, done;
851 transition(vtos, vtos);
852 __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
853 __ ld(Lesp, Interpreter::expr_offset_in_bytes(1), O2); // get index
854 __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(2), O3); // get array
855 // Otos_i: val
856 // O2: index
857 // O3: array
858 __ verify_oop(Otos_i);
859 __ index_check_without_pop(O3, O2, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O1);
861 // do array store check - check for NULL value first
862 __ br_null_short( Otos_i, Assembler::pn, is_null );
864 __ load_klass(O3, O4); // get array klass
865 __ load_klass(Otos_i, O5); // get value klass
867 // do fast instanceof cache test
869 __ ld_ptr(O4, in_bytes(ObjArrayKlass::element_klass_offset()), O4);
871 assert(Otos_i == O0, "just checking");
873 // Otos_i: value
874 // O1: addr - offset
875 // O2: index
876 // O3: array
877 // O4: array element klass
878 // O5: value klass
880 // Address element(O1, 0, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
882 // Generate a fast subtype check. Branch to store_ok if no
883 // failure. Throw if failure.
884 __ gen_subtype_check( O5, O4, G3_scratch, G4_scratch, G1_scratch, store_ok );
886 // Not a subtype; so must throw exception
887 __ throw_if_not_x( Assembler::never, Interpreter::_throw_ArrayStoreException_entry, G3_scratch );
889 // Store is OK.
890 __ bind(store_ok);
891 do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i, G3_scratch, _bs->kind(), true);
893 __ ba(done);
894 __ delayed()->inc(Lesp, 3* Interpreter::stackElementSize); // adj sp (pops array, index and value)
896 __ bind(is_null);
897 do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), G0, G4_scratch, _bs->kind(), true);
899 __ profile_null_seen(G3_scratch);
900 __ inc(Lesp, 3* Interpreter::stackElementSize); // adj sp (pops array, index and value)
901 __ bind(done);
902 }
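// The subtype check above enforces the Java-level rule:
//   Object[] a = new String[1];
//   a[0] = Integer.valueOf(0);  // throws ArrayStoreException
// A null value branches to is_null instead: the store still goes through
// do_oop_store, but with val == G0 the post barrier is skipped.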
905 void TemplateTable::bastore() {
906 transition(itos, vtos);
907 __ pop_i(O2); // index
908 // Otos_i: val
909 // O3: array
910 __ index_check(O3, O2, 0, G3_scratch, O2);
911 __ stb(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_BYTE));
912 }
915 void TemplateTable::castore() {
916 transition(itos, vtos);
917 __ pop_i(O2); // index
918 // Otos_i: val
919 // O3: array
920 __ index_check(O3, O2, LogBytesPerShort, G3_scratch, O2);
921 __ sth(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_CHAR));
922 }
925 void TemplateTable::sastore() {
926 // %%%%% Factor across platform
927 castore();
928 }
931 void TemplateTable::istore(int n) {
932 transition(itos, vtos);
933 __ st(Otos_i, Llocals, Interpreter::local_offset_in_bytes(n));
934 }
937 void TemplateTable::lstore(int n) {
938 transition(ltos, vtos);
939 assert(n+1 < Argument::n_register_parameters, "only handle register cases");
940 __ store_unaligned_long(Otos_l, Llocals, Interpreter::local_offset_in_bytes(n+1));
942 }
945 void TemplateTable::fstore(int n) {
946 transition(ftos, vtos);
947 assert(n < Argument::n_register_parameters, "only handle register cases");
948 __ stf(FloatRegisterImpl::S, Ftos_f, Llocals, Interpreter::local_offset_in_bytes(n));
949 }
952 void TemplateTable::dstore(int n) {
953 transition(dtos, vtos);
954 FloatRegister src = Ftos_d;
955 __ store_unaligned_double(src, Llocals, Interpreter::local_offset_in_bytes(n+1));
956 }
959 void TemplateTable::astore(int n) {
960 transition(vtos, vtos);
961 __ load_ptr(0, Otos_i);
962 __ inc(Lesp, Interpreter::stackElementSize);
963 __ verify_oop_or_return_address(Otos_i, G3_scratch);
964 __ store_local_ptr(n, Otos_i);
965 }
968 void TemplateTable::pop() {
969 transition(vtos, vtos);
970 __ inc(Lesp, Interpreter::stackElementSize);
971 }
974 void TemplateTable::pop2() {
975 transition(vtos, vtos);
976 __ inc(Lesp, 2 * Interpreter::stackElementSize);
977 }
980 void TemplateTable::dup() {
981 transition(vtos, vtos);
982 // stack: ..., a
983 // load a and tag
984 __ load_ptr(0, Otos_i);
985 __ push_ptr(Otos_i);
986 // stack: ..., a, a
987 }
990 void TemplateTable::dup_x1() {
991 transition(vtos, vtos);
992 // stack: ..., a, b
993 __ load_ptr( 1, G3_scratch); // get a
994 __ load_ptr( 0, Otos_l1); // get b
995 __ store_ptr(1, Otos_l1); // put b
996 __ store_ptr(0, G3_scratch); // put a - like swap
997 __ push_ptr(Otos_l1); // push b
998 // stack: ..., b, a, b
999 }
1002 void TemplateTable::dup_x2() {
1003 transition(vtos, vtos);
1004 // stack: ..., a, b, c
1005 // get c and push on stack, reuse registers
1006 __ load_ptr( 0, G3_scratch); // get c
1007 __ push_ptr(G3_scratch); // push c with tag
1008 // stack: ..., a, b, c, c (c in reg) (Lesp - 4)
1009 // (stack offsets n+1 now)
1010 __ load_ptr( 3, Otos_l1); // get a
1011 __ store_ptr(3, G3_scratch); // put c at 3
1012 // stack: ..., c, b, c, c (a in reg)
1013 __ load_ptr( 2, G3_scratch); // get b
1014 __ store_ptr(2, Otos_l1); // put a at 2
1015 // stack: ..., c, a, c, c (b in reg)
1016 __ store_ptr(1, G3_scratch); // put b at 1
1017 // stack: ..., c, a, b, c
1018 }
1021 void TemplateTable::dup2() {
1022 transition(vtos, vtos);
1023 __ load_ptr(1, G3_scratch); // get a
1024 __ load_ptr(0, Otos_l1); // get b
1025 __ push_ptr(G3_scratch); // push a
1026 __ push_ptr(Otos_l1); // push b
1027 // stack: ..., a, b, a, b
1028 }
1031 void TemplateTable::dup2_x1() {
1032 transition(vtos, vtos);
1033 // stack: ..., a, b, c
1034 __ load_ptr( 1, Lscratch); // get b
1035 __ load_ptr( 2, Otos_l1); // get a
1036 __ store_ptr(2, Lscratch); // put b at a
1037 // stack: ..., b, b, c
1038 __ load_ptr( 0, G3_scratch); // get c
1039 __ store_ptr(1, G3_scratch); // put c at b
1040 // stack: ..., b, c, c
1041 __ store_ptr(0, Otos_l1); // put a at c
1042 // stack: ..., b, c, a
1043 __ push_ptr(Lscratch); // push b
1044 __ push_ptr(G3_scratch); // push c
1045 // stack: ..., b, c, a, b, c
1046 }
1049 // The spec says that these types can be a mixture of category 1 (1 word)
1050 // types and/or category 2 types (longs and doubles).
1051 void TemplateTable::dup2_x2() {
1052 transition(vtos, vtos);
1053 // stack: ..., a, b, c, d
1054 __ load_ptr( 1, Lscratch); // get c
1055 __ load_ptr( 3, Otos_l1); // get a
1056 __ store_ptr(3, Lscratch); // put c at 3
1057 __ store_ptr(1, Otos_l1); // put a at 1
1058 // stack: ..., c, b, a, d
1059 __ load_ptr( 2, G3_scratch); // get b
1060 __ load_ptr( 0, Otos_l1); // get d
1061 __ store_ptr(0, G3_scratch); // put b at 0
1062 __ store_ptr(2, Otos_l1); // put d at 2
1063 // stack: ..., c, d, a, b
1064 __ push_ptr(Lscratch); // push c
1065 __ push_ptr(Otos_l1); // push d
1066 // stack: ..., c, d, a, b, c, d
1067 }
1070 void TemplateTable::swap() {
1071 transition(vtos, vtos);
1072 // stack: ..., a, b
1073 __ load_ptr( 1, G3_scratch); // get a
1074 __ load_ptr( 0, Otos_l1); // get b
1075 __ store_ptr(0, G3_scratch); // put b
1076 __ store_ptr(1, Otos_l1); // put a
1077 // stack: ..., b, a
1078 }
1081 void TemplateTable::iop2(Operation op) {
1082 transition(itos, itos);
1083 __ pop_i(O1);
1084 switch (op) {
1085 case add: __ add(O1, Otos_i, Otos_i); break;
1086 case sub: __ sub(O1, Otos_i, Otos_i); break;
1087 // %%%%% Mul may not exist: better to call .mul?
1088 case mul: __ smul(O1, Otos_i, Otos_i); break;
1089 case _and: __ and3(O1, Otos_i, Otos_i); break;
1090 case _or: __ or3(O1, Otos_i, Otos_i); break;
1091 case _xor: __ xor3(O1, Otos_i, Otos_i); break;
1092 case shl: __ sll(O1, Otos_i, Otos_i); break;
1093 case shr: __ sra(O1, Otos_i, Otos_i); break;
1094 case ushr: __ srl(O1, Otos_i, Otos_i); break;
1095 default: ShouldNotReachHere();
1096 }
1097 }
1100 void TemplateTable::lop2(Operation op) {
1101 transition(ltos, ltos);
1102 __ pop_l(O2);
1103 switch (op) {
1104 #ifdef _LP64
1105 case add: __ add(O2, Otos_l, Otos_l); break;
1106 case sub: __ sub(O2, Otos_l, Otos_l); break;
1107 case _and: __ and3(O2, Otos_l, Otos_l); break;
1108 case _or: __ or3(O2, Otos_l, Otos_l); break;
1109 case _xor: __ xor3(O2, Otos_l, Otos_l); break;
1110 #else
1111 case add: __ addcc(O3, Otos_l2, Otos_l2); __ addc(O2, Otos_l1, Otos_l1); break;
1112 case sub: __ subcc(O3, Otos_l2, Otos_l2); __ subc(O2, Otos_l1, Otos_l1); break;
1113 case _and: __ and3(O3, Otos_l2, Otos_l2); __ and3(O2, Otos_l1, Otos_l1); break;
1114 case _or: __ or3(O3, Otos_l2, Otos_l2); __ or3(O2, Otos_l1, Otos_l1); break;
1115 case _xor: __ xor3(O3, Otos_l2, Otos_l2); __ xor3(O2, Otos_l1, Otos_l1); break;
1116 #endif
1117 default: ShouldNotReachHere();
1118 }
1119 }
1122 void TemplateTable::idiv() {
1123 // %%%%% Later: ForSPARC/V7 call .sdiv library routine,
1124 // %%%%% Use ldsw...sdivx on pure V9 ABI. 64 bit safe.
1126 transition(itos, itos);
1127 __ pop_i(O1); // get 1st op
1129 // Y contains upper 32 bits of result, set it to 0 or all ones
1130 __ wry(G0);
1131 __ mov(~0, G3_scratch);
1133 __ tst(O1);
1134 Label neg;
1135 __ br(Assembler::negative, true, Assembler::pn, neg);
1136 __ delayed()->wry(G3_scratch);
1137 __ bind(neg);
1139 Label ok;
1140 __ tst(Otos_i);
1141 __ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch );
1143 const int min_int = 0x80000000;
1144 Label regular;
1145 __ cmp(Otos_i, -1);
1146 __ br(Assembler::notEqual, false, Assembler::pt, regular);
1147 #ifdef _LP64
1148 // Don't put set in delay slot
1149 // Set will turn into multiple instructions in 64 bit mode
1150 __ delayed()->nop();
1151 __ set(min_int, G4_scratch);
1152 #else
1153 __ delayed()->set(min_int, G4_scratch);
1154 #endif
1155 Label done;
1156 __ cmp(O1, G4_scratch);
1157 __ br(Assembler::equal, true, Assembler::pt, done);
1158 __ delayed()->mov(O1, Otos_i); // (mov only executed if branch taken)
1160 __ bind(regular);
1161 __ sdiv(O1, Otos_i, Otos_i); // note: irem uses O1 after this instruction!
1162 __ bind(done);
1163 }
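// The -1 comparison handles the single overflowing case of 32-bit signed
// division: min_int / -1 would be +2^31, which is unrepresentable, and the
// Java spec requires the result to wrap to min_int itself. So when the
// divisor is -1 and the dividend is min_int, O1 is moved to Otos_i
// unchanged and sdiv is bypassed. Division by zero was already routed to
// _throw_ArithmeticException_entry above.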
1166 void TemplateTable::irem() {
1167 transition(itos, itos);
1168 __ mov(Otos_i, O2); // save divisor
1169 idiv(); // %%%% Hack: exploits fact that idiv leaves dividend in O1
1170 __ smul(Otos_i, O2, Otos_i);
1171 __ sub(O1, Otos_i, Otos_i);
1172 }
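// irem leans on idiv leaving the dividend live in O1 and the quotient in
// Otos_i: rem = dividend - quotient * divisor. Worked example with
// truncated division:  7 % -2  ->  7 / -2 = -3,  7 - (-3 * -2) = 1,
// matching the sign-of-dividend rule for Java's %.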
1175 void TemplateTable::lmul() {
1176 transition(ltos, ltos);
1177 __ pop_l(O2);
1178 #ifdef _LP64
1179 __ mulx(Otos_l, O2, Otos_l);
1180 #else
1181 __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lmul));
1182 #endif
1184 }
1187 void TemplateTable::ldiv() {
1188 transition(ltos, ltos);
1190 // check for zero
1191 __ pop_l(O2);
1192 #ifdef _LP64
1193 __ tst(Otos_l);
1194 __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
1195 __ sdivx(O2, Otos_l, Otos_l);
1196 #else
1197 __ orcc(Otos_l1, Otos_l2, G0);
1198 __ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
1199 __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
1200 #endif
1201 }
1204 void TemplateTable::lrem() {
1205 transition(ltos, ltos);
1207 // check for zero
1208 __ pop_l(O2);
1209 #ifdef _LP64
1210 __ tst(Otos_l);
1211 __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
1212 __ sdivx(O2, Otos_l, Otos_l2);
1213 __ mulx (Otos_l2, Otos_l, Otos_l2);
1214 __ sub (O2, Otos_l2, Otos_l);
1215 #else
1216 __ orcc(Otos_l1, Otos_l2, G0);
1217 __ throw_if_not_icc(Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
1218 __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
1219 #endif
1220 }
1223 void TemplateTable::lshl() {
1224 transition(itos, ltos); // %%%% could optimize, fill delay slot or opt for ultra
1226 __ pop_l(O2); // shift value in O2, O3
1227 #ifdef _LP64
1228 __ sllx(O2, Otos_i, Otos_l);
1229 #else
1230 __ lshl(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
1231 #endif
1232 }
1235 void TemplateTable::lshr() {
1236 transition(itos, ltos); // %%%% see lshl comment
1238 __ pop_l(O2); // shift value in O2, O3
1239 #ifdef _LP64
1240 __ srax(O2, Otos_i, Otos_l);
1241 #else
1242 __ lshr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
1243 #endif
1244 }
1248 void TemplateTable::lushr() {
1249 transition(itos, ltos); // %%%% see lshl comment
1251 __ pop_l(O2); // shift value in O2, O3
1252 #ifdef _LP64
1253 __ srlx(O2, Otos_i, Otos_l);
1254 #else
1255 __ lushr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
1256 #endif
1257 }
1260 void TemplateTable::fop2(Operation op) {
1261 transition(ftos, ftos);
1262 switch (op) {
1263 case add: __ pop_f(F4); __ fadd(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break;
1264 case sub: __ pop_f(F4); __ fsub(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break;
1265 case mul: __ pop_f(F4); __ fmul(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break;
1266 case div: __ pop_f(F4); __ fdiv(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break;
1267 case rem:
1268 assert(Ftos_f == F0, "just checking");
1269 #ifdef _LP64
1270 // LP64 calling conventions use F1, F3 for passing 2 floats
1271 __ pop_f(F1);
1272 __ fmov(FloatRegisterImpl::S, Ftos_f, F3);
1273 #else
1274 __ pop_i(O0);
1275 __ stf(FloatRegisterImpl::S, Ftos_f, __ d_tmp);
1276 __ ld( __ d_tmp, O1 );
1277 #endif
1278 __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::frem));
1279 assert( Ftos_f == F0, "fix this code" );
1280 break;
1282 default: ShouldNotReachHere();
1283 }
1284 }
1287 void TemplateTable::dop2(Operation op) {
1288 transition(dtos, dtos);
1289 switch (op) {
1290 case add: __ pop_d(F4); __ fadd(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
1291 case sub: __ pop_d(F4); __ fsub(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
1292 case mul: __ pop_d(F4); __ fmul(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
1293 case div: __ pop_d(F4); __ fdiv(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
1294 case rem:
1295 #ifdef _LP64
1296 // Pass arguments in D0, D2
1297 __ fmov(FloatRegisterImpl::D, Ftos_f, F2 );
1298 __ pop_d( F0 );
1299 #else
1300 // Pass arguments in O0O1, O2O3
1301 __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
1302 __ ldd( __ d_tmp, O2 );
1303 __ pop_d(Ftos_f);
1304 __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
1305 __ ldd( __ d_tmp, O0 );
1306 #endif
1307 __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::drem));
1308 assert( Ftos_d == F0, "fix this code" );
1309 break;
1311 default: ShouldNotReachHere();
1312 }
1313 }
1316 void TemplateTable::ineg() {
1317 transition(itos, itos);
1318 __ neg(Otos_i);
1319 }
1322 void TemplateTable::lneg() {
1323 transition(ltos, ltos);
1324 #ifdef _LP64
1325 __ sub(G0, Otos_l, Otos_l);
1326 #else
1327 __ lneg(Otos_l1, Otos_l2);
1328 #endif
1329 }
1332 void TemplateTable::fneg() {
1333 transition(ftos, ftos);
1334 __ fneg(FloatRegisterImpl::S, Ftos_f);
1335 }
1338 void TemplateTable::dneg() {
1339 transition(dtos, dtos);
1340 // v8 has fnegd if source and dest are the same
1341 __ fneg(FloatRegisterImpl::D, Ftos_f);
1342 }
1345 void TemplateTable::iinc() {
1346 transition(vtos, vtos);
1347 locals_index(G3_scratch);
1348 __ ldsb(Lbcp, 2, O2); // load constant
1349 __ access_local_int(G3_scratch, Otos_i);
1350 __ add(Otos_i, O2, Otos_i);
1351 __ st(Otos_i, G3_scratch, 0); // access_local_int puts E.A. in G3_scratch
1352 }
1355 void TemplateTable::wide_iinc() {
1356 transition(vtos, vtos);
1357 locals_index_wide(G3_scratch);
1358 __ get_2_byte_integer_at_bcp( 4, O2, O3, InterpreterMacroAssembler::Signed);
1359 __ access_local_int(G3_scratch, Otos_i);
1360 __ add(Otos_i, O3, Otos_i);
1361 __ st(Otos_i, G3_scratch, 0); // access_local_int puts E.A. in G3_scratch
1362 }
1365 void TemplateTable::convert() {
1366 // %%%%% Factor this first part across platforms
1367 #ifdef ASSERT
1368 TosState tos_in = ilgl;
1369 TosState tos_out = ilgl;
1370 switch (bytecode()) {
1371 case Bytecodes::_i2l: // fall through
1372 case Bytecodes::_i2f: // fall through
1373 case Bytecodes::_i2d: // fall through
1374 case Bytecodes::_i2b: // fall through
1375 case Bytecodes::_i2c: // fall through
1376 case Bytecodes::_i2s: tos_in = itos; break;
1377 case Bytecodes::_l2i: // fall through
1378 case Bytecodes::_l2f: // fall through
1379 case Bytecodes::_l2d: tos_in = ltos; break;
1380 case Bytecodes::_f2i: // fall through
1381 case Bytecodes::_f2l: // fall through
1382 case Bytecodes::_f2d: tos_in = ftos; break;
1383 case Bytecodes::_d2i: // fall through
1384 case Bytecodes::_d2l: // fall through
1385 case Bytecodes::_d2f: tos_in = dtos; break;
1386 default : ShouldNotReachHere();
1387 }
1388 switch (bytecode()) {
1389 case Bytecodes::_l2i: // fall through
1390 case Bytecodes::_f2i: // fall through
1391 case Bytecodes::_d2i: // fall through
1392 case Bytecodes::_i2b: // fall through
1393 case Bytecodes::_i2c: // fall through
1394 case Bytecodes::_i2s: tos_out = itos; break;
1395 case Bytecodes::_i2l: // fall through
1396 case Bytecodes::_f2l: // fall through
1397 case Bytecodes::_d2l: tos_out = ltos; break;
1398 case Bytecodes::_i2f: // fall through
1399 case Bytecodes::_l2f: // fall through
1400 case Bytecodes::_d2f: tos_out = ftos; break;
1401 case Bytecodes::_i2d: // fall through
1402 case Bytecodes::_l2d: // fall through
1403 case Bytecodes::_f2d: tos_out = dtos; break;
1404 default : ShouldNotReachHere();
1405 }
1406 transition(tos_in, tos_out);
1407 #endif
1410 // Conversion
1411 Label done;
1412 switch (bytecode()) {
1413 case Bytecodes::_i2l:
1414 #ifdef _LP64
1415 // Sign extend the 32 bits
1416 __ sra ( Otos_i, 0, Otos_l );
1417 #else
1418 __ addcc(Otos_i, 0, Otos_l2);
1419 __ br(Assembler::greaterEqual, true, Assembler::pt, done);
1420 __ delayed()->clr(Otos_l1);
1421 __ set(~0, Otos_l1);
1422 #endif
1423 break;
1425 case Bytecodes::_i2f:
1426 __ st(Otos_i, __ d_tmp );
1427 __ ldf(FloatRegisterImpl::S, __ d_tmp, F0);
1428 __ fitof(FloatRegisterImpl::S, F0, Ftos_f);
1429 break;
1431 case Bytecodes::_i2d:
1432 __ st(Otos_i, __ d_tmp);
1433 __ ldf(FloatRegisterImpl::S, __ d_tmp, F0);
1434 __ fitof(FloatRegisterImpl::D, F0, Ftos_f);
1435 break;
1437 case Bytecodes::_i2b:
1438 __ sll(Otos_i, 24, Otos_i);
1439 __ sra(Otos_i, 24, Otos_i);
1440 break;
1442 case Bytecodes::_i2c:
1443 __ sll(Otos_i, 16, Otos_i);
1444 __ srl(Otos_i, 16, Otos_i);
1445 break;
1447 case Bytecodes::_i2s:
1448 __ sll(Otos_i, 16, Otos_i);
1449 __ sra(Otos_i, 16, Otos_i);
1450 break;
1452 case Bytecodes::_l2i:
1453 #ifndef _LP64
1454 __ mov(Otos_l2, Otos_i);
1455 #else
1456 // Sign-extend into the high 32 bits
1457 __ sra(Otos_l, 0, Otos_i);
1458 #endif
1459 break;
1461 case Bytecodes::_l2f:
1462 case Bytecodes::_l2d:
1463 __ st_long(Otos_l, __ d_tmp);
1464 __ ldf(FloatRegisterImpl::D, __ d_tmp, Ftos_d);
1466 if (VM_Version::v9_instructions_work()) {
1467 if (bytecode() == Bytecodes::_l2f) {
1468 __ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f);
1469 } else {
1470 __ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d);
1471 }
1472 } else {
1473 __ call_VM_leaf(
1474 Lscratch,
1475 bytecode() == Bytecodes::_l2f
1476 ? CAST_FROM_FN_PTR(address, SharedRuntime::l2f)
1477 : CAST_FROM_FN_PTR(address, SharedRuntime::l2d)
1478 );
1479 }
1480 break;
1482 case Bytecodes::_f2i: {
1483 Label isNaN;
1484 // result must be 0 if value is NaN; test by comparing value to itself
1485 __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, Ftos_f, Ftos_f);
1486 // According to the v8 manual, you have to have a non-fp instruction
1487 // between fcmp and fb.
1488 if (!VM_Version::v9_instructions_work()) {
1489 __ nop();
1490 }
1491 __ fb(Assembler::f_unordered, true, Assembler::pn, isNaN);
1492 __ delayed()->clr(Otos_i); // NaN
1493 __ ftoi(FloatRegisterImpl::S, Ftos_f, F30);
1494 __ stf(FloatRegisterImpl::S, F30, __ d_tmp);
1495 __ ld(__ d_tmp, Otos_i);
1496 __ bind(isNaN);
1497 }
1498 break;
1500 case Bytecodes::_f2l:
1501 // must uncache tos
1502 __ push_f();
1503 #ifdef _LP64
1504 __ pop_f(F1);
1505 #else
1506 __ pop_i(O0);
1507 #endif
1508 __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
1509 break;
1511 case Bytecodes::_f2d:
1512 __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, Ftos_f, Ftos_f);
1513 break;
1515 case Bytecodes::_d2i:
1516 case Bytecodes::_d2l:
1517 // must uncache tos
1518 __ push_d();
1519 #ifdef _LP64
1520 // LP64 calling conventions pass first double arg in D0
1521 __ pop_d( Ftos_d );
1522 #else
1523 __ pop_i( O0 );
1524 __ pop_i( O1 );
1525 #endif
1526 __ call_VM_leaf(Lscratch,
1527 bytecode() == Bytecodes::_d2i
1528 ? CAST_FROM_FN_PTR(address, SharedRuntime::d2i)
1529 : CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
1530 break;
1532 case Bytecodes::_d2f:
1533 if (VM_Version::v9_instructions_work()) {
1534 __ ftof( FloatRegisterImpl::D, FloatRegisterImpl::S, Ftos_d, Ftos_f);
1535 }
1536 else {
1537 // must uncache tos
1538 __ push_d();
1539 __ pop_i(O0);
1540 __ pop_i(O1);
1541 __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::d2f));
1542 }
1543 break;
1545 default: ShouldNotReachHere();
1546 }
1547 __ bind(done);
1548 }
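// The shift-pair narrowing cases deserve a worked example. For i2b:
//   0x00001280 << 24 = 0x80000000,  >> 24 (arithmetic) = 0xFFFFFF80 = -128
// so only the low byte survives, sign-extended. i2c differs only in using
// srl (logical shift), zero-extending because char is unsigned 16-bit.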
1551 void TemplateTable::lcmp() {
1552 transition(ltos, itos);
1554 #ifdef _LP64
1555 __ pop_l(O1); // pop off value 1, value 2 is in O0
1556 __ lcmp( O1, Otos_l, Otos_i );
1557 #else
1558 __ pop_l(O2); // cmp O2,3 to O0,1
1559 __ lcmp( O2, O3, Otos_l1, Otos_l2, Otos_i );
1560 #endif
1561 }
1564 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
1566 if (is_float) __ pop_f(F2);
1567 else __ pop_d(F2);
1569 assert(Ftos_f == F0 && Ftos_d == F0, "alias checking:");
1571 __ float_cmp( is_float, unordered_result, F2, F0, Otos_i );
1572 }
1574 void TemplateTable::branch(bool is_jsr, bool is_wide) {
1575 // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
1576 __ verify_thread();
1578 const Register O2_bumped_count = O2;
1579 __ profile_taken_branch(G3_scratch, O2_bumped_count);
1581 // get (wide) offset to O1_disp
1582 const Register O1_disp = O1;
1583 if (is_wide) __ get_4_byte_integer_at_bcp( 1, G4_scratch, O1_disp, InterpreterMacroAssembler::set_CC);
1584 else __ get_2_byte_integer_at_bcp( 1, G4_scratch, O1_disp, InterpreterMacroAssembler::Signed, InterpreterMacroAssembler::set_CC);
1586 // Handle all the JSR stuff here, then exit.
1587 // It's much shorter and cleaner than intermingling with the
1588 // non-JSR normal-branch stuff occurring below.
1589 if( is_jsr ) {
1590 // compute return address as bci in Otos_i
1591 __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
1592 __ sub(Lbcp, G3_scratch, G3_scratch);
1593 __ sub(G3_scratch, in_bytes(ConstMethod::codes_offset()) - (is_wide ? 5 : 3), Otos_i);
1595 // Bump Lbcp to target of JSR
1596 __ add(Lbcp, O1_disp, Lbcp);
1597 // Push returnAddress for "ret" on stack
1598 __ push_ptr(Otos_i);
1599 // And away we go!
1600 __ dispatch_next(vtos);
1601 return;
1602 }
1604 // Normal (non-jsr) branch handling
1606 // Save the current Lbcp
1607 const Register l_cur_bcp = Lscratch;
1608 __ mov( Lbcp, l_cur_bcp );
1610 bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
1611 if ( increment_invocation_counter_for_backward_branches ) {
1612 Label Lforward;
1613 // check branch direction
1614 __ br( Assembler::positive, false, Assembler::pn, Lforward );
1615 // Bump bytecode pointer by displacement (take the branch)
1616 __ delayed()->add( O1_disp, Lbcp, Lbcp ); // add to bc addr
1618 const Register Rcounters = G3_scratch;
1619 __ get_method_counters(Lmethod, Rcounters, Lforward);
1621 if (TieredCompilation) {
1622 Label Lno_mdo, Loverflow;
1623 int increment = InvocationCounter::count_increment;
1624 int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
1625 if (ProfileInterpreter) {
1626 // If no method data exists, go to profile_continue.
1627 __ ld_ptr(Lmethod, Method::method_data_offset(), G4_scratch);
1628 __ br_null_short(G4_scratch, Assembler::pn, Lno_mdo);
1630 // Increment backedge counter in the MDO
1631 Address mdo_backedge_counter(G4_scratch, in_bytes(MethodData::backedge_counter_offset()) +
1632 in_bytes(InvocationCounter::counter_offset()));
1633 __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, O0,
1634 Assembler::notZero, &Lforward);
1635 __ ba_short(Loverflow);
1636 }
1638 // If there's no MDO, increment counter in MethodCounters*
1639 __ bind(Lno_mdo);
1640 Address backedge_counter(Rcounters,
1641 in_bytes(MethodCounters::backedge_counter_offset()) +
1642 in_bytes(InvocationCounter::counter_offset()));
1643 __ increment_mask_and_jump(backedge_counter, increment, mask, G4_scratch, O0,
1644 Assembler::notZero, &Lforward);
1645 __ bind(Loverflow);
1647 // notify point for loop, pass branch bytecode
1648 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), l_cur_bcp);
1650 // Was an OSR adapter generated?
1651 // O0 = osr nmethod
1652 __ br_null_short(O0, Assembler::pn, Lforward);
1654 // Has the nmethod been invalidated already?
1655 __ ld(O0, nmethod::entry_bci_offset(), O2);
1656 __ cmp_and_br_short(O2, InvalidOSREntryBci, Assembler::equal, Assembler::pn, Lforward);
1658 // migrate the interpreter frame off of the stack
1660 __ mov(G2_thread, L7);
1661 // save nmethod
1662 __ mov(O0, L6);
1663 __ set_last_Java_frame(SP, noreg);
1664 __ call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
1665 __ reset_last_Java_frame();
1666 __ mov(L7, G2_thread);
1668 // move OSR nmethod to I1
1669 __ mov(L6, I1);
1671 // OSR buffer to I0
1672 __ mov(O0, I0);
1674 // remove the interpreter frame
1675 __ restore(I5_savedSP, 0, SP);
1677 // Jump to the osr code.
1678 __ ld_ptr(O1, nmethod::osr_entry_point_offset(), O2);
1679 __ jmp(O2, G0);
1680 __ delayed()->nop();
1682 } else {
1683 // Update Backedge branch separately from invocations
1684 const Register G4_invoke_ctr = G4;
1685 __ increment_backedge_counter(Rcounters, G4_invoke_ctr, G1_scratch);
1686 if (ProfileInterpreter) {
1687 __ test_invocation_counter_for_mdp(G4_invoke_ctr, G3_scratch, Lforward);
1688 if (UseOnStackReplacement) {
1689 __ test_backedge_count_for_osr(O2_bumped_count, l_cur_bcp, G3_scratch);
1690 }
1691 } else {
1692 if (UseOnStackReplacement) {
1693 __ test_backedge_count_for_osr(G4_invoke_ctr, l_cur_bcp, G3_scratch);
1694 }
1695 }
1696 }
1698 __ bind(Lforward);
1699 } else
1700 // Bump bytecode pointer by displacement (take the branch)
1701 __ add( O1_disp, Lbcp, Lbcp );// add to bc addr
1703 // continue with bytecode @ target
1704 // %%%%% Like Intel, could speed things up by moving bytecode fetch to code above,
1705 // %%%%% and changing dispatch_next to dispatch_only
1706 __ dispatch_next(vtos);
1707 }
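// Note the Lforward skip label passed to get_method_counters() above:
// since 8010862 the MethodCounters may not exist yet, and if the runtime
// cannot allocate them the backedge is simply taken without any counter
// update or OSR check, rather than the interpreter failing.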
1710 // Note: the Condition argument is TemplateTable::Condition,
1711 // whose scope is within this class.
1713 void TemplateTable::if_0cmp(Condition cc) {
1714 // no pointers, integer only!
1715 transition(itos, vtos);
1716 // assume branch is more often taken than not (loops use backward branches)
1717 __ cmp( Otos_i, 0);
1718 __ if_cmp(ccNot(cc), false);
1719 }
1722 void TemplateTable::if_icmp(Condition cc) {
1723 transition(itos, vtos);
1724 __ pop_i(O1);
1725 __ cmp(O1, Otos_i);
1726 __ if_cmp(ccNot(cc), false);
1727 }
1730 void TemplateTable::if_nullcmp(Condition cc) {
1731 transition(atos, vtos);
1732 __ tst(Otos_i);
1733 __ if_cmp(ccNot(cc), true);
1734 }
1737 void TemplateTable::if_acmp(Condition cc) {
1738 transition(atos, vtos);
1739 __ pop_ptr(O1);
1740 __ verify_oop(O1);
1741 __ verify_oop(Otos_i);
1742 __ cmp(O1, Otos_i);
1743 __ if_cmp(ccNot(cc), true);
1744 }
1748 void TemplateTable::ret() {
1749 transition(vtos, vtos);
1750 locals_index(G3_scratch);
1751 __ access_local_returnAddress(G3_scratch, Otos_i);
1752 // Otos_i contains the bci, compute the bcp from that
1754 #ifdef _LP64
1755 #ifdef ASSERT
1756 // jsr result was labeled as an 'itos' not an 'atos' because we cannot GC
1757 // the result. The return address (really a BCI) was stored with an
1758 // 'astore' because JVM specs claim it's a pointer-sized thing. Hence in
1759 // the 64-bit build the 32-bit BCI is actually in the low bits of a 64-bit
1760 // loaded value.
1761 { Label zzz ;
1762 __ set (65536, G3_scratch) ;
1763 __ cmp (Otos_i, G3_scratch) ;
1764 __ bp( Assembler::lessEqualUnsigned, false, Assembler::xcc, Assembler::pn, zzz);
1765 __ delayed()->nop();
1766 __ stop("BCI is in the wrong register half?");
1767 __ bind (zzz) ;
1768 }
1769 #endif
1770 #endif
1772 __ profile_ret(vtos, Otos_i, G4_scratch);
1774 __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
1775 __ add(G3_scratch, Otos_i, G3_scratch);
1776 __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
1777 __ dispatch_next(vtos);
1778 }
1781 void TemplateTable::wide_ret() {
1782 transition(vtos, vtos);
1783 locals_index_wide(G3_scratch);
1784 __ access_local_returnAddress(G3_scratch, Otos_i);
1785 // Otos_i contains the bci, compute the bcp from that
1787 __ profile_ret(vtos, Otos_i, G4_scratch);
1789 __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
1790 __ add(G3_scratch, Otos_i, G3_scratch);
1791 __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
1792 __ dispatch_next(vtos);
1793 }
1796 void TemplateTable::tableswitch() {
1797 transition(itos, vtos);
1798 Label default_case, continue_execution;
1800 // align bcp
1801 __ add(Lbcp, BytesPerInt, O1);
1802 __ and3(O1, -BytesPerInt, O1);
1803 // load lo, hi
1804 __ ld(O1, 1 * BytesPerInt, O2); // low bound (4 bytes)
1805 __ ld(O1, 2 * BytesPerInt, O3); // high bound (4 bytes)
1806 #ifdef _LP64
1807 // Sign extend the 32 bits
1808 __ sra ( Otos_i, 0, Otos_i );
1809 #endif /* _LP64 */
1811 // check against lo & hi
1812 __ cmp( Otos_i, O2);
1813 __ br( Assembler::less, false, Assembler::pn, default_case);
1814 __ delayed()->cmp( Otos_i, O3 );
1815 __ br( Assembler::greater, false, Assembler::pn, default_case);
1816 // lookup dispatch offset
1817 __ delayed()->sub(Otos_i, O2, O2);
1818 __ profile_switch_case(O2, O3, G3_scratch, G4_scratch);
1819 __ sll(O2, LogBytesPerInt, O2);
1820 __ add(O2, 3 * BytesPerInt, O2);
1821 __ ba(continue_execution);
1822 __ delayed()->ld(O1, O2, O2);
1823 // handle default
1824 __ bind(default_case);
1825 __ profile_switch_default(O3);
1826 __ ld(O1, 0, O2); // get default offset
1827 // continue execution
1828 __ bind(continue_execution);
1829 __ add(Lbcp, O2, Lbcp);
1830 __ dispatch_next(vtos);
1831 }
1834 void TemplateTable::lookupswitch() {
1835 transition(itos, itos);
1836 __ stop("lookupswitch bytecode should have been rewritten");
1837 }
1839 void TemplateTable::fast_linearswitch() {
1840 transition(itos, vtos);
1841 Label loop_entry, loop, found, continue_execution;
1842 // align bcp
1843 __ add(Lbcp, BytesPerInt, O1);
1844 __ and3(O1, -BytesPerInt, O1);
1845 // set counter
1846 __ ld(O1, BytesPerInt, O2);
1847 __ sll(O2, LogBytesPerInt + 1, O2); // in word-pairs
1848 __ add(O1, 2 * BytesPerInt, O3); // set first pair addr
1849 __ ba(loop_entry);
1850 __ delayed()->add(O3, O2, O2); // counter now points past last pair
1852 // table search
1853 __ bind(loop);
1854 __ cmp(O4, Otos_i);
1855 __ br(Assembler::equal, true, Assembler::pn, found);
1856 __ delayed()->ld(O3, BytesPerInt, O4); // offset -> O4
1857 __ inc(O3, 2 * BytesPerInt);
1859 __ bind(loop_entry);
1860 __ cmp(O2, O3);
1861 __ brx(Assembler::greaterUnsigned, true, Assembler::pt, loop);
1862 __ delayed()->ld(O3, 0, O4);
1864 // default case
1865 __ ld(O1, 0, O4); // get default offset
1866 if (ProfileInterpreter) {
1867 __ profile_switch_default(O3);
1868 __ ba_short(continue_execution);
1869 }
1871 // entry found -> get offset
1872 __ bind(found);
1873 if (ProfileInterpreter) {
1874 __ sub(O3, O1, O3);
1875 __ sub(O3, 2*BytesPerInt, O3);
1876 __ srl(O3, LogBytesPerInt + 1, O3); // byte offset back to pair index
1877 __ profile_switch_case(O3, O1, O2, G3_scratch);
1879 __ bind(continue_execution);
1880 }
1881 __ add(Lbcp, O4, Lbcp);
1882 __ dispatch_next(vtos);
1883 }
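// fast_linearswitch scans the (match, offset) pairs in order; roughly, as a
// C sketch (illustrative only):
//
//   int* t   = (int*)align_up(bcp + 1, BytesPerInt);  // default, npairs, pairs
//   int  off = t[0];                                  // default offset
//   for (int i = 0; i < t[1]; i++) {
//     if (t[2 + 2*i] == key) { off = t[3 + 2*i]; break; }
//   }
//   bcp += off;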
1886 void TemplateTable::fast_binaryswitch() {
1887 transition(itos, vtos);
1888 // Implementation using the following core algorithm: (copied from Intel)
1889 //
1890 // int binary_search(int key, LookupswitchPair* array, int n) {
1891 // // Binary search according to "Methodik des Programmierens" by
1892 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
1893 // int i = 0;
1894 // int j = n;
1895 // while (i+1 < j) {
1896 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
1897 // // with Q: for all i: 0 <= i < n: key < a[i]
1898 // // where a stands for the array and assuming that the (nonexistent)
1899 // // element a[n] is infinitely big.
1900 // int h = (i + j) >> 1;
1901 // // i < h < j
1902 // if (key < array[h].fast_match()) {
1903 // j = h;
1904 // } else {
1905 // i = h;
1906 // }
1907 // }
1908 // // R: a[i] <= key < a[i+1] or Q
1909 // // (i.e., if key is within array, i is the correct index)
1910 // return i;
1911 // }
1913 // register allocation
1914 assert(Otos_i == O0, "alias checking");
1915 const Register Rkey = Otos_i; // already set (tosca)
1916 const Register Rarray = O1;
1917 const Register Ri = O2;
1918 const Register Rj = O3;
1919 const Register Rh = O4;
1920 const Register Rscratch = O5;
1922 const int log_entry_size = 3;
1923 const int entry_size = 1 << log_entry_size;
1925 Label found;
1926 // Find Array start
1927 __ add(Lbcp, 3 * BytesPerInt, Rarray);
1928 __ and3(Rarray, -BytesPerInt, Rarray);
1929 // initialize i & j (in delay slot)
1930 __ clr( Ri );
1932 // and start
1933 Label entry;
1934 __ ba(entry);
1935 __ delayed()->ld( Rarray, -BytesPerInt, Rj);
1936 // (Rj is already in the native byte-ordering.)
1938 // binary search loop
1939 { Label loop;
1940 __ bind( loop );
1941 // int h = (i + j) >> 1;
1942 __ sra( Rh, 1, Rh );
1943 // if (key < array[h].fast_match()) {
1944 // j = h;
1945 // } else {
1946 // i = h;
1947 // }
1948 __ sll( Rh, log_entry_size, Rscratch );
1949 __ ld( Rarray, Rscratch, Rscratch );
1950 // (Rscratch is already in the native byte-ordering.)
1951 __ cmp( Rkey, Rscratch );
1952 if ( VM_Version::v9_instructions_work() ) {
1953 __ movcc( Assembler::less, false, Assembler::icc, Rh, Rj ); // j = h if (key < array[h].fast_match())
1954 __ movcc( Assembler::greaterEqual, false, Assembler::icc, Rh, Ri ); // i = h if (key >= array[h].fast_match())
1955 }
1956 else {
1957 Label end_of_if;
1958 __ br( Assembler::less, true, Assembler::pt, end_of_if );
1959 __ delayed()->mov( Rh, Rj ); // if (<) Rj = Rh
1960 __ mov( Rh, Ri ); // else i = h
1961 __ bind(end_of_if); // }
1962 }
1964 // while (i+1 < j)
1965 __ bind( entry );
1966 __ add( Ri, 1, Rscratch );
1967 __ cmp(Rscratch, Rj);
1968 __ br( Assembler::less, true, Assembler::pt, loop );
1969 __ delayed()->add( Ri, Rj, Rh ); // start h = i + j; the >> 1 happens at the loop head
1970 }
1972 // end of binary search, result index is i (must check again!)
1973 Label default_case;
1974 Label continue_execution;
1975 if (ProfileInterpreter) {
1976 __ mov( Ri, Rh ); // Save index in i for profiling
1977 }
1978 __ sll( Ri, log_entry_size, Ri );
1979 __ ld( Rarray, Ri, Rscratch );
1980 // (Rscratch is already in the native byte-ordering.)
1981 __ cmp( Rkey, Rscratch );
1982 __ br( Assembler::notEqual, true, Assembler::pn, default_case );
1983 __ delayed()->ld( Rarray, -2 * BytesPerInt, Rj ); // load default offset -> j
1985 // entry found -> j = offset
1986 __ inc( Ri, BytesPerInt );
1987 __ profile_switch_case(Rh, Rj, Rscratch, Rkey);
1988 __ ld( Rarray, Ri, Rj );
1989 // (Rj is already in the native byte-ordering.)
1991 if (ProfileInterpreter) {
1992 __ ba_short(continue_execution);
1993 }
1995 __ bind(default_case); // fall through (if not profiling)
1996 __ profile_switch_default(Ri);
1998 __ bind(continue_execution);
1999 __ add( Lbcp, Rj, Lbcp );
2000 __ dispatch_next( vtos );
2001 }
2004 void TemplateTable::_return(TosState state) {
2005 transition(state, state);
2006 assert(_desc->calls_vm(), "inconsistent calls_vm information");
2008 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2009 assert(state == vtos, "only valid state");
2010 __ mov(G0, G3_scratch);
2011 __ access_local_ptr(G3_scratch, Otos_i);
2012 __ load_klass(Otos_i, O2);
2013 __ set(JVM_ACC_HAS_FINALIZER, G3);
2014 __ ld(O2, in_bytes(Klass::access_flags_offset()), O2);
2015 __ andcc(G3, O2, G0);
2016 Label skip_register_finalizer;
2017 __ br(Assembler::zero, false, Assembler::pn, skip_register_finalizer);
2018 __ delayed()->nop();
2020 // Call out to do finalizer registration
2021 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), Otos_i);
2023 __ bind(skip_register_finalizer);
2024 }
2026 __ remove_activation(state, /* throw_monitor_exception */ true);
2028 // The caller's SP was adjusted upon method entry to accommodate
2029 // the callee's non-argument locals. Undo that adjustment.
2030 __ ret(); // return to caller
2031 __ delayed()->restore(I5_savedSP, G0, SP);
2032 }
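// For _return_register_finalizer the prologue above amounts to (sketch only;
// 'receiver' stands for the object in local slot 0):
//
//   if (receiver->klass()->access_flags() & JVM_ACC_HAS_FINALIZER)
//     InterpreterRuntime::register_finalizer(receiver);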
2035 // ----------------------------------------------------------------------------
2036 // Volatile variables demand their effects be made known to all CPUs in
2037 // order. Store buffers on most chips allow reads & writes to reorder; the
2038 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
2039 // memory barrier (i.e., it's not sufficient that the interpreter does not
2040 // reorder volatile references, the hardware also must not reorder them).
2041 //
2042 // According to the new Java Memory Model (JMM):
2043 // (1) All volatiles are serialized wrt to each other.
2044 // ALSO reads & writes act as acquire & release, so:
2045 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
2046 // the read float up to before the read. It's OK for non-volatile memory refs
2047 // that happen before the volatile read to float down below it.
2048 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
2049 // that happen BEFORE the write float down to after the write. It's OK for
2050 // non-volatile memory refs that happen after the volatile write to float up
2051 // before it.
2052 //
2053 // We only put in barriers around volatile refs (they are expensive), not
2054 // _between_ memory refs (that would require us to track the flavor of the
2055 // previous memory refs). Requirements (2) and (3) require some barriers
2056 // before volatile stores and after volatile loads. These nearly cover
2057 // requirement (1) but miss the volatile-store-volatile-load case. This final
2058 // case is placed after volatile-stores although it could just as well go
2059 // before volatile-loads.
2060 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint) {
2061 // Helper function to insert an is-volatile test and memory barrier.
2062 // All current SPARC implementations run in TSO, needing only StoreLoad.
2063 if ((order_constraint & Assembler::StoreLoad) == 0) return;
2064 __ membar( order_constraint );
2065 }
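// Because TSO already orders LoadLoad, LoadStore and StoreStore, the helper
// effectively reduces to (sketch):
//
//   if (order_constraint & StoreLoad) __ membar(order_constraint);  // else: no-op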
2067 // ----------------------------------------------------------------------------
2068 void TemplateTable::resolve_cache_and_index(int byte_no,
2069 Register Rcache,
2070 Register index,
2071 size_t index_size) {
2072 // Depends on cpCacheOop layout!
2073 Label resolved;
2075 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2076 __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, Lbyte_code, byte_no, 1, index_size);
2077 __ cmp(Lbyte_code, (int) bytecode()); // have we resolved this bytecode?
2078 __ br(Assembler::equal, false, Assembler::pt, resolved);
2079 __ delayed()->set((int)bytecode(), O1);
2081 address entry;
2082 switch (bytecode()) {
2083 case Bytecodes::_getstatic : // fall through
2084 case Bytecodes::_putstatic : // fall through
2085 case Bytecodes::_getfield : // fall through
2086 case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
2087 case Bytecodes::_invokevirtual : // fall through
2088 case Bytecodes::_invokespecial : // fall through
2089 case Bytecodes::_invokestatic : // fall through
2090 case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
2091 case Bytecodes::_invokehandle : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break;
2092 case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
2093 default:
2094 fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
2095 break;
2096 }
2097 // first time invocation - must resolve first
2098 __ call_VM(noreg, entry, O1);
2099 // Update registers with resolved info
2100 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2101 __ bind(resolved);
2102 }
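// The fast/slow split above corresponds roughly to (sketch, with
// illustrative names for the cp-cache accessors):
//
//   if (cache[index].bytecode_at(byte_no) == bytecode()) goto resolved;
//   InterpreterRuntime::resolve_<kind>(bytecode());  // VM call, fills the entry
//   // then reload Rcache/index from the bcp and fall into 'resolved'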
2104 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2105 Register method,
2106 Register itable_index,
2107 Register flags,
2108 bool is_invokevirtual,
2109 bool is_invokevfinal,
2110 bool is_invokedynamic) {
2111 // Uses both G3_scratch and G4_scratch
2112 Register cache = G3_scratch;
2113 Register index = G4_scratch;
2114 assert_different_registers(cache, method, itable_index);
2116 // determine constant pool cache field offsets
2117 assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2118 const int method_offset = in_bytes(
2119 ConstantPoolCache::base_offset() +
2120 ((byte_no == f2_byte)
2121 ? ConstantPoolCacheEntry::f2_offset()
2122 : ConstantPoolCacheEntry::f1_offset()
2123 )
2124 );
2125 const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2126 ConstantPoolCacheEntry::flags_offset());
2127 // access constant pool cache fields
2128 const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
2129 ConstantPoolCacheEntry::f2_offset());
2131 if (is_invokevfinal) {
2132 __ get_cache_and_index_at_bcp(cache, index, 1);
2133 __ ld_ptr(Address(cache, method_offset), method);
2134 } else {
2135 size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
2136 resolve_cache_and_index(byte_no, cache, index, index_size);
2137 __ ld_ptr(Address(cache, method_offset), method);
2138 }
2140 if (itable_index != noreg) {
2141 // pick up itable or appendix index from f2 also:
2142 __ ld_ptr(Address(cache, index_offset), itable_index);
2143 }
2144 __ ld_ptr(Address(cache, flags_offset), flags);
2145 }
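// For reference, a sketch of the ConstantPoolCacheEntry layout the offsets
// above index into (field names illustrative; see cpCache.hpp for the real
// definition):
//
//   struct Entry { intptr_t indices; Metadata* f1; intptr_t f2; intptr_t flags; };
//
// f1/f2 hold the Method* (or Klass*/itable index, depending on the invoke
// kind) and flags packs the tos state, parameter size and the is_volatile,
// is_final and has_appendix bits.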
2147 // The Rcache register must be set before call
2148 void TemplateTable::load_field_cp_cache_entry(Register Robj,
2149 Register Rcache,
2150 Register index,
2151 Register Roffset,
2152 Register Rflags,
2153 bool is_static) {
2154 assert_different_registers(Rcache, Rflags, Roffset);
2156 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2158 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
2159 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
2160 if (is_static) {
2161 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f1_offset(), Robj);
2162 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2163 __ ld_ptr( Robj, mirror_offset, Robj);
2164 }
2165 }
2167 // The registers Rcache and index are expected to be set before the call.
2168 // Correct values of the Rcache and index registers are preserved.
2169 void TemplateTable::jvmti_post_field_access(Register Rcache,
2170 Register index,
2171 bool is_static,
2172 bool has_tos) {
2173 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2175 if (JvmtiExport::can_post_field_access()) {
2176 // Check to see if a field access watch has been set before we take
2177 // the time to call into the VM.
2178 Label Label1;
2179 assert_different_registers(Rcache, index, G1_scratch);
2180 AddressLiteral get_field_access_count_addr(JvmtiExport::get_field_access_count_addr());
2181 __ load_contents(get_field_access_count_addr, G1_scratch);
2182 __ cmp_and_br_short(G1_scratch, 0, Assembler::equal, Assembler::pt, Label1);
2184 __ add(Rcache, in_bytes(cp_base_offset), Rcache);
2186 if (is_static) {
2187 __ clr(Otos_i);
2188 } else {
2189 if (has_tos) {
2190 // save object pointer before call_VM() clobbers it
2191 __ push_ptr(Otos_i); // put object on tos where GC wants it.
2192 } else {
2193 // Load top of stack (do not pop the value off the stack);
2194 __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
2195 }
2196 __ verify_oop(Otos_i);
2197 }
2198 // Otos_i: object pointer or NULL if static
2199 // Rcache: cache entry pointer
2200 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2201 Otos_i, Rcache);
2202 if (!is_static && has_tos) {
2203 __ pop_ptr(Otos_i); // restore object pointer
2204 __ verify_oop(Otos_i);
2205 }
2206 __ get_cache_and_index_at_bcp(Rcache, index, 1);
2207 __ bind(Label1);
2208 }
2209 }
2211 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
2212 transition(vtos, vtos);
2214 Register Rcache = G3_scratch;
2215 Register index = G4_scratch;
2216 Register Rclass = Rcache;
2217 Register Roffset= G4_scratch;
2218 Register Rflags = G1_scratch;
2219 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2221 resolve_cache_and_index(byte_no, Rcache, index, sizeof(u2));
2222 jvmti_post_field_access(Rcache, index, is_static, false);
2223 load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);
2225 if (!is_static) {
2226 pop_and_check_object(Rclass);
2227 } else {
2228 __ verify_oop(Rclass);
2229 }
2231 Label exit;
2233 Assembler::Membar_mask_bits membar_bits =
2234 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
2236 if (__ membar_has_effect(membar_bits)) {
2237 // Get volatile flag
2238 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
2239 __ and3(Rflags, Lscratch, Lscratch);
2240 }
2242 Label checkVolatile;
2244 // compute field type
2245 Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj;
2246 __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
2247 // Make sure we don't need to mask Rflags after the above shift
2248 ConstantPoolCacheEntry::verify_tos_state_shift();
2250 // Check atos before itos for getstatic, more likely (in Queens at least)
2251 __ cmp(Rflags, atos);
2252 __ br(Assembler::notEqual, false, Assembler::pt, notObj);
2253 __ delayed() ->cmp(Rflags, itos);
2255 // atos
2256 __ load_heap_oop(Rclass, Roffset, Otos_i);
2257 __ verify_oop(Otos_i);
2258 __ push(atos);
2259 if (!is_static) {
2260 patch_bytecode(Bytecodes::_fast_agetfield, G3_scratch, G4_scratch);
2261 }
2262 __ ba(checkVolatile);
2263 __ delayed()->tst(Lscratch);
2265 __ bind(notObj);
2267 // cmp(Rflags, itos);
2268 __ br(Assembler::notEqual, false, Assembler::pt, notInt);
2269 __ delayed() ->cmp(Rflags, ltos);
2271 // itos
2272 __ ld(Rclass, Roffset, Otos_i);
2273 __ push(itos);
2274 if (!is_static) {
2275 patch_bytecode(Bytecodes::_fast_igetfield, G3_scratch, G4_scratch);
2276 }
2277 __ ba(checkVolatile);
2278 __ delayed()->tst(Lscratch);
2280 __ bind(notInt);
2282 // cmp(Rflags, ltos);
2283 __ br(Assembler::notEqual, false, Assembler::pt, notLong);
2284 __ delayed() ->cmp(Rflags, btos);
2286 // ltos
2287 // load must be atomic
2288 __ ld_long(Rclass, Roffset, Otos_l);
2289 __ push(ltos);
2290 if (!is_static) {
2291 patch_bytecode(Bytecodes::_fast_lgetfield, G3_scratch, G4_scratch);
2292 }
2293 __ ba(checkVolatile);
2294 __ delayed()->tst(Lscratch);
2296 __ bind(notLong);
2298 // cmp(Rflags, btos);
2299 __ br(Assembler::notEqual, false, Assembler::pt, notByte);
2300 __ delayed() ->cmp(Rflags, ctos);
2302 // btos
2303 __ ldsb(Rclass, Roffset, Otos_i);
2304 __ push(itos);
2305 if (!is_static) {
2306 patch_bytecode(Bytecodes::_fast_bgetfield, G3_scratch, G4_scratch);
2307 }
2308 __ ba(checkVolatile);
2309 __ delayed()->tst(Lscratch);
2311 __ bind(notByte);
2313 // cmp(Rflags, ctos);
2314 __ br(Assembler::notEqual, false, Assembler::pt, notChar);
2315 __ delayed() ->cmp(Rflags, stos);
2317 // ctos
2318 __ lduh(Rclass, Roffset, Otos_i);
2319 __ push(itos);
2320 if (!is_static) {
2321 patch_bytecode(Bytecodes::_fast_cgetfield, G3_scratch, G4_scratch);
2322 }
2323 __ ba(checkVolatile);
2324 __ delayed()->tst(Lscratch);
2326 __ bind(notChar);
2328 // cmp(Rflags, stos);
2329 __ br(Assembler::notEqual, false, Assembler::pt, notShort);
2330 __ delayed() ->cmp(Rflags, ftos);
2332 // stos
2333 __ ldsh(Rclass, Roffset, Otos_i);
2334 __ push(itos);
2335 if (!is_static) {
2336 patch_bytecode(Bytecodes::_fast_sgetfield, G3_scratch, G4_scratch);
2337 }
2338 __ ba(checkVolatile);
2339 __ delayed()->tst(Lscratch);
2341 __ bind(notShort);
2344 // cmp(Rflags, ftos);
2345 __ br(Assembler::notEqual, false, Assembler::pt, notFloat);
2346 __ delayed() ->tst(Lscratch);
2348 // ftos
2349 __ ldf(FloatRegisterImpl::S, Rclass, Roffset, Ftos_f);
2350 __ push(ftos);
2351 if (!is_static) {
2352 patch_bytecode(Bytecodes::_fast_fgetfield, G3_scratch, G4_scratch);
2353 }
2354 __ ba(checkVolatile);
2355 __ delayed()->tst(Lscratch);
2357 __ bind(notFloat);
2360 // dtos
2361 __ ldf(FloatRegisterImpl::D, Rclass, Roffset, Ftos_d);
2362 __ push(dtos);
2363 if (!is_static) {
2364 patch_bytecode(Bytecodes::_fast_dgetfield, G3_scratch, G4_scratch);
2365 }
2367 __ bind(checkVolatile);
2368 if (__ membar_has_effect(membar_bits)) {
2369 // __ tst(Lscratch); executed in delay slot
2370 __ br(Assembler::zero, false, Assembler::pt, exit);
2371 __ delayed()->nop();
2372 volatile_barrier(membar_bits);
2373 }
2375 __ bind(exit);
2376 }
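// The branch chain above is a switch on the tos state extracted from the
// flags word; as a sketch (illustrative):
//
//   switch (flags >> tos_state_shift) {
//     case atos: push_oop (load_heap_oop(obj + off)); break;
//     case itos: push_int (load_word    (obj + off)); break;
//     // btos/ctos/stos/ltos/ftos/dtos analogously
//   }
//   if (field_is_volatile) membar(LoadLoad | LoadStore);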
2379 void TemplateTable::getfield(int byte_no) {
2380 getfield_or_static(byte_no, false);
2381 }
2383 void TemplateTable::getstatic(int byte_no) {
2384 getfield_or_static(byte_no, true);
2385 }
2388 void TemplateTable::fast_accessfield(TosState state) {
2389 transition(atos, state);
2390 Register Rcache = G3_scratch;
2391 Register index = G4_scratch;
2392 Register Roffset = G4_scratch;
2393 Register Rflags = Rcache;
2394 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2396 __ get_cache_and_index_at_bcp(Rcache, index, 1);
2397 jvmti_post_field_access(Rcache, index, /*is_static*/false, /*has_tos*/true);
2399 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
2401 __ null_check(Otos_i);
2402 __ verify_oop(Otos_i);
2404 Label exit;
2406 Assembler::Membar_mask_bits membar_bits =
2407 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
2408 if (__ membar_has_effect(membar_bits)) {
2409 // Get volatile flag
2410 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Rflags);
2411 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
2412 }
2414 switch (bytecode()) {
2415 case Bytecodes::_fast_bgetfield:
2416 __ ldsb(Otos_i, Roffset, Otos_i);
2417 break;
2418 case Bytecodes::_fast_cgetfield:
2419 __ lduh(Otos_i, Roffset, Otos_i);
2420 break;
2421 case Bytecodes::_fast_sgetfield:
2422 __ ldsh(Otos_i, Roffset, Otos_i);
2423 break;
2424 case Bytecodes::_fast_igetfield:
2425 __ ld(Otos_i, Roffset, Otos_i);
2426 break;
2427 case Bytecodes::_fast_lgetfield:
2428 __ ld_long(Otos_i, Roffset, Otos_l);
2429 break;
2430 case Bytecodes::_fast_fgetfield:
2431 __ ldf(FloatRegisterImpl::S, Otos_i, Roffset, Ftos_f);
2432 break;
2433 case Bytecodes::_fast_dgetfield:
2434 __ ldf(FloatRegisterImpl::D, Otos_i, Roffset, Ftos_d);
2435 break;
2436 case Bytecodes::_fast_agetfield:
2437 __ load_heap_oop(Otos_i, Roffset, Otos_i);
2438 break;
2439 default:
2440 ShouldNotReachHere();
2441 }
2443 if (__ membar_has_effect(membar_bits)) {
2444 __ btst(Lscratch, Rflags);
2445 __ br(Assembler::zero, false, Assembler::pt, exit);
2446 __ delayed()->nop();
2447 volatile_barrier(membar_bits);
2448 __ bind(exit);
2449 }
2451 if (state == atos) {
2452 __ verify_oop(Otos_i); // does not blow flags!
2453 }
2454 }
2456 void TemplateTable::jvmti_post_fast_field_mod() {
2457 if (JvmtiExport::can_post_field_modification()) {
2458 // Check to see if a field modification watch has been set before we take
2459 // the time to call into the VM.
2460 Label done;
2461 AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr());
2462 __ load_contents(get_field_modification_count_addr, G4_scratch);
2463 __ cmp_and_br_short(G4_scratch, 0, Assembler::equal, Assembler::pt, done);
2464 __ pop_ptr(G4_scratch); // copy the object pointer from tos
2465 __ verify_oop(G4_scratch);
2466 __ push_ptr(G4_scratch); // put the object pointer back on tos
2467 __ get_cache_entry_pointer_at_bcp(G1_scratch, G3_scratch, 1);
2468 // Save tos values before call_VM() clobbers them. Since we have
2469 // to do it for every data type, we use the saved values as the
2470 // jvalue object.
2471 switch (bytecode()) {
2472 case Bytecodes::_fast_aputfield: __ push_ptr(Otos_i); break;
2473 case Bytecodes::_fast_bputfield: // fall through
2474 case Bytecodes::_fast_sputfield: // fall through
2475 case Bytecodes::_fast_cputfield: // fall through
2476 case Bytecodes::_fast_iputfield: __ push_i(Otos_i); break;
2477 case Bytecodes::_fast_dputfield: __ push_d(Ftos_d); break;
2478 case Bytecodes::_fast_fputfield: __ push_f(Ftos_f); break;
2479 // get words in right order for use as jvalue object
2480 case Bytecodes::_fast_lputfield: __ push_l(Otos_l); break;
2481 }
2482 // setup pointer to jvalue object
2483 __ mov(Lesp, G3_scratch); __ inc(G3_scratch, wordSize);
2484 // G4_scratch: object pointer
2485 // G1_scratch: cache entry pointer
2486 // G3_scratch: jvalue object on the stack
2487 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), G4_scratch, G1_scratch, G3_scratch);
2488 switch (bytecode()) { // restore tos values
2489 case Bytecodes::_fast_aputfield: __ pop_ptr(Otos_i); break;
2490 case Bytecodes::_fast_bputfield: // fall through
2491 case Bytecodes::_fast_sputfield: // fall through
2492 case Bytecodes::_fast_cputfield: // fall through
2493 case Bytecodes::_fast_iputfield: __ pop_i(Otos_i); break;
2494 case Bytecodes::_fast_dputfield: __ pop_d(Ftos_d); break;
2495 case Bytecodes::_fast_fputfield: __ pop_f(Ftos_f); break;
2496 case Bytecodes::_fast_lputfield: __ pop_l(Otos_l); break;
2497 }
2498 __ bind(done);
2499 }
2500 }
2502 // The registers Rcache and index are expected to be set before the call.
2503 // The function may destroy various registers, just not the Rcache and index registers.
2504 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register index, bool is_static) {
2505 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2507 if (JvmtiExport::can_post_field_modification()) {
2508 // Check to see if a field modification watch has been set before we take
2509 // the time to call into the VM.
2510 Label Label1;
2511 assert_different_registers(Rcache, index, G1_scratch);
2512 AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr());
2513 __ load_contents(get_field_modification_count_addr, G1_scratch);
2514 __ cmp_and_br_short(G1_scratch, 0, Assembler::zero, Assembler::pt, Label1);
2516 // The Rcache and index registers have already been set.
2517 // That would allow this call to be eliminated, but the Rcache and index
2518 // registers would then have to be used consistently after this line.
2519 __ get_cache_and_index_at_bcp(G1_scratch, G4_scratch, 1);
2521 __ add(G1_scratch, in_bytes(cp_base_offset), G3_scratch);
2522 if (is_static) {
2523 // Life is simple. Null out the object pointer.
2524 __ clr(G4_scratch);
2525 } else {
2526 Register Rflags = G1_scratch;
2527 // Life is harder. The stack holds the value on top, followed by the
2528 // object. We don't know the size of the value, though; it could be
2529 // one or two words depending on its type. As a result, we must find
2530 // the type to determine where the object is.
2532 Label two_word, valsizeknown;
2533 __ ld_ptr(G1_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
2534 __ mov(Lesp, G4_scratch);
2535 __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
2536 // Make sure we don't need to mask Rflags after the above shift
2537 ConstantPoolCacheEntry::verify_tos_state_shift();
2538 __ cmp(Rflags, ltos);
2539 __ br(Assembler::equal, false, Assembler::pt, two_word);
2540 __ delayed()->cmp(Rflags, dtos);
2541 __ br(Assembler::equal, false, Assembler::pt, two_word);
2542 __ delayed()->nop();
2543 __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(1));
2544 __ ba_short(valsizeknown);
2545 __ bind(two_word);
2547 __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(2));
2549 __ bind(valsizeknown);
2550 // setup object pointer
2551 __ ld_ptr(G4_scratch, 0, G4_scratch);
2552 __ verify_oop(G4_scratch);
2553 }
2554 // setup pointer to jvalue object
2555 __ mov(Lesp, G1_scratch); __ inc(G1_scratch, wordSize);
2556 // G4_scratch: object pointer or NULL if static
2557 // G3_scratch: cache entry pointer
2558 // G1_scratch: jvalue object on the stack
2559 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
2560 G4_scratch, G3_scratch, G1_scratch);
2561 __ get_cache_and_index_at_bcp(Rcache, index, 1);
2562 __ bind(Label1);
2563 }
2564 }
2566 void TemplateTable::pop_and_check_object(Register r) {
2567 __ pop_ptr(r);
2568 __ null_check(r); // for field access must check obj.
2569 __ verify_oop(r);
2570 }
2572 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
2573 transition(vtos, vtos);
2574 Register Rcache = G3_scratch;
2575 Register index = G4_scratch;
2576 Register Rclass = Rcache;
2577 Register Roffset= G4_scratch;
2578 Register Rflags = G1_scratch;
2579 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2581 resolve_cache_and_index(byte_no, Rcache, index, sizeof(u2));
2582 jvmti_post_field_mod(Rcache, index, is_static);
2583 load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);
2585 Assembler::Membar_mask_bits read_bits =
2586 Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore);
2587 Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad;
2589 Label notVolatile, checkVolatile, exit;
2590 if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
2591 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
2592 __ and3(Rflags, Lscratch, Lscratch);
2594 if (__ membar_has_effect(read_bits)) {
2595 __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile);
2596 volatile_barrier(read_bits);
2597 __ bind(notVolatile);
2598 }
2599 }
2601 __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
2602 // Make sure we don't need to mask Rflags after the above shift
2603 ConstantPoolCacheEntry::verify_tos_state_shift();
2605 // compute field type
2606 Label notInt, notShort, notChar, notObj, notByte, notLong, notFloat;
2608 if (is_static) {
2609 // putstatic with object type most likely, check that first
2610 __ cmp(Rflags, atos);
2611 __ br(Assembler::notEqual, false, Assembler::pt, notObj);
2612 __ delayed()->cmp(Rflags, itos);
2614 // atos
2615 {
2616 __ pop_ptr();
2617 __ verify_oop(Otos_i);
2618 do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
2619 __ ba(checkVolatile);
2620 __ delayed()->tst(Lscratch);
2621 }
2623 __ bind(notObj);
2624 // cmp(Rflags, itos);
2625 __ br(Assembler::notEqual, false, Assembler::pt, notInt);
2626 __ delayed()->cmp(Rflags, btos);
2628 // itos
2629 {
2630 __ pop_i();
2631 __ st(Otos_i, Rclass, Roffset);
2632 __ ba(checkVolatile);
2633 __ delayed()->tst(Lscratch);
2634 }
2636 __ bind(notInt);
2637 } else {
2638 // putfield with int type most likely, check that first
2639 __ cmp(Rflags, itos);
2640 __ br(Assembler::notEqual, false, Assembler::pt, notInt);
2641 __ delayed()->cmp(Rflags, atos);
2643 // itos
2644 {
2645 __ pop_i();
2646 pop_and_check_object(Rclass);
2647 __ st(Otos_i, Rclass, Roffset);
2648 patch_bytecode(Bytecodes::_fast_iputfield, G3_scratch, G4_scratch, true, byte_no);
2649 __ ba(checkVolatile);
2650 __ delayed()->tst(Lscratch);
2651 }
2653 __ bind(notInt);
2654 // cmp(Rflags, atos);
2655 __ br(Assembler::notEqual, false, Assembler::pt, notObj);
2656 __ delayed()->cmp(Rflags, btos);
2658 // atos
2659 {
2660 __ pop_ptr();
2661 pop_and_check_object(Rclass);
2662 __ verify_oop(Otos_i);
2663 do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
2664 patch_bytecode(Bytecodes::_fast_aputfield, G3_scratch, G4_scratch, true, byte_no);
2665 __ ba(checkVolatile);
2666 __ delayed()->tst(Lscratch);
2667 }
2669 __ bind(notObj);
2670 }
2672 // cmp(Rflags, btos);
2673 __ br(Assembler::notEqual, false, Assembler::pt, notByte);
2674 __ delayed()->cmp(Rflags, ltos);
2676 // btos
2677 {
2678 __ pop_i();
2679 if (!is_static) pop_and_check_object(Rclass);
2680 __ stb(Otos_i, Rclass, Roffset);
2681 if (!is_static) {
2682 patch_bytecode(Bytecodes::_fast_bputfield, G3_scratch, G4_scratch, true, byte_no);
2683 }
2684 __ ba(checkVolatile);
2685 __ delayed()->tst(Lscratch);
2686 }
2688 __ bind(notByte);
2689 // cmp(Rflags, ltos);
2690 __ br(Assembler::notEqual, false, Assembler::pt, notLong);
2691 __ delayed()->cmp(Rflags, ctos);
2693 // ltos
2694 {
2695 __ pop_l();
2696 if (!is_static) pop_and_check_object(Rclass);
2697 __ st_long(Otos_l, Rclass, Roffset);
2698 if (!is_static) {
2699 patch_bytecode(Bytecodes::_fast_lputfield, G3_scratch, G4_scratch, true, byte_no);
2700 }
2701 __ ba(checkVolatile);
2702 __ delayed()->tst(Lscratch);
2703 }
2705 __ bind(notLong);
2706 // cmp(Rflags, ctos);
2707 __ br(Assembler::notEqual, false, Assembler::pt, notChar);
2708 __ delayed()->cmp(Rflags, stos);
2710 // ctos (char)
2711 {
2712 __ pop_i();
2713 if (!is_static) pop_and_check_object(Rclass);
2714 __ sth(Otos_i, Rclass, Roffset);
2715 if (!is_static) {
2716 patch_bytecode(Bytecodes::_fast_cputfield, G3_scratch, G4_scratch, true, byte_no);
2717 }
2718 __ ba(checkVolatile);
2719 __ delayed()->tst(Lscratch);
2720 }
2722 __ bind(notChar);
2723 // cmp(Rflags, stos);
2724 __ br(Assembler::notEqual, false, Assembler::pt, notShort);
2725 __ delayed()->cmp(Rflags, ftos);
2727 // stos (short)
2728 {
2729 __ pop_i();
2730 if (!is_static) pop_and_check_object(Rclass);
2731 __ sth(Otos_i, Rclass, Roffset);
2732 if (!is_static) {
2733 patch_bytecode(Bytecodes::_fast_sputfield, G3_scratch, G4_scratch, true, byte_no);
2734 }
2735 __ ba(checkVolatile);
2736 __ delayed()->tst(Lscratch);
2737 }
2739 __ bind(notShort);
2740 // cmp(Rflags, ftos);
2741 __ br(Assembler::notZero, false, Assembler::pt, notFloat);
2742 __ delayed()->nop();
2744 // ftos
2745 {
2746 __ pop_f();
2747 if (!is_static) pop_and_check_object(Rclass);
2748 __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
2749 if (!is_static) {
2750 patch_bytecode(Bytecodes::_fast_fputfield, G3_scratch, G4_scratch, true, byte_no);
2751 }
2752 __ ba(checkVolatile);
2753 __ delayed()->tst(Lscratch);
2754 }
2756 __ bind(notFloat);
2758 // dtos
2759 {
2760 __ pop_d();
2761 if (!is_static) pop_and_check_object(Rclass);
2762 __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
2763 if (!is_static) {
2764 patch_bytecode(Bytecodes::_fast_dputfield, G3_scratch, G4_scratch, true, byte_no);
2765 }
2766 }
2768 __ bind(checkVolatile);
2769 __ tst(Lscratch);
2771 if (__ membar_has_effect(write_bits)) {
2772 // __ tst(Lscratch); in delay slot
2773 __ br(Assembler::zero, false, Assembler::pt, exit);
2774 __ delayed()->nop();
2775 volatile_barrier(Assembler::StoreLoad);
2776 __ bind(exit);
2777 }
2778 }
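// Barrier placement for a volatile field store, as generated above (sketch;
// on TSO only the trailing StoreLoad actually emits anything):
//
//   membar(LoadStore | StoreStore);  // keep earlier refs before the store
//   store_field(obj + off, value);
//   membar(StoreLoad);               // covers the volatile-store/volatile-load case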
2780 void TemplateTable::fast_storefield(TosState state) {
2781 transition(state, vtos);
2782 Register Rcache = G3_scratch;
2783 Register Rclass = Rcache;
2784 Register Roffset= G4_scratch;
2785 Register Rflags = G1_scratch;
2786 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2788 jvmti_post_fast_field_mod();
2790 __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 1);
2792 Assembler::Membar_mask_bits read_bits =
2793 Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore);
2794 Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad;
2796 Label notVolatile, checkVolatile, exit;
2797 if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
2798 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
2799 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
2800 __ and3(Rflags, Lscratch, Lscratch);
2801 if (__ membar_has_effect(read_bits)) {
2802 __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile);
2803 volatile_barrier(read_bits);
2804 __ bind(notVolatile);
2805 }
2806 }
2808 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
2809 pop_and_check_object(Rclass);
2811 switch (bytecode()) {
2812 case Bytecodes::_fast_bputfield: __ stb(Otos_i, Rclass, Roffset); break;
2813 case Bytecodes::_fast_cputfield: /* fall through */
2814 case Bytecodes::_fast_sputfield: __ sth(Otos_i, Rclass, Roffset); break;
2815 case Bytecodes::_fast_iputfield: __ st(Otos_i, Rclass, Roffset); break;
2816 case Bytecodes::_fast_lputfield: __ st_long(Otos_l, Rclass, Roffset); break;
2817 case Bytecodes::_fast_fputfield:
2818 __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
2819 break;
2820 case Bytecodes::_fast_dputfield:
2821 __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
2822 break;
2823 case Bytecodes::_fast_aputfield:
2824 do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
2825 break;
2826 default:
2827 ShouldNotReachHere();
2828 }
2830 if (__ membar_has_effect(write_bits)) {
2831 __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, exit);
2832 volatile_barrier(Assembler::StoreLoad);
2833 __ bind(exit);
2834 }
2835 }
2838 void TemplateTable::putfield(int byte_no) {
2839 putfield_or_static(byte_no, false);
2840 }
2842 void TemplateTable::putstatic(int byte_no) {
2843 putfield_or_static(byte_no, true);
2844 }
2847 void TemplateTable::fast_xaccess(TosState state) {
2848 transition(vtos, state);
2849 Register Rcache = G3_scratch;
2850 Register Roffset = G4_scratch;
2851 Register Rflags = G4_scratch;
2852 Register Rreceiver = Lscratch;
2854 __ ld_ptr(Llocals, 0, Rreceiver);
2856 // access constant pool cache (it is already resolved)
2857 __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 2);
2858 __ ld_ptr(Rcache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset(), Roffset);
2859 __ add(Lbcp, 1, Lbcp); // needed to report exception at the correct bcp
2861 __ verify_oop(Rreceiver);
2862 __ null_check(Rreceiver);
2863 if (state == atos) {
2864 __ load_heap_oop(Rreceiver, Roffset, Otos_i);
2865 } else if (state == itos) {
2866 __ ld (Rreceiver, Roffset, Otos_i) ;
2867 } else if (state == ftos) {
2868 __ ldf(FloatRegisterImpl::S, Rreceiver, Roffset, Ftos_f);
2869 } else {
2870 ShouldNotReachHere();
2871 }
2873 Assembler::Membar_mask_bits membar_bits =
2874 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
2875 if (__ membar_has_effect(membar_bits)) {
2877 // Get is_volatile value in Rflags and check if membar is needed
2878 __ ld_ptr(Rcache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset(), Rflags);
2880 // Test volatile
2881 Label notVolatile;
2882 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
2883 __ btst(Rflags, Lscratch);
2884 __ br(Assembler::zero, false, Assembler::pt, notVolatile);
2885 __ delayed()->nop();
2886 volatile_barrier(membar_bits);
2887 __ bind(notVolatile);
2888 }
2890 __ interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
2891 __ sub(Lbcp, 1, Lbcp);
2892 }
2894 //----------------------------------------------------------------------------------------------------
2895 // Calls
2897 void TemplateTable::count_calls(Register method, Register temp) {
2898 // implemented elsewhere
2899 ShouldNotReachHere();
2900 }
2902 void TemplateTable::prepare_invoke(int byte_no,
2903 Register method, // linked method (or i-klass)
2904 Register ra, // return address
2905 Register index, // itable index, MethodType, etc.
2906 Register recv, // if caller wants to see it
2907 Register flags // if caller wants to test it
2908 ) {
2909 // determine flags
2910 const Bytecodes::Code code = bytecode();
2911 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
2912 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
2913 const bool is_invokehandle = code == Bytecodes::_invokehandle;
2914 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
2915 const bool is_invokespecial = code == Bytecodes::_invokespecial;
2916 const bool load_receiver = (recv != noreg);
2917 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
2918 assert(recv == noreg || recv == O0, "");
2919 assert(flags == noreg || flags == O1, "");
2921 // setup registers & access constant pool cache
2922 if (recv == noreg) recv = O0;
2923 if (flags == noreg) flags = O1;
2924 const Register temp = O2;
2925 assert_different_registers(method, ra, index, recv, flags, temp);
2927 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
2929 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
2931 // maybe push appendix to arguments
2932 if (is_invokedynamic || is_invokehandle) {
2933 Label L_no_push;
2934 __ set((1 << ConstantPoolCacheEntry::has_appendix_shift), temp);
2935 __ btst(flags, temp);
2936 __ br(Assembler::zero, false, Assembler::pt, L_no_push);
2937 __ delayed()->nop();
2938 // Push the appendix as a trailing parameter.
2939 // This must be done before we get the receiver,
2940 // since the parameter_size includes it.
2941 assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
2942 __ load_resolved_reference_at_index(temp, index);
2943 __ verify_oop(temp);
2944 __ push_ptr(temp); // push appendix (MethodType, CallSite, etc.)
2945 __ bind(L_no_push);
2946 }
2948 // load receiver if needed (after appendix is pushed so parameter size is correct)
2949 if (load_receiver) {
2950 __ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, temp); // get parameter size
2951 __ load_receiver(temp, recv); // __ argument_address uses Gargs but we need Lesp
2952 __ verify_oop(recv);
2953 }
2955 // compute return type
2956 __ srl(flags, ConstantPoolCacheEntry::tos_state_shift, ra);
2957 // Make sure we don't need to mask flags after the above shift
2958 ConstantPoolCacheEntry::verify_tos_state_shift();
2959 // load return address
2960 {
2961 const address table_addr = (is_invokeinterface || is_invokedynamic) ?
2962 (address)Interpreter::return_5_addrs_by_index_table() :
2963 (address)Interpreter::return_3_addrs_by_index_table();
2964 AddressLiteral table(table_addr);
2965 __ set(table, temp);
2966 __ sll(ra, LogBytesPerWord, ra);
2967 __ ld_ptr(Address(temp, ra), ra);
2968 }
2969 }
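// Decoding of the cp-cache flags word by prepare_invoke, in outline (sketch;
// shift/mask names from ConstantPoolCacheEntry):
//
//   if (flags & (1 << has_appendix_shift))
//     push(resolved_references[f2]);                  // trailing appendix arg
//   receiver = stack[flags & parameter_size_mask];    // if load_receiver
//   ret_type = flags >> tos_state_shift;              // indexes the return table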
2972 void TemplateTable::generate_vtable_call(Register Rrecv, Register Rindex, Register Rret) {
2973 Register Rtemp = G4_scratch;
2974 Register Rcall = Rindex;
2975 assert_different_registers(Rcall, G5_method, Gargs, Rret);
2977 // get target Method* & entry point
2978 __ lookup_virtual_method(Rrecv, Rindex, G5_method);
2979 __ call_from_interpreter(Rcall, Gargs, Rret);
2980 }
2982 void TemplateTable::invokevirtual(int byte_no) {
2983 transition(vtos, vtos);
2984 assert(byte_no == f2_byte, "use this argument");
2986 Register Rscratch = G3_scratch;
2987 Register Rtemp = G4_scratch;
2988 Register Rret = Lscratch;
2989 Register O0_recv = O0;
2990 Label notFinal;
2992 load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, true, false, false);
2993 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
2995 // Check for vfinal
2996 __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), G4_scratch);
2997 __ btst(Rret, G4_scratch);
2998 __ br(Assembler::zero, false, Assembler::pt, notFinal);
2999 __ delayed()->and3(Rret, 0xFF, G4_scratch); // gets number of parameters
3001 patch_bytecode(Bytecodes::_fast_invokevfinal, Rscratch, Rtemp);
3003 invokevfinal_helper(Rscratch, Rret);
3005 __ bind(notFinal);
3007 __ mov(G5_method, Rscratch); // better scratch register
3008 __ load_receiver(G4_scratch, O0_recv); // gets receiverOop
3009 // receiver is in O0_recv
3010 __ verify_oop(O0_recv);
3012 // get return address
3013 AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
3014 __ set(table, Rtemp);
3015 __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret); // get return type
3016 // Make sure we don't need to mask Rret after the above shift
3017 ConstantPoolCacheEntry::verify_tos_state_shift();
3018 __ sll(Rret, LogBytesPerWord, Rret);
3019 __ ld_ptr(Rtemp, Rret, Rret); // get return address
3021 // get receiver klass
3022 __ null_check(O0_recv, oopDesc::klass_offset_in_bytes());
3023 __ load_klass(O0_recv, O0_recv);
3024 __ verify_klass_ptr(O0_recv);
3026 __ profile_virtual_call(O0_recv, O4);
3028 generate_vtable_call(O0_recv, Rscratch, Rret);
3029 }
3031 void TemplateTable::fast_invokevfinal(int byte_no) {
3032 transition(vtos, vtos);
3033 assert(byte_no == f2_byte, "use this argument");
3035 load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Lscratch, true,
3036 /*is_invokevfinal*/true, false);
3037 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
3038 invokevfinal_helper(G3_scratch, Lscratch);
3039 }
3041 void TemplateTable::invokevfinal_helper(Register Rscratch, Register Rret) {
3042 Register Rtemp = G4_scratch;
3044 // Load receiver from stack slot
3045 __ ld_ptr(G5_method, in_bytes(Method::const_offset()), G4_scratch);
3046 __ lduh(G4_scratch, in_bytes(ConstMethod::size_of_parameters_offset()), G4_scratch);
3047 __ load_receiver(G4_scratch, O0);
3049 // receiver NULL check
3050 __ null_check(O0);
3052 __ profile_final_call(O4);
3054 // get return address
3055 AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
3056 __ set(table, Rtemp);
3057 __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret); // get return type
3058 // Make sure we don't need to mask Rret after the above shift
3059 ConstantPoolCacheEntry::verify_tos_state_shift();
3060 __ sll(Rret, LogBytesPerWord, Rret);
3061 __ ld_ptr(Rtemp, Rret, Rret); // get return address
3064 // do the call
3065 __ call_from_interpreter(Rscratch, Gargs, Rret);
3066 }
3069 void TemplateTable::invokespecial(int byte_no) {
3070 transition(vtos, vtos);
3071 assert(byte_no == f1_byte, "use this argument");
3073 const Register Rret = Lscratch;
3074 const Register O0_recv = O0;
3075 const Register Rscratch = G3_scratch;
3077 prepare_invoke(byte_no, G5_method, Rret, noreg, O0_recv); // get receiver also for null check
3078 __ null_check(O0_recv);
3080 // do the call
3081 __ profile_call(O4);
3082 __ call_from_interpreter(Rscratch, Gargs, Rret);
3083 }
3086 void TemplateTable::invokestatic(int byte_no) {
3087 transition(vtos, vtos);
3088 assert(byte_no == f1_byte, "use this argument");
3090 const Register Rret = Lscratch;
3091 const Register Rscratch = G3_scratch;
3093 prepare_invoke(byte_no, G5_method, Rret); // get f1 Method*
3095 // do the call
3096 __ profile_call(O4);
3097 __ call_from_interpreter(Rscratch, Gargs, Rret);
3098 }
3100 void TemplateTable::invokeinterface_object_method(Register RKlass,
3101 Register Rcall,
3102 Register Rret,
3103 Register Rflags) {
3104 Register Rscratch = G4_scratch;
3105 Register Rindex = Lscratch;
3107 assert_different_registers(Rscratch, Rindex, Rret);
3109 Label notFinal;
3111 // Check for vfinal
3112 __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), Rscratch);
3113 __ btst(Rflags, Rscratch);
3114 __ br(Assembler::zero, false, Assembler::pt, notFinal);
3115 __ delayed()->nop();
3117 __ profile_final_call(O4);
3119 // do the call - the index (f2) contains the Method*
3120 assert_different_registers(G5_method, Gargs, Rcall);
3121 __ mov(Rindex, G5_method);
3122 __ call_from_interpreter(Rcall, Gargs, Rret);
3123 __ bind(notFinal);
3125 __ profile_virtual_call(RKlass, O4);
3126 generate_vtable_call(RKlass, Rindex, Rret);
3127 }
3130 void TemplateTable::invokeinterface(int byte_no) {
3131 transition(vtos, vtos);
3132 assert(byte_no == f1_byte, "use this argument");
3134 const Register Rinterface = G1_scratch;
3135 const Register Rret = G3_scratch;
3136 const Register Rindex = Lscratch;
3137 const Register O0_recv = O0;
3138 const Register O1_flags = O1;
3139 const Register O2_Klass = O2;
3140 const Register Rscratch = G4_scratch;
3141 assert_different_registers(Rscratch, G5_method);
3143 prepare_invoke(byte_no, Rinterface, Rret, Rindex, O0_recv, O1_flags);
3145 // get receiver klass
3146 __ null_check(O0_recv, oopDesc::klass_offset_in_bytes());
3147 __ load_klass(O0_recv, O2_Klass);
3149 // Special case of invokeinterface called for virtual method of
3150 // java.lang.Object. See cpCacheOop.cpp for details.
3151 // This code isn't produced by javac, but could be produced by
3152 // another compliant Java compiler.
3153 Label notMethod;
3154 __ set((1 << ConstantPoolCacheEntry::is_forced_virtual_shift), Rscratch);
3155 __ btst(O1_flags, Rscratch);
3156 __ br(Assembler::zero, false, Assembler::pt, notMethod);
3157 __ delayed()->nop();
3159 invokeinterface_object_method(O2_Klass, Rinterface, Rret, O1_flags);
3161 __ bind(notMethod);
3163 __ profile_virtual_call(O2_Klass, O4);
3165 //
3166 // find entry point to call
3167 //
3169 // compute start of first itableOffsetEntry (which is at end of vtable)
3170 const int base = InstanceKlass::vtable_start_offset() * wordSize;
3171 Label search;
3172 Register Rtemp = O1_flags;
3174 __ ld(O2_Klass, InstanceKlass::vtable_length_offset() * wordSize, Rtemp);
3175 if (align_object_offset(1) > 1) {
3176 __ round_to(Rtemp, align_object_offset(1));
3177 }
3178 __ sll(Rtemp, LogBytesPerWord, Rtemp); // Rtemp *= wordSize
3179 if (Assembler::is_simm13(base)) {
3180 __ add(Rtemp, base, Rtemp);
3181 } else {
3182 __ set(base, Rscratch);
3183 __ add(Rscratch, Rtemp, Rtemp);
3184 }
3185 __ add(O2_Klass, Rtemp, Rscratch);
3187 __ bind(search);
3189 __ ld_ptr(Rscratch, itableOffsetEntry::interface_offset_in_bytes(), Rtemp);
3190 {
3191 Label ok;
3193 // Check that entry is non-null. Null entries are probably a bytecode
3194 // problem. If the interface isn't implemented by the receiver class,
3195 // the VM should throw IncompatibleClassChangeError. linkResolver checks
3196 // this too but that's only if the entry isn't already resolved, so we
3197 // need to check again.
3198 __ br_notnull_short( Rtemp, Assembler::pt, ok);
3199 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
3200 __ should_not_reach_here();
3201 __ bind(ok);
3202 }
3204 __ cmp(Rinterface, Rtemp);
3205 __ brx(Assembler::notEqual, true, Assembler::pn, search);
3206 __ delayed()->add(Rscratch, itableOffsetEntry::size() * wordSize, Rscratch);
3208 // entry found and Rscratch points to it
3209 __ ld(Rscratch, itableOffsetEntry::offset_offset_in_bytes(), Rscratch);
3211 assert(itableMethodEntry::method_offset_in_bytes() == 0, "adjust instruction below");
3212 __ sll(Rindex, exact_log2(itableMethodEntry::size() * wordSize), Rindex); // Rindex *= entry size in bytes
3213 __ add(Rscratch, Rindex, Rscratch);
3214 __ ld_ptr(O2_Klass, Rscratch, G5_method);
3216 // Check for abstract method error.
3217 {
3218 Label ok;
3219 __ br_notnull_short(G5_method, Assembler::pt, ok);
3220 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3221 __ should_not_reach_here();
3222 __ bind(ok);
3223 }
3225 Register Rcall = Rinterface;
3226 assert_different_registers(Rcall, G5_method, Gargs, Rret);
3228 __ call_from_interpreter(Rcall, Gargs, Rret);
3229 }
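// The itable scan above corresponds to (sketch, simplified from the real
// klassItable layout):
//
//   itableOffsetEntry* ioe = first_offset_entry(klass);  // just past the vtable
//   while (ioe->interface() != resolved_interface) {
//     if (ioe->interface() == NULL) throw IncompatibleClassChangeError;
//     ioe++;
//   }
//   Method* m = *(Method**)((u1*)klass + ioe->offset()
//                           + itable_index * itableMethodEntry::size() * wordSize);
//   if (m == NULL) throw AbstractMethodError;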
3231 void TemplateTable::invokehandle(int byte_no) {
3232 transition(vtos, vtos);
3233 assert(byte_no == f1_byte, "use this argument");
3235 if (!EnableInvokeDynamic) {
3236 // rewriter does not generate this bytecode
3237 __ should_not_reach_here();
3238 return;
3239 }
3241 const Register Rret = Lscratch;
3242 const Register G4_mtype = G4_scratch;
3243 const Register O0_recv = O0;
3244 const Register Rscratch = G3_scratch;
3246 prepare_invoke(byte_no, G5_method, Rret, G4_mtype, O0_recv);
3247 __ null_check(O0_recv);
3249 // G4: MethodType object (from cpool->resolved_references[f1], if necessary)
3250 // G5: MH.invokeExact_MT method (from f2)
3252 // Note: G4_mtype is already pushed (if necessary) by prepare_invoke
3254 // do the call
3255 __ verify_oop(G4_mtype);
3256 __ profile_final_call(O4); // FIXME: profile the LambdaForm also
3257 __ call_from_interpreter(Rscratch, Gargs, Rret);
3258 }
3261 void TemplateTable::invokedynamic(int byte_no) {
3262 transition(vtos, vtos);
3263 assert(byte_no == f1_byte, "use this argument");
3265 if (!EnableInvokeDynamic) {
3266 // We should not encounter this bytecode if !EnableInvokeDynamic.
3267 // The verifier will stop it. However, if we get past the verifier,
3268 // this will stop the thread in a reasonable way, without crashing the JVM.
3269 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3270 InterpreterRuntime::throw_IncompatibleClassChangeError));
3271 // the call_VM checks for exception, so we should never return here.
3272 __ should_not_reach_here();
3273 return;
3274 }
3276 const Register Rret = Lscratch;
3277 const Register G4_callsite = G4_scratch;
3278 const Register Rscratch = G3_scratch;
3280 prepare_invoke(byte_no, G5_method, Rret, G4_callsite);
3282 // G4: CallSite object (from cpool->resolved_references[f1])
3283 // G5: MH.linkToCallSite method (from f2)
3285 // Note: G4_callsite is already pushed by prepare_invoke
3287 // %%% should make a type profile for any invokedynamic that takes a ref argument
3288 // profile this call
3289 __ profile_call(O4);
3291 // do the call
3292 __ verify_oop(G4_callsite);
3293 __ call_from_interpreter(Rscratch, Gargs, Rret);
3294 }
3297 //----------------------------------------------------------------------------------------------------
3298 // Allocation
3300 void TemplateTable::_new() {
3301 transition(vtos, atos);
3303 Label slow_case;
3304 Label done;
3305 Label initialize_header;
3306 Label initialize_object; // including clearing the fields
3308 Register RallocatedObject = Otos_i;
3309 Register RinstanceKlass = O1;
3310 Register Roffset = O3;
3311 Register Rscratch = O4;
3313 __ get_2_byte_integer_at_bcp(1, Rscratch, Roffset, InterpreterMacroAssembler::Unsigned);
3314 __ get_cpool_and_tags(Rscratch, G3_scratch);
3315 // make sure the class we're about to instantiate has been resolved
3316 // This is done before loading the InstanceKlass to be consistent with the
3317 // order in which the constant pool is updated (see ConstantPool::klass_at_put)
3318 __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
3319 __ ldub(G3_scratch, Roffset, G3_scratch);
3320 __ cmp(G3_scratch, JVM_CONSTANT_Class);
3321 __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
3322 __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
3323 // get InstanceKlass
3324 //__ sll(Roffset, LogBytesPerWord, Roffset); // executed in delay slot
3325 __ add(Roffset, sizeof(ConstantPool), Roffset);
3326 __ ld_ptr(Rscratch, Roffset, RinstanceKlass);
3328 // make sure klass is fully initialized:
3329 __ ldub(RinstanceKlass, in_bytes(InstanceKlass::init_state_offset()), G3_scratch);
3330 __ cmp(G3_scratch, InstanceKlass::fully_initialized);
3331 __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
3332 __ delayed()->ld(RinstanceKlass, in_bytes(Klass::layout_helper_offset()), Roffset);
3334 // get instance_size in InstanceKlass (already aligned)
3335 //__ ld(RinstanceKlass, in_bytes(Klass::layout_helper_offset()), Roffset);
3337 // make sure the klass does not have a finalizer and is not abstract, an interface, or java/lang/Class
3338 __ btst(Klass::_lh_instance_slow_path_bit, Roffset);
3339 __ br(Assembler::notZero, false, Assembler::pn, slow_case);
3340 __ delayed()->nop();
3342 // allocate the instance
3343 // 1) Try to allocate in the TLAB
3344 // 2) if that fails, and the TLAB is not full enough to discard, allocate in the shared Eden
3345 // 3) if the above fails (or is not applicable), go to a slow case
3346 // (creates a new TLAB, etc.)
3348 const bool allow_shared_alloc =
3349 Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
3351 if (UseTLAB) {
3352 Register RoldTopValue = RallocatedObject;
3353 Register RtlabWasteLimitValue = G3_scratch;
3354 Register RnewTopValue = G1_scratch;
3355 Register RendValue = Rscratch;
3356 Register RfreeValue = RnewTopValue;
3358 // check if we can allocate in the TLAB
3359 __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), RoldTopValue); // sets up RallocatedObject
3360 __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), RendValue);
3361 __ add(RoldTopValue, Roffset, RnewTopValue);
3363 // if there is enough space, we do not CAS and do not clear
3364 __ cmp(RnewTopValue, RendValue);
3365 if (ZeroTLAB) {
3366 // the fields have already been cleared
3367 __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_header);
3368 } else {
3369 // initialize both the header and fields
3370 __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_object);
3371 }
3372 __ delayed()->st_ptr(RnewTopValue, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
3374 if (allow_shared_alloc) {
3375 // Check if tlab should be discarded (refill_waste_limit >= free)
3376 __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), RtlabWasteLimitValue);
3377 __ sub(RendValue, RoldTopValue, RfreeValue);
3378 #ifdef _LP64
3379 __ srlx(RfreeValue, LogHeapWordSize, RfreeValue);
3380 #else
3381 __ srl(RfreeValue, LogHeapWordSize, RfreeValue);
3382 #endif
3383 __ cmp_and_brx_short(RtlabWasteLimitValue, RfreeValue, Assembler::greaterEqualUnsigned, Assembler::pt, slow_case); // tlab waste is small
3385 // increment waste limit to prevent getting stuck on this slow path
3386 __ add(RtlabWasteLimitValue, ThreadLocalAllocBuffer::refill_waste_limit_increment(), RtlabWasteLimitValue);
3387 __ st_ptr(RtlabWasteLimitValue, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
3388 } else {
3389 // No allocation in the shared eden.
3390 __ ba_short(slow_case);
3391 }
3392 }
3394 // Allocation in the shared Eden
3395 if (allow_shared_alloc) {
3396 Register RoldTopValue = G1_scratch;
3397 Register RtopAddr = G3_scratch;
3398 Register RnewTopValue = RallocatedObject;
3399 Register RendValue = Rscratch;
3401 __ set((intptr_t)Universe::heap()->top_addr(), RtopAddr);
3403 Label retry;
3404 __ bind(retry);
3405 __ set((intptr_t)Universe::heap()->end_addr(), RendValue);
3406 __ ld_ptr(RendValue, 0, RendValue);
3407 __ ld_ptr(RtopAddr, 0, RoldTopValue);
3408 __ add(RoldTopValue, Roffset, RnewTopValue);
3410 // RnewTopValue contains the top address after the new object
3411 // has been allocated.
3412 __ cmp_and_brx_short(RnewTopValue, RendValue, Assembler::greaterUnsigned, Assembler::pn, slow_case);
3414 __ casx_under_lock(RtopAddr, RoldTopValue, RnewTopValue,
3415 VM_Version::v9_instructions_work() ? NULL :
3416 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
3418 // if someone beat us on the allocation, try again, otherwise continue
3419 __ cmp_and_brx_short(RoldTopValue, RnewTopValue, Assembler::notEqual, Assembler::pn, retry);
3421 // bump total bytes allocated by this thread
3422 // RoldTopValue and RtopAddr are dead, so can use G1 and G3
3423 __ incr_allocated_bytes(Roffset, G1_scratch, G3_scratch);
3424 }
3426 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
3427 // clear object fields
3428 __ bind(initialize_object);
3429 __ deccc(Roffset, sizeof(oopDesc));
3430 __ br(Assembler::zero, false, Assembler::pt, initialize_header);
3431 __ delayed()->add(RallocatedObject, sizeof(oopDesc), G3_scratch);
3433 // initialize remaining object fields
3434 if (UseBlockZeroing) {
3435 // Use BIS for zeroing
3436 __ bis_zeroing(G3_scratch, Roffset, G1_scratch, initialize_header);
3437 } else {
3438 Label loop;
3439 __ subcc(Roffset, wordSize, Roffset);
3440 __ bind(loop);
3441 //__ subcc(Roffset, wordSize, Roffset); // executed above loop or in delay slot
3442 __ st_ptr(G0, G3_scratch, Roffset);
3443 __ br(Assembler::notEqual, false, Assembler::pt, loop);
3444 __ delayed()->subcc(Roffset, wordSize, Roffset);
3445 }
3446 __ ba_short(initialize_header);
3447 }
3449 // slow case
3450 __ bind(slow_case);
3451 __ get_2_byte_integer_at_bcp(1, G3_scratch, O2, InterpreterMacroAssembler::Unsigned);
3452 __ get_constant_pool(O1);
3454 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), O1, O2);
3456 __ ba_short(done);
3458 // Initialize the header: mark, klass
3459 __ bind(initialize_header);
3461 if (UseBiasedLocking) {
3462 __ ld_ptr(RinstanceKlass, in_bytes(Klass::prototype_header_offset()), G4_scratch);
3463 } else {
3464 __ set((intptr_t)markOopDesc::prototype(), G4_scratch);
3465 }
3466 __ st_ptr(G4_scratch, RallocatedObject, oopDesc::mark_offset_in_bytes()); // mark
3467 __ store_klass_gap(G0, RallocatedObject); // klass gap if compressed
3468 __ store_klass(RinstanceKlass, RallocatedObject); // klass (last for cms)
3470 {
3471 SkipIfEqual skip_if(
3472 _masm, G4_scratch, &DTraceAllocProbes, Assembler::zero);
3473 // Trigger dtrace event
3474 __ push(atos);
3475 __ call_VM_leaf(noreg,
3476 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), O0);
3477 __ pop(atos);
3478 }
3480 // continue
3481 __ bind(done);
3482 }
3486 void TemplateTable::newarray() {
3487 transition(itos, atos);
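// The operand is the element type code; the array length is in Otos_i.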
3488 __ ldub(Lbcp, 1, O1);
3489 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), O1, Otos_i);
3490 }
3493 void TemplateTable::anewarray() {
3494 transition(itos, atos);
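// The operand is a 2-byte constant pool index of the element class; the
// array length is in Otos_i.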
3495 __ get_constant_pool(O1);
3496 __ get_2_byte_integer_at_bcp(1, G4_scratch, O2, InterpreterMacroAssembler::Unsigned);
3497 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), O1, O2, Otos_i);
3498 }
3501 void TemplateTable::arraylength() {
3502 transition(atos, itos);
3503 Label ok;
3504 __ verify_oop(Otos_i);
3505 __ tst(Otos_i);
3506 __ throw_if_not_1_x( Assembler::notZero, ok );
3507 __ delayed()->ld(Otos_i, arrayOopDesc::length_offset_in_bytes(), Otos_i);
3508 __ throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
3509 }
3512 void TemplateTable::checkcast() {
3513 transition(atos, atos);
3514 Label done, is_null, quicked, cast_ok, resolved;
3515 Register Roffset = G1_scratch;
3516 Register RobjKlass = O5;
3517 Register RspecifiedKlass = O4;
3519 // Check for casting a NULL
3520 __ br_null_short(Otos_i, Assembler::pn, is_null);
3522 // Get value klass in RobjKlass
3523 __ load_klass(Otos_i, RobjKlass); // get value klass
3525 // Get the constant pool index
3526 __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);
3528 // See if the checkcast has been quickened
3529 __ get_cpool_and_tags(Lscratch, G3_scratch);
3530 __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
3531 __ ldub(G3_scratch, Roffset, G3_scratch);
3532 __ cmp(G3_scratch, JVM_CONSTANT_Class);
3533 __ br(Assembler::equal, true, Assembler::pt, quicked);
3534 __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
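// The constant pool entry has not been resolved to a class yet; call the
// runtime to quicken it.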
3536 __ push_ptr(); // save receiver for result, and for GC
3537 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3538 __ get_vm_result_2(RspecifiedKlass);
3539 __ pop_ptr(Otos_i, G3_scratch); // restore receiver
3541 __ ba_short(resolved);
3543 // Extract target class from constant pool
3544 __ bind(quicked);
3545 __ add(Roffset, sizeof(ConstantPool), Roffset);
3546 __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
3547 __ bind(resolved);
3548 __ load_klass(Otos_i, RobjKlass); // get value klass
3550 // Generate a fast subtype check. Branch to cast_ok on success;
3551 // fall through and throw a ClassCastException on failure.
3552 __ gen_subtype_check( RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, cast_ok );
3554 // Not a subtype, so we must throw an exception
3555 __ throw_if_not_x( Assembler::never, Interpreter::_throw_ClassCastException_entry, G3_scratch );
3557 __ bind(cast_ok);
3559 if (ProfileInterpreter) {
3560 __ ba_short(done);
3561 }
3562 __ bind(is_null);
3563 __ profile_null_seen(G3_scratch);
3564 __ bind(done);
3565 }
3568 void TemplateTable::instanceof() {
3569 Label done, is_null, quicked, resolved;
3570 transition(atos, itos);
3571 Register Roffset = G1_scratch;
3572 Register RobjKlass = O5;
3573 Register RspecifiedKlass = O4;
3575 // Check for a NULL object (instanceof of NULL is false)
3576 __ br_null_short(Otos_i, Assembler::pt, is_null);
3578 // Get value klass in RobjKlass
3579 __ load_klass(Otos_i, RobjKlass); // get value klass
3581 // Get the constant pool index
3582 __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);
3584 // See if the instanceof has been quickened
3585 __ get_cpool_and_tags(Lscratch, G3_scratch);
3586 __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
3587 __ ldub(G3_scratch, Roffset, G3_scratch);
3588 __ cmp(G3_scratch, JVM_CONSTANT_Class);
3589 __ br(Assembler::equal, true, Assembler::pt, quicked);
3590 __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
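// Unresolved entry: have the runtime quicken it before the type check.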
3592 __ push_ptr(); // save receiver for result, and for GC
3593 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3594 __ get_vm_result_2(RspecifiedKlass);
3595 __ pop_ptr(Otos_i, G3_scratch); // restore receiver
3597 __ ba_short(resolved);
3599 // Extract target class from constant pool
3600 __ bind(quicked);
3601 __ add(Roffset, sizeof(ConstantPool), Roffset);
3602 __ get_constant_pool(Lscratch);
3603 __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
3604 __ bind(resolved);
3605 __ load_klass(Otos_i, RobjKlass); // get value klass
3607 // Generate a fast subtype check. Branch to done on success;
3608 // fall through and set the result to 0 on failure.
3609 __ or3(G0, 1, Otos_i); // set result assuming quick tests succeed
3610 __ gen_subtype_check( RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, done );
3611 // Not a subtype; return 0;
3612 __ clr( Otos_i );
3614 if (ProfileInterpreter) {
3615 __ ba_short(done);
3616 }
3617 __ bind(is_null);
3618 __ profile_null_seen(G3_scratch);
3619 __ bind(done);
3620 }
3622 void TemplateTable::_breakpoint() {
3624 // Note: We get here even if we are single-stepping;
3625 // jbug insists on setting breakpoints at every bytecode
3626 // even if we are in single-step mode.
3628 transition(vtos, vtos);
3629 // get the unpatched byte code
3630 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), Lmethod, Lbcp);
3631 __ mov(O0, Lbyte_code);
3633 // post the breakpoint event
3634 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), Lmethod, Lbcp);
3636 // complete the execution of original bytecode
3637 __ dispatch_normal(vtos);
3638 }
3641 //----------------------------------------------------------------------------------------------------
3642 // Exceptions
3644 void TemplateTable::athrow() {
3645 transition(atos, vtos);
3647 // This works because the exception is cached in Otos_i, which is the same
3648 // register as O0, which is what throw_exception_entry expects.
3649 assert(Otos_i == Oexception, "see explanation above");
3651 __ verify_oop(Otos_i);
3652 __ null_check(Otos_i);
3653 __ throw_if_not_x(Assembler::never, Interpreter::throw_exception_entry(), G3_scratch);
3654 }
3657 //----------------------------------------------------------------------------------------------------
3658 // Synchronization
3661 // See frame_sparc.hpp for monitor block layout.
3662 // Monitor elements are dynamically allocated by growing the stack as needed.
3664 void TemplateTable::monitorenter() {
3665 transition(atos, vtos);
3666 __ verify_oop(Otos_i);
3667 // Try to acquire a lock on the object
3668 // Repeat until we succeed (i.e., until
3669 // monitorenter returns true).
3671 { Label ok;
3672 __ tst(Otos_i);
3673 __ throw_if_not_1_x( Assembler::notZero, ok);
3674 __ delayed()->mov(Otos_i, Lscratch); // save obj
3675 __ throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
3676 }
3678 assert(O0 == Otos_i, "Be sure where the object to lock is");
3680 // find a free slot in the monitor block
3683 // initialize entry pointer
3684 __ clr(O1); // points to free slot or NULL
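// Scan the monitor block: remember a free slot in O1 and stop early if an
// entry for this object is found.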
3686 {
3687 Label entry, loop, exit;
3688 __ add( __ top_most_monitor(), O2 ); // last one to check
3689 __ ba( entry );
3690 __ delayed()->mov( Lmonitors, O3 ); // first one to check
3693 __ bind( loop );
3695 __ verify_oop(O4); // verify each monitor's oop
3696 __ tst(O4); // is this entry unused?
3697 if (VM_Version::v9_instructions_work())
3698 __ movcc( Assembler::zero, false, Assembler::ptr_cc, O3, O1);
3699 else {
3700 Label L;
3701 __ br( Assembler::zero, true, Assembler::pn, L );
3702 __ delayed()->mov(O3, O1); // remember this one if it matches
3703 __ bind(L);
3704 }
3706 __ cmp(O4, O0); // check if current entry is for same object
3707 __ brx( Assembler::equal, false, Assembler::pn, exit );
3708 __ delayed()->inc( O3, frame::interpreter_frame_monitor_size() * wordSize ); // check next one
3710 __ bind( entry );
3712 __ cmp( O3, O2 );
3713 __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, loop );
3714 __ delayed()->ld_ptr(O3, BasicObjectLock::obj_offset_in_bytes(), O4);
3716 __ bind( exit );
3717 }
3719 { Label allocated;
3721 // found free slot?
3722 __ br_notnull_short(O1, Assembler::pn, allocated);
3724 __ add_monitor_to_stack( false, O2, O3 );
3725 __ mov(Lmonitors, O1);
3727 __ bind(allocated);
3728 }
3730 // Increment bcp to point to the next bytecode, so exception handling for async exceptions works correctly.
3731 // The object has already been popped from the stack, so the expression stack looks correct.
3732 __ inc(Lbcp);
3734 __ st_ptr(O0, O1, BasicObjectLock::obj_offset_in_bytes()); // store object
3735 __ lock_object(O1, O0);
3737 // check if there's enough space on the stack for the monitors after locking
3738 __ generate_stack_overflow_check(0);
3740 // The bcp has already been incremented. Just need to dispatch to next instruction.
3741 __ dispatch_next(vtos);
3742 }
3745 void TemplateTable::monitorexit() {
3746 transition(atos, vtos);
3747 __ verify_oop(Otos_i);
3748 __ tst(Otos_i);
3749 __ throw_if_not_x( Assembler::notZero, Interpreter::_throw_NullPointerException_entry, G3_scratch );
3751 assert(O0 == Otos_i, "just checking");
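// Find the monitor entry locking this object; if the scan falls through
// without a match, the monitor state is illegal.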
3753 { Label entry, loop, found;
3754 __ add( __ top_most_monitor(), O2 ); // last one to check
3755 __ ba(entry);
3756 // Use Lscratch to hold the monitor entry to check, starting with the most recent one.
3757 // Because it is a local register, it survives the call to the C routine.
3758 __ delayed()->mov( Lmonitors, Lscratch );
3760 __ bind( loop );
3762 __ verify_oop(O4); // verify each monitor's oop
3763 __ cmp(O4, O0); // check if current entry is for desired object
3764 __ brx( Assembler::equal, true, Assembler::pt, found );
3765 __ delayed()->mov(Lscratch, O1); // pass found entry as argument to monitorexit
3767 __ inc( Lscratch, frame::interpreter_frame_monitor_size() * wordSize ); // advance to next
3769 __ bind( entry );
3771 __ cmp( Lscratch, O2 );
3772 __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, loop );
3773 __ delayed()->ld_ptr(Lscratch, BasicObjectLock::obj_offset_in_bytes(), O4);
3775 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
3776 __ should_not_reach_here();
3778 __ bind(found);
3779 }
3780 __ unlock_object(O1);
3781 }
3784 //----------------------------------------------------------------------------------------------------
3785 // Wide instructions
3787 void TemplateTable::wide() {
3788 transition(vtos, vtos);
3789 __ ldub(Lbcp, 1, G3_scratch); // get next bc
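// scale the bytecode into a word index into the table of wide entry points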
3790 __ sll(G3_scratch, LogBytesPerWord, G3_scratch);
3791 AddressLiteral ep(Interpreter::_wentry_point);
3792 __ set(ep, G4_scratch);
3793 __ ld_ptr(G4_scratch, G3_scratch, G3_scratch);
3794 __ jmp(G3_scratch, G0);
3795 __ delayed()->nop();
3796 // Note: the Lbcp increment step is part of the individual wide bytecode implementations
3797 }
3800 //----------------------------------------------------------------------------------------------------
3801 // Multi arrays
3803 void TemplateTable::multianewarray() {
3804 transition(vtos, atos);
3805 // put ndims * wordSize into Lscratch
3806 __ ldub( Lbcp, 3, Lscratch);
3807 __ sll( Lscratch, Interpreter::logStackElementSize, Lscratch);
3808 // Lesp points past last_dim, so set O1 to the first_dim address
3809 __ add( Lesp, Lscratch, O1);
3810 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), O1);
3811 __ add( Lesp, Lscratch, Lesp); // pop all dimensions off the stack
3812 }
3813 #endif /* !CC_INTERP */