Mon, 26 Sep 2011 10:24:05 -0700
7081933: Use zeroing elimination optimization for large array
Summary: Don't zero new typeArray during runtime call if the allocation is followed by arraycopy into it.
Reviewed-by: twisti

/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.inline.hpp"
#include "oops/methodDataOop.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"

#ifndef CC_INTERP
#define __ _masm->

// Misc helpers

// Do an oop store like *(base + index + offset) = val.
// index can be noreg.
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Register base,
                         Register index,
                         int offset,
                         Register val,
                         Register tmp,
                         BarrierSet::Name barrier,
                         bool precise) {
  assert(tmp != val && tmp != base && tmp != index, "register collision");
  assert(index == noreg || offset == 0, "only one offset");
  switch (barrier) {
#ifndef SERIALGC
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      {
        // Load and record the previous value.
        __ g1_write_barrier_pre(base, index, offset,
                                noreg /* pre_val */,
                                tmp, true /*preserve_o_regs*/);

        if (index == noreg) {
          assert(Assembler::is_simm13(offset), "fix this code");
          __ store_heap_oop(val, base, offset);
        } else {
          __ store_heap_oop(val, base, index);
        }

        // No need for post barrier if storing NULL
        if (val != G0) {
          if (precise) {
            if (index == noreg) {
              __ add(base, offset, base);
            } else {
              __ add(base, index, base);
            }
          }
          __ g1_write_barrier_post(base, val, tmp);
        }
      }
      break;
#endif // SERIALGC
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      {
        if (index == noreg) {
          assert(Assembler::is_simm13(offset), "fix this code");
          __ store_heap_oop(val, base, offset);
        } else {
          __ store_heap_oop(val, base, index);
        }
        // No need for post barrier if storing NULL
        if (val != G0) {
          if (precise) {
            if (index == noreg) {
              __ add(base, offset, base);
            } else {
              __ add(base, index, base);
            }
          }
          __ card_write_barrier_post(base, val, tmp);
        }
      }
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      ShouldNotReachHere();
      break;
    default:
      ShouldNotReachHere();
  }
}
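
// Illustrative sketch (not generated code) of what the card-table path above
// amounts to, with hypothetical 'card_table' and 'card_shift' standing in
// for the collector's real card-marking state:
//
//   *(base + index + offset) = val;              // store_heap_oop
//   if (val != NULL) {                           // no post barrier for NULL
//     addr = precise ? base + index + offset : base;
//     card_table[addr >> card_shift] = dirty;    // card_write_barrier_post
//   }
//
// The G1 path additionally records the previous value before the store
// (the SATB pre-barrier) via g1_write_barrier_pre.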

//----------------------------------------------------------------------------------------------------
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // (none)
}

//----------------------------------------------------------------------------------------------------
// Condition conversion

Assembler::Condition ccNot(TemplateTable::Condition cc) {
  switch (cc) {
    case TemplateTable::equal        : return Assembler::notEqual;
    case TemplateTable::not_equal    : return Assembler::equal;
    case TemplateTable::less         : return Assembler::greaterEqual;
    case TemplateTable::less_equal   : return Assembler::greater;
    case TemplateTable::greater      : return Assembler::lessEqual;
    case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}
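
// For orientation: the conditional templates compare and then branch on the
// *inverted* condition to the not-taken continuation, falling through into
// the taken-branch code. A minimal usage sketch (see if_0cmp below):
//
//   __ cmp(Otos_i, 0);
//   __ if_cmp(ccNot(TemplateTable::equal), false);  // ifeq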

//----------------------------------------------------------------------------------------------------
// Miscellaneous helper routines

Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(Lbcp, offset);
}

void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no) {
  // With sharing on, may need to test methodOop flag.
  if (!RewriteBytecodes) return;
  Label L_patch_done;

  switch (bc) {
  case Bytecodes::_fast_aputfield:
  case Bytecodes::_fast_bputfield:
  case Bytecodes::_fast_cputfield:
  case Bytecodes::_fast_dputfield:
  case Bytecodes::_fast_fputfield:
  case Bytecodes::_fast_iputfield:
  case Bytecodes::_fast_lputfield:
  case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1);
      __ set(bc, bc_reg);
      __ cmp_and_br_short(temp_reg, 0, Assembler::equal, Assembler::pn, L_patch_done);  // don't patch
    }
    break;
  default:
    assert(byte_no == -1, "sanity");
    if (load_bc_into_bc_reg) {
      __ set(bc, bc_reg);
    }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    __ ldub(at_bcp(0), temp_reg);
    __ cmp_and_br_short(temp_reg, Bytecodes::_breakpoint, Assembler::notEqual, Assembler::pt, L_fast_patch);
    // perform the quickening, slowly, in the bowels of the breakpoint table
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), Lmethod, Lbcp, bc_reg);
    __ ba_short(L_patch_done);
    __ bind(L_fast_patch);
  }

#ifdef ASSERT
  Bytecodes::Code orig_bytecode = Bytecodes::java_code(bc);
  Label L_okay;
  __ ldub(at_bcp(0), temp_reg);
  __ cmp(temp_reg, orig_bytecode);
  __ br(Assembler::equal, false, Assembler::pt, L_okay);
  __ delayed()->cmp(temp_reg, bc_reg);
  __ br(Assembler::equal, false, Assembler::pt, L_okay);
  __ delayed()->nop();
  __ stop("patching the wrong bytecode");
  __ bind(L_okay);
#endif

  // patch bytecode
  __ stb(bc_reg, at_bcp(0));
  __ bind(L_patch_done);
}
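
// Rough shape of the quickening performed above, as pseudocode (the
// breakpoint case defers to the VM so JVMTI keeps the original bytecode):
//
//   if (*bcp == _breakpoint)
//     InterpreterRuntime::set_original_bytecode_at(method, bcp, fast_bc);
//   else
//     *bcp = fast_bc;   // the stb above; later executions dispatch fast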

//----------------------------------------------------------------------------------------------------
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ clr(Otos_i);
}

void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  __ set(value, Otos_i);
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  assert(value >= 0, "check this code");
#ifdef _LP64
  __ set(value, Otos_l);
#else
  __ set(value, Otos_l2);
  __ clr( Otos_l1);
#endif
}

void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float zero = 0.0, one = 1.0, two = 2.0;
  float* p;
  switch( value ) {
   default: ShouldNotReachHere();
   case 0:  p = &zero;  break;
   case 1:  p = &one;   break;
   case 2:  p = &two;   break;
  }
  AddressLiteral a(p);
  __ sethi(a, G3_scratch);
  __ ldf(FloatRegisterImpl::S, G3_scratch, a.low10(), Ftos_f);
}

void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double zero = 0.0, one = 1.0;
  double* p;
  switch( value ) {
   default: ShouldNotReachHere();
   case 0:  p = &zero;  break;
   case 1:  p = &one;   break;
  }
  AddressLiteral a(p);
  __ sethi(a, G3_scratch);
  __ ldf(FloatRegisterImpl::D, G3_scratch, a.low10(), Ftos_d);
}

// %%%%% Should factor most snippet templates across platforms

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ ldsb( at_bcp(1), Otos_i );
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ get_2_byte_integer_at_bcp(1, G3_scratch, Otos_i, InterpreterMacroAssembler::Signed);
}

void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Label call_ldc, notInt, isString, notString, notClass, exit;

  if (wide) {
    __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
  } else {
    __ ldub(Lbcp, 1, O1);
  }
  __ get_cpool_and_tags(O0, O2);

  const int base_offset = constantPoolOopDesc::header_size() * wordSize;
  const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;

  // get type from tags
  __ add(O2, tags_offset, O2);
  __ ldub(O2, O1, O2);
  // unresolved string? If so, must resolve
  __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedString, Assembler::equal, Assembler::pt, call_ldc);

  // unresolved class? If so, must resolve
  __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedClass, Assembler::equal, Assembler::pt, call_ldc);

  // unresolved class in error state
  __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedClassInError, Assembler::equal, Assembler::pn, call_ldc);

  __ cmp(O2, JVM_CONSTANT_Class);      // need to call vm to get java mirror of the class
  __ brx(Assembler::notEqual, true, Assembler::pt, notClass);
  __ delayed()->add(O0, base_offset, O0);

  __ bind(call_ldc);
  __ set(wide, O1);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), O1);
  __ push(atos);
  __ ba_short(exit);

  __ bind(notClass);
  // __ add(O0, base_offset, O0);
  __ sll(O1, LogBytesPerWord, O1);
  __ cmp(O2, JVM_CONSTANT_Integer);
  __ brx(Assembler::notEqual, true, Assembler::pt, notInt);
  __ delayed()->cmp(O2, JVM_CONSTANT_String);
  __ ld(O0, O1, Otos_i);
  __ push(itos);
  __ ba_short(exit);

  __ bind(notInt);
  // __ cmp(O2, JVM_CONSTANT_String);
  __ brx(Assembler::equal, true, Assembler::pt, isString);
  __ delayed()->cmp(O2, JVM_CONSTANT_Object);
  __ brx(Assembler::notEqual, true, Assembler::pt, notString);
  __ delayed()->ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
  __ bind(isString);
  __ ld_ptr(O0, O1, Otos_i);
  __ verify_oop(Otos_i);
  __ push(atos);
  __ ba_short(exit);

  __ bind(notString);
  // __ ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
  __ push(ftos);

  __ bind(exit);
}

// Fast path for caching oop constants.
// %%% We should use this to handle Class and String constants also.
// %%% It will simplify the ldc/primitive path considerably.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  if (!EnableInvokeDynamic) {
    // We should not encounter this bytecode if !EnableInvokeDynamic.
    // The verifier will stop it.  However, if we get past the verifier,
    // this will stop the thread in a reasonable way, without crashing the JVM.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                     InterpreterRuntime::throw_IncompatibleClassChangeError));
    // the call_VM checks for exception, so we should never return here.
    __ should_not_reach_here();
    return;
  }

  Register Rcache = G3_scratch;
  Register Rscratch = G4_scratch;

  resolve_cache_and_index(f1_oop, Otos_i, Rcache, Rscratch, wide ? sizeof(u2) : sizeof(u1));

  __ verify_oop(Otos_i);

  Label L_done;
  const Register Rcon_klass = G3_scratch;    // same as Rcache
  const Register Rarray_klass = G4_scratch;  // same as Rscratch
  __ load_klass(Otos_i, Rcon_klass);
  AddressLiteral array_klass_addr((address)Universe::systemObjArrayKlassObj_addr());
  __ load_contents(array_klass_addr, Rarray_klass);
  __ cmp_and_brx_short(Rarray_klass, Rcon_klass, Assembler::notEqual, Assembler::pt, L_done);
  __ ld(Address(Otos_i, arrayOopDesc::length_offset_in_bytes()), Rcon_klass);
  __ tst(Rcon_klass);
  __ brx(Assembler::zero, true, Assembler::pt, L_done);
  __ delayed()->clr(Otos_i);    // executed only if branch is taken

  // Load the exception from the system-array which wraps it:
  __ load_heap_oop(Otos_i, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i);
  __ throw_if_not_x(Assembler::never, Interpreter::throw_exception_entry(), G3_scratch);

  __ bind(L_done);
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label retry, resolved, Long, exit;

  __ bind(retry);
  __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
  __ get_cpool_and_tags(O0, O2);

  const int base_offset = constantPoolOopDesc::header_size() * wordSize;
  const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
  // get type from tags
  __ add(O2, tags_offset, O2);
  __ ldub(O2, O1, O2);

  __ sll(O1, LogBytesPerWord, O1);
  __ add(O0, O1, G3_scratch);

  __ cmp_and_brx_short(O2, JVM_CONSTANT_Double, Assembler::notEqual, Assembler::pt, Long);
  // A double can be placed at word-aligned locations in the constant pool.
  // Check out Conversions.java for an example.
  // Also constantPoolOopDesc::header_size() is 20, which makes it very difficult
  // to double-align doubles in the constant pool.  SG, 11/7/97
#ifdef _LP64
  __ ldf(FloatRegisterImpl::D, G3_scratch, base_offset, Ftos_d);
#else
  FloatRegister f = Ftos_d;
  __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset, f);
  __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset + sizeof(jdouble)/2,
         f->successor());
#endif
  __ push(dtos);
  __ ba_short(exit);

  __ bind(Long);
#ifdef _LP64
  __ ldx(G3_scratch, base_offset, Otos_l);
#else
  __ ld(G3_scratch, base_offset, Otos_l);
  __ ld(G3_scratch, base_offset + sizeof(jlong)/2, Otos_l->successor());
#endif
  __ push(ltos);

  __ bind(exit);
}

void TemplateTable::locals_index(Register reg, int offset) {
  __ ldub( at_bcp(offset), reg );
}

void TemplateTable::locals_index_wide(Register reg) {
  // offset is 2, not 1, because Lbcp points to wide prefix code
  __ get_2_byte_integer_at_bcp(2, G4_scratch, reg, InterpreterMacroAssembler::Unsigned);
}

void TemplateTable::iload() {
  transition(vtos, itos);
  // Rewrite iload,iload pair into fast_iload2;
  // rewrite iload,caload pair into fast_icaload.
  if (RewriteFrequentPairs) {
    Label rewrite, done;

    // get next byte
    __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_iload)), G3_scratch);

    // if _iload, wait to rewrite to iload2.  We only want to rewrite the
    // last two iloads in a pair.  Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
    __ cmp_and_br_short(G3_scratch, (int)Bytecodes::_iload, Assembler::equal, Assembler::pn, done);

    __ cmp(G3_scratch, (int)Bytecodes::_fast_iload);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_iload2, G4_scratch);

    __ cmp(G3_scratch, (int)Bytecodes::_caload);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_icaload, G4_scratch);

    __ set(Bytecodes::_fast_iload, G4_scratch);  // don't check again
    // rewrite
    // G4_scratch: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, G4_scratch, G3_scratch, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
}
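
// Example of the rewrite above (illustrative): a bytecode stream such as
//
//   iload 1
//   caload
//
// is patched in place to fast_icaload; the trailing caload byte is then
// covered by the longer fast_icaload encoding, so later executions
// dispatch once instead of twice.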

void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
  __ push_i();
  locals_index(G3_scratch, 3);  // get next bytecode's local index.
  __ access_local_int( G3_scratch, Otos_i );
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
}

void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(G3_scratch);
  __ access_local_long( G3_scratch, Otos_l );
}

void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(G3_scratch);
  __ access_local_float( G3_scratch, Ftos_f );
}

void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(G3_scratch);
  __ access_local_double( G3_scratch, Ftos_d );
}

void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(G3_scratch);
  __ access_local_ptr( G3_scratch, Otos_i);
}

void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
}

void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(G3_scratch);
  __ access_local_long( G3_scratch, Otos_l );
}

void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(G3_scratch);
  __ access_local_float( G3_scratch, Ftos_f );
}

void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(G3_scratch);
  __ access_local_double( G3_scratch, Ftos_d );
}

void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(G3_scratch);
  __ access_local_ptr( G3_scratch, Otos_i );
  __ verify_oop(Otos_i);
}

void TemplateTable::iaload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
  __ ld(O3, arrayOopDesc::base_offset_in_bytes(T_INT), Otos_i);
}

void TemplateTable::laload() {
  transition(itos, ltos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
  __ ld_long(O3, arrayOopDesc::base_offset_in_bytes(T_LONG), Otos_l);
}

void TemplateTable::faload() {
  transition(itos, ftos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
  __ ldf(FloatRegisterImpl::S, O3, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Ftos_f);
}

void TemplateTable::daload() {
  transition(itos, dtos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
  __ ldf(FloatRegisterImpl::D, O3, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Ftos_d);
}

void TemplateTable::aaload() {
  transition(itos, atos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O3);
  __ load_heap_oop(O3, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i);
  __ verify_oop(Otos_i);
}

void TemplateTable::baload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, 0, G3_scratch, O3);
  __ ldsb(O3, arrayOopDesc::base_offset_in_bytes(T_BYTE), Otos_i);
}

void TemplateTable::caload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
}

void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // Otos_i: index
  // tos: array
  locals_index(G3_scratch);
  __ access_local_int( G3_scratch, Otos_i );
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
}

void TemplateTable::saload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ ldsh(O3, arrayOopDesc::base_offset_in_bytes(T_SHORT), Otos_i);
}

void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ ld( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
}

void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  assert(n+1 < Argument::n_register_parameters, "would need more code");
  __ load_unaligned_long(Llocals, Interpreter::local_offset_in_bytes(n+1), Otos_l);
}

void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  assert(n < Argument::n_register_parameters, "would need more code");
  __ ldf( FloatRegisterImpl::S, Llocals, Interpreter::local_offset_in_bytes(n), Ftos_f );
}

void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  FloatRegister dst = Ftos_d;
  __ load_unaligned_double(Llocals, Interpreter::local_offset_in_bytes(n+1), dst);
}

void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ ld_ptr( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
}

void TemplateTable::aload_0() {
  transition(vtos, atos);

  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield (itos)
  // _aload_0, _fast_agetfield (atos)
  // _aload_0, _fast_fgetfield (ftos)
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
  // bytecode checks the next bytecode and then rewrites the current
  // bytecode into a pair bytecode; otherwise it rewrites the current
  // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
  //
  if (RewriteFrequentPairs) {
    Label rewrite, done;

    // get next byte
    __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)), G3_scratch);

    // do actual aload_0
    aload(0);

    // if _getfield then wait with rewrite
    __ cmp_and_br_short(G3_scratch, (int)Bytecodes::_getfield, Assembler::equal, Assembler::pn, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_igetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_iaccess_0, G4_scratch);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_agetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_aaccess_0, G4_scratch);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_fgetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_faccess_0, G4_scratch);

    // else rewrite to _fast_aload0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ set(Bytecodes::_fast_aload_0, G4_scratch);

    // rewrite
    // G4_scratch: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, G4_scratch, G3_scratch, false);
    __ bind(done);
  } else {
    aload(0);
  }
}
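
// Worked example (illustrative): for a getter like
//
//   int getX() { return this.x; }    // javac: aload_0; getfield #x; ireturn
//
// the first execution patches aload_0 to _fast_iaccess_0, which loads
// 'this' and the int field in a single template on later executions.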

void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(G3_scratch);
  __ store_local_int( G3_scratch, Otos_i );
}

void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(G3_scratch);
  __ store_local_long( G3_scratch, Otos_l );
}

void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(G3_scratch);
  __ store_local_float( G3_scratch, Ftos_f );
}

void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(G3_scratch);
  __ store_local_double( G3_scratch, Ftos_d );
}

void TemplateTable::astore() {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  locals_index(G3_scratch);
  __ store_local_ptr(G3_scratch, Otos_i);
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(G3_scratch);
  __ store_local_int( G3_scratch, Otos_i );
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l();
  locals_index_wide(G3_scratch);
  __ store_local_long( G3_scratch, Otos_l );
}

void TemplateTable::wide_fstore() {
  transition(vtos, vtos);
  __ pop_f();
  locals_index_wide(G3_scratch);
  __ store_local_float( G3_scratch, Ftos_f );
}

void TemplateTable::wide_dstore() {
  transition(vtos, vtos);
  __ pop_d();
  locals_index_wide(G3_scratch);
  __ store_local_double( G3_scratch, Ftos_d );
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  locals_index_wide(G3_scratch);
  __ store_local_ptr(G3_scratch, Otos_i);
}

void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(O2); // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
  __ st(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_INT));
}

void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(O2); // index
  // Otos_l: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
  __ st_long(Otos_l, O2, arrayOopDesc::base_offset_in_bytes(T_LONG));
}

void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(O2); // index
  // Ftos_f: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
  __ stf(FloatRegisterImpl::S, Ftos_f, O2, arrayOopDesc::base_offset_in_bytes(T_FLOAT));
}

void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(O2); // index
  // Ftos_d: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
  __ stf(FloatRegisterImpl::D, Ftos_d, O2, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
}

void TemplateTable::aastore() {
  Label store_ok, is_null, done;
  transition(vtos, vtos);
  __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
  __ ld(Lesp, Interpreter::expr_offset_in_bytes(1), O2);      // get index
  __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(2), O3);  // get array
  // Otos_i: val
  // O2: index
  // O3: array
  __ verify_oop(Otos_i);
  __ index_check_without_pop(O3, O2, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O1);

  // do array store check - check for NULL value first
  __ br_null_short( Otos_i, Assembler::pn, is_null );

  __ load_klass(O3, O4);     // get array klass
  __ load_klass(Otos_i, O5); // get value klass

  // do fast instanceof cache test

  __ ld_ptr(O4, sizeof(oopDesc) + objArrayKlass::element_klass_offset_in_bytes(), O4);

  assert(Otos_i == O0, "just checking");

  // Otos_i:    value
  // O1:        addr - offset
  // O2:        index
  // O3:        array
  // O4:        array element klass
  // O5:        value klass

  // Address element(O1, 0, arrayOopDesc::base_offset_in_bytes(T_OBJECT));

  // Generate a fast subtype check.  Branch to store_ok if no
  // failure.  Throw if failure.
  __ gen_subtype_check( O5, O4, G3_scratch, G4_scratch, G1_scratch, store_ok );

  // Not a subtype; so must throw exception
  __ throw_if_not_x( Assembler::never, Interpreter::_throw_ArrayStoreException_entry, G3_scratch );

  // Store is OK.
  __ bind(store_ok);
  do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i, G3_scratch, _bs->kind(), true);

  __ ba(done);
  __ delayed()->inc(Lesp, 3 * Interpreter::stackElementSize); // adj sp (pops array, index and value)

  __ bind(is_null);
  do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), G0, G4_scratch, _bs->kind(), true);

  __ profile_null_seen(G3_scratch);
  __ inc(Lesp, 3 * Interpreter::stackElementSize); // adj sp (pops array, index and value)
  __ bind(done);
}

void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(O2); // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, 0, G3_scratch, O2);
  __ stb(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_BYTE));
}

void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(O2); // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerShort, G3_scratch, O2);
  __ sth(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_CHAR));
}

void TemplateTable::sastore() {
  // %%%%% Factor across platform
  castore();
}

void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ st(Otos_i, Llocals, Interpreter::local_offset_in_bytes(n));
}

void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  assert(n+1 < Argument::n_register_parameters, "only handle register cases");
  __ store_unaligned_long(Otos_l, Llocals, Interpreter::local_offset_in_bytes(n+1));
}

void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  assert(n < Argument::n_register_parameters, "only handle register cases");
  __ stf(FloatRegisterImpl::S, Ftos_f, Llocals, Interpreter::local_offset_in_bytes(n));
}

void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  FloatRegister src = Ftos_d;
  __ store_unaligned_double(src, Llocals, Interpreter::local_offset_in_bytes(n+1));
}

void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  __ store_local_ptr(n, Otos_i);
}

void TemplateTable::pop() {
  transition(vtos, vtos);
  __ inc(Lesp, Interpreter::stackElementSize);
}

void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ inc(Lesp, 2 * Interpreter::stackElementSize);
}

void TemplateTable::dup() {
  transition(vtos, vtos);
  // stack: ..., a
  // load a and tag
  __ load_ptr(0, Otos_i);
  __ push_ptr(Otos_i);
  // stack: ..., a, a
}

void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, G3_scratch);  // get a
  __ load_ptr( 0, Otos_l1);     // get b
  __ store_ptr(1, Otos_l1);     // put b
  __ store_ptr(0, G3_scratch);  // put a - like swap
  __ push_ptr(Otos_l1);         // push b
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  // get c and push on stack, reuse registers
  __ load_ptr( 0, G3_scratch);  // get c
  __ push_ptr(G3_scratch);      // push c with tag
  // stack: ..., a, b, c, c  (c in reg)  (Lesp - 4)
  // (stack offsets n+1 now)
  __ load_ptr( 3, Otos_l1);     // get a
  __ store_ptr(3, G3_scratch);  // put c at 3
  // stack: ..., c, b, c, c  (a in reg)
  __ load_ptr( 2, G3_scratch);  // get b
  __ store_ptr(2, Otos_l1);     // put a at 2
  // stack: ..., c, a, c, c  (b in reg)
  __ store_ptr(1, G3_scratch);  // put b at 1
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2() {
  transition(vtos, vtos);
  __ load_ptr(1, G3_scratch);  // get a
  __ load_ptr(0, Otos_l1);     // get b
  __ push_ptr(G3_scratch);     // push a
  __ push_ptr(Otos_l1);        // push b
  // stack: ..., a, b, a, b
}

void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 1, Lscratch);    // get b
  __ load_ptr( 2, Otos_l1);     // get a
  __ store_ptr(2, Lscratch);    // put b at a
  // stack: ..., b, b, c
  __ load_ptr( 0, G3_scratch);  // get c
  __ store_ptr(1, G3_scratch);  // put c at b
  // stack: ..., b, c, c
  __ store_ptr(0, Otos_l1);     // put a at c
  // stack: ..., b, c, a
  __ push_ptr(Lscratch);        // push b
  __ push_ptr(G3_scratch);      // push c
  // stack: ..., b, c, a, b, c
}

// The spec says that these types can be a mixture of category 1 (one-word)
// types and/or category 2 types (longs and doubles).
void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr( 1, Lscratch);    // get c
  __ load_ptr( 3, Otos_l1);     // get a
  __ store_ptr(3, Lscratch);    // put c at 3
  __ store_ptr(1, Otos_l1);     // put a at 1
  // stack: ..., c, b, a, d
  __ load_ptr( 2, G3_scratch);  // get b
  __ load_ptr( 0, Otos_l1);     // get d
  __ store_ptr(0, G3_scratch);  // put b at 0
  __ store_ptr(2, Otos_l1);     // put d at 2
  // stack: ..., c, d, a, b
  __ push_ptr(Lscratch);        // push c
  __ push_ptr(Otos_l1);         // push d
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, G3_scratch);  // get a
  __ load_ptr( 0, Otos_l1);     // get b
  __ store_ptr(0, G3_scratch);  // put b
  __ store_ptr(1, Otos_l1);     // put a
  // stack: ..., b, a
}

void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  __ pop_i(O1);
  switch (op) {
   case  add:  __  add(O1, Otos_i, Otos_i);  break;
   case  sub:  __  sub(O1, Otos_i, Otos_i);  break;
     // %%%%% Mul may not exist: better to call .mul?
   case  mul:  __ smul(O1, Otos_i, Otos_i);  break;
   case _and:  __ and3(O1, Otos_i, Otos_i);  break;
   case  _or:  __  or3(O1, Otos_i, Otos_i);  break;
   case _xor:  __ xor3(O1, Otos_i, Otos_i);  break;
   case  shl:  __  sll(O1, Otos_i, Otos_i);  break;
   case  shr:  __  sra(O1, Otos_i, Otos_i);  break;
   case ushr:  __  srl(O1, Otos_i, Otos_i);  break;
   default: ShouldNotReachHere();
  }
}

void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
  __ pop_l(O2);
  switch (op) {
#ifdef _LP64
   case  add:  __  add(O2, Otos_l, Otos_l);  break;
   case  sub:  __  sub(O2, Otos_l, Otos_l);  break;
   case _and:  __ and3(O2, Otos_l, Otos_l);  break;
   case  _or:  __  or3(O2, Otos_l, Otos_l);  break;
   case _xor:  __ xor3(O2, Otos_l, Otos_l);  break;
#else
   case  add:  __ addcc(O3, Otos_l2, Otos_l2);  __ addc(O2, Otos_l1, Otos_l1);  break;
   case  sub:  __ subcc(O3, Otos_l2, Otos_l2);  __ subc(O2, Otos_l1, Otos_l1);  break;
   case _and:  __  and3(O3, Otos_l2, Otos_l2);  __ and3(O2, Otos_l1, Otos_l1);  break;
   case  _or:  __   or3(O3, Otos_l2, Otos_l2);  __  or3(O2, Otos_l1, Otos_l1);  break;
   case _xor:  __  xor3(O3, Otos_l2, Otos_l2);  __ xor3(O2, Otos_l1, Otos_l1);  break;
#endif
   default: ShouldNotReachHere();
  }
}

void TemplateTable::idiv() {
  // %%%%% Later: ForSPARC/V7 call .sdiv library routine,
  // %%%%% Use ldsw...sdivx on pure V9 ABI. 64 bit safe.

  transition(itos, itos);
  __ pop_i(O1); // get 1st op

  // Y contains upper 32 bits of result, set it to 0 or all ones
  __ wry(G0);
  __ mov(~0, G3_scratch);

  __ tst(O1);
  Label neg;
  __ br(Assembler::negative, true, Assembler::pn, neg);
  __ delayed()->wry(G3_scratch);
  __ bind(neg);

  Label ok;
  __ tst(Otos_i);
  __ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch );

  const int min_int = 0x80000000;
  Label regular;
  __ cmp(Otos_i, -1);
  __ br(Assembler::notEqual, false, Assembler::pt, regular);
#ifdef _LP64
  // Don't put set in delay slot
  // Set will turn into multiple instructions in 64 bit mode
  __ delayed()->nop();
  __ set(min_int, G4_scratch);
#else
  __ delayed()->set(min_int, G4_scratch);
#endif
  Label done;
  __ cmp(O1, G4_scratch);
  __ br(Assembler::equal, true, Assembler::pt, done);
  __ delayed()->mov(O1, Otos_i);  // (mov only executed if branch taken)

  __ bind(regular);
  __ sdiv(O1, Otos_i, Otos_i);    // note: irem uses O1 after this instruction!
  __ bind(done);
}
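
// The min_int/-1 check above exists because the one overflowing input has
// a quotient defined by the JVM spec:
//
//   Integer.MIN_VALUE / -1 == Integer.MIN_VALUE   // 0x80000000 / -1
//
// so the dividend is returned unchanged (the delayed mov) instead of being
// handed to sdiv.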

void TemplateTable::irem() {
  transition(itos, itos);
  __ mov(Otos_i, O2);  // save divisor
  idiv();              // %%%% Hack: exploits fact that idiv leaves dividend in O1
  __ smul(Otos_i, O2, Otos_i);
  __ sub(O1, Otos_i, Otos_i);
}

void TemplateTable::lmul() {
  transition(ltos, ltos);
  __ pop_l(O2);
#ifdef _LP64
  __ mulx(Otos_l, O2, Otos_l);
#else
  __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lmul));
#endif
}

void TemplateTable::ldiv() {
  transition(ltos, ltos);

  // check for zero
  __ pop_l(O2);
#ifdef _LP64
  __ tst(Otos_l);
  __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ sdivx(O2, Otos_l, Otos_l);
#else
  __ orcc(Otos_l1, Otos_l2, G0);
  __ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
#endif
}

void TemplateTable::lrem() {
  transition(ltos, ltos);

  // check for zero
  __ pop_l(O2);
#ifdef _LP64
  __ tst(Otos_l);
  __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ sdivx(O2, Otos_l, Otos_l2);
  __ mulx (Otos_l2, Otos_l, Otos_l2);
  __ sub  (O2, Otos_l2, Otos_l);
#else
  __ orcc(Otos_l1, Otos_l2, G0);
  __ throw_if_not_icc(Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
#endif
}

void TemplateTable::lshl() {
  transition(itos, ltos);  // %%%% could optimize, fill delay slot or opt for ultra

  __ pop_l(O2);  // shift value in O2, O3
#ifdef _LP64
  __ sllx(O2, Otos_i, Otos_l);
#else
  __ lshl(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
}

void TemplateTable::lshr() {
  transition(itos, ltos);  // %%%% see lshl comment

  __ pop_l(O2);  // shift value in O2, O3
#ifdef _LP64
  __ srax(O2, Otos_i, Otos_l);
#else
  __ lshr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
}

void TemplateTable::lushr() {
  transition(itos, ltos);  // %%%% see lshl comment

  __ pop_l(O2);  // shift value in O2, O3
#ifdef _LP64
  __ srlx(O2, Otos_i, Otos_l);
#else
  __ lushr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
}

void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);
  switch (op) {
   case  add:  __  pop_f(F4); __ fadd(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  sub:  __  pop_f(F4); __ fsub(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  mul:  __  pop_f(F4); __ fmul(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  div:  __  pop_f(F4); __ fdiv(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case  rem:
     assert(Ftos_f == F0, "just checking");
#ifdef _LP64
     // LP64 calling conventions use F1, F3 for passing 2 floats
     __ pop_f(F1);
     __ fmov(FloatRegisterImpl::S, Ftos_f, F3);
#else
     __ pop_i(O0);
     __ stf(FloatRegisterImpl::S, Ftos_f, __ d_tmp);
     __ ld( __ d_tmp, O1 );
#endif
     __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::frem));
     assert( Ftos_f == F0, "fix this code" );
     break;

   default: ShouldNotReachHere();
  }
}

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);
  switch (op) {
   case  add:  __  pop_d(F4); __ fadd(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  sub:  __  pop_d(F4); __ fsub(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  mul:  __  pop_d(F4); __ fmul(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  div:  __  pop_d(F4); __ fdiv(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case  rem:
#ifdef _LP64
     // Pass arguments in D0, D2
     __ fmov(FloatRegisterImpl::D, Ftos_f, F2 );
     __ pop_d( F0 );
#else
     // Pass arguments in O0O1, O2O3
     __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
     __ ldd( __ d_tmp, O2 );
     __ pop_d(Ftos_f);
     __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
     __ ldd( __ d_tmp, O0 );
#endif
     __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::drem));
     assert( Ftos_d == F0, "fix this code" );
     break;

   default: ShouldNotReachHere();
  }
}

void TemplateTable::ineg() {
  transition(itos, itos);
  __ neg(Otos_i);
}

void TemplateTable::lneg() {
  transition(ltos, ltos);
#ifdef _LP64
  __ sub(G0, Otos_l, Otos_l);
#else
  __ lneg(Otos_l1, Otos_l2);
#endif
}

void TemplateTable::fneg() {
  transition(ftos, ftos);
  __ fneg(FloatRegisterImpl::S, Ftos_f);
}

void TemplateTable::dneg() {
  transition(dtos, dtos);
  // v8 has fnegd if source and dest are the same
  __ fneg(FloatRegisterImpl::D, Ftos_f);
}

void TemplateTable::iinc() {
  transition(vtos, vtos);
  locals_index(G3_scratch);
  __ ldsb(Lbcp, 2, O2);  // load constant
  __ access_local_int(G3_scratch, Otos_i);
  __ add(Otos_i, O2, Otos_i);
  __ st(Otos_i, G3_scratch, 0);  // access_local_int puts E.A. in G3_scratch
}

void TemplateTable::wide_iinc() {
  transition(vtos, vtos);
  locals_index_wide(G3_scratch);
  __ get_2_byte_integer_at_bcp( 4,  O2, O3, InterpreterMacroAssembler::Signed);
  __ access_local_int(G3_scratch, Otos_i);
  __ add(Otos_i, O3, Otos_i);
  __ st(Otos_i, G3_scratch, 0);  // access_local_int puts E.A. in G3_scratch
}

void TemplateTable::convert() {
  // %%%%% Factor this first part across platforms
#ifdef ASSERT
  TosState tos_in  = ilgl;
  TosState tos_out = ilgl;
  switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default             : ShouldNotReachHere();
  }
  switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default             : ShouldNotReachHere();
  }
  transition(tos_in, tos_out);
#endif

  // Conversion
  Label done;
  switch (bytecode()) {
   case Bytecodes::_i2l:
#ifdef _LP64
    // Sign extend the 32 bits
    __ sra ( Otos_i, 0, Otos_l );
#else
    __ addcc(Otos_i, 0, Otos_l2);
    __ br(Assembler::greaterEqual, true, Assembler::pt, done);
    __ delayed()->clr(Otos_l1);
    __ set(~0, Otos_l1);
#endif
    break;

   case Bytecodes::_i2f:
    __ st(Otos_i, __ d_tmp );
    __ ldf(FloatRegisterImpl::S,  __ d_tmp, F0);
    __ fitof(FloatRegisterImpl::S, F0, Ftos_f);
    break;

   case Bytecodes::_i2d:
    __ st(Otos_i, __ d_tmp);
    __ ldf(FloatRegisterImpl::S,  __ d_tmp, F0);
    __ fitof(FloatRegisterImpl::D, F0, Ftos_f);
    break;

   case Bytecodes::_i2b:
    __ sll(Otos_i, 24, Otos_i);
    __ sra(Otos_i, 24, Otos_i);
    break;

   case Bytecodes::_i2c:
    __ sll(Otos_i, 16, Otos_i);
    __ srl(Otos_i, 16, Otos_i);
    break;

   case Bytecodes::_i2s:
    __ sll(Otos_i, 16, Otos_i);
    __ sra(Otos_i, 16, Otos_i);
    break;

   case Bytecodes::_l2i:
#ifndef _LP64
    __ mov(Otos_l2, Otos_i);
#else
    // Sign-extend into the high 32 bits
    __ sra(Otos_l, 0, Otos_i);
#endif
    break;

   case Bytecodes::_l2f:
   case Bytecodes::_l2d:
    __ st_long(Otos_l, __ d_tmp);
    __ ldf(FloatRegisterImpl::D, __ d_tmp, Ftos_d);

    if (VM_Version::v9_instructions_work()) {
      if (bytecode() == Bytecodes::_l2f) {
        __ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f);
      } else {
        __ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d);
      }
    } else {
      __ call_VM_leaf(
        Lscratch,
        bytecode() == Bytecodes::_l2f
          ? CAST_FROM_FN_PTR(address, SharedRuntime::l2f)
          : CAST_FROM_FN_PTR(address, SharedRuntime::l2d)
      );
    }
    break;

   case Bytecodes::_f2i:  {
      Label isNaN;
      // result must be 0 if value is NaN; test by comparing value to itself
      __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, Ftos_f, Ftos_f);
      // According to the v8 manual, you have to have a non-fp instruction
      // between fcmp and fb.
      if (!VM_Version::v9_instructions_work()) {
        __ nop();
      }
      __ fb(Assembler::f_unordered, true, Assembler::pn, isNaN);
      __ delayed()->clr(Otos_i);  // NaN
      __ ftoi(FloatRegisterImpl::S, Ftos_f, F30);
      __ stf(FloatRegisterImpl::S, F30, __ d_tmp);
      __ ld(__ d_tmp, Otos_i);
      __ bind(isNaN);
    }
    break;

   case Bytecodes::_f2l:
    // must uncache tos
    __ push_f();
#ifdef _LP64
    __ pop_f(F1);
#else
    __ pop_i(O0);
#endif
    __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
    break;

   case Bytecodes::_f2d:
    __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, Ftos_f, Ftos_f);
    break;

   case Bytecodes::_d2i:
   case Bytecodes::_d2l:
    // must uncache tos
    __ push_d();
#ifdef _LP64
    // LP64 calling conventions pass first double arg in D0
    __ pop_d( Ftos_d );
#else
    __ pop_i( O0 );
    __ pop_i( O1 );
#endif
    __ call_VM_leaf(Lscratch,
        bytecode() == Bytecodes::_d2i
          ? CAST_FROM_FN_PTR(address, SharedRuntime::d2i)
          : CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
    break;

   case Bytecodes::_d2f:
    if (VM_Version::v9_instructions_work()) {
      __ ftof( FloatRegisterImpl::D, FloatRegisterImpl::S, Ftos_d, Ftos_f);
    }
    else {
      // must uncache tos
      __ push_d();
      __ pop_i(O0);
      __ pop_i(O1);
      __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::d2f));
    }
    break;

   default: ShouldNotReachHere();
  }
  __ bind(done);
}
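
// The i2b/i2c/i2s cases above use the classic shift pair; in Java terms,
//
//   i2b:  x = (x << 24) >> 24;    // sra: sign-extends bit 7
//   i2c:  x = (x << 16) >>> 16;   // srl: zero-extends (char is unsigned)
//   i2s:  x = (x << 16) >> 16;    // sra: sign-extends bit 15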

void TemplateTable::lcmp() {
  transition(ltos, itos);

#ifdef _LP64
  __ pop_l(O1);  // pop off value 1, value 2 is in O0
  __ lcmp( O1, Otos_l, Otos_i );
#else
  __ pop_l(O2);  // cmp O2,3 to O0,1
  __ lcmp( O2, O3, Otos_l1, Otos_l2, Otos_i );
#endif
}

void TemplateTable::float_cmp(bool is_float, int unordered_result) {

  if (is_float) __ pop_f(F2);
  else          __ pop_d(F2);

  assert(Ftos_f == F0  &&  Ftos_d == F0,  "alias checking:");

  __ float_cmp( is_float, unordered_result, F2, F0, Otos_i );
}

void TemplateTable::branch(bool is_jsr, bool is_wide) {
  // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
  __ verify_oop(Lmethod);
  __ verify_thread();

  const Register O2_bumped_count = O2;
  __ profile_taken_branch(G3_scratch, O2_bumped_count);

  // get (wide) offset to O1_disp
  const Register O1_disp = O1;
  if (is_wide)  __ get_4_byte_integer_at_bcp( 1,  G4_scratch, O1_disp,                                    InterpreterMacroAssembler::set_CC);
  else          __ get_2_byte_integer_at_bcp( 1,  G4_scratch, O1_disp, InterpreterMacroAssembler::Signed, InterpreterMacroAssembler::set_CC);

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the
  // non-JSR normal-branch stuff occurring below.
  if( is_jsr ) {
    // compute return address as bci in Otos_i
    __ ld_ptr(Lmethod, methodOopDesc::const_offset(), G3_scratch);
    __ sub(Lbcp, G3_scratch, G3_scratch);
    __ sub(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()) - (is_wide ? 5 : 3), Otos_i);

    // Bump Lbcp to target of JSR
    __ add(Lbcp, O1_disp, Lbcp);
    // Push returnAddress for "ret" on stack
    __ push_ptr(Otos_i);
    // And away we go!
    __ dispatch_next(vtos);
    return;
  }

  // Normal (non-jsr) branch handling

  // Save the current Lbcp
  const Register O0_cur_bcp = O0;
  __ mov( Lbcp, O0_cur_bcp );

  bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
  if ( increment_invocation_counter_for_backward_branches ) {
    Label Lforward;
    // check branch direction
    __ br( Assembler::positive, false, Assembler::pn, Lforward );
    // Bump bytecode pointer by displacement (take the branch)
    __ delayed()->add( O1_disp, Lbcp, Lbcp );  // add to bc addr

    if (TieredCompilation) {
      Label Lno_mdo, Loverflow;
      int increment = InvocationCounter::count_increment;
      int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
      if (ProfileInterpreter) {
        // If no method data exists, go to profile_continue.
        __ ld_ptr(Lmethod, methodOopDesc::method_data_offset(), G4_scratch);
        __ br_null_short(G4_scratch, Assembler::pn, Lno_mdo);

        // Increment backedge counter in the MDO
        Address mdo_backedge_counter(G4_scratch, in_bytes(methodDataOopDesc::backedge_counter_offset()) +
                                                 in_bytes(InvocationCounter::counter_offset()));
        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, Lscratch,
                                   Assembler::notZero, &Lforward);
        __ ba_short(Loverflow);
      }

      // If there's no MDO, increment counter in methodOop
      __ bind(Lno_mdo);
      Address backedge_counter(Lmethod, in_bytes(methodOopDesc::backedge_counter_offset()) +
                                        in_bytes(InvocationCounter::counter_offset()));
      __ increment_mask_and_jump(backedge_counter, increment, mask, G3_scratch, Lscratch,
                                 Assembler::notZero, &Lforward);
      __ bind(Loverflow);

      // notify point for loop, pass branch bytecode
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), O0_cur_bcp);

      // Was an OSR adapter generated?
      // O0 = osr nmethod
      __ br_null_short(O0, Assembler::pn, Lforward);

      // Has the nmethod been invalidated already?
      __ ld(O0, nmethod::entry_bci_offset(), O2);
      __ cmp_and_br_short(O2, InvalidOSREntryBci, Assembler::equal, Assembler::pn, Lforward);

      // migrate the interpreter frame off of the stack

      __ mov(G2_thread, L7);
      // save nmethod
      __ mov(O0, L6);
      __ set_last_Java_frame(SP, noreg);
      __ call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
      __ reset_last_Java_frame();
      __ mov(L7, G2_thread);

      // move OSR nmethod to I1
      __ mov(L6, I1);

      // OSR buffer to I0
      __ mov(O0, I0);

      // remove the interpreter frame
      __ restore(I5_savedSP, 0, SP);

      // Jump to the osr code.
      __ ld_ptr(O1, nmethod::osr_entry_point_offset(), O2);
      __ jmp(O2, G0);
      __ delayed()->nop();

    } else {
      // Update Backedge branch separately from invocations
      const Register G4_invoke_ctr = G4;
      __ increment_backedge_counter(G4_invoke_ctr, G1_scratch);
      if (ProfileInterpreter) {
        __ test_invocation_counter_for_mdp(G4_invoke_ctr, G3_scratch, Lforward);
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(O2_bumped_count, O0_cur_bcp, G3_scratch);
        }
      } else {
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(G4_invoke_ctr, O0_cur_bcp, G3_scratch);
        }
      }
    }

    __ bind(Lforward);
  } else
    // Bump bytecode pointer by displacement (take the branch)
    __ add( O1_disp, Lbcp, Lbcp );  // add to bc addr

  // continue with bytecode @ target
  // %%%%% Like Intel, could speed things up by moving bytecode fetch to code above,
  // %%%%% and changing dispatch_next to dispatch_only
  __ dispatch_next(vtos);
}
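
// Backward-branch profiling and OSR in the TieredCompilation path above,
// roughly (illustrative pseudocode; helper names invented for exposition):
//
//   if (disp <= 0) {                        // backward branch
//     counter = bump_backedge_counter();    // MDO if present, else methodOop
//     if ((counter & mask) == 0) {
//       nm = InterpreterRuntime::frequency_counter_overflow(branch_bcp);
//       if (nm != NULL && nm->entry_bci() != InvalidOSREntryBci)
//         migrate_frame_and_jump(nm);       // OSR into compiled code
//     }
//   }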

// Note: the Condition argument below is a TemplateTable::Condition
// (class-scoped enum), converted to an Assembler::Condition via ccNot().

void TemplateTable::if_0cmp(Condition cc) {
  // no pointers, integer only!
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  __ cmp( Otos_i, 0);
  __ if_cmp(ccNot(cc), false);
}

void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);
  __ pop_i(O1);
  __ cmp(O1, Otos_i);
  __ if_cmp(ccNot(cc), false);
}

void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);
  __ tst(Otos_i);
  __ if_cmp(ccNot(cc), true);
}

void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);
  __ pop_ptr(O1);
  __ verify_oop(O1);
  __ verify_oop(Otos_i);
  __ cmp(O1, Otos_i);
  __ if_cmp(ccNot(cc), true);
}

void TemplateTable::ret() {
  transition(vtos, vtos);
  locals_index(G3_scratch);
  __ access_local_returnAddress(G3_scratch, Otos_i);
  // Otos_i contains the bci, compute the bcp from that

#ifdef _LP64
#ifdef ASSERT
  // jsr result was labeled as an 'itos' not an 'atos' because we cannot GC
  // the result.  The return address (really a BCI) was stored with an
  // 'astore' because JVM specs claim it's a pointer-sized thing.  Hence in
  // the 64-bit build the 32-bit BCI is actually in the low bits of a 64-bit
  // loaded value.
  { Label zzz;
    __ set(65536, G3_scratch);
    __ cmp(Otos_i, G3_scratch);
    __ bp( Assembler::lessEqualUnsigned, false, Assembler::xcc, Assembler::pn, zzz);
    __ delayed()->nop();
    __ stop("BCI is in the wrong register half?");
    __ bind(zzz);
  }
#endif
#endif

  __ profile_ret(vtos, Otos_i, G4_scratch);

  __ ld_ptr(Lmethod, methodOopDesc::const_offset(), G3_scratch);
  __ add(G3_scratch, Otos_i, G3_scratch);
  __ add(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()), Lbcp);
  __ dispatch_next(vtos);
}

void TemplateTable::wide_ret() {
  transition(vtos, vtos);
  locals_index_wide(G3_scratch);
  __ access_local_returnAddress(G3_scratch, Otos_i);
  // Otos_i contains the bci, compute the bcp from that

  __ profile_ret(vtos, Otos_i, G4_scratch);

  __ ld_ptr(Lmethod, methodOopDesc::const_offset(), G3_scratch);
  __ add(G3_scratch, Otos_i, G3_scratch);
  __ add(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()), Lbcp);
  __ dispatch_next(vtos);
}

void TemplateTable::tableswitch() {
  transition(itos, vtos);
  Label default_case, continue_execution;

  // align bcp
  __ add(Lbcp, BytesPerInt, O1);
  __ and3(O1, -BytesPerInt, O1);
  // load lo, hi
  __ ld(O1, 1 * BytesPerInt, O2);  // low bound
  __ ld(O1, 2 * BytesPerInt, O3);  // high bound
#ifdef _LP64
  // Sign extend the 32 bits
  __ sra ( Otos_i, 0, Otos_i );
#endif /* _LP64 */

  // check against lo & hi
  __ cmp( Otos_i, O2);
  __ br( Assembler::less, false, Assembler::pn, default_case);
  __ delayed()->cmp( Otos_i, O3 );
  __ br( Assembler::greater, false, Assembler::pn, default_case);
  // lookup dispatch offset
  __ delayed()->sub(Otos_i, O2, O2);
  __ profile_switch_case(O2, O3, G3_scratch, G4_scratch);
  __ sll(O2, LogBytesPerInt, O2);
  __ add(O2, 3 * BytesPerInt, O2);
  __ ba(continue_execution);
  __ delayed()->ld(O1, O2, O2);
  // handle default
  __ bind(default_case);
  __ profile_switch_default(O3);
  __ ld(O1, 0, O2);  // get default offset
  // continue execution
  __ bind(continue_execution);
  __ add(Lbcp, O2, Lbcp);
  __ dispatch_next(vtos);
}
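
// Operand layout assumed above, after aligning bcp+1 to a 4-byte boundary
// (per the JVM spec for tableswitch):
//
//   O1+0: default offset | O1+4: low | O1+8: high | O1+12: offsets[0..high-low]
//
// hence the dispatch offset is loaded at (key - low)*4 + 3*BytesPerInt.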

void TemplateTable::lookupswitch() {
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}
1859 void TemplateTable::fast_linearswitch() {
1860 transition(itos, vtos);
1861 Label loop_entry, loop, found, continue_execution;
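// A lookupswitch is laid out as (per the JVM spec):
//   <pad to 4-byte alignment> default(s4) npairs(s4) {match(s4) offset(s4)} * npairs
// This template does a linear scan of the match/offset pairs; in effect:
//   for (p = pairs; p < pairs + npairs; p++)
//     if (p->match == key) { bcp += p->offset; goto dispatch; }
//   bcp += default;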
1862 // align bcp
1863 __ add(Lbcp, BytesPerInt, O1);
1864 __ and3(O1, -BytesPerInt, O1);
1865 // set counter
1866 __ ld(O1, BytesPerInt, O2);
1867 __ sll(O2, LogBytesPerInt + 1, O2); // npairs * 8: each pair is two ints
1868 __ add(O1, 2 * BytesPerInt, O3); // set first pair addr
1869 __ ba(loop_entry);
1870 __ delayed()->add(O3, O2, O2); // counter now points past last pair
1872 // table search
1873 __ bind(loop);
1874 __ cmp(O4, Otos_i);
1875 __ br(Assembler::equal, true, Assembler::pn, found);
1876 __ delayed()->ld(O3, BytesPerInt, O4); // offset -> O4
1877 __ inc(O3, 2 * BytesPerInt);
1879 __ bind(loop_entry);
1880 __ cmp(O2, O3);
1881 __ brx(Assembler::greaterUnsigned, true, Assembler::pt, loop);
1882 __ delayed()->ld(O3, 0, O4);
1884 // default case
1885 __ ld(O1, 0, O4); // get default offset
1886 if (ProfileInterpreter) {
1887 __ profile_switch_default(O3);
1888 __ ba_short(continue_execution);
1889 }
1891 // entry found -> get offset
1892 __ bind(found);
1893 if (ProfileInterpreter) {
1894 __ sub(O3, O1, O3);
1895 __ sub(O3, 2*BytesPerInt, O3);
1896 __ srl(O3, LogBytesPerInt + 1, O3); // convert byte offset back to pair index
1897 __ profile_switch_case(O3, O1, O2, G3_scratch);
1899 __ bind(continue_execution);
1900 }
1901 __ add(Lbcp, O4, Lbcp);
1902 __ dispatch_next(vtos);
1903 }
1906 void TemplateTable::fast_binaryswitch() {
1907 transition(itos, vtos);
1908 // Implementation using the following core algorithm: (copied from Intel)
1909 //
1910 // int binary_search(int key, LookupswitchPair* array, int n) {
1911 // // Binary search according to "Methodik des Programmierens" by
1912 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
1913 // int i = 0;
1914 // int j = n;
1915 // while (i+1 < j) {
1916 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
1917 // // with Q: for all i: 0 <= i < n: key < a[i]
1918 // // where a stands for the array and assuming that the (nonexistent)
1919 // // element a[n] is infinitely big.
1920 // int h = (i + j) >> 1;
1921 // // i < h < j
1922 // if (key < array[h].fast_match()) {
1923 // j = h;
1924 // } else {
1925 // i = h;
1926 // }
1927 // }
1928 // // R: a[i] <= key < a[i+1] or Q
1929 // // (i.e., if key is within array, i is the correct index)
1930 // return i;
1931 // }
1933 // register allocation
1934 assert(Otos_i == O0, "alias checking");
1935 const Register Rkey = Otos_i; // already set (tosca)
1936 const Register Rarray = O1;
1937 const Register Ri = O2;
1938 const Register Rj = O3;
1939 const Register Rh = O4;
1940 const Register Rscratch = O5;
1942 const int log_entry_size = 3;
1943 const int entry_size = 1 << log_entry_size;
1945 Label found;
1946 // Find Array start
1947 __ add(Lbcp, 3 * BytesPerInt, Rarray);
1948 __ and3(Rarray, -BytesPerInt, Rarray);
1949 // initialize i; j is initialized in the branch delay slot below
1950 __ clr( Ri );
1952 // and start
1953 Label entry;
1954 __ ba(entry);
1955 __ delayed()->ld( Rarray, -BytesPerInt, Rj);
1956 // (Rj is already in the native byte-ordering.)
1958 // binary search loop
1959 { Label loop;
1960 __ bind( loop );
1961 // int h = (i + j) >> 1;
1962 __ sra( Rh, 1, Rh );
1963 // if (key < array[h].fast_match()) {
1964 // j = h;
1965 // } else {
1966 // i = h;
1967 // }
1968 __ sll( Rh, log_entry_size, Rscratch );
1969 __ ld( Rarray, Rscratch, Rscratch );
1970 // (Rscratch is already in the native byte-ordering.)
1971 __ cmp( Rkey, Rscratch );
1972 if ( VM_Version::v9_instructions_work() ) {
1973 __ movcc( Assembler::less, false, Assembler::icc, Rh, Rj ); // j = h if (key < array[h].fast_match())
1974 __ movcc( Assembler::greaterEqual, false, Assembler::icc, Rh, Ri ); // i = h if (key >= array[h].fast_match())
1975 }
1976 else {
1977 Label end_of_if;
1978 __ br( Assembler::less, true, Assembler::pt, end_of_if );
1979 __ delayed()->mov( Rh, Rj ); // if (<) Rj = Rh
1980 __ mov( Rh, Ri ); // else i = h
1981 __ bind(end_of_if); // }
1982 }
1984 // while (i+1 < j)
1985 __ bind( entry );
1986 __ add( Ri, 1, Rscratch );
1987 __ cmp(Rscratch, Rj);
1988 __ br( Assembler::less, true, Assembler::pt, loop );
1989 __ delayed()->add( Ri, Rj, Rh ); // start h = (i + j) >> 1
1990 }
1992 // end of binary search, result index is i (must check again!)
1993 Label default_case;
1994 Label continue_execution;
1995 if (ProfileInterpreter) {
1996 __ mov( Ri, Rh ); // save index i for profiling
1997 }
1998 __ sll( Ri, log_entry_size, Ri );
1999 __ ld( Rarray, Ri, Rscratch );
2000 // (Rscratch is already in the native byte-ordering.)
2001 __ cmp( Rkey, Rscratch );
2002 __ br( Assembler::notEqual, true, Assembler::pn, default_case );
2003 __ delayed()->ld( Rarray, -2 * BytesPerInt, Rj ); // load default offset -> j
2005 // entry found -> j = offset
2006 __ inc( Ri, BytesPerInt );
2007 __ profile_switch_case(Rh, Rj, Rscratch, Rkey);
2008 __ ld( Rarray, Ri, Rj );
2009 // (Rj is already in the native byte-ordering.)
2011 if (ProfileInterpreter) {
2012 __ ba_short(continue_execution);
2013 }
2015 __ bind(default_case); // fall through (if not profiling)
2016 __ profile_switch_default(Ri);
2018 __ bind(continue_execution);
2019 __ add( Lbcp, Rj, Lbcp );
2020 __ dispatch_next( vtos );
2021 }
2024 void TemplateTable::_return(TosState state) {
2025 transition(state, state);
2026 assert(_desc->calls_vm(), "inconsistent calls_vm information");
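// Sketch of the special case below: when a class overrides finalize(), e.g.
//   class A { protected void finalize() { ... } }
// its klass carries JVM_ACC_HAS_FINALIZER, and the return of Object.<init> is
// rewritten to _return_register_finalizer so the new receiver is registered
// with the VM (via InterpreterRuntime::register_finalizer) before returning.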
2028 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2029 assert(state == vtos, "only valid state");
2030 __ mov(G0, G3_scratch);
2031 __ access_local_ptr(G3_scratch, Otos_i);
2032 __ load_klass(Otos_i, O2);
2033 __ set(JVM_ACC_HAS_FINALIZER, G3);
2034 __ ld(O2, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc), O2);
2035 __ andcc(G3, O2, G0);
2036 Label skip_register_finalizer;
2037 __ br(Assembler::zero, false, Assembler::pn, skip_register_finalizer);
2038 __ delayed()->nop();
2040 // Call out to do finalizer registration
2041 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), Otos_i);
2043 __ bind(skip_register_finalizer);
2044 }
2046 __ remove_activation(state, /* throw_monitor_exception */ true);
2048 // The caller's SP was adjusted upon method entry to accommodate
2049 // the callee's non-argument locals. Undo that adjustment.
2050 __ ret(); // return to caller
2051 __ delayed()->restore(I5_savedSP, G0, SP);
2052 }
2055 // ----------------------------------------------------------------------------
2056 // Volatile variables demand their effects be made known to all CPUs in
2057 // order. Store buffers on most chips allow reads & writes to reorder; the
2058 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
2059 // memory barrier (i.e., it's not sufficient that the interpreter does not
2060 // reorder volatile references, the hardware also must not reorder them).
2061 //
2062 // According to the new Java Memory Model (JMM):
2063 // (1) All volatiles are serialized with respect to each other.
2064 // ALSO reads & writes act as acquire & release, so:
2065 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
2066 // the read float up to before the read. It's OK for non-volatile memory refs
2067 // that happen before the volatile read to float down below it.
2068 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
2069 // that happen BEFORE the write float down to after the write. It's OK for
2070 // non-volatile memory refs that happen after the volatile write to float up
2071 // before it.
2072 //
2073 // We only put in barriers around volatile refs (they are expensive), not
2074 // _between_ memory refs (that would require us to track the flavor of the
2075 // previous memory refs). Requirements (2) and (3) require some barriers
2076 // before volatile stores and after volatile loads. These nearly cover
2077 // requirement (1) but miss the volatile-store-volatile-load case. This final
2078 // case is placed after volatile-stores although it could just as well go
2079 // before volatile-loads.
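// An illustrative (Java) example of rules (2) and (3):
//   int x; volatile int v;
//   writer: x = 1; v = 1;          reader: if (v == 1) assert x == 1;
// Rule (3) keeps the store to x from sinking below the volatile store to v;
// rule (2) keeps the load of x from floating above the volatile load of v.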
2080 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint) {
2081 // Helper function to insert an is-volatile test and memory barrier.
2082 // All current sparc implementations run in TSO, needing only StoreLoad
2083 if ((order_constraint & Assembler::StoreLoad) == 0) return;
2084 __ membar( order_constraint );
2085 }
2087 // ----------------------------------------------------------------------------
2088 void TemplateTable::resolve_cache_and_index(int byte_no,
2089 Register result,
2090 Register Rcache,
2091 Register index,
2092 size_t index_size) {
2093 // Depends on cpCacheOop layout!
2094 Label resolved;
2096 if (byte_no == f1_oop) {
2097 // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
2098 // This kind of CP cache entry does not need to match the flags byte, because
2099 // there is a 1-1 relation between bytecode type and CP entry type.
2100 assert_different_registers(result, Rcache);
2101 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2102 __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
2103 ConstantPoolCacheEntry::f1_offset(), result);
2104 __ tst(result);
2105 __ br(Assembler::notEqual, false, Assembler::pt, resolved);
2106 __ delayed()->set((int)bytecode(), O1);
2107 } else {
2108 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2109 assert(result == noreg, ""); //else change code for setting result
2110 __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, Lbyte_code, byte_no, 1, index_size);
2111 __ cmp(Lbyte_code, (int) bytecode()); // have we resolved this bytecode?
2112 __ br(Assembler::equal, false, Assembler::pt, resolved);
2113 __ delayed()->set((int)bytecode(), O1);
2114 }
2116 address entry;
2117 switch (bytecode()) {
2118 case Bytecodes::_getstatic : // fall through
2119 case Bytecodes::_putstatic : // fall through
2120 case Bytecodes::_getfield : // fall through
2121 case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
2122 case Bytecodes::_invokevirtual : // fall through
2123 case Bytecodes::_invokespecial : // fall through
2124 case Bytecodes::_invokestatic : // fall through
2125 case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
2126 case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
2127 case Bytecodes::_fast_aldc : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break;
2128 case Bytecodes::_fast_aldc_w : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break;
2129 default : ShouldNotReachHere(); break;
2130 }
2131 // first time invocation - must resolve first
2132 __ call_VM(noreg, entry, O1);
2133 // Update registers with resolved info
2134 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2135 if (result != noreg)
2136 __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
2137 ConstantPoolCacheEntry::f1_offset(), result);
2138 __ bind(resolved);
2139 }
2141 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2142 Register Rmethod,
2143 Register Ritable_index,
2144 Register Rflags,
2145 bool is_invokevirtual,
2146 bool is_invokevfinal,
2147 bool is_invokedynamic) {
2148 // Uses both G3_scratch and G4_scratch
2149 Register Rcache = G3_scratch;
2150 Register Rscratch = G4_scratch;
2151 assert_different_registers(Rcache, Rmethod, Ritable_index);
2153 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2155 // determine constant pool cache field offsets
2156 const int method_offset = in_bytes(
2157 cp_base_offset +
2158 (is_invokevirtual
2159 ? ConstantPoolCacheEntry::f2_offset()
2160 : ConstantPoolCacheEntry::f1_offset()
2161 )
2162 );
2163 const int flags_offset = in_bytes(cp_base_offset +
2164 ConstantPoolCacheEntry::flags_offset());
2165 // access constant pool cache fields
2166 const int index_offset = in_bytes(cp_base_offset +
2167 ConstantPoolCacheEntry::f2_offset());
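// For reference (see cpCacheOop.hpp): in an invoke entry, f1 holds the
// methodOop for invokespecial/invokestatic, the interface klassOop for
// invokeinterface, or the CallSite oop for invokedynamic; f2 holds the vtable
// index for invokevirtual (or the methodOop itself for a vfinal target) and
// the itable index for invokeinterface.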
2169 if (is_invokevfinal) {
2170 __ get_cache_and_index_at_bcp(Rcache, Rscratch, 1);
2171 __ ld_ptr(Rcache, method_offset, Rmethod);
2172 } else if (byte_no == f1_oop) {
2173 // Resolved f1_oop goes directly into 'method' register.
2174 resolve_cache_and_index(byte_no, Rmethod, Rcache, Rscratch, sizeof(u4));
2175 } else {
2176 resolve_cache_and_index(byte_no, noreg, Rcache, Rscratch, sizeof(u2));
2177 __ ld_ptr(Rcache, method_offset, Rmethod);
2178 }
2180 if (Ritable_index != noreg) {
2181 __ ld_ptr(Rcache, index_offset, Ritable_index);
2182 }
2183 __ ld_ptr(Rcache, flags_offset, Rflags);
2184 }
2186 // The Rcache register must be set before this call.
2187 void TemplateTable::load_field_cp_cache_entry(Register Robj,
2188 Register Rcache,
2189 Register index,
2190 Register Roffset,
2191 Register Rflags,
2192 bool is_static) {
2193 assert_different_registers(Rcache, Rflags, Roffset);
2195 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2197 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
2198 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
2199 if (is_static) {
2200 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f1_offset(), Robj);
2201 }
2202 }
2204 // The Rcache and index registers are expected to be set before this call.
2205 // Their correct values are preserved across the call.
2206 void TemplateTable::jvmti_post_field_access(Register Rcache,
2207 Register index,
2208 bool is_static,
2209 bool has_tos) {
2210 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2212 if (JvmtiExport::can_post_field_access()) {
2213 // Check to see if a field access watch has been set before we take
2214 // the time to call into the VM.
2215 Label Label1;
2216 assert_different_registers(Rcache, index, G1_scratch);
2217 AddressLiteral get_field_access_count_addr(JvmtiExport::get_field_access_count_addr());
2218 __ load_contents(get_field_access_count_addr, G1_scratch);
2219 __ cmp_and_br_short(G1_scratch, 0, Assembler::equal, Assembler::pt, Label1);
2221 __ add(Rcache, in_bytes(cp_base_offset), Rcache);
2223 if (is_static) {
2224 __ clr(Otos_i);
2225 } else {
2226 if (has_tos) {
2227 // save object pointer before call_VM() clobbers it
2228 __ push_ptr(Otos_i); // put object on tos where GC wants it.
2229 } else {
2230 // Load top of stack (do not pop the value off the stack);
2231 __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
2232 }
2233 __ verify_oop(Otos_i);
2234 }
2235 // Otos_i: object pointer or NULL if static
2236 // Rcache: cache entry pointer
2237 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2238 Otos_i, Rcache);
2239 if (!is_static && has_tos) {
2240 __ pop_ptr(Otos_i); // restore object pointer
2241 __ verify_oop(Otos_i);
2242 }
2243 __ get_cache_and_index_at_bcp(Rcache, index, 1);
2244 __ bind(Label1);
2245 }
2246 }
2248 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
2249 transition(vtos, vtos);
2251 Register Rcache = G3_scratch;
2252 Register index = G4_scratch;
2253 Register Rclass = Rcache;
2254 Register Roffset= G4_scratch;
2255 Register Rflags = G1_scratch;
2256 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2258 resolve_cache_and_index(byte_no, noreg, Rcache, index, sizeof(u2));
2259 jvmti_post_field_access(Rcache, index, is_static, false);
2260 load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);
2262 if (!is_static) {
2263 pop_and_check_object(Rclass);
2264 } else {
2265 __ verify_oop(Rclass);
2266 }
2268 Label exit;
2270 Assembler::Membar_mask_bits membar_bits =
2271 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
2273 if (__ membar_has_effect(membar_bits)) {
2274 // Get volatile flag
2275 __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch);
2276 __ and3(Rflags, Lscratch, Lscratch);
2277 }
2279 Label checkVolatile;
2281 // compute field type
2282 Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj;
2283 __ srl(Rflags, ConstantPoolCacheEntry::tosBits, Rflags);
2284 // Make sure we don't need to mask Rflags for tosBits after the above shift
2285 ConstantPoolCacheEntry::verify_tosBits();
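// After the shift Rflags holds the field's TosState directly, so the chain of
// compare-and-branch below is effectively (sketch):
//   switch (flags >> tosBits) { case atos: ...; case itos: ...; default: /* dtos */ }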
2287 // Check atos before itos for getstatic, more likely (in Queens at least)
2288 __ cmp(Rflags, atos);
2289 __ br(Assembler::notEqual, false, Assembler::pt, notObj);
2290 __ delayed()->cmp(Rflags, itos);
2292 // atos
2293 __ load_heap_oop(Rclass, Roffset, Otos_i);
2294 __ verify_oop(Otos_i);
2295 __ push(atos);
2296 if (!is_static) {
2297 patch_bytecode(Bytecodes::_fast_agetfield, G3_scratch, G4_scratch);
2298 }
2299 __ ba(checkVolatile);
2300 __ delayed()->tst(Lscratch);
2302 __ bind(notObj);
2304 // cmp(Rflags, itos);
2305 __ br(Assembler::notEqual, false, Assembler::pt, notInt);
2306 __ delayed()->cmp(Rflags, ltos);
2308 // itos
2309 __ ld(Rclass, Roffset, Otos_i);
2310 __ push(itos);
2311 if (!is_static) {
2312 patch_bytecode(Bytecodes::_fast_igetfield, G3_scratch, G4_scratch);
2313 }
2314 __ ba(checkVolatile);
2315 __ delayed()->tst(Lscratch);
2317 __ bind(notInt);
2319 // cmp(Rflags, ltos);
2320 __ br(Assembler::notEqual, false, Assembler::pt, notLong);
2321 __ delayed()->cmp(Rflags, btos);
2323 // ltos
2324 // load must be atomic
2325 __ ld_long(Rclass, Roffset, Otos_l);
2326 __ push(ltos);
2327 if (!is_static) {
2328 patch_bytecode(Bytecodes::_fast_lgetfield, G3_scratch, G4_scratch);
2329 }
2330 __ ba(checkVolatile);
2331 __ delayed()->tst(Lscratch);
2333 __ bind(notLong);
2335 // cmp(Rflags, btos);
2336 __ br(Assembler::notEqual, false, Assembler::pt, notByte);
2337 __ delayed()->cmp(Rflags, ctos);
2339 // btos
2340 __ ldsb(Rclass, Roffset, Otos_i);
2341 __ push(itos);
2342 if (!is_static) {
2343 patch_bytecode(Bytecodes::_fast_bgetfield, G3_scratch, G4_scratch);
2344 }
2345 __ ba(checkVolatile);
2346 __ delayed()->tst(Lscratch);
2348 __ bind(notByte);
2350 // cmp(Rflags, ctos);
2351 __ br(Assembler::notEqual, false, Assembler::pt, notChar);
2352 __ delayed()->cmp(Rflags, stos);
2354 // ctos
2355 __ lduh(Rclass, Roffset, Otos_i);
2356 __ push(itos);
2357 if (!is_static) {
2358 patch_bytecode(Bytecodes::_fast_cgetfield, G3_scratch, G4_scratch);
2359 }
2360 __ ba(checkVolatile);
2361 __ delayed()->tst(Lscratch);
2363 __ bind(notChar);
2365 // cmp(Rflags, stos);
2366 __ br(Assembler::notEqual, false, Assembler::pt, notShort);
2367 __ delayed()->cmp(Rflags, ftos);
2369 // stos
2370 __ ldsh(Rclass, Roffset, Otos_i);
2371 __ push(itos);
2372 if (!is_static) {
2373 patch_bytecode(Bytecodes::_fast_sgetfield, G3_scratch, G4_scratch);
2374 }
2375 __ ba(checkVolatile);
2376 __ delayed()->tst(Lscratch);
2378 __ bind(notShort);
2381 // cmp(Rflags, ftos);
2382 __ br(Assembler::notEqual, false, Assembler::pt, notFloat);
2383 __ delayed()->tst(Lscratch);
2385 // ftos
2386 __ ldf(FloatRegisterImpl::S, Rclass, Roffset, Ftos_f);
2387 __ push(ftos);
2388 if (!is_static) {
2389 patch_bytecode(Bytecodes::_fast_fgetfield, G3_scratch, G4_scratch);
2390 }
2391 __ ba(checkVolatile);
2392 __ delayed()->tst(Lscratch);
2394 __ bind(notFloat);
2397 // dtos
2398 __ ldf(FloatRegisterImpl::D, Rclass, Roffset, Ftos_d);
2399 __ push(dtos);
2400 if (!is_static) {
2401 patch_bytecode(Bytecodes::_fast_dgetfield, G3_scratch, G4_scratch);
2402 }
2404 __ bind(checkVolatile);
2405 if (__ membar_has_effect(membar_bits)) {
2406 // __ tst(Lscratch); executed in delay slot
2407 __ br(Assembler::zero, false, Assembler::pt, exit);
2408 __ delayed()->nop();
2409 volatile_barrier(membar_bits);
2410 }
2412 __ bind(exit);
2413 }
2416 void TemplateTable::getfield(int byte_no) {
2417 getfield_or_static(byte_no, false);
2418 }
2420 void TemplateTable::getstatic(int byte_no) {
2421 getfield_or_static(byte_no, true);
2422 }
2425 void TemplateTable::fast_accessfield(TosState state) {
2426 transition(atos, state);
2427 Register Rcache = G3_scratch;
2428 Register index = G4_scratch;
2429 Register Roffset = G4_scratch;
2430 Register Rflags = Rcache;
2431 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2433 __ get_cache_and_index_at_bcp(Rcache, index, 1);
2434 jvmti_post_field_access(Rcache, index, /*is_static*/false, /*has_tos*/true);
2436 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
2438 __ null_check(Otos_i);
2439 __ verify_oop(Otos_i);
2441 Label exit;
2443 Assembler::Membar_mask_bits membar_bits =
2444 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
2445 if (__ membar_has_effect(membar_bits)) {
2446 // Get volatile flag
2447 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Rflags);
2448 __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch);
2449 }
2451 switch (bytecode()) {
2452 case Bytecodes::_fast_bgetfield:
2453 __ ldsb(Otos_i, Roffset, Otos_i);
2454 break;
2455 case Bytecodes::_fast_cgetfield:
2456 __ lduh(Otos_i, Roffset, Otos_i);
2457 break;
2458 case Bytecodes::_fast_sgetfield:
2459 __ ldsh(Otos_i, Roffset, Otos_i);
2460 break;
2461 case Bytecodes::_fast_igetfield:
2462 __ ld(Otos_i, Roffset, Otos_i);
2463 break;
2464 case Bytecodes::_fast_lgetfield:
2465 __ ld_long(Otos_i, Roffset, Otos_l);
2466 break;
2467 case Bytecodes::_fast_fgetfield:
2468 __ ldf(FloatRegisterImpl::S, Otos_i, Roffset, Ftos_f);
2469 break;
2470 case Bytecodes::_fast_dgetfield:
2471 __ ldf(FloatRegisterImpl::D, Otos_i, Roffset, Ftos_d);
2472 break;
2473 case Bytecodes::_fast_agetfield:
2474 __ load_heap_oop(Otos_i, Roffset, Otos_i);
2475 break;
2476 default:
2477 ShouldNotReachHere();
2478 }
2480 if (__ membar_has_effect(membar_bits)) {
2481 __ btst(Lscratch, Rflags);
2482 __ br(Assembler::zero, false, Assembler::pt, exit);
2483 __ delayed()->nop();
2484 volatile_barrier(membar_bits);
2485 __ bind(exit);
2486 }
2488 if (state == atos) {
2489 __ verify_oop(Otos_i); // does not blow flags!
2490 }
2491 }
2493 void TemplateTable::jvmti_post_fast_field_mod() {
2494 if (JvmtiExport::can_post_field_modification()) {
2495 // Check to see if a field modification watch has been set before we take
2496 // the time to call into the VM.
2497 Label done;
2498 AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr());
2499 __ load_contents(get_field_modification_count_addr, G4_scratch);
2500 __ cmp_and_br_short(G4_scratch, 0, Assembler::equal, Assembler::pt, done);
2501 __ pop_ptr(G4_scratch); // copy the object pointer from tos
2502 __ verify_oop(G4_scratch);
2503 __ push_ptr(G4_scratch); // put the object pointer back on tos
2504 __ get_cache_entry_pointer_at_bcp(G1_scratch, G3_scratch, 1);
2505 // Save tos values before call_VM() clobbers them. Since we have
2506 // to do it for every data type, we use the saved values as the
2507 // jvalue object.
2508 switch (bytecode()) { // save tos values before call_VM() clobbers them
2509 case Bytecodes::_fast_aputfield: __ push_ptr(Otos_i); break;
2510 case Bytecodes::_fast_bputfield: // fall through
2511 case Bytecodes::_fast_sputfield: // fall through
2512 case Bytecodes::_fast_cputfield: // fall through
2513 case Bytecodes::_fast_iputfield: __ push_i(Otos_i); break;
2514 case Bytecodes::_fast_dputfield: __ push_d(Ftos_d); break;
2515 case Bytecodes::_fast_fputfield: __ push_f(Ftos_f); break;
2516 // get words in the right order for use as a jvalue object
2517 case Bytecodes::_fast_lputfield: __ push_l(Otos_l); break;
2518 }
2519 // setup pointer to jvalue object
2520 __ mov(Lesp, G3_scratch); __ inc(G3_scratch, wordSize);
2521 // G4_scratch: object pointer
2522 // G1_scratch: cache entry pointer
2523 // G3_scratch: jvalue object on the stack
2524 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), G4_scratch, G1_scratch, G3_scratch);
2525 switch (bytecode()) { // restore tos values
2526 case Bytecodes::_fast_aputfield: __ pop_ptr(Otos_i); break;
2527 case Bytecodes::_fast_bputfield: // fall through
2528 case Bytecodes::_fast_sputfield: // fall through
2529 case Bytecodes::_fast_cputfield: // fall through
2530 case Bytecodes::_fast_iputfield: __ pop_i(Otos_i); break;
2531 case Bytecodes::_fast_dputfield: __ pop_d(Ftos_d); break;
2532 case Bytecodes::_fast_fputfield: __ pop_f(Ftos_f); break;
2533 case Bytecodes::_fast_lputfield: __ pop_l(Otos_l); break;
2534 }
2535 __ bind(done);
2536 }
2537 }
2539 // The Rcache and index registers are expected to be set before this call.
2540 // The function may destroy various registers, but not Rcache and index.
2541 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register index, bool is_static) {
2542 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2544 if (JvmtiExport::can_post_field_modification()) {
2545 // Check to see if a field modification watch has been set before we take
2546 // the time to call into the VM.
2547 Label Label1;
2548 assert_different_registers(Rcache, index, G1_scratch);
2549 AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr());
2550 __ load_contents(get_field_modification_count_addr, G1_scratch);
2551 __ cmp_and_br_short(G1_scratch, 0, Assembler::zero, Assembler::pt, Label1);
2553 // The Rcache and index registers have already been set. This call could be
2554 // eliminated, but then the code below would have to use Rcache and index
2555 // instead of the registers it loads here.
2556 __ get_cache_and_index_at_bcp(G1_scratch, G4_scratch, 1);
2558 __ add(G1_scratch, in_bytes(cp_base_offset), G3_scratch);
2559 if (is_static) {
2560 // Life is simple. Null out the object pointer.
2561 __ clr(G4_scratch);
2562 } else {
2563 Register Rflags = G1_scratch;
2564 // Life is harder. The stack holds the value on top, followed by the
2565 // object. We don't know the size of the value, though; it could be
2566 // one or two words depending on its type. As a result, we must find
2567 // the type to determine where the object is.
2569 Label two_word, valsizeknown;
2570 __ ld_ptr(G1_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
2571 __ mov(Lesp, G4_scratch);
2572 __ srl(Rflags, ConstantPoolCacheEntry::tosBits, Rflags);
2573 // Make sure we don't need to mask Rflags for tosBits after the above shift
2574 ConstantPoolCacheEntry::verify_tosBits();
2575 __ cmp(Rflags, ltos);
2576 __ br(Assembler::equal, false, Assembler::pt, two_word);
2577 __ delayed()->cmp(Rflags, dtos);
2578 __ br(Assembler::equal, false, Assembler::pt, two_word);
2579 __ delayed()->nop();
2580 __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(1));
2581 __ ba_short(valsizeknown);
2582 __ bind(two_word);
2584 __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(2));
2586 __ bind(valsizeknown);
2587 // setup object pointer
2588 __ ld_ptr(G4_scratch, 0, G4_scratch);
2589 __ verify_oop(G4_scratch);
2590 }
2591 // setup pointer to jvalue object
2592 __ mov(Lesp, G1_scratch); __ inc(G1_scratch, wordSize);
2593 // G4_scratch: object pointer or NULL if static
2594 // G3_scratch: cache entry pointer
2595 // G1_scratch: jvalue object on the stack
2596 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
2597 G4_scratch, G3_scratch, G1_scratch);
2598 __ get_cache_and_index_at_bcp(Rcache, index, 1);
2599 __ bind(Label1);
2600 }
2601 }
2603 void TemplateTable::pop_and_check_object(Register r) {
2604 __ pop_ptr(r);
2605 __ null_check(r); // for field access must check obj.
2606 __ verify_oop(r);
2607 }
2609 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
2610 transition(vtos, vtos);
2611 Register Rcache = G3_scratch;
2612 Register index = G4_scratch;
2613 Register Rclass = Rcache;
2614 Register Roffset= G4_scratch;
2615 Register Rflags = G1_scratch;
2616 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2618 resolve_cache_and_index(byte_no, noreg, Rcache, index, sizeof(u2));
2619 jvmti_post_field_mod(Rcache, index, is_static);
2620 load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);
2622 Assembler::Membar_mask_bits read_bits =
2623 Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore);
2624 Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad;
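// Conceptually a volatile store needs:
//   membar(LoadStore|StoreStore); store; membar(StoreLoad);
// On TSO sparc only the trailing StoreLoad has any effect, so
// membar_has_effect(read_bits) is false and the pre-store barrier folds away.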
2626 Label notVolatile, checkVolatile, exit;
2627 if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
2628 __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch);
2629 __ and3(Rflags, Lscratch, Lscratch);
2631 if (__ membar_has_effect(read_bits)) {
2632 __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile);
2633 volatile_barrier(read_bits);
2634 __ bind(notVolatile);
2635 }
2636 }
2638 __ srl(Rflags, ConstantPoolCacheEntry::tosBits, Rflags);
2639 // Make sure we don't need to mask Rflags for tosBits after the above shift
2640 ConstantPoolCacheEntry::verify_tosBits();
2642 // compute field type
2643 Label notInt, notShort, notChar, notObj, notByte, notLong, notFloat;
2645 if (is_static) {
2646 // putstatic with object type most likely, check that first
2647 __ cmp(Rflags, atos);
2648 __ br(Assembler::notEqual, false, Assembler::pt, notObj);
2649 __ delayed()->cmp(Rflags, itos);
2651 // atos
2652 {
2653 __ pop_ptr();
2654 __ verify_oop(Otos_i);
2655 do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
2656 __ ba(checkVolatile);
2657 __ delayed()->tst(Lscratch);
2658 }
2660 __ bind(notObj);
2661 // cmp(Rflags, itos);
2662 __ br(Assembler::notEqual, false, Assembler::pt, notInt);
2663 __ delayed()->cmp(Rflags, btos);
2665 // itos
2666 {
2667 __ pop_i();
2668 __ st(Otos_i, Rclass, Roffset);
2669 __ ba(checkVolatile);
2670 __ delayed()->tst(Lscratch);
2671 }
2673 __ bind(notInt);
2674 } else {
2675 // putfield with int type most likely, check that first
2676 __ cmp(Rflags, itos);
2677 __ br(Assembler::notEqual, false, Assembler::pt, notInt);
2678 __ delayed()->cmp(Rflags, atos);
2680 // itos
2681 {
2682 __ pop_i();
2683 pop_and_check_object(Rclass);
2684 __ st(Otos_i, Rclass, Roffset);
2685 patch_bytecode(Bytecodes::_fast_iputfield, G3_scratch, G4_scratch, true, byte_no);
2686 __ ba(checkVolatile);
2687 __ delayed()->tst(Lscratch);
2688 }
2690 __ bind(notInt);
2691 // cmp(Rflags, atos);
2692 __ br(Assembler::notEqual, false, Assembler::pt, notObj);
2693 __ delayed()->cmp(Rflags, btos);
2695 // atos
2696 {
2697 __ pop_ptr();
2698 pop_and_check_object(Rclass);
2699 __ verify_oop(Otos_i);
2700 do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
2701 patch_bytecode(Bytecodes::_fast_aputfield, G3_scratch, G4_scratch, true, byte_no);
2702 __ ba(checkVolatile);
2703 __ delayed()->tst(Lscratch);
2704 }
2706 __ bind(notObj);
2707 }
2709 // cmp(Rflags, btos);
2710 __ br(Assembler::notEqual, false, Assembler::pt, notByte);
2711 __ delayed()->cmp(Rflags, ltos);
2713 // btos
2714 {
2715 __ pop_i();
2716 if (!is_static) pop_and_check_object(Rclass);
2717 __ stb(Otos_i, Rclass, Roffset);
2718 if (!is_static) {
2719 patch_bytecode(Bytecodes::_fast_bputfield, G3_scratch, G4_scratch, true, byte_no);
2720 }
2721 __ ba(checkVolatile);
2722 __ delayed()->tst(Lscratch);
2723 }
2725 __ bind(notByte);
2726 // cmp(Rflags, ltos);
2727 __ br(Assembler::notEqual, false, Assembler::pt, notLong);
2728 __ delayed()->cmp(Rflags, ctos);
2730 // ltos
2731 {
2732 __ pop_l();
2733 if (!is_static) pop_and_check_object(Rclass);
2734 __ st_long(Otos_l, Rclass, Roffset);
2735 if (!is_static) {
2736 patch_bytecode(Bytecodes::_fast_lputfield, G3_scratch, G4_scratch, true, byte_no);
2737 }
2738 __ ba(checkVolatile);
2739 __ delayed()->tst(Lscratch);
2740 }
2742 __ bind(notLong);
2743 // cmp(Rflags, ctos);
2744 __ br(Assembler::notEqual, false, Assembler::pt, notChar);
2745 __ delayed()->cmp(Rflags, stos);
2747 // ctos (char)
2748 {
2749 __ pop_i();
2750 if (!is_static) pop_and_check_object(Rclass);
2751 __ sth(Otos_i, Rclass, Roffset);
2752 if (!is_static) {
2753 patch_bytecode(Bytecodes::_fast_cputfield, G3_scratch, G4_scratch, true, byte_no);
2754 }
2755 __ ba(checkVolatile);
2756 __ delayed()->tst(Lscratch);
2757 }
2759 __ bind(notChar);
2760 // cmp(Rflags, stos);
2761 __ br(Assembler::notEqual, false, Assembler::pt, notShort);
2762 __ delayed()->cmp(Rflags, ftos);
2764 // stos (short)
2765 {
2766 __ pop_i();
2767 if (!is_static) pop_and_check_object(Rclass);
2768 __ sth(Otos_i, Rclass, Roffset);
2769 if (!is_static) {
2770 patch_bytecode(Bytecodes::_fast_sputfield, G3_scratch, G4_scratch, true, byte_no);
2771 }
2772 __ ba(checkVolatile);
2773 __ delayed()->tst(Lscratch);
2774 }
2776 __ bind(notShort);
2777 // cmp(Rflags, ftos);
2778 __ br(Assembler::notZero, false, Assembler::pt, notFloat);
2779 __ delayed()->nop();
2781 // ftos
2782 {
2783 __ pop_f();
2784 if (!is_static) pop_and_check_object(Rclass);
2785 __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
2786 if (!is_static) {
2787 patch_bytecode(Bytecodes::_fast_fputfield, G3_scratch, G4_scratch, true, byte_no);
2788 }
2789 __ ba(checkVolatile);
2790 __ delayed()->tst(Lscratch);
2791 }
2793 __ bind(notFloat);
2795 // dtos
2796 {
2797 __ pop_d();
2798 if (!is_static) pop_and_check_object(Rclass);
2799 __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
2800 if (!is_static) {
2801 patch_bytecode(Bytecodes::_fast_dputfield, G3_scratch, G4_scratch, true, byte_no);
2802 }
2803 }
2805 __ bind(checkVolatile);
2806 __ tst(Lscratch);
2808 if (__ membar_has_effect(write_bits)) {
2809 // condition codes were set by the tst above
2810 __ br(Assembler::zero, false, Assembler::pt, exit);
2811 __ delayed()->nop();
2812 volatile_barrier(Assembler::StoreLoad);
2813 __ bind(exit);
2814 }
2815 }
2817 void TemplateTable::fast_storefield(TosState state) {
2818 transition(state, vtos);
2819 Register Rcache = G3_scratch;
2820 Register Rclass = Rcache;
2821 Register Roffset= G4_scratch;
2822 Register Rflags = G1_scratch;
2823 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2825 jvmti_post_fast_field_mod();
2827 __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 1);
2829 Assembler::Membar_mask_bits read_bits =
2830 Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore);
2831 Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad;
2833 Label notVolatile, checkVolatile, exit;
2834 if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
2835 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
2836 __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch);
2837 __ and3(Rflags, Lscratch, Lscratch);
2838 if (__ membar_has_effect(read_bits)) {
2839 __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile);
2840 volatile_barrier(read_bits);
2841 __ bind(notVolatile);
2842 }
2843 }
2845 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
2846 pop_and_check_object(Rclass);
2848 switch (bytecode()) {
2849 case Bytecodes::_fast_bputfield: __ stb(Otos_i, Rclass, Roffset); break;
2850 case Bytecodes::_fast_cputfield: /* fall through */
2851 case Bytecodes::_fast_sputfield: __ sth(Otos_i, Rclass, Roffset); break;
2852 case Bytecodes::_fast_iputfield: __ st(Otos_i, Rclass, Roffset); break;
2853 case Bytecodes::_fast_lputfield: __ st_long(Otos_l, Rclass, Roffset); break;
2854 case Bytecodes::_fast_fputfield:
2855 __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
2856 break;
2857 case Bytecodes::_fast_dputfield:
2858 __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
2859 break;
2860 case Bytecodes::_fast_aputfield:
2861 do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
2862 break;
2863 default:
2864 ShouldNotReachHere();
2865 }
2867 if (__ membar_has_effect(write_bits)) {
2868 __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, exit);
2869 volatile_barrier(Assembler::StoreLoad);
2870 __ bind(exit);
2871 }
2872 }
2875 void TemplateTable::putfield(int byte_no) {
2876 putfield_or_static(byte_no, false);
2877 }
2879 void TemplateTable::putstatic(int byte_no) {
2880 putfield_or_static(byte_no, true);
2881 }
2884 void TemplateTable::fast_xaccess(TosState state) {
2885 transition(vtos, state);
2886 Register Rcache = G3_scratch;
2887 Register Roffset = G4_scratch;
2888 Register Rflags = G4_scratch;
2889 Register Rreceiver = Lscratch;
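// This implements the rewritten pair aload_0 + _fast_Xgetfield (e.g.
// _fast_iaccess_0): the receiver comes from local 0 and the field offset from
// the (already resolved) cp cache entry at bcp + 2.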
2891 __ ld_ptr(Llocals, 0, Rreceiver);
2893 // access constant pool cache (is resolved)
2894 __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 2);
2895 __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset(), Roffset);
2896 __ add(Lbcp, 1, Lbcp); // needed to report exception at the correct bcp
2898 __ verify_oop(Rreceiver);
2899 __ null_check(Rreceiver);
2900 if (state == atos) {
2901 __ load_heap_oop(Rreceiver, Roffset, Otos_i);
2902 } else if (state == itos) {
2903 __ ld (Rreceiver, Roffset, Otos_i) ;
2904 } else if (state == ftos) {
2905 __ ldf(FloatRegisterImpl::S, Rreceiver, Roffset, Ftos_f);
2906 } else {
2907 ShouldNotReachHere();
2908 }
2910 Assembler::Membar_mask_bits membar_bits =
2911 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
2912 if (__ membar_has_effect(membar_bits)) {
2914 // Get is_volatile value in Rflags and check if membar is needed
2915 __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::flags_offset(), Rflags);
2917 // Test volatile
2918 Label notVolatile;
2919 __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch);
2920 __ btst(Rflags, Lscratch);
2921 __ br(Assembler::zero, false, Assembler::pt, notVolatile);
2922 __ delayed()->nop();
2923 volatile_barrier(membar_bits);
2924 __ bind(notVolatile);
2925 }
2927 __ interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
2928 __ sub(Lbcp, 1, Lbcp);
2929 }
2931 //----------------------------------------------------------------------------------------------------
2932 // Calls
2934 void TemplateTable::count_calls(Register method, Register temp) {
2935 // implemented elsewhere
2936 ShouldNotReachHere();
2937 }
2939 void TemplateTable::generate_vtable_call(Register Rrecv, Register Rindex, Register Rret) {
2940 Register Rtemp = G4_scratch;
2941 Register Rcall = Rindex;
2942 assert_different_registers(Rcall, G5_method, Gargs, Rret);
2944 // get target methodOop & entry point
2945 const int base = instanceKlass::vtable_start_offset() * wordSize;
2946 if (vtableEntry::size() % 3 == 0) {
2947 // scale the vtable index by 12:
2948 int one_third = vtableEntry::size() / 3;
2949 __ sll(Rindex, exact_log2(one_third * 1 * wordSize), Rtemp);
2950 __ sll(Rindex, exact_log2(one_third * 2 * wordSize), Rindex);
2951 __ add(Rindex, Rtemp, Rindex);
2952 } else {
2953 // scale the vtable index by 8:
2954 __ sll(Rindex, exact_log2(vtableEntry::size() * wordSize), Rindex);
2955 }
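// E.g. with a 3-word vtableEntry and 4-byte words the scale is 12, computed
// above as (index << 2) + (index << 3) since 12 is not a power of two.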
2957 __ add(Rrecv, Rindex, Rrecv);
2958 __ ld_ptr(Rrecv, base + vtableEntry::method_offset_in_bytes(), G5_method);
2960 __ call_from_interpreter(Rcall, Gargs, Rret);
2961 }
2963 void TemplateTable::invokevirtual(int byte_no) {
2964 transition(vtos, vtos);
2965 assert(byte_no == f2_byte, "use this argument");
2967 Register Rscratch = G3_scratch;
2968 Register Rtemp = G4_scratch;
2969 Register Rret = Lscratch;
2970 Register Rrecv = G5_method;
2971 Label notFinal;
2973 load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, true, false, false);
2974 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
2976 // Check for vfinal
2977 __ set((1 << ConstantPoolCacheEntry::vfinalMethod), G4_scratch);
2978 __ btst(Rret, G4_scratch);
2979 __ br(Assembler::zero, false, Assembler::pt, notFinal);
2980 __ delayed()->and3(Rret, 0xFF, G4_scratch); // gets number of parameters
2982 patch_bytecode(Bytecodes::_fast_invokevfinal, Rscratch, Rtemp);
2984 invokevfinal_helper(Rscratch, Rret);
2986 __ bind(notFinal);
2988 __ mov(G5_method, Rscratch); // better scratch register
2989 __ load_receiver(G4_scratch, O0); // gets receiverOop
2990 // receiver is in O0
2991 __ verify_oop(O0);
2993 // get return address
2994 AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
2995 __ set(table, Rtemp);
2996 __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type
2997 // Make sure we don't need to mask Rret for tosBits after the above shift
2998 ConstantPoolCacheEntry::verify_tosBits();
2999 __ sll(Rret, LogBytesPerWord, Rret);
3000 __ ld_ptr(Rtemp, Rret, Rret); // get return address
3002 // get receiver klass
3003 __ null_check(O0, oopDesc::klass_offset_in_bytes());
3004 __ load_klass(O0, Rrecv);
3005 __ verify_oop(Rrecv);
3007 __ profile_virtual_call(Rrecv, O4);
3009 generate_vtable_call(Rrecv, Rscratch, Rret);
3010 }
3012 void TemplateTable::fast_invokevfinal(int byte_no) {
3013 transition(vtos, vtos);
3014 assert(byte_no == f2_byte, "use this argument");
3016 load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Lscratch, true,
3017 /*is_invokevfinal*/true, false);
3018 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
3019 invokevfinal_helper(G3_scratch, Lscratch);
3020 }
3022 void TemplateTable::invokevfinal_helper(Register Rscratch, Register Rret) {
3023 Register Rtemp = G4_scratch;
3025 __ verify_oop(G5_method);
3027 // Load receiver from stack slot
3028 __ lduh(G5_method, in_bytes(methodOopDesc::size_of_parameters_offset()), G4_scratch);
3029 __ load_receiver(G4_scratch, O0);
3031 // receiver NULL check
3032 __ null_check(O0);
3034 __ profile_final_call(O4);
3036 // get return address
3037 AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
3038 __ set(table, Rtemp);
3039 __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type
3040 // Make sure we don't need to mask Rret for tosBits after the above shift
3041 ConstantPoolCacheEntry::verify_tosBits();
3042 __ sll(Rret, LogBytesPerWord, Rret);
3043 __ ld_ptr(Rtemp, Rret, Rret); // get return address
3046 // do the call
3047 __ call_from_interpreter(Rscratch, Gargs, Rret);
3048 }
3050 void TemplateTable::invokespecial(int byte_no) {
3051 transition(vtos, vtos);
3052 assert(byte_no == f1_byte, "use this argument");
3054 Register Rscratch = G3_scratch;
3055 Register Rtemp = G4_scratch;
3056 Register Rret = Lscratch;
3058 load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, /*virtual*/ false, false, false);
3059 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
3061 __ verify_oop(G5_method);
3063 __ lduh(G5_method, in_bytes(methodOopDesc::size_of_parameters_offset()), G4_scratch);
3064 __ load_receiver(G4_scratch, O0);
3066 // receiver NULL check
3067 __ null_check(O0);
3069 __ profile_call(O4);
3071 // get return address
3072 AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
3073 __ set(table, Rtemp);
3074 __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type
3075 // Make sure we don't need to mask Rret for tosBits after the above shift
3076 ConstantPoolCacheEntry::verify_tosBits();
3077 __ sll(Rret, LogBytesPerWord, Rret);
3078 __ ld_ptr(Rtemp, Rret, Rret); // get return address
3080 // do the call
3081 __ call_from_interpreter(Rscratch, Gargs, Rret);
3082 }
3084 void TemplateTable::invokestatic(int byte_no) {
3085 transition(vtos, vtos);
3086 assert(byte_no == f1_byte, "use this argument");
3088 Register Rscratch = G3_scratch;
3089 Register Rtemp = G4_scratch;
3090 Register Rret = Lscratch;
3092 load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, /*virtual*/ false, false, false);
3093 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
3095 __ verify_oop(G5_method);
3097 __ profile_call(O4);
3099 // get return address
3100 AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
3101 __ set(table, Rtemp);
3102 __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type
3103 // Make sure we don't need to mask Rret for tosBits after the above shift
3104 ConstantPoolCacheEntry::verify_tosBits();
3105 __ sll(Rret, LogBytesPerWord, Rret);
3106 __ ld_ptr(Rtemp, Rret, Rret); // get return address
3108 // do the call
3109 __ call_from_interpreter(Rscratch, Gargs, Rret);
3110 }
3113 void TemplateTable::invokeinterface_object_method(Register RklassOop,
3114 Register Rcall,
3115 Register Rret,
3116 Register Rflags) {
3117 Register Rscratch = G4_scratch;
3118 Register Rindex = Lscratch;
3120 assert_different_registers(Rscratch, Rindex, Rret);
3122 Label notFinal;
3124 // Check for vfinal
3125 __ set((1 << ConstantPoolCacheEntry::vfinalMethod), Rscratch);
3126 __ btst(Rflags, Rscratch);
3127 __ br(Assembler::zero, false, Assembler::pt, notFinal);
3128 __ delayed()->nop();
3130 __ profile_final_call(O4);
3132 // do the call - the index (f2) contains the methodOop
3133 assert_different_registers(G5_method, Gargs, Rcall);
3134 __ mov(Rindex, G5_method);
3135 __ call_from_interpreter(Rcall, Gargs, Rret);
3136 __ bind(notFinal);
3138 __ profile_virtual_call(RklassOop, O4);
3139 generate_vtable_call(RklassOop, Rindex, Rret);
3140 }
3143 void TemplateTable::invokeinterface(int byte_no) {
3144 transition(vtos, vtos);
3145 assert(byte_no == f1_byte, "use this argument");
3147 Register Rscratch = G4_scratch;
3148 Register Rret = G3_scratch;
3149 Register Rindex = Lscratch;
3150 Register Rinterface = G1_scratch;
3151 Register RklassOop = G5_method;
3152 Register Rflags = O1;
3153 assert_different_registers(Rscratch, G5_method);
3155 load_invoke_cp_cache_entry(byte_no, Rinterface, Rindex, Rflags, /*virtual*/ false, false, false);
3156 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
3158 // get receiver
3159 __ and3(Rflags, 0xFF, Rscratch); // gets number of parameters
3160 __ load_receiver(Rscratch, O0);
3161 __ verify_oop(O0);
3163 __ mov(Rflags, Rret);
3165 // get return address
3166 AddressLiteral table(Interpreter::return_5_addrs_by_index_table());
3167 __ set(table, Rscratch);
3168 __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type
3169 // Make sure we don't need to mask Rret for tosBits after the above shift
3170 ConstantPoolCacheEntry::verify_tosBits();
3171 __ sll(Rret, LogBytesPerWord, Rret);
3172 __ ld_ptr(Rscratch, Rret, Rret); // get return address
3174 // get receiver klass
3175 __ null_check(O0, oopDesc::klass_offset_in_bytes());
3176 __ load_klass(O0, RklassOop);
3177 __ verify_oop(RklassOop);
3179 // Special case of invokeinterface called for virtual method of
3180 // java.lang.Object. See cpCacheOop.cpp for details.
3181 // This code isn't produced by javac, but could be produced by
3182 // another compliant java compiler.
3183 Label notMethod;
3184 __ set((1 << ConstantPoolCacheEntry::methodInterface), Rscratch);
3185 __ btst(Rflags, Rscratch);
3186 __ br(Assembler::zero, false, Assembler::pt, notMethod);
3187 __ delayed()->nop();
3189 invokeinterface_object_method(RklassOop, Rinterface, Rret, Rflags);
3191 __ bind(notMethod);
3193 __ profile_virtual_call(RklassOop, O4);
3195 //
3196 // find entry point to call
3197 //
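// Roughly, the itable sits right after the vtable and starts with a table of
//   itableOffsetEntry { klassOop interface; int offset; }
// records (one per implemented interface), followed by blocks of
// itableMethodEntry records holding the actual methodOops. The loop below
// scans the offset entries for Rinterface, then indexes into its method block.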
3199 // compute start of first itableOffsetEntry (which is at end of vtable)
3200 const int base = instanceKlass::vtable_start_offset() * wordSize;
3201 Label search;
3202 Register Rtemp = Rflags;
3204 __ ld(RklassOop, instanceKlass::vtable_length_offset() * wordSize, Rtemp);
3205 if (align_object_offset(1) > 1) {
3206 __ round_to(Rtemp, align_object_offset(1));
3207 }
3208 __ sll(Rtemp, LogBytesPerWord, Rtemp); // Rtemp *= wordSize
3209 if (Assembler::is_simm13(base)) {
3210 __ add(Rtemp, base, Rtemp);
3211 } else {
3212 __ set(base, Rscratch);
3213 __ add(Rscratch, Rtemp, Rtemp);
3214 }
3215 __ add(RklassOop, Rtemp, Rscratch);
3217 __ bind(search);
3219 __ ld_ptr(Rscratch, itableOffsetEntry::interface_offset_in_bytes(), Rtemp);
3220 {
3221 Label ok;
3223 // Check that entry is non-null. Null entries are probably a bytecode
3224 // problem. If the interface isn't implemented by the receiver class,
3225 // the VM should throw IncompatibleClassChangeError. linkResolver checks
3226 // this too but that's only if the entry isn't already resolved, so we
3227 // need to check again.
3228 __ br_notnull_short( Rtemp, Assembler::pt, ok);
3229 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
3230 __ should_not_reach_here();
3231 __ bind(ok);
3232 __ verify_oop(Rtemp);
3233 }
3235 __ verify_oop(Rinterface);
3237 __ cmp(Rinterface, Rtemp);
3238 __ brx(Assembler::notEqual, true, Assembler::pn, search);
3239 __ delayed()->add(Rscratch, itableOffsetEntry::size() * wordSize, Rscratch);
3241 // entry found and Rscratch points to it
3242 __ ld(Rscratch, itableOffsetEntry::offset_offset_in_bytes(), Rscratch);
3244 assert(itableMethodEntry::method_offset_in_bytes() == 0, "adjust instruction below");
3245 __ sll(Rindex, exact_log2(itableMethodEntry::size() * wordSize), Rindex); // Rindex *= entry size in bytes
3246 __ add(Rscratch, Rindex, Rscratch);
3247 __ ld_ptr(RklassOop, Rscratch, G5_method);
3249 // Check for abstract method error.
3250 {
3251 Label ok;
3252 __ br_notnull_short(G5_method, Assembler::pt, ok);
3253 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3254 __ should_not_reach_here();
3255 __ bind(ok);
3256 }
3258 Register Rcall = Rinterface;
3259 assert_different_registers(Rcall, G5_method, Gargs, Rret);
3261 __ verify_oop(G5_method);
3262 __ call_from_interpreter(Rcall, Gargs, Rret);
3264 }
3267 void TemplateTable::invokedynamic(int byte_no) {
3268 transition(vtos, vtos);
3269 assert(byte_no == f1_oop, "use this argument");
3271 if (!EnableInvokeDynamic) {
3272 // We should not encounter this bytecode if !EnableInvokeDynamic.
3273 // The verifier will stop it. However, if we get past the verifier,
3274 // this will stop the thread in a reasonable way, without crashing the JVM.
3275 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3276 InterpreterRuntime::throw_IncompatibleClassChangeError));
3277 // the call_VM checks for exception, so we should never return here.
3278 __ should_not_reach_here();
3279 return;
3280 }
3282 // G5: CallSite object (f1)
3283 // XX: unused (f2)
3284 // XX: flags (unused)
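// Sketch of the dispatch generated below, in Java-ish pseudocode:
//   MethodHandle target = callSite.getTarget();   // load_heap_oop of the CallSite's target
//   target.invokeExact(args...);                  // jump_to_method_handle_entry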
3286 Register G5_callsite = G5_method;
3287 Register Rscratch = G3_scratch;
3288 Register Rtemp = G1_scratch;
3289 Register Rret = Lscratch;
3291 load_invoke_cp_cache_entry(byte_no, G5_callsite, noreg, Rret,
3292 /*virtual*/ false, /*vfinal*/ false, /*indy*/ true);
3293 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
3295 // profile this call
3296 __ profile_call(O4);
3298 // get return address
3299 AddressLiteral table(Interpreter::return_5_addrs_by_index_table());
3300 __ set(table, Rtemp);
3301 __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type
3302 // Make sure we don't need to mask Rret for tosBits after the above shift
3303 ConstantPoolCacheEntry::verify_tosBits();
3304 __ sll(Rret, LogBytesPerWord, Rret);
3305 __ ld_ptr(Rtemp, Rret, Rret); // get return address
3307 __ verify_oop(G5_callsite);
3308 __ load_heap_oop(G5_callsite, __ delayed_value(java_lang_invoke_CallSite::target_offset_in_bytes, Rscratch), G3_method_handle);
3309 __ null_check(G3_method_handle);
3310 __ verify_oop(G3_method_handle);
3312 // Adjust Rret first so Llast_SP can be same as Rret
3313 __ add(Rret, -frame::pc_return_offset, O7);
3314 __ add(Lesp, BytesPerWord, Gargs); // setup parameter pointer
3315 __ jump_to_method_handle_entry(G3_method_handle, Rtemp, /* emit_delayed_nop */ false);
3316 // Record SP so we can remove any stack space allocated by adapter transition
3317 __ delayed()->mov(SP, Llast_SP);
3318 }
3321 //----------------------------------------------------------------------------------------------------
3322 // Allocation
3324 void TemplateTable::_new() {
3325 transition(vtos, atos);
3327 Label slow_case;
3328 Label done;
3329 Label initialize_header;
3330 Label initialize_object; // including clearing the fields
3332 Register RallocatedObject = Otos_i;
3333 Register RinstanceKlass = O1;
3334 Register Roffset = O3;
3335 Register Rscratch = O4;
3337 __ get_2_byte_integer_at_bcp(1, Rscratch, Roffset, InterpreterMacroAssembler::Unsigned);
3338 __ get_cpool_and_tags(Rscratch, G3_scratch);
3339 // make sure the class we're about to instantiate has been resolved
3340 // This is done before loading instanceKlass to be consistent with the order
3341 // in which the constant pool is updated (see constantPoolOopDesc::klass_at_put)
3342 __ add(G3_scratch, typeArrayOopDesc::header_size(T_BYTE) * wordSize, G3_scratch);
3343 __ ldub(G3_scratch, Roffset, G3_scratch);
3344 __ cmp(G3_scratch, JVM_CONSTANT_Class);
3345 __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
3346 __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
3347 // get instanceKlass
3348 //__ sll(Roffset, LogBytesPerWord, Roffset); // executed in delay slot
3349 __ add(Roffset, sizeof(constantPoolOopDesc), Roffset);
3350 __ ld_ptr(Rscratch, Roffset, RinstanceKlass);
3352 // make sure klass is fully initialized:
3353 __ ld(RinstanceKlass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc), G3_scratch);
3354 __ cmp(G3_scratch, instanceKlass::fully_initialized);
3355 __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
3356 __ delayed()->ld(RinstanceKlass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc), Roffset);
3358 // get instance_size in instanceKlass (already aligned)
3359 //__ ld(RinstanceKlass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc), Roffset);
3361 // make sure klass has no finalizer and is not abstract, an interface, or java/lang/Class
3362 __ btst(Klass::_lh_instance_slow_path_bit, Roffset);
3363 __ br(Assembler::notZero, false, Assembler::pn, slow_case);
3364 __ delayed()->nop();
3366 // allocate the instance
3367 // 1) Try to allocate in the TLAB
3368 // 2) if fail, and the TLAB is not full enough to discard, allocate in the shared Eden
3369 // 3) if the above fails (or is not applicable), go to a slow case
3370 // (creates a new TLAB, etc.)
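// A rough pseudocode sketch of the policy implemented below (helper names
// illustrative, not the actual ThreadLocalAllocBuffer API):
//   if ((obj = tlab.allocate(size)) == NULL) {
//     if (tlab.free() <= tlab.refill_waste_limit()) goto slow_case; // discard & refill TLAB
//     tlab.bump_refill_waste_limit();                               // keep TLAB,
//     obj = eden.cas_allocate(size);                                // allocate directly in eden
//   }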
3372 const bool allow_shared_alloc =
3373 Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
3375 if (UseTLAB) {
3376 Register RoldTopValue = RallocatedObject;
3377 Register RtlabWasteLimitValue = G3_scratch;
3378 Register RnewTopValue = G1_scratch;
3379 Register RendValue = Rscratch;
3380 Register RfreeValue = RnewTopValue;
3382 // check if we can allocate in the TLAB
3383 __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), RoldTopValue); // sets up RallocatedObject
3384 __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), RendValue);
3385 __ add(RoldTopValue, Roffset, RnewTopValue);
3387 // if there is enough space, we do not CAS and do not clear
3388 __ cmp(RnewTopValue, RendValue);
3389 if (ZeroTLAB) {
3390 // the fields have already been cleared
3391 __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_header);
3392 } else {
3393 // initialize both the header and fields
3394 __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_object);
3395 }
3396 __ delayed()->st_ptr(RnewTopValue, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
3398 if (allow_shared_alloc) {
3399 // Check if tlab should be discarded (refill_waste_limit >= free)
3400 __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), RtlabWasteLimitValue);
3401 __ sub(RendValue, RoldTopValue, RfreeValue);
3402 #ifdef _LP64
3403 __ srlx(RfreeValue, LogHeapWordSize, RfreeValue);
3404 #else
3405 __ srl(RfreeValue, LogHeapWordSize, RfreeValue);
3406 #endif
3407 __ cmp_and_brx_short(RtlabWasteLimitValue, RfreeValue, Assembler::greaterEqualUnsigned, Assembler::pt, slow_case); // tlab waste is small
3409 // increment waste limit to prevent getting stuck on this slow path
3410 __ add(RtlabWasteLimitValue, ThreadLocalAllocBuffer::refill_waste_limit_increment(), RtlabWasteLimitValue);
3411 __ st_ptr(RtlabWasteLimitValue, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
3412 } else {
3413 // No allocation in the shared eden.
3414 __ ba_short(slow_case);
3415 }
3416 }
3418 // Allocation in the shared Eden
3419 if (allow_shared_alloc) {
3420 Register RoldTopValue = G1_scratch;
3421 Register RtopAddr = G3_scratch;
3422 Register RnewTopValue = RallocatedObject;
3423 Register RendValue = Rscratch;
3425 __ set((intptr_t)Universe::heap()->top_addr(), RtopAddr);
3427 Label retry;
3428 __ bind(retry);
3429 __ set((intptr_t)Universe::heap()->end_addr(), RendValue);
3430 __ ld_ptr(RendValue, 0, RendValue);
3431 __ ld_ptr(RtopAddr, 0, RoldTopValue);
3432 __ add(RoldTopValue, Roffset, RnewTopValue);
3434 // RnewTopValue contains the top address after the new object
3435 // has been allocated.
3436 __ cmp_and_brx_short(RnewTopValue, RendValue, Assembler::greaterUnsigned, Assembler::pn, slow_case);
3438 __ casx_under_lock(RtopAddr, RoldTopValue, RnewTopValue,
3439 VM_Version::v9_instructions_work() ? NULL :
3440 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
3442 // if someone beat us on the allocation, try again, otherwise continue
3443 __ cmp_and_brx_short(RoldTopValue, RnewTopValue, Assembler::notEqual, Assembler::pn, retry);
3445 // bump total bytes allocated by this thread
3446 // RoldTopValue and RtopAddr are dead, so can use G1 and G3
3447 __ incr_allocated_bytes(Roffset, G1_scratch, G3_scratch);
3448 }
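// The shared-Eden path above is a standard CAS-based bump-pointer
// allocation; sketch in C-like pseudocode (illustrative only):
//
//   HeapWord* old_top;
//   do {
//     old_top = *Universe::heap()->top_addr();
//     HeapWord* new_top = old_top + size_in_bytes;
//     if (new_top > *Universe::heap()->end_addr()) goto slow_case;
//   } while (!cas(Universe::heap()->top_addr(), old_top, new_top));
//   thread->incr_allocated_bytes(size_in_bytes);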
3450 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
3451 // clear object fields
3452 __ bind(initialize_object);
3453 __ deccc(Roffset, sizeof(oopDesc));
3454 __ br(Assembler::zero, false, Assembler::pt, initialize_header);
3455 __ delayed()->add(RallocatedObject, sizeof(oopDesc), G3_scratch);
3457 // initialize remaining object fields
3458 if (UseBlockZeroing) {
3459 // Use BIS for zeroing
3460 __ bis_zeroing(G3_scratch, Roffset, G1_scratch, initialize_header);
3461 } else {
3462 Label loop;
3463 __ subcc(Roffset, wordSize, Roffset);
3464 __ bind(loop);
3465 //__ subcc(Roffset, wordSize, Roffset); // executed above loop or in delay slot
3466 __ st_ptr(G0, G3_scratch, Roffset);
3467 __ br(Assembler::notEqual, false, Assembler::pt, loop);
3468 __ delayed()->subcc(Roffset, wordSize, Roffset);
3469 }
3470 __ ba_short(initialize_header);
3471 }
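// The non-BIS loop above zeroes the payload from the last word down to
// the first; equivalent C sketch (payload_base = obj + sizeof(oopDesc),
// payload_bytes = instance size minus the header, both nonzero here):
//
//   for (int off = payload_bytes - wordSize; off >= 0; off -= wordSize)
//     *(intptr_t*)(payload_base + off) = 0;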
3473 // slow case
3474 __ bind(slow_case);
3475 __ get_2_byte_integer_at_bcp(1, G3_scratch, O2, InterpreterMacroAssembler::Unsigned);
3476 __ get_constant_pool(O1);
3478 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), O1, O2);
3480 __ ba_short(done);
3482 // Initialize the header: mark, klass
3483 __ bind(initialize_header);
3485 if (UseBiasedLocking) {
3486 __ ld_ptr(RinstanceKlass, Klass::prototype_header_offset_in_bytes() + sizeof(oopDesc), G4_scratch);
3487 } else {
3488 __ set((intptr_t)markOopDesc::prototype(), G4_scratch);
3489 }
3490 __ st_ptr(G4_scratch, RallocatedObject, oopDesc::mark_offset_in_bytes()); // mark
3491 __ store_klass_gap(G0, RallocatedObject); // klass gap if compressed
3492 __ store_klass(RinstanceKlass, RallocatedObject); // klass (last for cms)
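// In effect (sketch of the three stores above):
//   obj->set_mark(UseBiasedLocking ? klass->prototype_header()
//                                  : markOopDesc::prototype());
//   obj->set_klass_gap(0);    // only present with compressed oops
//   obj->set_klass(klass);    // written last so a concurrent CMS scan
//                             // never sees a klass on uninitialized memory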
3494 {
3495 SkipIfEqual skip_if(
3496 _masm, G4_scratch, &DTraceAllocProbes, Assembler::zero);
3497 // Trigger dtrace event
3498 __ push(atos);
3499 __ call_VM_leaf(noreg,
3500 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), O0);
3501 __ pop(atos);
3502 }
3504 // continue
3505 __ bind(done);
3506 }
3510 void TemplateTable::newarray() {
3511 transition(itos, atos);
3512 __ ldub(Lbcp, 1, O1);
3513 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), O1, Otos_i);
3514 }
3517 void TemplateTable::anewarray() {
3518 transition(itos, atos);
3519 __ get_constant_pool(O1);
3520 __ get_2_byte_integer_at_bcp(1, G4_scratch, O2, InterpreterMacroAssembler::Unsigned);
3521 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), O1, O2, Otos_i);
3522 }
3525 void TemplateTable::arraylength() {
3526 transition(atos, itos);
3527 Label ok;
3528 __ verify_oop(Otos_i);
3529 __ tst(Otos_i);
3530 __ throw_if_not_1_x( Assembler::notZero, ok );
3531 __ delayed()->ld(Otos_i, arrayOopDesc::length_offset_in_bytes(), Otos_i);
3532 __ throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
3533 }
3536 void TemplateTable::checkcast() {
3537 transition(atos, atos);
3538 Label done, is_null, quicked, cast_ok, resolved;
3539 Register Roffset = G1_scratch;
3540 Register RobjKlass = O5;
3541 Register RspecifiedKlass = O4;
3543 // Check for casting a NULL
3544 __ br_null_short(Otos_i, Assembler::pn, is_null);
3546 // Get value klass in RobjKlass
3547 __ load_klass(Otos_i, RobjKlass); // get value klass
3549 // Get constant pool tag
3550 __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);
3552 // See if the checkcast has been quickened
3553 __ get_cpool_and_tags(Lscratch, G3_scratch);
3554 __ add(G3_scratch, typeArrayOopDesc::header_size(T_BYTE) * wordSize, G3_scratch);
3555 __ ldub(G3_scratch, Roffset, G3_scratch);
3556 __ cmp(G3_scratch, JVM_CONSTANT_Class);
3557 __ br(Assembler::equal, true, Assembler::pt, quicked);
3558 __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
3560 __ push_ptr(); // save receiver for result, and for GC
3561 call_VM(RspecifiedKlass, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3562 __ pop_ptr(Otos_i, G3_scratch); // restore receiver
3564 __ ba_short(resolved);
3566 // Extract target class from constant pool
3567 __ bind(quicked);
3568 __ add(Roffset, sizeof(constantPoolOopDesc), Roffset);
3569 __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
3570 __ bind(resolved);
3571 __ load_klass(Otos_i, RobjKlass); // get value klass
3573 // Generate a fast subtype check. Branch to cast_ok on success;
3574 // throw an exception on failure.
3575 __ gen_subtype_check( RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, cast_ok );
3577 // Not a subtype; so must throw exception
3578 __ throw_if_not_x( Assembler::never, Interpreter::_throw_ClassCastException_entry, G3_scratch );
3580 __ bind(cast_ok);
3582 if (ProfileInterpreter) {
3583 __ ba_short(done);
3584 }
3585 __ bind(is_null);
3586 __ profile_null_seen(G3_scratch);
3587 __ bind(done);
3588 }
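// Bytecode semantics implemented above, as a hedged C-like sketch
// (resolve_or_quicken is a hypothetical stand-in for the quickening path):
//
//   if (obj != NULL) {                              // NULL always passes
//     Klass* k = resolve_or_quicken(cp_index);
//     if (!obj->klass()->is_subtype_of(k))
//       throw ClassCastException;
//   }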
3591 void TemplateTable::instanceof() {
3592 Label done, is_null, quicked, resolved;
3593 transition(atos, itos);
3594 Register Roffset = G1_scratch;
3595 Register RobjKlass = O5;
3596 Register RspecifiedKlass = O4;
3598 // Check for a NULL object
3599 __ br_null_short(Otos_i, Assembler::pt, is_null);
3601 // Get value klass in RobjKlass
3602 __ load_klass(Otos_i, RobjKlass); // get value klass
3604 // Get constant pool tag
3605 __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);
3607 // See if the instanceof has been quickened
3608 __ get_cpool_and_tags(Lscratch, G3_scratch);
3609 __ add(G3_scratch, typeArrayOopDesc::header_size(T_BYTE) * wordSize, G3_scratch);
3610 __ ldub(G3_scratch, Roffset, G3_scratch);
3611 __ cmp(G3_scratch, JVM_CONSTANT_Class);
3612 __ br(Assembler::equal, true, Assembler::pt, quicked);
3613 __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
3615 __ push_ptr(); // save receiver for result, and for GC
3616 call_VM(RspecifiedKlass, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3617 __ pop_ptr(Otos_i, G3_scratch); // restore receiver
3619 __ ba_short(resolved);
3621 // Extract target class from constant pool
3622 __ bind(quicked);
3623 __ add(Roffset, sizeof(constantPoolOopDesc), Roffset);
3624 __ get_constant_pool(Lscratch);
3625 __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
3626 __ bind(resolved);
3627 __ load_klass(Otos_i, RobjKlass); // get value klass
3629 // Generate a fast subtype check. Branch to done on success;
3630 // return 0 on failure.
3631 __ or3(G0, 1, Otos_i); // set result assuming quick tests succeed
3632 __ gen_subtype_check( RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, done );
3633 // Not a subtype; return 0;
3634 __ clr( Otos_i );
3636 if (ProfileInterpreter) {
3637 __ ba_short(done);
3638 }
3639 __ bind(is_null);
3640 __ profile_null_seen(G3_scratch);
3641 __ bind(done);
3642 }
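// The net result computed above, as a one-line sketch: instanceof leaves
// 1 in Otos_i for a non-NULL operand whose klass passes the subtype check,
// otherwise 0:
//
//   Otos_i = (obj != NULL && obj->klass()->is_subtype_of(k)) ? 1 : 0;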
3644 void TemplateTable::_breakpoint() {
3646 // Note: We get here even if we are single stepping.
3647 // jbug insists on setting breakpoints at every bytecode
3648 // even if we are in single step mode.
3650 transition(vtos, vtos);
3651 // get the unpatched byte code
3652 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), Lmethod, Lbcp);
3653 __ mov(O0, Lbyte_code);
3655 // post the breakpoint event
3656 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), Lmethod, Lbcp);
3658 // complete the execution of original bytecode
3659 __ dispatch_normal(vtos);
3660 }
3663 //----------------------------------------------------------------------------------------------------
3664 // Exceptions
3666 void TemplateTable::athrow() {
3667 transition(atos, vtos);
3669 // This works because the exception is cached in Otos_i, which is the same as O0,
3670 // which is what throw_exception_entry expects
3671 assert(Otos_i == Oexception, "see explanation above");
3673 __ verify_oop(Otos_i);
3674 __ null_check(Otos_i);
3675 __ throw_if_not_x(Assembler::never, Interpreter::throw_exception_entry(), G3_scratch);
3676 }
3679 //----------------------------------------------------------------------------------------------------
3680 // Synchronization
3683 // See frame_sparc.hpp for monitor block layout.
3684 // Monitor elements are dynamically allocated by growing stack as needed.
3686 void TemplateTable::monitorenter() {
3687 transition(atos, vtos);
3688 __ verify_oop(Otos_i);
3689 // Try to acquire a lock on the object
3690 // Repeat until successful (i.e., until
3691 // monitorenter returns true).
3693 { Label ok;
3694 __ tst(Otos_i);
3695 __ throw_if_not_1_x( Assembler::notZero, ok);
3696 __ delayed()->mov(Otos_i, Lscratch); // save obj
3697 __ throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
3698 }
3700 assert(O0 == Otos_i, "Be sure where the object to lock is");
3702 // find a free slot in the monitor block
3705 // initialize entry pointer
3706 __ clr(O1); // points to free slot or NULL
3708 {
3709 Label entry, loop, exit;
3710 __ add( __ top_most_monitor(), O2 ); // last one to check
3711 __ ba( entry );
3712 __ delayed()->mov( Lmonitors, O3 ); // first one to check
3715 __ bind( loop );
3717 __ verify_oop(O4); // verify each monitor's oop
3718 __ tst(O4); // is this entry unused?
3719 if (VM_Version::v9_instructions_work())
3720 __ movcc( Assembler::zero, false, Assembler::ptr_cc, O3, O1);
3721 else {
3722 Label L;
3723 __ br( Assembler::zero, true, Assembler::pn, L );
3724 __ delayed()->mov(O3, O1); // remember this one if it matches
3725 __ bind(L);
3726 }
3728 __ cmp(O4, O0); // check if current entry is for same object
3729 __ brx( Assembler::equal, false, Assembler::pn, exit );
3730 __ delayed()->inc( O3, frame::interpreter_frame_monitor_size() * wordSize ); // check next one
3732 __ bind( entry );
3734 __ cmp( O3, O2 );
3735 __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, loop );
3736 __ delayed()->ld_ptr(O3, BasicObjectLock::obj_offset_in_bytes(), O4);
3738 __ bind( exit );
3739 }
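// Sketch of the scan just generated (comment only): remember the last
// unused slot seen, but stop early at an entry that already locks this object:
//
//   BasicObjectLock* free = NULL;
//   for (BasicObjectLock* m = Lmonitors; m <= top_most_monitor(); m++) {
//     if (m->obj() == NULL) free = m;    // candidate free slot
//     if (m->obj() == obj)  break;       // entry for the same object
//   }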
3741 { Label allocated;
3743 // found free slot?
3744 __ br_notnull_short(O1, Assembler::pn, allocated);
3746 __ add_monitor_to_stack( false, O2, O3 );
3747 __ mov(Lmonitors, O1);
3749 __ bind(allocated);
3750 }
3752 // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
3753 // The object has already been popped from the stack, so the expression stack looks correct.
3754 __ inc(Lbcp);
3756 __ st_ptr(O0, O1, BasicObjectLock::obj_offset_in_bytes()); // store object
3757 __ lock_object(O1, O0);
3759 // check if there's enough space on the stack for the monitors after locking
3760 __ generate_stack_overflow_check(0);
3762 // The bcp has already been incremented. Just need to dispatch to next instruction.
3763 __ dispatch_next(vtos);
3764 }
3767 void TemplateTable::monitorexit() {
3768 transition(atos, vtos);
3769 __ verify_oop(Otos_i);
3770 __ tst(Otos_i);
3771 __ throw_if_not_x( Assembler::notZero, Interpreter::_throw_NullPointerException_entry, G3_scratch );
3773 assert(O0 == Otos_i, "just checking");
3775 { Label entry, loop, found;
3776 __ add( __ top_most_monitor(), O2 ); // last one to check
3777 __ ba(entry);
3778 // Use Lscratch to hold the monitor element to check; start with the most recent monitor.
3779 // By using a local register it survives the call to the C routine.
3780 __ delayed()->mov( Lmonitors, Lscratch );
3782 __ bind( loop );
3784 __ verify_oop(O4); // verify each monitor's oop
3785 __ cmp(O4, O0); // check if current entry is for desired object
3786 __ brx( Assembler::equal, true, Assembler::pt, found );
3787 __ delayed()->mov(Lscratch, O1); // pass found entry as argument to monitorexit
3789 __ inc( Lscratch, frame::interpreter_frame_monitor_size() * wordSize ); // advance to next
3791 __ bind( entry );
3793 __ cmp( Lscratch, O2 );
3794 __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, loop );
3795 __ delayed()->ld_ptr(Lscratch, BasicObjectLock::obj_offset_in_bytes(), O4);
3797 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
3798 __ should_not_reach_here();
3800 __ bind(found);
3801 }
3802 __ unlock_object(O1);
3803 }
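// Unlike monitorenter, the scan above must find an entry for exactly this
// object; otherwise locking is unbalanced and the runtime throws
// IllegalMonitorStateException. Sketch (comment only):
//
//   for (BasicObjectLock* m = Lmonitors; m <= top_most_monitor(); m++)
//     if (m->obj() == obj) { unlock_object(m); return; }
//   InterpreterRuntime::throw_illegal_monitor_state_exception(thread);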
3806 //----------------------------------------------------------------------------------------------------
3807 // Wide instructions
3809 void TemplateTable::wide() {
3810 transition(vtos, vtos);
3811 __ ldub(Lbcp, 1, G3_scratch); // get next bc
3812 __ sll(G3_scratch, LogBytesPerWord, G3_scratch);
3813 AddressLiteral ep(Interpreter::_wentry_point);
3814 __ set(ep, G4_scratch);
3815 __ ld_ptr(G4_scratch, G3_scratch, G3_scratch);
3816 __ jmp(G3_scratch, G0);
3817 __ delayed()->nop();
3818 // Note: the Lbcp increment step is part of the individual wide bytecode implementations
3819 }
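// Dispatch sketch (comment only): the wide prefix scales the following
// bytecode by the pointer size and indexes a parallel entry-point table:
//
//   address entry = Interpreter::_wentry_point[*(Lbcp + 1)];
//   goto *entry;   // each wide handler performs its own Lbcp increment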
3822 //----------------------------------------------------------------------------------------------------
3823 // Multi arrays
3825 void TemplateTable::multianewarray() {
3826 transition(vtos, atos);
3827 // put ndims * stackElementSize into Lscratch
3828 __ ldub( Lbcp, 3, Lscratch);
3829 __ sll( Lscratch, Interpreter::logStackElementSize, Lscratch);
3830 // Lesp points past last_dim, so set O1 to the first_dim address
3831 __ add( Lesp, Lscratch, O1);
3832 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), O1);
3833 __ add( Lesp, Lscratch, Lesp); // pop all dimensions off the stack
3834 }
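// Operand layout sketch (comment only): Lesp points just past the last
// dimension word, so the first dimension sits ndims stack slots above it:
//
//   intptr_t* first_dim = Lesp + ndims;            // passed in O1
//   Otos_i = InterpreterRuntime::multianewarray(thread, first_dim);
//   Lesp  += ndims;                                // pop all dimensions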
3835 #endif /* !CC_INTERP */