Wed, 23 Jan 2013 13:02:39 -0500
8005915: Unify SERIALGC and INCLUDE_ALTERNATE_GCS
Summary: Rename INCLUDE_ALTERNATE_GCS to INCLUDE_ALL_GCS and replace SERIALGC with INCLUDE_ALL_GCS.
Reviewed-by: coleenp, stefank
1 /*
2 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "interpreter/interpreter.hpp"
27 #include "interpreter/interpreterRuntime.hpp"
28 #include "interpreter/templateTable.hpp"
29 #include "memory/universe.inline.hpp"
30 #include "oops/methodData.hpp"
31 #include "oops/objArrayKlass.hpp"
32 #include "oops/oop.inline.hpp"
33 #include "prims/methodHandles.hpp"
34 #include "runtime/sharedRuntime.hpp"
35 #include "runtime/stubRoutines.hpp"
36 #include "runtime/synchronizer.hpp"
37 #include "utilities/macros.hpp"
39 #ifndef CC_INTERP
40 #define __ _masm->
42 // Misc helpers
44 // Do an oop store like *(base + index + offset) = val
45 // index can be noreg.
46 static void do_oop_store(InterpreterMacroAssembler* _masm,
47 Register base,
48 Register index,
49 int offset,
50 Register val,
51 Register tmp,
52 BarrierSet::Name barrier,
53 bool precise) {
54 assert(tmp != val && tmp != base && tmp != index, "register collision");
55 assert(index == noreg || offset == 0, "only one offset");
56 switch (barrier) {
57 #if INCLUDE_ALL_GCS
58 case BarrierSet::G1SATBCT:
59 case BarrierSet::G1SATBCTLogging:
60 {
61 // Load and record the previous value.
62 __ g1_write_barrier_pre(base, index, offset,
63 noreg /* pre_val */,
64 tmp, true /*preserve_o_regs*/);
66 if (index == noreg ) {
67 assert(Assembler::is_simm13(offset), "fix this code");
68 __ store_heap_oop(val, base, offset);
69 } else {
70 __ store_heap_oop(val, base, index);
71 }
73 // No need for post barrier if storing NULL
74 if (val != G0) {
75 if (precise) {
76 if (index == noreg) {
77 __ add(base, offset, base);
78 } else {
79 __ add(base, index, base);
80 }
81 }
82 __ g1_write_barrier_post(base, val, tmp);
83 }
84 }
85 break;
86 #endif // INCLUDE_ALL_GCS
87 case BarrierSet::CardTableModRef:
88 case BarrierSet::CardTableExtension:
89 {
90 if (index == noreg ) {
91 assert(Assembler::is_simm13(offset), "fix this code");
92 __ store_heap_oop(val, base, offset);
93 } else {
94 __ store_heap_oop(val, base, index);
95 }
96 // No need for post barrier if storing NULL
97 if (val != G0) {
98 if (precise) {
99 if (index == noreg) {
100 __ add(base, offset, base);
101 } else {
102 __ add(base, index, base);
103 }
104 }
105 __ card_write_barrier_post(base, val, tmp);
106 }
107 }
108 break;
109 case BarrierSet::ModRef:
110 case BarrierSet::Other:
111 ShouldNotReachHere();
112 break;
113 default :
114 ShouldNotReachHere();
116 }
117 }
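// A usage sketch (hedged; it mirrors the aastore path later in this file):
// store the oop in Otos_i into the array element addressed by O1, letting
// do_oop_store pick the pre/post barriers for the active collector.
//
//   do_oop_store(_masm, O1, noreg,
//                arrayOopDesc::base_offset_in_bytes(T_OBJECT),
//                Otos_i, G3_scratch, _bs->kind(), true /* precise */);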
120 //----------------------------------------------------------------------------------------------------
121 // Platform-dependent initialization
123 void TemplateTable::pd_initialize() {
124 // (none)
125 }
128 //----------------------------------------------------------------------------------------------------
129 // Condition conversion
130 Assembler::Condition ccNot(TemplateTable::Condition cc) {
131 switch (cc) {
132 case TemplateTable::equal : return Assembler::notEqual;
133 case TemplateTable::not_equal : return Assembler::equal;
134 case TemplateTable::less : return Assembler::greaterEqual;
135 case TemplateTable::less_equal : return Assembler::greater;
136 case TemplateTable::greater : return Assembler::lessEqual;
137 case TemplateTable::greater_equal: return Assembler::less;
138 }
139 ShouldNotReachHere();
140 return Assembler::zero;
141 }
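// Illustrative note on why the mapping is inverted: if_icmpeq tests
// TemplateTable::equal, and ccNot turns that into Assembler::notEqual so
// InterpreterMacroAssembler::if_cmp can branch around the taken-branch
// code whenever the original condition fails.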
143 //----------------------------------------------------------------------------------------------------
144 // Miscellaneous helper routines
147 Address TemplateTable::at_bcp(int offset) {
148 assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
149 return Address(Lbcp, offset);
150 }
153 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
154 Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
155 int byte_no) {
156 // With sharing on, may need to test Method* flag.
157 if (!RewriteBytecodes) return;
158 Label L_patch_done;
160 switch (bc) {
161 case Bytecodes::_fast_aputfield:
162 case Bytecodes::_fast_bputfield:
163 case Bytecodes::_fast_cputfield:
164 case Bytecodes::_fast_dputfield:
165 case Bytecodes::_fast_fputfield:
166 case Bytecodes::_fast_iputfield:
167 case Bytecodes::_fast_lputfield:
168 case Bytecodes::_fast_sputfield:
169 {
170 // We skip bytecode quickening for putfield instructions when
171 // the put_code written to the constant pool cache is zero.
172 // This is required so that every execution of this instruction
173 // calls out to InterpreterRuntime::resolve_get_put to do
174 // additional, required work.
175 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
176 assert(load_bc_into_bc_reg, "we use bc_reg as temp");
177 __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1);
178 __ set(bc, bc_reg);
179 __ cmp_and_br_short(temp_reg, 0, Assembler::equal, Assembler::pn, L_patch_done); // don't patch
180 }
181 break;
182 default:
183 assert(byte_no == -1, "sanity");
184 if (load_bc_into_bc_reg) {
185 __ set(bc, bc_reg);
186 }
187 }
189 if (JvmtiExport::can_post_breakpoint()) {
190 Label L_fast_patch;
191 __ ldub(at_bcp(0), temp_reg);
192 __ cmp_and_br_short(temp_reg, Bytecodes::_breakpoint, Assembler::notEqual, Assembler::pt, L_fast_patch);
193 // perform the quickening, slowly, in the bowels of the breakpoint table
194 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), Lmethod, Lbcp, bc_reg);
195 __ ba_short(L_patch_done);
196 __ bind(L_fast_patch);
197 }
199 #ifdef ASSERT
200 Bytecodes::Code orig_bytecode = Bytecodes::java_code(bc);
201 Label L_okay;
202 __ ldub(at_bcp(0), temp_reg);
203 __ cmp(temp_reg, orig_bytecode);
204 __ br(Assembler::equal, false, Assembler::pt, L_okay);
205 __ delayed()->cmp(temp_reg, bc_reg);
206 __ br(Assembler::equal, false, Assembler::pt, L_okay);
207 __ delayed()->nop();
208 __ stop("patching the wrong bytecode");
209 __ bind(L_okay);
210 #endif
212 // patch bytecode
213 __ stb(bc_reg, at_bcp(0));
214 __ bind(L_patch_done);
215 }
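// Control-flow sketch of the patch above, assuming the putfield case
// (names like put_code are descriptive, not literal accessors here):
//
//   if (cache->put_code() == 0)        goto L_patch_done;  // not resolved yet
//   if (JVMTI && *bcp == _breakpoint)  InterpreterRuntime::set_original_bytecode_at(...);
//   else                               *bcp = fast_bytecode;  // the stb above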
217 //----------------------------------------------------------------------------------------------------
218 // Individual instructions
220 void TemplateTable::nop() {
221 transition(vtos, vtos);
222 // nothing to do
223 }
225 void TemplateTable::shouldnotreachhere() {
226 transition(vtos, vtos);
227 __ stop("shouldnotreachhere bytecode");
228 }
230 void TemplateTable::aconst_null() {
231 transition(vtos, atos);
232 __ clr(Otos_i);
233 }
236 void TemplateTable::iconst(int value) {
237 transition(vtos, itos);
238 __ set(value, Otos_i);
239 }
242 void TemplateTable::lconst(int value) {
243 transition(vtos, ltos);
244 assert(value >= 0, "check this code");
245 #ifdef _LP64
246 __ set(value, Otos_l);
247 #else
248 __ set(value, Otos_l2);
249 __ clr( Otos_l1);
250 #endif
251 }
254 void TemplateTable::fconst(int value) {
255 transition(vtos, ftos);
256 static float zero = 0.0, one = 1.0, two = 2.0;
257 float* p;
258 switch( value ) {
259 default: ShouldNotReachHere();
260 case 0: p = &zero; break;
261 case 1: p = &one; break;
262 case 2: p = &two; break;
263 }
264 AddressLiteral a(p);
265 __ sethi(a, G3_scratch);
266 __ ldf(FloatRegisterImpl::S, G3_scratch, a.low10(), Ftos_f);
267 }
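// Addressing sketch: AddressLiteral splits the constant's 32-bit address
// into a hi22 part (sethi) and a 10-bit immediate (low10), roughly:
//
//   G3_scratch = addr & ~0x3ff;                  // sethi
//   Ftos_f     = *(G3_scratch + (addr & 0x3ff)); // ldf with low10 offset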
270 void TemplateTable::dconst(int value) {
271 transition(vtos, dtos);
272 static double zero = 0.0, one = 1.0;
273 double* p;
274 switch( value ) {
275 default: ShouldNotReachHere();
276 case 0: p = &zero; break;
277 case 1: p = &one; break;
278 }
279 AddressLiteral a(p);
280 __ sethi(a, G3_scratch);
281 __ ldf(FloatRegisterImpl::D, G3_scratch, a.low10(), Ftos_d);
282 }
285 // %%%%% Should factor most snippet templates across platforms
287 void TemplateTable::bipush() {
288 transition(vtos, itos);
289 __ ldsb( at_bcp(1), Otos_i );
290 }
292 void TemplateTable::sipush() {
293 transition(vtos, itos);
294 __ get_2_byte_integer_at_bcp(1, G3_scratch, Otos_i, InterpreterMacroAssembler::Signed);
295 }
297 void TemplateTable::ldc(bool wide) {
298 transition(vtos, vtos);
299 Label call_ldc, notInt, isString, notString, notClass, exit;
301 if (wide) {
302 __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
303 } else {
304 __ ldub(Lbcp, 1, O1);
305 }
306 __ get_cpool_and_tags(O0, O2);
308 const int base_offset = ConstantPool::header_size() * wordSize;
309 const int tags_offset = Array<u1>::base_offset_in_bytes();
311 // get type from tags
312 __ add(O2, tags_offset, O2);
313 __ ldub(O2, O1, O2);
315 // unresolved class? If so, must resolve
316 __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedClass, Assembler::equal, Assembler::pt, call_ldc);
318 // unresolved class in error state
319 __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedClassInError, Assembler::equal, Assembler::pn, call_ldc);
321 __ cmp(O2, JVM_CONSTANT_Class); // need to call vm to get java mirror of the class
322 __ brx(Assembler::notEqual, true, Assembler::pt, notClass);
323 __ delayed()->add(O0, base_offset, O0);
325 __ bind(call_ldc);
326 __ set(wide, O1);
327 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), O1);
328 __ push(atos);
329 __ ba_short(exit);
331 __ bind(notClass);
332 // __ add(O0, base_offset, O0);
333 __ sll(O1, LogBytesPerWord, O1);
334 __ cmp(O2, JVM_CONSTANT_Integer);
335 __ brx(Assembler::notEqual, true, Assembler::pt, notInt);
336 __ delayed()->cmp(O2, JVM_CONSTANT_String);
337 __ ld(O0, O1, Otos_i);
338 __ push(itos);
339 __ ba_short(exit);
341 __ bind(notInt);
342 // __ cmp(O2, JVM_CONSTANT_String);
343 __ brx(Assembler::equal, true, Assembler::pt, isString);
344 __ delayed()->cmp(O2, JVM_CONSTANT_Object);
345 __ brx(Assembler::notEqual, true, Assembler::pt, notString);
346 __ delayed()->ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
347 __ bind(isString);
348 __ stop("string should be rewritten to fast_aldc");
349 __ ba_short(exit);
351 __ bind(notString);
352 // __ ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
353 __ push(ftos);
355 __ bind(exit);
356 }
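// The tag dispatch above, as rough C-style pseudocode (names illustrative;
// String/Object constants trap here because ldc is rewritten to _fast_aldc
// for them):
//
//   switch (tags[index]) {
//     case UnresolvedClass(InError):
//     case Class:    push_a(InterpreterRuntime::ldc(wide)); break;
//     case Integer:  push_i(pool[index]);                   break;
//     case String:
//     case Object:   ShouldNotReachHere();                  break;
//     default:       push_f(pool[index]);  // Float
//   }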
358 // Fast path for caching oop constants.
359 // %%% We should use this to handle Class and String constants also.
360 // %%% It will simplify the ldc/primitive path considerably.
361 void TemplateTable::fast_aldc(bool wide) {
362 transition(vtos, atos);
364 int index_size = wide ? sizeof(u2) : sizeof(u1);
365 Label resolved;
367 // We are resolved if the resolved reference cache entry contains a
368 // non-null object (CallSite, etc.)
369 assert_different_registers(Otos_i, G3_scratch);
370 __ get_cache_index_at_bcp(Otos_i, G3_scratch, 1, index_size); // load index => G3_scratch
371 __ load_resolved_reference_at_index(Otos_i, G3_scratch);
372 __ tst(Otos_i);
373 __ br(Assembler::notEqual, false, Assembler::pt, resolved);
374 __ delayed()->set((int)bytecode(), O1);
376 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
378 // first time invocation - must resolve first
379 __ call_VM(Otos_i, entry, O1);
380 __ bind(resolved);
381 __ verify_oop(Otos_i);
382 }
385 void TemplateTable::ldc2_w() {
386 transition(vtos, vtos);
387 Label Long, exit;
389 __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
390 __ get_cpool_and_tags(O0, O2);
392 const int base_offset = ConstantPool::header_size() * wordSize;
393 const int tags_offset = Array<u1>::base_offset_in_bytes();
394 // get type from tags
395 __ add(O2, tags_offset, O2);
396 __ ldub(O2, O1, O2);
398 __ sll(O1, LogBytesPerWord, O1);
399 __ add(O0, O1, G3_scratch);
401 __ cmp_and_brx_short(O2, JVM_CONSTANT_Double, Assembler::notEqual, Assembler::pt, Long);
402 // A double can be placed at word-aligned locations in the constant pool.
403 // Check out Conversions.java for an example.
404 // Also ConstantPool::header_size() is 20, which makes it very difficult
405 // to double-align doubles in the constant pool. SG, 11/7/97
406 #ifdef _LP64
407 __ ldf(FloatRegisterImpl::D, G3_scratch, base_offset, Ftos_d);
408 #else
409 FloatRegister f = Ftos_d;
410 __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset, f);
411 __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset + sizeof(jdouble)/2,
412 f->successor());
413 #endif
414 __ push(dtos);
415 __ ba_short(exit);
417 __ bind(Long);
418 #ifdef _LP64
419 __ ldx(G3_scratch, base_offset, Otos_l);
420 #else
421 __ ld(G3_scratch, base_offset, Otos_l);
422 __ ld(G3_scratch, base_offset + sizeof(jlong)/2, Otos_l->successor());
423 #endif
424 __ push(ltos);
426 __ bind(exit);
427 }
430 void TemplateTable::locals_index(Register reg, int offset) {
431 __ ldub( at_bcp(offset), reg );
432 }
435 void TemplateTable::locals_index_wide(Register reg) {
436 // offset is 2, not 1, because Lbcp points to wide prefix code
437 __ get_2_byte_integer_at_bcp(2, G4_scratch, reg, InterpreterMacroAssembler::Unsigned);
438 }
440 void TemplateTable::iload() {
441 transition(vtos, itos);
442 // Rewrite iload,iload pair into fast_iload2
443 // iload,caload pair into fast_icaload
444 if (RewriteFrequentPairs) {
445 Label rewrite, done;
447 // get next byte
448 __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_iload)), G3_scratch);
450 // if _iload, wait to rewrite to iload2. We only want to rewrite the
451 // last two iloads in a pair. Comparing against fast_iload means that
452 // the next bytecode is neither an iload nor a caload, and therefore
453 // an iload pair.
454 __ cmp_and_br_short(G3_scratch, (int)Bytecodes::_iload, Assembler::equal, Assembler::pn, done);
456 __ cmp(G3_scratch, (int)Bytecodes::_fast_iload);
457 __ br(Assembler::equal, false, Assembler::pn, rewrite);
458 __ delayed()->set(Bytecodes::_fast_iload2, G4_scratch);
460 __ cmp(G3_scratch, (int)Bytecodes::_caload);
461 __ br(Assembler::equal, false, Assembler::pn, rewrite);
462 __ delayed()->set(Bytecodes::_fast_icaload, G4_scratch);
464 __ set(Bytecodes::_fast_iload, G4_scratch); // don't check again
465 // rewrite
466 // G4_scratch: fast bytecode
467 __ bind(rewrite);
468 patch_bytecode(Bytecodes::_iload, G4_scratch, G3_scratch, false);
469 __ bind(done);
470 }
472 // Get the local value into tos
473 locals_index(G3_scratch);
474 __ access_local_int( G3_scratch, Otos_i );
475 }
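// The rewrite decision above, in sketch form (next = the byte following
// this iload):
//
//   if      (next == _iload)      /* wait; rewrite only the last two */ ;
//   else if (next == _fast_iload) patch(_fast_iload2);
//   else if (next == _caload)     patch(_fast_icaload);
//   else                          patch(_fast_iload);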
477 void TemplateTable::fast_iload2() {
478 transition(vtos, itos);
479 locals_index(G3_scratch);
480 __ access_local_int( G3_scratch, Otos_i );
481 __ push_i();
482 locals_index(G3_scratch, 3); // get next bytecode's local index.
483 __ access_local_int( G3_scratch, Otos_i );
484 }
486 void TemplateTable::fast_iload() {
487 transition(vtos, itos);
488 locals_index(G3_scratch);
489 __ access_local_int( G3_scratch, Otos_i );
490 }
492 void TemplateTable::lload() {
493 transition(vtos, ltos);
494 locals_index(G3_scratch);
495 __ access_local_long( G3_scratch, Otos_l );
496 }
499 void TemplateTable::fload() {
500 transition(vtos, ftos);
501 locals_index(G3_scratch);
502 __ access_local_float( G3_scratch, Ftos_f );
503 }
506 void TemplateTable::dload() {
507 transition(vtos, dtos);
508 locals_index(G3_scratch);
509 __ access_local_double( G3_scratch, Ftos_d );
510 }
513 void TemplateTable::aload() {
514 transition(vtos, atos);
515 locals_index(G3_scratch);
516 __ access_local_ptr( G3_scratch, Otos_i);
517 }
520 void TemplateTable::wide_iload() {
521 transition(vtos, itos);
522 locals_index_wide(G3_scratch);
523 __ access_local_int( G3_scratch, Otos_i );
524 }
527 void TemplateTable::wide_lload() {
528 transition(vtos, ltos);
529 locals_index_wide(G3_scratch);
530 __ access_local_long( G3_scratch, Otos_l );
531 }
534 void TemplateTable::wide_fload() {
535 transition(vtos, ftos);
536 locals_index_wide(G3_scratch);
537 __ access_local_float( G3_scratch, Ftos_f );
538 }
541 void TemplateTable::wide_dload() {
542 transition(vtos, dtos);
543 locals_index_wide(G3_scratch);
544 __ access_local_double( G3_scratch, Ftos_d );
545 }
548 void TemplateTable::wide_aload() {
549 transition(vtos, atos);
550 locals_index_wide(G3_scratch);
551 __ access_local_ptr( G3_scratch, Otos_i );
552 __ verify_oop(Otos_i);
553 }
556 void TemplateTable::iaload() {
557 transition(itos, itos);
558 // Otos_i: index
559 // tos: array
560 __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
561 __ ld(O3, arrayOopDesc::base_offset_in_bytes(T_INT), Otos_i);
562 }
565 void TemplateTable::laload() {
566 transition(itos, ltos);
567 // Otos_i: index
568 // O2: array
569 __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
570 __ ld_long(O3, arrayOopDesc::base_offset_in_bytes(T_LONG), Otos_l);
571 }
574 void TemplateTable::faload() {
575 transition(itos, ftos);
576 // Otos_i: index
577 // O2: array
578 __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
579 __ ldf(FloatRegisterImpl::S, O3, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Ftos_f);
580 }
583 void TemplateTable::daload() {
584 transition(itos, dtos);
585 // Otos_i: index
586 // O2: array
587 __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
588 __ ldf(FloatRegisterImpl::D, O3, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Ftos_d);
589 }
592 void TemplateTable::aaload() {
593 transition(itos, atos);
594 // Otos_i: index
595 // tos: array
596 __ index_check(O2, Otos_i, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O3);
597 __ load_heap_oop(O3, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i);
598 __ verify_oop(Otos_i);
599 }
602 void TemplateTable::baload() {
603 transition(itos, itos);
604 // Otos_i: index
605 // tos: array
606 __ index_check(O2, Otos_i, 0, G3_scratch, O3);
607 __ ldsb(O3, arrayOopDesc::base_offset_in_bytes(T_BYTE), Otos_i);
608 }
611 void TemplateTable::caload() {
612 transition(itos, itos);
613 // Otos_i: index
614 // tos: array
615 __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
616 __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
617 }
619 void TemplateTable::fast_icaload() {
620 transition(vtos, itos);
621 // Otos_i: index
622 // tos: array
623 locals_index(G3_scratch);
624 __ access_local_int( G3_scratch, Otos_i );
625 __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
626 __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
627 }
630 void TemplateTable::saload() {
631 transition(itos, itos);
632 // Otos_i: index
633 // tos: array
634 __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
635 __ ldsh(O3, arrayOopDesc::base_offset_in_bytes(T_SHORT), Otos_i);
636 }
639 void TemplateTable::iload(int n) {
640 transition(vtos, itos);
641 __ ld( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
642 }
645 void TemplateTable::lload(int n) {
646 transition(vtos, ltos);
647 assert(n+1 < Argument::n_register_parameters, "would need more code");
648 __ load_unaligned_long(Llocals, Interpreter::local_offset_in_bytes(n+1), Otos_l);
649 }
652 void TemplateTable::fload(int n) {
653 transition(vtos, ftos);
654 assert(n < Argument::n_register_parameters, "would need more code");
655 __ ldf( FloatRegisterImpl::S, Llocals, Interpreter::local_offset_in_bytes(n), Ftos_f );
656 }
659 void TemplateTable::dload(int n) {
660 transition(vtos, dtos);
661 FloatRegister dst = Ftos_d;
662 __ load_unaligned_double(Llocals, Interpreter::local_offset_in_bytes(n+1), dst);
663 }
666 void TemplateTable::aload(int n) {
667 transition(vtos, atos);
668 __ ld_ptr( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
669 }
672 void TemplateTable::aload_0() {
673 transition(vtos, atos);
675 // According to bytecode histograms, the pairs:
676 //
677 // _aload_0, _fast_igetfield (itos)
678 // _aload_0, _fast_agetfield (atos)
679 // _aload_0, _fast_fgetfield (ftos)
680 //
681 // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
682 // bytecode checks the next bytecode and then rewrites the current
683 // bytecode into a pair bytecode; otherwise it rewrites the current
684 // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
685 //
686 if (RewriteFrequentPairs) {
687 Label rewrite, done;
689 // get next byte
690 __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)), G3_scratch);
692 // do actual aload_0
693 aload(0);
695 // if _getfield then wait with rewrite
696 __ cmp_and_br_short(G3_scratch, (int)Bytecodes::_getfield, Assembler::equal, Assembler::pn, done);
698 // if _igetfield then rewrite to _fast_iaccess_0
699 assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
700 __ cmp(G3_scratch, (int)Bytecodes::_fast_igetfield);
701 __ br(Assembler::equal, false, Assembler::pn, rewrite);
702 __ delayed()->set(Bytecodes::_fast_iaccess_0, G4_scratch);
704 // if _agetfield then rewrite to _fast_aaccess_0
705 assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
706 __ cmp(G3_scratch, (int)Bytecodes::_fast_agetfield);
707 __ br(Assembler::equal, false, Assembler::pn, rewrite);
708 __ delayed()->set(Bytecodes::_fast_aaccess_0, G4_scratch);
710 // if _fgetfield then rewrite to _fast_faccess_0
711 assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
712 __ cmp(G3_scratch, (int)Bytecodes::_fast_fgetfield);
713 __ br(Assembler::equal, false, Assembler::pn, rewrite);
714 __ delayed()->set(Bytecodes::_fast_faccess_0, G4_scratch);
716 // else rewrite to _fast_aload_0
717 assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
718 __ set(Bytecodes::_fast_aload_0, G4_scratch);
720 // rewrite
721 // G4_scratch: fast bytecode
722 __ bind(rewrite);
723 patch_bytecode(Bytecodes::_aload_0, G4_scratch, G3_scratch, false);
724 __ bind(done);
725 } else {
726 aload(0);
727 }
728 }
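// The same decision, sketched (next = the byte following this aload_0):
//
//   if      (next == _getfield)       ;  // wait for the pair to be rewritten
//   else if (next == _fast_igetfield) patch(_fast_iaccess_0);
//   else if (next == _fast_agetfield) patch(_fast_aaccess_0);
//   else if (next == _fast_fgetfield) patch(_fast_faccess_0);
//   else                              patch(_fast_aload_0);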
731 void TemplateTable::istore() {
732 transition(itos, vtos);
733 locals_index(G3_scratch);
734 __ store_local_int( G3_scratch, Otos_i );
735 }
738 void TemplateTable::lstore() {
739 transition(ltos, vtos);
740 locals_index(G3_scratch);
741 __ store_local_long( G3_scratch, Otos_l );
742 }
745 void TemplateTable::fstore() {
746 transition(ftos, vtos);
747 locals_index(G3_scratch);
748 __ store_local_float( G3_scratch, Ftos_f );
749 }
752 void TemplateTable::dstore() {
753 transition(dtos, vtos);
754 locals_index(G3_scratch);
755 __ store_local_double( G3_scratch, Ftos_d );
756 }
759 void TemplateTable::astore() {
760 transition(vtos, vtos);
761 __ load_ptr(0, Otos_i);
762 __ inc(Lesp, Interpreter::stackElementSize);
763 __ verify_oop_or_return_address(Otos_i, G3_scratch);
764 locals_index(G3_scratch);
765 __ store_local_ptr(G3_scratch, Otos_i);
766 }
769 void TemplateTable::wide_istore() {
770 transition(vtos, vtos);
771 __ pop_i();
772 locals_index_wide(G3_scratch);
773 __ store_local_int( G3_scratch, Otos_i );
774 }
777 void TemplateTable::wide_lstore() {
778 transition(vtos, vtos);
779 __ pop_l();
780 locals_index_wide(G3_scratch);
781 __ store_local_long( G3_scratch, Otos_l );
782 }
785 void TemplateTable::wide_fstore() {
786 transition(vtos, vtos);
787 __ pop_f();
788 locals_index_wide(G3_scratch);
789 __ store_local_float( G3_scratch, Ftos_f );
790 }
793 void TemplateTable::wide_dstore() {
794 transition(vtos, vtos);
795 __ pop_d();
796 locals_index_wide(G3_scratch);
797 __ store_local_double( G3_scratch, Ftos_d );
798 }
801 void TemplateTable::wide_astore() {
802 transition(vtos, vtos);
803 __ load_ptr(0, Otos_i);
804 __ inc(Lesp, Interpreter::stackElementSize);
805 __ verify_oop_or_return_address(Otos_i, G3_scratch);
806 locals_index_wide(G3_scratch);
807 __ store_local_ptr(G3_scratch, Otos_i);
808 }
811 void TemplateTable::iastore() {
812 transition(itos, vtos);
813 __ pop_i(O2); // index
814 // Otos_i: val
815 // O3: array
816 __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
817 __ st(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_INT));
818 }
821 void TemplateTable::lastore() {
822 transition(ltos, vtos);
823 __ pop_i(O2); // index
824 // Otos_l: val
825 // O3: array
826 __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
827 __ st_long(Otos_l, O2, arrayOopDesc::base_offset_in_bytes(T_LONG));
828 }
831 void TemplateTable::fastore() {
832 transition(ftos, vtos);
833 __ pop_i(O2); // index
834 // Ftos_f: val
835 // O3: array
836 __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
837 __ stf(FloatRegisterImpl::S, Ftos_f, O2, arrayOopDesc::base_offset_in_bytes(T_FLOAT));
838 }
841 void TemplateTable::dastore() {
842 transition(dtos, vtos);
843 __ pop_i(O2); // index
844 // Ftos_d: val
845 // O3: array
846 __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
847 __ stf(FloatRegisterImpl::D, Ftos_d, O2, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
848 }
851 void TemplateTable::aastore() {
852 Label store_ok, is_null, done;
853 transition(vtos, vtos);
854 __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
855 __ ld(Lesp, Interpreter::expr_offset_in_bytes(1), O2); // get index
856 __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(2), O3); // get array
857 // Otos_i: val
858 // O2: index
859 // O3: array
860 __ verify_oop(Otos_i);
861 __ index_check_without_pop(O3, O2, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O1);
863 // do array store check - check for NULL value first
864 __ br_null_short( Otos_i, Assembler::pn, is_null );
866 __ load_klass(O3, O4); // get array klass
867 __ load_klass(Otos_i, O5); // get value klass
869 // do fast instanceof cache test
871 __ ld_ptr(O4, in_bytes(ObjArrayKlass::element_klass_offset()), O4);
873 assert(Otos_i == O0, "just checking");
875 // Otos_i: value
876 // O1: addr - offset
877 // O2: index
878 // O3: array
879 // O4: array element klass
880 // O5: value klass
882 // Address element(O1, 0, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
884 // Generate a fast subtype check. Branch to store_ok if no
885 // failure. Throw if failure.
886 __ gen_subtype_check( O5, O4, G3_scratch, G4_scratch, G1_scratch, store_ok );
888 // Not a subtype; so must throw exception
889 __ throw_if_not_x( Assembler::never, Interpreter::_throw_ArrayStoreException_entry, G3_scratch );
891 // Store is OK.
892 __ bind(store_ok);
893 do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i, G3_scratch, _bs->kind(), true);
895 __ ba(done);
896 __ delayed()->inc(Lesp, 3* Interpreter::stackElementSize); // adj sp (pops array, index and value)
898 __ bind(is_null);
899 do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), G0, G4_scratch, _bs->kind(), true);
901 __ profile_null_seen(G3_scratch);
902 __ inc(Lesp, 3* Interpreter::stackElementSize); // adj sp (pops array, index and value)
903 __ bind(done);
904 }
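// The whole store check above, in Java-level terms (a sketch):
//
//   if (val == null)                  array[index] = null;  // no type check
//   else if (val.klass <: array.element_klass)
//                                     array[index] = val;   // plus GC barrier
//   else                              throw ArrayStoreException;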
907 void TemplateTable::bastore() {
908 transition(itos, vtos);
909 __ pop_i(O2); // index
910 // Otos_i: val
911 // O3: array
912 __ index_check(O3, O2, 0, G3_scratch, O2);
913 __ stb(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_BYTE));
914 }
917 void TemplateTable::castore() {
918 transition(itos, vtos);
919 __ pop_i(O2); // index
920 // Otos_i: val
921 // O3: array
922 __ index_check(O3, O2, LogBytesPerShort, G3_scratch, O2);
923 __ sth(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_CHAR));
924 }
927 void TemplateTable::sastore() {
928 // %%%%% Factor across platform
929 castore();
930 }
933 void TemplateTable::istore(int n) {
934 transition(itos, vtos);
935 __ st(Otos_i, Llocals, Interpreter::local_offset_in_bytes(n));
936 }
939 void TemplateTable::lstore(int n) {
940 transition(ltos, vtos);
941 assert(n+1 < Argument::n_register_parameters, "only handle register cases");
942 __ store_unaligned_long(Otos_l, Llocals, Interpreter::local_offset_in_bytes(n+1));
944 }
947 void TemplateTable::fstore(int n) {
948 transition(ftos, vtos);
949 assert(n < Argument::n_register_parameters, "only handle register cases");
950 __ stf(FloatRegisterImpl::S, Ftos_f, Llocals, Interpreter::local_offset_in_bytes(n));
951 }
954 void TemplateTable::dstore(int n) {
955 transition(dtos, vtos);
956 FloatRegister src = Ftos_d;
957 __ store_unaligned_double(src, Llocals, Interpreter::local_offset_in_bytes(n+1));
958 }
961 void TemplateTable::astore(int n) {
962 transition(vtos, vtos);
963 __ load_ptr(0, Otos_i);
964 __ inc(Lesp, Interpreter::stackElementSize);
965 __ verify_oop_or_return_address(Otos_i, G3_scratch);
966 __ store_local_ptr(n, Otos_i);
967 }
970 void TemplateTable::pop() {
971 transition(vtos, vtos);
972 __ inc(Lesp, Interpreter::stackElementSize);
973 }
976 void TemplateTable::pop2() {
977 transition(vtos, vtos);
978 __ inc(Lesp, 2 * Interpreter::stackElementSize);
979 }
982 void TemplateTable::dup() {
983 transition(vtos, vtos);
984 // stack: ..., a
985 // load a and tag
986 __ load_ptr(0, Otos_i);
987 __ push_ptr(Otos_i);
988 // stack: ..., a, a
989 }
992 void TemplateTable::dup_x1() {
993 transition(vtos, vtos);
994 // stack: ..., a, b
995 __ load_ptr( 1, G3_scratch); // get a
996 __ load_ptr( 0, Otos_l1); // get b
997 __ store_ptr(1, Otos_l1); // put b
998 __ store_ptr(0, G3_scratch); // put a - like swap
999 __ push_ptr(Otos_l1); // push b
1000 // stack: ..., b, a, b
1001 }
1004 void TemplateTable::dup_x2() {
1005 transition(vtos, vtos);
1006 // stack: ..., a, b, c
1007 // get c and push on stack, reuse registers
1008 __ load_ptr( 0, G3_scratch); // get c
1009 __ push_ptr(G3_scratch); // push c with tag
1010 // stack: ..., a, b, c, c (c in reg) (Lesp - 4)
1011 // (stack offsets n+1 now)
1012 __ load_ptr( 3, Otos_l1); // get a
1013 __ store_ptr(3, G3_scratch); // put c at 3
1014 // stack: ..., c, b, c, c (a in reg)
1015 __ load_ptr( 2, G3_scratch); // get b
1016 __ store_ptr(2, Otos_l1); // put a at 2
1017 // stack: ..., c, a, c, c (b in reg)
1018 __ store_ptr(1, G3_scratch); // put b at 1
1019 // stack: ..., c, a, b, c
1020 }
1023 void TemplateTable::dup2() {
1024 transition(vtos, vtos);
1025 __ load_ptr(1, G3_scratch); // get a
1026 __ load_ptr(0, Otos_l1); // get b
1027 __ push_ptr(G3_scratch); // push a
1028 __ push_ptr(Otos_l1); // push b
1029 // stack: ..., a, b, a, b
1030 }
1033 void TemplateTable::dup2_x1() {
1034 transition(vtos, vtos);
1035 // stack: ..., a, b, c
1036 __ load_ptr( 1, Lscratch); // get b
1037 __ load_ptr( 2, Otos_l1); // get a
1038 __ store_ptr(2, Lscratch); // put b at a
1039 // stack: ..., b, b, c
1040 __ load_ptr( 0, G3_scratch); // get c
1041 __ store_ptr(1, G3_scratch); // put c at b
1042 // stack: ..., b, c, c
1043 __ store_ptr(0, Otos_l1); // put a at c
1044 // stack: ..., b, c, a
1045 __ push_ptr(Lscratch); // push b
1046 __ push_ptr(G3_scratch); // push c
1047 // stack: ..., b, c, a, b, c
1048 }
1051 // The spec says that these types can be a mixture of category 1 (1 word)
1052 // types and/or category 2 types (longs and doubles)
1053 void TemplateTable::dup2_x2() {
1054 transition(vtos, vtos);
1055 // stack: ..., a, b, c, d
1056 __ load_ptr( 1, Lscratch); // get c
1057 __ load_ptr( 3, Otos_l1); // get a
1058 __ store_ptr(3, Lscratch); // put c at 3
1059 __ store_ptr(1, Otos_l1); // put a at 1
1060 // stack: ..., c, b, a, d
1061 __ load_ptr( 2, G3_scratch); // get b
1062 __ load_ptr( 0, Otos_l1); // get d
1063 __ store_ptr(0, G3_scratch); // put b at 0
1064 __ store_ptr(2, Otos_l1); // put d at 2
1065 // stack: ..., c, d, a, b
1066 __ push_ptr(Lscratch); // push c
1067 __ push_ptr(Otos_l1); // push d
1068 // stack: ..., c, d, a, b, c, d
1069 }
1072 void TemplateTable::swap() {
1073 transition(vtos, vtos);
1074 // stack: ..., a, b
1075 __ load_ptr( 1, G3_scratch); // get a
1076 __ load_ptr( 0, Otos_l1); // get b
1077 __ store_ptr(0, G3_scratch); // put b
1078 __ store_ptr(1, Otos_l1); // put a
1079 // stack: ..., b, a
1080 }
1083 void TemplateTable::iop2(Operation op) {
1084 transition(itos, itos);
1085 __ pop_i(O1);
1086 switch (op) {
1087 case add: __ add(O1, Otos_i, Otos_i); break;
1088 case sub: __ sub(O1, Otos_i, Otos_i); break;
1089 // %%%%% Mul may not exist: better to call .mul?
1090 case mul: __ smul(O1, Otos_i, Otos_i); break;
1091 case _and: __ and3(O1, Otos_i, Otos_i); break;
1092 case _or: __ or3(O1, Otos_i, Otos_i); break;
1093 case _xor: __ xor3(O1, Otos_i, Otos_i); break;
1094 case shl: __ sll(O1, Otos_i, Otos_i); break;
1095 case shr: __ sra(O1, Otos_i, Otos_i); break;
1096 case ushr: __ srl(O1, Otos_i, Otos_i); break;
1097 default: ShouldNotReachHere();
1098 }
1099 }
1102 void TemplateTable::lop2(Operation op) {
1103 transition(ltos, ltos);
1104 __ pop_l(O2);
1105 switch (op) {
1106 #ifdef _LP64
1107 case add: __ add(O2, Otos_l, Otos_l); break;
1108 case sub: __ sub(O2, Otos_l, Otos_l); break;
1109 case _and: __ and3(O2, Otos_l, Otos_l); break;
1110 case _or: __ or3(O2, Otos_l, Otos_l); break;
1111 case _xor: __ xor3(O2, Otos_l, Otos_l); break;
1112 #else
1113 case add: __ addcc(O3, Otos_l2, Otos_l2); __ addc(O2, Otos_l1, Otos_l1); break;
1114 case sub: __ subcc(O3, Otos_l2, Otos_l2); __ subc(O2, Otos_l1, Otos_l1); break;
1115 case _and: __ and3(O3, Otos_l2, Otos_l2); __ and3(O2, Otos_l1, Otos_l1); break;
1116 case _or: __ or3(O3, Otos_l2, Otos_l2); __ or3(O2, Otos_l1, Otos_l1); break;
1117 case _xor: __ xor3(O3, Otos_l2, Otos_l2); __ xor3(O2, Otos_l1, Otos_l1); break;
1118 #endif
1119 default: ShouldNotReachHere();
1120 }
1121 }
1124 void TemplateTable::idiv() {
1125 // %%%%% Later: For SPARC/V7 call .sdiv library routine,
1126 // %%%%% Use ldsw...sdivx on pure V9 ABI. 64 bit safe.
1128 transition(itos, itos);
1129 __ pop_i(O1); // get 1st op
1131 // Y contains upper 32 bits of result, set it to 0 or all ones
1132 __ wry(G0);
1133 __ mov(~0, G3_scratch);
1135 __ tst(O1);
1136 Label neg;
1137 __ br(Assembler::negative, true, Assembler::pn, neg);
1138 __ delayed()->wry(G3_scratch);
1139 __ bind(neg);
1141 Label ok;
1142 __ tst(Otos_i);
1143 __ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch );
1145 const int min_int = 0x80000000;
1146 Label regular;
1147 __ cmp(Otos_i, -1);
1148 __ br(Assembler::notEqual, false, Assembler::pt, regular);
1149 #ifdef _LP64
1150 // Don't put set in delay slot
1151 // Set will turn into multiple instructions in 64 bit mode
1152 __ delayed()->nop();
1153 __ set(min_int, G4_scratch);
1154 #else
1155 __ delayed()->set(min_int, G4_scratch);
1156 #endif
1157 Label done;
1158 __ cmp(O1, G4_scratch);
1159 __ br(Assembler::equal, true, Assembler::pt, done);
1160 __ delayed()->mov(O1, Otos_i); // (mov only executed if branch taken)
1162 __ bind(regular);
1163 __ sdiv(O1, Otos_i, Otos_i); // note: irem uses O1 after this instruction!
1164 __ bind(done);
1165 }
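// The two guarded cases above, in C terms (sketch):
//
//   if (divisor == 0)                             throw ArithmeticException;
//   if (dividend == 0x80000000 && divisor == -1)  result = dividend;  // no overflow trap
//   else                                          result = dividend / divisor;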
1168 void TemplateTable::irem() {
1169 transition(itos, itos);
1170 __ mov(Otos_i, O2); // save divisor
1171 idiv(); // %%%% Hack: exploits fact that idiv leaves dividend in O1
1172 __ smul(Otos_i, O2, Otos_i);
1173 __ sub(O1, Otos_i, Otos_i);
1174 }
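// i.e. rem = dividend - (dividend / divisor) * divisor, reusing the
// dividend that idiv() leaves behind in O1.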
1177 void TemplateTable::lmul() {
1178 transition(ltos, ltos);
1179 __ pop_l(O2);
1180 #ifdef _LP64
1181 __ mulx(Otos_l, O2, Otos_l);
1182 #else
1183 __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lmul));
1184 #endif
1186 }
1189 void TemplateTable::ldiv() {
1190 transition(ltos, ltos);
1192 // check for zero
1193 __ pop_l(O2);
1194 #ifdef _LP64
1195 __ tst(Otos_l);
1196 __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
1197 __ sdivx(O2, Otos_l, Otos_l);
1198 #else
1199 __ orcc(Otos_l1, Otos_l2, G0);
1200 __ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
1201 __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
1202 #endif
1203 }
1206 void TemplateTable::lrem() {
1207 transition(ltos, ltos);
1209 // check for zero
1210 __ pop_l(O2);
1211 #ifdef _LP64
1212 __ tst(Otos_l);
1213 __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
1214 __ sdivx(O2, Otos_l, Otos_l2);
1215 __ mulx (Otos_l2, Otos_l, Otos_l2);
1216 __ sub (O2, Otos_l2, Otos_l);
1217 #else
1218 __ orcc(Otos_l1, Otos_l2, G0);
1219 __ throw_if_not_icc(Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
1220 __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
1221 #endif
1222 }
1225 void TemplateTable::lshl() {
1226 transition(itos, ltos); // %%%% could optimize, fill delay slot or opt for ultra
1228 __ pop_l(O2); // shift value in O2, O3
1229 #ifdef _LP64
1230 __ sllx(O2, Otos_i, Otos_l);
1231 #else
1232 __ lshl(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
1233 #endif
1234 }
1237 void TemplateTable::lshr() {
1238 transition(itos, ltos); // %%%% see lshl comment
1240 __ pop_l(O2); // shift value in O2, O3
1241 #ifdef _LP64
1242 __ srax(O2, Otos_i, Otos_l);
1243 #else
1244 __ lshr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
1245 #endif
1246 }
1250 void TemplateTable::lushr() {
1251 transition(itos, ltos); // %%%% see lshl comment
1253 __ pop_l(O2); // shift value in O2, O3
1254 #ifdef _LP64
1255 __ srlx(O2, Otos_i, Otos_l);
1256 #else
1257 __ lushr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
1258 #endif
1259 }
1262 void TemplateTable::fop2(Operation op) {
1263 transition(ftos, ftos);
1264 switch (op) {
1265 case add: __ pop_f(F4); __ fadd(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break;
1266 case sub: __ pop_f(F4); __ fsub(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break;
1267 case mul: __ pop_f(F4); __ fmul(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break;
1268 case div: __ pop_f(F4); __ fdiv(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break;
1269 case rem:
1270 assert(Ftos_f == F0, "just checking");
1271 #ifdef _LP64
1272 // LP64 calling conventions use F1, F3 for passing 2 floats
1273 __ pop_f(F1);
1274 __ fmov(FloatRegisterImpl::S, Ftos_f, F3);
1275 #else
1276 __ pop_i(O0);
1277 __ stf(FloatRegisterImpl::S, Ftos_f, __ d_tmp);
1278 __ ld( __ d_tmp, O1 );
1279 #endif
1280 __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::frem));
1281 assert( Ftos_f == F0, "fix this code" );
1282 break;
1284 default: ShouldNotReachHere();
1285 }
1286 }
1289 void TemplateTable::dop2(Operation op) {
1290 transition(dtos, dtos);
1291 switch (op) {
1292 case add: __ pop_d(F4); __ fadd(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
1293 case sub: __ pop_d(F4); __ fsub(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
1294 case mul: __ pop_d(F4); __ fmul(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
1295 case div: __ pop_d(F4); __ fdiv(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
1296 case rem:
1297 #ifdef _LP64
1298 // Pass arguments in D0, D2
1299 __ fmov(FloatRegisterImpl::D, Ftos_f, F2 );
1300 __ pop_d( F0 );
1301 #else
1302 // Pass arguments in O0O1, O2O3
1303 __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
1304 __ ldd( __ d_tmp, O2 );
1305 __ pop_d(Ftos_f);
1306 __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
1307 __ ldd( __ d_tmp, O0 );
1308 #endif
1309 __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::drem));
1310 assert( Ftos_d == F0, "fix this code" );
1311 break;
1313 default: ShouldNotReachHere();
1314 }
1315 }
1318 void TemplateTable::ineg() {
1319 transition(itos, itos);
1320 __ neg(Otos_i);
1321 }
1324 void TemplateTable::lneg() {
1325 transition(ltos, ltos);
1326 #ifdef _LP64
1327 __ sub(G0, Otos_l, Otos_l);
1328 #else
1329 __ lneg(Otos_l1, Otos_l2);
1330 #endif
1331 }
1334 void TemplateTable::fneg() {
1335 transition(ftos, ftos);
1336 __ fneg(FloatRegisterImpl::S, Ftos_f);
1337 }
1340 void TemplateTable::dneg() {
1341 transition(dtos, dtos);
1342 // v8 has fnegd if source and dest are the same
1343 __ fneg(FloatRegisterImpl::D, Ftos_f);
1344 }
1347 void TemplateTable::iinc() {
1348 transition(vtos, vtos);
1349 locals_index(G3_scratch);
1350 __ ldsb(Lbcp, 2, O2); // load constant
1351 __ access_local_int(G3_scratch, Otos_i);
1352 __ add(Otos_i, O2, Otos_i);
1353 __ st(Otos_i, G3_scratch, 0); // access_local_int puts E.A. in G3_scratch
1354 }
1357 void TemplateTable::wide_iinc() {
1358 transition(vtos, vtos);
1359 locals_index_wide(G3_scratch);
1360 __ get_2_byte_integer_at_bcp( 4, O2, O3, InterpreterMacroAssembler::Signed);
1361 __ access_local_int(G3_scratch, Otos_i);
1362 __ add(Otos_i, O3, Otos_i);
1363 __ st(Otos_i, G3_scratch, 0); // access_local_int puts E.A. in G3_scratch
1364 }
1367 void TemplateTable::convert() {
1368 // %%%%% Factor this first part across platforms
1369 #ifdef ASSERT
1370 TosState tos_in = ilgl;
1371 TosState tos_out = ilgl;
1372 switch (bytecode()) {
1373 case Bytecodes::_i2l: // fall through
1374 case Bytecodes::_i2f: // fall through
1375 case Bytecodes::_i2d: // fall through
1376 case Bytecodes::_i2b: // fall through
1377 case Bytecodes::_i2c: // fall through
1378 case Bytecodes::_i2s: tos_in = itos; break;
1379 case Bytecodes::_l2i: // fall through
1380 case Bytecodes::_l2f: // fall through
1381 case Bytecodes::_l2d: tos_in = ltos; break;
1382 case Bytecodes::_f2i: // fall through
1383 case Bytecodes::_f2l: // fall through
1384 case Bytecodes::_f2d: tos_in = ftos; break;
1385 case Bytecodes::_d2i: // fall through
1386 case Bytecodes::_d2l: // fall through
1387 case Bytecodes::_d2f: tos_in = dtos; break;
1388 default : ShouldNotReachHere();
1389 }
1390 switch (bytecode()) {
1391 case Bytecodes::_l2i: // fall through
1392 case Bytecodes::_f2i: // fall through
1393 case Bytecodes::_d2i: // fall through
1394 case Bytecodes::_i2b: // fall through
1395 case Bytecodes::_i2c: // fall through
1396 case Bytecodes::_i2s: tos_out = itos; break;
1397 case Bytecodes::_i2l: // fall through
1398 case Bytecodes::_f2l: // fall through
1399 case Bytecodes::_d2l: tos_out = ltos; break;
1400 case Bytecodes::_i2f: // fall through
1401 case Bytecodes::_l2f: // fall through
1402 case Bytecodes::_d2f: tos_out = ftos; break;
1403 case Bytecodes::_i2d: // fall through
1404 case Bytecodes::_l2d: // fall through
1405 case Bytecodes::_f2d: tos_out = dtos; break;
1406 default : ShouldNotReachHere();
1407 }
1408 transition(tos_in, tos_out);
1409 #endif
1412 // Conversion
1413 Label done;
1414 switch (bytecode()) {
1415 case Bytecodes::_i2l:
1416 #ifdef _LP64
1417 // Sign extend the 32 bits
1418 __ sra ( Otos_i, 0, Otos_l );
1419 #else
1420 __ addcc(Otos_i, 0, Otos_l2);
1421 __ br(Assembler::greaterEqual, true, Assembler::pt, done);
1422 __ delayed()->clr(Otos_l1);
1423 __ set(~0, Otos_l1);
1424 #endif
1425 break;
1427 case Bytecodes::_i2f:
1428 __ st(Otos_i, __ d_tmp );
1429 __ ldf(FloatRegisterImpl::S, __ d_tmp, F0);
1430 __ fitof(FloatRegisterImpl::S, F0, Ftos_f);
1431 break;
1433 case Bytecodes::_i2d:
1434 __ st(Otos_i, __ d_tmp);
1435 __ ldf(FloatRegisterImpl::S, __ d_tmp, F0);
1436 __ fitof(FloatRegisterImpl::D, F0, Ftos_f);
1437 break;
1439 case Bytecodes::_i2b:
1440 __ sll(Otos_i, 24, Otos_i);
1441 __ sra(Otos_i, 24, Otos_i);
1442 break;
1444 case Bytecodes::_i2c:
1445 __ sll(Otos_i, 16, Otos_i);
1446 __ srl(Otos_i, 16, Otos_i);
1447 break;
1449 case Bytecodes::_i2s:
1450 __ sll(Otos_i, 16, Otos_i);
1451 __ sra(Otos_i, 16, Otos_i);
1452 break;
1454 case Bytecodes::_l2i:
1455 #ifndef _LP64
1456 __ mov(Otos_l2, Otos_i);
1457 #else
1458 // Sign-extend into the high 32 bits
1459 __ sra(Otos_l, 0, Otos_i);
1460 #endif
1461 break;
1463 case Bytecodes::_l2f:
1464 case Bytecodes::_l2d:
1465 __ st_long(Otos_l, __ d_tmp);
1466 __ ldf(FloatRegisterImpl::D, __ d_tmp, Ftos_d);
1468 if (VM_Version::v9_instructions_work()) {
1469 if (bytecode() == Bytecodes::_l2f) {
1470 __ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f);
1471 } else {
1472 __ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d);
1473 }
1474 } else {
1475 __ call_VM_leaf(
1476 Lscratch,
1477 bytecode() == Bytecodes::_l2f
1478 ? CAST_FROM_FN_PTR(address, SharedRuntime::l2f)
1479 : CAST_FROM_FN_PTR(address, SharedRuntime::l2d)
1480 );
1481 }
1482 break;
1484 case Bytecodes::_f2i: {
1485 Label isNaN;
1486 // result must be 0 if value is NaN; test by comparing value to itself
1487 __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, Ftos_f, Ftos_f);
1488 // According to the v8 manual, you have to have a non-fp instruction
1489 // between fcmp and fb.
1490 if (!VM_Version::v9_instructions_work()) {
1491 __ nop();
1492 }
1493 __ fb(Assembler::f_unordered, true, Assembler::pn, isNaN);
1494 __ delayed()->clr(Otos_i); // NaN
1495 __ ftoi(FloatRegisterImpl::S, Ftos_f, F30);
1496 __ stf(FloatRegisterImpl::S, F30, __ d_tmp);
1497 __ ld(__ d_tmp, Otos_i);
1498 __ bind(isNaN);
1499 }
1500 break;
1502 case Bytecodes::_f2l:
1503 // must uncache tos
1504 __ push_f();
1505 #ifdef _LP64
1506 __ pop_f(F1);
1507 #else
1508 __ pop_i(O0);
1509 #endif
1510 __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
1511 break;
1513 case Bytecodes::_f2d:
1514 __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, Ftos_f, Ftos_f);
1515 break;
1517 case Bytecodes::_d2i:
1518 case Bytecodes::_d2l:
1519 // must uncache tos
1520 __ push_d();
1521 #ifdef _LP64
1522 // LP64 calling conventions pass first double arg in D0
1523 __ pop_d( Ftos_d );
1524 #else
1525 __ pop_i( O0 );
1526 __ pop_i( O1 );
1527 #endif
1528 __ call_VM_leaf(Lscratch,
1529 bytecode() == Bytecodes::_d2i
1530 ? CAST_FROM_FN_PTR(address, SharedRuntime::d2i)
1531 : CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
1532 break;
1534 case Bytecodes::_d2f:
1535 if (VM_Version::v9_instructions_work()) {
1536 __ ftof( FloatRegisterImpl::D, FloatRegisterImpl::S, Ftos_d, Ftos_f);
1537 }
1538 else {
1539 // must uncache tos
1540 __ push_d();
1541 __ pop_i(O0);
1542 __ pop_i(O1);
1543 __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::d2f));
1544 }
1545 break;
1547 default: ShouldNotReachHere();
1548 }
1549 __ bind(done);
1550 }
1553 void TemplateTable::lcmp() {
1554 transition(ltos, itos);
1556 #ifdef _LP64
1557 __ pop_l(O1); // pop off value 1, value 2 is in O0
1558 __ lcmp( O1, Otos_l, Otos_i );
1559 #else
1560 __ pop_l(O2); // cmp O2,3 to O0,1
1561 __ lcmp( O2, O3, Otos_l1, Otos_l2, Otos_i );
1562 #endif
1563 }
1566 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
1568 if (is_float) __ pop_f(F2);
1569 else __ pop_d(F2);
1571 assert(Ftos_f == F0 && Ftos_d == F0, "alias checking:");
1573 __ float_cmp( is_float, unordered_result, F2, F0, Otos_i );
1574 }
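// Reminder (JVMS semantics, not new behavior): unordered_result selects
// the value pushed when either operand is NaN: -1 for fcmpl/dcmpl and
// +1 for fcmpg/dcmpg.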
1576 void TemplateTable::branch(bool is_jsr, bool is_wide) {
1577 // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
1578 __ verify_thread();
1580 const Register O2_bumped_count = O2;
1581 __ profile_taken_branch(G3_scratch, O2_bumped_count);
1583 // get (wide) offset to O1_disp
1584 const Register O1_disp = O1;
1585 if (is_wide) __ get_4_byte_integer_at_bcp( 1, G4_scratch, O1_disp, InterpreterMacroAssembler::set_CC);
1586 else __ get_2_byte_integer_at_bcp( 1, G4_scratch, O1_disp, InterpreterMacroAssembler::Signed, InterpreterMacroAssembler::set_CC);
1588 // Handle all the JSR stuff here, then exit.
1589 // It's much shorter and cleaner than intermingling with the
1590 // non-JSR normal-branch stuff occurring below.
1591 if( is_jsr ) {
1592 // compute return address as bci in Otos_i
1593 __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
1594 __ sub(Lbcp, G3_scratch, G3_scratch);
1595 __ sub(G3_scratch, in_bytes(ConstMethod::codes_offset()) - (is_wide ? 5 : 3), Otos_i);
1597 // Bump Lbcp to target of JSR
1598 __ add(Lbcp, O1_disp, Lbcp);
1599 // Push returnAddress for "ret" on stack
1600 __ push_ptr(Otos_i);
1601 // And away we go!
1602 __ dispatch_next(vtos);
1603 return;
1604 }
1606 // Normal (non-jsr) branch handling
1608 // Save the current Lbcp
1609 const Register O0_cur_bcp = O0;
1610 __ mov( Lbcp, O0_cur_bcp );
1613 bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
1614 if ( increment_invocation_counter_for_backward_branches ) {
1615 Label Lforward;
1616 // check branch direction
1617 __ br( Assembler::positive, false, Assembler::pn, Lforward );
1618 // Bump bytecode pointer by displacement (take the branch)
1619 __ delayed()->add( O1_disp, Lbcp, Lbcp ); // add to bc addr
1621 if (TieredCompilation) {
1622 Label Lno_mdo, Loverflow;
1623 int increment = InvocationCounter::count_increment;
1624 int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
1625 if (ProfileInterpreter) {
1626 // If no method data exists, go to profile_continue.
1627 __ ld_ptr(Lmethod, Method::method_data_offset(), G4_scratch);
1628 __ br_null_short(G4_scratch, Assembler::pn, Lno_mdo);
1630 // Increment backedge counter in the MDO
1631 Address mdo_backedge_counter(G4_scratch, in_bytes(MethodData::backedge_counter_offset()) +
1632 in_bytes(InvocationCounter::counter_offset()));
1633 __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, Lscratch,
1634 Assembler::notZero, &Lforward);
1635 __ ba_short(Loverflow);
1636 }
1638 // If there's no MDO, increment counter in Method*
1639 __ bind(Lno_mdo);
1640 Address backedge_counter(Lmethod, in_bytes(Method::backedge_counter_offset()) +
1641 in_bytes(InvocationCounter::counter_offset()));
1642 __ increment_mask_and_jump(backedge_counter, increment, mask, G3_scratch, Lscratch,
1643 Assembler::notZero, &Lforward);
1644 __ bind(Loverflow);
1646 // notify point for loop, pass branch bytecode
1647 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), O0_cur_bcp);
1649 // Was an OSR adapter generated?
1650 // O0 = osr nmethod
1651 __ br_null_short(O0, Assembler::pn, Lforward);
1653 // Has the nmethod been invalidated already?
1654 __ ld(O0, nmethod::entry_bci_offset(), O2);
1655 __ cmp_and_br_short(O2, InvalidOSREntryBci, Assembler::equal, Assembler::pn, Lforward);
1657 // migrate the interpreter frame off of the stack
1659 __ mov(G2_thread, L7);
1660 // save nmethod
1661 __ mov(O0, L6);
1662 __ set_last_Java_frame(SP, noreg);
1663 __ call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
1664 __ reset_last_Java_frame();
1665 __ mov(L7, G2_thread);
1667 // move OSR nmethod to I1
1668 __ mov(L6, I1);
1670 // OSR buffer to I0
1671 __ mov(O0, I0);
1673 // remove the interpreter frame
1674 __ restore(I5_savedSP, 0, SP);
1676 // Jump to the osr code.
1677 __ ld_ptr(O1, nmethod::osr_entry_point_offset(), O2);
1678 __ jmp(O2, G0);
1679 __ delayed()->nop();
1681 } else {
1682 // Update Backedge branch separately from invocations
1683 const Register G4_invoke_ctr = G4;
1684 __ increment_backedge_counter(G4_invoke_ctr, G1_scratch);
1685 if (ProfileInterpreter) {
1686 __ test_invocation_counter_for_mdp(G4_invoke_ctr, G3_scratch, Lforward);
1687 if (UseOnStackReplacement) {
1688 __ test_backedge_count_for_osr(O2_bumped_count, O0_cur_bcp, G3_scratch);
1689 }
1690 } else {
1691 if (UseOnStackReplacement) {
1692 __ test_backedge_count_for_osr(G4_invoke_ctr, O0_cur_bcp, G3_scratch);
1693 }
1694 }
1695 }
1697 __ bind(Lforward);
1698 } else
1699 // Bump bytecode pointer by displacement (take the branch)
1700 __ add( O1_disp, Lbcp, Lbcp );// add to bc addr
1702 // continue with bytecode @ target
1703 // %%%%% Like Intel, could speed things up by moving bytecode fetch to code above,
1704 // %%%%% and changing dispatch_next to dispatch_only
1705 __ dispatch_next(vtos);
1706 }
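// Backward-branch path above, summarized (a sketch): bump the backedge
// counter (the MDO counter first, if one exists); on overflow call
// InterpreterRuntime::frequency_counter_overflow, and if it returns a
// still-valid OSR nmethod, migrate the interpreter frame off the stack
// and jump to the nmethod's OSR entry point.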
1709 // Note: the Condition argument is TemplateTable::Condition;
1710 // the enum is scoped within this class.
1712 void TemplateTable::if_0cmp(Condition cc) {
1713 // no pointers, integer only!
1714 transition(itos, vtos);
1715 // assume branch is more often taken than not (loops use backward branches)
1716 __ cmp( Otos_i, 0);
1717 __ if_cmp(ccNot(cc), false);
1718 }
1721 void TemplateTable::if_icmp(Condition cc) {
1722 transition(itos, vtos);
1723 __ pop_i(O1);
1724 __ cmp(O1, Otos_i);
1725 __ if_cmp(ccNot(cc), false);
1726 }
1729 void TemplateTable::if_nullcmp(Condition cc) {
1730 transition(atos, vtos);
1731 __ tst(Otos_i);
1732 __ if_cmp(ccNot(cc), true);
1733 }
1736 void TemplateTable::if_acmp(Condition cc) {
1737 transition(atos, vtos);
1738 __ pop_ptr(O1);
1739 __ verify_oop(O1);
1740 __ verify_oop(Otos_i);
1741 __ cmp(O1, Otos_i);
1742 __ if_cmp(ccNot(cc), true);
1743 }
1747 void TemplateTable::ret() {
1748 transition(vtos, vtos);
1749 locals_index(G3_scratch);
1750 __ access_local_returnAddress(G3_scratch, Otos_i);
1751 // Otos_i contains the bci, compute the bcp from that
1753 #ifdef _LP64
1754 #ifdef ASSERT
1755 // jsr result was labeled as an 'itos' not an 'atos' because we cannot GC
1756 // the result. The return address (really a BCI) was stored with an
1757 // 'astore' because JVM specs claim it's a pointer-sized thing. Hence in
1758 // the 64-bit build the 32-bit BCI is actually in the low bits of a 64-bit
1759 // loaded value.
1760 { Label zzz ;
1761 __ set (65536, G3_scratch) ;
1762 __ cmp (Otos_i, G3_scratch) ;
1763 __ bp( Assembler::lessEqualUnsigned, false, Assembler::xcc, Assembler::pn, zzz);
1764 __ delayed()->nop();
1765 __ stop("BCI is in the wrong register half?");
1766 __ bind (zzz) ;
1767 }
1768 #endif
1769 #endif
1771 __ profile_ret(vtos, Otos_i, G4_scratch);
1773 __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
1774 __ add(G3_scratch, Otos_i, G3_scratch);
1775 __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
1776 __ dispatch_next(vtos);
1777 }
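// The bcp reconstruction above amounts to (sketch):
//
//   Lbcp = (address) method->constMethod()
//        + in_bytes(ConstMethod::codes_offset()) + bci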
1780 void TemplateTable::wide_ret() {
1781 transition(vtos, vtos);
1782 locals_index_wide(G3_scratch);
1783 __ access_local_returnAddress(G3_scratch, Otos_i);
1784 // Otos_i contains the bci, compute the bcp from that
1786 __ profile_ret(vtos, Otos_i, G4_scratch);
1788 __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
1789 __ add(G3_scratch, Otos_i, G3_scratch);
1790 __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
1791 __ dispatch_next(vtos);
1792 }
1795 void TemplateTable::tableswitch() {
1796 transition(itos, vtos);
1797 Label default_case, continue_execution;
1799 // align bcp
1800 __ add(Lbcp, BytesPerInt, O1);
1801 __ and3(O1, -BytesPerInt, O1);
1802 // load lo, hi
1803 __ ld(O1, 1 * BytesPerInt, O2); // low bound
1804 __ ld(O1, 2 * BytesPerInt, O3); // high bound
1805 #ifdef _LP64
1806 // Sign extend the 32 bits
1807 __ sra ( Otos_i, 0, Otos_i );
1808 #endif /* _LP64 */
1810 // check against lo & hi
1811 __ cmp( Otos_i, O2);
1812 __ br( Assembler::less, false, Assembler::pn, default_case);
1813 __ delayed()->cmp( Otos_i, O3 );
1814 __ br( Assembler::greater, false, Assembler::pn, default_case);
1815 // lookup dispatch offset
1816 __ delayed()->sub(Otos_i, O2, O2);
1817 __ profile_switch_case(O2, O3, G3_scratch, G4_scratch);
1818 __ sll(O2, LogBytesPerInt, O2);
1819 __ add(O2, 3 * BytesPerInt, O2);
1820 __ ba(continue_execution);
1821 __ delayed()->ld(O1, O2, O2);
1822 // handle default
1823 __ bind(default_case);
1824 __ profile_switch_default(O3);
1825 __ ld(O1, 0, O2); // get default offset
1826 // continue execution
1827 __ bind(continue_execution);
1828 __ add(Lbcp, O2, Lbcp);
1829 __ dispatch_next(vtos);
1830 }
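// Aligned tableswitch layout assumed above (all slots 4 bytes):
//
//   [default offset][low][high][offsets[0] .. offsets[high - low]]
//
// new bcp = bcp(tableswitch) + (in range ? offsets[key - low] : default).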
1833 void TemplateTable::lookupswitch() {
1834 transition(itos, itos);
1835 __ stop("lookupswitch bytecode should have been rewritten");
1836 }
1838 void TemplateTable::fast_linearswitch() {
1839 transition(itos, vtos);
1840 Label loop_entry, loop, found, continue_execution;
1841 // align bcp
1842 __ add(Lbcp, BytesPerInt, O1);
1843 __ and3(O1, -BytesPerInt, O1);
1844 // set counter
1845 __ ld(O1, BytesPerInt, O2);
1846 __ sll(O2, LogBytesPerInt + 1, O2); // in word-pairs
1847 __ add(O1, 2 * BytesPerInt, O3); // set first pair addr
1848 __ ba(loop_entry);
1849 __ delayed()->add(O3, O2, O2); // counter now points past last pair
1851 // table search
1852 __ bind(loop);
1853 __ cmp(O4, Otos_i);
1854 __ br(Assembler::equal, true, Assembler::pn, found);
1855 __ delayed()->ld(O3, BytesPerInt, O4); // offset -> O4
1856 __ inc(O3, 2 * BytesPerInt);
1858 __ bind(loop_entry);
1859 __ cmp(O2, O3);
1860 __ brx(Assembler::greaterUnsigned, true, Assembler::pt, loop);
1861 __ delayed()->ld(O3, 0, O4);
1863 // default case
1864 __ ld(O1, 0, O4); // get default offset
1865 if (ProfileInterpreter) {
1866 __ profile_switch_default(O3);
1867 __ ba_short(continue_execution);
1868 }
1870 // entry found -> get offset
1871 __ bind(found);
1872 if (ProfileInterpreter) {
1873 __ sub(O3, O1, O3);
1874 __ sub(O3, 2*BytesPerInt, O3);
1875 __ srl(O3, LogBytesPerInt + 1, O3); // in word-pairs
1876 __ profile_switch_case(O3, O1, O2, G3_scratch);
1878 __ bind(continue_execution);
1879 }
1880 __ add(Lbcp, O4, Lbcp);
1881 __ dispatch_next(vtos);
1882 }
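// Equivalent C sketch of the linear scan above (t is the aligned bcp viewed
// as 32-bit words: t[0] = default offset, t[1] = npairs, pairs follow):
//
//   int32_t* pair  = t + 2;
//   int32_t* limit = pair + 2 * t[1];
//   int32_t  off   = t[0];                        // assume the default
//   for (; pair < limit; pair += 2) {
//     if (pair[0] == key) { off = pair[1]; break; }
//   }
//   bcp += off;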
1885 void TemplateTable::fast_binaryswitch() {
1886 transition(itos, vtos);
1887 // Implementation using the following core algorithm: (copied from Intel)
1888 //
1889 // int binary_search(int key, LookupswitchPair* array, int n) {
1890 // // Binary search according to "Methodik des Programmierens" by
1891 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
1892 // int i = 0;
1893 // int j = n;
1894 // while (i+1 < j) {
1895 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
1896 // // with Q: for all i: 0 <= i < n: key < a[i]
1897 // // where a stands for the array and assuming that the (nonexistent)
1898 // // element a[n] is infinitely big.
1899 // int h = (i + j) >> 1;
1900 // // i < h < j
1901 // if (key < array[h].fast_match()) {
1902 // j = h;
1903 // } else {
1904 // i = h;
1905 // }
1906 // }
1907 // // R: a[i] <= key < a[i+1] or Q
1908 // // (i.e., if key is within array, i is the correct index)
1909 // return i;
1910 // }
1912 // register allocation
1913 assert(Otos_i == O0, "alias checking");
1914 const Register Rkey = Otos_i; // already set (tosca)
1915 const Register Rarray = O1;
1916 const Register Ri = O2;
1917 const Register Rj = O3;
1918 const Register Rh = O4;
1919 const Register Rscratch = O5;
1921 const int log_entry_size = 3;
1922 const int entry_size = 1 << log_entry_size;
1924 Label found;
1925 // Find Array start
1926 __ add(Lbcp, 3 * BytesPerInt, Rarray);
1927 __ and3(Rarray, -BytesPerInt, Rarray);
1928 // initialize i & j (in delay slot)
1929 __ clr( Ri );
1931 // and start
1932 Label entry;
1933 __ ba(entry);
1934 __ delayed()->ld( Rarray, -BytesPerInt, Rj);
1935 // (Rj is already in the native byte-ordering.)
1937 // binary search loop
1938 { Label loop;
1939 __ bind( loop );
1940 // int h = (i + j) >> 1;
1941 __ sra( Rh, 1, Rh );
1942 // if (key < array[h].fast_match()) {
1943 // j = h;
1944 // } else {
1945 // i = h;
1946 // }
1947 __ sll( Rh, log_entry_size, Rscratch );
1948 __ ld( Rarray, Rscratch, Rscratch );
1949 // (Rscratch is already in the native byte-ordering.)
1950 __ cmp( Rkey, Rscratch );
1951 if ( VM_Version::v9_instructions_work() ) {
1952 __ movcc( Assembler::less, false, Assembler::icc, Rh, Rj ); // j = h if (key < array[h].fast_match())
1953 __ movcc( Assembler::greaterEqual, false, Assembler::icc, Rh, Ri ); // i = h if (key >= array[h].fast_match())
1954 }
1955 else {
1956 Label end_of_if;
1957 __ br( Assembler::less, true, Assembler::pt, end_of_if );
1958 __ delayed()->mov( Rh, Rj ); // if (<) Rj = Rh
1959 __ mov( Rh, Ri ); // else i = h
1960 __ bind(end_of_if); // }
1961 }
1963 // while (i+1 < j)
1964 __ bind( entry );
1965 __ add( Ri, 1, Rscratch );
1966 __ cmp(Rscratch, Rj);
1967 __ br( Assembler::less, true, Assembler::pt, loop );
1968 __ delayed()->add( Ri, Rj, Rh ); // start h = i + j >> 1;
1969 }
1971 // end of binary search, result index is i (must check again!)
1972 Label default_case;
1973 Label continue_execution;
1974 if (ProfileInterpreter) {
1975 __ mov( Ri, Rh ); // Save index in i for profiling
1976 }
1977 __ sll( Ri, log_entry_size, Ri );
1978 __ ld( Rarray, Ri, Rscratch );
1979 // (Rscratch is already in the native byte-ordering.)
1980 __ cmp( Rkey, Rscratch );
1981 __ br( Assembler::notEqual, true, Assembler::pn, default_case );
1982 __ delayed()->ld( Rarray, -2 * BytesPerInt, Rj ); // load default offset -> j
1984 // entry found -> j = offset
1985 __ inc( Ri, BytesPerInt );
1986 __ profile_switch_case(Rh, Rj, Rscratch, Rkey);
1987 __ ld( Rarray, Ri, Rj );
1988 // (Rj is already in the native byte-ordering.)
1990 if (ProfileInterpreter) {
1991 __ ba_short(continue_execution);
1992 }
1994 __ bind(default_case); // fall through (if not profiling)
1995 __ profile_switch_default(Ri);
1997 __ bind(continue_execution);
1998 __ add( Lbcp, Rj, Lbcp );
1999 __ dispatch_next( vtos );
2000 }
2003 void TemplateTable::_return(TosState state) {
2004 transition(state, state);
2005 assert(_desc->calls_vm(), "inconsistent calls_vm information");
2007 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2008 assert(state == vtos, "only valid state");
2009 __ mov(G0, G3_scratch);
2010 __ access_local_ptr(G3_scratch, Otos_i);
2011 __ load_klass(Otos_i, O2);
2012 __ set(JVM_ACC_HAS_FINALIZER, G3);
2013 __ ld(O2, in_bytes(Klass::access_flags_offset()), O2);
2014 __ andcc(G3, O2, G0);
2015 Label skip_register_finalizer;
2016 __ br(Assembler::zero, false, Assembler::pn, skip_register_finalizer);
2017 __ delayed()->nop();
2019 // Call out to do finalizer registration
2020 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), Otos_i);
2022 __ bind(skip_register_finalizer);
2023 }
2025 __ remove_activation(state, /* throw_monitor_exception */ true);
2027 // The caller's SP was adjusted upon method entry to accommodate
2028 // the callee's non-argument locals. Undo that adjustment.
2029 __ ret(); // return to caller
2030 __ delayed()->restore(I5_savedSP, G0, SP);
2031 }
2034 // ----------------------------------------------------------------------------
2035 // Volatile variables demand their effects be made known to all CPUs in
2036 // order. Store buffers on most chips allow reads & writes to reorder; the
2037 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
2038 // memory barrier (i.e., it's not sufficient that the interpreter does not
2039 // reorder volatile references, the hardware also must not reorder them).
2040 //
2041 // According to the new Java Memory Model (JMM):
2042 // (1) All volatiles are serialized with respect to each other.
2043 // ALSO reads & writes act as acquire & release, so:
2044 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
2045 // the read float up to before the read. It's OK for non-volatile memory refs
2046 // that happen before the volatile read to float down below it.
2047 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
2048 // that happen BEFORE the write float down to after the write. It's OK for
2049 // non-volatile memory refs that happen after the volatile write to float up
2050 // before it.
2051 //
2052 // We only put in barriers around volatile refs (they are expensive), not
2053 // _between_ memory refs (that would require us to track the flavor of the
2054 // previous memory refs). Requirements (2) and (3) require some barriers
2055 // before volatile stores and after volatile loads. These nearly cover
2056 // requirement (1) but miss the volatile-store-volatile-load case. This final
2057 // case is placed after volatile-stores although it could just as well go
2058 // before volatile-loads.
2059 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint) {
2060 // Helper function to insert an is-volatile test and memory barrier.
2061 // All current SPARC implementations run in TSO, needing only StoreLoad.
2062 if ((order_constraint & Assembler::StoreLoad) == 0) return;
2063 __ membar( order_constraint );
2064 }
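// Illustrative use (a sketch of what the putfield path below does): under
// TSO only the second of these calls emits an actual membar instruction,
// because TSO already orders everything except store-then-load:
//
//   volatile_barrier(Membar_mask_bits(LoadStore | StoreStore)); // before the store: no-op
//   // ... the volatile store ...
//   volatile_barrier(Assembler::StoreLoad);                     // after the store: membar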
2066 // ----------------------------------------------------------------------------
2067 void TemplateTable::resolve_cache_and_index(int byte_no,
2068 Register Rcache,
2069 Register index,
2070 size_t index_size) {
2071 // Depends on cpCacheOop layout!
2072 Label resolved;
2074 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2075 __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, Lbyte_code, byte_no, 1, index_size);
2076 __ cmp(Lbyte_code, (int) bytecode()); // have we resolved this bytecode?
2077 __ br(Assembler::equal, false, Assembler::pt, resolved);
2078 __ delayed()->set((int)bytecode(), O1);
2080 address entry;
2081 switch (bytecode()) {
2082 case Bytecodes::_getstatic : // fall through
2083 case Bytecodes::_putstatic : // fall through
2084 case Bytecodes::_getfield : // fall through
2085 case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
2086 case Bytecodes::_invokevirtual : // fall through
2087 case Bytecodes::_invokespecial : // fall through
2088 case Bytecodes::_invokestatic : // fall through
2089 case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
2090 case Bytecodes::_invokehandle : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break;
2091 case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
2092 default:
2093 fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
2094 break;
2095 }
2096 // first-time invocation - must resolve first
2097 __ call_VM(noreg, entry, O1);
2098 // Update registers with resolved info
2099 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2100 __ bind(resolved);
2101 }
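// The control flow above, as a C-like sketch (hypothetical helper names):
//
//   if (bytecode_in_cache_entry(bcp) != bytecode()) {     // not resolved yet
//     call_VM(resolve_entry_for(bytecode()), bytecode()); // may GC, fills entry
//     reload_cache_and_index_from(bcp);                   // pick up resolved entry
//   }
//   // resolved: Rcache/index now address the filled-in ConstantPoolCacheEntry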
2103 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2104 Register method,
2105 Register itable_index,
2106 Register flags,
2107 bool is_invokevirtual,
2108 bool is_invokevfinal,
2109 bool is_invokedynamic) {
2110 // Uses both G3_scratch and G4_scratch
2111 Register cache = G3_scratch;
2112 Register index = G4_scratch;
2113 assert_different_registers(cache, method, itable_index);
2115 // determine constant pool cache field offsets
2116 assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2117 const int method_offset = in_bytes(
2118 ConstantPoolCache::base_offset() +
2119 ((byte_no == f2_byte)
2120 ? ConstantPoolCacheEntry::f2_offset()
2121 : ConstantPoolCacheEntry::f1_offset()
2122 )
2123 );
2124 const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2125 ConstantPoolCacheEntry::flags_offset());
2126 // access constant pool cache fields
2127 const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
2128 ConstantPoolCacheEntry::f2_offset());
2130 if (is_invokevfinal) {
2131 __ get_cache_and_index_at_bcp(cache, index, 1);
2132 __ ld_ptr(Address(cache, method_offset), method);
2133 } else {
2134 size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
2135 resolve_cache_and_index(byte_no, cache, index, index_size);
2136 __ ld_ptr(Address(cache, method_offset), method);
2137 }
2139 if (itable_index != noreg) {
2140 // pick up itable or appendix index from f2 also:
2141 __ ld_ptr(Address(cache, index_offset), itable_index);
2142 }
2143 __ ld_ptr(Address(cache, flags_offset), flags);
2144 }
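// Sketch of the ConstantPoolCacheEntry fields read above (the meaning varies
// by invoke kind; cpCache.hpp is the authoritative layout):
//
//   f1    : Method* or Klass* (resolved by f1_byte bytecodes)
//   f2    : vtable/itable index, or Method* for vfinal (f2_byte bytecodes)
//   flags : tos state | parameter size | is_vfinal/is_volatile/... bits
//
// method_offset therefore selects f1 or f2 depending on byte_no, while
// itable_index and flags come from fixed offsets within the entry.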
2146 // The Rcache register must be set before this is called
2147 void TemplateTable::load_field_cp_cache_entry(Register Robj,
2148 Register Rcache,
2149 Register index,
2150 Register Roffset,
2151 Register Rflags,
2152 bool is_static) {
2153 assert_different_registers(Rcache, Rflags, Roffset);
2155 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2157 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
2158 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
2159 if (is_static) {
2160 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f1_offset(), Robj);
2161 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2162 __ ld_ptr( Robj, mirror_offset, Robj);
2163 }
2164 }
2166 // The registers Rcache and index are expected to be set before the call.
2167 // Correct values of the Rcache and index registers are preserved.
2168 void TemplateTable::jvmti_post_field_access(Register Rcache,
2169 Register index,
2170 bool is_static,
2171 bool has_tos) {
2172 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2174 if (JvmtiExport::can_post_field_access()) {
2175 // Check to see if a field access watch has been set before we take
2176 // the time to call into the VM.
2177 Label Label1;
2178 assert_different_registers(Rcache, index, G1_scratch);
2179 AddressLiteral get_field_access_count_addr(JvmtiExport::get_field_access_count_addr());
2180 __ load_contents(get_field_access_count_addr, G1_scratch);
2181 __ cmp_and_br_short(G1_scratch, 0, Assembler::equal, Assembler::pt, Label1);
2183 __ add(Rcache, in_bytes(cp_base_offset), Rcache);
2185 if (is_static) {
2186 __ clr(Otos_i);
2187 } else {
2188 if (has_tos) {
2189 // save object pointer before call_VM() clobbers it
2190 __ push_ptr(Otos_i); // put object on tos where GC wants it.
2191 } else {
2192 // Load top of stack (do not pop the value off the stack);
2193 __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
2194 }
2195 __ verify_oop(Otos_i);
2196 }
2197 // Otos_i: object pointer or NULL if static
2198 // Rcache: cache entry pointer
2199 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2200 Otos_i, Rcache);
2201 if (!is_static && has_tos) {
2202 __ pop_ptr(Otos_i); // restore object pointer
2203 __ verify_oop(Otos_i);
2204 }
2205 __ get_cache_and_index_at_bcp(Rcache, index, 1);
2206 __ bind(Label1);
2207 }
2208 }
2210 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
2211 transition(vtos, vtos);
2213 Register Rcache = G3_scratch;
2214 Register index = G4_scratch;
2215 Register Rclass = Rcache;
2216 Register Roffset= G4_scratch;
2217 Register Rflags = G1_scratch;
2218 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2220 resolve_cache_and_index(byte_no, Rcache, index, sizeof(u2));
2221 jvmti_post_field_access(Rcache, index, is_static, false);
2222 load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);
2224 if (!is_static) {
2225 pop_and_check_object(Rclass);
2226 } else {
2227 __ verify_oop(Rclass);
2228 }
2230 Label exit;
2232 Assembler::Membar_mask_bits membar_bits =
2233 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
2235 if (__ membar_has_effect(membar_bits)) {
2236 // Get volatile flag
2237 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
2238 __ and3(Rflags, Lscratch, Lscratch);
2239 }
2241 Label checkVolatile;
2243 // compute field type
2244 Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj;
2245 __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
2246 // Make sure we don't need to mask Rflags after the above shift
2247 ConstantPoolCacheEntry::verify_tos_state_shift();
2249 // Check atos before itos for getstatic, more likely (in Queens at least)
2250 __ cmp(Rflags, atos);
2251 __ br(Assembler::notEqual, false, Assembler::pt, notObj);
2252 __ delayed() ->cmp(Rflags, itos);
2254 // atos
2255 __ load_heap_oop(Rclass, Roffset, Otos_i);
2256 __ verify_oop(Otos_i);
2257 __ push(atos);
2258 if (!is_static) {
2259 patch_bytecode(Bytecodes::_fast_agetfield, G3_scratch, G4_scratch);
2260 }
2261 __ ba(checkVolatile);
2262 __ delayed()->tst(Lscratch);
2264 __ bind(notObj);
2266 // cmp(Rflags, itos);
2267 __ br(Assembler::notEqual, false, Assembler::pt, notInt);
2268 __ delayed() ->cmp(Rflags, ltos);
2270 // itos
2271 __ ld(Rclass, Roffset, Otos_i);
2272 __ push(itos);
2273 if (!is_static) {
2274 patch_bytecode(Bytecodes::_fast_igetfield, G3_scratch, G4_scratch);
2275 }
2276 __ ba(checkVolatile);
2277 __ delayed()->tst(Lscratch);
2279 __ bind(notInt);
2281 // cmp(Rflags, ltos);
2282 __ br(Assembler::notEqual, false, Assembler::pt, notLong);
2283 __ delayed() ->cmp(Rflags, btos);
2285 // ltos
2286 // load must be atomic
2287 __ ld_long(Rclass, Roffset, Otos_l);
2288 __ push(ltos);
2289 if (!is_static) {
2290 patch_bytecode(Bytecodes::_fast_lgetfield, G3_scratch, G4_scratch);
2291 }
2292 __ ba(checkVolatile);
2293 __ delayed()->tst(Lscratch);
2295 __ bind(notLong);
2297 // cmp(Rflags, btos);
2298 __ br(Assembler::notEqual, false, Assembler::pt, notByte);
2299 __ delayed() ->cmp(Rflags, ctos);
2301 // btos
2302 __ ldsb(Rclass, Roffset, Otos_i);
2303 __ push(itos);
2304 if (!is_static) {
2305 patch_bytecode(Bytecodes::_fast_bgetfield, G3_scratch, G4_scratch);
2306 }
2307 __ ba(checkVolatile);
2308 __ delayed()->tst(Lscratch);
2310 __ bind(notByte);
2312 // cmp(Rflags, ctos);
2313 __ br(Assembler::notEqual, false, Assembler::pt, notChar);
2314 __ delayed() ->cmp(Rflags, stos);
2316 // ctos
2317 __ lduh(Rclass, Roffset, Otos_i);
2318 __ push(itos);
2319 if (!is_static) {
2320 patch_bytecode(Bytecodes::_fast_cgetfield, G3_scratch, G4_scratch);
2321 }
2322 __ ba(checkVolatile);
2323 __ delayed()->tst(Lscratch);
2325 __ bind(notChar);
2327 // cmp(Rflags, stos);
2328 __ br(Assembler::notEqual, false, Assembler::pt, notShort);
2329 __ delayed() ->cmp(Rflags, ftos);
2331 // stos
2332 __ ldsh(Rclass, Roffset, Otos_i);
2333 __ push(itos);
2334 if (!is_static) {
2335 patch_bytecode(Bytecodes::_fast_sgetfield, G3_scratch, G4_scratch);
2336 }
2337 __ ba(checkVolatile);
2338 __ delayed()->tst(Lscratch);
2340 __ bind(notShort);
2343 // cmp(Rflags, ftos);
2344 __ br(Assembler::notEqual, false, Assembler::pt, notFloat);
2345 __ delayed() ->tst(Lscratch);
2347 // ftos
2348 __ ldf(FloatRegisterImpl::S, Rclass, Roffset, Ftos_f);
2349 __ push(ftos);
2350 if (!is_static) {
2351 patch_bytecode(Bytecodes::_fast_fgetfield, G3_scratch, G4_scratch);
2352 }
2353 __ ba(checkVolatile);
2354 __ delayed()->tst(Lscratch);
2356 __ bind(notFloat);
2359 // dtos
2360 __ ldf(FloatRegisterImpl::D, Rclass, Roffset, Ftos_d);
2361 __ push(dtos);
2362 if (!is_static) {
2363 patch_bytecode(Bytecodes::_fast_dgetfield, G3_scratch, G4_scratch);
2364 }
2366 __ bind(checkVolatile);
2367 if (__ membar_has_effect(membar_bits)) {
2368 // __ tst(Lscratch); executed in delay slot
2369 __ br(Assembler::zero, false, Assembler::pt, exit);
2370 __ delayed()->nop();
2371 volatile_barrier(membar_bits);
2372 }
2374 __ bind(exit);
2375 }
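// The chained compare/branch ladder above is, in effect (sketch):
//
//   switch (Rflags >> tos_state_shift) {
//     case atos: push(load_heap_oop(obj, off)); break;  // checked first
//     case itos: push(ld(obj, off));            break;
//     case ltos: push(ld_long(obj, off));       break;  // must be atomic
//     // ... btos/ctos/stos/ftos/dtos analogously ...
//   }
//   if (is_volatile) membar(LoadLoad | LoadStore);      // checkVolatile/exit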
2378 void TemplateTable::getfield(int byte_no) {
2379 getfield_or_static(byte_no, false);
2380 }
2382 void TemplateTable::getstatic(int byte_no) {
2383 getfield_or_static(byte_no, true);
2384 }
2387 void TemplateTable::fast_accessfield(TosState state) {
2388 transition(atos, state);
2389 Register Rcache = G3_scratch;
2390 Register index = G4_scratch;
2391 Register Roffset = G4_scratch;
2392 Register Rflags = Rcache;
2393 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2395 __ get_cache_and_index_at_bcp(Rcache, index, 1);
2396 jvmti_post_field_access(Rcache, index, /*is_static*/false, /*has_tos*/true);
2398 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
2400 __ null_check(Otos_i);
2401 __ verify_oop(Otos_i);
2403 Label exit;
2405 Assembler::Membar_mask_bits membar_bits =
2406 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
2407 if (__ membar_has_effect(membar_bits)) {
2408 // Get volatile flag
2409 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Rflags);
2410 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
2411 }
2413 switch (bytecode()) {
2414 case Bytecodes::_fast_bgetfield:
2415 __ ldsb(Otos_i, Roffset, Otos_i);
2416 break;
2417 case Bytecodes::_fast_cgetfield:
2418 __ lduh(Otos_i, Roffset, Otos_i);
2419 break;
2420 case Bytecodes::_fast_sgetfield:
2421 __ ldsh(Otos_i, Roffset, Otos_i);
2422 break;
2423 case Bytecodes::_fast_igetfield:
2424 __ ld(Otos_i, Roffset, Otos_i);
2425 break;
2426 case Bytecodes::_fast_lgetfield:
2427 __ ld_long(Otos_i, Roffset, Otos_l);
2428 break;
2429 case Bytecodes::_fast_fgetfield:
2430 __ ldf(FloatRegisterImpl::S, Otos_i, Roffset, Ftos_f);
2431 break;
2432 case Bytecodes::_fast_dgetfield:
2433 __ ldf(FloatRegisterImpl::D, Otos_i, Roffset, Ftos_d);
2434 break;
2435 case Bytecodes::_fast_agetfield:
2436 __ load_heap_oop(Otos_i, Roffset, Otos_i);
2437 break;
2438 default:
2439 ShouldNotReachHere();
2440 }
2442 if (__ membar_has_effect(membar_bits)) {
2443 __ btst(Lscratch, Rflags);
2444 __ br(Assembler::zero, false, Assembler::pt, exit);
2445 __ delayed()->nop();
2446 volatile_barrier(membar_bits);
2447 __ bind(exit);
2448 }
2450 if (state == atos) {
2451 __ verify_oop(Otos_i); // does not blow flags!
2452 }
2453 }
2455 void TemplateTable::jvmti_post_fast_field_mod() {
2456 if (JvmtiExport::can_post_field_modification()) {
2457 // Check to see if a field modification watch has been set before we take
2458 // the time to call into the VM.
2459 Label done;
2460 AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr());
2461 __ load_contents(get_field_modification_count_addr, G4_scratch);
2462 __ cmp_and_br_short(G4_scratch, 0, Assembler::equal, Assembler::pt, done);
2463 __ pop_ptr(G4_scratch); // copy the object pointer from tos
2464 __ verify_oop(G4_scratch);
2465 __ push_ptr(G4_scratch); // put the object pointer back on tos
2466 __ get_cache_entry_pointer_at_bcp(G1_scratch, G3_scratch, 1);
2467 // Save tos values before call_VM() clobbers them. Since we have
2468 // to do it for every data type, we use the saved values as the
2469 // jvalue object.
2470 switch (bytecode()) { // save tos values before call_VM() clobbers them
2471 case Bytecodes::_fast_aputfield: __ push_ptr(Otos_i); break;
2472 case Bytecodes::_fast_bputfield: // fall through
2473 case Bytecodes::_fast_sputfield: // fall through
2474 case Bytecodes::_fast_cputfield: // fall through
2475 case Bytecodes::_fast_iputfield: __ push_i(Otos_i); break;
2476 case Bytecodes::_fast_dputfield: __ push_d(Ftos_d); break;
2477 case Bytecodes::_fast_fputfield: __ push_f(Ftos_f); break;
2478 // get words in right order for use as jvalue object
2479 case Bytecodes::_fast_lputfield: __ push_l(Otos_l); break;
2480 }
2481 // setup pointer to jvalue object
2482 __ mov(Lesp, G3_scratch); __ inc(G3_scratch, wordSize);
2483 // G4_scratch: object pointer
2484 // G1_scratch: cache entry pointer
2485 // G3_scratch: jvalue object on the stack
2486 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), G4_scratch, G1_scratch, G3_scratch);
2487 switch (bytecode()) { // restore tos values
2488 case Bytecodes::_fast_aputfield: __ pop_ptr(Otos_i); break;
2489 case Bytecodes::_fast_bputfield: // fall through
2490 case Bytecodes::_fast_sputfield: // fall through
2491 case Bytecodes::_fast_cputfield: // fall through
2492 case Bytecodes::_fast_iputfield: __ pop_i(Otos_i); break;
2493 case Bytecodes::_fast_dputfield: __ pop_d(Ftos_d); break;
2494 case Bytecodes::_fast_fputfield: __ pop_f(Ftos_f); break;
2495 case Bytecodes::_fast_lputfield: __ pop_l(Otos_l); break;
2496 }
2497 __ bind(done);
2498 }
2499 }
2501 // The registers Rcache and index are expected to be set before the call.
2502 // The function may destroy various registers, just not the Rcache and index registers.
2503 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register index, bool is_static) {
2504 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2506 if (JvmtiExport::can_post_field_modification()) {
2507 // Check to see if a field modification watch has been set before we take
2508 // the time to call into the VM.
2509 Label Label1;
2510 assert_different_registers(Rcache, index, G1_scratch);
2511 AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr());
2512 __ load_contents(get_field_modification_count_addr, G1_scratch);
2513 __ cmp_and_br_short(G1_scratch, 0, Assembler::zero, Assembler::pt, Label1);
2515 // The Rcache and index registers have already been set.
2516 // That would allow us to eliminate this call, but the Rcache and index
2517 // registers would then have to be used consistently after this line.
2518 __ get_cache_and_index_at_bcp(G1_scratch, G4_scratch, 1);
2520 __ add(G1_scratch, in_bytes(cp_base_offset), G3_scratch);
2521 if (is_static) {
2522 // Life is simple. Null out the object pointer.
2523 __ clr(G4_scratch);
2524 } else {
2525 Register Rflags = G1_scratch;
2526 // Life is harder. The stack holds the value on top, followed by the
2527 // object. We don't know the size of the value, though; it could be
2528 // one or two words depending on its type. As a result, we must find
2529 // the type to determine where the object is.
2531 Label two_word, valsizeknown;
2532 __ ld_ptr(G1_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
2533 __ mov(Lesp, G4_scratch);
2534 __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
2535 // Make sure we don't need to mask Rflags after the above shift
2536 ConstantPoolCacheEntry::verify_tos_state_shift();
2537 __ cmp(Rflags, ltos);
2538 __ br(Assembler::equal, false, Assembler::pt, two_word);
2539 __ delayed()->cmp(Rflags, dtos);
2540 __ br(Assembler::equal, false, Assembler::pt, two_word);
2541 __ delayed()->nop();
2542 __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(1));
2543 __ ba_short(valsizeknown);
2544 __ bind(two_word);
2546 __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(2));
2548 __ bind(valsizeknown);
2549 // setup object pointer
2550 __ ld_ptr(G4_scratch, 0, G4_scratch);
2551 __ verify_oop(G4_scratch);
2552 }
2553 // setup pointer to jvalue object
2554 __ mov(Lesp, G1_scratch); __ inc(G1_scratch, wordSize);
2555 // G4_scratch: object pointer or NULL if static
2556 // G3_scratch: cache entry pointer
2557 // G1_scratch: jvalue object on the stack
2558 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
2559 G4_scratch, G3_scratch, G1_scratch);
2560 __ get_cache_and_index_at_bcp(Rcache, index, 1);
2561 __ bind(Label1);
2562 }
2563 }
2565 void TemplateTable::pop_and_check_object(Register r) {
2566 __ pop_ptr(r);
2567 __ null_check(r); // for field access must check obj.
2568 __ verify_oop(r);
2569 }
2571 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
2572 transition(vtos, vtos);
2573 Register Rcache = G3_scratch;
2574 Register index = G4_scratch;
2575 Register Rclass = Rcache;
2576 Register Roffset= G4_scratch;
2577 Register Rflags = G1_scratch;
2578 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2580 resolve_cache_and_index(byte_no, Rcache, index, sizeof(u2));
2581 jvmti_post_field_mod(Rcache, index, is_static);
2582 load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);
2584 Assembler::Membar_mask_bits read_bits =
2585 Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore);
2586 Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad;
2588 Label notVolatile, checkVolatile, exit;
2589 if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
2590 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
2591 __ and3(Rflags, Lscratch, Lscratch);
2593 if (__ membar_has_effect(read_bits)) {
2594 __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile);
2595 volatile_barrier(read_bits);
2596 __ bind(notVolatile);
2597 }
2598 }
2600 __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
2601 // Make sure we don't need to mask Rflags after the above shift
2602 ConstantPoolCacheEntry::verify_tos_state_shift();
2604 // compute field type
2605 Label notInt, notShort, notChar, notObj, notByte, notLong, notFloat;
2607 if (is_static) {
2608 // putstatic with object type most likely, check that first
2609 __ cmp(Rflags, atos);
2610 __ br(Assembler::notEqual, false, Assembler::pt, notObj);
2611 __ delayed()->cmp(Rflags, itos);
2613 // atos
2614 {
2615 __ pop_ptr();
2616 __ verify_oop(Otos_i);
2617 do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
2618 __ ba(checkVolatile);
2619 __ delayed()->tst(Lscratch);
2620 }
2622 __ bind(notObj);
2623 // cmp(Rflags, itos);
2624 __ br(Assembler::notEqual, false, Assembler::pt, notInt);
2625 __ delayed()->cmp(Rflags, btos);
2627 // itos
2628 {
2629 __ pop_i();
2630 __ st(Otos_i, Rclass, Roffset);
2631 __ ba(checkVolatile);
2632 __ delayed()->tst(Lscratch);
2633 }
2635 __ bind(notInt);
2636 } else {
2637 // putfield with int type most likely, check that first
2638 __ cmp(Rflags, itos);
2639 __ br(Assembler::notEqual, false, Assembler::pt, notInt);
2640 __ delayed()->cmp(Rflags, atos);
2642 // itos
2643 {
2644 __ pop_i();
2645 pop_and_check_object(Rclass);
2646 __ st(Otos_i, Rclass, Roffset);
2647 patch_bytecode(Bytecodes::_fast_iputfield, G3_scratch, G4_scratch, true, byte_no);
2648 __ ba(checkVolatile);
2649 __ delayed()->tst(Lscratch);
2650 }
2652 __ bind(notInt);
2653 // cmp(Rflags, atos);
2654 __ br(Assembler::notEqual, false, Assembler::pt, notObj);
2655 __ delayed()->cmp(Rflags, btos);
2657 // atos
2658 {
2659 __ pop_ptr();
2660 pop_and_check_object(Rclass);
2661 __ verify_oop(Otos_i);
2662 do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
2663 patch_bytecode(Bytecodes::_fast_aputfield, G3_scratch, G4_scratch, true, byte_no);
2664 __ ba(checkVolatile);
2665 __ delayed()->tst(Lscratch);
2666 }
2668 __ bind(notObj);
2669 }
2671 // cmp(Rflags, btos);
2672 __ br(Assembler::notEqual, false, Assembler::pt, notByte);
2673 __ delayed()->cmp(Rflags, ltos);
2675 // btos
2676 {
2677 __ pop_i();
2678 if (!is_static) pop_and_check_object(Rclass);
2679 __ stb(Otos_i, Rclass, Roffset);
2680 if (!is_static) {
2681 patch_bytecode(Bytecodes::_fast_bputfield, G3_scratch, G4_scratch, true, byte_no);
2682 }
2683 __ ba(checkVolatile);
2684 __ delayed()->tst(Lscratch);
2685 }
2687 __ bind(notByte);
2688 // cmp(Rflags, ltos);
2689 __ br(Assembler::notEqual, false, Assembler::pt, notLong);
2690 __ delayed()->cmp(Rflags, ctos);
2692 // ltos
2693 {
2694 __ pop_l();
2695 if (!is_static) pop_and_check_object(Rclass);
2696 __ st_long(Otos_l, Rclass, Roffset);
2697 if (!is_static) {
2698 patch_bytecode(Bytecodes::_fast_lputfield, G3_scratch, G4_scratch, true, byte_no);
2699 }
2700 __ ba(checkVolatile);
2701 __ delayed()->tst(Lscratch);
2702 }
2704 __ bind(notLong);
2705 // cmp(Rflags, ctos);
2706 __ br(Assembler::notEqual, false, Assembler::pt, notChar);
2707 __ delayed()->cmp(Rflags, stos);
2709 // ctos (char)
2710 {
2711 __ pop_i();
2712 if (!is_static) pop_and_check_object(Rclass);
2713 __ sth(Otos_i, Rclass, Roffset);
2714 if (!is_static) {
2715 patch_bytecode(Bytecodes::_fast_cputfield, G3_scratch, G4_scratch, true, byte_no);
2716 }
2717 __ ba(checkVolatile);
2718 __ delayed()->tst(Lscratch);
2719 }
2721 __ bind(notChar);
2722 // cmp(Rflags, stos);
2723 __ br(Assembler::notEqual, false, Assembler::pt, notShort);
2724 __ delayed()->cmp(Rflags, ftos);
2726 // stos (short)
2727 {
2728 __ pop_i();
2729 if (!is_static) pop_and_check_object(Rclass);
2730 __ sth(Otos_i, Rclass, Roffset);
2731 if (!is_static) {
2732 patch_bytecode(Bytecodes::_fast_sputfield, G3_scratch, G4_scratch, true, byte_no);
2733 }
2734 __ ba(checkVolatile);
2735 __ delayed()->tst(Lscratch);
2736 }
2738 __ bind(notShort);
2739 // cmp(Rflags, ftos);
2740 __ br(Assembler::notZero, false, Assembler::pt, notFloat);
2741 __ delayed()->nop();
2743 // ftos
2744 {
2745 __ pop_f();
2746 if (!is_static) pop_and_check_object(Rclass);
2747 __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
2748 if (!is_static) {
2749 patch_bytecode(Bytecodes::_fast_fputfield, G3_scratch, G4_scratch, true, byte_no);
2750 }
2751 __ ba(checkVolatile);
2752 __ delayed()->tst(Lscratch);
2753 }
2755 __ bind(notFloat);
2757 // dtos
2758 {
2759 __ pop_d();
2760 if (!is_static) pop_and_check_object(Rclass);
2761 __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
2762 if (!is_static) {
2763 patch_bytecode(Bytecodes::_fast_dputfield, G3_scratch, G4_scratch, true, byte_no);
2764 }
2765 }
2767 __ bind(checkVolatile);
2768 __ tst(Lscratch);
2770 if (__ membar_has_effect(write_bits)) {
2771 // condition codes were set by the tst(Lscratch) above
2772 __ br(Assembler::zero, false, Assembler::pt, exit);
2773 __ delayed()->nop();
2774 volatile_barrier(Assembler::StoreLoad);
2775 __ bind(exit);
2776 }
2777 }
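// The volatile-store protocol generated above, as a sketch:
//
//   if (is_volatile) membar(LoadStore | StoreStore);  // read_bits: no-op on TSO
//   store(value, obj, off);     // do_oop_store adds the GC barriers for atos
//   if (is_volatile) membar(StoreLoad);               // write_bits: real membar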
2779 void TemplateTable::fast_storefield(TosState state) {
2780 transition(state, vtos);
2781 Register Rcache = G3_scratch;
2782 Register Rclass = Rcache;
2783 Register Roffset= G4_scratch;
2784 Register Rflags = G1_scratch;
2785 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2787 jvmti_post_fast_field_mod();
2789 __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 1);
2791 Assembler::Membar_mask_bits read_bits =
2792 Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore);
2793 Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad;
2795 Label notVolatile, checkVolatile, exit;
2796 if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
2797 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
2798 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
2799 __ and3(Rflags, Lscratch, Lscratch);
2800 if (__ membar_has_effect(read_bits)) {
2801 __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile);
2802 volatile_barrier(read_bits);
2803 __ bind(notVolatile);
2804 }
2805 }
2807 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
2808 pop_and_check_object(Rclass);
2810 switch (bytecode()) {
2811 case Bytecodes::_fast_bputfield: __ stb(Otos_i, Rclass, Roffset); break;
2812 case Bytecodes::_fast_cputfield: /* fall through */
2813 case Bytecodes::_fast_sputfield: __ sth(Otos_i, Rclass, Roffset); break;
2814 case Bytecodes::_fast_iputfield: __ st(Otos_i, Rclass, Roffset); break;
2815 case Bytecodes::_fast_lputfield: __ st_long(Otos_l, Rclass, Roffset); break;
2816 case Bytecodes::_fast_fputfield:
2817 __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
2818 break;
2819 case Bytecodes::_fast_dputfield:
2820 __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
2821 break;
2822 case Bytecodes::_fast_aputfield:
2823 do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
2824 break;
2825 default:
2826 ShouldNotReachHere();
2827 }
2829 if (__ membar_has_effect(write_bits)) {
2830 __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, exit);
2831 volatile_barrier(Assembler::StoreLoad);
2832 __ bind(exit);
2833 }
2834 }
2837 void TemplateTable::putfield(int byte_no) {
2838 putfield_or_static(byte_no, false);
2839 }
2841 void TemplateTable::putstatic(int byte_no) {
2842 putfield_or_static(byte_no, true);
2843 }
2846 void TemplateTable::fast_xaccess(TosState state) {
2847 transition(vtos, state);
2848 Register Rcache = G3_scratch;
2849 Register Roffset = G4_scratch;
2850 Register Rflags = G4_scratch;
2851 Register Rreceiver = Lscratch;
2853 __ ld_ptr(Llocals, 0, Rreceiver);
2855 // access constant pool cache (is resolved)
2856 __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 2);
2857 __ ld_ptr(Rcache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset(), Roffset);
2858 __ add(Lbcp, 1, Lbcp); // needed to report exception at the correct bcp
2860 __ verify_oop(Rreceiver);
2861 __ null_check(Rreceiver);
2862 if (state == atos) {
2863 __ load_heap_oop(Rreceiver, Roffset, Otos_i);
2864 } else if (state == itos) {
2865 __ ld (Rreceiver, Roffset, Otos_i) ;
2866 } else if (state == ftos) {
2867 __ ldf(FloatRegisterImpl::S, Rreceiver, Roffset, Ftos_f);
2868 } else {
2869 ShouldNotReachHere();
2870 }
2872 Assembler::Membar_mask_bits membar_bits =
2873 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
2874 if (__ membar_has_effect(membar_bits)) {
2876 // Get is_volatile value in Rflags and check if membar is needed
2877 __ ld_ptr(Rcache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset(), Rflags);
2879 // Test volatile
2880 Label notVolatile;
2881 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
2882 __ btst(Rflags, Lscratch);
2883 __ br(Assembler::zero, false, Assembler::pt, notVolatile);
2884 __ delayed()->nop();
2885 volatile_barrier(membar_bits);
2886 __ bind(notVolatile);
2887 }
2889 __ interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
2890 __ sub(Lbcp, 1, Lbcp);
2891 }
2893 //----------------------------------------------------------------------------------------------------
2894 // Calls
2896 void TemplateTable::count_calls(Register method, Register temp) {
2897 // implemented elsewhere
2898 ShouldNotReachHere();
2899 }
2901 void TemplateTable::prepare_invoke(int byte_no,
2902 Register method, // linked method (or i-klass)
2903 Register ra, // return address
2904 Register index, // itable index, MethodType, etc.
2905 Register recv, // if caller wants to see it
2906 Register flags // if caller wants to test it
2907 ) {
2908 // determine flags
2909 const Bytecodes::Code code = bytecode();
2910 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
2911 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
2912 const bool is_invokehandle = code == Bytecodes::_invokehandle;
2913 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
2914 const bool is_invokespecial = code == Bytecodes::_invokespecial;
2915 const bool load_receiver = (recv != noreg);
2916 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
2917 assert(recv == noreg || recv == O0, "");
2918 assert(flags == noreg || flags == O1, "");
2920 // setup registers & access constant pool cache
2921 if (recv == noreg) recv = O0;
2922 if (flags == noreg) flags = O1;
2923 const Register temp = O2;
2924 assert_different_registers(method, ra, index, recv, flags, temp);
2926 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
2928 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
2930 // maybe push appendix to arguments
2931 if (is_invokedynamic || is_invokehandle) {
2932 Label L_no_push;
2933 __ set((1 << ConstantPoolCacheEntry::has_appendix_shift), temp);
2934 __ btst(flags, temp);
2935 __ br(Assembler::zero, false, Assembler::pt, L_no_push);
2936 __ delayed()->nop();
2937 // Push the appendix as a trailing parameter.
2938 // This must be done before we get the receiver,
2939 // since the parameter_size includes it.
2940 assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
2941 __ load_resolved_reference_at_index(temp, index);
2942 __ verify_oop(temp);
2943 __ push_ptr(temp); // push appendix (MethodType, CallSite, etc.)
2944 __ bind(L_no_push);
2945 }
2947 // load receiver if needed (after appendix is pushed so parameter size is correct)
2948 if (load_receiver) {
2949 __ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, temp); // get parameter size
2950 __ load_receiver(temp, recv); // __ argument_address uses Gargs but we need Lesp
2951 __ verify_oop(recv);
2952 }
2954 // compute return type
2955 __ srl(flags, ConstantPoolCacheEntry::tos_state_shift, ra);
2956 // Make sure we don't need to mask flags after the above shift
2957 ConstantPoolCacheEntry::verify_tos_state_shift();
2958 // load return address
2959 {
2960 const address table_addr = (is_invokeinterface || is_invokedynamic) ?
2961 (address)Interpreter::return_5_addrs_by_index_table() :
2962 (address)Interpreter::return_3_addrs_by_index_table();
2963 AddressLiteral table(table_addr);
2964 __ set(table, temp);
2965 __ sll(ra, LogBytesPerWord, ra);
2966 __ ld_ptr(Address(temp, ra), ra);
2967 }
2968 }
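// Sketch of the return-address lookup just above: the interpreter keeps one
// table of return entry points per invoke shape, indexed by result tos state:
//
//   address* table = (is_invokeinterface || is_invokedynamic)
//                  ? Interpreter::return_5_addrs_by_index_table()  // 5-byte bytecodes
//                  : Interpreter::return_3_addrs_by_index_table();
//   ra = table[flags >> tos_state_shift];   // where the callee returns to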
2971 void TemplateTable::generate_vtable_call(Register Rrecv, Register Rindex, Register Rret) {
2972 Register Rtemp = G4_scratch;
2973 Register Rcall = Rindex;
2974 assert_different_registers(Rcall, G5_method, Gargs, Rret);
2976 // get target Method* & entry point
2977 __ lookup_virtual_method(Rrecv, Rindex, G5_method);
2978 __ call_from_interpreter(Rcall, Gargs, Rret);
2979 }
2981 void TemplateTable::invokevirtual(int byte_no) {
2982 transition(vtos, vtos);
2983 assert(byte_no == f2_byte, "use this argument");
2985 Register Rscratch = G3_scratch;
2986 Register Rtemp = G4_scratch;
2987 Register Rret = Lscratch;
2988 Register O0_recv = O0;
2989 Label notFinal;
2991 load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, true, false, false);
2992 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
2994 // Check for vfinal
2995 __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), G4_scratch);
2996 __ btst(Rret, G4_scratch);
2997 __ br(Assembler::zero, false, Assembler::pt, notFinal);
2998 __ delayed()->and3(Rret, 0xFF, G4_scratch); // gets number of parameters
3000 patch_bytecode(Bytecodes::_fast_invokevfinal, Rscratch, Rtemp);
3002 invokevfinal_helper(Rscratch, Rret);
3004 __ bind(notFinal);
3006 __ mov(G5_method, Rscratch); // better scratch register
3007 __ load_receiver(G4_scratch, O0_recv); // gets receiverOop
3008 // receiver is in O0_recv
3009 __ verify_oop(O0_recv);
3011 // get return address
3012 AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
3013 __ set(table, Rtemp);
3014 __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret); // get return type
3015 // Make sure we don't need to mask Rret after the above shift
3016 ConstantPoolCacheEntry::verify_tos_state_shift();
3017 __ sll(Rret, LogBytesPerWord, Rret);
3018 __ ld_ptr(Rtemp, Rret, Rret); // get return address
3020 // get receiver klass
3021 __ null_check(O0_recv, oopDesc::klass_offset_in_bytes());
3022 __ load_klass(O0_recv, O0_recv);
3023 __ verify_klass_ptr(O0_recv);
3025 __ profile_virtual_call(O0_recv, O4);
3027 generate_vtable_call(O0_recv, Rscratch, Rret);
3028 }
3030 void TemplateTable::fast_invokevfinal(int byte_no) {
3031 transition(vtos, vtos);
3032 assert(byte_no == f2_byte, "use this argument");
3034 load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Lscratch, true,
3035 /*is_invokevfinal*/true, false);
3036 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
3037 invokevfinal_helper(G3_scratch, Lscratch);
3038 }
3040 void TemplateTable::invokevfinal_helper(Register Rscratch, Register Rret) {
3041 Register Rtemp = G4_scratch;
3043 // Load receiver from stack slot
3044 __ ld_ptr(G5_method, in_bytes(Method::const_offset()), G4_scratch);
3045 __ lduh(G4_scratch, in_bytes(ConstMethod::size_of_parameters_offset()), G4_scratch);
3046 __ load_receiver(G4_scratch, O0);
3048 // receiver NULL check
3049 __ null_check(O0);
3051 __ profile_final_call(O4);
3053 // get return address
3054 AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
3055 __ set(table, Rtemp);
3056 __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret); // get return type
3057 // Make sure we don't need to mask Rret after the above shift
3058 ConstantPoolCacheEntry::verify_tos_state_shift();
3059 __ sll(Rret, LogBytesPerWord, Rret);
3060 __ ld_ptr(Rtemp, Rret, Rret); // get return address
3063 // do the call
3064 __ call_from_interpreter(Rscratch, Gargs, Rret);
3065 }
3068 void TemplateTable::invokespecial(int byte_no) {
3069 transition(vtos, vtos);
3070 assert(byte_no == f1_byte, "use this argument");
3072 const Register Rret = Lscratch;
3073 const Register O0_recv = O0;
3074 const Register Rscratch = G3_scratch;
3076 prepare_invoke(byte_no, G5_method, Rret, noreg, O0_recv); // get receiver also for null check
3077 __ null_check(O0_recv);
3079 // do the call
3080 __ profile_call(O4);
3081 __ call_from_interpreter(Rscratch, Gargs, Rret);
3082 }
3085 void TemplateTable::invokestatic(int byte_no) {
3086 transition(vtos, vtos);
3087 assert(byte_no == f1_byte, "use this argument");
3089 const Register Rret = Lscratch;
3090 const Register Rscratch = G3_scratch;
3092 prepare_invoke(byte_no, G5_method, Rret); // get f1 Method*
3094 // do the call
3095 __ profile_call(O4);
3096 __ call_from_interpreter(Rscratch, Gargs, Rret);
3097 }
3099 void TemplateTable::invokeinterface_object_method(Register RKlass,
3100 Register Rcall,
3101 Register Rret,
3102 Register Rflags) {
3103 Register Rscratch = G4_scratch;
3104 Register Rindex = Lscratch;
3106 assert_different_registers(Rscratch, Rindex, Rret);
3108 Label notFinal;
3110 // Check for vfinal
3111 __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), Rscratch);
3112 __ btst(Rflags, Rscratch);
3113 __ br(Assembler::zero, false, Assembler::pt, notFinal);
3114 __ delayed()->nop();
3116 __ profile_final_call(O4);
3118 // do the call - the index (f2) contains the Method*
3119 assert_different_registers(G5_method, Gargs, Rcall);
3120 __ mov(Rindex, G5_method);
3121 __ call_from_interpreter(Rcall, Gargs, Rret);
3122 __ bind(notFinal);
3124 __ profile_virtual_call(RKlass, O4);
3125 generate_vtable_call(RKlass, Rindex, Rret);
3126 }
3129 void TemplateTable::invokeinterface(int byte_no) {
3130 transition(vtos, vtos);
3131 assert(byte_no == f1_byte, "use this argument");
3133 const Register Rinterface = G1_scratch;
3134 const Register Rret = G3_scratch;
3135 const Register Rindex = Lscratch;
3136 const Register O0_recv = O0;
3137 const Register O1_flags = O1;
3138 const Register O2_Klass = O2;
3139 const Register Rscratch = G4_scratch;
3140 assert_different_registers(Rscratch, G5_method);
3142 prepare_invoke(byte_no, Rinterface, Rret, Rindex, O0_recv, O1_flags);
3144 // get receiver klass
3145 __ null_check(O0_recv, oopDesc::klass_offset_in_bytes());
3146 __ load_klass(O0_recv, O2_Klass);
3148 // Special case of invokeinterface called for virtual method of
3149 // java.lang.Object. See cpCacheOop.cpp for details.
3150 // This code isn't produced by javac, but could be produced by
3151 // another compliant Java compiler.
3152 Label notMethod;
3153 __ set((1 << ConstantPoolCacheEntry::is_forced_virtual_shift), Rscratch);
3154 __ btst(O1_flags, Rscratch);
3155 __ br(Assembler::zero, false, Assembler::pt, notMethod);
3156 __ delayed()->nop();
3158 invokeinterface_object_method(O2_Klass, Rinterface, Rret, O1_flags);
3160 __ bind(notMethod);
3162 __ profile_virtual_call(O2_Klass, O4);
3164 //
3165 // find entry point to call
3166 //
3168 // compute start of first itableOffsetEntry (which is at end of vtable)
3169 const int base = InstanceKlass::vtable_start_offset() * wordSize;
3170 Label search;
3171 Register Rtemp = O1_flags;
3173 __ ld(O2_Klass, InstanceKlass::vtable_length_offset() * wordSize, Rtemp);
3174 if (align_object_offset(1) > 1) {
3175 __ round_to(Rtemp, align_object_offset(1));
3176 }
3177 __ sll(Rtemp, LogBytesPerWord, Rtemp); // Rtemp *= wordSize;
3178 if (Assembler::is_simm13(base)) {
3179 __ add(Rtemp, base, Rtemp);
3180 } else {
3181 __ set(base, Rscratch);
3182 __ add(Rscratch, Rtemp, Rtemp);
3183 }
3184 __ add(O2_Klass, Rtemp, Rscratch);
3186 __ bind(search);
3188 __ ld_ptr(Rscratch, itableOffsetEntry::interface_offset_in_bytes(), Rtemp);
3189 {
3190 Label ok;
3192 // Check that entry is non-null. Null entries are probably a bytecode
3193 // problem. If the interface isn't implemented by the receiver class,
3194 // the VM should throw IncompatibleClassChangeError. linkResolver checks
3195 // this too but that's only if the entry isn't already resolved, so we
3196 // need to check again.
3197 __ br_notnull_short( Rtemp, Assembler::pt, ok);
3198 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
3199 __ should_not_reach_here();
3200 __ bind(ok);
3201 }
3203 __ cmp(Rinterface, Rtemp);
3204 __ brx(Assembler::notEqual, true, Assembler::pn, search);
3205 __ delayed()->add(Rscratch, itableOffsetEntry::size() * wordSize, Rscratch);
3207 // entry found and Rscratch points to it
3208 __ ld(Rscratch, itableOffsetEntry::offset_offset_in_bytes(), Rscratch);
3210 assert(itableMethodEntry::method_offset_in_bytes() == 0, "adjust instruction below");
3211 __ sll(Rindex, exact_log2(itableMethodEntry::size() * wordSize), Rindex); // Rindex *= itable entry size in bytes
3212 __ add(Rscratch, Rindex, Rscratch);
3213 __ ld_ptr(O2_Klass, Rscratch, G5_method);
3215 // Check for abstract method error.
3216 {
3217 Label ok;
3218 __ br_notnull_short(G5_method, Assembler::pt, ok);
3219 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3220 __ should_not_reach_here();
3221 __ bind(ok);
3222 }
3224 Register Rcall = Rinterface;
3225 assert_different_registers(Rcall, G5_method, Gargs, Rret);
3227 __ call_from_interpreter(Rcall, Gargs, Rret);
3228 }
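// C-like sketch of the itable walk above (hypothetical accessor names):
//
//   itableOffsetEntry* ioe = (itableOffsetEntry*)
//       ((address)klass + vtable_start + align(vtable_length) * wordSize);
//   while (true) {
//     if (ioe->interface() == NULL) throw_IncompatibleClassChangeError();
//     if (ioe->interface() == Rinterface) break;
//     ioe++;                                  // advances by size() * wordSize
//   }
//   Method* m = *(Method**)((address)klass + ioe->offset()
//                           + itable_index * itableMethodEntry_size);
//   if (m == NULL) throw_AbstractMethodError();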
3230 void TemplateTable::invokehandle(int byte_no) {
3231 transition(vtos, vtos);
3232 assert(byte_no == f1_byte, "use this argument");
3234 if (!EnableInvokeDynamic) {
3235 // rewriter does not generate this bytecode
3236 __ should_not_reach_here();
3237 return;
3238 }
3240 const Register Rret = Lscratch;
3241 const Register G4_mtype = G4_scratch;
3242 const Register O0_recv = O0;
3243 const Register Rscratch = G3_scratch;
3245 prepare_invoke(byte_no, G5_method, Rret, G4_mtype, O0_recv);
3246 __ null_check(O0_recv);
3248 // G4: MethodType object (from cpool->resolved_references[f1], if necessary)
3249 // G5: MH.invokeExact_MT method (from f2)
3251 // Note: G4_mtype is already pushed (if necessary) by prepare_invoke
3253 // do the call
3254 __ verify_oop(G4_mtype);
3255 __ profile_final_call(O4); // FIXME: profile the LambdaForm also
3256 __ call_from_interpreter(Rscratch, Gargs, Rret);
3257 }
3260 void TemplateTable::invokedynamic(int byte_no) {
3261 transition(vtos, vtos);
3262 assert(byte_no == f1_byte, "use this argument");
3264 if (!EnableInvokeDynamic) {
3265 // We should not encounter this bytecode if !EnableInvokeDynamic.
3266 // The verifier will stop it. However, if we get past the verifier,
3267 // this will stop the thread in a reasonable way, without crashing the JVM.
3268 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3269 InterpreterRuntime::throw_IncompatibleClassChangeError));
3270 // the call_VM checks for exception, so we should never return here.
3271 __ should_not_reach_here();
3272 return;
3273 }
3275 const Register Rret = Lscratch;
3276 const Register G4_callsite = G4_scratch;
3277 const Register Rscratch = G3_scratch;
3279 prepare_invoke(byte_no, G5_method, Rret, G4_callsite);
3281 // G4: CallSite object (from cpool->resolved_references[f1])
3282 // G5: MH.linkToCallSite method (from f2)
3284 // Note: G4_callsite is already pushed by prepare_invoke
3286 // %%% should make a type profile for any invokedynamic that takes a ref argument
3287 // profile this call
3288 __ profile_call(O4);
3290 // do the call
3291 __ verify_oop(G4_callsite);
3292 __ call_from_interpreter(Rscratch, Gargs, Rret);
3293 }
3296 //----------------------------------------------------------------------------------------------------
3297 // Allocation
3299 void TemplateTable::_new() {
3300 transition(vtos, atos);
3302 Label slow_case;
3303 Label done;
3304 Label initialize_header;
3305 Label initialize_object; // including clearing the fields
3307 Register RallocatedObject = Otos_i;
3308 Register RinstanceKlass = O1;
3309 Register Roffset = O3;
3310 Register Rscratch = O4;
3312 __ get_2_byte_integer_at_bcp(1, Rscratch, Roffset, InterpreterMacroAssembler::Unsigned);
3313 __ get_cpool_and_tags(Rscratch, G3_scratch);
3314 // make sure the class we're about to instantiate has been resolved.
3315 // This is done before loading the InstanceKlass to be consistent with the
3316 // order in which the constant pool is updated (see ConstantPool::klass_at_put)
3317 __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
3318 __ ldub(G3_scratch, Roffset, G3_scratch);
3319 __ cmp(G3_scratch, JVM_CONSTANT_Class);
3320 __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
3321 __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
3322 // get InstanceKlass
3323 //__ sll(Roffset, LogBytesPerWord, Roffset); // executed in delay slot
3324 __ add(Roffset, sizeof(ConstantPool), Roffset);
3325 __ ld_ptr(Rscratch, Roffset, RinstanceKlass);
3327 // make sure klass is fully initialized:
3328 __ ldub(RinstanceKlass, in_bytes(InstanceKlass::init_state_offset()), G3_scratch);
3329 __ cmp(G3_scratch, InstanceKlass::fully_initialized);
3330 __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
3331 __ delayed()->ld(RinstanceKlass, in_bytes(Klass::layout_helper_offset()), Roffset);
3333 // get instance_size in InstanceKlass (already aligned)
3334 //__ ld(RinstanceKlass, in_bytes(Klass::layout_helper_offset()), Roffset);
3336 // make sure the klass has no finalizer and is not abstract, an interface, or java/lang/Class
3337 __ btst(Klass::_lh_instance_slow_path_bit, Roffset);
3338 __ br(Assembler::notZero, false, Assembler::pn, slow_case);
3339 __ delayed()->nop();
3341 // allocate the instance
3342 // 1) try to allocate in the TLAB (sketched below)
3343 // 2) if that fails, and the TLAB is not yet full enough to discard, allocate in the shared Eden
3344 // 3) if the above fails (or is not applicable), go to a slow case
3345 // (creates a new TLAB, etc.)
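// Sketch of step 1), the TLAB bump-pointer fast path (pseudocode):
//
//   HeapWord* top     = thread->tlab_top();
//   HeapWord* new_top = top + size;
//   if (new_top <= thread->tlab_end()) {
//     thread->set_tlab_top(new_top);  // no CAS: the TLAB is thread-local
//     obj = top;                      // fall through to field/header init
//   }                                 // else: step 2) (CAS in Eden) or 3)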
3347 const bool allow_shared_alloc =
3348 Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
3350 if (UseTLAB) {
3351 Register RoldTopValue = RallocatedObject;
3352 Register RtlabWasteLimitValue = G3_scratch;
3353 Register RnewTopValue = G1_scratch;
3354 Register RendValue = Rscratch;
3355 Register RfreeValue = RnewTopValue;
3357 // check if we can allocate in the TLAB
3358 __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), RoldTopValue); // sets up RallocatedObject
3359 __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), RendValue);
3360 __ add(RoldTopValue, Roffset, RnewTopValue);
3362 // if there is enough space, we do not CAS and do not clear
3363 __ cmp(RnewTopValue, RendValue);
3364 if (ZeroTLAB) {
3365 // the fields have already been cleared
3366 __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_header);
3367 } else {
3368 // initialize both the header and fields
3369 __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_object);
3370 }
3371 __ delayed()->st_ptr(RnewTopValue, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
3373 if (allow_shared_alloc) {
3374 // Check if tlab should be discarded (refill_waste_limit >= free)
3375 __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), RtlabWasteLimitValue);
3376 __ sub(RendValue, RoldTopValue, RfreeValue);
3377 #ifdef _LP64
3378 __ srlx(RfreeValue, LogHeapWordSize, RfreeValue);
3379 #else
3380 __ srl(RfreeValue, LogHeapWordSize, RfreeValue);
3381 #endif
3382 __ cmp_and_brx_short(RtlabWasteLimitValue, RfreeValue, Assembler::greaterEqualUnsigned, Assembler::pt, slow_case); // tlab waste is small
3384 // increment waste limit to prevent getting stuck on this slow path
3385 __ add(RtlabWasteLimitValue, ThreadLocalAllocBuffer::refill_waste_limit_increment(), RtlabWasteLimitValue);
3386 __ st_ptr(RtlabWasteLimitValue, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
3387 } else {
3388 // No allocation in the shared eden.
3389 __ ba_short(slow_case);
3390 }
3391 }
3393 // Allocation in the shared Eden
3394 if (allow_shared_alloc) {
3395 Register RoldTopValue = G1_scratch;
3396 Register RtopAddr = G3_scratch;
3397 Register RnewTopValue = RallocatedObject;
3398 Register RendValue = Rscratch;
3400 __ set((intptr_t)Universe::heap()->top_addr(), RtopAddr);
3402 Label retry;
3403 __ bind(retry);
3404 __ set((intptr_t)Universe::heap()->end_addr(), RendValue);
3405 __ ld_ptr(RendValue, 0, RendValue);
3406 __ ld_ptr(RtopAddr, 0, RoldTopValue);
3407 __ add(RoldTopValue, Roffset, RnewTopValue);
3409 // RnewTopValue contains the top address after the new object
3410 // has been allocated.
3411 __ cmp_and_brx_short(RnewTopValue, RendValue, Assembler::greaterUnsigned, Assembler::pn, slow_case);
3413 __ casx_under_lock(RtopAddr, RoldTopValue, RnewTopValue,
3414 VM_Version::v9_instructions_work() ? NULL :
3415 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
3417 // if someone beat us on the allocation, try again, otherwise continue
3418 __ cmp_and_brx_short(RoldTopValue, RnewTopValue, Assembler::notEqual, Assembler::pn, retry);
3420 // bump total bytes allocated by this thread
3421 // RoldTopValue and RtopAddr are dead, so can use G1 and G3
3422 __ incr_allocated_bytes(Roffset, G1_scratch, G3_scratch);
3423 }
3425 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
3426 // clear object fields
3427 __ bind(initialize_object);
3428 __ deccc(Roffset, sizeof(oopDesc));
3429 __ br(Assembler::zero, false, Assembler::pt, initialize_header);
3430 __ delayed()->add(RallocatedObject, sizeof(oopDesc), G3_scratch);
3432 // initialize remaining object fields
3433 if (UseBlockZeroing) {
3434 // Use BIS for zeroing
3435 __ bis_zeroing(G3_scratch, Roffset, G1_scratch, initialize_header);
3436 } else {
3437 Label loop;
3438 __ subcc(Roffset, wordSize, Roffset);
3439 __ bind(loop);
3440 //__ subcc(Roffset, wordSize, Roffset); // executed above loop or in delay slot
3441 __ st_ptr(G0, G3_scratch, Roffset);
3442 __ br(Assembler::notEqual, false, Assembler::pt, loop);
3443 __ delayed()->subcc(Roffset, wordSize, Roffset);
3444 }
3445 __ ba_short(initialize_header);
3446 }
3448 // slow case
3449 __ bind(slow_case);
3450 __ get_2_byte_integer_at_bcp(1, G3_scratch, O2, InterpreterMacroAssembler::Unsigned);
3451 __ get_constant_pool(O1);
3453 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), O1, O2);
3455 __ ba_short(done);
3457 // Initialize the header: mark, klass
3458 __ bind(initialize_header);
3460 if (UseBiasedLocking) {
3461 __ ld_ptr(RinstanceKlass, in_bytes(Klass::prototype_header_offset()), G4_scratch);
3462 } else {
3463 __ set((intptr_t)markOopDesc::prototype(), G4_scratch);
3464 }
3465 __ st_ptr(G4_scratch, RallocatedObject, oopDesc::mark_offset_in_bytes()); // mark
3466 __ store_klass_gap(G0, RallocatedObject); // klass gap if compressed
3467 __ store_klass(RinstanceKlass, RallocatedObject); // klass (last for cms)
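// The klass is published last so that a concurrent collector (CMS) scanning
// the heap never sees a klass pointer on a partially initialized object.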
3469 {
3470 SkipIfEqual skip_if(
3471 _masm, G4_scratch, &DTraceAllocProbes, Assembler::zero);
3472 // Trigger dtrace event
3473 __ push(atos);
3474 __ call_VM_leaf(noreg,
3475 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), O0);
3476 __ pop(atos);
3477 }
3479 // continue
3480 __ bind(done);
3481 }
3485 void TemplateTable::newarray() {
3486 transition(itos, atos);
3487 __ ldub(Lbcp, 1, O1);
3488 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), O1, Otos_i);
3489 }
3492 void TemplateTable::anewarray() {
3493 transition(itos, atos);
3494 __ get_constant_pool(O1);
3495 __ get_2_byte_integer_at_bcp(1, G4_scratch, O2, InterpreterMacroAssembler::Unsigned);
3496 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), O1, O2, Otos_i);
3497 }
3500 void TemplateTable::arraylength() {
3501 transition(atos, itos);
3502 Label ok;
3503 __ verify_oop(Otos_i);
3504 __ tst(Otos_i);
3505 __ throw_if_not_1_x( Assembler::notZero, ok );
3506 __ delayed()->ld(Otos_i, arrayOopDesc::length_offset_in_bytes(), Otos_i);
3507 __ throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
3508 }
3511 void TemplateTable::checkcast() {
3512 transition(atos, atos);
3513 Label done, is_null, quicked, cast_ok, resolved;
3514 Register Roffset = G1_scratch;
3515 Register RobjKlass = O5;
3516 Register RspecifiedKlass = O4;
3518 // Check for casting a NULL
3519 __ br_null_short(Otos_i, Assembler::pn, is_null);
3521 // Get value klass in RobjKlass
3522 __ load_klass(Otos_i, RobjKlass); // get value klass
3524 // Get the constant pool index of the class
3525 __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);
3527 // See if the checkcast has been quickened
3528 __ get_cpool_and_tags(Lscratch, G3_scratch);
3529 __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
3530 __ ldub(G3_scratch, Roffset, G3_scratch);
3531 __ cmp(G3_scratch, JVM_CONSTANT_Class);
3532 __ br(Assembler::equal, true, Assembler::pt, quicked);
3533 __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
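// Not quickened yet: call into the runtime to resolve the class. The receiver
// is saved across the call (for the result and for GC) and restored afterwards;
// the annulled delay slot above scales the cp index only on the quickened path.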
3535 __ push_ptr(); // save receiver for result, and for GC
3536 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3537 __ get_vm_result_2(RspecifiedKlass);
3538 __ pop_ptr(Otos_i, G3_scratch); // restore receiver
3540 __ ba_short(resolved);
3542 // Extract target class from constant pool
3543 __ bind(quicked);
3544 __ add(Roffset, sizeof(ConstantPool), Roffset);
3545 __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
3546 __ bind(resolved);
3547 __ load_klass(Otos_i, RobjKlass); // get value klass
3549 // Generate a fast subtype check. Branch to cast_ok if no
3550 // failure. Throw exception if failure.
3551 __ gen_subtype_check( RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, cast_ok );
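// gen_subtype_check performs the quick checks (equality and the super-type
// display/cache) inline and only falls back to scanning the secondary supers
// list when those are inconclusive.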
3553 // Not a subtype; so must throw exception
3554 __ throw_if_not_x( Assembler::never, Interpreter::_throw_ClassCastException_entry, G3_scratch );
3556 __ bind(cast_ok);
3558 if (ProfileInterpreter) {
3559 __ ba_short(done);
3560 }
3561 __ bind(is_null);
3562 __ profile_null_seen(G3_scratch);
3563 __ bind(done);
3564 }
3567 void TemplateTable::instanceof() {
3568 Label done, is_null, quicked, resolved;
3569 transition(atos, itos);
3570 Register Roffset = G1_scratch;
3571 Register RobjKlass = O5;
3572 Register RspecifiedKlass = O4;
3574 // Check for a NULL object: null is never an instance of anything
3575 __ br_null_short(Otos_i, Assembler::pt, is_null);
3577 // Get value klass in RobjKlass
3578 __ load_klass(Otos_i, RobjKlass); // get value klass
3580 // Get the constant pool index of the class
3581 __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);
3583 // See if the instanceof has been quickened
3584 __ get_cpool_and_tags(Lscratch, G3_scratch);
3585 __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
3586 __ ldub(G3_scratch, Roffset, G3_scratch);
3587 __ cmp(G3_scratch, JVM_CONSTANT_Class);
3588 __ br(Assembler::equal, true, Assembler::pt, quicked);
3589 __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
3591 __ push_ptr(); // save receiver for result, and for GC
3592 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3593 __ get_vm_result_2(RspecifiedKlass);
3594 __ pop_ptr(Otos_i, G3_scratch); // restore receiver
3596 __ ba_short(resolved);
3598 // Extract target class from constant pool
3599 __ bind(quicked);
3600 __ add(Roffset, sizeof(ConstantPool), Roffset);
3601 __ get_constant_pool(Lscratch);
3602 __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
3603 __ bind(resolved);
3604 __ load_klass(Otos_i, RobjKlass); // get value klass
3606 // Generate a fast subtype check. Branch to done if no
3607 // failure. Return 0 on failure.
3608 __ or3(G0, 1, Otos_i); // set result assuming quick tests succeed
3609 __ gen_subtype_check( RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, done );
3610 // Not a subtype; return 0;
3611 __ clr( Otos_i );
3613 if (ProfileInterpreter) {
3614 __ ba_short(done);
3615 }
3616 __ bind(is_null);
3617 __ profile_null_seen(G3_scratch);
3618 __ bind(done);
3619 }
3621 void TemplateTable::_breakpoint() {
3623 // Note: We get here even if we are single stepping.
3624 // jbug insists on setting breakpoints at every bytecode
3625 // even if we are in single step mode.
3627 transition(vtos, vtos);
3628 // get the unpatched byte code
3629 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), Lmethod, Lbcp);
3630 __ mov(O0, Lbyte_code);
3632 // post the breakpoint event
3633 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), Lmethod, Lbcp);
3635 // complete the execution of original bytecode
3636 __ dispatch_normal(vtos);
3637 }
3640 //----------------------------------------------------------------------------------------------------
3641 // Exceptions
3643 void TemplateTable::athrow() {
3644 transition(atos, vtos);
3646 // This works because the exception is cached in Otos_i, which is the same as O0,
3647 // which is the same as what throw_exception_entry expects
3648 assert(Otos_i == Oexception, "see explanation above");
3650 __ verify_oop(Otos_i);
3651 __ null_check(Otos_i);
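// Assembler::never makes this an unconditional transfer into the shared
// throw-exception entry.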
3652 __ throw_if_not_x(Assembler::never, Interpreter::throw_exception_entry(), G3_scratch);
3653 }
3656 //----------------------------------------------------------------------------------------------------
3657 // Synchronization
3660 // See frame_sparc.hpp for monitor block layout.
3661 // Monitor elements are dynamically allocated by growing stack as needed.
3663 void TemplateTable::monitorenter() {
3664 transition(atos, vtos);
3665 __ verify_oop(Otos_i);
3666 // Try to acquire a lock on the object;
3667 // repeat until we succeed (i.e., until
3668 // monitorenter returns true).
3670 { Label ok;
3671 __ tst(Otos_i);
3672 __ throw_if_not_1_x( Assembler::notZero, ok);
3673 __ delayed()->mov(Otos_i, Lscratch); // save obj
3674 __ throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
3675 }
3677 assert(O0 == Otos_i, "Be sure where the object to lock is");
3679 // find a free slot in the monitor block
3682 // initialize entry pointer
3683 __ clr(O1); // points to free slot or NULL
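// Scan the monitor block from the most recent entry (Lmonitors) up to the
// oldest (top_most_monitor): remember a free slot in O1, and stop early if we
// find an entry already locking this same object.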
3685 {
3686 Label entry, loop, exit;
3687 __ add( __ top_most_monitor(), O2 ); // last one to check
3688 __ ba( entry );
3689 __ delayed()->mov( Lmonitors, O3 ); // first one to check
3692 __ bind( loop );
3694 __ verify_oop(O4); // verify each monitor's oop
3695 __ tst(O4); // is this entry unused?
3696 if (VM_Version::v9_instructions_work())
3697 __ movcc( Assembler::zero, false, Assembler::ptr_cc, O3, O1);
3698 else {
3699 Label L;
3700 __ br( Assembler::zero, true, Assembler::pn, L );
3701 __ delayed()->mov(O3, O1); // remember this slot if it is free
3702 __ bind(L);
3703 }
3705 __ cmp(O4, O0); // check if current entry is for same object
3706 __ brx( Assembler::equal, false, Assembler::pn, exit );
3707 __ delayed()->inc( O3, frame::interpreter_frame_monitor_size() * wordSize ); // check next one
3709 __ bind( entry );
3711 __ cmp( O3, O2 );
3712 __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, loop );
3713 __ delayed()->ld_ptr(O3, BasicObjectLock::obj_offset_in_bytes(), O4);
3715 __ bind( exit );
3716 }
3718 { Label allocated;
3720 // found free slot?
3721 __ br_notnull_short(O1, Assembler::pn, allocated);
3723 __ add_monitor_to_stack( false, O2, O3 );
3724 __ mov(Lmonitors, O1);
3726 __ bind(allocated);
3727 }
3729 // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
3730 // The object has already been popped from the stack, so the expression stack looks correct.
3731 __ inc(Lbcp);
3733 __ st_ptr(O0, O1, BasicObjectLock::obj_offset_in_bytes()); // store object
3734 __ lock_object(O1, O0);
3736 // check if there's enough space on the stack for the monitors after locking
3737 __ generate_stack_overflow_check(0);
3739 // The bcp has already been incremented. Just need to dispatch to next instruction.
3740 __ dispatch_next(vtos);
3741 }
3744 void TemplateTable::monitorexit() {
3745 transition(atos, vtos);
3746 __ verify_oop(Otos_i);
3747 __ tst(Otos_i);
3748 __ throw_if_not_x( Assembler::notZero, Interpreter::_throw_NullPointerException_entry, G3_scratch );
3750 assert(O0 == Otos_i, "just checking");
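// Scan the monitor block for the entry locking this object; if none matches,
// the locking is unbalanced and we throw IllegalMonitorStateException below.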
3752 { Label entry, loop, found;
3753 __ add( __ top_most_monitor(), O2 ); // last one to check
3754 __ ba(entry);
3755 // Use Lscratch to hold the monitor element to check, starting with the most recent monitor;
3756 // by using a local register it survives the call to the C routine.
3757 __ delayed()->mov( Lmonitors, Lscratch );
3759 __ bind( loop );
3761 __ verify_oop(O4); // verify each monitor's oop
3762 __ cmp(O4, O0); // check if current entry is for desired object
3763 __ brx( Assembler::equal, true, Assembler::pt, found );
3764 __ delayed()->mov(Lscratch, O1); // pass found entry as argument to monitorexit
3766 __ inc( Lscratch, frame::interpreter_frame_monitor_size() * wordSize ); // advance to next
3768 __ bind( entry );
3770 __ cmp( Lscratch, O2 );
3771 __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, loop );
3772 __ delayed()->ld_ptr(Lscratch, BasicObjectLock::obj_offset_in_bytes(), O4);
3774 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
3775 __ should_not_reach_here();
3777 __ bind(found);
3778 }
3779 __ unlock_object(O1);
3780 }
3783 //----------------------------------------------------------------------------------------------------
3784 // Wide instructions
3786 void TemplateTable::wide() {
3787 transition(vtos, vtos);
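// Dispatch through the wide-entry table: the byte following the wide opcode
// selects the bytecode, and its value, scaled to a word offset, indexes
// Interpreter::_wentry_point.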
3788 __ ldub(Lbcp, 1, G3_scratch);// get next bc
3789 __ sll(G3_scratch, LogBytesPerWord, G3_scratch);
3790 AddressLiteral ep(Interpreter::_wentry_point);
3791 __ set(ep, G4_scratch);
3792 __ ld_ptr(G4_scratch, G3_scratch, G3_scratch);
3793 __ jmp(G3_scratch, G0);
3794 __ delayed()->nop();
3795 // Note: the Lbcp increment step is part of the individual wide bytecode implementations
3796 }
3799 //----------------------------------------------------------------------------------------------------
3800 // Multi arrays
3802 void TemplateTable::multianewarray() {
3803 transition(vtos, atos);
3804 // put ndims * wordSize into Lscratch
3805 __ ldub( Lbcp, 3, Lscratch);
3806 __ sll( Lscratch, Interpreter::logStackElementSize, Lscratch);
3807 // Lesp points past the last_dim, so set O1 to the first_dim address
3808 __ add( Lesp, Lscratch, O1);
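// The runtime re-reads the pool index and dimension count from the bytecode
// stream and picks up the dimension sizes from the expression stack, starting
// at the address passed in O1.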
3809 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), O1);
3810 __ add( Lesp, Lscratch, Lesp); // pop all dimensions off the stack
3811 }
3812 #endif /* !CC_INTERP */