Sat, 01 Sep 2012 13:25:18 -0400
6964458: Reimplement class meta-data storage to use native memory
Summary: Remove PermGen, allocate meta-data in metaspace linked to class loaders, rewrite GC walking, rewrite and rename metadata to be C++ classes
Reviewed-by: jmasa, stefank, never, coleenp, kvn, brutisso, mgerdin, dholmes, jrose, twisti, roland
Contributed-by: jmasa <jon.masamitsu@oracle.com>, stefank <stefan.karlsson@oracle.com>, mgerdin <mikael.gerdin@oracle.com>, never <tom.rodriguez@oracle.com>
1 /*
2 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "interpreter/interpreter.hpp"
27 #include "interpreter/interpreterRuntime.hpp"
28 #include "interpreter/templateTable.hpp"
29 #include "memory/universe.inline.hpp"
30 #include "oops/methodData.hpp"
31 #include "oops/objArrayKlass.hpp"
32 #include "oops/oop.inline.hpp"
33 #include "prims/methodHandles.hpp"
34 #include "runtime/sharedRuntime.hpp"
35 #include "runtime/stubRoutines.hpp"
36 #include "runtime/synchronizer.hpp"
38 #ifndef CC_INTERP
39 #define __ _masm->
41 // Misc helpers
43 // Do an oop store like *(base + index + offset) = val
44 // index can be noreg; only one of index and offset may be in use (see the assert below)
45 static void do_oop_store(InterpreterMacroAssembler* _masm,
46 Register base,
47 Register index,
48 int offset,
49 Register val,
50 Register tmp,
51 BarrierSet::Name barrier,
52 bool precise) {
53 assert(tmp != val && tmp != base && tmp != index, "register collision");
54 assert(index == noreg || offset == 0, "only one offset");
55 switch (barrier) {
56 #ifndef SERIALGC
57 case BarrierSet::G1SATBCT:
58 case BarrierSet::G1SATBCTLogging:
59 {
60 // Load and record the previous value.
61 __ g1_write_barrier_pre(base, index, offset,
62 noreg /* pre_val */,
63 tmp, true /*preserve_o_regs*/);
65 if (index == noreg ) {
66 assert(Assembler::is_simm13(offset), "fix this code");
67 __ store_heap_oop(val, base, offset);
68 } else {
69 __ store_heap_oop(val, base, index);
70 }
72 // No need for post barrier if storing NULL
73 if (val != G0) {
74 if (precise) {
75 if (index == noreg) {
76 __ add(base, offset, base);
77 } else {
78 __ add(base, index, base);
79 }
80 }
81 __ g1_write_barrier_post(base, val, tmp);
82 }
83 }
84 break;
85 #endif // SERIALGC
86 case BarrierSet::CardTableModRef:
87 case BarrierSet::CardTableExtension:
88 {
89 if (index == noreg ) {
90 assert(Assembler::is_simm13(offset), "fix this code");
91 __ store_heap_oop(val, base, offset);
92 } else {
93 __ store_heap_oop(val, base, index);
94 }
95 // No need for post barrier if storing NULL
96 if (val != G0) {
97 if (precise) {
98 if (index == noreg) {
99 __ add(base, offset, base);
100 } else {
101 __ add(base, index, base);
102 }
103 }
104 __ card_write_barrier_post(base, val, tmp);
105 }
106 }
107 break;
108 case BarrierSet::ModRef:
109 case BarrierSet::Other:
110 ShouldNotReachHere();
111 break;
112 default :
113 ShouldNotReachHere();
115 }
116 }
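// NOTE (editorial sketch, not in the original file): in C-like pseudocode the
// card-table post-barrier emitted by card_write_barrier_post above is roughly,
// assuming the usual 512-byte cards (card_shift == 9):
//
//   void post_barrier(HeapWord* field_addr, jbyte* byte_map) {
//     byte_map[(uintptr_t)field_addr >> 9] = dirty_card;  // dirty the card
//   }
//
// The G1 variants additionally log the previous field value before the store
// (the SATB pre-barrier) and enqueue dirty cards rather than just marking them.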
119 //----------------------------------------------------------------------------------------------------
120 // Platform-dependent initialization
122 void TemplateTable::pd_initialize() {
123 // (none)
124 }
127 //----------------------------------------------------------------------------------------------------
128 // Condition conversion
129 Assembler::Condition ccNot(TemplateTable::Condition cc) {
130 switch (cc) {
131 case TemplateTable::equal : return Assembler::notEqual;
132 case TemplateTable::not_equal : return Assembler::equal;
133 case TemplateTable::less : return Assembler::greaterEqual;
134 case TemplateTable::less_equal : return Assembler::greater;
135 case TemplateTable::greater : return Assembler::lessEqual;
136 case TemplateTable::greater_equal: return Assembler::less;
137 }
138 ShouldNotReachHere();
139 return Assembler::zero;
140 }
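// NOTE (editorial sketch, not in the original file): conditions are inverted
// because if_cmp() branches *around* the taken-branch code; e.g. an
// if_icmpeq template effectively emits:
//
//   __ cmp(O1, Otos_i);
//   __ if_cmp(ccNot(TemplateTable::equal), false);  // jumps when NOT equal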
142 //----------------------------------------------------------------------------------------------------
143 // Miscellaneous helper routines
146 Address TemplateTable::at_bcp(int offset) {
147 assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
148 return Address(Lbcp, offset);
149 }
152 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
153 Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
154 int byte_no) {
155 // With sharing on, may need to test Method* flag.
156 if (!RewriteBytecodes) return;
157 Label L_patch_done;
159 switch (bc) {
160 case Bytecodes::_fast_aputfield:
161 case Bytecodes::_fast_bputfield:
162 case Bytecodes::_fast_cputfield:
163 case Bytecodes::_fast_dputfield:
164 case Bytecodes::_fast_fputfield:
165 case Bytecodes::_fast_iputfield:
166 case Bytecodes::_fast_lputfield:
167 case Bytecodes::_fast_sputfield:
168 {
169 // We skip bytecode quickening for putfield instructions when
170 // the put_code written to the constant pool cache is zero.
171 // This is required so that every execution of this instruction
172 // calls out to InterpreterRuntime::resolve_get_put to do
173 // additional, required work.
174 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
175 assert(load_bc_into_bc_reg, "we use bc_reg as temp");
176 __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1);
177 __ set(bc, bc_reg);
178 __ cmp_and_br_short(temp_reg, 0, Assembler::equal, Assembler::pn, L_patch_done); // don't patch
179 }
180 break;
181 default:
182 assert(byte_no == -1, "sanity");
183 if (load_bc_into_bc_reg) {
184 __ set(bc, bc_reg);
185 }
186 }
188 if (JvmtiExport::can_post_breakpoint()) {
189 Label L_fast_patch;
190 __ ldub(at_bcp(0), temp_reg);
191 __ cmp_and_br_short(temp_reg, Bytecodes::_breakpoint, Assembler::notEqual, Assembler::pt, L_fast_patch);
192 // perform the quickening, slowly, in the bowels of the breakpoint table
193 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), Lmethod, Lbcp, bc_reg);
194 __ ba_short(L_patch_done);
195 __ bind(L_fast_patch);
196 }
198 #ifdef ASSERT
199 Bytecodes::Code orig_bytecode = Bytecodes::java_code(bc);
200 Label L_okay;
201 __ ldub(at_bcp(0), temp_reg);
202 __ cmp(temp_reg, orig_bytecode);
203 __ br(Assembler::equal, false, Assembler::pt, L_okay);
204 __ delayed()->cmp(temp_reg, bc_reg);
205 __ br(Assembler::equal, false, Assembler::pt, L_okay);
206 __ delayed()->nop();
207 __ stop("patching the wrong bytecode");
208 __ bind(L_okay);
209 #endif
211 // patch bytecode
212 __ stb(bc_reg, at_bcp(0));
213 __ bind(L_patch_done);
214 }
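// NOTE (editorial sketch, not in the original file): patch_bytecode implements
// "quickening": once the slow bytecode has done its one-time resolution work,
// the opcode byte at bcp is overwritten so later executions dispatch to a fast
// template. An illustrative (hypothetical) stream rewrite:
//
//   before:  aload_0; getfield #5        // resolves the field via the VM
//   after:   aload_0; fast_agetfield #5  // reads straight from the cp cache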
216 //----------------------------------------------------------------------------------------------------
217 // Individual instructions
219 void TemplateTable::nop() {
220 transition(vtos, vtos);
221 // nothing to do
222 }
224 void TemplateTable::shouldnotreachhere() {
225 transition(vtos, vtos);
226 __ stop("shouldnotreachhere bytecode");
227 }
229 void TemplateTable::aconst_null() {
230 transition(vtos, atos);
231 __ clr(Otos_i);
232 }
235 void TemplateTable::iconst(int value) {
236 transition(vtos, itos);
237 __ set(value, Otos_i);
238 }
241 void TemplateTable::lconst(int value) {
242 transition(vtos, ltos);
243 assert(value >= 0, "check this code");
244 #ifdef _LP64
245 __ set(value, Otos_l);
246 #else
247 __ set(value, Otos_l2);
248 __ clr( Otos_l1);
249 #endif
250 }
253 void TemplateTable::fconst(int value) {
254 transition(vtos, ftos);
255 static float zero = 0.0, one = 1.0, two = 2.0;
256 float* p;
257 switch( value ) {
258 default: ShouldNotReachHere();
259 case 0: p = &zero; break;
260 case 1: p = &one; break;
261 case 2: p = &two; break;
262 }
263 AddressLiteral a(p);
264 __ sethi(a, G3_scratch);
265 __ ldf(FloatRegisterImpl::S, G3_scratch, a.low10(), Ftos_f);
266 }
269 void TemplateTable::dconst(int value) {
270 transition(vtos, dtos);
271 static double zero = 0.0, one = 1.0;
272 double* p;
273 switch( value ) {
274 default: ShouldNotReachHere();
275 case 0: p = &zero; break;
276 case 1: p = &one; break;
277 }
278 AddressLiteral a(p);
279 __ sethi(a, G3_scratch);
280 __ ldf(FloatRegisterImpl::D, G3_scratch, a.low10(), Ftos_d);
281 }
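// NOTE (editorial sketch, not in the original file): fconst/dconst load their
// literal from C static storage with the standard SPARC absolute-address
// idiom: sethi materializes the upper 22 address bits and low10() supplies
// the remaining 10-bit immediate, i.e. in assembly:
//
//   sethi %hi(&one), %g3        ! g3 = address & ~0x3ff
//   ldf   [%g3 + %lo(&one)], %f0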
284 // %%%%% Should factor most snippet templates across platforms
286 void TemplateTable::bipush() {
287 transition(vtos, itos);
288 __ ldsb( at_bcp(1), Otos_i );
289 }
291 void TemplateTable::sipush() {
292 transition(vtos, itos);
293 __ get_2_byte_integer_at_bcp(1, G3_scratch, Otos_i, InterpreterMacroAssembler::Signed);
294 }
296 void TemplateTable::ldc(bool wide) {
297 transition(vtos, vtos);
298 Label call_ldc, notInt, isString, notString, notClass, exit;
300 if (wide) {
301 __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
302 } else {
303 __ ldub(Lbcp, 1, O1);
304 }
305 __ get_cpool_and_tags(O0, O2);
307 const int base_offset = ConstantPool::header_size() * wordSize;
308 const int tags_offset = Array<u1>::base_offset_in_bytes();
310 // get type from tags
311 __ add(O2, tags_offset, O2);
312 __ ldub(O2, O1, O2);
314 // unresolved class? If so, must resolve
315 __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedClass, Assembler::equal, Assembler::pt, call_ldc);
317 // unresolved class in error state
318 __ cmp_and_brx_short(O2, JVM_CONSTANT_UnresolvedClassInError, Assembler::equal, Assembler::pn, call_ldc);
320 __ cmp(O2, JVM_CONSTANT_Class); // need to call vm to get java mirror of the class
321 __ brx(Assembler::notEqual, true, Assembler::pt, notClass);
322 __ delayed()->add(O0, base_offset, O0);
324 __ bind(call_ldc);
325 __ set(wide, O1);
326 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), O1);
327 __ push(atos);
328 __ ba_short(exit);
330 __ bind(notClass);
331 // __ add(O0, base_offset, O0);
332 __ sll(O1, LogBytesPerWord, O1);
333 __ cmp(O2, JVM_CONSTANT_Integer);
334 __ brx(Assembler::notEqual, true, Assembler::pt, notInt);
335 __ delayed()->cmp(O2, JVM_CONSTANT_String);
336 __ ld(O0, O1, Otos_i);
337 __ push(itos);
338 __ ba_short(exit);
340 __ bind(notInt);
341 // __ cmp(O2, JVM_CONSTANT_String);
342 __ brx(Assembler::equal, true, Assembler::pt, isString);
343 __ delayed()->cmp(O2, JVM_CONSTANT_Object);
344 __ brx(Assembler::notEqual, true, Assembler::pt, notString);
345 __ delayed()->ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
346 __ bind(isString);
347 __ stop("string should be rewritten to fast_aldc");
348 __ ba_short(exit);
350 __ bind(notString);
351 // __ ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
352 __ push(ftos);
354 __ bind(exit);
355 }
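// NOTE (editorial sketch, not in the original file): the tag dispatch in ldc
// above is, in rough pseudocode (push_a/vm_ldc are hypothetical shorthands):
//
//   switch (tags[index]) {
//     case JVM_CONSTANT_UnresolvedClass:
//     case JVM_CONSTANT_UnresolvedClassInError:
//     case JVM_CONSTANT_Class:    push_a(vm_ldc());     break; // need mirror
//     case JVM_CONSTANT_Integer:  push_i(pool[index]);  break;
//     case JVM_CONSTANT_String:
//     case JVM_CONSTANT_Object:   stop();               break; // -> fast_aldc
//     default:                    push_f(pool[index]);  break; // Float
//   }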
357 // Fast path for caching oop constants.
358 // %%% We should use this to handle Class and String constants also.
359 // %%% It will simplify the ldc/primitive path considerably.
360 void TemplateTable::fast_aldc(bool wide) {
361 transition(vtos, atos);
363 int index_size = wide ? sizeof(u2) : sizeof(u1);
364 Label resolved;
366 // We are resolved if the resolved reference cache entry contains a
367 // non-null object (CallSite, etc.)
368 assert_different_registers(Otos_i, G3_scratch);
369 __ get_cache_index_at_bcp(Otos_i, G3_scratch, 1, index_size); // load index => G3_scratch
370 __ load_resolved_reference_at_index(Otos_i, G3_scratch);
371 __ tst(Otos_i);
372 __ br(Assembler::notEqual, false, Assembler::pt, resolved);
373 __ delayed()->set((int)bytecode(), O1);
375 address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
377 // first time invocation - must resolve first
378 __ call_VM(Otos_i, entry, O1);
379 __ bind(resolved);
380 __ verify_oop(Otos_i);
381 }
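// NOTE (editorial note, not in the original file): with this change the
// ConstantPool lives in native metaspace, so oop constants (String, CallSite,
// etc.) are kept in a separate Java object array -- the "resolved references"
// array -- which is what load_resolved_reference_at_index indexes into.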
384 void TemplateTable::ldc2_w() {
385 transition(vtos, vtos);
386 Label Long, exit;
388 __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
389 __ get_cpool_and_tags(O0, O2);
391 const int base_offset = ConstantPool::header_size() * wordSize;
392 const int tags_offset = Array<u1>::base_offset_in_bytes();
393 // get type from tags
394 __ add(O2, tags_offset, O2);
395 __ ldub(O2, O1, O2);
397 __ sll(O1, LogBytesPerWord, O1);
398 __ add(O0, O1, G3_scratch);
400 __ cmp_and_brx_short(O2, JVM_CONSTANT_Double, Assembler::notEqual, Assembler::pt, Long);
401 // A double can be placed at word-aligned locations in the constant pool.
402 // Check out Conversions.java for an example.
403 // Also ConstantPool::header_size() is 20, which makes it very difficult
404 // to double-align doubles in the constant pool. SG, 11/7/97
405 #ifdef _LP64
406 __ ldf(FloatRegisterImpl::D, G3_scratch, base_offset, Ftos_d);
407 #else
408 FloatRegister f = Ftos_d;
409 __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset, f);
410 __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset + sizeof(jdouble)/2,
411 f->successor());
412 #endif
413 __ push(dtos);
414 __ ba_short(exit);
416 __ bind(Long);
417 #ifdef _LP64
418 __ ldx(G3_scratch, base_offset, Otos_l);
419 #else
420 __ ld(G3_scratch, base_offset, Otos_l);
421 __ ld(G3_scratch, base_offset + sizeof(jlong)/2, Otos_l->successor());
422 #endif
423 __ push(ltos);
425 __ bind(exit);
426 }
429 void TemplateTable::locals_index(Register reg, int offset) {
430 __ ldub( at_bcp(offset), reg );
431 }
434 void TemplateTable::locals_index_wide(Register reg) {
435 // offset is 2, not 1, because Lbcp points to wide prefix code
436 __ get_2_byte_integer_at_bcp(2, G4_scratch, reg, InterpreterMacroAssembler::Unsigned);
437 }
439 void TemplateTable::iload() {
440 transition(vtos, itos);
441 // Rewrite iload,iload pair into fast_iload2
442 // iload,caload pair into fast_icaload
443 if (RewriteFrequentPairs) {
444 Label rewrite, done;
446 // get next byte
447 __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_iload)), G3_scratch);
449 // if _iload, wait to rewrite to iload2. We only want to rewrite the
450 // last two iloads in a pair. Comparing against fast_iload means that
451 // the next bytecode is neither an iload nor a caload, and therefore
452 // an iload pair.
453 __ cmp_and_br_short(G3_scratch, (int)Bytecodes::_iload, Assembler::equal, Assembler::pn, done);
455 __ cmp(G3_scratch, (int)Bytecodes::_fast_iload);
456 __ br(Assembler::equal, false, Assembler::pn, rewrite);
457 __ delayed()->set(Bytecodes::_fast_iload2, G4_scratch);
459 __ cmp(G3_scratch, (int)Bytecodes::_caload);
460 __ br(Assembler::equal, false, Assembler::pn, rewrite);
461 __ delayed()->set(Bytecodes::_fast_icaload, G4_scratch);
463 __ set(Bytecodes::_fast_iload, G4_scratch); // don't check again
464 // rewrite
465 // G4_scratch: fast bytecode
466 __ bind(rewrite);
467 patch_bytecode(Bytecodes::_iload, G4_scratch, G3_scratch, false);
468 __ bind(done);
469 }
471 // Get the local value into tos
472 locals_index(G3_scratch);
473 __ access_local_int( G3_scratch, Otos_i );
474 }
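// NOTE (editorial sketch, not in the original file): the pair rewriting above
// eventually produces, for example:
//
//   iload #a; iload #b   -->  fast_iload2 #a ... #b   // two loads, one dispatch
//   iload #a; caload     -->  fast_icaload #a         // load + array fetch fused
//
// Only the current opcode byte is patched; fast_iload2 re-reads the second
// local index at bcp + 3 (see fast_iload2 below).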
476 void TemplateTable::fast_iload2() {
477 transition(vtos, itos);
478 locals_index(G3_scratch);
479 __ access_local_int( G3_scratch, Otos_i );
480 __ push_i();
481 locals_index(G3_scratch, 3); // get next bytecode's local index.
482 __ access_local_int( G3_scratch, Otos_i );
483 }
485 void TemplateTable::fast_iload() {
486 transition(vtos, itos);
487 locals_index(G3_scratch);
488 __ access_local_int( G3_scratch, Otos_i );
489 }
491 void TemplateTable::lload() {
492 transition(vtos, ltos);
493 locals_index(G3_scratch);
494 __ access_local_long( G3_scratch, Otos_l );
495 }
498 void TemplateTable::fload() {
499 transition(vtos, ftos);
500 locals_index(G3_scratch);
501 __ access_local_float( G3_scratch, Ftos_f );
502 }
505 void TemplateTable::dload() {
506 transition(vtos, dtos);
507 locals_index(G3_scratch);
508 __ access_local_double( G3_scratch, Ftos_d );
509 }
512 void TemplateTable::aload() {
513 transition(vtos, atos);
514 locals_index(G3_scratch);
515 __ access_local_ptr( G3_scratch, Otos_i);
516 }
519 void TemplateTable::wide_iload() {
520 transition(vtos, itos);
521 locals_index_wide(G3_scratch);
522 __ access_local_int( G3_scratch, Otos_i );
523 }
526 void TemplateTable::wide_lload() {
527 transition(vtos, ltos);
528 locals_index_wide(G3_scratch);
529 __ access_local_long( G3_scratch, Otos_l );
530 }
533 void TemplateTable::wide_fload() {
534 transition(vtos, ftos);
535 locals_index_wide(G3_scratch);
536 __ access_local_float( G3_scratch, Ftos_f );
537 }
540 void TemplateTable::wide_dload() {
541 transition(vtos, dtos);
542 locals_index_wide(G3_scratch);
543 __ access_local_double( G3_scratch, Ftos_d );
544 }
547 void TemplateTable::wide_aload() {
548 transition(vtos, atos);
549 locals_index_wide(G3_scratch);
550 __ access_local_ptr( G3_scratch, Otos_i );
551 __ verify_oop(Otos_i);
552 }
555 void TemplateTable::iaload() {
556 transition(itos, itos);
557 // Otos_i: index
558 // tos: array
559 __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
560 __ ld(O3, arrayOopDesc::base_offset_in_bytes(T_INT), Otos_i);
561 }
564 void TemplateTable::laload() {
565 transition(itos, ltos);
566 // Otos_i: index
567 // O2: array
568 __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
569 __ ld_long(O3, arrayOopDesc::base_offset_in_bytes(T_LONG), Otos_l);
570 }
573 void TemplateTable::faload() {
574 transition(itos, ftos);
575 // Otos_i: index
576 // O2: array
577 __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
578 __ ldf(FloatRegisterImpl::S, O3, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Ftos_f);
579 }
582 void TemplateTable::daload() {
583 transition(itos, dtos);
584 // Otos_i: index
585 // O2: array
586 __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
587 __ ldf(FloatRegisterImpl::D, O3, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Ftos_d);
588 }
591 void TemplateTable::aaload() {
592 transition(itos, atos);
593 // Otos_i: index
594 // tos: array
595 __ index_check(O2, Otos_i, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O3);
596 __ load_heap_oop(O3, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i);
597 __ verify_oop(Otos_i);
598 }
601 void TemplateTable::baload() {
602 transition(itos, itos);
603 // Otos_i: index
604 // tos: array
605 __ index_check(O2, Otos_i, 0, G3_scratch, O3);
606 __ ldsb(O3, arrayOopDesc::base_offset_in_bytes(T_BYTE), Otos_i);
607 }
610 void TemplateTable::caload() {
611 transition(itos, itos);
612 // Otos_i: index
613 // tos: array
614 __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
615 __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
616 }
618 void TemplateTable::fast_icaload() {
619 transition(vtos, itos);
620 // Otos_i: index
621 // tos: array
622 locals_index(G3_scratch);
623 __ access_local_int( G3_scratch, Otos_i );
624 __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
625 __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
626 }
629 void TemplateTable::saload() {
630 transition(itos, itos);
631 // Otos_i: index
632 // tos: array
633 __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
634 __ ldsh(O3, arrayOopDesc::base_offset_in_bytes(T_SHORT), Otos_i);
635 }
638 void TemplateTable::iload(int n) {
639 transition(vtos, itos);
640 __ ld( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
641 }
644 void TemplateTable::lload(int n) {
645 transition(vtos, ltos);
646 assert(n+1 < Argument::n_register_parameters, "would need more code");
647 __ load_unaligned_long(Llocals, Interpreter::local_offset_in_bytes(n+1), Otos_l);
648 }
651 void TemplateTable::fload(int n) {
652 transition(vtos, ftos);
653 assert(n < Argument::n_register_parameters, "would need more code");
654 __ ldf( FloatRegisterImpl::S, Llocals, Interpreter::local_offset_in_bytes(n), Ftos_f );
655 }
658 void TemplateTable::dload(int n) {
659 transition(vtos, dtos);
660 FloatRegister dst = Ftos_d;
661 __ load_unaligned_double(Llocals, Interpreter::local_offset_in_bytes(n+1), dst);
662 }
665 void TemplateTable::aload(int n) {
666 transition(vtos, atos);
667 __ ld_ptr( Llocals, Interpreter::local_offset_in_bytes(n), Otos_i );
668 }
671 void TemplateTable::aload_0() {
672 transition(vtos, atos);
674 // According to bytecode histograms, the pairs:
675 //
676 // _aload_0, _fast_igetfield (itos)
677 // _aload_0, _fast_agetfield (atos)
678 // _aload_0, _fast_fgetfield (ftos)
679 //
680 // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
681 // bytecode checks the next bytecode and then rewrites the current
682 // bytecode into a pair bytecode; otherwise it rewrites the current
683 // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
684 //
685 if (RewriteFrequentPairs) {
686 Label rewrite, done;
688 // get next byte
689 __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)), G3_scratch);
691 // do actual aload_0
692 aload(0);
694 // if _getfield then wait with rewrite
695 __ cmp_and_br_short(G3_scratch, (int)Bytecodes::_getfield, Assembler::equal, Assembler::pn, done);
697 // if _igetfield then rewrite to _fast_iaccess_0
698 assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
699 __ cmp(G3_scratch, (int)Bytecodes::_fast_igetfield);
700 __ br(Assembler::equal, false, Assembler::pn, rewrite);
701 __ delayed()->set(Bytecodes::_fast_iaccess_0, G4_scratch);
703 // if _agetfield then rewrite to _fast_aaccess_0
704 assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
705 __ cmp(G3_scratch, (int)Bytecodes::_fast_agetfield);
706 __ br(Assembler::equal, false, Assembler::pn, rewrite);
707 __ delayed()->set(Bytecodes::_fast_aaccess_0, G4_scratch);
709 // if _fgetfield then rewrite to _fast_faccess_0
710 assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
711 __ cmp(G3_scratch, (int)Bytecodes::_fast_fgetfield);
712 __ br(Assembler::equal, false, Assembler::pn, rewrite);
713 __ delayed()->set(Bytecodes::_fast_faccess_0, G4_scratch);
715 // else rewrite to _fast_aload0
716 assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
717 __ set(Bytecodes::_fast_aload_0, G4_scratch);
719 // rewrite
720 // G4_scratch: fast bytecode
721 __ bind(rewrite);
722 patch_bytecode(Bytecodes::_aload_0, G4_scratch, G3_scratch, false);
723 __ bind(done);
724 } else {
725 aload(0);
726 }
727 }
730 void TemplateTable::istore() {
731 transition(itos, vtos);
732 locals_index(G3_scratch);
733 __ store_local_int( G3_scratch, Otos_i );
734 }
737 void TemplateTable::lstore() {
738 transition(ltos, vtos);
739 locals_index(G3_scratch);
740 __ store_local_long( G3_scratch, Otos_l );
741 }
744 void TemplateTable::fstore() {
745 transition(ftos, vtos);
746 locals_index(G3_scratch);
747 __ store_local_float( G3_scratch, Ftos_f );
748 }
751 void TemplateTable::dstore() {
752 transition(dtos, vtos);
753 locals_index(G3_scratch);
754 __ store_local_double( G3_scratch, Ftos_d );
755 }
758 void TemplateTable::astore() {
759 transition(vtos, vtos);
760 __ load_ptr(0, Otos_i);
761 __ inc(Lesp, Interpreter::stackElementSize);
762 __ verify_oop_or_return_address(Otos_i, G3_scratch);
763 locals_index(G3_scratch);
764 __ store_local_ptr(G3_scratch, Otos_i);
765 }
768 void TemplateTable::wide_istore() {
769 transition(vtos, vtos);
770 __ pop_i();
771 locals_index_wide(G3_scratch);
772 __ store_local_int( G3_scratch, Otos_i );
773 }
776 void TemplateTable::wide_lstore() {
777 transition(vtos, vtos);
778 __ pop_l();
779 locals_index_wide(G3_scratch);
780 __ store_local_long( G3_scratch, Otos_l );
781 }
784 void TemplateTable::wide_fstore() {
785 transition(vtos, vtos);
786 __ pop_f();
787 locals_index_wide(G3_scratch);
788 __ store_local_float( G3_scratch, Ftos_f );
789 }
792 void TemplateTable::wide_dstore() {
793 transition(vtos, vtos);
794 __ pop_d();
795 locals_index_wide(G3_scratch);
796 __ store_local_double( G3_scratch, Ftos_d );
797 }
800 void TemplateTable::wide_astore() {
801 transition(vtos, vtos);
802 __ load_ptr(0, Otos_i);
803 __ inc(Lesp, Interpreter::stackElementSize);
804 __ verify_oop_or_return_address(Otos_i, G3_scratch);
805 locals_index_wide(G3_scratch);
806 __ store_local_ptr(G3_scratch, Otos_i);
807 }
810 void TemplateTable::iastore() {
811 transition(itos, vtos);
812 __ pop_i(O2); // index
813 // Otos_i: val
814 // O3: array
815 __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
816 __ st(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_INT));
817 }
820 void TemplateTable::lastore() {
821 transition(ltos, vtos);
822 __ pop_i(O2); // index
823 // Otos_l: val
824 // O3: array
825 __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
826 __ st_long(Otos_l, O2, arrayOopDesc::base_offset_in_bytes(T_LONG));
827 }
830 void TemplateTable::fastore() {
831 transition(ftos, vtos);
832 __ pop_i(O2); // index
833 // Ftos_f: val
834 // O3: array
835 __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
836 __ stf(FloatRegisterImpl::S, Ftos_f, O2, arrayOopDesc::base_offset_in_bytes(T_FLOAT));
837 }
840 void TemplateTable::dastore() {
841 transition(dtos, vtos);
842 __ pop_i(O2); // index
843 // Ftos_d: val
844 // O3: array
845 __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
846 __ stf(FloatRegisterImpl::D, Ftos_d, O2, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
847 }
850 void TemplateTable::aastore() {
851 Label store_ok, is_null, done;
852 transition(vtos, vtos);
853 __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
854 __ ld(Lesp, Interpreter::expr_offset_in_bytes(1), O2); // get index
855 __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(2), O3); // get array
856 // Otos_i: val
857 // O2: index
858 // O3: array
859 __ verify_oop(Otos_i);
860 __ index_check_without_pop(O3, O2, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O1);
862 // do array store check - check for NULL value first
863 __ br_null_short( Otos_i, Assembler::pn, is_null );
865 __ load_klass(O3, O4); // get array klass
866 __ load_klass(Otos_i, O5); // get value klass
868 // do fast instanceof cache test
870 __ ld_ptr(O4, in_bytes(ObjArrayKlass::element_klass_offset()), O4);
872 assert(Otos_i == O0, "just checking");
874 // Otos_i: value
875 // O1: addr - offset
876 // O2: index
877 // O3: array
878 // O4: array element klass
879 // O5: value klass
881 // Address element(O1, 0, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
883 // Generate a fast subtype check. Branch to store_ok if no
884 // failure. Throw if failure.
885 __ gen_subtype_check( O5, O4, G3_scratch, G4_scratch, G1_scratch, store_ok );
887 // Not a subtype; so must throw exception
888 __ throw_if_not_x( Assembler::never, Interpreter::_throw_ArrayStoreException_entry, G3_scratch );
890 // Store is OK.
891 __ bind(store_ok);
892 do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i, G3_scratch, _bs->kind(), true);
894 __ ba(done);
895 __ delayed()->inc(Lesp, 3* Interpreter::stackElementSize); // adj sp (pops array, index and value)
897 __ bind(is_null);
898 do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), G0, G4_scratch, _bs->kind(), true);
900 __ profile_null_seen(G3_scratch);
901 __ inc(Lesp, 3* Interpreter::stackElementSize); // adj sp (pops array, index and value)
902 __ bind(done);
903 }
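// NOTE (editorial sketch, not in the original file): the aastore sequence
// above is the usual covariant-array store check, roughly:
//
//   if (value == NULL) {
//     array[index] = NULL;                 // null needs no type check
//   } else if (value->klass()->is_subtype_of(array_element_klass)) {
//     array[index] = value;                // plus the GC write barrier
//   } else {
//     throw ArrayStoreException;
//   }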
906 void TemplateTable::bastore() {
907 transition(itos, vtos);
908 __ pop_i(O2); // index
909 // Otos_i: val
910 // O3: array
911 __ index_check(O3, O2, 0, G3_scratch, O2);
912 __ stb(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_BYTE));
913 }
916 void TemplateTable::castore() {
917 transition(itos, vtos);
918 __ pop_i(O2); // index
919 // Otos_i: val
920 // O3: array
921 __ index_check(O3, O2, LogBytesPerShort, G3_scratch, O2);
922 __ sth(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_CHAR));
923 }
926 void TemplateTable::sastore() {
927 // %%%%% Factor across platforms
928 castore();
929 }
932 void TemplateTable::istore(int n) {
933 transition(itos, vtos);
934 __ st(Otos_i, Llocals, Interpreter::local_offset_in_bytes(n));
935 }
938 void TemplateTable::lstore(int n) {
939 transition(ltos, vtos);
940 assert(n+1 < Argument::n_register_parameters, "only handle register cases");
941 __ store_unaligned_long(Otos_l, Llocals, Interpreter::local_offset_in_bytes(n+1));
943 }
946 void TemplateTable::fstore(int n) {
947 transition(ftos, vtos);
948 assert(n < Argument::n_register_parameters, "only handle register cases");
949 __ stf(FloatRegisterImpl::S, Ftos_f, Llocals, Interpreter::local_offset_in_bytes(n));
950 }
953 void TemplateTable::dstore(int n) {
954 transition(dtos, vtos);
955 FloatRegister src = Ftos_d;
956 __ store_unaligned_double(src, Llocals, Interpreter::local_offset_in_bytes(n+1));
957 }
960 void TemplateTable::astore(int n) {
961 transition(vtos, vtos);
962 __ load_ptr(0, Otos_i);
963 __ inc(Lesp, Interpreter::stackElementSize);
964 __ verify_oop_or_return_address(Otos_i, G3_scratch);
965 __ store_local_ptr(n, Otos_i);
966 }
969 void TemplateTable::pop() {
970 transition(vtos, vtos);
971 __ inc(Lesp, Interpreter::stackElementSize);
972 }
975 void TemplateTable::pop2() {
976 transition(vtos, vtos);
977 __ inc(Lesp, 2 * Interpreter::stackElementSize);
978 }
981 void TemplateTable::dup() {
982 transition(vtos, vtos);
983 // stack: ..., a
984 // load a and tag
985 __ load_ptr(0, Otos_i);
986 __ push_ptr(Otos_i);
987 // stack: ..., a, a
988 }
991 void TemplateTable::dup_x1() {
992 transition(vtos, vtos);
993 // stack: ..., a, b
994 __ load_ptr( 1, G3_scratch); // get a
995 __ load_ptr( 0, Otos_l1); // get b
996 __ store_ptr(1, Otos_l1); // put b
997 __ store_ptr(0, G3_scratch); // put a - like swap
998 __ push_ptr(Otos_l1); // push b
999 // stack: ..., b, a, b
1000 }
1003 void TemplateTable::dup_x2() {
1004 transition(vtos, vtos);
1005 // stack: ..., a, b, c
1006 // get c and push on stack, reuse registers
1007 __ load_ptr( 0, G3_scratch); // get c
1008 __ push_ptr(G3_scratch); // push c with tag
1009 // stack: ..., a, b, c, c (c in reg) (Lesp - 4)
1010 // (stack offsets n+1 now)
1011 __ load_ptr( 3, Otos_l1); // get a
1012 __ store_ptr(3, G3_scratch); // put c at 3
1013 // stack: ..., c, b, c, c (a in reg)
1014 __ load_ptr( 2, G3_scratch); // get b
1015 __ store_ptr(2, Otos_l1); // put a at 2
1016 // stack: ..., c, a, c, c (b in reg)
1017 __ store_ptr(1, G3_scratch); // put b at 1
1018 // stack: ..., c, a, b, c
1019 }
1022 void TemplateTable::dup2() {
1023 transition(vtos, vtos);
1024 __ load_ptr(1, G3_scratch); // get a
1025 __ load_ptr(0, Otos_l1); // get b
1026 __ push_ptr(G3_scratch); // push a
1027 __ push_ptr(Otos_l1); // push b
1028 // stack: ..., a, b, a, b
1029 }
1032 void TemplateTable::dup2_x1() {
1033 transition(vtos, vtos);
1034 // stack: ..., a, b, c
1035 __ load_ptr( 1, Lscratch); // get b
1036 __ load_ptr( 2, Otos_l1); // get a
1037 __ store_ptr(2, Lscratch); // put b at a
1038 // stack: ..., b, b, c
1039 __ load_ptr( 0, G3_scratch); // get c
1040 __ store_ptr(1, G3_scratch); // put c at b
1041 // stack: ..., b, c, c
1042 __ store_ptr(0, Otos_l1); // put a at c
1043 // stack: ..., b, c, a
1044 __ push_ptr(Lscratch); // push b
1045 __ push_ptr(G3_scratch); // push c
1046 // stack: ..., b, c, a, b, c
1047 }
1050 // The spec says that these types can be a mixture of category 1 (1 word)
1051 // types and/or category 2 types (long and doubles)
1052 void TemplateTable::dup2_x2() {
1053 transition(vtos, vtos);
1054 // stack: ..., a, b, c, d
1055 __ load_ptr( 1, Lscratch); // get c
1056 __ load_ptr( 3, Otos_l1); // get a
1057 __ store_ptr(3, Lscratch); // put c at 3
1058 __ store_ptr(1, Otos_l1); // put a at 1
1059 // stack: ..., c, b, a, d
1060 __ load_ptr( 2, G3_scratch); // get b
1061 __ load_ptr( 0, Otos_l1); // get d
1062 __ store_ptr(0, G3_scratch); // put b at 0
1063 __ store_ptr(2, Otos_l1); // put d at 2
1064 // stack: ..., c, d, a, b
1065 __ push_ptr(Lscratch); // push c
1066 __ push_ptr(Otos_l1); // push d
1067 // stack: ..., c, d, a, b, c, d
1068 }
1071 void TemplateTable::swap() {
1072 transition(vtos, vtos);
1073 // stack: ..., a, b
1074 __ load_ptr( 1, G3_scratch); // get a
1075 __ load_ptr( 0, Otos_l1); // get b
1076 __ store_ptr(0, G3_scratch); // put b
1077 __ store_ptr(1, Otos_l1); // put a
1078 // stack: ..., b, a
1079 }
1082 void TemplateTable::iop2(Operation op) {
1083 transition(itos, itos);
1084 __ pop_i(O1);
1085 switch (op) {
1086 case add: __ add(O1, Otos_i, Otos_i); break;
1087 case sub: __ sub(O1, Otos_i, Otos_i); break;
1088 // %%%%% Mul may not exist: better to call .mul?
1089 case mul: __ smul(O1, Otos_i, Otos_i); break;
1090 case _and: __ and3(O1, Otos_i, Otos_i); break;
1091 case _or: __ or3(O1, Otos_i, Otos_i); break;
1092 case _xor: __ xor3(O1, Otos_i, Otos_i); break;
1093 case shl: __ sll(O1, Otos_i, Otos_i); break;
1094 case shr: __ sra(O1, Otos_i, Otos_i); break;
1095 case ushr: __ srl(O1, Otos_i, Otos_i); break;
1096 default: ShouldNotReachHere();
1097 }
1098 }
1101 void TemplateTable::lop2(Operation op) {
1102 transition(ltos, ltos);
1103 __ pop_l(O2);
1104 switch (op) {
1105 #ifdef _LP64
1106 case add: __ add(O2, Otos_l, Otos_l); break;
1107 case sub: __ sub(O2, Otos_l, Otos_l); break;
1108 case _and: __ and3(O2, Otos_l, Otos_l); break;
1109 case _or: __ or3(O2, Otos_l, Otos_l); break;
1110 case _xor: __ xor3(O2, Otos_l, Otos_l); break;
1111 #else
1112 case add: __ addcc(O3, Otos_l2, Otos_l2); __ addc(O2, Otos_l1, Otos_l1); break;
1113 case sub: __ subcc(O3, Otos_l2, Otos_l2); __ subc(O2, Otos_l1, Otos_l1); break;
1114 case _and: __ and3(O3, Otos_l2, Otos_l2); __ and3(O2, Otos_l1, Otos_l1); break;
1115 case _or: __ or3(O3, Otos_l2, Otos_l2); __ or3(O2, Otos_l1, Otos_l1); break;
1116 case _xor: __ xor3(O3, Otos_l2, Otos_l2); __ xor3(O2, Otos_l1, Otos_l1); break;
1117 #endif
1118 default: ShouldNotReachHere();
1119 }
1120 }
1123 void TemplateTable::idiv() {
1124 // %%%%% Later: For SPARC/V7 call .sdiv library routine,
1125 // %%%%% Use ldsw...sdivx on pure V9 ABI. 64 bit safe.
1127 transition(itos, itos);
1128 __ pop_i(O1); // get 1st op
1130 // Y contains upper 32 bits of result, set it to 0 or all ones
1131 __ wry(G0);
1132 __ mov(~0, G3_scratch);
1134 __ tst(O1);
1135 Label neg;
1136 __ br(Assembler::negative, true, Assembler::pn, neg);
1137 __ delayed()->wry(G3_scratch);
1138 __ bind(neg);
1140 Label ok;
1141 __ tst(Otos_i);
1142 __ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch );
1144 const int min_int = 0x80000000;
1145 Label regular;
1146 __ cmp(Otos_i, -1);
1147 __ br(Assembler::notEqual, false, Assembler::pt, regular);
1148 #ifdef _LP64
1149 // Don't put set in delay slot
1150 // Set will turn into multiple instructions in 64 bit mode
1151 __ delayed()->nop();
1152 __ set(min_int, G4_scratch);
1153 #else
1154 __ delayed()->set(min_int, G4_scratch);
1155 #endif
1156 Label done;
1157 __ cmp(O1, G4_scratch);
1158 __ br(Assembler::equal, true, Assembler::pt, done);
1159 __ delayed()->mov(O1, Otos_i); // (mov only executed if branch taken)
1161 __ bind(regular);
1162 __ sdiv(O1, Otos_i, Otos_i); // note: irem uses O1 after this instruction!
1163 __ bind(done);
1164 }
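// NOTE (editorial sketch, not in the original file): the min_int/-1 test in
// idiv guards the one overflowing case of 32-bit signed division:
//
//   0x80000000 / -1  ==  0x80000000   // idiv wraps here, per the JVM spec
//
// so the dividend is returned unchanged rather than relying on sdiv's
// overflow behavior.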
1167 void TemplateTable::irem() {
1168 transition(itos, itos);
1169 __ mov(Otos_i, O2); // save divisor
1170 idiv(); // %%%% Hack: exploits fact that idiv leaves dividend in O1
1171 __ smul(Otos_i, O2, Otos_i);
1172 __ sub(O1, Otos_i, Otos_i);
1173 }
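// NOTE (editorial sketch, not in the original file): irem derives the
// remainder from the quotient idiv just computed:
//
//   a % b  ==  a - (a / b) * b    // O1 still holds the dividend a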
1176 void TemplateTable::lmul() {
1177 transition(ltos, ltos);
1178 __ pop_l(O2);
1179 #ifdef _LP64
1180 __ mulx(Otos_l, O2, Otos_l);
1181 #else
1182 __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lmul));
1183 #endif
1185 }
1188 void TemplateTable::ldiv() {
1189 transition(ltos, ltos);
1191 // check for zero
1192 __ pop_l(O2);
1193 #ifdef _LP64
1194 __ tst(Otos_l);
1195 __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
1196 __ sdivx(O2, Otos_l, Otos_l);
1197 #else
1198 __ orcc(Otos_l1, Otos_l2, G0);
1199 __ throw_if_not_icc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
1200 __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
1201 #endif
1202 }
1205 void TemplateTable::lrem() {
1206 transition(ltos, ltos);
1208 // check for zero
1209 __ pop_l(O2);
1210 #ifdef _LP64
1211 __ tst(Otos_l);
1212 __ throw_if_not_xcc( Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
1213 __ sdivx(O2, Otos_l, Otos_l2);
1214 __ mulx (Otos_l2, Otos_l, Otos_l2);
1215 __ sub (O2, Otos_l2, Otos_l);
1216 #else
1217 __ orcc(Otos_l1, Otos_l2, G0);
1218 __ throw_if_not_icc(Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
1219 __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
1220 #endif
1221 }
1224 void TemplateTable::lshl() {
1225 transition(itos, ltos); // %%%% could optimize, fill delay slot or opt for ultra
1227 __ pop_l(O2); // shift value in O2, O3
1228 #ifdef _LP64
1229 __ sllx(O2, Otos_i, Otos_l);
1230 #else
1231 __ lshl(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
1232 #endif
1233 }
1236 void TemplateTable::lshr() {
1237 transition(itos, ltos); // %%%% see lshl comment
1239 __ pop_l(O2); // shift value in O2, O3
1240 #ifdef _LP64
1241 __ srax(O2, Otos_i, Otos_l);
1242 #else
1243 __ lshr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
1244 #endif
1245 }
1249 void TemplateTable::lushr() {
1250 transition(itos, ltos); // %%%% see lshl comment
1252 __ pop_l(O2); // shift value in O2, O3
1253 #ifdef _LP64
1254 __ srlx(O2, Otos_i, Otos_l);
1255 #else
1256 __ lushr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
1257 #endif
1258 }
1261 void TemplateTable::fop2(Operation op) {
1262 transition(ftos, ftos);
1263 switch (op) {
1264 case add: __ pop_f(F4); __ fadd(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break;
1265 case sub: __ pop_f(F4); __ fsub(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break;
1266 case mul: __ pop_f(F4); __ fmul(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break;
1267 case div: __ pop_f(F4); __ fdiv(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f); break;
1268 case rem:
1269 assert(Ftos_f == F0, "just checking");
1270 #ifdef _LP64
1271 // LP64 calling conventions use F1, F3 for passing 2 floats
1272 __ pop_f(F1);
1273 __ fmov(FloatRegisterImpl::S, Ftos_f, F3);
1274 #else
1275 __ pop_i(O0);
1276 __ stf(FloatRegisterImpl::S, Ftos_f, __ d_tmp);
1277 __ ld( __ d_tmp, O1 );
1278 #endif
1279 __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::frem));
1280 assert( Ftos_f == F0, "fix this code" );
1281 break;
1283 default: ShouldNotReachHere();
1284 }
1285 }
1288 void TemplateTable::dop2(Operation op) {
1289 transition(dtos, dtos);
1290 switch (op) {
1291 case add: __ pop_d(F4); __ fadd(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
1292 case sub: __ pop_d(F4); __ fsub(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
1293 case mul: __ pop_d(F4); __ fmul(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
1294 case div: __ pop_d(F4); __ fdiv(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d); break;
1295 case rem:
1296 #ifdef _LP64
1297 // Pass arguments in D0, D2
1298 __ fmov(FloatRegisterImpl::D, Ftos_f, F2 );
1299 __ pop_d( F0 );
1300 #else
1301 // Pass arguments in O0O1, O2O3
1302 __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
1303 __ ldd( __ d_tmp, O2 );
1304 __ pop_d(Ftos_f);
1305 __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
1306 __ ldd( __ d_tmp, O0 );
1307 #endif
1308 __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::drem));
1309 assert( Ftos_d == F0, "fix this code" );
1310 break;
1312 default: ShouldNotReachHere();
1313 }
1314 }
1317 void TemplateTable::ineg() {
1318 transition(itos, itos);
1319 __ neg(Otos_i);
1320 }
1323 void TemplateTable::lneg() {
1324 transition(ltos, ltos);
1325 #ifdef _LP64
1326 __ sub(G0, Otos_l, Otos_l);
1327 #else
1328 __ lneg(Otos_l1, Otos_l2);
1329 #endif
1330 }
1333 void TemplateTable::fneg() {
1334 transition(ftos, ftos);
1335 __ fneg(FloatRegisterImpl::S, Ftos_f);
1336 }
1339 void TemplateTable::dneg() {
1340 transition(dtos, dtos);
1341 // v8 has fnegd if source and dest are the same
1342 __ fneg(FloatRegisterImpl::D, Ftos_f);
1343 }
1346 void TemplateTable::iinc() {
1347 transition(vtos, vtos);
1348 locals_index(G3_scratch);
1349 __ ldsb(Lbcp, 2, O2); // load constant
1350 __ access_local_int(G3_scratch, Otos_i);
1351 __ add(Otos_i, O2, Otos_i);
1352 __ st(Otos_i, G3_scratch, 0); // access_local_int puts E.A. in G3_scratch
1353 }
1356 void TemplateTable::wide_iinc() {
1357 transition(vtos, vtos);
1358 locals_index_wide(G3_scratch);
1359 __ get_2_byte_integer_at_bcp( 4, O2, O3, InterpreterMacroAssembler::Signed);
1360 __ access_local_int(G3_scratch, Otos_i);
1361 __ add(Otos_i, O3, Otos_i);
1362 __ st(Otos_i, G3_scratch, 0); // access_local_int puts E.A. in G3_scratch
1363 }
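// NOTE (editorial sketch, not in the original file): the wide iinc operands
// are laid out as
//
//   wide(1) iinc(1) index(2) const(2)
//
// and Lbcp points at the wide prefix, which is why the index is read at
// bcp + 2 and the signed constant at bcp + 4.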
1366 void TemplateTable::convert() {
1367 // %%%%% Factor this first part across platforms
1368 #ifdef ASSERT
1369 TosState tos_in = ilgl;
1370 TosState tos_out = ilgl;
1371 switch (bytecode()) {
1372 case Bytecodes::_i2l: // fall through
1373 case Bytecodes::_i2f: // fall through
1374 case Bytecodes::_i2d: // fall through
1375 case Bytecodes::_i2b: // fall through
1376 case Bytecodes::_i2c: // fall through
1377 case Bytecodes::_i2s: tos_in = itos; break;
1378 case Bytecodes::_l2i: // fall through
1379 case Bytecodes::_l2f: // fall through
1380 case Bytecodes::_l2d: tos_in = ltos; break;
1381 case Bytecodes::_f2i: // fall through
1382 case Bytecodes::_f2l: // fall through
1383 case Bytecodes::_f2d: tos_in = ftos; break;
1384 case Bytecodes::_d2i: // fall through
1385 case Bytecodes::_d2l: // fall through
1386 case Bytecodes::_d2f: tos_in = dtos; break;
1387 default : ShouldNotReachHere();
1388 }
1389 switch (bytecode()) {
1390 case Bytecodes::_l2i: // fall through
1391 case Bytecodes::_f2i: // fall through
1392 case Bytecodes::_d2i: // fall through
1393 case Bytecodes::_i2b: // fall through
1394 case Bytecodes::_i2c: // fall through
1395 case Bytecodes::_i2s: tos_out = itos; break;
1396 case Bytecodes::_i2l: // fall through
1397 case Bytecodes::_f2l: // fall through
1398 case Bytecodes::_d2l: tos_out = ltos; break;
1399 case Bytecodes::_i2f: // fall through
1400 case Bytecodes::_l2f: // fall through
1401 case Bytecodes::_d2f: tos_out = ftos; break;
1402 case Bytecodes::_i2d: // fall through
1403 case Bytecodes::_l2d: // fall through
1404 case Bytecodes::_f2d: tos_out = dtos; break;
1405 default : ShouldNotReachHere();
1406 }
1407 transition(tos_in, tos_out);
1408 #endif
1411 // Conversion
1412 Label done;
1413 switch (bytecode()) {
1414 case Bytecodes::_i2l:
1415 #ifdef _LP64
1416 // Sign extend the 32 bits
1417 __ sra ( Otos_i, 0, Otos_l );
1418 #else
1419 __ addcc(Otos_i, 0, Otos_l2);
1420 __ br(Assembler::greaterEqual, true, Assembler::pt, done);
1421 __ delayed()->clr(Otos_l1);
1422 __ set(~0, Otos_l1);
1423 #endif
1424 break;
1426 case Bytecodes::_i2f:
1427 __ st(Otos_i, __ d_tmp );
1428 __ ldf(FloatRegisterImpl::S, __ d_tmp, F0);
1429 __ fitof(FloatRegisterImpl::S, F0, Ftos_f);
1430 break;
1432 case Bytecodes::_i2d:
1433 __ st(Otos_i, __ d_tmp);
1434 __ ldf(FloatRegisterImpl::S, __ d_tmp, F0);
1435 __ fitof(FloatRegisterImpl::D, F0, Ftos_f);
1436 break;
1438 case Bytecodes::_i2b:
1439 __ sll(Otos_i, 24, Otos_i);
1440 __ sra(Otos_i, 24, Otos_i);
1441 break;
1443 case Bytecodes::_i2c:
1444 __ sll(Otos_i, 16, Otos_i);
1445 __ srl(Otos_i, 16, Otos_i);
1446 break;
1448 case Bytecodes::_i2s:
1449 __ sll(Otos_i, 16, Otos_i);
1450 __ sra(Otos_i, 16, Otos_i);
1451 break;
1453 case Bytecodes::_l2i:
1454 #ifndef _LP64
1455 __ mov(Otos_l2, Otos_i);
1456 #else
1457 // Sign-extend into the high 32 bits
1458 __ sra(Otos_l, 0, Otos_i);
1459 #endif
1460 break;
1462 case Bytecodes::_l2f:
1463 case Bytecodes::_l2d:
1464 __ st_long(Otos_l, __ d_tmp);
1465 __ ldf(FloatRegisterImpl::D, __ d_tmp, Ftos_d);
1467 if (VM_Version::v9_instructions_work()) {
1468 if (bytecode() == Bytecodes::_l2f) {
1469 __ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f);
1470 } else {
1471 __ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d);
1472 }
1473 } else {
1474 __ call_VM_leaf(
1475 Lscratch,
1476 bytecode() == Bytecodes::_l2f
1477 ? CAST_FROM_FN_PTR(address, SharedRuntime::l2f)
1478 : CAST_FROM_FN_PTR(address, SharedRuntime::l2d)
1479 );
1480 }
1481 break;
1483 case Bytecodes::_f2i: {
1484 Label isNaN;
1485 // result must be 0 if value is NaN; test by comparing value to itself
1486 __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, Ftos_f, Ftos_f);
1487 // According to the v8 manual, you have to have a non-fp instruction
1488 // between fcmp and fb.
1489 if (!VM_Version::v9_instructions_work()) {
1490 __ nop();
1491 }
1492 __ fb(Assembler::f_unordered, true, Assembler::pn, isNaN);
1493 __ delayed()->clr(Otos_i); // NaN
1494 __ ftoi(FloatRegisterImpl::S, Ftos_f, F30);
1495 __ stf(FloatRegisterImpl::S, F30, __ d_tmp);
1496 __ ld(__ d_tmp, Otos_i);
1497 __ bind(isNaN);
1498 }
1499 break;
1501 case Bytecodes::_f2l:
1502 // must uncache tos
1503 __ push_f();
1504 #ifdef _LP64
1505 __ pop_f(F1);
1506 #else
1507 __ pop_i(O0);
1508 #endif
1509 __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
1510 break;
1512 case Bytecodes::_f2d:
1513 __ ftof( FloatRegisterImpl::S, FloatRegisterImpl::D, Ftos_f, Ftos_f);
1514 break;
1516 case Bytecodes::_d2i:
1517 case Bytecodes::_d2l:
1518 // must uncache tos
1519 __ push_d();
1520 #ifdef _LP64
1521 // LP64 calling conventions pass first double arg in D0
1522 __ pop_d( Ftos_d );
1523 #else
1524 __ pop_i( O0 );
1525 __ pop_i( O1 );
1526 #endif
1527 __ call_VM_leaf(Lscratch,
1528 bytecode() == Bytecodes::_d2i
1529 ? CAST_FROM_FN_PTR(address, SharedRuntime::d2i)
1530 : CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
1531 break;
1533 case Bytecodes::_d2f:
1534 if (VM_Version::v9_instructions_work()) {
1535 __ ftof( FloatRegisterImpl::D, FloatRegisterImpl::S, Ftos_d, Ftos_f);
1536 }
1537 else {
1538 // must uncache tos
1539 __ push_d();
1540 __ pop_i(O0);
1541 __ pop_i(O1);
1542 __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::d2f));
1543 }
1544 break;
1546 default: ShouldNotReachHere();
1547 }
1548 __ bind(done);
1549 }
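// NOTE (editorial sketch, not in the original file): the i2b/i2c/i2s cases
// above narrow with the classic branch-free shift pairs:
//
//   i2b:  (x << 24) >> 24    // arithmetic shift back: sign-extend the byte
//   i2s:  (x << 16) >> 16    // arithmetic shift back: sign-extend the short
//   i2c:  (x << 16) >>> 16   // logical shift back: zero-extend the char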
1552 void TemplateTable::lcmp() {
1553 transition(ltos, itos);
1555 #ifdef _LP64
1556 __ pop_l(O1); // pop off value 1, value 2 is in O0
1557 __ lcmp( O1, Otos_l, Otos_i );
1558 #else
1559 __ pop_l(O2); // cmp O2,3 to O0,1
1560 __ lcmp( O2, O3, Otos_l1, Otos_l2, Otos_i );
1561 #endif
1562 }
1565 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
1567 if (is_float) __ pop_f(F2);
1568 else __ pop_d(F2);
1570 assert(Ftos_f == F0 && Ftos_d == F0, "alias checking:");
1572 __ float_cmp( is_float, unordered_result, F2, F0, Otos_i );
1573 }
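// NOTE (editorial note, not in the original file): unordered_result encodes
// the NaN convention of the bytecode pair: fcmpl/dcmpl pass -1 and
// fcmpg/dcmpg pass +1, i.e. the value pushed when either operand is NaN.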
1575 void TemplateTable::branch(bool is_jsr, bool is_wide) {
1576 // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
1577 __ verify_thread();
1579 const Register O2_bumped_count = O2;
1580 __ profile_taken_branch(G3_scratch, O2_bumped_count);
1582 // get (wide) offset to O1_disp
1583 const Register O1_disp = O1;
1584 if (is_wide) __ get_4_byte_integer_at_bcp( 1, G4_scratch, O1_disp, InterpreterMacroAssembler::set_CC);
1585 else __ get_2_byte_integer_at_bcp( 1, G4_scratch, O1_disp, InterpreterMacroAssembler::Signed, InterpreterMacroAssembler::set_CC);
1587 // Handle all the JSR stuff here, then exit.
1588 // It's much shorter and cleaner than intermingling with the
1589 // non-JSR normal-branch stuff occurring below.
1590 if( is_jsr ) {
1591 // compute return address as bci in Otos_i
1592 __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
1593 __ sub(Lbcp, G3_scratch, G3_scratch);
1594 __ sub(G3_scratch, in_bytes(ConstMethod::codes_offset()) - (is_wide ? 5 : 3), Otos_i);
1596 // Bump Lbcp to target of JSR
1597 __ add(Lbcp, O1_disp, Lbcp);
1598 // Push returnAddress for "ret" on stack
1599 __ push_ptr(Otos_i);
1600 // And away we go!
1601 __ dispatch_next(vtos);
1602 return;
1603 }
1605 // Normal (non-jsr) branch handling
1607 // Save the current Lbcp
1608 const Register O0_cur_bcp = O0;
1609 __ mov( Lbcp, O0_cur_bcp );
1612 bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
1613 if ( increment_invocation_counter_for_backward_branches ) {
1614 Label Lforward;
1615 // check branch direction
1616 __ br( Assembler::positive, false, Assembler::pn, Lforward );
1617 // Bump bytecode pointer by displacement (take the branch)
1618 __ delayed()->add( O1_disp, Lbcp, Lbcp ); // add to bc addr
1620 if (TieredCompilation) {
1621 Label Lno_mdo, Loverflow;
1622 int increment = InvocationCounter::count_increment;
1623 int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
1624 if (ProfileInterpreter) {
1625 // If no method data exists, go to profile_continue.
1626 __ ld_ptr(Lmethod, Method::method_data_offset(), G4_scratch);
1627 __ br_null_short(G4_scratch, Assembler::pn, Lno_mdo);
1629 // Increment backedge counter in the MDO
1630 Address mdo_backedge_counter(G4_scratch, in_bytes(MethodData::backedge_counter_offset()) +
1631 in_bytes(InvocationCounter::counter_offset()));
1632 __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, Lscratch,
1633 Assembler::notZero, &Lforward);
1634 __ ba_short(Loverflow);
1635 }
1637 // If there's no MDO, increment counter in Method*
1638 __ bind(Lno_mdo);
1639 Address backedge_counter(Lmethod, in_bytes(Method::backedge_counter_offset()) +
1640 in_bytes(InvocationCounter::counter_offset()));
1641 __ increment_mask_and_jump(backedge_counter, increment, mask, G3_scratch, Lscratch,
1642 Assembler::notZero, &Lforward);
1643 __ bind(Loverflow);
1645 // notify point for loop, pass branch bytecode
1646 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), O0_cur_bcp);
1648 // Was an OSR adapter generated?
1649 // O0 = osr nmethod
1650 __ br_null_short(O0, Assembler::pn, Lforward);
1652 // Has the nmethod been invalidated already?
1653 __ ld(O0, nmethod::entry_bci_offset(), O2);
1654 __ cmp_and_br_short(O2, InvalidOSREntryBci, Assembler::equal, Assembler::pn, Lforward);
1656 // migrate the interpreter frame off of the stack
1658 __ mov(G2_thread, L7);
1659 // save nmethod
1660 __ mov(O0, L6);
1661 __ set_last_Java_frame(SP, noreg);
1662 __ call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
1663 __ reset_last_Java_frame();
1664 __ mov(L7, G2_thread);
1666 // move OSR nmethod to I1
1667 __ mov(L6, I1);
1669 // OSR buffer to I0
1670 __ mov(O0, I0);
1672 // remove the interpreter frame
1673 __ restore(I5_savedSP, 0, SP);
1675 // Jump to the osr code.
1676 __ ld_ptr(O1, nmethod::osr_entry_point_offset(), O2);
1677 __ jmp(O2, G0);
1678 __ delayed()->nop();
1680 } else {
1681 // Update Backedge branch separately from invocations
1682 const Register G4_invoke_ctr = G4;
1683 __ increment_backedge_counter(G4_invoke_ctr, G1_scratch);
1684 if (ProfileInterpreter) {
1685 __ test_invocation_counter_for_mdp(G4_invoke_ctr, G3_scratch, Lforward);
1686 if (UseOnStackReplacement) {
1687 __ test_backedge_count_for_osr(O2_bumped_count, O0_cur_bcp, G3_scratch);
1688 }
1689 } else {
1690 if (UseOnStackReplacement) {
1691 __ test_backedge_count_for_osr(G4_invoke_ctr, O0_cur_bcp, G3_scratch);
1692 }
1693 }
1694 }
1696 __ bind(Lforward);
1697 } else
1698 // Bump bytecode pointer by displacement (take the branch)
1699 __ add( O1_disp, Lbcp, Lbcp );// add to bc addr
1701 // continue with bytecode @ target
1702 // %%%%% Like Intel, could speed things up by moving bytecode fetch to code above,
1703 // %%%%% and changing dispatch_next to dispatch_only
1704 __ dispatch_next(vtos);
1705 }
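// NOTE (editorial sketch, not in the original file): the backward-branch path
// above is, in outline:
//
//   counter += increment;
//   if ((counter & mask) == 0) {                  // notify threshold crossed
//     nm = InterpreterRuntime::frequency_counter_overflow(branch_bcp);
//     if (nm != NULL && nm->entry_bci() != InvalidOSREntryBci) {
//       SharedRuntime::OSR_migration_begin(thread);  // move frame off-stack
//       jump(nm->osr_entry_point());                 // enter the compiled loop
//     }
//   }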
1708 // Note: the Condition argument is TemplateTable::Condition;
1709 // its name resolves within class scope
1711 void TemplateTable::if_0cmp(Condition cc) {
1712 // no pointers, integer only!
1713 transition(itos, vtos);
1714 // assume branch is more often taken than not (loops use backward branches)
1715 __ cmp( Otos_i, 0);
1716 __ if_cmp(ccNot(cc), false);
1717 }
1720 void TemplateTable::if_icmp(Condition cc) {
1721 transition(itos, vtos);
1722 __ pop_i(O1);
1723 __ cmp(O1, Otos_i);
1724 __ if_cmp(ccNot(cc), false);
1725 }
1728 void TemplateTable::if_nullcmp(Condition cc) {
1729 transition(atos, vtos);
1730 __ tst(Otos_i);
1731 __ if_cmp(ccNot(cc), true);
1732 }
1735 void TemplateTable::if_acmp(Condition cc) {
1736 transition(atos, vtos);
1737 __ pop_ptr(O1);
1738 __ verify_oop(O1);
1739 __ verify_oop(Otos_i);
1740 __ cmp(O1, Otos_i);
1741 __ if_cmp(ccNot(cc), true);
1742 }
1746 void TemplateTable::ret() {
1747 transition(vtos, vtos);
1748 locals_index(G3_scratch);
1749 __ access_local_returnAddress(G3_scratch, Otos_i);
1750 // Otos_i contains the bci; compute the bcp from that
1752 #ifdef _LP64
1753 #ifdef ASSERT
1754 // jsr result was labeled as an 'itos' not an 'atos' because we cannot GC
1755 // the result. The return address (really a BCI) was stored with an
1756 // 'astore' because JVM specs claim it's a pointer-sized thing. Hence in
1757 // the 64-bit build the 32-bit BCI is actually in the low bits of a 64-bit
1758 // loaded value.
1759 { Label zzz;
1760 __ set(65536, G3_scratch);
1761 __ cmp(Otos_i, G3_scratch);
1762 __ bp(Assembler::lessEqualUnsigned, false, Assembler::xcc, Assembler::pn, zzz);
1763 __ delayed()->nop();
1764 __ stop("BCI is in the wrong register half?");
1765 __ bind(zzz);
1766 }
1767 #endif
1768 #endif
1770 __ profile_ret(vtos, Otos_i, G4_scratch);
1772 __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
1773 __ add(G3_scratch, Otos_i, G3_scratch);
1774 __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
1775 __ dispatch_next(vtos);
1776 }
1779 void TemplateTable::wide_ret() {
1780 transition(vtos, vtos);
1781 locals_index_wide(G3_scratch);
1782 __ access_local_returnAddress(G3_scratch, Otos_i);
1783 // Otos_i contains the bci; compute the bcp from that
1785 __ profile_ret(vtos, Otos_i, G4_scratch);
1787 __ ld_ptr(Lmethod, Method::const_offset(), G3_scratch);
1788 __ add(G3_scratch, Otos_i, G3_scratch);
1789 __ add(G3_scratch, in_bytes(ConstMethod::codes_offset()), Lbcp);
1790 __ dispatch_next(vtos);
1791 }
1794 void TemplateTable::tableswitch() {
1795 transition(itos, vtos);
1796 Label default_case, continue_execution;
1798 // align bcp
1799 __ add(Lbcp, BytesPerInt, O1);
1800 __ and3(O1, -BytesPerInt, O1);
1801 // load lo, hi
1802 __ ld(O1, 1 * BytesPerInt, O2); // low bound
1803 __ ld(O1, 2 * BytesPerInt, O3); // high bound
1804 #ifdef _LP64
1805 // Sign extend the 32 bits
1806 __ sra ( Otos_i, 0, Otos_i );
1807 #endif /* _LP64 */
1809 // check against lo & hi
1810 __ cmp( Otos_i, O2);
1811 __ br( Assembler::less, false, Assembler::pn, default_case);
1812 __ delayed()->cmp( Otos_i, O3 );
1813 __ br( Assembler::greater, false, Assembler::pn, default_case);
1814 // lookup dispatch offset
1815 __ delayed()->sub(Otos_i, O2, O2);
1816 __ profile_switch_case(O2, O3, G3_scratch, G4_scratch);
1817 __ sll(O2, LogBytesPerInt, O2);
1818 __ add(O2, 3 * BytesPerInt, O2);
1819 __ ba(continue_execution);
1820 __ delayed()->ld(O1, O2, O2);
1821 // handle default
1822 __ bind(default_case);
1823 __ profile_switch_default(O3);
1824 __ ld(O1, 0, O2); // get default offset
1825 // continue execution
1826 __ bind(continue_execution);
1827 __ add(Lbcp, O2, Lbcp);
1828 __ dispatch_next(vtos);
1829 }
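// NOTE (editorial sketch, not in the original file): after 4-byte alignment
// the tableswitch operands indexed above are laid out as
//
//   [default offset][lo][hi][offset(lo)][offset(lo+1)] ... [offset(hi)]
//
// hence the lo/hi bounds checks and the 3 * BytesPerInt skip past the header
// before the scaled offset load.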
1832 void TemplateTable::lookupswitch() {
1833 transition(itos, itos);
1834 __ stop("lookupswitch bytecode should have been rewritten");
1835 }
1837 void TemplateTable::fast_linearswitch() {
1838 transition(itos, vtos);
1839 Label loop_entry, loop, found, continue_execution;
1840 // align bcp
1841 __ add(Lbcp, BytesPerInt, O1);
1842 __ and3(O1, -BytesPerInt, O1);
1843 // set counter
1844 __ ld(O1, BytesPerInt, O2);
1845 __ sll(O2, LogBytesPerInt + 1, O2); // in word-pairs
1846 __ add(O1, 2 * BytesPerInt, O3); // set first pair addr
1847 __ ba(loop_entry);
1848 __ delayed()->add(O3, O2, O2); // counter now points past last pair
1850 // table search
1851 __ bind(loop);
1852 __ cmp(O4, Otos_i);
1853 __ br(Assembler::equal, true, Assembler::pn, found);
1854 __ delayed()->ld(O3, BytesPerInt, O4); // offset -> O4
1855 __ inc(O3, 2 * BytesPerInt);
1857 __ bind(loop_entry);
1858 __ cmp(O2, O3);
1859 __ brx(Assembler::greaterUnsigned, true, Assembler::pt, loop);
1860 __ delayed()->ld(O3, 0, O4);
1862 // default case
1863 __ ld(O1, 0, O4); // get default offset
1864 if (ProfileInterpreter) {
1865 __ profile_switch_default(O3);
1866 __ ba_short(continue_execution);
1867 }
1869 // entry found -> get offset
1870 __ bind(found);
1871 if (ProfileInterpreter) {
1872 __ sub(O3, O1, O3);
1873 __ sub(O3, 2*BytesPerInt, O3);
1874 __ srl(O3, LogBytesPerInt + 1, O3); // byte offset -> pair index
1875 __ profile_switch_case(O3, O1, O2, G3_scratch);
1877 __ bind(continue_execution);
1878 }
1879 __ add(Lbcp, O4, Lbcp);
1880 __ dispatch_next(vtos);
1881 }
1884 void TemplateTable::fast_binaryswitch() {
1885 transition(itos, vtos);
1886 // Implementation using the following core algorithm (copied from the x86 version):
1887 //
1888 // int binary_search(int key, LookupswitchPair* array, int n) {
1889 // // Binary search according to "Methodik des Programmierens" by
1890 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
1891 // int i = 0;
1892 // int j = n;
1893 // while (i+1 < j) {
1894 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
1895 // // with Q: for all i: 0 <= i < n: key < a[i]
1896 // // where a stands for the array and assuming that the (nonexistent)
1897 // // element a[n] is infinitely big.
1898 // int h = (i + j) >> 1;
1899 // // i < h < j
1900 // if (key < array[h].fast_match()) {
1901 // j = h;
1902 // } else {
1903 // i = h;
1904 // }
1905 // }
1906 // // R: a[i] <= key < a[i+1] or Q
1907 // // (i.e., if key is within array, i is the correct index)
1908 // return i;
1909 // }
1911 // register allocation
1912 assert(Otos_i == O0, "alias checking");
1913 const Register Rkey = Otos_i; // already set (tosca)
1914 const Register Rarray = O1;
1915 const Register Ri = O2;
1916 const Register Rj = O3;
1917 const Register Rh = O4;
1918 const Register Rscratch = O5;
1920 const int log_entry_size = 3;
1921 const int entry_size = 1 << log_entry_size;
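// Each LookupswitchPair is a 4-byte match value followed by a 4-byte branch
// offset, so entry_size == 8 and log_entry_size == 3.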
1923 Label found;
1924 // Find Array start
1925 __ add(Lbcp, 3 * BytesPerInt, Rarray);
1926 __ and3(Rarray, -BytesPerInt, Rarray);
1927 // initialize i; j is initialized in the branch delay slot below
1928 __ clr( Ri );
1930 // and start the search
1931 Label entry;
1932 __ ba(entry);
1933 __ delayed()->ld( Rarray, -BytesPerInt, Rj);
1934 // (Rj is already in the native byte-ordering.)
1936 // binary search loop
1937 { Label loop;
1938 __ bind( loop );
1939 // int h = (i + j) >> 1;
1940 __ sra( Rh, 1, Rh );
1941 // if (key < array[h].fast_match()) {
1942 // j = h;
1943 // } else {
1944 // i = h;
1945 // }
1946 __ sll( Rh, log_entry_size, Rscratch );
1947 __ ld( Rarray, Rscratch, Rscratch );
1948 // (Rscratch is already in the native byte-ordering.)
1949 __ cmp( Rkey, Rscratch );
1950 if ( VM_Version::v9_instructions_work() ) {
1951 __ movcc( Assembler::less, false, Assembler::icc, Rh, Rj ); // j = h if (key < array[h].fast_match())
1952 __ movcc( Assembler::greaterEqual, false, Assembler::icc, Rh, Ri ); // i = h if (key >= array[h].fast_match())
1953 }
1954 else {
1955 Label end_of_if;
1956 __ br( Assembler::less, true, Assembler::pt, end_of_if );
1957 __ delayed()->mov( Rh, Rj ); // if (<) Rj = Rh
1958 __ mov( Rh, Ri ); // else i = h
1959 __ bind(end_of_if); // }
1960 }
1962 // while (i+1 < j)
1963 __ bind( entry );
1964 __ add( Ri, 1, Rscratch );
1965 __ cmp(Rscratch, Rj);
1966 __ br( Assembler::less, true, Assembler::pt, loop );
1967 __ delayed()->add( Ri, Rj, Rh ); // delay slot: h = i + j (shifted right at the loop head)
1968 }
1970 // end of binary search, result index is i (must check again!)
1971 Label default_case;
1972 Label continue_execution;
1973 if (ProfileInterpreter) {
1974 __ mov( Ri, Rh ); // save index i for profiling
1975 }
1976 __ sll( Ri, log_entry_size, Ri );
1977 __ ld( Rarray, Ri, Rscratch );
1978 // (Rscratch is already in the native byte-ordering.)
1979 __ cmp( Rkey, Rscratch );
1980 __ br( Assembler::notEqual, true, Assembler::pn, default_case );
1981 __ delayed()->ld( Rarray, -2 * BytesPerInt, Rj ); // load default offset -> j
1983 // entry found -> j = offset
1984 __ inc( Ri, BytesPerInt );
1985 __ profile_switch_case(Rh, Rj, Rscratch, Rkey);
1986 __ ld( Rarray, Ri, Rj );
1987 // (Rj is already in the native byte-ordering.)
1989 if (ProfileInterpreter) {
1990 __ ba_short(continue_execution);
1991 }
1993 __ bind(default_case); // fall through (if not profiling)
1994 __ profile_switch_default(Ri);
1996 __ bind(continue_execution);
1997 __ add( Lbcp, Rj, Lbcp );
1998 __ dispatch_next( vtos );
1999 }
2002 void TemplateTable::_return(TosState state) {
2003 transition(state, state);
2004 assert(_desc->calls_vm(), "inconsistent calls_vm information");
2006 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2007 assert(state == vtos, "only valid state");
2008 __ mov(G0, G3_scratch);
2009 __ access_local_ptr(G3_scratch, Otos_i);
2010 __ load_klass(Otos_i, O2);
2011 __ set(JVM_ACC_HAS_FINALIZER, G3);
2012 __ ld(O2, in_bytes(Klass::access_flags_offset()), O2);
2013 __ andcc(G3, O2, G0);
2014 Label skip_register_finalizer;
2015 __ br(Assembler::zero, false, Assembler::pn, skip_register_finalizer);
2016 __ delayed()->nop();
2018 // Call out to do finalizer registration
2019 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), Otos_i);
2021 __ bind(skip_register_finalizer);
2022 }
2024 __ remove_activation(state, /* throw_monitor_exception */ true);
2026 // The caller's SP was adjusted upon method entry to accommodate
2027 // the callee's non-argument locals. Undo that adjustment.
2028 __ ret(); // return to caller
2029 __ delayed()->restore(I5_savedSP, G0, SP);
2030 }
2033 // ----------------------------------------------------------------------------
2034 // Volatile variables demand their effects be made known to all CPUs in
2035 // order. Store buffers on most chips allow reads & writes to reorder; the
2036 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
2037 // memory barrier (i.e., it's not sufficient that the interpreter does not
2038 // reorder volatile references, the hardware also must not reorder them).
2039 //
2040 // According to the new Java Memory Model (JMM):
2041 // (1) All volatiles are serialized with respect to each other.
2042 // ALSO reads & writes act as acquire & release, so:
2043 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
2044 // the read float up to before the read. It's OK for non-volatile memory refs
2045 // that happen before the volatile read to float down below it.
2046 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
2047 // that happen BEFORE the write float down to after the write. It's OK for
2048 // non-volatile memory refs that happen after the volatile write to float up
2049 // before it.
2050 //
2051 // We only put in barriers around volatile refs (they are expensive), not
2052 // _between_ memory refs (that would require us to track the flavor of the
2053 // previous memory refs). Requirements (2) and (3) require some barriers
2054 // before volatile stores and after volatile loads. These nearly cover
2055 // requirement (1) but miss the volatile-store-volatile-load case. This final
2056 // case is placed after volatile-stores although it could just as well go
2057 // before volatile-loads.
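// As a concrete example of the rules above (an illustrative sketch, not code
// emitted by this helper):
//
//   volatile store:  membar(LoadStore|StoreStore); st [field]; membar(StoreLoad);
//   volatile load:   ld [field]; membar(LoadLoad|LoadStore);
//
// These are exactly the read_bits/write_bits used by putfield_or_static and
// the mask used by getfield_or_static below; under TSO only the StoreLoad
// barrier actually has an effect.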
2058 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint) {
2059 // Helper function to insert the memory barrier needed by a volatile access.
2060 // All current SPARC implementations run in TSO, so only StoreLoad is needed.
2061 if ((order_constraint & Assembler::StoreLoad) == 0) return;
2062 __ membar( order_constraint );
2063 }
2065 // ----------------------------------------------------------------------------
2066 void TemplateTable::resolve_cache_and_index(int byte_no,
2067 Register Rcache,
2068 Register index,
2069 size_t index_size) {
2070 // Depends on ConstantPoolCache layout!
2071 Label resolved;
2073 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2074 __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, Lbyte_code, byte_no, 1, index_size);
2075 __ cmp(Lbyte_code, (int) bytecode()); // have we resolved this bytecode?
2076 __ br(Assembler::equal, false, Assembler::pt, resolved);
2077 __ delayed()->set((int)bytecode(), O1);
2079 address entry;
2080 switch (bytecode()) {
2081 case Bytecodes::_getstatic : // fall through
2082 case Bytecodes::_putstatic : // fall through
2083 case Bytecodes::_getfield : // fall through
2084 case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
2085 case Bytecodes::_invokevirtual : // fall through
2086 case Bytecodes::_invokespecial : // fall through
2087 case Bytecodes::_invokestatic : // fall through
2088 case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
2089 case Bytecodes::_invokehandle : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break;
2090 case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
2091 default:
2092 fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
2093 break;
2094 }
2095 // first time invocation - must resolve first
2096 __ call_VM(noreg, entry, O1);
2097 // Update registers with resolved info
2098 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2099 __ bind(resolved);
2100 }
2102 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2103 Register method,
2104 Register itable_index,
2105 Register flags,
2106 bool is_invokevirtual,
2107 bool is_invokevfinal,
2108 bool is_invokedynamic) {
2109 // Uses both G3_scratch and G4_scratch
2110 Register cache = G3_scratch;
2111 Register index = G4_scratch;
2112 assert_different_registers(cache, method, itable_index);
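// Sketch of a ConstantPoolCacheEntry (see cpCache.hpp for the authoritative
// layout): four words { indices, f1, f2, flags }. For invokes, f1 holds the
// resolved Method* (or interface Klass*), f2 holds the vtable/itable index
// (or the target Method* for vfinal), and flags encodes the TosState,
// parameter size, and the is_final/is_volatile/... bits tested elsewhere.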
2114 // determine constant pool cache field offsets
2115 assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2116 const int method_offset = in_bytes(
2117 ConstantPoolCache::base_offset() +
2118 ((byte_no == f2_byte)
2119 ? ConstantPoolCacheEntry::f2_offset()
2120 : ConstantPoolCacheEntry::f1_offset()
2121 )
2122 );
2123 const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2124 ConstantPoolCacheEntry::flags_offset());
2125 // access constant pool cache fields
2126 const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
2127 ConstantPoolCacheEntry::f2_offset());
2129 if (is_invokevfinal) {
2130 __ get_cache_and_index_at_bcp(cache, index, 1);
2131 __ ld_ptr(Address(cache, method_offset), method);
2132 } else {
2133 size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
2134 resolve_cache_and_index(byte_no, cache, index, index_size);
2135 __ ld_ptr(Address(cache, method_offset), method);
2136 }
2138 if (itable_index != noreg) {
2139 // pick up itable or appendix index from f2 also:
2140 __ ld_ptr(Address(cache, index_offset), itable_index);
2141 }
2142 __ ld_ptr(Address(cache, flags_offset), flags);
2143 }
2145 // The Rcache register must be set before call
2146 void TemplateTable::load_field_cp_cache_entry(Register Robj,
2147 Register Rcache,
2148 Register index,
2149 Register Roffset,
2150 Register Rflags,
2151 bool is_static) {
2152 assert_different_registers(Rcache, Rflags, Roffset);
2154 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2156 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
2157 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
2158 if (is_static) {
2159 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f1_offset(), Robj);
2160 const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2161 __ ld_ptr( Robj, mirror_offset, Robj);
2162 }
2163 }
2165 // The registers Rcache and index are expected to be set before the call.
2166 // The correct values of the Rcache and index registers are preserved.
2167 void TemplateTable::jvmti_post_field_access(Register Rcache,
2168 Register index,
2169 bool is_static,
2170 bool has_tos) {
2171 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2173 if (JvmtiExport::can_post_field_access()) {
2174 // Check to see if a field access watch has been set before we take
2175 // the time to call into the VM.
2176 Label Label1;
2177 assert_different_registers(Rcache, index, G1_scratch);
2178 AddressLiteral get_field_access_count_addr(JvmtiExport::get_field_access_count_addr());
2179 __ load_contents(get_field_access_count_addr, G1_scratch);
2180 __ cmp_and_br_short(G1_scratch, 0, Assembler::equal, Assembler::pt, Label1);
2182 __ add(Rcache, in_bytes(cp_base_offset), Rcache);
2184 if (is_static) {
2185 __ clr(Otos_i);
2186 } else {
2187 if (has_tos) {
2188 // save object pointer before call_VM() clobbers it
2189 __ push_ptr(Otos_i); // put object on tos where GC wants it.
2190 } else {
2191 // Load top of stack (do not pop the value off the stack);
2192 __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
2193 }
2194 __ verify_oop(Otos_i);
2195 }
2196 // Otos_i: object pointer or NULL if static
2197 // Rcache: cache entry pointer
2198 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2199 Otos_i, Rcache);
2200 if (!is_static && has_tos) {
2201 __ pop_ptr(Otos_i); // restore object pointer
2202 __ verify_oop(Otos_i);
2203 }
2204 __ get_cache_and_index_at_bcp(Rcache, index, 1);
2205 __ bind(Label1);
2206 }
2207 }
2209 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
2210 transition(vtos, vtos);
2212 Register Rcache = G3_scratch;
2213 Register index = G4_scratch;
2214 Register Rclass = Rcache;
2215 Register Roffset= G4_scratch;
2216 Register Rflags = G1_scratch;
2217 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2219 resolve_cache_and_index(byte_no, Rcache, index, sizeof(u2));
2220 jvmti_post_field_access(Rcache, index, is_static, false);
2221 load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);
2223 if (!is_static) {
2224 pop_and_check_object(Rclass);
2225 } else {
2226 __ verify_oop(Rclass);
2227 }
2229 Label exit;
2231 Assembler::Membar_mask_bits membar_bits =
2232 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
2234 if (__ membar_has_effect(membar_bits)) {
2235 // Get volatile flag
2236 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
2237 __ and3(Rflags, Lscratch, Lscratch);
2238 }
2240 Label checkVolatile;
2242 // compute field type
2243 Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj;
2244 __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
2245 // Make sure we don't need to mask Rflags after the above shift
2246 ConstantPoolCacheEntry::verify_tos_state_shift();
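// Rflags now holds the field's TosState (one of btos, ctos, stos, itos,
// ltos, ftos, dtos, atos); the compare-and-branch chain below dispatches
// on that value.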
2248 // Check atos before itos: for getstatic an object type is the more likely case (in Queens at least)
2249 __ cmp(Rflags, atos);
2250 __ br(Assembler::notEqual, false, Assembler::pt, notObj);
2251 __ delayed() ->cmp(Rflags, itos);
2253 // atos
2254 __ load_heap_oop(Rclass, Roffset, Otos_i);
2255 __ verify_oop(Otos_i);
2256 __ push(atos);
2257 if (!is_static) {
2258 patch_bytecode(Bytecodes::_fast_agetfield, G3_scratch, G4_scratch);
2259 }
2260 __ ba(checkVolatile);
2261 __ delayed()->tst(Lscratch);
2263 __ bind(notObj);
2265 // cmp(Rflags, itos);
2266 __ br(Assembler::notEqual, false, Assembler::pt, notInt);
2267 __ delayed() ->cmp(Rflags, ltos);
2269 // itos
2270 __ ld(Rclass, Roffset, Otos_i);
2271 __ push(itos);
2272 if (!is_static) {
2273 patch_bytecode(Bytecodes::_fast_igetfield, G3_scratch, G4_scratch);
2274 }
2275 __ ba(checkVolatile);
2276 __ delayed()->tst(Lscratch);
2278 __ bind(notInt);
2280 // cmp(Rflags, ltos);
2281 __ br(Assembler::notEqual, false, Assembler::pt, notLong);
2282 __ delayed() ->cmp(Rflags, btos);
2284 // ltos
2285 // load must be atomic
2286 __ ld_long(Rclass, Roffset, Otos_l);
2287 __ push(ltos);
2288 if (!is_static) {
2289 patch_bytecode(Bytecodes::_fast_lgetfield, G3_scratch, G4_scratch);
2290 }
2291 __ ba(checkVolatile);
2292 __ delayed()->tst(Lscratch);
2294 __ bind(notLong);
2296 // cmp(Rflags, btos);
2297 __ br(Assembler::notEqual, false, Assembler::pt, notByte);
2298 __ delayed() ->cmp(Rflags, ctos);
2300 // btos
2301 __ ldsb(Rclass, Roffset, Otos_i);
2302 __ push(itos);
2303 if (!is_static) {
2304 patch_bytecode(Bytecodes::_fast_bgetfield, G3_scratch, G4_scratch);
2305 }
2306 __ ba(checkVolatile);
2307 __ delayed()->tst(Lscratch);
2309 __ bind(notByte);
2311 // cmp(Rflags, ctos);
2312 __ br(Assembler::notEqual, false, Assembler::pt, notChar);
2313 __ delayed() ->cmp(Rflags, stos);
2315 // ctos
2316 __ lduh(Rclass, Roffset, Otos_i);
2317 __ push(itos);
2318 if (!is_static) {
2319 patch_bytecode(Bytecodes::_fast_cgetfield, G3_scratch, G4_scratch);
2320 }
2321 __ ba(checkVolatile);
2322 __ delayed()->tst(Lscratch);
2324 __ bind(notChar);
2326 // cmp(Rflags, stos);
2327 __ br(Assembler::notEqual, false, Assembler::pt, notShort);
2328 __ delayed() ->cmp(Rflags, ftos);
2330 // stos
2331 __ ldsh(Rclass, Roffset, Otos_i);
2332 __ push(itos);
2333 if (!is_static) {
2334 patch_bytecode(Bytecodes::_fast_sgetfield, G3_scratch, G4_scratch);
2335 }
2336 __ ba(checkVolatile);
2337 __ delayed()->tst(Lscratch);
2339 __ bind(notShort);
2342 // cmp(Rflags, ftos);
2343 __ br(Assembler::notEqual, false, Assembler::pt, notFloat);
2344 __ delayed() ->tst(Lscratch);
2346 // ftos
2347 __ ldf(FloatRegisterImpl::S, Rclass, Roffset, Ftos_f);
2348 __ push(ftos);
2349 if (!is_static) {
2350 patch_bytecode(Bytecodes::_fast_fgetfield, G3_scratch, G4_scratch);
2351 }
2352 __ ba(checkVolatile);
2353 __ delayed()->tst(Lscratch);
2355 __ bind(notFloat);
2358 // dtos
2359 __ ldf(FloatRegisterImpl::D, Rclass, Roffset, Ftos_d);
2360 __ push(dtos);
2361 if (!is_static) {
2362 patch_bytecode(Bytecodes::_fast_dgetfield, G3_scratch, G4_scratch);
2363 }
2365 __ bind(checkVolatile);
2366 if (__ membar_has_effect(membar_bits)) {
2367 // __ tst(Lscratch); executed in delay slot
2368 __ br(Assembler::zero, false, Assembler::pt, exit);
2369 __ delayed()->nop();
2370 volatile_barrier(membar_bits);
2371 }
2373 __ bind(exit);
2374 }
2377 void TemplateTable::getfield(int byte_no) {
2378 getfield_or_static(byte_no, false);
2379 }
2381 void TemplateTable::getstatic(int byte_no) {
2382 getfield_or_static(byte_no, true);
2383 }
2386 void TemplateTable::fast_accessfield(TosState state) {
2387 transition(atos, state);
2388 Register Rcache = G3_scratch;
2389 Register index = G4_scratch;
2390 Register Roffset = G4_scratch;
2391 Register Rflags = Rcache;
2392 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2394 __ get_cache_and_index_at_bcp(Rcache, index, 1);
2395 jvmti_post_field_access(Rcache, index, /*is_static*/false, /*has_tos*/true);
2397 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
2399 __ null_check(Otos_i);
2400 __ verify_oop(Otos_i);
2402 Label exit;
2404 Assembler::Membar_mask_bits membar_bits =
2405 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
2406 if (__ membar_has_effect(membar_bits)) {
2407 // Get volatile flag
2408 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Rflags);
2409 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
2410 }
2412 switch (bytecode()) {
2413 case Bytecodes::_fast_bgetfield:
2414 __ ldsb(Otos_i, Roffset, Otos_i);
2415 break;
2416 case Bytecodes::_fast_cgetfield:
2417 __ lduh(Otos_i, Roffset, Otos_i);
2418 break;
2419 case Bytecodes::_fast_sgetfield:
2420 __ ldsh(Otos_i, Roffset, Otos_i);
2421 break;
2422 case Bytecodes::_fast_igetfield:
2423 __ ld(Otos_i, Roffset, Otos_i);
2424 break;
2425 case Bytecodes::_fast_lgetfield:
2426 __ ld_long(Otos_i, Roffset, Otos_l);
2427 break;
2428 case Bytecodes::_fast_fgetfield:
2429 __ ldf(FloatRegisterImpl::S, Otos_i, Roffset, Ftos_f);
2430 break;
2431 case Bytecodes::_fast_dgetfield:
2432 __ ldf(FloatRegisterImpl::D, Otos_i, Roffset, Ftos_d);
2433 break;
2434 case Bytecodes::_fast_agetfield:
2435 __ load_heap_oop(Otos_i, Roffset, Otos_i);
2436 break;
2437 default:
2438 ShouldNotReachHere();
2439 }
2441 if (__ membar_has_effect(membar_bits)) {
2442 __ btst(Lscratch, Rflags);
2443 __ br(Assembler::zero, false, Assembler::pt, exit);
2444 __ delayed()->nop();
2445 volatile_barrier(membar_bits);
2446 __ bind(exit);
2447 }
2449 if (state == atos) {
2450 __ verify_oop(Otos_i); // does not blow flags!
2451 }
2452 }
2454 void TemplateTable::jvmti_post_fast_field_mod() {
2455 if (JvmtiExport::can_post_field_modification()) {
2456 // Check to see if a field modification watch has been set before we take
2457 // the time to call into the VM.
2458 Label done;
2459 AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr());
2460 __ load_contents(get_field_modification_count_addr, G4_scratch);
2461 __ cmp_and_br_short(G4_scratch, 0, Assembler::equal, Assembler::pt, done);
2462 __ pop_ptr(G4_scratch); // copy the object pointer from tos
2463 __ verify_oop(G4_scratch);
2464 __ push_ptr(G4_scratch); // put the object pointer back on tos
2465 __ get_cache_entry_pointer_at_bcp(G1_scratch, G3_scratch, 1);
2466 // Save tos values before call_VM() clobbers them. Since we have
2467 // to do it for every data type, we use the saved values as the
2468 // jvalue object.
2469 switch (bytecode()) {
2470 case Bytecodes::_fast_aputfield: __ push_ptr(Otos_i); break;
2471 case Bytecodes::_fast_bputfield: // fall through
2472 case Bytecodes::_fast_sputfield: // fall through
2473 case Bytecodes::_fast_cputfield: // fall through
2474 case Bytecodes::_fast_iputfield: __ push_i(Otos_i); break;
2475 case Bytecodes::_fast_dputfield: __ push_d(Ftos_d); break;
2476 case Bytecodes::_fast_fputfield: __ push_f(Ftos_f); break;
2477 // get words in right order for use as jvalue object
2478 case Bytecodes::_fast_lputfield: __ push_l(Otos_l); break;
2479 }
2480 // setup pointer to jvalue object
2481 __ mov(Lesp, G3_scratch); __ inc(G3_scratch, wordSize);
2482 // G4_scratch: object pointer
2483 // G1_scratch: cache entry pointer
2484 // G3_scratch: jvalue object on the stack
2485 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), G4_scratch, G1_scratch, G3_scratch);
2486 switch (bytecode()) { // restore tos values
2487 case Bytecodes::_fast_aputfield: __ pop_ptr(Otos_i); break;
2488 case Bytecodes::_fast_bputfield: // fall through
2489 case Bytecodes::_fast_sputfield: // fall through
2490 case Bytecodes::_fast_cputfield: // fall through
2491 case Bytecodes::_fast_iputfield: __ pop_i(Otos_i); break;
2492 case Bytecodes::_fast_dputfield: __ pop_d(Ftos_d); break;
2493 case Bytecodes::_fast_fputfield: __ pop_f(Ftos_f); break;
2494 case Bytecodes::_fast_lputfield: __ pop_l(Otos_l); break;
2495 }
2496 __ bind(done);
2497 }
2498 }
2500 // The registers Rcache and index are expected to be set before the call.
2501 // The function may destroy various registers, just not the Rcache and index registers.
2502 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register index, bool is_static) {
2503 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2505 if (JvmtiExport::can_post_field_modification()) {
2506 // Check to see if a field modification watch has been set before we take
2507 // the time to call into the VM.
2508 Label Label1;
2509 assert_different_registers(Rcache, index, G1_scratch);
2510 AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr());
2511 __ load_contents(get_field_modification_count_addr, G1_scratch);
2512 __ cmp_and_br_short(G1_scratch, 0, Assembler::zero, Assembler::pt, Label1);
2514 // The Rcache and index registers have already been set, which would
2515 // allow us to eliminate this call; the Rcache and index registers
2516 // would then have to be used consistently after this line.
2517 __ get_cache_and_index_at_bcp(G1_scratch, G4_scratch, 1);
2519 __ add(G1_scratch, in_bytes(cp_base_offset), G3_scratch);
2520 if (is_static) {
2521 // Life is simple. Null out the object pointer.
2522 __ clr(G4_scratch);
2523 } else {
2524 Register Rflags = G1_scratch;
2525 // Life is harder. The stack holds the value on top, followed by the
2526 // object. We don't know the size of the value, though; it could be
2527 // one or two words depending on its type. As a result, we must find
2528 // the type to determine where the object is.
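// For example, with a two-word value (long/double) the expression stack is
// [..., obj, value(2 slots)], so the object is found at expr_offset(2);
// for a one-word value it is at expr_offset(1), as computed below.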
2530 Label two_word, valsizeknown;
2531 __ ld_ptr(G1_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
2532 __ mov(Lesp, G4_scratch);
2533 __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
2534 // Make sure we don't need to mask Rflags after the above shift
2535 ConstantPoolCacheEntry::verify_tos_state_shift();
2536 __ cmp(Rflags, ltos);
2537 __ br(Assembler::equal, false, Assembler::pt, two_word);
2538 __ delayed()->cmp(Rflags, dtos);
2539 __ br(Assembler::equal, false, Assembler::pt, two_word);
2540 __ delayed()->nop();
2541 __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(1));
2542 __ ba_short(valsizeknown);
2543 __ bind(two_word);
2545 __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(2));
2547 __ bind(valsizeknown);
2548 // setup object pointer
2549 __ ld_ptr(G4_scratch, 0, G4_scratch);
2550 __ verify_oop(G4_scratch);
2551 }
2552 // setup pointer to jvalue object
2553 __ mov(Lesp, G1_scratch); __ inc(G1_scratch, wordSize);
2554 // G4_scratch: object pointer or NULL if static
2555 // G3_scratch: cache entry pointer
2556 // G1_scratch: jvalue object on the stack
2557 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
2558 G4_scratch, G3_scratch, G1_scratch);
2559 __ get_cache_and_index_at_bcp(Rcache, index, 1);
2560 __ bind(Label1);
2561 }
2562 }
2564 void TemplateTable::pop_and_check_object(Register r) {
2565 __ pop_ptr(r);
2566 __ null_check(r); // for field access must check obj.
2567 __ verify_oop(r);
2568 }
2570 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
2571 transition(vtos, vtos);
2572 Register Rcache = G3_scratch;
2573 Register index = G4_scratch;
2574 Register Rclass = Rcache;
2575 Register Roffset= G4_scratch;
2576 Register Rflags = G1_scratch;
2577 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2579 resolve_cache_and_index(byte_no, Rcache, index, sizeof(u2));
2580 jvmti_post_field_mod(Rcache, index, is_static);
2581 load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);
2583 Assembler::Membar_mask_bits read_bits =
2584 Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore);
2585 Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad;
2587 Label notVolatile, checkVolatile, exit;
2588 if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
2589 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
2590 __ and3(Rflags, Lscratch, Lscratch);
2592 if (__ membar_has_effect(read_bits)) {
2593 __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile);
2594 volatile_barrier(read_bits);
2595 __ bind(notVolatile);
2596 }
2597 }
2599 __ srl(Rflags, ConstantPoolCacheEntry::tos_state_shift, Rflags);
2600 // Make sure we don't need to mask Rflags after the above shift
2601 ConstantPoolCacheEntry::verify_tos_state_shift();
2603 // compute field type
2604 Label notInt, notShort, notChar, notObj, notByte, notLong, notFloat;
2606 if (is_static) {
2607 // putstatic with object type most likely, check that first
2608 __ cmp(Rflags, atos);
2609 __ br(Assembler::notEqual, false, Assembler::pt, notObj);
2610 __ delayed()->cmp(Rflags, itos);
2612 // atos
2613 {
2614 __ pop_ptr();
2615 __ verify_oop(Otos_i);
2616 do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
2617 __ ba(checkVolatile);
2618 __ delayed()->tst(Lscratch);
2619 }
2621 __ bind(notObj);
2622 // cmp(Rflags, itos);
2623 __ br(Assembler::notEqual, false, Assembler::pt, notInt);
2624 __ delayed()->cmp(Rflags, btos);
2626 // itos
2627 {
2628 __ pop_i();
2629 __ st(Otos_i, Rclass, Roffset);
2630 __ ba(checkVolatile);
2631 __ delayed()->tst(Lscratch);
2632 }
2634 __ bind(notInt);
2635 } else {
2636 // putfield with int type most likely, check that first
2637 __ cmp(Rflags, itos);
2638 __ br(Assembler::notEqual, false, Assembler::pt, notInt);
2639 __ delayed()->cmp(Rflags, atos);
2641 // itos
2642 {
2643 __ pop_i();
2644 pop_and_check_object(Rclass);
2645 __ st(Otos_i, Rclass, Roffset);
2646 patch_bytecode(Bytecodes::_fast_iputfield, G3_scratch, G4_scratch, true, byte_no);
2647 __ ba(checkVolatile);
2648 __ delayed()->tst(Lscratch);
2649 }
2651 __ bind(notInt);
2652 // cmp(Rflags, atos);
2653 __ br(Assembler::notEqual, false, Assembler::pt, notObj);
2654 __ delayed()->cmp(Rflags, btos);
2656 // atos
2657 {
2658 __ pop_ptr();
2659 pop_and_check_object(Rclass);
2660 __ verify_oop(Otos_i);
2661 do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
2662 patch_bytecode(Bytecodes::_fast_aputfield, G3_scratch, G4_scratch, true, byte_no);
2663 __ ba(checkVolatile);
2664 __ delayed()->tst(Lscratch);
2665 }
2667 __ bind(notObj);
2668 }
2670 // cmp(Rflags, btos);
2671 __ br(Assembler::notEqual, false, Assembler::pt, notByte);
2672 __ delayed()->cmp(Rflags, ltos);
2674 // btos
2675 {
2676 __ pop_i();
2677 if (!is_static) pop_and_check_object(Rclass);
2678 __ stb(Otos_i, Rclass, Roffset);
2679 if (!is_static) {
2680 patch_bytecode(Bytecodes::_fast_bputfield, G3_scratch, G4_scratch, true, byte_no);
2681 }
2682 __ ba(checkVolatile);
2683 __ delayed()->tst(Lscratch);
2684 }
2686 __ bind(notByte);
2687 // cmp(Rflags, ltos);
2688 __ br(Assembler::notEqual, false, Assembler::pt, notLong);
2689 __ delayed()->cmp(Rflags, ctos);
2691 // ltos
2692 {
2693 __ pop_l();
2694 if (!is_static) pop_and_check_object(Rclass);
2695 __ st_long(Otos_l, Rclass, Roffset);
2696 if (!is_static) {
2697 patch_bytecode(Bytecodes::_fast_lputfield, G3_scratch, G4_scratch, true, byte_no);
2698 }
2699 __ ba(checkVolatile);
2700 __ delayed()->tst(Lscratch);
2701 }
2703 __ bind(notLong);
2704 // cmp(Rflags, ctos);
2705 __ br(Assembler::notEqual, false, Assembler::pt, notChar);
2706 __ delayed()->cmp(Rflags, stos);
2708 // ctos (char)
2709 {
2710 __ pop_i();
2711 if (!is_static) pop_and_check_object(Rclass);
2712 __ sth(Otos_i, Rclass, Roffset);
2713 if (!is_static) {
2714 patch_bytecode(Bytecodes::_fast_cputfield, G3_scratch, G4_scratch, true, byte_no);
2715 }
2716 __ ba(checkVolatile);
2717 __ delayed()->tst(Lscratch);
2718 }
2720 __ bind(notChar);
2721 // cmp(Rflags, stos);
2722 __ br(Assembler::notEqual, false, Assembler::pt, notShort);
2723 __ delayed()->cmp(Rflags, ftos);
2725 // stos (short)
2726 {
2727 __ pop_i();
2728 if (!is_static) pop_and_check_object(Rclass);
2729 __ sth(Otos_i, Rclass, Roffset);
2730 if (!is_static) {
2731 patch_bytecode(Bytecodes::_fast_sputfield, G3_scratch, G4_scratch, true, byte_no);
2732 }
2733 __ ba(checkVolatile);
2734 __ delayed()->tst(Lscratch);
2735 }
2737 __ bind(notShort);
2738 // cmp(Rflags, ftos);
2739 __ br(Assembler::notZero, false, Assembler::pt, notFloat);
2740 __ delayed()->nop();
2742 // ftos
2743 {
2744 __ pop_f();
2745 if (!is_static) pop_and_check_object(Rclass);
2746 __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
2747 if (!is_static) {
2748 patch_bytecode(Bytecodes::_fast_fputfield, G3_scratch, G4_scratch, true, byte_no);
2749 }
2750 __ ba(checkVolatile);
2751 __ delayed()->tst(Lscratch);
2752 }
2754 __ bind(notFloat);
2756 // dtos
2757 {
2758 __ pop_d();
2759 if (!is_static) pop_and_check_object(Rclass);
2760 __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
2761 if (!is_static) {
2762 patch_bytecode(Bytecodes::_fast_dputfield, G3_scratch, G4_scratch, true, byte_no);
2763 }
2764 }
2766 __ bind(checkVolatile);
2767 __ tst(Lscratch);
2769 if (__ membar_has_effect(write_bits)) {
2770 // __ tst(Lscratch); in delay slot
2771 __ br(Assembler::zero, false, Assembler::pt, exit);
2772 __ delayed()->nop();
2773 volatile_barrier(Assembler::StoreLoad);
2774 __ bind(exit);
2775 }
2776 }
2778 void TemplateTable::fast_storefield(TosState state) {
2779 transition(state, vtos);
2780 Register Rcache = G3_scratch;
2781 Register Rclass = Rcache;
2782 Register Roffset= G4_scratch;
2783 Register Rflags = G1_scratch;
2784 ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2786 jvmti_post_fast_field_mod();
2788 __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 1);
2790 Assembler::Membar_mask_bits read_bits =
2791 Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore);
2792 Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad;
2794 Label notVolatile, checkVolatile, exit;
2795 if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
2796 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
2797 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
2798 __ and3(Rflags, Lscratch, Lscratch);
2799 if (__ membar_has_effect(read_bits)) {
2800 __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, notVolatile);
2801 volatile_barrier(read_bits);
2802 __ bind(notVolatile);
2803 }
2804 }
2806 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
2807 pop_and_check_object(Rclass);
2809 switch (bytecode()) {
2810 case Bytecodes::_fast_bputfield: __ stb(Otos_i, Rclass, Roffset); break;
2811 case Bytecodes::_fast_cputfield: /* fall through */
2812 case Bytecodes::_fast_sputfield: __ sth(Otos_i, Rclass, Roffset); break;
2813 case Bytecodes::_fast_iputfield: __ st(Otos_i, Rclass, Roffset); break;
2814 case Bytecodes::_fast_lputfield: __ st_long(Otos_l, Rclass, Roffset); break;
2815 case Bytecodes::_fast_fputfield:
2816 __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
2817 break;
2818 case Bytecodes::_fast_dputfield:
2819 __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
2820 break;
2821 case Bytecodes::_fast_aputfield:
2822 do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
2823 break;
2824 default:
2825 ShouldNotReachHere();
2826 }
2828 if (__ membar_has_effect(write_bits)) {
2829 __ cmp_and_br_short(Lscratch, 0, Assembler::equal, Assembler::pt, exit);
2830 volatile_barrier(Assembler::StoreLoad);
2831 __ bind(exit);
2832 }
2833 }
2836 void TemplateTable::putfield(int byte_no) {
2837 putfield_or_static(byte_no, false);
2838 }
2840 void TemplateTable::putstatic(int byte_no) {
2841 putfield_or_static(byte_no, true);
2842 }
2845 void TemplateTable::fast_xaccess(TosState state) {
2846 transition(vtos, state);
2847 Register Rcache = G3_scratch;
2848 Register Roffset = G4_scratch;
2849 Register Rflags = G4_scratch;
2850 Register Rreceiver = Lscratch;
2852 __ ld_ptr(Llocals, 0, Rreceiver);
2854 // access constant pool cache (is resolved)
2855 __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 2);
2856 __ ld_ptr(Rcache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset(), Roffset);
2857 __ add(Lbcp, 1, Lbcp); // needed to report exception at the correct bcp
2859 __ verify_oop(Rreceiver);
2860 __ null_check(Rreceiver);
2861 if (state == atos) {
2862 __ load_heap_oop(Rreceiver, Roffset, Otos_i);
2863 } else if (state == itos) {
2864 __ ld (Rreceiver, Roffset, Otos_i) ;
2865 } else if (state == ftos) {
2866 __ ldf(FloatRegisterImpl::S, Rreceiver, Roffset, Ftos_f);
2867 } else {
2868 ShouldNotReachHere();
2869 }
2871 Assembler::Membar_mask_bits membar_bits =
2872 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
2873 if (__ membar_has_effect(membar_bits)) {
2875 // Get is_volatile value in Rflags and check if membar is needed
2876 __ ld_ptr(Rcache, ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::flags_offset(), Rflags);
2878 // Test volatile
2879 Label notVolatile;
2880 __ set((1 << ConstantPoolCacheEntry::is_volatile_shift), Lscratch);
2881 __ btst(Rflags, Lscratch);
2882 __ br(Assembler::zero, false, Assembler::pt, notVolatile);
2883 __ delayed()->nop();
2884 volatile_barrier(membar_bits);
2885 __ bind(notVolatile);
2886 }
2888 __ interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
2889 __ sub(Lbcp, 1, Lbcp);
2890 }
2892 //----------------------------------------------------------------------------------------------------
2893 // Calls
2895 void TemplateTable::count_calls(Register method, Register temp) {
2896 // implemented elsewhere
2897 ShouldNotReachHere();
2898 }
2900 void TemplateTable::prepare_invoke(int byte_no,
2901 Register method, // linked method (or i-klass)
2902 Register ra, // return address
2903 Register index, // itable index, MethodType, etc.
2904 Register recv, // if caller wants to see it
2905 Register flags // if caller wants to test it
2906 ) {
2907 // determine flags
2908 const Bytecodes::Code code = bytecode();
2909 const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
2910 const bool is_invokedynamic = code == Bytecodes::_invokedynamic;
2911 const bool is_invokehandle = code == Bytecodes::_invokehandle;
2912 const bool is_invokevirtual = code == Bytecodes::_invokevirtual;
2913 const bool is_invokespecial = code == Bytecodes::_invokespecial;
2914 const bool load_receiver = (recv != noreg);
2915 assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
2916 assert(recv == noreg || recv == O0, "");
2917 assert(flags == noreg || flags == O1, "");
2919 // setup registers & access constant pool cache
2920 if (recv == noreg) recv = O0;
2921 if (flags == noreg) flags = O1;
2922 const Register temp = O2;
2923 assert_different_registers(method, ra, index, recv, flags, temp);
2925 load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
2927 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
2929 // maybe push appendix to arguments
2930 if (is_invokedynamic || is_invokehandle) {
2931 Label L_no_push;
2932 __ set((1 << ConstantPoolCacheEntry::has_appendix_shift), temp);
2933 __ btst(flags, temp);
2934 __ br(Assembler::zero, false, Assembler::pt, L_no_push);
2935 __ delayed()->nop();
2936 // Push the appendix as a trailing parameter.
2937 // This must be done before we get the receiver,
2938 // since the parameter_size includes it.
2939 __ load_resolved_reference_at_index(temp, index);
2940 __ verify_oop(temp);
2941 __ push_ptr(temp); // push appendix (MethodType, CallSite, etc.)
2942 __ bind(L_no_push);
2943 }
2945 // load receiver if needed (after appendix is pushed so parameter size is correct)
2946 if (load_receiver) {
2947 __ and3(flags, ConstantPoolCacheEntry::parameter_size_mask, temp); // get parameter size
2948 __ load_receiver(temp, recv); // __ argument_address uses Gargs but we need Lesp
2949 __ verify_oop(recv);
2950 }
2952 // compute return type
2953 __ srl(flags, ConstantPoolCacheEntry::tos_state_shift, ra);
2954 // Make sure we don't need to mask flags after the above shift
2955 ConstantPoolCacheEntry::verify_tos_state_shift();
2956 // load return address
2957 {
2958 const address table_addr = (is_invokeinterface || is_invokedynamic) ?
2959 (address)Interpreter::return_5_addrs_by_index_table() :
2960 (address)Interpreter::return_3_addrs_by_index_table();
2961 AddressLiteral table(table_addr);
2962 __ set(table, temp);
2963 __ sll(ra, LogBytesPerWord, ra);
2964 __ ld_ptr(Address(temp, ra), ra);
2965 }
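// (return_5 vs return_3 refers to the bytecode length: invokeinterface and
// invokedynamic are 5-byte instructions while the other invokes are 3 bytes,
// so the chosen table advances the bcp correctly on return for each TosState.)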
2966 }
2969 void TemplateTable::generate_vtable_call(Register Rrecv, Register Rindex, Register Rret) {
2970 Register Rtemp = G4_scratch;
2971 Register Rcall = Rindex;
2972 assert_different_registers(Rcall, G5_method, Gargs, Rret);
2974 // get target Method* & entry point
2975 __ lookup_virtual_method(Rrecv, Rindex, G5_method);
2976 __ call_from_interpreter(Rcall, Gargs, Rret);
2977 }
2979 void TemplateTable::invokevirtual(int byte_no) {
2980 transition(vtos, vtos);
2981 assert(byte_no == f2_byte, "use this argument");
2983 Register Rscratch = G3_scratch;
2984 Register Rtemp = G4_scratch;
2985 Register Rret = Lscratch;
2986 Register O0_recv = O0;
2987 Label notFinal;
2989 load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, true, false, false);
2990 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
2992 // Check for vfinal
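// (is_vfinal means the f2 word already holds the target Method*, so the
// vtable dispatch below can be skipped; see invokevfinal_helper.)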
2993 __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), G4_scratch);
2994 __ btst(Rret, G4_scratch);
2995 __ br(Assembler::zero, false, Assembler::pt, notFinal);
2996 __ delayed()->and3(Rret, 0xFF, G4_scratch); // gets number of parameters
2998 patch_bytecode(Bytecodes::_fast_invokevfinal, Rscratch, Rtemp);
3000 invokevfinal_helper(Rscratch, Rret);
3002 __ bind(notFinal);
3004 __ mov(G5_method, Rscratch); // better scratch register
3005 __ load_receiver(G4_scratch, O0_recv); // gets receiverOop
3006 // receiver is in O0_recv
3007 __ verify_oop(O0_recv);
3009 // get return address
3010 AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
3011 __ set(table, Rtemp);
3012 __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret); // get return type
3013 // Make sure we don't need to mask Rret after the above shift
3014 ConstantPoolCacheEntry::verify_tos_state_shift();
3015 __ sll(Rret, LogBytesPerWord, Rret);
3016 __ ld_ptr(Rtemp, Rret, Rret); // get return address
3018 // get receiver klass
3019 __ null_check(O0_recv, oopDesc::klass_offset_in_bytes());
3020 __ load_klass(O0_recv, O0_recv);
3021 __ verify_oop(O0_recv);
3023 __ profile_virtual_call(O0_recv, O4);
3025 generate_vtable_call(O0_recv, Rscratch, Rret);
3026 }
3028 void TemplateTable::fast_invokevfinal(int byte_no) {
3029 transition(vtos, vtos);
3030 assert(byte_no == f2_byte, "use this argument");
3032 load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Lscratch, true,
3033 /*is_invokevfinal*/true, false);
3034 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
3035 invokevfinal_helper(G3_scratch, Lscratch);
3036 }
3038 void TemplateTable::invokevfinal_helper(Register Rscratch, Register Rret) {
3039 Register Rtemp = G4_scratch;
3041 // Load receiver from stack slot
3042 __ lduh(G5_method, in_bytes(Method::size_of_parameters_offset()), G4_scratch);
3043 __ load_receiver(G4_scratch, O0);
3045 // receiver NULL check
3046 __ null_check(O0);
3048 __ profile_final_call(O4);
3050 // get return address
3051 AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
3052 __ set(table, Rtemp);
3053 __ srl(Rret, ConstantPoolCacheEntry::tos_state_shift, Rret); // get return type
3054 // Make sure we don't need to mask Rret after the above shift
3055 ConstantPoolCacheEntry::verify_tos_state_shift();
3056 __ sll(Rret, LogBytesPerWord, Rret);
3057 __ ld_ptr(Rtemp, Rret, Rret); // get return address
3060 // do the call
3061 __ call_from_interpreter(Rscratch, Gargs, Rret);
3062 }
3065 void TemplateTable::invokespecial(int byte_no) {
3066 transition(vtos, vtos);
3067 assert(byte_no == f1_byte, "use this argument");
3069 const Register Rret = Lscratch;
3070 const Register O0_recv = O0;
3071 const Register Rscratch = G3_scratch;
3073 prepare_invoke(byte_no, G5_method, Rret, noreg, O0_recv); // get receiver also for null check
3074 __ null_check(O0_recv);
3076 // do the call
3077 __ profile_call(O4);
3078 __ call_from_interpreter(Rscratch, Gargs, Rret);
3079 }
3082 void TemplateTable::invokestatic(int byte_no) {
3083 transition(vtos, vtos);
3084 assert(byte_no == f1_byte, "use this argument");
3086 const Register Rret = Lscratch;
3087 const Register Rscratch = G3_scratch;
3089 prepare_invoke(byte_no, G5_method, Rret); // get f1 Method*
3091 // do the call
3092 __ profile_call(O4);
3093 __ call_from_interpreter(Rscratch, Gargs, Rret);
3094 }
3096 void TemplateTable::invokeinterface_object_method(Register RKlass,
3097 Register Rcall,
3098 Register Rret,
3099 Register Rflags) {
3100 Register Rscratch = G4_scratch;
3101 Register Rindex = Lscratch;
3103 assert_different_registers(Rscratch, Rindex, Rret);
3105 Label notFinal;
3107 // Check for vfinal
3108 __ set((1 << ConstantPoolCacheEntry::is_vfinal_shift), Rscratch);
3109 __ btst(Rflags, Rscratch);
3110 __ br(Assembler::zero, false, Assembler::pt, notFinal);
3111 __ delayed()->nop();
3113 __ profile_final_call(O4);
3115 // do the call - the index (f2) contains the Method*
3116 assert_different_registers(G5_method, Gargs, Rcall);
3117 __ mov(Rindex, G5_method);
3118 __ call_from_interpreter(Rcall, Gargs, Rret);
3119 __ bind(notFinal);
3121 __ profile_virtual_call(RKlass, O4);
3122 generate_vtable_call(RKlass, Rindex, Rret);
3123 }
3126 void TemplateTable::invokeinterface(int byte_no) {
3127 transition(vtos, vtos);
3128 assert(byte_no == f1_byte, "use this argument");
3130 const Register Rinterface = G1_scratch;
3131 const Register Rret = G3_scratch;
3132 const Register Rindex = Lscratch;
3133 const Register O0_recv = O0;
3134 const Register O1_flags = O1;
3135 const Register O2_Klass = O2;
3136 const Register Rscratch = G4_scratch;
3137 assert_different_registers(Rscratch, G5_method);
3139 prepare_invoke(byte_no, Rinterface, Rret, Rindex, O0_recv, O1_flags);
3141 // get receiver klass
3142 __ null_check(O0_recv, oopDesc::klass_offset_in_bytes());
3143 __ load_klass(O0_recv, O2_Klass);
3145 // Special case of invokeinterface called for virtual method of
3146 // java.lang.Object. See cpCache.cpp for details.
3147 // This code isn't produced by javac, but could be produced by
3148 // another compliant Java compiler.
3149 Label notMethod;
3150 __ set((1 << ConstantPoolCacheEntry::is_forced_virtual_shift), Rscratch);
3151 __ btst(O1_flags, Rscratch);
3152 __ br(Assembler::zero, false, Assembler::pt, notMethod);
3153 __ delayed()->nop();
3155 invokeinterface_object_method(O2_Klass, Rinterface, Rret, O1_flags);
3157 __ bind(notMethod);
3159 __ profile_virtual_call(O2_Klass, O4);
3161 //
3162 // find entry point to call
3163 //
3165 // compute start of first itableOffsetEntry (which is at end of vtable)
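// Sketch of the itable region that follows the vtable (illustrative; see
// klassVtable.hpp for the authoritative layout):
//
//   [ vtable: vtable_length words, rounded to an object-offset boundary ]
//   [ itableOffsetEntry { interface Klass*, offset } ... (NULL sentinel) ]
//   [ itableMethodEntry { Method* } blocks, reached via the offsets above ]
//
// The loop below scans the offset entries for Rinterface and then indexes
// the matching method block with Rindex.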
3166 const int base = InstanceKlass::vtable_start_offset() * wordSize;
3167 Label search;
3168 Register Rtemp = O1_flags;
3170 __ ld(O2_Klass, InstanceKlass::vtable_length_offset() * wordSize, Rtemp);
3171 if (align_object_offset(1) > 1) {
3172 __ round_to(Rtemp, align_object_offset(1));
3173 }
3174 __ sll(Rtemp, LogBytesPerWord, Rtemp); // Rtemp *= wordSize
3175 if (Assembler::is_simm13(base)) {
3176 __ add(Rtemp, base, Rtemp);
3177 } else {
3178 __ set(base, Rscratch);
3179 __ add(Rscratch, Rtemp, Rtemp);
3180 }
3181 __ add(O2_Klass, Rtemp, Rscratch);
3183 __ bind(search);
3185 __ ld_ptr(Rscratch, itableOffsetEntry::interface_offset_in_bytes(), Rtemp);
3186 {
3187 Label ok;
3189 // Check that entry is non-null. Null entries are probably a bytecode
3190 // problem. If the interface isn't implemented by the receiver class,
3191 // the VM should throw IncompatibleClassChangeError. linkResolver checks
3192 // this too but that's only if the entry isn't already resolved, so we
3193 // need to check again.
3194 __ br_notnull_short( Rtemp, Assembler::pt, ok);
3195 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
3196 __ should_not_reach_here();
3197 __ bind(ok);
3198 }
3200 __ cmp(Rinterface, Rtemp);
3201 __ brx(Assembler::notEqual, true, Assembler::pn, search);
3202 __ delayed()->add(Rscratch, itableOffsetEntry::size() * wordSize, Rscratch);
3204 // entry found and Rscratch points to it
3205 __ ld(Rscratch, itableOffsetEntry::offset_offset_in_bytes(), Rscratch);
3207 assert(itableMethodEntry::method_offset_in_bytes() == 0, "adjust instruction below");
3208 __ sll(Rindex, exact_log2(itableMethodEntry::size() * wordSize), Rindex); // Rindex *= sizeof(itableMethodEntry)
3209 __ add(Rscratch, Rindex, Rscratch);
3210 __ ld_ptr(O2_Klass, Rscratch, G5_method);
3212 // Check for abstract method error.
3213 {
3214 Label ok;
3215 __ br_notnull_short(G5_method, Assembler::pt, ok);
3216 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3217 __ should_not_reach_here();
3218 __ bind(ok);
3219 }
3221 Register Rcall = Rinterface;
3222 assert_different_registers(Rcall, G5_method, Gargs, Rret);
3224 __ call_from_interpreter(Rcall, Gargs, Rret);
3225 }
3227 void TemplateTable::invokehandle(int byte_no) {
3228 transition(vtos, vtos);
3229 assert(byte_no == f1_byte, "use this argument");
3231 if (!EnableInvokeDynamic) {
3232 // rewriter does not generate this bytecode
3233 __ should_not_reach_here();
3234 return;
3235 }
3237 const Register Rret = Lscratch;
3238 const Register G4_mtype = G4_scratch; // f1
3239 const Register O0_recv = O0;
3240 const Register Rscratch = G3_scratch;
3242 prepare_invoke(byte_no, G5_method, Rret, G4_mtype, O0_recv);
3243 __ null_check(O0_recv);
3245 // G4: MethodType object (from cpool->resolved_references[])
3246 // G5: MH.invokeExact_MT method (from f2)
3248 // Note: G4_mtype is already pushed (if necessary) by prepare_invoke
3250 // do the call
3251 __ verify_oop(G4_mtype);
3252 __ profile_final_call(O4); // FIXME: profile the LambdaForm also
3253 __ call_from_interpreter(Rscratch, Gargs, Rret);
3254 }
3257 void TemplateTable::invokedynamic(int byte_no) {
3258 transition(vtos, vtos);
3259 assert(byte_no == f1_byte, "use this argument");
3261 if (!EnableInvokeDynamic) {
3262 // We should not encounter this bytecode if !EnableInvokeDynamic.
3263 // The verifier will stop it. However, if we get past the verifier,
3264 // this will stop the thread in a reasonable way, without crashing the JVM.
3265 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3266 InterpreterRuntime::throw_IncompatibleClassChangeError));
3267 // the call_VM checks for exception, so we should never return here.
3268 __ should_not_reach_here();
3269 return;
3270 }
3272 const Register Rret = Lscratch;
3273 const Register G4_callsite = G4_scratch;
3274 const Register Rscratch = G3_scratch;
3276 prepare_invoke(byte_no, G5_method, Rret, G4_callsite);
3278 // G4: CallSite object (from cpool->resolved_references[])
3279 // G5: MH.linkToCallSite method (from f1)
3281 // Note: G4_callsite is already pushed by prepare_invoke
3283 // %%% should make a type profile for any invokedynamic that takes a ref argument
3284 // profile this call
3285 __ profile_call(O4);
3287 // do the call
3288 __ verify_oop(G4_callsite);
3289 __ call_from_interpreter(Rscratch, Gargs, Rret);
3290 }
3293 //----------------------------------------------------------------------------------------------------
3294 // Allocation
3296 void TemplateTable::_new() {
3297 transition(vtos, atos);
3299 Label slow_case;
3300 Label done;
3301 Label initialize_header;
3302 Label initialize_object; // including clearing the fields
3304 Register RallocatedObject = Otos_i;
3305 Register RinstanceKlass = O1;
3306 Register Roffset = O3;
3307 Register Rscratch = O4;
3309 __ get_2_byte_integer_at_bcp(1, Rscratch, Roffset, InterpreterMacroAssembler::Unsigned);
3310 __ get_cpool_and_tags(Rscratch, G3_scratch);
3311 // make sure the class we're about to instantiate has been resolved
3312 // This is done before loading InstanceKlass to be consistent with the order
3313 // in which the constant pool is updated (see ConstantPool::klass_at_put)
3314 __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
3315 __ ldub(G3_scratch, Roffset, G3_scratch);
3316 __ cmp(G3_scratch, JVM_CONSTANT_Class);
3317 __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
3318 __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
3319 // get InstanceKlass
3320 //__ sll(Roffset, LogBytesPerWord, Roffset); // executed in delay slot
3321 __ add(Roffset, sizeof(ConstantPool), Roffset);
3322 __ ld_ptr(Rscratch, Roffset, RinstanceKlass);
3324 // make sure klass is fully initialized:
3325 __ ldub(RinstanceKlass, in_bytes(InstanceKlass::init_state_offset()), G3_scratch);
3326 __ cmp(G3_scratch, InstanceKlass::fully_initialized);
3327 __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
3328 __ delayed()->ld(RinstanceKlass, in_bytes(Klass::layout_helper_offset()), Roffset);
3330 // get instance_size in InstanceKlass (already aligned)
3331 //__ ld(RinstanceKlass, in_bytes(Klass::layout_helper_offset()), Roffset);
3333 // make sure the klass has no finalizer and is not abstract, an interface, or java/lang/Class
3334 __ btst(Klass::_lh_instance_slow_path_bit, Roffset);
3335 __ br(Assembler::notZero, false, Assembler::pn, slow_case);
3336 __ delayed()->nop();
3338 // allocate the instance
3339 // 1) Try to allocate in the TLAB
3340 // 2) if that fails, and the TLAB is not full enough to discard, allocate in the shared Eden
3341 // 3) if the above fails (or is not applicable), go to a slow case
3342 // (creates a new TLAB, etc.)
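// The TLAB fast path is a simple bump of the thread-local top pointer; a
// C-style sketch of what the assembly below does (accessor names illustrative):
//
//   HeapWord* obj     = thread->tlab_top();
//   HeapWord* new_top = obj + size_in_bytes;
//   if (new_top <= thread->tlab_end()) {
//     thread->set_tlab_top(new_top);     // fits: no CAS needed
//   } else {
//     // try the shared Eden (CAS loop) or fall into the slow case
//   }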
3344 const bool allow_shared_alloc =
3345 Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
3347 if (UseTLAB) {
3348 Register RoldTopValue = RallocatedObject;
3349 Register RtlabWasteLimitValue = G3_scratch;
3350 Register RnewTopValue = G1_scratch;
3351 Register RendValue = Rscratch;
3352 Register RfreeValue = RnewTopValue;
3354 // check if we can allocate in the TLAB
3355 __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), RoldTopValue); // sets up RallocatedObject
3356 __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), RendValue);
3357 __ add(RoldTopValue, Roffset, RnewTopValue);
3359 // if there is enough space, we do not CAS and do not clear
3360 __ cmp(RnewTopValue, RendValue);
3361 if (ZeroTLAB) {
3362 // the fields have already been cleared
3363 __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_header);
3364 } else {
3365 // initialize both the header and fields
3366 __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_object);
3367 }
3368 __ delayed()->st_ptr(RnewTopValue, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
3370 if (allow_shared_alloc) {
3371 // Check if tlab should be discarded (refill_waste_limit >= free)
3372 __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), RtlabWasteLimitValue);
3373 __ sub(RendValue, RoldTopValue, RfreeValue);
3374 #ifdef _LP64
3375 __ srlx(RfreeValue, LogHeapWordSize, RfreeValue);
3376 #else
3377 __ srl(RfreeValue, LogHeapWordSize, RfreeValue);
3378 #endif
3379 __ cmp_and_brx_short(RtlabWasteLimitValue, RfreeValue, Assembler::greaterEqualUnsigned, Assembler::pt, slow_case); // tlab waste is small
3381 // increment waste limit to prevent getting stuck on this slow path
3382 __ add(RtlabWasteLimitValue, ThreadLocalAllocBuffer::refill_waste_limit_increment(), RtlabWasteLimitValue);
3383 __ st_ptr(RtlabWasteLimitValue, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
3384 } else {
3385 // No allocation in the shared Eden.
3386 __ ba_short(slow_case);
3387 }
3388 }
3390 // Allocation in the shared Eden
3391 if (allow_shared_alloc) {
3392 Register RoldTopValue = G1_scratch;
3393 Register RtopAddr = G3_scratch;
3394 Register RnewTopValue = RallocatedObject;
3395 Register RendValue = Rscratch;
3397 __ set((intptr_t)Universe::heap()->top_addr(), RtopAddr);
3399 Label retry;
3400 __ bind(retry);
3401 __ set((intptr_t)Universe::heap()->end_addr(), RendValue);
3402 __ ld_ptr(RendValue, 0, RendValue);
3403 __ ld_ptr(RtopAddr, 0, RoldTopValue);
3404 __ add(RoldTopValue, Roffset, RnewTopValue);
3406 // RnewTopValue contains the top address after the new object
3407 // has been allocated.
3408 __ cmp_and_brx_short(RnewTopValue, RendValue, Assembler::greaterUnsigned, Assembler::pn, slow_case);
3410 __ casx_under_lock(RtopAddr, RoldTopValue, RnewTopValue,
3411 VM_Version::v9_instructions_work() ? NULL :
3412 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
3414 // if someone beat us on the allocation, try again, otherwise continue
3415 __ cmp_and_brx_short(RoldTopValue, RnewTopValue, Assembler::notEqual, Assembler::pn, retry);
3417 // bump total bytes allocated by this thread
3418 // RoldTopValue and RtopAddr are dead, so can use G1 and G3
3419 __ incr_allocated_bytes(Roffset, G1_scratch, G3_scratch);
3420 }
3422 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
3423 // clear object fields
3424 __ bind(initialize_object);
3425 __ deccc(Roffset, sizeof(oopDesc));
3426 __ br(Assembler::zero, false, Assembler::pt, initialize_header);
3427 __ delayed()->add(RallocatedObject, sizeof(oopDesc), G3_scratch);
3429 // initialize remaining object fields
3430 if (UseBlockZeroing) {
3431 // Use BIS for zeroing
3432 __ bis_zeroing(G3_scratch, Roffset, G1_scratch, initialize_header);
3433 } else {
3434 Label loop;
3435 __ subcc(Roffset, wordSize, Roffset);
3436 __ bind(loop);
3437 //__ subcc(Roffset, wordSize, Roffset); // executed above loop or in delay slot
3438 __ st_ptr(G0, G3_scratch, Roffset);
3439 __ br(Assembler::notEqual, false, Assembler::pt, loop);
3440 __ delayed()->subcc(Roffset, wordSize, Roffset);
3441 }
3442 __ ba_short(initialize_header);
3443 }
3445 // slow case
3446 __ bind(slow_case);
3447 __ get_2_byte_integer_at_bcp(1, G3_scratch, O2, InterpreterMacroAssembler::Unsigned);
3448 __ get_constant_pool(O1);
3450 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), O1, O2);
3452 __ ba_short(done);
3454 // Initialize the header: mark, klass
3455 __ bind(initialize_header);
3457 if (UseBiasedLocking) {
3458 __ ld_ptr(RinstanceKlass, in_bytes(Klass::prototype_header_offset()), G4_scratch);
3459 } else {
3460 __ set((intptr_t)markOopDesc::prototype(), G4_scratch);
3461 }
3462 __ st_ptr(G4_scratch, RallocatedObject, oopDesc::mark_offset_in_bytes()); // mark
3463 __ store_klass_gap(G0, RallocatedObject); // klass gap if compressed
3464 __ store_klass(RinstanceKlass, RallocatedObject); // klass (last for cms)
3466 {
3467 SkipIfEqual skip_if(
3468 _masm, G4_scratch, &DTraceAllocProbes, Assembler::zero);
3469 // Trigger dtrace event
3470 __ push(atos);
3471 __ call_VM_leaf(noreg,
3472 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), O0);
3473 __ pop(atos);
3474 }
3476 // continue
3477 __ bind(done);
3478 }
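// To finish the allocation, the two header words are written: the mark
// word gets a prototype (the klass's prototype header under biased
// locking, the neutral markOopDesc::prototype() otherwise) and the klass
// reference is stored last so a concurrent collector never observes a
// klass on an object whose body is still uninitialized. A rough sketch
// with hypothetical types:
#if 0   // illustrative sketch only
#include <cstdint>

struct KlassSketch { uintptr_t prototype_header; };

struct OopSketch {
  volatile uintptr_t    mark;    // header word 1
  KlassSketch* volatile klass;   // header word 2 (narrow klass + gap word
                                 // in the real compressed-oops layout)
};

static void initialize_header(OopSketch* obj, KlassSketch* k,
                              bool use_biased_locking,
                              uintptr_t neutral_prototype) {
  obj->mark  = use_biased_locking ? k->prototype_header : neutral_prototype;
  obj->klass = k;                // stored last, for the sake of CMS
}
#endif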
3482 void TemplateTable::newarray() {
3483 transition(itos, atos);
3484 __ ldub(Lbcp, 1, O1);
3485 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), O1, Otos_i);
3486 }
3489 void TemplateTable::anewarray() {
3490 transition(itos, atos);
3491 __ get_constant_pool(O1);
3492 __ get_2_byte_integer_at_bcp(1, G4_scratch, O2, InterpreterMacroAssembler::Unsigned);
3493 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), O1, O2, Otos_i);
3494 }
3497 void TemplateTable::arraylength() {
3498 transition(atos, itos);
3499 Label ok;
3500 __ verify_oop(Otos_i);
3501 __ tst(Otos_i);
3502 __ throw_if_not_1_x( Assembler::notZero, ok );
3503 __ delayed()->ld(Otos_i, arrayOopDesc::length_offset_in_bytes(), Otos_i);
3504 __ throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
3505 }
3508 void TemplateTable::checkcast() {
3509 transition(atos, atos);
3510 Label done, is_null, quicked, cast_ok, resolved;
3511 Register Roffset = G1_scratch;
3512 Register RobjKlass = O5;
3513 Register RspecifiedKlass = O4;
3515 // Check for casting a NULL
3516 __ br_null_short(Otos_i, Assembler::pn, is_null);
3518 // Get value klass in RobjKlass
3519 __ load_klass(Otos_i, RobjKlass); // get value klass
3521 // Get constant pool tag
3522 __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);
3524 // See if the checkcast has been quickened
3525 __ get_cpool_and_tags(Lscratch, G3_scratch);
3526 __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
3527 __ ldub(G3_scratch, Roffset, G3_scratch);
3528 __ cmp(G3_scratch, JVM_CONSTANT_Class);
3529 __ br(Assembler::equal, true, Assembler::pt, quicked);
3530 __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
3532 __ push_ptr(); // save receiver for result, and for GC
3533 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3534 __ get_vm_result_2(RspecifiedKlass);
3535 __ pop_ptr(Otos_i, G3_scratch); // restore receiver
3537 __ ba_short(resolved);
3539 // Extract target class from constant pool
3540 __ bind(quicked);
3541 __ add(Roffset, sizeof(ConstantPool), Roffset);
3542 __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
3543 __ bind(resolved);
3544 __ load_klass(Otos_i, RobjKlass); // get value klass
3546 // Generate a fast subtype check. Branch to cast_ok if no
3547 // failure. Throw exception if failure.
3548 __ gen_subtype_check( RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, cast_ok );
3550 // Not a subtype, so we must throw an exception
3551 __ throw_if_not_x( Assembler::never, Interpreter::_throw_ClassCastException_entry, G3_scratch );
3553 __ bind(cast_ok);
3555 if (ProfileInterpreter) {
3556 __ ba_short(done);
3557 }
3558 __ bind(is_null);
3559 __ profile_null_seen(G3_scratch);
3560 __ bind(done);
3561 }
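// Taken together, the code above gives checkcast these semantics: a null
// reference always passes; otherwise the target klass comes either from
// the quickened constant-pool entry or from a runtime resolution, and a
// failed subtype check raises ClassCastException. A rough C++ sketch with
// hypothetical helper declarations:
#if 0   // illustrative sketch only
struct KlassSketch2;
extern bool is_subtype_of(const KlassSketch2* sub, const KlassSketch2* super);
extern KlassSketch2* quickened_klass_at(int cp_index);  // fast path
extern KlassSketch2* resolve_klass_at(int cp_index);    // quicken_io_cc path
extern KlassSketch2* klass_of(const void* obj);
[[noreturn]] void throw_class_cast_exception();

static const void* checkcast_sketch(const void* obj, int cp_index,
                                    bool quickened) {
  if (obj == NULL) return obj;                  // null casts to any type
  KlassSketch2* target = quickened ? quickened_klass_at(cp_index)
                                   : resolve_klass_at(cp_index);
  if (!is_subtype_of(klass_of(obj), target)) {
    throw_class_cast_exception();
  }
  return obj;                                   // atos in, atos out
}
#endif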
3564 void TemplateTable::instanceof() {
3565 Label done, is_null, quicked, resolved;
3566 transition(atos, itos);
3567 Register Roffset = G1_scratch;
3568 Register RobjKlass = O5;
3569 Register RspecifiedKlass = O4;
3571 // Check for casting a NULL
3572 __ br_null_short(Otos_i, Assembler::pt, is_null);
3574 // Get value klass in RobjKlass
3575 __ load_klass(Otos_i, RobjKlass); // get value klass
3577 // Get constant pool tag
3578 __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);
3580 // See if the instanceof has been quickened
3581 __ get_cpool_and_tags(Lscratch, G3_scratch);
3582 __ add(G3_scratch, Array<u1>::base_offset_in_bytes(), G3_scratch);
3583 __ ldub(G3_scratch, Roffset, G3_scratch);
3584 __ cmp(G3_scratch, JVM_CONSTANT_Class);
3585 __ br(Assembler::equal, true, Assembler::pt, quicked);
3586 __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
3588 __ push_ptr(); // save receiver for result, and for GC
3589 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3590 __ get_vm_result_2(RspecifiedKlass);
3591 __ pop_ptr(Otos_i, G3_scratch); // restore receiver
3593 __ ba_short(resolved);
3595 // Extract target class from constant pool
3596 __ bind(quicked);
3597 __ add(Roffset, sizeof(ConstantPool), Roffset);
3598 __ get_constant_pool(Lscratch);
3599 __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
3600 __ bind(resolved);
3601 __ load_klass(Otos_i, RobjKlass); // get value klass
3603 // Generate a fast subtype check. Branch to done if no
3604 // failure. Return 0 if failure.
3605 __ or3(G0, 1, Otos_i); // set result assuming quick tests succeed
3606 __ gen_subtype_check( RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, done );
3607 // Not a subtype; return 0;
3608 __ clr( Otos_i );
3610 if (ProfileInterpreter) {
3611 __ ba_short(done);
3612 }
3613 __ bind(is_null);
3614 __ profile_null_seen(G3_scratch);
3615 __ bind(done);
3616 }
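// instanceof reuses checkcast's resolution and subtype machinery but
// yields an int instead of trapping: Otos_i is preloaded with 1 and
// cleared on a failed subtype check, and a null receiver produces 0.
// A rough sketch, with the same hypothetical helpers as the checkcast
// sketch above:
#if 0   // illustrative sketch only
struct KlassSketch2;
extern bool is_subtype_of(const KlassSketch2* sub, const KlassSketch2* super);
extern KlassSketch2* quickened_klass_at(int cp_index);
extern KlassSketch2* resolve_klass_at(int cp_index);
extern KlassSketch2* klass_of(const void* obj);

static int instanceof_sketch(const void* obj, int cp_index, bool quickened) {
  if (obj == NULL) return 0;            // null is an instance of nothing
  KlassSketch2* target = quickened ? quickened_klass_at(cp_index)
                                   : resolve_klass_at(cp_index);
  return is_subtype_of(klass_of(obj), target) ? 1 : 0;
}
#endif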
3618 void TemplateTable::_breakpoint() {
3620 // Note: We get here even if we are single stepping.
3621 // jbug insists on setting breakpoints at every bytecode
3622 // even if we are in single step mode.
3624 transition(vtos, vtos);
3625 // get the unpatched byte code
3626 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), Lmethod, Lbcp);
3627 __ mov(O0, Lbyte_code);
3629 // post the breakpoint event
3630 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), Lmethod, Lbcp);
3632 // complete the execution of original bytecode
3633 __ dispatch_normal(vtos);
3634 }
3637 //----------------------------------------------------------------------------------------------------
3638 // Exceptions
3640 void TemplateTable::athrow() {
3641 transition(atos, vtos);
3643 // This works because the exception is cached in Otos_i, which is the same
3644 // register as O0, which is what throw_exception_entry expects.
3645 assert(Otos_i == Oexception, "see explanation above");
3647 __ verify_oop(Otos_i);
3648 __ null_check(Otos_i);
3649 __ throw_if_not_x(Assembler::never, Interpreter::throw_exception_entry(), G3_scratch);
3650 }
3653 //----------------------------------------------------------------------------------------------------
3654 // Synchronization
3657 // See frame_sparc.hpp for monitor block layout.
3658 // Monitor elements are dynamically allocated by growing the stack as needed.
3660 void TemplateTable::monitorenter() {
3661 transition(atos, vtos);
3662 __ verify_oop(Otos_i);
3663 // Try to acquire a lock on the object
3664 // Repeat until we succeed (i.e., until
3665 // monitorenter returns true).
3667 { Label ok;
3668 __ tst(Otos_i);
3669 __ throw_if_not_1_x( Assembler::notZero, ok);
3670 __ delayed()->mov(Otos_i, Lscratch); // save obj
3671 __ throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
3672 }
3674 assert(O0 == Otos_i, "Be sure where the object to lock is");
3676 // find a free slot in the monitor block
3679 // initialize entry pointer
3680 __ clr(O1); // points to free slot or NULL
3682 {
3683 Label entry, loop, exit;
3684 __ add( __ top_most_monitor(), O2 ); // last one to check
3685 __ ba( entry );
3686 __ delayed()->mov( Lmonitors, O3 ); // first one to check
3689 __ bind( loop );
3691 __ verify_oop(O4); // verify each monitor's oop
3692 __ tst(O4); // is this entry unused?
3693 if (VM_Version::v9_instructions_work())
3694 __ movcc( Assembler::zero, false, Assembler::ptr_cc, O3, O1);
3695 else {
3696 Label L;
3697 __ br( Assembler::zero, true, Assembler::pn, L );
3698 __ delayed()->mov(O3, O1); // remember this entry if it is unused
3699 __ bind(L);
3700 }
3702 __ cmp(O4, O0); // check if current entry is for same object
3703 __ brx( Assembler::equal, false, Assembler::pn, exit );
3704 __ delayed()->inc( O3, frame::interpreter_frame_monitor_size() * wordSize ); // check next one
3706 __ bind( entry );
3708 __ cmp( O3, O2 );
3709 __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, loop );
3710 __ delayed()->ld_ptr(O3, BasicObjectLock::obj_offset_in_bytes(), O4);
3712 __ bind( exit );
3713 }
3715 { Label allocated;
3717 // found free slot?
3718 __ br_notnull_short(O1, Assembler::pn, allocated);
3720 __ add_monitor_to_stack( false, O2, O3 );
3721 __ mov(Lmonitors, O1);
3723 __ bind(allocated);
3724 }
3726 // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
3727 // The object has already been popped from the stack, so the expression stack looks correct.
3728 __ inc(Lbcp);
3730 __ st_ptr(O0, O1, BasicObjectLock::obj_offset_in_bytes()); // store object
3731 __ lock_object(O1, O0);
3733 // check if there's enough space on the stack for the monitors after locking
3734 __ generate_stack_overflow_check(0);
3736 // The bcp has already been incremented. Just need to dispatch to next instruction.
3737 __ dispatch_next(vtos);
3738 }
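// The scan above walks the in-frame monitor block from the most recent
// entry (Lmonitors) toward the oldest, remembering free entries in O1 and
// stopping early once it sees an entry for this same object; if nothing
// is free, a new slot is grown on the stack. A rough sketch over a
// hypothetical array view of the monitor block:
#if 0   // illustrative sketch only
#include <cstddef>

struct BasicLockSketch { void* obj; };  // displaced-header word elided

static BasicLockSketch* find_monitor_slot(BasicLockSketch* monitors,
                                          size_t count, void* obj) {
  BasicLockSketch* free_slot = NULL;    // O1: points to free slot or NULL
  for (size_t i = 0; i < count; i++) {  // most recent entry first
    if (monitors[i].obj == NULL) {
      free_slot = &monitors[i];         // remember this entry if unused
    }
    if (monitors[i].obj == obj) {
      break;                            // same object: stop scanning
    }
  }
  return free_slot;                     // NULL: caller must grow a new slot
}
#endif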
3741 void TemplateTable::monitorexit() {
3742 transition(atos, vtos);
3743 __ verify_oop(Otos_i);
3744 __ tst(Otos_i);
3745 __ throw_if_not_x( Assembler::notZero, Interpreter::_throw_NullPointerException_entry, G3_scratch );
3747 assert(O0 == Otos_i, "just checking");
3749 { Label entry, loop, found;
3750 __ add( __ top_most_monitor(), O2 ); // last one to check
3751 __ ba(entry);
3752 // Use Lscratch to hold the monitor element to check; start with the most recent monitor.
3753 // By using a local register it survives the call to the C routine.
3754 __ delayed()->mov( Lmonitors, Lscratch );
3756 __ bind( loop );
3758 __ verify_oop(O4); // verify each monitor's oop
3759 __ cmp(O4, O0); // check if current entry is for desired object
3760 __ brx( Assembler::equal, true, Assembler::pt, found );
3761 __ delayed()->mov(Lscratch, O1); // pass found entry as argument to monitorexit
3763 __ inc( Lscratch, frame::interpreter_frame_monitor_size() * wordSize ); // advance to next
3765 __ bind( entry );
3767 __ cmp( Lscratch, O2 );
3768 __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, loop );
3769 __ delayed()->ld_ptr(Lscratch, BasicObjectLock::obj_offset_in_bytes(), O4);
3771 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
3772 __ should_not_reach_here();
3774 __ bind(found);
3775 }
3776 __ unlock_object(O1);
3777 }
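// monitorexit performs the symmetric scan: find the most recent entry
// whose obj field matches, hand it to the unlock routine, and raise
// IllegalMonitorStateException if the scan runs off the end of the
// monitor block. A rough sketch, reusing the hypothetical shape from the
// monitorenter sketch:
#if 0   // illustrative sketch only
#include <cstddef>

struct BasicLockSketch { void* obj; };
extern void unlock_object(BasicLockSketch* entry);
[[noreturn]] void throw_illegal_monitor_state_exception();

static void monitorexit_sketch(BasicLockSketch* monitors, size_t count,
                               void* obj) {
  for (size_t i = 0; i < count; i++) {  // most recent entry first
    if (monitors[i].obj == obj) {
      unlock_object(&monitors[i]);      // O1 in the generated code
      return;
    }
  }
  throw_illegal_monitor_state_exception();  // not locked by this frame
}
#endif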
3780 //----------------------------------------------------------------------------------------------------
3781 // Wide instructions
3783 void TemplateTable::wide() {
3784 transition(vtos, vtos);
3785 __ ldub(Lbcp, 1, G3_scratch); // get next bc
3786 __ sll(G3_scratch, LogBytesPerWord, G3_scratch);
3787 AddressLiteral ep(Interpreter::_wentry_point);
3788 __ set(ep, G4_scratch);
3789 __ ld_ptr(G4_scratch, G3_scratch, G3_scratch);
3790 __ jmp(G3_scratch, G0);
3791 __ delayed()->nop();
3792 // Note: the Lbcp increment step is part of the individual wide bytecode implementations
3793 }
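// wide dispatch is one table lookup: the next bytecode indexes the
// parallel entry-point table Interpreter::_wentry_point (the sll by
// LogBytesPerWord scales the bytecode into a pointer offset) and control
// transfers there; each wide handler advances Lbcp itself. A rough sketch
// with a hypothetical handler table:
#if 0   // illustrative sketch only
typedef void (*WideHandler)(const unsigned char* bcp);
extern WideHandler wentry_point[256];   // stand-in for _wentry_point

static void dispatch_wide(const unsigned char* bcp) {
  unsigned char next_bc = bcp[1];       // the bytecode being widened
  wentry_point[next_bc](bcp);           // jmp; the handler bumps bcp itself
}
#endif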
3796 //----------------------------------------------------------------------------------------------------
3797 // Multi arrays
3799 void TemplateTable::multianewarray() {
3800 transition(vtos, atos);
3801 // put ndims * stackElementSize into Lscratch
3802 __ ldub( Lbcp, 3, Lscratch);
3803 __ sll( Lscratch, Interpreter::logStackElementSize, Lscratch);
3804 // Lesp points past last_dim, so set O1 to the first_dim address
3805 __ add( Lesp, Lscratch, O1);
3806 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), O1);
3807 __ add( Lesp, Lscratch, Lesp); // pop all dimensions off the stack
3808 }
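// multianewarray reads the dimension count from the operand byte at
// bcp + 3, points the runtime at the first (outermost) dimension on the
// expression stack, and pops all the dimension words afterwards. Since
// the expression stack grows toward lower addresses and Lesp points past
// the last dimension, the first dimension sits ndims slots above it.
// A rough sketch of that arithmetic with hypothetical names:
#if 0   // illustrative sketch only
#include <cstddef>
#include <cstdint>

extern void* runtime_multianewarray(intptr_t* first_dim_addr);

static void* multianewarray_sketch(const unsigned char* bcp, intptr_t*& esp) {
  size_t ndims = bcp[3];                // operand byte at bcp + 3
  intptr_t* first_dim = esp + ndims;    // O1 in the generated code
  void* result = runtime_multianewarray(first_dim);
  esp += ndims;                         // pop all dimensions off the stack
  return result;
}
#endif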
3809 #endif /* !CC_INTERP */