Tue, 12 Oct 2010 23:51:20 -0700
6991512: G1 barriers fail with 64bit C1
Summary: Fix a compare-and-swap intrinsic problem with G1 post-barriers and an issue with branch ranges in G1 stubs on sparc
Reviewed-by: never, kvn
/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "incls/_precompiled.incl"
#include "incls/_templateTable_sparc.cpp.incl"

#ifndef CC_INTERP
#define __ _masm->

// Misc helpers
// Do an oop store like *(base + index + offset) = val
// index can be noreg.
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Register base,
                         Register index,
                         int offset,
                         Register val,
                         Register tmp,
                         BarrierSet::Name barrier,
                         bool precise) {
  assert(tmp != val && tmp != base && tmp != index, "register collision");
  assert(index == noreg || offset == 0, "only one offset");
  switch (barrier) {
#ifndef SERIALGC
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      {
        __ g1_write_barrier_pre(base, index, offset, tmp, /*preserve_o_regs*/true);
        if (index == noreg) {
          assert(Assembler::is_simm13(offset), "fix this code");
          __ store_heap_oop(val, base, offset);
        } else {
          __ store_heap_oop(val, base, index);
        }

        // No need for post barrier if storing NULL
        if (val != G0) {
          if (precise) {
            if (index == noreg) {
              __ add(base, offset, base);
            } else {
              __ add(base, index, base);
            }
          }
          __ g1_write_barrier_post(base, val, tmp);
        }
      }
      break;
#endif // SERIALGC
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      {
        if (index == noreg) {
          assert(Assembler::is_simm13(offset), "fix this code");
          __ store_heap_oop(val, base, offset);
        } else {
          __ store_heap_oop(val, base, index);
        }
        // No need for post barrier if storing NULL
        if (val != G0) {
          if (precise) {
            if (index == noreg) {
              __ add(base, offset, base);
            } else {
              __ add(base, index, base);
            }
          }
          __ card_write_barrier_post(base, val, tmp);
        }
      }
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      ShouldNotReachHere();
      break;
    default:
      ShouldNotReachHere();
  }
}
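
// Example usage (see aastore below):
//   do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT),
//                Otos_i, G3_scratch, _bs->kind(), true);
// Note that a null store (val == G0) still runs the G1 SATB pre-barrier, since
// the overwritten value must be enqueued, but both barrier sets skip the
// post-barrier for null stores.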

//----------------------------------------------------------------------------------------------------
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // (none)
}


//----------------------------------------------------------------------------------------------------
// Condition conversion
Assembler::Condition ccNot(TemplateTable::Condition cc) {
  switch (cc) {
    case TemplateTable::equal        : return Assembler::notEqual;
    case TemplateTable::not_equal    : return Assembler::equal;
    case TemplateTable::less         : return Assembler::greaterEqual;
    case TemplateTable::less_equal   : return Assembler::greater;
    case TemplateTable::greater      : return Assembler::lessEqual;
    case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}
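
// Example: ccNot(TemplateTable::less) == Assembler::greaterEqual. The branch
// templates fall through on the Java condition and branch when it fails, so
// if_0cmp and friends pass the negated condition to if_cmp (see below).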

//----------------------------------------------------------------------------------------------------
// Miscellaneous helper routines


Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(Lbcp, offset);
}

void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register Rbyte_code,
                                   Register Rscratch,
                                   bool load_bc_into_scratch /*=true*/) {
  // With sharing on, may need to test methodOop flag.
  if (!RewriteBytecodes) return;
  if (load_bc_into_scratch) __ set(bc, Rbyte_code);
  Label patch_done;
  if (JvmtiExport::can_post_breakpoint()) {
    Label fast_patch;
    __ ldub(at_bcp(0), Rscratch);
    __ cmp(Rscratch, Bytecodes::_breakpoint);
    __ br(Assembler::notEqual, false, Assembler::pt, fast_patch);
    __ delayed()->nop();  // don't bother to hoist the stb here
    // perform the quickening, slowly, in the bowels of the breakpoint table
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), Lmethod, Lbcp, Rbyte_code);
    __ ba(false, patch_done);
    __ delayed()->nop();
    __ bind(fast_patch);
  }
#ifdef ASSERT
  Bytecodes::Code orig_bytecode = Bytecodes::java_code(bc);
  Label okay;
  __ ldub(at_bcp(0), Rscratch);
  __ cmp(Rscratch, orig_bytecode);
  __ br(Assembler::equal, false, Assembler::pt, okay);
  __ delayed()->cmp(Rscratch, Rbyte_code);
  __ br(Assembler::equal, false, Assembler::pt, okay);
  __ delayed()->nop();
  __ stop("Rewriting wrong bytecode location");
  __ bind(okay);
#endif
  __ stb(Rbyte_code, at_bcp(0));
  __ bind(patch_done);
}
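
// Example: aload_0 below patches itself to _fast_aload_0 (or a pair bytecode)
// by loading the fast code into G4_scratch and calling
//   patch_bytecode(Bytecodes::_aload_0, G4_scratch, G3_scratch, false);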

//----------------------------------------------------------------------------------------------------
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ clr(Otos_i);
}


void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  __ set(value, Otos_i);
}


void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  assert(value >= 0, "check this code");
#ifdef _LP64
  __ set(value, Otos_l);
#else
  __ set(value, Otos_l2);
  __ clr(Otos_l1);
#endif
}


void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float zero = 0.0, one = 1.0, two = 2.0;
  float* p;
  switch (value) {
    default: ShouldNotReachHere();
    case 0:  p = &zero;  break;
    case 1:  p = &one;   break;
    case 2:  p = &two;   break;
  }
  AddressLiteral a(p);
  __ sethi(a, G3_scratch);
  __ ldf(FloatRegisterImpl::S, G3_scratch, a.low10(), Ftos_f);
}


void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double zero = 0.0, one = 1.0;
  double* p;
  switch (value) {
    default: ShouldNotReachHere();
    case 0:  p = &zero;  break;
    case 1:  p = &one;   break;
  }
  AddressLiteral a(p);
  __ sethi(a, G3_scratch);
  __ ldf(FloatRegisterImpl::D, G3_scratch, a.low10(), Ftos_d);
}

// %%%%% Should factor most snippet templates across platforms

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ ldsb(at_bcp(1), Otos_i);
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ get_2_byte_integer_at_bcp(1, G3_scratch, Otos_i, InterpreterMacroAssembler::Signed);
}

void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Label call_ldc, notInt, notString, notClass, exit;

  if (wide) {
    __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
  } else {
    __ ldub(Lbcp, 1, O1);
  }
  __ get_cpool_and_tags(O0, O2);

  const int base_offset = constantPoolOopDesc::header_size() * wordSize;
  const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;

  // get type from tags
  __ add(O2, tags_offset, O2);
  __ ldub(O2, O1, O2);
  __ cmp(O2, JVM_CONSTANT_UnresolvedString);    // unresolved string? If so, must resolve
  __ brx(Assembler::equal, true, Assembler::pt, call_ldc);
  __ delayed()->nop();

  __ cmp(O2, JVM_CONSTANT_UnresolvedClass);     // unresolved class? If so, must resolve
  __ brx(Assembler::equal, true, Assembler::pt, call_ldc);
  __ delayed()->nop();

  __ cmp(O2, JVM_CONSTANT_UnresolvedClassInError);    // unresolved class in error state
  __ brx(Assembler::equal, true, Assembler::pn, call_ldc);
  __ delayed()->nop();

  __ cmp(O2, JVM_CONSTANT_Class);    // need to call vm to get java mirror of the class
  __ brx(Assembler::notEqual, true, Assembler::pt, notClass);
  __ delayed()->add(O0, base_offset, O0);

  __ bind(call_ldc);
  __ set(wide, O1);
  call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), O1);
  __ push(atos);
  __ ba(false, exit);
  __ delayed()->nop();

  __ bind(notClass);
  // __ add(O0, base_offset, O0);
  __ sll(O1, LogBytesPerWord, O1);
  __ cmp(O2, JVM_CONSTANT_Integer);
  __ brx(Assembler::notEqual, true, Assembler::pt, notInt);
  __ delayed()->cmp(O2, JVM_CONSTANT_String);
  __ ld(O0, O1, Otos_i);
  __ push(itos);
  __ ba(false, exit);
  __ delayed()->nop();

  __ bind(notInt);
  // __ cmp(O2, JVM_CONSTANT_String);
  __ brx(Assembler::notEqual, true, Assembler::pt, notString);
  __ delayed()->ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
  __ ld_ptr(O0, O1, Otos_i);
  __ verify_oop(Otos_i);
  __ push(atos);
  __ ba(false, exit);
  __ delayed()->nop();

  __ bind(notString);
  // __ ldf(FloatRegisterImpl::S, O0, O1, Ftos_f);
  __ push(ftos);

  __ bind(exit);
}
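
// Dispatch summary for ldc: unresolved String/Class tags (and resolved Class,
// which needs its java mirror) go through the InterpreterRuntime::ldc call;
// Integer pushes itos, String pushes atos, and the remaining case (Float,
// already loaded in the delay slot above) pushes ftos.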

// Fast path for caching oop constants.
// %%% We should use this to handle Class and String constants also.
// %%% It will simplify the ldc/primitive path considerably.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  if (!EnableMethodHandles) {
    // We should not encounter this bytecode if !EnableMethodHandles.
    // The verifier will stop it.  However, if we get past the verifier,
    // this will stop the thread in a reasonable way, without crashing the JVM.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                      InterpreterRuntime::throw_IncompatibleClassChangeError));
    // the call_VM checks for exception, so we should never return here.
    __ should_not_reach_here();
    return;
  }

  Register Rcache = G3_scratch;
  Register Rscratch = G4_scratch;

  resolve_cache_and_index(f1_oop, Otos_i, Rcache, Rscratch, wide ? sizeof(u2) : sizeof(u1));

  __ verify_oop(Otos_i);
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label retry, resolved, Long, exit;

  __ bind(retry);
  __ get_2_byte_integer_at_bcp(1, G3_scratch, O1, InterpreterMacroAssembler::Unsigned);
  __ get_cpool_and_tags(O0, O2);

  const int base_offset = constantPoolOopDesc::header_size() * wordSize;
  const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
  // get type from tags
  __ add(O2, tags_offset, O2);
  __ ldub(O2, O1, O2);

  __ sll(O1, LogBytesPerWord, O1);
  __ add(O0, O1, G3_scratch);

  __ cmp(O2, JVM_CONSTANT_Double);
  __ brx(Assembler::notEqual, false, Assembler::pt, Long);
  __ delayed()->nop();
  // A double can be placed at word-aligned locations in the constant pool.
  // Check out Conversions.java for an example.
  // Also constantPoolOopDesc::header_size() is 20, which makes it very difficult
  // to double-align double on the constant pool.  SG, 11/7/97
#ifdef _LP64
  __ ldf(FloatRegisterImpl::D, G3_scratch, base_offset, Ftos_d);
#else
  FloatRegister f = Ftos_d;
  __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset, f);
  __ ldf(FloatRegisterImpl::S, G3_scratch, base_offset + sizeof(jdouble)/2,
         f->successor());
#endif
  __ push(dtos);
  __ ba(false, exit);
  __ delayed()->nop();

  __ bind(Long);
#ifdef _LP64
  __ ldx(G3_scratch, base_offset, Otos_l);
#else
  __ ld(G3_scratch, base_offset, Otos_l);
  __ ld(G3_scratch, base_offset + sizeof(jlong)/2, Otos_l->successor());
#endif
  __ push(ltos);

  __ bind(exit);
}

void TemplateTable::locals_index(Register reg, int offset) {
  __ ldub(at_bcp(offset), reg);
}

void TemplateTable::locals_index_wide(Register reg) {
  // offset is 2, not 1, because Lbcp points to wide prefix code
  __ get_2_byte_integer_at_bcp(2, G4_scratch, reg, InterpreterMacroAssembler::Unsigned);
}

void TemplateTable::iload() {
  transition(vtos, itos);
  // Rewrite iload,iload  pair into fast_iload2
  //         iload,caload pair into fast_icaload
  if (RewriteFrequentPairs) {
    Label rewrite, done;

    // get next byte
    __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_iload)), G3_scratch);

    // if _iload, wait to rewrite to iload2.  We only want to rewrite the
    // last two iloads in a pair.  Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // an iload pair.
    __ cmp(G3_scratch, (int)Bytecodes::_iload);
    __ br(Assembler::equal, false, Assembler::pn, done);
    __ delayed()->nop();

    __ cmp(G3_scratch, (int)Bytecodes::_fast_iload);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_iload2, G4_scratch);

    __ cmp(G3_scratch, (int)Bytecodes::_caload);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_icaload, G4_scratch);

    __ set(Bytecodes::_fast_iload, G4_scratch);  // don't check again
    // rewrite
    // G4_scratch: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, G4_scratch, G3_scratch, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(G3_scratch);
  __ access_local_int(G3_scratch, Otos_i);
}
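
// Rewriting example: in a run of iloads the last two become a pair. For
// "iload a; iload b", iload b is patched first to _fast_iload (its successor
// is neither an iload nor a caload); on a later execution iload a then sees
// _fast_iload and becomes _fast_iload2, which loads both locals in one
// template. Similarly "iload; caload" becomes _fast_icaload.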

void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(G3_scratch);
  __ access_local_int(G3_scratch, Otos_i);
  __ push_i();
  locals_index(G3_scratch, 3);  // get next bytecode's local index.
  __ access_local_int(G3_scratch, Otos_i);
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(G3_scratch);
  __ access_local_int(G3_scratch, Otos_i);
}

void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(G3_scratch);
  __ access_local_long(G3_scratch, Otos_l);
}


void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(G3_scratch);
  __ access_local_float(G3_scratch, Ftos_f);
}


void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(G3_scratch);
  __ access_local_double(G3_scratch, Ftos_d);
}


void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(G3_scratch);
  __ access_local_ptr(G3_scratch, Otos_i);
}


void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(G3_scratch);
  __ access_local_int(G3_scratch, Otos_i);
}


void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(G3_scratch);
  __ access_local_long(G3_scratch, Otos_l);
}


void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(G3_scratch);
  __ access_local_float(G3_scratch, Ftos_f);
}


void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(G3_scratch);
  __ access_local_double(G3_scratch, Ftos_d);
}


void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(G3_scratch);
  __ access_local_ptr(G3_scratch, Otos_i);
  __ verify_oop(Otos_i);
}

void TemplateTable::iaload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
  __ ld(O3, arrayOopDesc::base_offset_in_bytes(T_INT), Otos_i);
}


void TemplateTable::laload() {
  transition(itos, ltos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
  __ ld_long(O3, arrayOopDesc::base_offset_in_bytes(T_LONG), Otos_l);
}


void TemplateTable::faload() {
  transition(itos, ftos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerInt, G3_scratch, O3);
  __ ldf(FloatRegisterImpl::S, O3, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Ftos_f);
}


void TemplateTable::daload() {
  transition(itos, dtos);
  // Otos_i: index
  // O2: array
  __ index_check(O2, Otos_i, LogBytesPerLong, G3_scratch, O3);
  __ ldf(FloatRegisterImpl::D, O3, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Ftos_d);
}


void TemplateTable::aaload() {
  transition(itos, atos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O3);
  __ load_heap_oop(O3, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i);
  __ verify_oop(Otos_i);
}


void TemplateTable::baload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, 0, G3_scratch, O3);
  __ ldsb(O3, arrayOopDesc::base_offset_in_bytes(T_BYTE), Otos_i);
}


void TemplateTable::caload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
}

void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // Otos_i: index
  // tos: array
  locals_index(G3_scratch);
  __ access_local_int(G3_scratch, Otos_i);
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ lduh(O3, arrayOopDesc::base_offset_in_bytes(T_CHAR), Otos_i);
}


void TemplateTable::saload() {
  transition(itos, itos);
  // Otos_i: index
  // tos: array
  __ index_check(O2, Otos_i, LogBytesPerShort, G3_scratch, O3);
  __ ldsh(O3, arrayOopDesc::base_offset_in_bytes(T_SHORT), Otos_i);
}

void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ ld(Llocals, Interpreter::local_offset_in_bytes(n), Otos_i);
}


void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  assert(n+1 < Argument::n_register_parameters, "would need more code");
  __ load_unaligned_long(Llocals, Interpreter::local_offset_in_bytes(n+1), Otos_l);
}


void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  assert(n < Argument::n_register_parameters, "would need more code");
  __ ldf(FloatRegisterImpl::S, Llocals, Interpreter::local_offset_in_bytes(n), Ftos_f);
}


void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  FloatRegister dst = Ftos_d;
  __ load_unaligned_double(Llocals, Interpreter::local_offset_in_bytes(n+1), dst);
}


void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ ld_ptr(Llocals, Interpreter::local_offset_in_bytes(n), Otos_i);
}

void TemplateTable::aload_0() {
  transition(vtos, atos);

  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield (itos)
  // _aload_0, _fast_agetfield (atos)
  // _aload_0, _fast_fgetfield (ftos)
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
  // bytecode checks the next bytecode and then rewrites the current
  // bytecode into a pair bytecode; otherwise it rewrites the current
  // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
  //
  if (RewriteFrequentPairs) {
    Label rewrite, done;

    // get next byte
    __ ldub(at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)), G3_scratch);

    // do actual aload_0
    aload(0);

    // if _getfield then wait with rewrite
    __ cmp(G3_scratch, (int)Bytecodes::_getfield);
    __ br(Assembler::equal, false, Assembler::pn, done);
    __ delayed()->nop();

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_igetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_iaccess_0, G4_scratch);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_agetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_aaccess_0, G4_scratch);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ cmp(G3_scratch, (int)Bytecodes::_fast_fgetfield);
    __ br(Assembler::equal, false, Assembler::pn, rewrite);
    __ delayed()->set(Bytecodes::_fast_faccess_0, G4_scratch);

    // else rewrite to _fast_aload0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "adjust fast bytecode def");
    __ set(Bytecodes::_fast_aload_0, G4_scratch);

    // rewrite
    // G4_scratch: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, G4_scratch, G3_scratch, false);
    __ bind(done);
  } else {
    aload(0);
  }
}
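
// Rewriting example: "aload_0; fast_igetfield" collapses into the single pair
// bytecode _fast_iaccess_0, while a plain "aload_0; getfield" is left alone
// until getfield itself has been rewritten to its fast form.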

void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(G3_scratch);
  __ store_local_int(G3_scratch, Otos_i);
}


void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(G3_scratch);
  __ store_local_long(G3_scratch, Otos_l);
}


void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(G3_scratch);
  __ store_local_float(G3_scratch, Ftos_f);
}


void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(G3_scratch);
  __ store_local_double(G3_scratch, Ftos_d);
}


void TemplateTable::astore() {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  locals_index(G3_scratch);
  __ store_local_ptr(G3_scratch, Otos_i);
}


void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(G3_scratch);
  __ store_local_int(G3_scratch, Otos_i);
}


void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  __ pop_l();
  locals_index_wide(G3_scratch);
  __ store_local_long(G3_scratch, Otos_l);
}


void TemplateTable::wide_fstore() {
  transition(vtos, vtos);
  __ pop_f();
  locals_index_wide(G3_scratch);
  __ store_local_float(G3_scratch, Ftos_f);
}


void TemplateTable::wide_dstore() {
  transition(vtos, vtos);
  __ pop_d();
  locals_index_wide(G3_scratch);
  __ store_local_double(G3_scratch, Ftos_d);
}


void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  locals_index_wide(G3_scratch);
  __ store_local_ptr(G3_scratch, Otos_i);
}

void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(O2); // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
  __ st(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_INT));
}


void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(O2); // index
  // Otos_l: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
  __ st_long(Otos_l, O2, arrayOopDesc::base_offset_in_bytes(T_LONG));
}


void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(O2); // index
  // Ftos_f: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerInt, G3_scratch, O2);
  __ stf(FloatRegisterImpl::S, Ftos_f, O2, arrayOopDesc::base_offset_in_bytes(T_FLOAT));
}


void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(O2); // index
  // Ftos_d: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerLong, G3_scratch, O2);
  __ stf(FloatRegisterImpl::D, Ftos_d, O2, arrayOopDesc::base_offset_in_bytes(T_DOUBLE));
}

void TemplateTable::aastore() {
  Label store_ok, is_null, done;
  transition(vtos, vtos);
  __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
  __ ld(Lesp, Interpreter::expr_offset_in_bytes(1), O2);        // get index
  __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(2), O3);    // get array
  // Otos_i: val
  // O2: index
  // O3: array
  __ verify_oop(Otos_i);
  __ index_check_without_pop(O3, O2, UseCompressedOops ? 2 : LogBytesPerWord, G3_scratch, O1);

  // do array store check - check for NULL value first
  __ br_null(Otos_i, false, Assembler::pn, is_null);
  __ delayed()->nop();

  __ load_klass(O3, O4);     // get array klass
  __ load_klass(Otos_i, O5); // get value klass

  // do fast instanceof cache test

  __ ld_ptr(O4, sizeof(oopDesc) + objArrayKlass::element_klass_offset_in_bytes(), O4);

  assert(Otos_i == O0, "just checking");

  // Otos_i:    value
  // O1:        addr - offset
  // O2:        index
  // O3:        array
  // O4:        array element klass
  // O5:        value klass

  // Address element(O1, 0, arrayOopDesc::base_offset_in_bytes(T_OBJECT));

  // Generate a fast subtype check.  Branch to store_ok if no
  // failure.  Throw if failure.
  __ gen_subtype_check(O5, O4, G3_scratch, G4_scratch, G1_scratch, store_ok);

  // Not a subtype; so must throw exception
  __ throw_if_not_x(Assembler::never, Interpreter::_throw_ArrayStoreException_entry, G3_scratch);

  // Store is OK.
  __ bind(store_ok);
  do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Otos_i, G3_scratch, _bs->kind(), true);

  __ ba(false, done);
  __ delayed()->inc(Lesp, 3 * Interpreter::stackElementSize); // adj sp (pops array, index and value)

  __ bind(is_null);
  do_oop_store(_masm, O1, noreg, arrayOopDesc::base_offset_in_bytes(T_OBJECT), G0, G4_scratch, _bs->kind(), true);

  __ profile_null_seen(G3_scratch);
  __ inc(Lesp, 3 * Interpreter::stackElementSize); // adj sp (pops array, index and value)
  __ bind(done);
}
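
// Both do_oop_store calls above pass precise == true, so the element offset is
// added back onto O1 before the post-barrier and the exact element address
// (not just the array base) is card-marked.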

void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(O2); // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, 0, G3_scratch, O2);
  __ stb(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_BYTE));
}


void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(O2); // index
  // Otos_i: val
  // O3: array
  __ index_check(O3, O2, LogBytesPerShort, G3_scratch, O2);
  __ sth(Otos_i, O2, arrayOopDesc::base_offset_in_bytes(T_CHAR));
}


void TemplateTable::sastore() {
  // %%%%% Factor across platform
  castore();
}


void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ st(Otos_i, Llocals, Interpreter::local_offset_in_bytes(n));
}


void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  assert(n+1 < Argument::n_register_parameters, "only handle register cases");
  __ store_unaligned_long(Otos_l, Llocals, Interpreter::local_offset_in_bytes(n+1));
}


void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  assert(n < Argument::n_register_parameters, "only handle register cases");
  __ stf(FloatRegisterImpl::S, Ftos_f, Llocals, Interpreter::local_offset_in_bytes(n));
}


void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  FloatRegister src = Ftos_d;
  __ store_unaligned_double(src, Llocals, Interpreter::local_offset_in_bytes(n+1));
}


void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ load_ptr(0, Otos_i);
  __ inc(Lesp, Interpreter::stackElementSize);
  __ verify_oop_or_return_address(Otos_i, G3_scratch);
  __ store_local_ptr(n, Otos_i);
}

void TemplateTable::pop() {
  transition(vtos, vtos);
  __ inc(Lesp, Interpreter::stackElementSize);
}


void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ inc(Lesp, 2 * Interpreter::stackElementSize);
}


void TemplateTable::dup() {
  transition(vtos, vtos);
  // stack: ..., a
  // load a and tag
  __ load_ptr(0, Otos_i);
  __ push_ptr(Otos_i);
  // stack: ..., a, a
}


void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, G3_scratch);  // get a
  __ load_ptr( 0, Otos_l1);     // get b
  __ store_ptr(1, Otos_l1);     // put b
  __ store_ptr(0, G3_scratch);  // put a - like swap
  __ push_ptr(Otos_l1);         // push b
  // stack: ..., b, a, b
}


void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  // get c and push on stack, reuse registers
  __ load_ptr( 0, G3_scratch);  // get c
  __ push_ptr(G3_scratch);      // push c with tag
  // stack: ..., a, b, c, c  (c in reg)  (Lesp - 4)
  // (stack offsets n+1 now)
  __ load_ptr( 3, Otos_l1);     // get a
  __ store_ptr(3, G3_scratch);  // put c at 3
  // stack: ..., c, b, c, c  (a in reg)
  __ load_ptr( 2, G3_scratch);  // get b
  __ store_ptr(2, Otos_l1);     // put a at 2
  // stack: ..., c, a, c, c  (b in reg)
  __ store_ptr(1, G3_scratch);  // put b at 1
  // stack: ..., c, a, b, c
}


void TemplateTable::dup2() {
  transition(vtos, vtos);
  __ load_ptr(1, G3_scratch);  // get a
  __ load_ptr(0, Otos_l1);     // get b
  __ push_ptr(G3_scratch);     // push a
  __ push_ptr(Otos_l1);        // push b
  // stack: ..., a, b, a, b
}


void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 1, Lscratch);    // get b
  __ load_ptr( 2, Otos_l1);     // get a
  __ store_ptr(2, Lscratch);    // put b at a
  // stack: ..., b, b, c
  __ load_ptr( 0, G3_scratch);  // get c
  __ store_ptr(1, G3_scratch);  // put c at b
  // stack: ..., b, c, c
  __ store_ptr(0, Otos_l1);     // put a at c
  // stack: ..., b, c, a
  __ push_ptr(Lscratch);        // push b
  __ push_ptr(G3_scratch);      // push c
  // stack: ..., b, c, a, b, c
}


// The spec says that these types can be a mixture of category 1 (1 word)
// types and/or category 2 types (long and doubles)
void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr( 1, Lscratch);    // get c
  __ load_ptr( 3, Otos_l1);     // get a
  __ store_ptr(3, Lscratch);    // put c at 3
  __ store_ptr(1, Otos_l1);     // put a at 1
  // stack: ..., c, b, a, d
  __ load_ptr( 2, G3_scratch);  // get b
  __ load_ptr( 0, Otos_l1);     // get d
  __ store_ptr(0, G3_scratch);  // put b at 0
  __ store_ptr(2, Otos_l1);     // put d at 2
  // stack: ..., c, d, a, b
  __ push_ptr(Lscratch);        // push c
  __ push_ptr(Otos_l1);         // push d
  // stack: ..., c, d, a, b, c, d
}


void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, G3_scratch);  // get a
  __ load_ptr( 0, Otos_l1);     // get b
  __ store_ptr(0, G3_scratch);  // put b
  __ store_ptr(1, Otos_l1);     // put a
  // stack: ..., b, a
}

void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  __ pop_i(O1);
  switch (op) {
   case  add:  __  add(O1, Otos_i, Otos_i);  break;
   case  sub:  __  sub(O1, Otos_i, Otos_i);  break;
     // %%%%% Mul may not exist: better to call .mul?
   case  mul:  __ smul(O1, Otos_i, Otos_i);  break;
   case _and:  __ and3(O1, Otos_i, Otos_i);  break;
   case  _or:  __  or3(O1, Otos_i, Otos_i);  break;
   case _xor:  __ xor3(O1, Otos_i, Otos_i);  break;
   case  shl:  __  sll(O1, Otos_i, Otos_i);  break;
   case  shr:  __  sra(O1, Otos_i, Otos_i);  break;
   case ushr:  __  srl(O1, Otos_i, Otos_i);  break;
   default: ShouldNotReachHere();
  }
}


void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
  __ pop_l(O2);
  switch (op) {
#ifdef _LP64
   case  add:  __  add(O2, Otos_l, Otos_l);  break;
   case  sub:  __  sub(O2, Otos_l, Otos_l);  break;
   case _and:  __ and3(O2, Otos_l, Otos_l);  break;
   case  _or:  __  or3(O2, Otos_l, Otos_l);  break;
   case _xor:  __ xor3(O2, Otos_l, Otos_l);  break;
#else
   case  add:  __ addcc(O3, Otos_l2, Otos_l2);  __ addc(O2, Otos_l1, Otos_l1);  break;
   case  sub:  __ subcc(O3, Otos_l2, Otos_l2);  __ subc(O2, Otos_l1, Otos_l1);  break;
   case _and:  __  and3(O3, Otos_l2, Otos_l2);  __ and3(O2, Otos_l1, Otos_l1);  break;
   case  _or:  __   or3(O3, Otos_l2, Otos_l2);  __  or3(O2, Otos_l1, Otos_l1);  break;
   case _xor:  __  xor3(O3, Otos_l2, Otos_l2);  __ xor3(O2, Otos_l1, Otos_l1);  break;
#endif
   default: ShouldNotReachHere();
  }
}

void TemplateTable::idiv() {
  // %%%%% Later: ForSPARC/V7 call .sdiv library routine,
  // %%%%% Use ldsw...sdivx on pure V9 ABI. 64 bit safe.

  transition(itos, itos);
  __ pop_i(O1); // get 1st op

  // Y contains upper 32 bits of result, set it to 0 or all ones
  __ wry(G0);
  __ mov(~0, G3_scratch);

  __ tst(O1);
  Label neg;
  __ br(Assembler::negative, true, Assembler::pn, neg);
  __ delayed()->wry(G3_scratch);
  __ bind(neg);

  Label ok;
  __ tst(Otos_i);
  __ throw_if_not_icc(Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);

  const int min_int = 0x80000000;
  Label regular;
  __ cmp(Otos_i, -1);
  __ br(Assembler::notEqual, false, Assembler::pt, regular);
#ifdef _LP64
  // Don't put set in delay slot
  // Set will turn into multiple instructions in 64 bit mode
  __ delayed()->nop();
  __ set(min_int, G4_scratch);
#else
  __ delayed()->set(min_int, G4_scratch);
#endif
  Label done;
  __ cmp(O1, G4_scratch);
  __ br(Assembler::equal, true, Assembler::pt, done);
  __ delayed()->mov(O1, Otos_i);   // (mov only executed if branch taken)

  __ bind(regular);
  __ sdiv(O1, Otos_i, Otos_i); // note: irem uses O1 after this instruction!
  __ bind(done);
}
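
// The explicit min_int/-1 comparison above is there because the JVM spec
// requires min_int / -1 == min_int, which the hardware divide does not
// guarantee for that overflowing case, so it bypasses sdiv entirely.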

void TemplateTable::irem() {
  transition(itos, itos);
  __ mov(Otos_i, O2); // save divisor
  idiv();             // %%%% Hack: exploits fact that idiv leaves dividend in O1
  __ smul(Otos_i, O2, Otos_i);
  __ sub(O1, Otos_i, Otos_i);
}

void TemplateTable::lmul() {
  transition(ltos, ltos);
  __ pop_l(O2);
#ifdef _LP64
  __ mulx(Otos_l, O2, Otos_l);
#else
  __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lmul));
#endif
}


void TemplateTable::ldiv() {
  transition(ltos, ltos);

  // check for zero
  __ pop_l(O2);
#ifdef _LP64
  __ tst(Otos_l);
  __ throw_if_not_xcc(Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ sdivx(O2, Otos_l, Otos_l);
#else
  __ orcc(Otos_l1, Otos_l2, G0);
  __ throw_if_not_icc(Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
#endif
}


void TemplateTable::lrem() {
  transition(ltos, ltos);

  // check for zero
  __ pop_l(O2);
#ifdef _LP64
  __ tst(Otos_l);
  __ throw_if_not_xcc(Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ sdivx(O2, Otos_l, Otos_l2);
  __ mulx (Otos_l2, Otos_l, Otos_l2);
  __ sub  (O2, Otos_l2, Otos_l);
#else
  __ orcc(Otos_l1, Otos_l2, G0);
  __ throw_if_not_icc(Assembler::notZero, Interpreter::_throw_ArithmeticException_entry, G3_scratch);
  __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
#endif
}


void TemplateTable::lshl() {
  transition(itos, ltos); // %%%% could optimize, fill delay slot or opt for ultra

  __ pop_l(O2);  // shift value in O2, O3
#ifdef _LP64
  __ sllx(O2, Otos_i, Otos_l);
#else
  __ lshl(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
}


void TemplateTable::lshr() {
  transition(itos, ltos); // %%%% see lshl comment

  __ pop_l(O2);  // shift value in O2, O3
#ifdef _LP64
  __ srax(O2, Otos_i, Otos_l);
#else
  __ lshr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
}


void TemplateTable::lushr() {
  transition(itos, ltos); // %%%% see lshl comment

  __ pop_l(O2);  // shift value in O2, O3
#ifdef _LP64
  __ srlx(O2, Otos_i, Otos_l);
#else
  __ lushr(O2, O3, Otos_i, Otos_l1, Otos_l2, O4);
#endif
}

void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);
  switch (op) {
   case add:  __ pop_f(F4); __ fadd(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case sub:  __ pop_f(F4); __ fsub(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case mul:  __ pop_f(F4); __ fmul(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case div:  __ pop_f(F4); __ fdiv(FloatRegisterImpl::S, F4, Ftos_f, Ftos_f);  break;
   case rem:
    assert(Ftos_f == F0, "just checking");
#ifdef _LP64
    // LP64 calling conventions use F1, F3 for passing 2 floats
    __ pop_f(F1);
    __ fmov(FloatRegisterImpl::S, Ftos_f, F3);
#else
    __ pop_i(O0);
    __ stf(FloatRegisterImpl::S, Ftos_f, __ d_tmp);
    __ ld(__ d_tmp, O1);
#endif
    __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::frem));
    assert(Ftos_f == F0, "fix this code");
    break;

   default: ShouldNotReachHere();
  }
}


void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);
  switch (op) {
   case add:  __ pop_d(F4); __ fadd(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case sub:  __ pop_d(F4); __ fsub(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case mul:  __ pop_d(F4); __ fmul(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case div:  __ pop_d(F4); __ fdiv(FloatRegisterImpl::D, F4, Ftos_d, Ftos_d);  break;
   case rem:
#ifdef _LP64
    // Pass arguments in D0, D2
    __ fmov(FloatRegisterImpl::D, Ftos_f, F2);
    __ pop_d(F0);
#else
    // Pass arguments in O0O1, O2O3
    __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
    __ ldd(__ d_tmp, O2);
    __ pop_d(Ftos_f);
    __ stf(FloatRegisterImpl::D, Ftos_f, __ d_tmp);
    __ ldd(__ d_tmp, O0);
#endif
    __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::drem));
    assert(Ftos_d == F0, "fix this code");
    break;

   default: ShouldNotReachHere();
  }
}

void TemplateTable::ineg() {
  transition(itos, itos);
  __ neg(Otos_i);
}


void TemplateTable::lneg() {
  transition(ltos, ltos);
#ifdef _LP64
  __ sub(G0, Otos_l, Otos_l);
#else
  __ lneg(Otos_l1, Otos_l2);
#endif
}


void TemplateTable::fneg() {
  transition(ftos, ftos);
  __ fneg(FloatRegisterImpl::S, Ftos_f);
}


void TemplateTable::dneg() {
  transition(dtos, dtos);
  // v8 has fnegd if source and dest are the same
  __ fneg(FloatRegisterImpl::D, Ftos_f);
}


void TemplateTable::iinc() {
  transition(vtos, vtos);
  locals_index(G3_scratch);
  __ ldsb(Lbcp, 2, O2);  // load constant
  __ access_local_int(G3_scratch, Otos_i);
  __ add(Otos_i, O2, Otos_i);
  __ st(Otos_i, G3_scratch, 0);  // access_local_int puts E.A. in G3_scratch
}


void TemplateTable::wide_iinc() {
  transition(vtos, vtos);
  locals_index_wide(G3_scratch);
  __ get_2_byte_integer_at_bcp(4, O2, O3, InterpreterMacroAssembler::Signed);
  __ access_local_int(G3_scratch, Otos_i);
  __ add(Otos_i, O3, Otos_i);
  __ st(Otos_i, G3_scratch, 0);  // access_local_int puts E.A. in G3_scratch
}

void TemplateTable::convert() {
// %%%%% Factor this first part across platforms
#ifdef ASSERT
  TosState tos_in  = ilgl;
  TosState tos_out = ilgl;
  switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default             : ShouldNotReachHere();
  }
  switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default             : ShouldNotReachHere();
  }
  transition(tos_in, tos_out);
#endif


  // Conversion
  Label done;
  switch (bytecode()) {
   case Bytecodes::_i2l:
#ifdef _LP64
    // Sign extend the 32 bits
    __ sra(Otos_i, 0, Otos_l);
#else
    __ addcc(Otos_i, 0, Otos_l2);
    __ br(Assembler::greaterEqual, true, Assembler::pt, done);
    __ delayed()->clr(Otos_l1);
    __ set(~0, Otos_l1);
#endif
    break;

   case Bytecodes::_i2f:
    __ st(Otos_i, __ d_tmp);
    __ ldf(FloatRegisterImpl::S, __ d_tmp, F0);
    __ fitof(FloatRegisterImpl::S, F0, Ftos_f);
    break;

   case Bytecodes::_i2d:
    __ st(Otos_i, __ d_tmp);
    __ ldf(FloatRegisterImpl::S, __ d_tmp, F0);
    __ fitof(FloatRegisterImpl::D, F0, Ftos_f);
    break;

   case Bytecodes::_i2b:
    __ sll(Otos_i, 24, Otos_i);
    __ sra(Otos_i, 24, Otos_i);
    break;

   case Bytecodes::_i2c:
    __ sll(Otos_i, 16, Otos_i);
    __ srl(Otos_i, 16, Otos_i);
    break;

   case Bytecodes::_i2s:
    __ sll(Otos_i, 16, Otos_i);
    __ sra(Otos_i, 16, Otos_i);
    break;

   case Bytecodes::_l2i:
#ifndef _LP64
    __ mov(Otos_l2, Otos_i);
#else
    // Sign-extend into the high 32 bits
    __ sra(Otos_l, 0, Otos_i);
#endif
    break;

   case Bytecodes::_l2f:
   case Bytecodes::_l2d:
    __ st_long(Otos_l, __ d_tmp);
    __ ldf(FloatRegisterImpl::D, __ d_tmp, Ftos_d);

    if (VM_Version::v9_instructions_work()) {
      if (bytecode() == Bytecodes::_l2f) {
        __ fxtof(FloatRegisterImpl::S, Ftos_d, Ftos_f);
      } else {
        __ fxtof(FloatRegisterImpl::D, Ftos_d, Ftos_d);
      }
    } else {
      __ call_VM_leaf(
        Lscratch,
        bytecode() == Bytecodes::_l2f
          ? CAST_FROM_FN_PTR(address, SharedRuntime::l2f)
          : CAST_FROM_FN_PTR(address, SharedRuntime::l2d)
      );
    }
    break;

   case Bytecodes::_f2i: {
      Label isNaN;
      // result must be 0 if value is NaN; test by comparing value to itself
      __ fcmp(FloatRegisterImpl::S, Assembler::fcc0, Ftos_f, Ftos_f);
      // According to the v8 manual, you have to have a non-fp instruction
      // between fcmp and fb.
      if (!VM_Version::v9_instructions_work()) {
        __ nop();
      }
      __ fb(Assembler::f_unordered, true, Assembler::pn, isNaN);
      __ delayed()->clr(Otos_i);                                  // NaN
      __ ftoi(FloatRegisterImpl::S, Ftos_f, F30);
      __ stf(FloatRegisterImpl::S, F30, __ d_tmp);
      __ ld(__ d_tmp, Otos_i);
      __ bind(isNaN);
    }
    break;

   case Bytecodes::_f2l:
    // must uncache tos
    __ push_f();
#ifdef _LP64
    __ pop_f(F1);
#else
    __ pop_i(O0);
#endif
    __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::f2l));
    break;

   case Bytecodes::_f2d:
    __ ftof(FloatRegisterImpl::S, FloatRegisterImpl::D, Ftos_f, Ftos_f);
    break;

   case Bytecodes::_d2i:
   case Bytecodes::_d2l:
    // must uncache tos
    __ push_d();
#ifdef _LP64
    // LP64 calling conventions pass first double arg in D0
    __ pop_d(Ftos_d);
#else
    __ pop_i(O0);
    __ pop_i(O1);
#endif
    __ call_VM_leaf(Lscratch,
                    bytecode() == Bytecodes::_d2i
                      ? CAST_FROM_FN_PTR(address, SharedRuntime::d2i)
                      : CAST_FROM_FN_PTR(address, SharedRuntime::d2l));
    break;

   case Bytecodes::_d2f:
    if (VM_Version::v9_instructions_work()) {
      __ ftof(FloatRegisterImpl::D, FloatRegisterImpl::S, Ftos_d, Ftos_f);
    }
    else {
      // must uncache tos
      __ push_d();
      __ pop_i(O0);
      __ pop_i(O1);
      __ call_VM_leaf(Lscratch, CAST_FROM_FN_PTR(address, SharedRuntime::d2f));
    }
    break;

   default: ShouldNotReachHere();
  }
  __ bind(done);
}

void TemplateTable::lcmp() {
  transition(ltos, itos);

#ifdef _LP64
  __ pop_l(O1); // pop off value 1, value 2 is in O0
  __ lcmp(O1, Otos_l, Otos_i);
#else
  __ pop_l(O2); // cmp O2,3 to O0,1
  __ lcmp(O2, O3, Otos_l1, Otos_l2, Otos_i);
#endif
}


void TemplateTable::float_cmp(bool is_float, int unordered_result) {

  if (is_float) __ pop_f(F2);
  else          __ pop_d(F2);

  assert(Ftos_f == F0 && Ftos_d == F0, "alias checking:");

  __ float_cmp(is_float, unordered_result, F2, F0, Otos_i);
}

void TemplateTable::branch(bool is_jsr, bool is_wide) {
  // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
  __ verify_oop(Lmethod);
  __ verify_thread();

  const Register O2_bumped_count = O2;
  __ profile_taken_branch(G3_scratch, O2_bumped_count);

  // get (wide) offset to O1_disp
  const Register O1_disp = O1;
  if (is_wide) __ get_4_byte_integer_at_bcp(1, G4_scratch, O1_disp, InterpreterMacroAssembler::set_CC);
  else         __ get_2_byte_integer_at_bcp(1, G4_scratch, O1_disp, InterpreterMacroAssembler::Signed, InterpreterMacroAssembler::set_CC);

  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the
  // non-JSR normal-branch stuff occurring below.
  if (is_jsr) {
    // compute return address as bci in Otos_i
    __ ld_ptr(Lmethod, methodOopDesc::const_offset(), G3_scratch);
    __ sub(Lbcp, G3_scratch, G3_scratch);
    __ sub(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()) - (is_wide ? 5 : 3), Otos_i);

    // Bump Lbcp to target of JSR
    __ add(Lbcp, O1_disp, Lbcp);
    // Push returnAddress for "ret" on stack
    __ push_ptr(Otos_i);
    // And away we go!
    __ dispatch_next(vtos);
    return;
  }

  // Normal (non-jsr) branch handling

  // Save the current Lbcp
  const Register O0_cur_bcp = O0;
  __ mov(Lbcp, O0_cur_bcp);


  bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
  if (increment_invocation_counter_for_backward_branches) {
    Label Lforward;
    // check branch direction
    __ br(Assembler::positive, false, Assembler::pn, Lforward);
    // Bump bytecode pointer by displacement (take the branch)
    __ delayed()->add(O1_disp, Lbcp, Lbcp);  // add to bc addr

    if (TieredCompilation) {
      Label Lno_mdo, Loverflow;
      int increment = InvocationCounter::count_increment;
      int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
      if (ProfileInterpreter) {
        // If no method data exists, go to profile_continue.
        __ ld_ptr(Lmethod, methodOopDesc::method_data_offset(), G4_scratch);
        __ br_null(G4_scratch, false, Assembler::pn, Lno_mdo);
        __ delayed()->nop();

        // Increment backedge counter in the MDO
        Address mdo_backedge_counter(G4_scratch, in_bytes(methodDataOopDesc::backedge_counter_offset()) +
                                                 in_bytes(InvocationCounter::counter_offset()));
        __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, G3_scratch, Lscratch,
                                   Assembler::notZero, &Lforward);
        __ ba(false, Loverflow);
        __ delayed()->nop();
      }

      // If there's no MDO, increment counter in methodOop
      __ bind(Lno_mdo);
      Address backedge_counter(Lmethod, in_bytes(methodOopDesc::backedge_counter_offset()) +
                                        in_bytes(InvocationCounter::counter_offset()));
      __ increment_mask_and_jump(backedge_counter, increment, mask, G3_scratch, Lscratch,
                                 Assembler::notZero, &Lforward);
      __ bind(Loverflow);

      // notify point for loop, pass branch bytecode
      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), O0_cur_bcp);

      // Was an OSR adapter generated?
      // O0 = osr nmethod
      __ br_null(O0, false, Assembler::pn, Lforward);
      __ delayed()->nop();

      // Has the nmethod been invalidated already?
      __ ld(O0, nmethod::entry_bci_offset(), O2);
      __ cmp(O2, InvalidOSREntryBci);
      __ br(Assembler::equal, false, Assembler::pn, Lforward);
      __ delayed()->nop();

      // migrate the interpreter frame off of the stack

      __ mov(G2_thread, L7);
      // save nmethod
      __ mov(O0, L6);
      __ set_last_Java_frame(SP, noreg);
      __ call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
      __ reset_last_Java_frame();
      __ mov(L7, G2_thread);

      // move OSR nmethod to I1
      __ mov(L6, I1);

      // OSR buffer to I0
      __ mov(O0, I0);

      // remove the interpreter frame
      __ restore(I5_savedSP, 0, SP);

      // Jump to the osr code.
      __ ld_ptr(O1, nmethod::osr_entry_point_offset(), O2);
      __ jmp(O2, G0);
      __ delayed()->nop();

    } else {
      // Update Backedge branch separately from invocations
      const Register G4_invoke_ctr = G4;
      __ increment_backedge_counter(G4_invoke_ctr, G1_scratch);
      if (ProfileInterpreter) {
        __ test_invocation_counter_for_mdp(G4_invoke_ctr, Lbcp, G3_scratch, Lforward);
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(O2_bumped_count, O0_cur_bcp, G3_scratch);
        }
      } else {
        if (UseOnStackReplacement) {
          __ test_backedge_count_for_osr(G4_invoke_ctr, O0_cur_bcp, G3_scratch);
        }
      }
    }

    __ bind(Lforward);
  } else
    // Bump bytecode pointer by displacement (take the branch)
    __ add(O1_disp, Lbcp, Lbcp);  // add to bc addr

  // continue with bytecode @ target
  // %%%%% Like Intel, could speed things up by moving bytecode fetch to code above,
  // %%%%% and changing dispatch_next to dispatch_only
  __ dispatch_next(vtos);
}

// Note Condition in argument is TemplateTable::Condition
// arg scope is within class scope

void TemplateTable::if_0cmp(Condition cc) {
  // no pointers, integer only!
  transition(itos, vtos);
  // assume branch is more often taken than not (loops use backward branches)
  __ cmp(Otos_i, 0);
  __ if_cmp(ccNot(cc), false);
}


void TemplateTable::if_icmp(Condition cc) {
  transition(itos, vtos);
  __ pop_i(O1);
  __ cmp(O1, Otos_i);
  __ if_cmp(ccNot(cc), false);
}


void TemplateTable::if_nullcmp(Condition cc) {
  transition(atos, vtos);
  __ tst(Otos_i);
  __ if_cmp(ccNot(cc), true);
}


void TemplateTable::if_acmp(Condition cc) {
  transition(atos, vtos);
  __ pop_ptr(O1);
  __ verify_oop(O1);
  __ verify_oop(Otos_i);
  __ cmp(O1, Otos_i);
  __ if_cmp(ccNot(cc), true);
}

void TemplateTable::ret() {
  transition(vtos, vtos);
  locals_index(G3_scratch);
  __ access_local_returnAddress(G3_scratch, Otos_i);
  // Otos_i contains the bci, compute the bcp from that

#ifdef _LP64
#ifdef ASSERT
  // jsr result was labeled as an 'itos' not an 'atos' because we cannot GC
  // the result.  The return address (really a BCI) was stored with an
  // 'astore' because JVM specs claim it's a pointer-sized thing.  Hence in
  // the 64-bit build the 32-bit BCI is actually in the low bits of a 64-bit
  // loaded value.
  { Label zzz;
    __ set(65536, G3_scratch);
    __ cmp(Otos_i, G3_scratch);
    __ bp(Assembler::lessEqualUnsigned, false, Assembler::xcc, Assembler::pn, zzz);
    __ delayed()->nop();
    __ stop("BCI is in the wrong register half?");
    __ bind(zzz);
  }
#endif
#endif

  __ profile_ret(vtos, Otos_i, G4_scratch);

  __ ld_ptr(Lmethod, methodOopDesc::const_offset(), G3_scratch);
  __ add(G3_scratch, Otos_i, G3_scratch);
  __ add(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()), Lbcp);
  __ dispatch_next(vtos);
}


void TemplateTable::wide_ret() {
  transition(vtos, vtos);
  locals_index_wide(G3_scratch);
  __ access_local_returnAddress(G3_scratch, Otos_i);
  // Otos_i contains the bci, compute the bcp from that

  __ profile_ret(vtos, Otos_i, G4_scratch);

  __ ld_ptr(Lmethod, methodOopDesc::const_offset(), G3_scratch);
  __ add(G3_scratch, Otos_i, G3_scratch);
  __ add(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()), Lbcp);
  __ dispatch_next(vtos);
}

void TemplateTable::tableswitch() {
  transition(itos, vtos);
  Label default_case, continue_execution;

  // align bcp
  __ add(Lbcp, BytesPerInt, O1);
  __ and3(O1, -BytesPerInt, O1);
  // load lo, hi
  __ ld(O1, 1 * BytesPerInt, O2);  // low bound
  __ ld(O1, 2 * BytesPerInt, O3);  // high bound
#ifdef _LP64
  // Sign extend the 32 bits
  __ sra(Otos_i, 0, Otos_i);
#endif /* _LP64 */

  // check against lo & hi
  __ cmp(Otos_i, O2);
  __ br(Assembler::less, false, Assembler::pn, default_case);
  __ delayed()->cmp(Otos_i, O3);
  __ br(Assembler::greater, false, Assembler::pn, default_case);
  // lookup dispatch offset
  __ delayed()->sub(Otos_i, O2, O2);
  __ profile_switch_case(O2, O3, G3_scratch, G4_scratch);
  __ sll(O2, LogBytesPerInt, O2);
  __ add(O2, 3 * BytesPerInt, O2);
  __ ba(false, continue_execution);
  __ delayed()->ld(O1, O2, O2);
  // handle default
  __ bind(default_case);
  __ profile_switch_default(O3);
  __ ld(O1, 0, O2); // get default offset
  // continue execution
  __ bind(continue_execution);
  __ add(Lbcp, O2, Lbcp);
  __ dispatch_next(vtos);
}
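
// tableswitch operand layout (after the 4-byte alignment padding handled
// above): default offset at 0, low bound at 1*BytesPerInt, high bound at
// 2*BytesPerInt, then (high - low + 1) jump offsets starting at 3*BytesPerInt,
// which is where the "3 * BytesPerInt" bias on O2 comes from.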

void TemplateTable::lookupswitch() {
  transition(itos, itos);
  __ stop("lookupswitch bytecode should have been rewritten");
}

void TemplateTable::fast_linearswitch() {
  transition(itos, vtos);
  Label loop_entry, loop, found, continue_execution;
  // align bcp
  __ add(Lbcp, BytesPerInt, O1);
  __ and3(O1, -BytesPerInt, O1);
  // set counter
  __ ld(O1, BytesPerInt, O2);
  __ sll(O2, LogBytesPerInt + 1, O2); // in word-pairs
  __ add(O1, 2 * BytesPerInt, O3);    // set first pair addr
  __ ba(false, loop_entry);
  __ delayed()->add(O3, O2, O2);      // counter now points past last pair

  // table search
  __ bind(loop);
  __ cmp(O4, Otos_i);
  __ br(Assembler::equal, true, Assembler::pn, found);
  __ delayed()->ld(O3, BytesPerInt, O4); // offset -> O4
  __ inc(O3, 2 * BytesPerInt);

  __ bind(loop_entry);
  __ cmp(O2, O3);
  __ brx(Assembler::greaterUnsigned, true, Assembler::pt, loop);
  __ delayed()->ld(O3, 0, O4);

  // default case
  __ ld(O1, 0, O4); // get default offset
  if (ProfileInterpreter) {
    __ profile_switch_default(O3);
    __ ba(false, continue_execution);
    __ delayed()->nop();
  }

  // entry found -> get offset
  __ bind(found);
  if (ProfileInterpreter) {
    __ sub(O3, O1, O3);
    __ sub(O3, 2*BytesPerInt, O3);
    __ srl(O3, LogBytesPerInt + 1, O3); // in word-pairs
    __ profile_switch_case(O3, O1, O2, G3_scratch);

    __ bind(continue_execution);
  }
  __ add(Lbcp, O4, Lbcp);
  __ dispatch_next(vtos);
}
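
// lookupswitch operand layout (after alignment): default offset at 0, the
// pair count at 1*BytesPerInt, then (match, offset) pairs of 2*BytesPerInt
// each; the linear scan above walks those pairs, and O4 ends up holding
// either the matching pair's offset or the default.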
1862 void TemplateTable::fast_binaryswitch() {
1863 transition(itos, vtos);
1864 // Implementation using the following core algorithm: (copied from Intel)
1865 //
1866 // int binary_search(int key, LookupswitchPair* array, int n) {
1867 // // Binary search according to "Methodik des Programmierens" by
1868 // // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
1869 // int i = 0;
1870 // int j = n;
1871 // while (i+1 < j) {
1872 // // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
1873 // // with Q: for all i: 0 <= i < n: key < a[i]
1874 // // where a stands for the array and assuming that the (nonexistent)
1875 // // element a[n] is infinitely big.
1876 // int h = (i + j) >> 1;
1877 // // i < h < j
1878 // if (key < array[h].fast_match()) {
1879 // j = h;
1880 // } else {
1881 // i = h;
1882 // }
1883 // }
1884 // // R: a[i] <= key < a[i+1] or Q
1885 // // (i.e., if key is within array, i is the correct index)
1886 // return i;
1887 // }
1889 // register allocation
1890 assert(Otos_i == O0, "alias checking");
1891 const Register Rkey = Otos_i; // already set (tosca)
1892 const Register Rarray = O1;
1893 const Register Ri = O2;
1894 const Register Rj = O3;
1895 const Register Rh = O4;
1896 const Register Rscratch = O5;
1898 const int log_entry_size = 3;
1899 const int entry_size = 1 << log_entry_size;
1901 Label found;
1902 // Find Array start
1903 __ add(Lbcp, 3 * BytesPerInt, Rarray);
1904 __ and3(Rarray, -BytesPerInt, Rarray);
1905 // initialize i & j (in delay slot)
1906 __ clr( Ri );
1908 // and start
1909 Label entry;
1910 __ ba(false, entry);
1911 __ delayed()->ld( Rarray, -BytesPerInt, Rj);
1912 // (Rj is already in the native byte-ordering.)
1914 // binary search loop
1915 { Label loop;
1916 __ bind( loop );
1917 // int h = (i + j) >> 1;
1918 __ sra( Rh, 1, Rh );
1919 // if (key < array[h].fast_match()) {
1920 // j = h;
1921 // } else {
1922 // i = h;
1923 // }
1924 __ sll( Rh, log_entry_size, Rscratch );
1925 __ ld( Rarray, Rscratch, Rscratch );
1926 // (Rscratch is already in the native byte-ordering.)
1927 __ cmp( Rkey, Rscratch );
1928 if ( VM_Version::v9_instructions_work() ) {
1929 __ movcc( Assembler::less, false, Assembler::icc, Rh, Rj ); // j = h if (key < array[h].fast_match())
1930 __ movcc( Assembler::greaterEqual, false, Assembler::icc, Rh, Ri ); // i = h if (key >= array[h].fast_match())
1931 }
1932 else {
1933 Label end_of_if;
1934 __ br( Assembler::less, true, Assembler::pt, end_of_if );
1935 __ delayed()->mov( Rh, Rj ); // if (<) Rj = Rh
1936 __ mov( Rh, Ri ); // else i = h
1937 __ bind(end_of_if); // }
1938 }
1940 // while (i+1 < j)
1941 __ bind( entry );
1942 __ add( Ri, 1, Rscratch );
1943 __ cmp(Rscratch, Rj);
1944 __ br( Assembler::less, true, Assembler::pt, loop );
1945 __ delayed()->add( Ri, Rj, Rh ); // h = i + j; the >> 1 happens at the loop head
1946 }
1948 // end of binary search, result index is i (must check again!)
1949 Label default_case;
1950 Label continue_execution;
1951 if (ProfileInterpreter) {
1952 __ mov( Ri, Rh ); // Save index in i for profiling
1953 }
1954 __ sll( Ri, log_entry_size, Ri );
1955 __ ld( Rarray, Ri, Rscratch );
1956 // (Rscratch is already in the native byte-ordering.)
1957 __ cmp( Rkey, Rscratch );
1958 __ br( Assembler::notEqual, true, Assembler::pn, default_case );
1959 __ delayed()->ld( Rarray, -2 * BytesPerInt, Rj ); // load default offset -> j
1961 // entry found -> j = offset
1962 __ inc( Ri, BytesPerInt );
1963 __ profile_switch_case(Rh, Rj, Rscratch, Rkey);
1964 __ ld( Rarray, Ri, Rj );
1965 // (Rj is already in the native byte-ordering.)
1967 if (ProfileInterpreter) {
1968 __ ba(false, continue_execution);
1969 __ delayed()->nop();
1970 }
1972 __ bind(default_case); // fall through (if not profiling)
1973 __ profile_switch_default(Ri);
1975 __ bind(continue_execution);
1976 __ add( Lbcp, Rj, Lbcp );
1977 __ dispatch_next( vtos );
1978 }
1981 void TemplateTable::_return(TosState state) {
1982 transition(state, state);
1983 assert(_desc->calls_vm(), "inconsistent calls_vm information");
1985 if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
1986 assert(state == vtos, "only valid state");
1987 __ mov(G0, G3_scratch);
1988 __ access_local_ptr(G3_scratch, Otos_i);
1989 __ load_klass(Otos_i, O2);
1990 __ set(JVM_ACC_HAS_FINALIZER, G3);
1991 __ ld(O2, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc), O2);
1992 __ andcc(G3, O2, G0);
1993 Label skip_register_finalizer;
1994 __ br(Assembler::zero, false, Assembler::pn, skip_register_finalizer);
1995 __ delayed()->nop();
1997 // Call out to do finalizer registration
1998 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), Otos_i);
2000 __ bind(skip_register_finalizer);
2001 }
2003 __ remove_activation(state, /* throw_monitor_exception */ true);
2005 // The caller's SP was adjusted upon method entry to accommodate
2006 // the callee's non-argument locals. Undo that adjustment.
2007 __ ret(); // return to caller
2008 __ delayed()->restore(I5_savedSP, G0, SP);
2009 }
2012 // ----------------------------------------------------------------------------
2013 // Volatile variables demand their effects be made known to all CPU's in
2014 // order. Store buffers on most chips allow reads & writes to reorder; the
2015 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
2016 // memory barrier (i.e., it's not sufficient that the interpreter does not
2017 // reorder volatile references, the hardware also must not reorder them).
2018 //
2019 // According to the new Java Memory Model (JMM):
2020 // (1) All volatiles are serialized with respect to each other.
2021 // ALSO reads & writes act as acquire & release, so:
2022 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
2023 // the read float up to before the read. It's OK for non-volatile memory refs
2024 // that happen before the volatile read to float down below it.
2025 // (3) Similarly, a volatile write cannot let unrelated NON-volatile memory refs
2026 // that happen BEFORE the write float down to after the write. It's OK for
2027 // non-volatile memory refs that happen after the volatile write to float up
2028 // before it.
2029 //
2030 // We only put in barriers around volatile refs (they are expensive), not
2031 // _between_ memory refs (that would require us to track the flavor of the
2032 // previous memory refs). Requirements (2) and (3) require some barriers
2033 // before volatile stores and after volatile loads. These nearly cover
2034 // requirement (1) but miss the volatile-store-volatile-load case. This final
2035 // case is placed after volatile-stores although it could just as well go
2036 // before volatile-loads.
2037 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint) {
2038 // Helper function to insert an is-volatile test and memory barrier.
2039 // All current SPARC implementations run in TSO, needing only StoreLoad barriers.
2040 if ((order_constraint & Assembler::StoreLoad) == 0) return;
2041 __ membar( order_constraint );
2042 }
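// Under TSO the only hardware reordering is a volatile store followed by
// a volatile load, so in practice this helper emits a single
// 'membar #StoreLoad' after volatile stores and nothing at all for the
// LoadLoad/LoadStore/StoreStore constraints requested around loads.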
2044 // ----------------------------------------------------------------------------
2045 void TemplateTable::resolve_cache_and_index(int byte_no,
2046 Register result,
2047 Register Rcache,
2048 Register index,
2049 size_t index_size) {
2050 // Depends on cpCacheOop layout!
2051 Label resolved;
2053 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2054 if (byte_no == f1_oop) {
2055 // We are resolved if the f1 field contains a non-null object (CallSite, etc.)
2056 // This kind of CP cache entry does not need to match the flags byte, because
2057 // there is a 1-1 relation between bytecode type and CP entry type.
2058 assert_different_registers(result, Rcache);
2059 __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
2060 ConstantPoolCacheEntry::f1_offset(), result);
2061 __ tst(result);
2062 __ br(Assembler::notEqual, false, Assembler::pt, resolved);
2063 __ delayed()->set((int)bytecode(), O1);
2064 } else {
2065 assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2066 assert(result == noreg, ""); // else change code for setting result
2067 const int shift_count = (1 + byte_no)*BitsPerByte;
2069 __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
2070 ConstantPoolCacheEntry::indices_offset(), Lbyte_code);
2072 __ srl( Lbyte_code, shift_count, Lbyte_code );
2073 __ and3( Lbyte_code, 0xFF, Lbyte_code );
2074 __ cmp( Lbyte_code, (int)bytecode());
2075 __ br( Assembler::equal, false, Assembler::pt, resolved);
2076 __ delayed()->set((int)bytecode(), O1);
2077 }
2079 address entry;
2080 switch (bytecode()) {
2081 case Bytecodes::_getstatic : // fall through
2082 case Bytecodes::_putstatic : // fall through
2083 case Bytecodes::_getfield : // fall through
2084 case Bytecodes::_putfield : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
2085 case Bytecodes::_invokevirtual : // fall through
2086 case Bytecodes::_invokespecial : // fall through
2087 case Bytecodes::_invokestatic : // fall through
2088 case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
2089 case Bytecodes::_invokedynamic : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
2090 case Bytecodes::_fast_aldc : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break;
2091 case Bytecodes::_fast_aldc_w : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc); break;
2092 default : ShouldNotReachHere(); break;
2093 }
2094 // first time invocation - must resolve first
2095 __ call_VM(noreg, entry, O1);
2096 // Update registers with resolved info
2097 __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2098 if (result != noreg)
2099 __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() +
2100 ConstantPoolCacheEntry::f1_offset(), result);
2101 __ bind(resolved);
2102 }
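// Rough C shape of the fast path above (a sketch; entry->f1() and the
// byte extraction are illustrative, not the real accessors):
//   if (byte_no == f1_oop ? entry->f1() != NULL
//                         : ((entry->indices() >> shift_count) & 0xFF) == bytecode())
//     goto resolved;
//   call_VM(resolve_entry, bytecode());  // slow path fills in the entry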
2104 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2105 Register Rmethod,
2106 Register Ritable_index,
2107 Register Rflags,
2108 bool is_invokevirtual,
2109 bool is_invokevfinal,
2110 bool is_invokedynamic) {
2111 // Uses both G3_scratch and G4_scratch
2112 Register Rcache = G3_scratch;
2113 Register Rscratch = G4_scratch;
2114 assert_different_registers(Rcache, Rmethod, Ritable_index);
2116 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2118 // determine constant pool cache field offsets
2119 const int method_offset = in_bytes(
2120 cp_base_offset +
2121 (is_invokevirtual
2122 ? ConstantPoolCacheEntry::f2_offset()
2123 : ConstantPoolCacheEntry::f1_offset()
2124 )
2125 );
2126 const int flags_offset = in_bytes(cp_base_offset +
2127 ConstantPoolCacheEntry::flags_offset());
2128 // access constant pool cache fields
2129 const int index_offset = in_bytes(cp_base_offset +
2130 ConstantPoolCacheEntry::f2_offset());
2132 if (is_invokevfinal) {
2133 __ get_cache_and_index_at_bcp(Rcache, Rscratch, 1);
2134 __ ld_ptr(Rcache, method_offset, Rmethod);
2135 } else if (byte_no == f1_oop) {
2136 // Resolved f1_oop goes directly into 'method' register.
2137 resolve_cache_and_index(byte_no, Rmethod, Rcache, Rscratch, sizeof(u4));
2138 } else {
2139 resolve_cache_and_index(byte_no, noreg, Rcache, Rscratch, sizeof(u2));
2140 __ ld_ptr(Rcache, method_offset, Rmethod);
2141 }
2143 if (Ritable_index != noreg) {
2144 __ ld_ptr(Rcache, index_offset, Ritable_index);
2145 }
2146 __ ld_ptr(Rcache, flags_offset, Rflags);
2147 }
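// The offsets above assume the cp-cache entry layout
//   [ indices | f1 | f2 | flags ]
// where invokevirtual keeps its target in f2 and the other invokes use f1;
// the itable index, when requested, always comes from f2.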
2149 // The Rcache register must be set before the call
2150 void TemplateTable::load_field_cp_cache_entry(Register Robj,
2151 Register Rcache,
2152 Register index,
2153 Register Roffset,
2154 Register Rflags,
2155 bool is_static) {
2156 assert_different_registers(Rcache, Rflags, Roffset);
2158 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2160 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
2161 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
2162 if (is_static) {
2163 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f1_offset(), Robj);
2164 }
2165 }
2167 // The Rcache and index registers are expected to be set before the call.
2168 // Their values are preserved across the call.
2169 void TemplateTable::jvmti_post_field_access(Register Rcache,
2170 Register index,
2171 bool is_static,
2172 bool has_tos) {
2173 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2175 if (JvmtiExport::can_post_field_access()) {
2176 // Check to see if a field access watch has been set before we take
2177 // the time to call into the VM.
2178 Label Label1;
2179 assert_different_registers(Rcache, index, G1_scratch);
2180 AddressLiteral get_field_access_count_addr(JvmtiExport::get_field_access_count_addr());
2181 __ load_contents(get_field_access_count_addr, G1_scratch);
2182 __ tst(G1_scratch);
2183 __ br(Assembler::zero, false, Assembler::pt, Label1);
2184 __ delayed()->nop();
2186 __ add(Rcache, in_bytes(cp_base_offset), Rcache);
2188 if (is_static) {
2189 __ clr(Otos_i);
2190 } else {
2191 if (has_tos) {
2192 // save object pointer before call_VM() clobbers it
2193 __ push_ptr(Otos_i); // put object on tos where GC wants it.
2194 } else {
2195 // Load top of stack (do not pop the value off the stack);
2196 __ ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), Otos_i);
2197 }
2198 __ verify_oop(Otos_i);
2199 }
2200 // Otos_i: object pointer or NULL if static
2201 // Rcache: cache entry pointer
2202 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2203 Otos_i, Rcache);
2204 if (!is_static && has_tos) {
2205 __ pop_ptr(Otos_i); // restore object pointer
2206 __ verify_oop(Otos_i);
2207 }
2208 __ get_cache_and_index_at_bcp(Rcache, index, 1);
2209 __ bind(Label1);
2210 }
2211 }
2213 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
2214 transition(vtos, vtos);
2216 Register Rcache = G3_scratch;
2217 Register index = G4_scratch;
2218 Register Rclass = Rcache;
2219 Register Roffset= G4_scratch;
2220 Register Rflags = G1_scratch;
2221 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2223 resolve_cache_and_index(byte_no, noreg, Rcache, index, sizeof(u2));
2224 jvmti_post_field_access(Rcache, index, is_static, false);
2225 load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);
2227 if (!is_static) {
2228 pop_and_check_object(Rclass);
2229 } else {
2230 __ verify_oop(Rclass);
2231 }
2233 Label exit;
2235 Assembler::Membar_mask_bits membar_bits =
2236 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
2238 if (__ membar_has_effect(membar_bits)) {
2239 // Get volatile flag
2240 __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch);
2241 __ and3(Rflags, Lscratch, Lscratch);
2242 }
2244 Label checkVolatile;
2246 // compute field type
2247 Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj;
2248 __ srl(Rflags, ConstantPoolCacheEntry::tosBits, Rflags);
2249 // Make sure we don't need to mask Rflags for tosBits after the above shift
2250 ConstantPoolCacheEntry::verify_tosBits();
2252 // Check atos before itos for getstatic, more likely (in Queens at least)
2253 __ cmp(Rflags, atos);
2254 __ br(Assembler::notEqual, false, Assembler::pt, notObj);
2255 __ delayed() ->cmp(Rflags, itos);
2257 // atos
2258 __ load_heap_oop(Rclass, Roffset, Otos_i);
2259 __ verify_oop(Otos_i);
2260 __ push(atos);
2261 if (!is_static) {
2262 patch_bytecode(Bytecodes::_fast_agetfield, G3_scratch, G4_scratch);
2263 }
2264 __ ba(false, checkVolatile);
2265 __ delayed()->tst(Lscratch);
2267 __ bind(notObj);
2269 // cmp(Rflags, itos);
2270 __ br(Assembler::notEqual, false, Assembler::pt, notInt);
2271 __ delayed() ->cmp(Rflags, ltos);
2273 // itos
2274 __ ld(Rclass, Roffset, Otos_i);
2275 __ push(itos);
2276 if (!is_static) {
2277 patch_bytecode(Bytecodes::_fast_igetfield, G3_scratch, G4_scratch);
2278 }
2279 __ ba(false, checkVolatile);
2280 __ delayed()->tst(Lscratch);
2282 __ bind(notInt);
2284 // cmp(Rflags, ltos);
2285 __ br(Assembler::notEqual, false, Assembler::pt, notLong);
2286 __ delayed() ->cmp(Rflags, btos);
2288 // ltos
2289 // load must be atomic
2290 __ ld_long(Rclass, Roffset, Otos_l);
2291 __ push(ltos);
2292 if (!is_static) {
2293 patch_bytecode(Bytecodes::_fast_lgetfield, G3_scratch, G4_scratch);
2294 }
2295 __ ba(false, checkVolatile);
2296 __ delayed()->tst(Lscratch);
2298 __ bind(notLong);
2300 // cmp(Rflags, btos);
2301 __ br(Assembler::notEqual, false, Assembler::pt, notByte);
2302 __ delayed() ->cmp(Rflags, ctos);
2304 // btos
2305 __ ldsb(Rclass, Roffset, Otos_i);
2306 __ push(itos);
2307 if (!is_static) {
2308 patch_bytecode(Bytecodes::_fast_bgetfield, G3_scratch, G4_scratch);
2309 }
2310 __ ba(false, checkVolatile);
2311 __ delayed()->tst(Lscratch);
2313 __ bind(notByte);
2315 // cmp(Rflags, ctos);
2316 __ br(Assembler::notEqual, false, Assembler::pt, notChar);
2317 __ delayed() ->cmp(Rflags, stos);
2319 // ctos
2320 __ lduh(Rclass, Roffset, Otos_i);
2321 __ push(itos);
2322 if (!is_static) {
2323 patch_bytecode(Bytecodes::_fast_cgetfield, G3_scratch, G4_scratch);
2324 }
2325 __ ba(false, checkVolatile);
2326 __ delayed()->tst(Lscratch);
2328 __ bind(notChar);
2330 // cmp(Rflags, stos);
2331 __ br(Assembler::notEqual, false, Assembler::pt, notShort);
2332 __ delayed() ->cmp(Rflags, ftos);
2334 // stos
2335 __ ldsh(Rclass, Roffset, Otos_i);
2336 __ push(itos);
2337 if (!is_static) {
2338 patch_bytecode(Bytecodes::_fast_sgetfield, G3_scratch, G4_scratch);
2339 }
2340 __ ba(false, checkVolatile);
2341 __ delayed()->tst(Lscratch);
2343 __ bind(notShort);
2346 // cmp(Rflags, ftos);
2347 __ br(Assembler::notEqual, false, Assembler::pt, notFloat);
2348 __ delayed() ->tst(Lscratch);
2350 // ftos
2351 __ ldf(FloatRegisterImpl::S, Rclass, Roffset, Ftos_f);
2352 __ push(ftos);
2353 if (!is_static) {
2354 patch_bytecode(Bytecodes::_fast_fgetfield, G3_scratch, G4_scratch);
2355 }
2356 __ ba(false, checkVolatile);
2357 __ delayed()->tst(Lscratch);
2359 __ bind(notFloat);
2362 // dtos
2363 __ ldf(FloatRegisterImpl::D, Rclass, Roffset, Ftos_d);
2364 __ push(dtos);
2365 if (!is_static) {
2366 patch_bytecode(Bytecodes::_fast_dgetfield, G3_scratch, G4_scratch);
2367 }
2369 __ bind(checkVolatile);
2370 if (__ membar_has_effect(membar_bits)) {
2371 // __ tst(Lscratch); executed in delay slot
2372 __ br(Assembler::zero, false, Assembler::pt, exit);
2373 __ delayed()->nop();
2374 volatile_barrier(membar_bits);
2375 }
2377 __ bind(exit);
2378 }
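// Note on the chain above: the cmp against the next tos state is issued
// in the delay slot of the previous branch, so each taken 'notX' branch
// arrives with its comparison already done, and the taken paths to
// checkVolatile carry tst(Lscratch) in their delay slots.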
2381 void TemplateTable::getfield(int byte_no) {
2382 getfield_or_static(byte_no, false);
2383 }
2385 void TemplateTable::getstatic(int byte_no) {
2386 getfield_or_static(byte_no, true);
2387 }
2390 void TemplateTable::fast_accessfield(TosState state) {
2391 transition(atos, state);
2392 Register Rcache = G3_scratch;
2393 Register index = G4_scratch;
2394 Register Roffset = G4_scratch;
2395 Register Rflags = Rcache;
2396 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2398 __ get_cache_and_index_at_bcp(Rcache, index, 1);
2399 jvmti_post_field_access(Rcache, index, /*is_static*/false, /*has_tos*/true);
2401 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
2403 __ null_check(Otos_i);
2404 __ verify_oop(Otos_i);
2406 Label exit;
2408 Assembler::Membar_mask_bits membar_bits =
2409 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
2410 if (__ membar_has_effect(membar_bits)) {
2411 // Get volatile flag
2412 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Rflags);
2413 __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch);
2414 }
2416 switch (bytecode()) {
2417 case Bytecodes::_fast_bgetfield:
2418 __ ldsb(Otos_i, Roffset, Otos_i);
2419 break;
2420 case Bytecodes::_fast_cgetfield:
2421 __ lduh(Otos_i, Roffset, Otos_i);
2422 break;
2423 case Bytecodes::_fast_sgetfield:
2424 __ ldsh(Otos_i, Roffset, Otos_i);
2425 break;
2426 case Bytecodes::_fast_igetfield:
2427 __ ld(Otos_i, Roffset, Otos_i);
2428 break;
2429 case Bytecodes::_fast_lgetfield:
2430 __ ld_long(Otos_i, Roffset, Otos_l);
2431 break;
2432 case Bytecodes::_fast_fgetfield:
2433 __ ldf(FloatRegisterImpl::S, Otos_i, Roffset, Ftos_f);
2434 break;
2435 case Bytecodes::_fast_dgetfield:
2436 __ ldf(FloatRegisterImpl::D, Otos_i, Roffset, Ftos_d);
2437 break;
2438 case Bytecodes::_fast_agetfield:
2439 __ load_heap_oop(Otos_i, Roffset, Otos_i);
2440 break;
2441 default:
2442 ShouldNotReachHere();
2443 }
2445 if (__ membar_has_effect(membar_bits)) {
2446 __ btst(Lscratch, Rflags);
2447 __ br(Assembler::zero, false, Assembler::pt, exit);
2448 __ delayed()->nop();
2449 volatile_barrier(membar_bits);
2450 __ bind(exit);
2451 }
2453 if (state == atos) {
2454 __ verify_oop(Otos_i); // does not blow flags!
2455 }
2456 }
2458 void TemplateTable::jvmti_post_fast_field_mod() {
2459 if (JvmtiExport::can_post_field_modification()) {
2460 // Check to see if a field modification watch has been set before we take
2461 // the time to call into the VM.
2462 Label done;
2463 AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr());
2464 __ load_contents(get_field_modification_count_addr, G4_scratch);
2465 __ tst(G4_scratch);
2466 __ br(Assembler::zero, false, Assembler::pt, done);
2467 __ delayed()->nop();
2468 __ pop_ptr(G4_scratch); // copy the object pointer from tos
2469 __ verify_oop(G4_scratch);
2470 __ push_ptr(G4_scratch); // put the object pointer back on tos
2471 __ get_cache_entry_pointer_at_bcp(G1_scratch, G3_scratch, 1);
2472 // Save tos values before call_VM() clobbers them. Since we have
2473 // to do it for every data type, we use the saved values as the
2474 // jvalue object.
2475 switch (bytecode()) { // save tos values before call_VM() clobbers them
2476 case Bytecodes::_fast_aputfield: __ push_ptr(Otos_i); break;
2477 case Bytecodes::_fast_bputfield: // fall through
2478 case Bytecodes::_fast_sputfield: // fall through
2479 case Bytecodes::_fast_cputfield: // fall through
2480 case Bytecodes::_fast_iputfield: __ push_i(Otos_i); break;
2481 case Bytecodes::_fast_dputfield: __ push_d(Ftos_d); break;
2482 case Bytecodes::_fast_fputfield: __ push_f(Ftos_f); break;
2483 // get words in right order for use as jvalue object
2484 case Bytecodes::_fast_lputfield: __ push_l(Otos_l); break;
2485 }
2486 // setup pointer to jvalue object
2487 __ mov(Lesp, G3_scratch); __ inc(G3_scratch, wordSize);
2488 // G4_scratch: object pointer
2489 // G1_scratch: cache entry pointer
2490 // G3_scratch: jvalue object on the stack
2491 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), G4_scratch, G1_scratch, G3_scratch);
2492 switch (bytecode()) { // restore tos values
2493 case Bytecodes::_fast_aputfield: __ pop_ptr(Otos_i); break;
2494 case Bytecodes::_fast_bputfield: // fall through
2495 case Bytecodes::_fast_sputfield: // fall through
2496 case Bytecodes::_fast_cputfield: // fall through
2497 case Bytecodes::_fast_iputfield: __ pop_i(Otos_i); break;
2498 case Bytecodes::_fast_dputfield: __ pop_d(Ftos_d); break;
2499 case Bytecodes::_fast_fputfield: __ pop_f(Ftos_f); break;
2500 case Bytecodes::_fast_lputfield: __ pop_l(Otos_l); break;
2501 }
2502 __ bind(done);
2503 }
2504 }
2506 // The Rcache and index registers are expected to be set before the call.
2507 // The function may clobber various other registers, but preserves Rcache and index.
2508 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register index, bool is_static) {
2509 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2511 if (JvmtiExport::can_post_field_modification()) {
2512 // Check to see if a field modification watch has been set before we take
2513 // the time to call into the VM.
2514 Label Label1;
2515 assert_different_registers(Rcache, index, G1_scratch);
2516 AddressLiteral get_field_modification_count_addr(JvmtiExport::get_field_modification_count_addr());
2517 __ load_contents(get_field_modification_count_addr, G1_scratch);
2518 __ tst(G1_scratch);
2519 __ br(Assembler::zero, false, Assembler::pt, Label1);
2520 __ delayed()->nop();
2522 // The Rcache and index registers have already been set.
2523 // This would allow us to eliminate the call below, but then the Rcache
2524 // and index registers would have to be used consistently after this line.
2525 __ get_cache_and_index_at_bcp(G1_scratch, G4_scratch, 1);
2527 __ add(G1_scratch, in_bytes(cp_base_offset), G3_scratch);
2528 if (is_static) {
2529 // Life is simple. Null out the object pointer.
2530 __ clr(G4_scratch);
2531 } else {
2532 Register Rflags = G1_scratch;
2533 // Life is harder. The stack holds the value on top, followed by the
2534 // object. We don't know the size of the value, though; it could be
2535 // one or two words depending on its type. As a result, we must find
2536 // the type to determine where the object is.
2538 Label two_word, valsizeknown;
2539 __ ld_ptr(G1_scratch, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
2540 __ mov(Lesp, G4_scratch);
2541 __ srl(Rflags, ConstantPoolCacheEntry::tosBits, Rflags);
2542 // Make sure we don't need to mask Rflags for tosBits after the above shift
2543 ConstantPoolCacheEntry::verify_tosBits();
2544 __ cmp(Rflags, ltos);
2545 __ br(Assembler::equal, false, Assembler::pt, two_word);
2546 __ delayed()->cmp(Rflags, dtos);
2547 __ br(Assembler::equal, false, Assembler::pt, two_word);
2548 __ delayed()->nop();
2549 __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(1));
2550 __ br(Assembler::always, false, Assembler::pt, valsizeknown);
2551 __ delayed()->nop();
2552 __ bind(two_word);
2554 __ inc(G4_scratch, Interpreter::expr_offset_in_bytes(2));
2556 __ bind(valsizeknown);
2557 // setup object pointer
2558 __ ld_ptr(G4_scratch, 0, G4_scratch);
2559 __ verify_oop(G4_scratch);
2560 }
2561 // setup pointer to jvalue object
2562 __ mov(Lesp, G1_scratch); __ inc(G1_scratch, wordSize);
2563 // G4_scratch: object pointer or NULL if static
2564 // G3_scratch: cache entry pointer
2565 // G1_scratch: jvalue object on the stack
2566 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
2567 G4_scratch, G3_scratch, G1_scratch);
2568 __ get_cache_and_index_at_bcp(Rcache, index, 1);
2569 __ bind(Label1);
2570 }
2571 }
2573 void TemplateTable::pop_and_check_object(Register r) {
2574 __ pop_ptr(r);
2575 __ null_check(r); // for field access must check obj.
2576 __ verify_oop(r);
2577 }
2579 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
2580 transition(vtos, vtos);
2581 Register Rcache = G3_scratch;
2582 Register index = G4_scratch;
2583 Register Rclass = Rcache;
2584 Register Roffset= G4_scratch;
2585 Register Rflags = G1_scratch;
2586 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2588 resolve_cache_and_index(byte_no, noreg, Rcache, index, sizeof(u2));
2589 jvmti_post_field_mod(Rcache, index, is_static);
2590 load_field_cp_cache_entry(Rclass, Rcache, index, Roffset, Rflags, is_static);
2592 Assembler::Membar_mask_bits read_bits =
2593 Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore);
2594 Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad;
2596 Label notVolatile, checkVolatile, exit;
2597 if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
2598 __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch);
2599 __ and3(Rflags, Lscratch, Lscratch);
2601 if (__ membar_has_effect(read_bits)) {
2602 __ tst(Lscratch);
2603 __ br(Assembler::zero, false, Assembler::pt, notVolatile);
2604 __ delayed()->nop();
2605 volatile_barrier(read_bits);
2606 __ bind(notVolatile);
2607 }
2608 }
2610 __ srl(Rflags, ConstantPoolCacheEntry::tosBits, Rflags);
2611 // Make sure we don't need to mask Rflags for tosBits after the above shift
2612 ConstantPoolCacheEntry::verify_tosBits();
2614 // compute field type
2615 Label notInt, notShort, notChar, notObj, notByte, notLong, notFloat;
2617 if (is_static) {
2618 // putstatic with object type most likely, check that first
2619 __ cmp(Rflags, atos );
2620 __ br(Assembler::notEqual, false, Assembler::pt, notObj);
2621 __ delayed() ->cmp(Rflags, itos );
2623 // atos
2624 __ pop_ptr();
2625 __ verify_oop(Otos_i);
2627 do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
2629 __ ba(false, checkVolatile);
2630 __ delayed()->tst(Lscratch);
2632 __ bind(notObj);
2634 // cmp(Rflags, itos );
2635 __ br(Assembler::notEqual, false, Assembler::pt, notInt);
2636 __ delayed() ->cmp(Rflags, btos );
2638 // itos
2639 __ pop_i();
2640 __ st(Otos_i, Rclass, Roffset);
2641 __ ba(false, checkVolatile);
2642 __ delayed()->tst(Lscratch);
2644 __ bind(notInt);
2646 } else {
2647 // putfield with int type most likely, check that first
2648 __ cmp(Rflags, itos );
2649 __ br(Assembler::notEqual, false, Assembler::pt, notInt);
2650 __ delayed() ->cmp(Rflags, atos );
2652 // itos
2653 __ pop_i();
2654 pop_and_check_object(Rclass);
2655 __ st(Otos_i, Rclass, Roffset);
2656 patch_bytecode(Bytecodes::_fast_iputfield, G3_scratch, G4_scratch);
2657 __ ba(false, checkVolatile);
2658 __ delayed()->tst(Lscratch);
2660 __ bind(notInt);
2661 // cmp(Rflags, atos );
2662 __ br(Assembler::notEqual, false, Assembler::pt, notObj);
2663 __ delayed() ->cmp(Rflags, btos );
2665 // atos
2666 __ pop_ptr();
2667 pop_and_check_object(Rclass);
2668 __ verify_oop(Otos_i);
2670 do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
2672 patch_bytecode(Bytecodes::_fast_aputfield, G3_scratch, G4_scratch);
2673 __ ba(false, checkVolatile);
2674 __ delayed()->tst(Lscratch);
2676 __ bind(notObj);
2677 }
2679 // cmp(Rflags, btos );
2680 __ br(Assembler::notEqual, false, Assembler::pt, notByte);
2681 __ delayed() ->cmp(Rflags, ltos );
2683 // btos
2684 __ pop_i();
2685 if (!is_static) pop_and_check_object(Rclass);
2686 __ stb(Otos_i, Rclass, Roffset);
2687 if (!is_static) {
2688 patch_bytecode(Bytecodes::_fast_bputfield, G3_scratch, G4_scratch);
2689 }
2690 __ ba(false, checkVolatile);
2691 __ delayed()->tst(Lscratch);
2693 __ bind(notByte);
2695 // cmp(Rflags, ltos );
2696 __ br(Assembler::notEqual, false, Assembler::pt, notLong);
2697 __ delayed() ->cmp(Rflags, ctos );
2699 // ltos
2700 __ pop_l();
2701 if (!is_static) pop_and_check_object(Rclass);
2702 __ st_long(Otos_l, Rclass, Roffset);
2703 if (!is_static) {
2704 patch_bytecode(Bytecodes::_fast_lputfield, G3_scratch, G4_scratch);
2705 }
2706 __ ba(false, checkVolatile);
2707 __ delayed()->tst(Lscratch);
2709 __ bind(notLong);
2711 // cmp(Rflags, ctos );
2712 __ br(Assembler::notEqual, false, Assembler::pt, notChar);
2713 __ delayed() ->cmp(Rflags, stos );
2715 // ctos (char)
2716 __ pop_i();
2717 if (!is_static) pop_and_check_object(Rclass);
2718 __ sth(Otos_i, Rclass, Roffset);
2719 if (!is_static) {
2720 patch_bytecode(Bytecodes::_fast_cputfield, G3_scratch, G4_scratch);
2721 }
2722 __ ba(false, checkVolatile);
2723 __ delayed()->tst(Lscratch);
2725 __ bind(notChar);
2726 // cmp(Rflags, stos );
2727 __ br(Assembler::notEqual, false, Assembler::pt, notShort);
2728 __ delayed() ->cmp(Rflags, ftos );
2730 // stos (short)
2731 __ pop_i();
2732 if (!is_static) pop_and_check_object(Rclass);
2733 __ sth(Otos_i, Rclass, Roffset);
2734 if (!is_static) {
2735 patch_bytecode(Bytecodes::_fast_sputfield, G3_scratch, G4_scratch);
2736 }
2737 __ ba(false, checkVolatile);
2738 __ delayed()->tst(Lscratch);
2740 __ bind(notShort);
2741 // cmp(Rflags, ftos );
2742 __ br(Assembler::notZero, false, Assembler::pt, notFloat);
2743 __ delayed()->nop();
2745 // ftos
2746 __ pop_f();
2747 if (!is_static) pop_and_check_object(Rclass);
2748 __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
2749 if (!is_static) {
2750 patch_bytecode(Bytecodes::_fast_fputfield, G3_scratch, G4_scratch);
2751 }
2752 __ ba(false, checkVolatile);
2753 __ delayed()->tst(Lscratch);
2755 __ bind(notFloat);
2757 // dtos
2758 __ pop_d();
2759 if (!is_static) pop_and_check_object(Rclass);
2760 __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
2761 if (!is_static) {
2762 patch_bytecode(Bytecodes::_fast_dputfield, G3_scratch, G4_scratch);
2763 }
2765 __ bind(checkVolatile);
2766 __ tst(Lscratch);
2768 if (__ membar_has_effect(write_bits)) {
2769 // condition codes set by the tst(Lscratch) above
2770 __ br(Assembler::zero, false, Assembler::pt, exit);
2771 __ delayed()->nop();
2772 volatile_barrier(Assembler::StoreLoad);
2773 __ bind(exit);
2774 }
2775 }
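// Barrier summary for the volatile path above: read_bits
// (LoadStore|StoreStore) are requested before the store and write_bits
// (StoreLoad) after it; since volatile_barrier only emits a membar for
// StoreLoad under TSO, just the trailing barrier materializes.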
2777 void TemplateTable::fast_storefield(TosState state) {
2778 transition(state, vtos);
2779 Register Rcache = G3_scratch;
2780 Register Rclass = Rcache;
2781 Register Roffset= G4_scratch;
2782 Register Rflags = G1_scratch;
2783 ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
2785 jvmti_post_fast_field_mod();
2787 __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 1);
2789 Assembler::Membar_mask_bits read_bits =
2790 Assembler::Membar_mask_bits(Assembler::LoadStore | Assembler::StoreStore);
2791 Assembler::Membar_mask_bits write_bits = Assembler::StoreLoad;
2793 Label notVolatile, checkVolatile, exit;
2794 if (__ membar_has_effect(read_bits) || __ membar_has_effect(write_bits)) {
2795 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::flags_offset(), Rflags);
2796 __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch);
2797 __ and3(Rflags, Lscratch, Lscratch);
2798 if (__ membar_has_effect(read_bits)) {
2799 __ tst(Lscratch);
2800 __ br(Assembler::zero, false, Assembler::pt, notVolatile);
2801 __ delayed()->nop();
2802 volatile_barrier(read_bits);
2803 __ bind(notVolatile);
2804 }
2805 }
2807 __ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f2_offset(), Roffset);
2808 pop_and_check_object(Rclass);
2810 switch (bytecode()) {
2811 case Bytecodes::_fast_bputfield: __ stb(Otos_i, Rclass, Roffset); break;
2812 case Bytecodes::_fast_cputfield: /* fall through */
2813 case Bytecodes::_fast_sputfield: __ sth(Otos_i, Rclass, Roffset); break;
2814 case Bytecodes::_fast_iputfield: __ st(Otos_i, Rclass, Roffset); break;
2815 case Bytecodes::_fast_lputfield: __ st_long(Otos_l, Rclass, Roffset); break;
2816 case Bytecodes::_fast_fputfield:
2817 __ stf(FloatRegisterImpl::S, Ftos_f, Rclass, Roffset);
2818 break;
2819 case Bytecodes::_fast_dputfield:
2820 __ stf(FloatRegisterImpl::D, Ftos_d, Rclass, Roffset);
2821 break;
2822 case Bytecodes::_fast_aputfield:
2823 do_oop_store(_masm, Rclass, Roffset, 0, Otos_i, G1_scratch, _bs->kind(), false);
2824 break;
2825 default:
2826 ShouldNotReachHere();
2827 }
2829 if (__ membar_has_effect(write_bits)) {
2830 __ tst(Lscratch);
2831 __ br(Assembler::zero, false, Assembler::pt, exit);
2832 __ delayed()->nop();
2833 volatile_barrier(Assembler::StoreLoad);
2834 __ bind(exit);
2835 }
2836 }
2839 void TemplateTable::putfield(int byte_no) {
2840 putfield_or_static(byte_no, false);
2841 }
2843 void TemplateTable::putstatic(int byte_no) {
2844 putfield_or_static(byte_no, true);
2845 }
2848 void TemplateTable::fast_xaccess(TosState state) {
2849 transition(vtos, state);
2850 Register Rcache = G3_scratch;
2851 Register Roffset = G4_scratch;
2852 Register Rflags = G4_scratch;
2853 Register Rreceiver = Lscratch;
2855 __ ld_ptr(Llocals, 0, Rreceiver);
2857 // access constant pool cache (is resolved)
2858 __ get_cache_and_index_at_bcp(Rcache, G4_scratch, 2);
2859 __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset(), Roffset);
2860 __ add(Lbcp, 1, Lbcp); // needed to report exception at the correct bcp
2862 __ verify_oop(Rreceiver);
2863 __ null_check(Rreceiver);
2864 if (state == atos) {
2865 __ load_heap_oop(Rreceiver, Roffset, Otos_i);
2866 } else if (state == itos) {
2867 __ ld (Rreceiver, Roffset, Otos_i) ;
2868 } else if (state == ftos) {
2869 __ ldf(FloatRegisterImpl::S, Rreceiver, Roffset, Ftos_f);
2870 } else {
2871 ShouldNotReachHere();
2872 }
2874 Assembler::Membar_mask_bits membar_bits =
2875 Assembler::Membar_mask_bits(Assembler::LoadLoad | Assembler::LoadStore);
2876 if (__ membar_has_effect(membar_bits)) {
2878 // Get is_volatile value in Rflags and check if membar is needed
2879 __ ld_ptr(Rcache, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::flags_offset(), Rflags);
2881 // Test volatile
2882 Label notVolatile;
2883 __ set((1 << ConstantPoolCacheEntry::volatileField), Lscratch);
2884 __ btst(Rflags, Lscratch);
2885 __ br(Assembler::zero, false, Assembler::pt, notVolatile);
2886 __ delayed()->nop();
2887 volatile_barrier(membar_bits);
2888 __ bind(notVolatile);
2889 }
2891 __ interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
2892 __ sub(Lbcp, 1, Lbcp);
2893 }
2895 //----------------------------------------------------------------------------------------------------
2896 // Calls
2898 void TemplateTable::count_calls(Register method, Register temp) {
2899 // implemented elsewhere
2900 ShouldNotReachHere();
2901 }
2903 void TemplateTable::generate_vtable_call(Register Rrecv, Register Rindex, Register Rret) {
2904 Register Rtemp = G4_scratch;
2905 Register Rcall = Rindex;
2906 assert_different_registers(Rcall, G5_method, Gargs, Rret);
2908 // get target methodOop & entry point
2909 const int base = instanceKlass::vtable_start_offset() * wordSize;
2910 if (vtableEntry::size() % 3 == 0) {
2911 // scale the vtable index by 12:
2912 int one_third = vtableEntry::size() / 3;
2913 __ sll(Rindex, exact_log2(one_third * 1 * wordSize), Rtemp);
2914 __ sll(Rindex, exact_log2(one_third * 2 * wordSize), Rindex);
2915 __ add(Rindex, Rtemp, Rindex);
2916 } else {
2917 // scale the vtable index by 8:
2918 __ sll(Rindex, exact_log2(vtableEntry::size() * wordSize), Rindex);
2919 }
2921 __ add(Rrecv, Rindex, Rrecv);
2922 __ ld_ptr(Rrecv, base + vtableEntry::method_offset_in_bytes(), G5_method);
2924 __ call_from_interpreter(Rcall, Gargs, Rret);
2925 }
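// Worked example of the scaling above, assuming a 32-bit VM where
// vtableEntry::size() == 3 words (12-byte entries): for vtable index i,
//   Rtemp  = i << exact_log2(1 * wordSize) = i * 4
//   Rindex = i << exact_log2(2 * wordSize) = i * 8
//   Rindex = i*4 + i*8                     = i * 12
// When the entry size is a power of two, the single shift in the else
// branch suffices.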
2927 void TemplateTable::invokevirtual(int byte_no) {
2928 transition(vtos, vtos);
2929 assert(byte_no == f2_byte, "use this argument");
2931 Register Rscratch = G3_scratch;
2932 Register Rtemp = G4_scratch;
2933 Register Rret = Lscratch;
2934 Register Rrecv = G5_method;
2935 Label notFinal;
2937 load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, true, false, false);
2938 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
2940 // Check for vfinal
2941 __ set((1 << ConstantPoolCacheEntry::vfinalMethod), G4_scratch);
2942 __ btst(Rret, G4_scratch);
2943 __ br(Assembler::zero, false, Assembler::pt, notFinal);
2944 __ delayed()->and3(Rret, 0xFF, G4_scratch); // gets number of parameters
2946 patch_bytecode(Bytecodes::_fast_invokevfinal, Rscratch, Rtemp);
2948 invokevfinal_helper(Rscratch, Rret);
2950 __ bind(notFinal);
2952 __ mov(G5_method, Rscratch); // better scratch register
2953 __ load_receiver(G4_scratch, O0); // gets receiverOop
2954 // receiver is in O0
2955 __ verify_oop(O0);
2957 // get return address
2958 AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
2959 __ set(table, Rtemp);
2960 __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type
2961 // Make sure we don't need to mask Rret for tosBits after the above shift
2962 ConstantPoolCacheEntry::verify_tosBits();
2963 __ sll(Rret, LogBytesPerWord, Rret);
2964 __ ld_ptr(Rtemp, Rret, Rret); // get return address
2966 // get receiver klass
2967 __ null_check(O0, oopDesc::klass_offset_in_bytes());
2968 __ load_klass(O0, Rrecv);
2969 __ verify_oop(Rrecv);
2971 __ profile_virtual_call(Rrecv, O4);
2973 generate_vtable_call(Rrecv, Rscratch, Rret);
2974 }
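// The return-address lookup used above (and repeated in the other invoke
// bytecodes) amounts to this sketch:
//   ret_type = flags >> tosBits;          // no mask needed, verified above
//   Rret     = return_table[ret_type];    // one entry per tos state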
2976 void TemplateTable::fast_invokevfinal(int byte_no) {
2977 transition(vtos, vtos);
2978 assert(byte_no == f2_byte, "use this argument");
2980 load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Lscratch, true,
2981 /*is_invokevfinal*/true, false);
2982 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
2983 invokevfinal_helper(G3_scratch, Lscratch);
2984 }
2986 void TemplateTable::invokevfinal_helper(Register Rscratch, Register Rret) {
2987 Register Rtemp = G4_scratch;
2989 __ verify_oop(G5_method);
2991 // Load receiver from stack slot
2992 __ lduh(G5_method, in_bytes(methodOopDesc::size_of_parameters_offset()), G4_scratch);
2993 __ load_receiver(G4_scratch, O0);
2995 // receiver NULL check
2996 __ null_check(O0);
2998 __ profile_final_call(O4);
3000 // get return address
3001 AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
3002 __ set(table, Rtemp);
3003 __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type
3004 // Make sure we don't need to mask Rret for tosBits after the above shift
3005 ConstantPoolCacheEntry::verify_tosBits();
3006 __ sll(Rret, LogBytesPerWord, Rret);
3007 __ ld_ptr(Rtemp, Rret, Rret); // get return address
3010 // do the call
3011 __ call_from_interpreter(Rscratch, Gargs, Rret);
3012 }
3014 void TemplateTable::invokespecial(int byte_no) {
3015 transition(vtos, vtos);
3016 assert(byte_no == f1_byte, "use this argument");
3018 Register Rscratch = G3_scratch;
3019 Register Rtemp = G4_scratch;
3020 Register Rret = Lscratch;
3022 load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, /*virtual*/ false, false, false);
3023 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
3025 __ verify_oop(G5_method);
3027 __ lduh(G5_method, in_bytes(methodOopDesc::size_of_parameters_offset()), G4_scratch);
3028 __ load_receiver(G4_scratch, O0);
3030 // receiver NULL check
3031 __ null_check(O0);
3033 __ profile_call(O4);
3035 // get return address
3036 AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
3037 __ set(table, Rtemp);
3038 __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type
3039 // Make sure we don't need to mask Rret for tosBits after the above shift
3040 ConstantPoolCacheEntry::verify_tosBits();
3041 __ sll(Rret, LogBytesPerWord, Rret);
3042 __ ld_ptr(Rtemp, Rret, Rret); // get return address
3044 // do the call
3045 __ call_from_interpreter(Rscratch, Gargs, Rret);
3046 }
3048 void TemplateTable::invokestatic(int byte_no) {
3049 transition(vtos, vtos);
3050 assert(byte_no == f1_byte, "use this argument");
3052 Register Rscratch = G3_scratch;
3053 Register Rtemp = G4_scratch;
3054 Register Rret = Lscratch;
3056 load_invoke_cp_cache_entry(byte_no, G5_method, noreg, Rret, /*virtual*/ false, false, false);
3057 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
3059 __ verify_oop(G5_method);
3061 __ profile_call(O4);
3063 // get return address
3064 AddressLiteral table(Interpreter::return_3_addrs_by_index_table());
3065 __ set(table, Rtemp);
3066 __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type
3067 // Make sure we don't need to mask Rret for tosBits after the above shift
3068 ConstantPoolCacheEntry::verify_tosBits();
3069 __ sll(Rret, LogBytesPerWord, Rret);
3070 __ ld_ptr(Rtemp, Rret, Rret); // get return address
3072 // do the call
3073 __ call_from_interpreter(Rscratch, Gargs, Rret);
3074 }
3077 void TemplateTable::invokeinterface_object_method(Register RklassOop,
3078 Register Rcall,
3079 Register Rret,
3080 Register Rflags) {
3081 Register Rscratch = G4_scratch;
3082 Register Rindex = Lscratch;
3084 assert_different_registers(Rscratch, Rindex, Rret);
3086 Label notFinal;
3088 // Check for vfinal
3089 __ set((1 << ConstantPoolCacheEntry::vfinalMethod), Rscratch);
3090 __ btst(Rflags, Rscratch);
3091 __ br(Assembler::zero, false, Assembler::pt, notFinal);
3092 __ delayed()->nop();
3094 __ profile_final_call(O4);
3096 // do the call - the index (f2) contains the methodOop
3097 assert_different_registers(G5_method, Gargs, Rcall);
3098 __ mov(Rindex, G5_method);
3099 __ call_from_interpreter(Rcall, Gargs, Rret);
3100 __ bind(notFinal);
3102 __ profile_virtual_call(RklassOop, O4);
3103 generate_vtable_call(RklassOop, Rindex, Rret);
3104 }
3107 void TemplateTable::invokeinterface(int byte_no) {
3108 transition(vtos, vtos);
3109 assert(byte_no == f1_byte, "use this argument");
3111 Register Rscratch = G4_scratch;
3112 Register Rret = G3_scratch;
3113 Register Rindex = Lscratch;
3114 Register Rinterface = G1_scratch;
3115 Register RklassOop = G5_method;
3116 Register Rflags = O1;
3117 assert_different_registers(Rscratch, G5_method);
3119 load_invoke_cp_cache_entry(byte_no, Rinterface, Rindex, Rflags, /*virtual*/ false, false, false);
3120 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
3122 // get receiver
3123 __ and3(Rflags, 0xFF, Rscratch); // gets number of parameters
3124 __ load_receiver(Rscratch, O0);
3125 __ verify_oop(O0);
3127 __ mov(Rflags, Rret);
3129 // get return address
3130 AddressLiteral table(Interpreter::return_5_addrs_by_index_table());
3131 __ set(table, Rscratch);
3132 __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type
3133 // Make sure we don't need to mask Rret for tosBits after the above shift
3134 ConstantPoolCacheEntry::verify_tosBits();
3135 __ sll(Rret, LogBytesPerWord, Rret);
3136 __ ld_ptr(Rscratch, Rret, Rret); // get return address
3138 // get receiver klass
3139 __ null_check(O0, oopDesc::klass_offset_in_bytes());
3140 __ load_klass(O0, RklassOop);
3141 __ verify_oop(RklassOop);
3143 // Special case of invokeinterface called for virtual method of
3144 // java.lang.Object. See cpCacheOop.cpp for details.
3145 // This code isn't produced by javac, but could be produced by
3146 // another compliant Java compiler.
3147 Label notMethod;
3148 __ set((1 << ConstantPoolCacheEntry::methodInterface), Rscratch);
3149 __ btst(Rflags, Rscratch);
3150 __ br(Assembler::zero, false, Assembler::pt, notMethod);
3151 __ delayed()->nop();
3153 invokeinterface_object_method(RklassOop, Rinterface, Rret, Rflags);
3155 __ bind(notMethod);
3157 __ profile_virtual_call(RklassOop, O4);
3159 //
3160 // find entry point to call
3161 //
3163 // compute start of first itableOffsetEntry (which is at end of vtable)
3164 const int base = instanceKlass::vtable_start_offset() * wordSize;
3165 Label search;
3166 Register Rtemp = Rflags;
3168 __ ld(RklassOop, instanceKlass::vtable_length_offset() * wordSize, Rtemp);
3169 if (align_object_offset(1) > 1) {
3170 __ round_to(Rtemp, align_object_offset(1));
3171 }
3172 __ sll(Rtemp, LogBytesPerWord, Rtemp); // Rtemp *= wordSize
3173 if (Assembler::is_simm13(base)) {
3174 __ add(Rtemp, base, Rtemp);
3175 } else {
3176 __ set(base, Rscratch);
3177 __ add(Rscratch, Rtemp, Rtemp);
3178 }
3179 __ add(RklassOop, Rtemp, Rscratch);
3181 __ bind(search);
3183 __ ld_ptr(Rscratch, itableOffsetEntry::interface_offset_in_bytes(), Rtemp);
3184 {
3185 Label ok;
3187 // Check that entry is non-null. Null entries are probably a bytecode
3188 // problem. If the interface isn't implemented by the receiver class,
3189 // the VM should throw IncompatibleClassChangeError. linkResolver checks
3190 // this too but that's only if the entry isn't already resolved, so we
3191 // need to check again.
3192 __ br_notnull( Rtemp, false, Assembler::pt, ok);
3193 __ delayed()->nop();
3194 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
3195 __ should_not_reach_here();
3196 __ bind(ok);
3197 __ verify_oop(Rtemp);
3198 }
3200 __ verify_oop(Rinterface);
3202 __ cmp(Rinterface, Rtemp);
3203 __ brx(Assembler::notEqual, true, Assembler::pn, search);
3204 __ delayed()->add(Rscratch, itableOffsetEntry::size() * wordSize, Rscratch);
3206 // entry found and Rscratch points to it
3207 __ ld(Rscratch, itableOffsetEntry::offset_offset_in_bytes(), Rscratch);
3209 assert(itableMethodEntry::method_offset_in_bytes() == 0, "adjust instruction below");
3210 __ sll(Rindex, exact_log2(itableMethodEntry::size() * wordSize), Rindex); // Rindex *= entry size in bytes
3211 __ add(Rscratch, Rindex, Rscratch);
3212 __ ld_ptr(RklassOop, Rscratch, G5_method);
3214 // Check for abstract method error.
3215 {
3216 Label ok;
3217 __ tst(G5_method);
3218 __ brx(Assembler::notZero, false, Assembler::pt, ok);
3219 __ delayed()->nop();
3220 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3221 __ should_not_reach_here();
3222 __ bind(ok);
3223 }
3225 Register Rcall = Rinterface;
3226 assert_different_registers(Rcall, G5_method, Gargs, Rret);
3228 __ verify_oop(G5_method);
3229 __ call_from_interpreter(Rcall, Gargs, Rret);
3231 }
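// C-level sketch of the itable scan generated above (illustrative;
// first_itable_entry is hypothetical):
//   itableOffsetEntry* e = first_itable_entry(klass);  // right after the vtable
//   while (true) {
//     if (e->interface() == NULL) throw IncompatibleClassChangeError;
//     if (e->interface() == interf) break;
//     e++;
//   }
//   methodOop m = *(methodOop*)((address)klass + e->offset() + index * entry_size);
//   if (m == NULL) throw AbstractMethodError;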
3234 void TemplateTable::invokedynamic(int byte_no) {
3235 transition(vtos, vtos);
3236 assert(byte_no == f1_oop, "use this argument");
3238 if (!EnableInvokeDynamic) {
3239 // We should not encounter this bytecode if !EnableInvokeDynamic.
3240 // The verifier will stop it. However, if we get past the verifier,
3241 // this will stop the thread in a reasonable way, without crashing the JVM.
3242 __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3243 InterpreterRuntime::throw_IncompatibleClassChangeError));
3244 // the call_VM checks for exception, so we should never return here.
3245 __ should_not_reach_here();
3246 return;
3247 }
3249 // G5: CallSite object (f1)
3250 // XX: unused (f2)
3251 // XX: flags (unused)
3253 Register G5_callsite = G5_method;
3254 Register Rscratch = G3_scratch;
3255 Register Rtemp = G1_scratch;
3256 Register Rret = Lscratch;
3258 load_invoke_cp_cache_entry(byte_no, G5_callsite, noreg, Rret,
3259 /*virtual*/ false, /*vfinal*/ false, /*indy*/ true);
3260 __ mov(SP, O5_savedSP); // record SP that we wanted the callee to restore
3262 __ verify_oop(G5_callsite);
3264 // profile this call
3265 __ profile_call(O4);
3267 // get return address
3268 AddressLiteral table(Interpreter::return_5_addrs_by_index_table());
3269 __ set(table, Rtemp);
3270 __ srl(Rret, ConstantPoolCacheEntry::tosBits, Rret); // get return type
3271 // Make sure we don't need to mask Rret for tosBits after the above shift
3272 ConstantPoolCacheEntry::verify_tosBits();
3273 __ sll(Rret, LogBytesPerWord, Rret);
3274 __ ld_ptr(Rtemp, Rret, Rret); // get return address
3276 __ load_heap_oop(G5_callsite, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, Rscratch), G3_method_handle);
3277 __ null_check(G3_method_handle);
3279 // Adjust Rret first so Llast_SP can be same as Rret
3280 __ add(Rret, -frame::pc_return_offset, O7);
3281 __ add(Lesp, BytesPerWord, Gargs); // setup parameter pointer
3282 __ jump_to_method_handle_entry(G3_method_handle, Rtemp, /* emit_delayed_nop */ false);
3283 // Record SP so we can remove any stack space allocated by adapter transition
3284 __ delayed()->mov(SP, Llast_SP);
3285 }
3288 //----------------------------------------------------------------------------------------------------
3289 // Allocation
3291 void TemplateTable::_new() {
3292 transition(vtos, atos);
3294 Label slow_case;
3295 Label done;
3296 Label initialize_header;
3297 Label initialize_object; // including clearing the fields
3299 Register RallocatedObject = Otos_i;
3300 Register RinstanceKlass = O1;
3301 Register Roffset = O3;
3302 Register Rscratch = O4;
3304 __ get_2_byte_integer_at_bcp(1, Rscratch, Roffset, InterpreterMacroAssembler::Unsigned);
3305 __ get_cpool_and_tags(Rscratch, G3_scratch);
3306 // make sure the class we're about to instantiate has been resolved
3307 // This is done before loading instanceKlass to be consistent with the order
3308 // in which the constant pool is updated (see constantPoolOopDesc::klass_at_put)
3309 __ add(G3_scratch, typeArrayOopDesc::header_size(T_BYTE) * wordSize, G3_scratch);
3310 __ ldub(G3_scratch, Roffset, G3_scratch);
3311 __ cmp(G3_scratch, JVM_CONSTANT_Class);
3312 __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
3313 __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
3314 // get instanceKlass
3315 //__ sll(Roffset, LogBytesPerWord, Roffset); // executed in delay slot
3316 __ add(Roffset, sizeof(constantPoolOopDesc), Roffset);
3317 __ ld_ptr(Rscratch, Roffset, RinstanceKlass);
3319 // make sure klass is fully initialized:
3320 __ ld(RinstanceKlass, instanceKlass::init_state_offset_in_bytes() + sizeof(oopDesc), G3_scratch);
3321 __ cmp(G3_scratch, instanceKlass::fully_initialized);
3322 __ br(Assembler::notEqual, false, Assembler::pn, slow_case);
3323 __ delayed()->ld(RinstanceKlass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc), Roffset);
3325 // get instance_size in instanceKlass (already aligned)
3326 //__ ld(RinstanceKlass, Klass::layout_helper_offset_in_bytes() + sizeof(oopDesc), Roffset);
3328 // make sure the klass has no finalizer and is not abstract, an interface, or java/lang/Class
3329 __ btst(Klass::_lh_instance_slow_path_bit, Roffset);
3330 __ br(Assembler::notZero, false, Assembler::pn, slow_case);
3331 __ delayed()->nop();
3333 // allocate the instance
3334 // 1) Try to allocate in the TLAB
3335 // 2) if that fails, and the TLAB is not yet full enough to discard, allocate in the shared Eden
3336 // 3) if the above fails (or is not applicable), go to a slow case
3337 // (creates a new TLAB, etc.)
3339 const bool allow_shared_alloc =
3340 Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
3342 if (UseTLAB) {
3343 Register RoldTopValue = RallocatedObject;
3344 Register RtopAddr = G3_scratch, RtlabWasteLimitValue = G3_scratch;
3345 Register RnewTopValue = G1_scratch;
3346 Register RendValue = Rscratch;
3347 Register RfreeValue = RnewTopValue;
3349 // check if we can allocate in the TLAB
3350 __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), RoldTopValue); // sets up RallocatedObject
3351 __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), RendValue);
3352 __ add(RoldTopValue, Roffset, RnewTopValue);
3354 // if there is enough space, we do not CAS and do not clear
3355 __ cmp(RnewTopValue, RendValue);
3356 if (ZeroTLAB) {
3357 // the fields have already been cleared
3358 __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_header);
3359 } else {
3360 // initialize both the header and fields
3361 __ brx(Assembler::lessEqualUnsigned, true, Assembler::pt, initialize_object);
3362 }
3363 __ delayed()->st_ptr(RnewTopValue, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
3365 if (allow_shared_alloc) {
3366 // Check if tlab should be discarded (refill_waste_limit >= free)
3367 __ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), RtlabWasteLimitValue);
3368 __ sub(RendValue, RoldTopValue, RfreeValue);
3369 #ifdef _LP64
3370 __ srlx(RfreeValue, LogHeapWordSize, RfreeValue);
3371 #else
3372 __ srl(RfreeValue, LogHeapWordSize, RfreeValue);
3373 #endif
3374 __ cmp(RtlabWasteLimitValue, RfreeValue);
3375 __ brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, slow_case); // tlab waste is small
3376 __ delayed()->nop();
3378 // increment waste limit to prevent getting stuck on this slow path
3379 __ add(RtlabWasteLimitValue, ThreadLocalAllocBuffer::refill_waste_limit_increment(), RtlabWasteLimitValue);
3380 __ st_ptr(RtlabWasteLimitValue, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
3381 } else {
3382 // No allocation in the shared eden.
3383 __ br(Assembler::always, false, Assembler::pt, slow_case);
3384 __ delayed()->nop();
3385 }
3386 }
3388 // Allocation in the shared Eden
3389 if (allow_shared_alloc) {
3390 Register RoldTopValue = G1_scratch;
3391 Register RtopAddr = G3_scratch;
3392 Register RnewTopValue = RallocatedObject;
3393 Register RendValue = Rscratch;
3395 __ set((intptr_t)Universe::heap()->top_addr(), RtopAddr);
3397 Label retry;
3398 __ bind(retry);
3399 __ set((intptr_t)Universe::heap()->end_addr(), RendValue);
3400 __ ld_ptr(RendValue, 0, RendValue);
3401 __ ld_ptr(RtopAddr, 0, RoldTopValue);
3402 __ add(RoldTopValue, Roffset, RnewTopValue);
3404 // RnewTopValue contains the top address after the new object
3405 // has been allocated.
3406 __ cmp(RnewTopValue, RendValue);
3407 __ brx(Assembler::greaterUnsigned, false, Assembler::pn, slow_case);
3408 __ delayed()->nop();
3410 __ casx_under_lock(RtopAddr, RoldTopValue, RnewTopValue,
3411 VM_Version::v9_instructions_work() ? NULL :
3412 (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
3414 // if someone beat us on the allocation, try again, otherwise continue
3415 __ cmp(RoldTopValue, RnewTopValue);
3416 __ brx(Assembler::notEqual, false, Assembler::pn, retry);
3417 __ delayed()->nop();
3418 }
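// The retry loop above, as C-level pseudocode (a sketch; sizes in bytes):
//   do {
//     HeapWord* old_top = *top_addr;
//     HeapWord* new_top = (HeapWord*)((address)old_top + size);
//     if (new_top > *end_addr) goto slow_case;
//   } while (CAS(top_addr, old_top, new_top) != old_top);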
3420 if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
3421 // clear object fields
3422 __ bind(initialize_object);
3423 __ deccc(Roffset, sizeof(oopDesc));
3424 __ br(Assembler::zero, false, Assembler::pt, initialize_header);
3425 __ delayed()->add(RallocatedObject, sizeof(oopDesc), G3_scratch);
3427 // initialize remaining object fields
3428 { Label loop;
3429 __ subcc(Roffset, wordSize, Roffset);
3430 __ bind(loop);
3431 //__ subcc(Roffset, wordSize, Roffset); // executed above loop or in delay slot
3432 __ st_ptr(G0, G3_scratch, Roffset);
3433 __ br(Assembler::notEqual, false, Assembler::pt, loop);
3434 __ delayed()->subcc(Roffset, wordSize, Roffset);
3435 }
3436 __ br(Assembler::always, false, Assembler::pt, initialize_header);
3437 __ delayed()->nop();
3438 }
3440 // slow case
3441 __ bind(slow_case);
3442 __ get_2_byte_integer_at_bcp(1, G3_scratch, O2, InterpreterMacroAssembler::Unsigned);
3443 __ get_constant_pool(O1);
3445 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), O1, O2);
3447 __ ba(false, done);
3448 __ delayed()->nop();
3450 // Initialize the header: mark, klass
3451 __ bind(initialize_header);
3453 if (UseBiasedLocking) {
3454 __ ld_ptr(RinstanceKlass, Klass::prototype_header_offset_in_bytes() + sizeof(oopDesc), G4_scratch);
3455 } else {
3456 __ set((intptr_t)markOopDesc::prototype(), G4_scratch);
3457 }
3458 __ st_ptr(G4_scratch, RallocatedObject, oopDesc::mark_offset_in_bytes()); // mark
3459 __ store_klass_gap(G0, RallocatedObject); // klass gap if compressed
3460 __ store_klass(RinstanceKlass, RallocatedObject); // klass (last for cms)
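// Header layout just initialized (sketch):
//   [mark]       prototype mark word (biased pattern if UseBiasedLocking)
//   [klass gap]  zeroed; only present with compressed oops
//   [klass]      RinstanceKlass, stored last so a concurrent collector
//                never sees a klass paired with uninitialized fields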
3462 {
3463 SkipIfEqual skip_if(
3464 _masm, G4_scratch, &DTraceAllocProbes, Assembler::zero);
3465 // Trigger dtrace event
3466 __ push(atos);
3467 __ call_VM_leaf(noreg,
3468 CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), O0);
3469 __ pop(atos);
3470 }
3472 // continue
3473 __ bind(done);
3474 }
3478 void TemplateTable::newarray() {
3479 transition(itos, atos);
3480 __ ldub(Lbcp, 1, O1);
3481 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), O1, Otos_i);
3482 }
3485 void TemplateTable::anewarray() {
3486 transition(itos, atos);
3487 __ get_constant_pool(O1);
3488 __ get_2_byte_integer_at_bcp(1, G4_scratch, O2, InterpreterMacroAssembler::Unsigned);
3489 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), O1, O2, Otos_i);
3490 }
3493 void TemplateTable::arraylength() {
3494 transition(atos, itos);
3495 Label ok;
3496 __ verify_oop(Otos_i);
3497 __ tst(Otos_i);
3498 __ throw_if_not_1_x( Assembler::notZero, ok );
3499 __ delayed()->ld(Otos_i, arrayOopDesc::length_offset_in_bytes(), Otos_i);
3500 __ throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
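// throw_if_not_1_x / throw_if_not_2 appear to be the two halves of a split
// throw idiom: the first emits the conditional branch over the throw
// (letting the caller fill the delay slot, here with the length load), and
// the second emits the jump to the exception entry and binds the ok label.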
3501 }
3504 void TemplateTable::checkcast() {
3505 transition(atos, atos);
3506 Label done, is_null, quicked, cast_ok, resolved;
3507 Register Roffset = G1_scratch;
3508 Register RobjKlass = O5;
3509 Register RspecifiedKlass = O4;
3511 // Check for casting a NULL
3512 __ br_null(Otos_i, false, Assembler::pn, is_null);
3513 __ delayed()->nop();
3515 // Get value klass in RobjKlass
3516 __ load_klass(Otos_i, RobjKlass); // get value klass
3518 // Get constant pool tag
3519 __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);
3521 // See if the checkcast has been quickened
3522 __ get_cpool_and_tags(Lscratch, G3_scratch);
3523 __ add(G3_scratch, typeArrayOopDesc::header_size(T_BYTE) * wordSize, G3_scratch);
3524 __ ldub(G3_scratch, Roffset, G3_scratch);
3525 __ cmp(G3_scratch, JVM_CONSTANT_Class);
3526 __ br(Assembler::equal, true, Assembler::pt, quicked);
3527 __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
3529 __ push_ptr(); // save receiver for result, and for GC
3530 call_VM(RspecifiedKlass, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3531 __ pop_ptr(Otos_i, G3_scratch); // restore receiver
3533 __ br(Assembler::always, false, Assembler::pt, resolved);
3534 __ delayed()->nop();
3536 // Extract target class from constant pool
3537 __ bind(quicked);
3538 __ add(Roffset, sizeof(constantPoolOopDesc), Roffset);
3539 __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
3540 __ bind(resolved);
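// Resolution sketch: if tags[index] == JVM_CONSTANT_Class the constant
// pool entry was already resolved ("quickened") and the klass is loaded
// directly from the pool; otherwise quicken_io_cc resolves it in the VM,
// which may GC -- hence the push_ptr/pop_ptr protecting the receiver.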
3541 __ load_klass(Otos_i, RobjKlass); // get value klass
3543 // Generate a fast subtype check. Branch to cast_ok on
3544 // success; throw an exception on failure.
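// Semantically (sketch): if (RobjKlass is a subtype of RspecifiedKlass)
//                          goto cast_ok;
// with the three scratch registers consumed by the check.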
3545 __ gen_subtype_check( RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, cast_ok );
3547 // Not a subtype, so we must throw an exception
3548 __ throw_if_not_x( Assembler::never, Interpreter::_throw_ClassCastException_entry, G3_scratch );
3550 __ bind(cast_ok);
3552 if (ProfileInterpreter) {
3553 __ ba(false, done);
3554 __ delayed()->nop();
3555 }
3556 __ bind(is_null);
3557 __ profile_null_seen(G3_scratch);
3558 __ bind(done);
3559 }
3562 void TemplateTable::instanceof() {
3563 Label done, is_null, quicked, resolved;
3564 transition(atos, itos);
3565 Register Roffset = G1_scratch;
3566 Register RobjKlass = O5;
3567 Register RspecifiedKlass = O4;
3569 // Check for an instanceof of NULL (the result is false)
3570 __ br_null(Otos_i, false, Assembler::pt, is_null);
3571 __ delayed()->nop();
3573 // Get value klass in RobjKlass
3574 __ load_klass(Otos_i, RobjKlass); // get value klass
3576 // Get constant pool tag
3577 __ get_2_byte_integer_at_bcp(1, Lscratch, Roffset, InterpreterMacroAssembler::Unsigned);
3579 // See if the instanceof has been quickened
3580 __ get_cpool_and_tags(Lscratch, G3_scratch);
3581 __ add(G3_scratch, typeArrayOopDesc::header_size(T_BYTE) * wordSize, G3_scratch);
3582 __ ldub(G3_scratch, Roffset, G3_scratch);
3583 __ cmp(G3_scratch, JVM_CONSTANT_Class);
3584 __ br(Assembler::equal, true, Assembler::pt, quicked);
3585 __ delayed()->sll(Roffset, LogBytesPerWord, Roffset);
3587 __ push_ptr(); // save receiver for result, and for GC
3588 call_VM(RspecifiedKlass, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3589 __ pop_ptr(Otos_i, G3_scratch); // restore receiver
3591 __ br(Assembler::always, false, Assembler::pt, resolved);
3592 __ delayed()->nop();
3595 // Extract target class from constant pool
3596 __ bind(quicked);
3597 __ add(Roffset, sizeof(constantPoolOopDesc), Roffset);
3598 __ get_constant_pool(Lscratch);
3599 __ ld_ptr(Lscratch, Roffset, RspecifiedKlass);
3600 __ bind(resolved);
3601 __ load_klass(Otos_i, RobjKlass); // get value klass
3603 // Generate a fast subtype check. Branch to done if
3604 // successful (the result was preset to 1). Return 0 on failure.
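// Result protocol, in effect:
//   Otos_i = (obj != NULL && obj_klass is a subtype of spec_klass) ? 1 : 0;
// The 1 is stored optimistically before the check; the failure path
// falls through and clears it.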
3605 __ or3(G0, 1, Otos_i); // set result assuming quick tests succeed
3606 __ gen_subtype_check( RobjKlass, RspecifiedKlass, G3_scratch, G4_scratch, G1_scratch, done );
3607 // Not a subtype; return 0;
3608 __ clr( Otos_i );
3610 if (ProfileInterpreter) {
3611 __ ba(false, done);
3612 __ delayed()->nop();
3613 }
3614 __ bind(is_null);
3615 __ profile_null_seen(G3_scratch);
3616 __ bind(done);
3617 }
3619 void TemplateTable::_breakpoint() {
3621 // Note: We get here even if we are single stepping.
3622 // jbug insists on setting breakpoints at every bytecode
3623 // even if we are in single step mode.
3625 transition(vtos, vtos);
3626 // get the unpatched byte code
3627 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), Lmethod, Lbcp);
3628 __ mov(O0, Lbyte_code);
3630 // post the breakpoint event
3631 __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), Lmethod, Lbcp);
3633 // complete the execution of original bytecode
3634 __ dispatch_normal(vtos);
3635 }
3638 //----------------------------------------------------------------------------------------------------
3639 // Exceptions
3641 void TemplateTable::athrow() {
3642 transition(atos, vtos);
3644 // This works because the exception is cached in Otos_i, which is the same as O0,
3645 // which is what throw_exception_entry expects
3646 assert(Otos_i == Oexception, "see explanation above");
3648 __ verify_oop(Otos_i);
3649 __ null_check(Otos_i);
3650 __ throw_if_not_x(Assembler::never, Interpreter::throw_exception_entry(), G3_scratch);
3651 }
3654 //----------------------------------------------------------------------------------------------------
3655 // Synchronization
3658 // See frame_sparc.hpp for monitor block layout.
3659 // Monitor elements are dynamically allocated by growing the stack as needed.
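// Each element is a BasicObjectLock; conceptually:
//   struct BasicObjectLock { BasicLock _lock; oop _obj; };
// A NULL _obj marks a free slot; the scans below compare _obj to find
// free slots and entries for a given object.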
3661 void TemplateTable::monitorenter() {
3662 transition(atos, vtos);
3663 __ verify_oop(Otos_i);
3664 // Try to acquire a lock on the object
3665 // Repeat until we succeed (i.e., until
3666 // monitorenter returns true).
3668 { Label ok;
3669 __ tst(Otos_i);
3670 __ throw_if_not_1_x( Assembler::notZero, ok);
3671 __ delayed()->mov(Otos_i, Lscratch); // save obj
3672 __ throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ok);
3673 }
3675 assert(O0 == Otos_i, "Be sure where the object to lock is");
3677 // find a free slot in the monitor block
3680 // initialize entry pointer
3681 __ clr(O1); // points to free slot or NULL
3683 {
3684 Label entry, loop, exit;
3685 __ add( __ top_most_monitor(), O2 ); // last one to check
3686 __ ba( false, entry );
3687 __ delayed()->mov( Lmonitors, O3 ); // first one to check
3690 __ bind( loop );
3692 __ verify_oop(O4); // verify each monitor's oop
3693 __ tst(O4); // is this entry unused?
3694 if (VM_Version::v9_instructions_work())
3695 __ movcc( Assembler::zero, false, Assembler::ptr_cc, O3, O1);
3696 else {
3697 Label L;
3698 __ br( Assembler::zero, true, Assembler::pn, L );
3699 __ delayed()->mov(O3, O1); // remember this entry if it is unused
3700 __ bind(L);
3701 }
3703 __ cmp(O4, O0); // check if current entry is for same object
3704 __ brx( Assembler::equal, false, Assembler::pn, exit );
3705 __ delayed()->inc( O3, frame::interpreter_frame_monitor_size() * wordSize ); // check next one
3707 __ bind( entry );
3709 __ cmp( O3, O2 );
3710 __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, loop );
3711 __ delayed()->ld_ptr(O3, BasicObjectLock::obj_offset_in_bytes(), O4);
3713 __ bind( exit );
3714 }
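// The scan above is roughly (pseudo-C sketch):
//
//   free = NULL;
//   for (m = Lmonitors; m <= top_most_monitor; m = next(m)) {
//     if (m->obj == NULL) free = m;  // remember an unused slot
//     if (m->obj == obj)  break;     // stop at an entry for this object
//   }
//
// O1 now holds the chosen slot, or NULL if a new one must be pushed.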
3716 { Label allocated;
3718 // found free slot?
3719 __ br_notnull(O1, false, Assembler::pn, allocated);
3720 __ delayed()->nop();
3722 __ add_monitor_to_stack( false, O2, O3 );
3723 __ mov(Lmonitors, O1);
3725 __ bind(allocated);
3726 }
3728 // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
3729 // The object has already been popped from the stack, so the expression stack looks correct.
3730 __ inc(Lbcp);
3732 __ st_ptr(O0, O1, BasicObjectLock::obj_offset_in_bytes()); // store object
3733 __ lock_object(O1, O0);
3735 // check if there's enough space on the stack for the monitors after locking
3736 __ generate_stack_overflow_check(0);
3738 // The bcp has already been incremented. Just need to dispatch to next instruction.
3739 __ dispatch_next(vtos);
3740 }
3743 void TemplateTable::monitorexit() {
3744 transition(atos, vtos);
3745 __ verify_oop(Otos_i);
3746 __ tst(Otos_i);
3747 __ throw_if_not_x( Assembler::notZero, Interpreter::_throw_NullPointerException_entry, G3_scratch );
3749 assert(O0 == Otos_i, "just checking");
3751 { Label entry, loop, found;
3752 __ add( __ top_most_monitor(), O2 ); // last one to check
3753 __ ba(false, entry );
3754 // Use Lscratch to hold the monitor element to check; start with the most recent monitor.
3755 // By using a local register it survives the call to the C routine.
3756 __ delayed()->mov( Lmonitors, Lscratch );
3758 __ bind( loop );
3760 __ verify_oop(O4); // verify each monitor's oop
3761 __ cmp(O4, O0); // check if current entry is for desired object
3762 __ brx( Assembler::equal, true, Assembler::pt, found );
3763 __ delayed()->mov(Lscratch, O1); // pass found entry as argument to monitorexit
3765 __ inc( Lscratch, frame::interpreter_frame_monitor_size() * wordSize ); // advance to next
3767 __ bind( entry );
3769 __ cmp( Lscratch, O2 );
3770 __ brx( Assembler::lessEqualUnsigned, true, Assembler::pt, loop );
3771 __ delayed()->ld_ptr(Lscratch, BasicObjectLock::obj_offset_in_bytes(), O4);
3773 call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
3774 __ should_not_reach_here();
3776 __ bind(found);
3777 }
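// The scan above is roughly (pseudo-C sketch):
//
//   for (m = Lmonitors; m <= top_most_monitor; m = next(m))
//     if (m->obj == obj) goto found;           // with O1 = m
//   throw_illegal_monitor_state_exception();   // does not return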
3778 __ unlock_object(O1);
3779 }
3782 //----------------------------------------------------------------------------------------------------
3783 // Wide instructions
3785 void TemplateTable::wide() {
3786 transition(vtos, vtos);
3787 __ ldub(Lbcp, 1, G3_scratch); // get next bc
3788 __ sll(G3_scratch, LogBytesPerWord, G3_scratch);
3789 AddressLiteral ep(Interpreter::_wentry_point);
3790 __ set(ep, G4_scratch);
3791 __ ld_ptr(G4_scratch, G3_scratch, G3_scratch);
3792 __ jmp(G3_scratch, G0);
3793 __ delayed()->nop();
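// In effect: goto *Interpreter::_wentry_point[next_bytecode]; the table
// holds one wide-variant entry point per bytecode, indexed by the byte
// following the wide prefix.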
3794 // Note: the Lbcp increment step is part of the individual wide bytecode implementations
3795 }
3798 //----------------------------------------------------------------------------------------------------
3799 // Multi arrays
3801 void TemplateTable::multianewarray() {
3802 transition(vtos, atos);
3803 // put ndims * Interpreter::stackElementSize into Lscratch
3804 __ ldub( Lbcp, 3, Lscratch);
3805 __ sll( Lscratch, Interpreter::logStackElementSize, Lscratch);
3806 // Lesp points past last_dim, so set O1 to the first_dim address
3807 __ add( Lesp, Lscratch, O1);
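// i.e., O1 = Lesp + ndims * Interpreter::stackElementSize; the runtime
// call below reads the ndims dimension words relative to that address.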
3808 call_VM(Otos_i, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), O1);
3809 __ add( Lesp, Lscratch, Lesp); // pop all dimensions off the stack
3810 }
3811 #endif /* !CC_INTERP */