Mon, 24 Jun 2013 11:53:54 -0700
8017308: Remove unused breakpoint relocation type
Summary: remove unused breakpoint relocation type
Reviewed-by: kvn
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS
#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif
// Convert the raw encoding form into the form expected by the
// constructor for Address.
Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) {
  assert(scale == 0, "not supported");
  RelocationHolder rspec;
  if (disp_reloc != relocInfo::none) {
    rspec = Relocation::spec_simple(disp_reloc);
  }

  Register rindex = as_Register(index);
  if (rindex != G0) {
    Address madr(as_Register(base), rindex);
    madr._rspec = rspec;
    return madr;
  } else {
    Address madr(as_Register(base), disp);
    madr._rspec = rspec;
    return madr;
  }
}
Address Argument::address_in_frame() const {
  // Warning: In LP64 mode disp will occupy more than 10 bits, but
  // op codes such as ld or ldx only access disp() to get their
  // simm13 argument.
  int disp = ((_number - Argument::n_register_parameters + frame::memory_parameter_word_sp_offset) * BytesPerWord) + STACK_BIAS;
  if (is_in())
    return Address(FP, disp); // In argument.
  else
    return Address(SP, disp); // Out argument.
}
static const char* argumentNames[][2] = {
  {"A0","P0"}, {"A1","P1"}, {"A2","P2"}, {"A3","P3"}, {"A4","P4"},
  {"A5","P5"}, {"A6","P6"}, {"A7","P7"}, {"A8","P8"}, {"A9","P9"},
  {"A(n>9)","P(n>9)"}
};

const char* Argument::name() const {
  int nofArgs = sizeof argumentNames / sizeof argumentNames[0];
  int num = number();
  if (num >= nofArgs)  num = nofArgs - 1;
  return argumentNames[num][is_in() ? 1 : 0];
}
#ifdef ASSERT
// On RISC, there's no benefit to verifying instruction boundaries.
bool AbstractAssembler::pd_check_instruction_mark() { return false; }
#endif
// Patch instruction inst at offset inst_pos to refer to dest_pos
// and return the resulting instruction.
// We should have pcs, not offsets, but since all is relative, it will
// work out OK.
int MacroAssembler::patched_branch(int dest_pos, int inst, int inst_pos) {
  int m; // mask for displacement field
  int v; // new value for displacement field
  const int word_aligned_ones = -4;
  switch (inv_op(inst)) {
  default: ShouldNotReachHere();
  case call_op:    m = wdisp(word_aligned_ones, 0, 30);  v = wdisp(dest_pos, inst_pos, 30); break;
  case branch_op:
    switch (inv_op2(inst)) {
      case fbp_op2:  m = wdisp(word_aligned_ones, 0, 19);  v = wdisp(dest_pos, inst_pos, 19); break;
      case bp_op2:   m = wdisp(word_aligned_ones, 0, 19);  v = wdisp(dest_pos, inst_pos, 19); break;
      case fb_op2:   m = wdisp(word_aligned_ones, 0, 22);  v = wdisp(dest_pos, inst_pos, 22); break;
      case br_op2:   m = wdisp(word_aligned_ones, 0, 22);  v = wdisp(dest_pos, inst_pos, 22); break;
      case bpr_op2: {
        if (is_cbcond(inst)) {
          m = wdisp10(word_aligned_ones, 0);
          v = wdisp10(dest_pos, inst_pos);
        } else {
          m = wdisp16(word_aligned_ones, 0);
          v = wdisp16(dest_pos, inst_pos);
        }
        break;
      }
      default: ShouldNotReachHere();
    }
  }
  return (inst & ~m) | v;
}
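
// Worked example (added for illustration): for a bp_op2 branch,
// m = wdisp(word_aligned_ones, 0, 19) is just the 19-bit displacement
// field filled with ones (word_aligned_ones is -4, so (-4 - 0) >> 2 is
// all ones), and v = wdisp(dest_pos, inst_pos, 19) encodes
// (dest_pos - inst_pos) >> 2 into that field. The final expression
//   (inst & ~m) | v
// therefore clears the old displacement bits and ORs in the new ones,
// leaving the opcode, condition and annul/prediction bits untouched.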
// Return the offset of the branch destination of instruction inst
// at offset pos.
// Should have pcs, but since all is relative, it works out.
int MacroAssembler::branch_destination(int inst, int pos) {
  int r;
  switch (inv_op(inst)) {
  default: ShouldNotReachHere();
  case call_op:        r = inv_wdisp(inst, pos, 30); break;
  case branch_op:
    switch (inv_op2(inst)) {
      case fbp_op2:    r = inv_wdisp(inst, pos, 19); break;
      case bp_op2:     r = inv_wdisp(inst, pos, 19); break;
      case fb_op2:     r = inv_wdisp(inst, pos, 22); break;
      case br_op2:     r = inv_wdisp(inst, pos, 22); break;
      case bpr_op2: {
        if (is_cbcond(inst)) {
          r = inv_wdisp10(inst, pos);
        } else {
          r = inv_wdisp16(inst, pos);
        }
        break;
      }
      default: ShouldNotReachHere();
    }
  }
  return r;
}
void MacroAssembler::null_check(Register reg, int offset) {
  if (needs_explicit_null_check((intptr_t)offset)) {
    // provoke OS NULL exception if reg = NULL by
    // accessing M[reg] w/o changing any registers
    ld_ptr(reg, 0, G0);
  }
  else {
    // nothing to do, (later) access of M[reg + offset]
    // will provoke OS NULL exception if reg = NULL
  }
}
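
// Note added for illustration: ld_ptr(reg, 0, G0) is a load whose result
// is discarded (G0 is hardwired to zero on SPARC) but which still faults
// if reg is NULL. It turns an offset too large for the implicit
// null-check page into an explicit probe without disturbing any
// register state.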
// Ring buffer jumps

#ifndef PRODUCT
void MacroAssembler::ret(bool trace) {
  if (trace) {
    mov(I7, O7); // traceable register
    JMP(O7, 2 * BytesPerInstWord);
  } else {
    jmpl(I7, 2 * BytesPerInstWord, G0);
  }
}

void MacroAssembler::retl(bool trace) {
  if (trace) {
    JMP(O7, 2 * BytesPerInstWord);
  } else {
    jmpl(O7, 2 * BytesPerInstWord, G0);
  }
}
#endif /* PRODUCT */
void MacroAssembler::jmp2(Register r1, Register r2, const char* file, int line) {
  assert_not_delayed();
  // This can only be traceable if r1 & r2 are visible after a window save
  if (TraceJumps) {
#ifndef PRODUCT
    save_frame(0);
    verify_thread();
    ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
    add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
    sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
    add(O2, O1, O1);

    add(r1->after_save(), r2->after_save(), O2);
    set((intptr_t)file, O3);
    set(line, O4);
    Label L;
    // get nearby pc, store jmp target
    call(L, relocInfo::none);  // No relocation for call to pc+0x8
    delayed()->st(O2, O1, 0);
    bind(L);

    // store nearby pc
    st(O7, O1, sizeof(intptr_t));
    // store file
    st(O3, O1, 2*sizeof(intptr_t));
    // store line
    st(O4, O1, 3*sizeof(intptr_t));
    add(O0, 1, O0);
    and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
    st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
    restore();
#endif /* PRODUCT */
  }
  jmpl(r1, r2, G0);
}

void MacroAssembler::jmp(Register r1, int offset, const char* file, int line) {
  assert_not_delayed();
  // This can only be traceable if r1 is visible after a window save
  if (TraceJumps) {
#ifndef PRODUCT
    save_frame(0);
    verify_thread();
    ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
    add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
    sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
    add(O2, O1, O1);

    add(r1->after_save(), offset, O2);
    set((intptr_t)file, O3);
    set(line, O4);
    Label L;
    // get nearby pc, store jmp target
    call(L, relocInfo::none);  // No relocation for call to pc+0x8
    delayed()->st(O2, O1, 0);
    bind(L);

    // store nearby pc
    st(O7, O1, sizeof(intptr_t));
    // store file
    st(O3, O1, 2*sizeof(intptr_t));
    // store line
    st(O4, O1, 3*sizeof(intptr_t));
    add(O0, 1, O0);
    and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
    st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
    restore();
#endif /* PRODUCT */
  }
  jmp(r1, offset);
}
// This code sequence is relocatable to any address, even on LP64.
void MacroAssembler::jumpl(const AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line) {
  assert_not_delayed();
  // Force fixed length sethi because NativeJump and NativeFarCall don't handle
  // variable length instruction streams.
  patchable_sethi(addrlit, temp);
  Address a(temp, addrlit.low10() + offset);  // Add the offset to the displacement.
  if (TraceJumps) {
#ifndef PRODUCT
    // Must do the add here so relocation can find the remainder of the
    // value to be relocated.
    add(a.base(), a.disp(), a.base(), addrlit.rspec(offset));
    save_frame(0);
    verify_thread();
    ld(G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()), O0);
    add(G2_thread, in_bytes(JavaThread::jmp_ring_offset()), O1);
    sll(O0, exact_log2(4*sizeof(intptr_t)), O2);
    add(O2, O1, O1);

    set((intptr_t)file, O3);
    set(line, O4);
    Label L;

    // get nearby pc, store jmp target
    call(L, relocInfo::none);  // No relocation for call to pc+0x8
    delayed()->st(a.base()->after_save(), O1, 0);
    bind(L);

    // store nearby pc
    st(O7, O1, sizeof(intptr_t));
    // store file
    st(O3, O1, 2*sizeof(intptr_t));
    // store line
    st(O4, O1, 3*sizeof(intptr_t));
    add(O0, 1, O0);
    and3(O0, JavaThread::jump_ring_buffer_size - 1, O0);
    st(O0, G2_thread, in_bytes(JavaThread::jmp_ring_index_offset()));
    restore();
    jmpl(a.base(), G0, d);
#else
    jmpl(a.base(), a.disp(), d);
#endif /* PRODUCT */
  } else {
    jmpl(a.base(), a.disp(), d);
  }
}

void MacroAssembler::jump(const AddressLiteral& addrlit, Register temp, int offset, const char* file, int line) {
  jumpl(addrlit, temp, G0, offset, file, line);
}
// Conditional breakpoint (for assertion checks in assembly code)
void MacroAssembler::breakpoint_trap(Condition c, CC cc) {
  trap(c, cc, G0, ST_RESERVED_FOR_USER_0);
}

// We want to use ST_BREAKPOINT here, but the debugger is confused by it.
void MacroAssembler::breakpoint_trap() {
  trap(ST_RESERVED_FOR_USER_0);
}
// Write serialization page so VM thread can do a pseudo remote membar.
// We use the current thread pointer to calculate a thread-specific
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
void MacroAssembler::serialize_memory(Register thread, Register tmp1, Register tmp2) {
  srl(thread, os::get_serialize_page_shift_count(), tmp2);
  if (Assembler::is_simm13(os::vm_page_size())) {
    and3(tmp2, (os::vm_page_size() - sizeof(int)), tmp2);
  }
  else {
    set((os::vm_page_size() - sizeof(int)), tmp1);
    and3(tmp2, tmp1, tmp2);
  }
  set(os::get_memory_serialize_page(), tmp1);
  st(G0, tmp1, tmp2);
}
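
// Illustrative arithmetic (added; the shift count, page size and thread
// pointer are hypothetical): with a 4K page and a shift count of 3, a
// thread pointer of 0x12345678 gives
//   tmp2 = 0x12345678 >> 3             = 0x02468acf
//   tmp2 = tmp2 & (4096 - sizeof(int)) = 0xacc
// so distinct threads tend to write distinct words of the serialization
// page rather than contending on a single cache line.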
void MacroAssembler::enter() {
  Unimplemented();
}

void MacroAssembler::leave() {
  Unimplemented();
}

// Calls to C land

#ifdef ASSERT
// a hook for debugging
static Thread* reinitialize_thread() {
  return ThreadLocalStorage::thread();
}
#else
#define reinitialize_thread ThreadLocalStorage::thread
#endif

#ifdef ASSERT
address last_get_thread = NULL;
#endif
// call this when G2_thread is not known to be valid
void MacroAssembler::get_thread() {
  save_frame(0);               // to avoid clobbering O0
  mov(G1, L0);                 // avoid clobbering G1
  mov(G5_method, L1);          // avoid clobbering G5
  mov(G3, L2);                 // avoid clobbering G3 also
  mov(G4, L5);                 // avoid clobbering G4
#ifdef ASSERT
  AddressLiteral last_get_thread_addrlit(&last_get_thread);
  set(last_get_thread_addrlit, L3);
  rdpc(L4);
  inc(L4, 3 * BytesPerInstWord); // skip rdpc + inc + st_ptr to point L4 at call
  st_ptr(L4, L3, 0);
#endif
  call(CAST_FROM_FN_PTR(address, reinitialize_thread), relocInfo::runtime_call_type);
  delayed()->nop();
  mov(L0, G1);
  mov(L1, G5_method);
  mov(L2, G3);
  mov(L5, G4);
  restore(O0, 0, G2_thread);
}
static Thread* verify_thread_subroutine(Thread* gthread_value) {
  Thread* correct_value = ThreadLocalStorage::thread();
  guarantee(gthread_value == correct_value, "G2_thread value must be the thread");
  return correct_value;
}

void MacroAssembler::verify_thread() {
  if (VerifyThread) {
    // NOTE: this chops off the heads of the 64-bit O registers.
#ifdef CC_INTERP
    save_frame(0);
#else
    // make sure G2_thread contains the right value
    save_frame_and_mov(0, Lmethod, Lmethod); // to avoid clobbering O0 (and propagate Lmethod for -Xprof)
    mov(G1, L1);             // avoid clobbering G1
    // G2 saved below
    mov(G3, L3);             // avoid clobbering G3
    mov(G4, L4);             // avoid clobbering G4
    mov(G5_method, L5);      // avoid clobbering G5_method
#endif /* CC_INTERP */
#if defined(COMPILER2) && !defined(_LP64)
    // Save & restore possible 64-bit Long arguments in G-regs
    srlx(G1, 32, L0);
    srlx(G4, 32, L6);
#endif
    call(CAST_FROM_FN_PTR(address, verify_thread_subroutine), relocInfo::runtime_call_type);
    delayed()->mov(G2_thread, O0);

    mov(L1, G1);             // Restore G1
    // G2 restored below
    mov(L3, G3);             // restore G3
    mov(L4, G4);             // restore G4
    mov(L5, G5_method);      // restore G5_method
#if defined(COMPILER2) && !defined(_LP64)
    // Save & restore possible 64-bit Long arguments in G-regs
    sllx(L0, 32, G2);        // Move old high G1 bits high in G2
    srl(G1, 0, G1);          // Clear current high G1 bits
    or3(G1, G2, G1);         // Recover 64-bit G1
    sllx(L6, 32, G2);        // Move old high G4 bits high in G2
    srl(G4, 0, G4);          // Clear current high G4 bits
    or3(G4, G2, G4);         // Recover 64-bit G4
#endif
    restore(O0, 0, G2_thread);
  }
}
void MacroAssembler::save_thread(const Register thread_cache) {
  verify_thread();
  if (thread_cache->is_valid()) {
    assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
    mov(G2_thread, thread_cache);
  }
  if (VerifyThread) {
    // smash G2_thread, as if the VM were about to anyway
    set(0x67676767, G2_thread);
  }
}

void MacroAssembler::restore_thread(const Register thread_cache) {
  if (thread_cache->is_valid()) {
    assert(thread_cache->is_local() || thread_cache->is_in(), "bad volatile");
    mov(thread_cache, G2_thread);
    verify_thread();
  } else {
    // do it the slow way
    get_thread();
  }
}
// %%% maybe get rid of [re]set_last_Java_frame
void MacroAssembler::set_last_Java_frame(Register last_java_sp, Register last_Java_pc) {
  assert_not_delayed();
  Address flags(G2_thread, JavaThread::frame_anchor_offset() +
                           JavaFrameAnchor::flags_offset());
  Address pc_addr(G2_thread, JavaThread::last_Java_pc_offset());

  // Always set last_Java_pc and flags first because once last_Java_sp
  // is visible, has_last_Java_frame is true and users will look at the
  // rest of the fields. (Note: flags should always be zero before we
  // get here, so it doesn't need to be set.)

#ifdef ASSERT
  // Verify that last_Java_pc was zeroed on return to Java
  Label PcOk;
  save_frame(0);                // to avoid clobbering O0
  ld_ptr(pc_addr, L0);
  br_null_short(L0, Assembler::pt, PcOk);
  STOP("last_Java_pc not zeroed before leaving Java");
  bind(PcOk);

  // Verify that flags was zeroed on return to Java
  Label FlagsOk;
  ld(flags, L0);
  tst(L0);
  br(Assembler::zero, false, Assembler::pt, FlagsOk);
  delayed()->restore();
  STOP("flags not zeroed before leaving Java");
  bind(FlagsOk);
#endif /* ASSERT */
  //
  // When returning from calling out from Java mode the frame anchor's
  // last_Java_pc will always be set to NULL. It is set here so that if
  // we are doing a call to native (not VM) that we capture the known pc
  // and don't have to rely on the native call having a standard frame
  // linkage where we can find the pc.

  if (last_Java_pc->is_valid()) {
    st_ptr(last_Java_pc, pc_addr);
  }

#ifdef _LP64
#ifdef ASSERT
  // Make sure that we have an odd stack
  Label StackOk;
  andcc(last_java_sp, 0x01, G0);
  br(Assembler::notZero, false, Assembler::pt, StackOk);
  delayed()->nop();
  STOP("Stack Not Biased in set_last_Java_frame");
  bind(StackOk);
#endif // ASSERT
  assert(last_java_sp != G4_scratch, "bad register usage in set_last_Java_frame");
  add(last_java_sp, STACK_BIAS, G4_scratch);
  st_ptr(G4_scratch, G2_thread, JavaThread::last_Java_sp_offset());
#else
  st_ptr(last_java_sp, G2_thread, JavaThread::last_Java_sp_offset());
#endif // _LP64
}
void MacroAssembler::reset_last_Java_frame(void) {
  assert_not_delayed();

  Address sp_addr(G2_thread, JavaThread::last_Java_sp_offset());
  Address pc_addr(G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
  Address flags  (G2_thread, JavaThread::frame_anchor_offset() + JavaFrameAnchor::flags_offset());

#ifdef ASSERT
  // check that it WAS previously set
#ifdef CC_INTERP
  save_frame(0);
#else
  save_frame_and_mov(0, Lmethod, Lmethod);     // Propagate Lmethod to helper frame for -Xprof
#endif /* CC_INTERP */
  ld_ptr(sp_addr, L0);
  tst(L0);
  breakpoint_trap(Assembler::zero, Assembler::ptr_cc);
  restore();
#endif // ASSERT

  st_ptr(G0, sp_addr);
  // Always return last_Java_pc to zero
  st_ptr(G0, pc_addr);
  // Always null flags after return to Java
  st(G0, flags);
}
void MacroAssembler::call_VM_base(
  Register        oop_result,
  Register        thread_cache,
  Register        last_java_sp,
  address         entry_point,
  int             number_of_arguments,
  bool            check_exceptions)
{
  assert_not_delayed();

  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = SP;
  }
  // debugging support
  assert(number_of_arguments >= 0, "cannot have negative number of arguments");

  // 64-bit last_java_sp is biased!
  set_last_Java_frame(last_java_sp, noreg);
  if (VerifyThread)  mov(G2_thread, O0); // about to be smashed; pass early
  save_thread(thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  if (!VerifyThread)
    delayed()->mov(G2_thread, O0);  // pass thread as first argument
  else
    delayed()->nop();               // (thread already passed)
  restore_thread(thread_cache);
  reset_last_Java_frame();

  // check for pending exceptions. use Gtemp as scratch register.
  if (check_exceptions) {
    check_and_forward_exception(Gtemp);
  }

#ifdef ASSERT
  set(badHeapWordVal, G3);
  set(badHeapWordVal, G4);
  set(badHeapWordVal, G5);
#endif

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    get_vm_result(oop_result);
  }
}
void MacroAssembler::check_and_forward_exception(Register scratch_reg)
{
  Label L;

  check_and_handle_popframe(scratch_reg);
  check_and_handle_earlyret(scratch_reg);

  Address exception_addr(G2_thread, Thread::pending_exception_offset());
  ld_ptr(exception_addr, scratch_reg);
  br_null_short(scratch_reg, pt, L);
  // we use O7 linkage so that forward_exception_entry has the issuing PC
  call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);
  delayed()->nop();
  bind(L);
}
void MacroAssembler::check_and_handle_popframe(Register scratch_reg) {
}

void MacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
}
void MacroAssembler::call_VM(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, noreg, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  call_VM(oop_result, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  call_VM(oop_result, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  call_VM(oop_result, entry_point, 3, check_exceptions);
}

// Note: The following call_VM overloadings are useful when a "save"
// has already been performed by a stub, and the last Java frame is
// the previous one.  In that case, last_java_sp must be passed as FP
// instead of SP.

void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments, bool check_exceptions) {
  call_VM_base(oop_result, noreg, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
  // O0 is reserved for the thread
  mov(arg_1, O1);
  mov(arg_2, O2); assert(arg_2 != O1, "smashed argument");
  mov(arg_3, O3); assert(arg_3 != O1 && arg_3 != O2, "smashed argument");
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}
void MacroAssembler::call_VM_leaf_base(Register thread_cache, address entry_point, int number_of_arguments) {
  assert_not_delayed();
  save_thread(thread_cache);
  // do the call
  call(entry_point, relocInfo::runtime_call_type);
  delayed()->nop();
  restore_thread(thread_cache);
#ifdef ASSERT
  set(badHeapWordVal, G3);
  set(badHeapWordVal, G4);
  set(badHeapWordVal, G5);
#endif
}

void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, int number_of_arguments) {
  call_VM_leaf_base(thread_cache, entry_point, number_of_arguments);
}

void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1) {
  mov(arg_1, O0);
  call_VM_leaf(thread_cache, entry_point, 1);
}

void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
  mov(arg_1, O0);
  mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
  call_VM_leaf(thread_cache, entry_point, 2);
}

void MacroAssembler::call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2, Register arg_3) {
  mov(arg_1, O0);
  mov(arg_2, O1); assert(arg_2 != O0, "smashed argument");
  mov(arg_3, O2); assert(arg_3 != O0 && arg_3 != O1, "smashed argument");
  call_VM_leaf(thread_cache, entry_point, 3);
}
void MacroAssembler::get_vm_result(Register oop_result) {
  verify_thread();
  Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
  ld_ptr(vm_result_addr, oop_result);
  st_ptr(G0, vm_result_addr);
  verify_oop(oop_result);
}

void MacroAssembler::get_vm_result_2(Register metadata_result) {
  verify_thread();
  Address vm_result_addr_2(G2_thread, JavaThread::vm_result_2_offset());
  ld_ptr(vm_result_addr_2, metadata_result);
  st_ptr(G0, vm_result_addr_2);
}

// We require that C code which does not return a value in vm_result will
// leave it undisturbed.
void MacroAssembler::set_vm_result(Register oop_result) {
  verify_thread();
  Address vm_result_addr(G2_thread, JavaThread::vm_result_offset());
  verify_oop(oop_result);

# ifdef ASSERT
  // Check that we are not overwriting any other oop.
#ifdef CC_INTERP
  save_frame(0);
#else
  save_frame_and_mov(0, Lmethod, Lmethod);     // Propagate Lmethod for -Xprof
#endif /* CC_INTERP */
  ld_ptr(vm_result_addr, L0);
  tst(L0);
  restore();
  breakpoint_trap(notZero, Assembler::ptr_cc);
# endif

  st_ptr(oop_result, vm_result_addr);
}
void MacroAssembler::ic_call(address entry, bool emit_delay) {
  RelocationHolder rspec = virtual_call_Relocation::spec(pc());
  patchable_set((intptr_t)Universe::non_oop_word(), G5_inline_cache_reg);
  relocate(rspec);
  call(entry, relocInfo::none);
  if (emit_delay) {
    delayed()->nop();
  }
}
void MacroAssembler::card_table_write(jbyte* byte_map_base,
                                      Register tmp, Register obj) {
#ifdef _LP64
  srlx(obj, CardTableModRefBS::card_shift, obj);
#else
  srl(obj, CardTableModRefBS::card_shift, obj);
#endif
  assert(tmp != obj, "need separate temp reg");
  set((address) byte_map_base, tmp);
  stb(G0, tmp, obj);
}
void MacroAssembler::internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
  address save_pc;
  int shiftcnt;
#ifdef _LP64
# ifdef CHECK_DELAY
  assert_not_delayed((char*) "cannot put two instructions in delay slot");
# endif
  v9_dep();
  save_pc = pc();

  int msb32 = (int) (addrlit.value() >> 32);
  int lsb32 = (int) (addrlit.value());

  if (msb32 == 0 && lsb32 >= 0) {
    Assembler::sethi(lsb32, d, addrlit.rspec());
  }
  else if (msb32 == -1) {
    Assembler::sethi(~lsb32, d, addrlit.rspec());
    xor3(d, ~low10(~0), d);
  }
  else {
    Assembler::sethi(msb32, d, addrlit.rspec());  // msb 22-bits
    if (msb32 & 0x3ff)                            // Any bits?
      or3(d, msb32 & 0x3ff, d);                   // msb 32-bits are now in lsb 32
    if (lsb32 & 0xFFFFFC00) {                     // done?
      if ((lsb32 >> 20) & 0xfff) {                // Any bits set?
        sllx(d, 12, d);                           // Make room for next 12 bits
        or3(d, (lsb32 >> 20) & 0xfff, d);         // Or in next 12
        shiftcnt = 0;                             // We already shifted
      }
      else
        shiftcnt = 12;
      if ((lsb32 >> 10) & 0x3ff) {
        sllx(d, shiftcnt + 10, d);                // Make room for last 10 bits
        or3(d, (lsb32 >> 10) & 0x3ff, d);         // Or in next 10
        shiftcnt = 0;
      }
      else
        shiftcnt = 10;
      sllx(d, shiftcnt + 10, d);                  // Shift leaving disp field 0'd
    }
    else
      sllx(d, 32, d);
  }
  // Pad out the instruction sequence so it can be patched later.
  if (ForceRelocatable || (addrlit.rtype() != relocInfo::none &&
                           addrlit.rtype() != relocInfo::runtime_call_type)) {
    while (pc() < (save_pc + (7 * BytesPerInstWord)))
      nop();
  }
#else
  Assembler::sethi(addrlit.value(), d, addrlit.rspec());
#endif
}
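
// Worked example (added for illustration): materializing the 64-bit
// constant 0x0000000580001234 takes the final branch above
// (msb32 == 5, lsb32 == 0x80001234) and emits:
//   sethi %hi(5), d      // top 22 bits of msb32 (zero here)
//   or    d, 5, d        // low 10 bits of msb32        -> d = 0x5
//   sllx  d, 12, d       // room for lsb32 bits 31..20
//   or    d, 0x800, d    //                             -> d = 0x5800
//   sllx  d, 10, d       // room for lsb32 bits 19..10
//   or    d, 0x004, d    //                             -> d = 0x1600004
//   sllx  d, 10, d       // leave disp field zeroed     -> d = 0x580001000
// That is seven instructions, matching the 7-word pad; the low 10 bits
// (0x234) are supplied by the add emitted in internal_set().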
void MacroAssembler::sethi(const AddressLiteral& addrlit, Register d) {
  internal_sethi(addrlit, d, false);
}

void MacroAssembler::patchable_sethi(const AddressLiteral& addrlit, Register d) {
  internal_sethi(addrlit, d, true);
}
int MacroAssembler::insts_for_sethi(address a, bool worst_case) {
#ifdef _LP64
  if (worst_case)  return 7;
  intptr_t iaddr = (intptr_t) a;
  int msb32 = (int) (iaddr >> 32);
  int lsb32 = (int) (iaddr);
  int count;
  if (msb32 == 0 && lsb32 >= 0)
    count = 1;
  else if (msb32 == -1)
    count = 2;
  else {
    count = 2;
    if (msb32 & 0x3ff)
      count++;
    if (lsb32 & 0xFFFFFC00) {
      if ((lsb32 >> 20) & 0xfff)  count += 2;
      if ((lsb32 >> 10) & 0x3ff)  count += 2;
    }
  }
  return count;
#else
  return 1;
#endif
}

int MacroAssembler::worst_case_insts_for_set() {
  return insts_for_sethi(NULL, true) + 1;
}
// Keep in sync with MacroAssembler::insts_for_internal_set
void MacroAssembler::internal_set(const AddressLiteral& addrlit, Register d, bool ForceRelocatable) {
  intptr_t value = addrlit.value();

  if (!ForceRelocatable && addrlit.rspec().type() == relocInfo::none) {
    // can optimize
    if (-4096 <= value && value <= 4095) {
      or3(G0, value, d); // setsw (this leaves upper 32 bits sign-extended)
      return;
    }
    if (inv_hi22(hi22(value)) == value) {
      sethi(addrlit, d);
      return;
    }
  }
  assert_not_delayed((char*) "cannot put two instructions in delay slot");
  internal_sethi(addrlit, d, ForceRelocatable);
  if (ForceRelocatable || addrlit.rspec().type() != relocInfo::none || addrlit.low10() != 0) {
    add(d, addrlit.low10(), d, addrlit.rspec());
  }
}
// Keep in sync with MacroAssembler::internal_set
int MacroAssembler::insts_for_internal_set(intptr_t value) {
  // can optimize
  if (-4096 <= value && value <= 4095) {
    return 1;
  }
  if (inv_hi22(hi22(value)) == value) {
    return insts_for_sethi((address) value);
  }
  int count = insts_for_sethi((address) value);
  AddressLiteral al(value);
  if (al.low10() != 0) {
    count++;
  }
  return count;
}
void MacroAssembler::set(const AddressLiteral& al, Register d) {
  internal_set(al, d, false);
}

void MacroAssembler::set(intptr_t value, Register d) {
  AddressLiteral al(value);
  internal_set(al, d, false);
}

void MacroAssembler::set(address addr, Register d, RelocationHolder const& rspec) {
  AddressLiteral al(addr, rspec);
  internal_set(al, d, false);
}

void MacroAssembler::patchable_set(const AddressLiteral& al, Register d) {
  internal_set(al, d, true);
}

void MacroAssembler::patchable_set(intptr_t value, Register d) {
  AddressLiteral al(value);
  internal_set(al, d, true);
}
void MacroAssembler::set64(jlong value, Register d, Register tmp) {
  assert_not_delayed();
  v9_dep();

  int hi = (int)(value >> 32);
  int lo = (int)(value & ~0);
  // (Matcher::isSimpleConstant64 knows about the following optimizations.)
  if (Assembler::is_simm13(lo) && value == lo) {
    or3(G0, lo, d);
  } else if (hi == 0) {
    Assembler::sethi(lo, d);   // hardware version zero-extends to upper 32
    if (low10(lo) != 0)
      or3(d, low10(lo), d);
  }
  else if (hi == -1) {
    Assembler::sethi(~lo, d);  // hardware version zero-extends to upper 32
    xor3(d, low10(lo) ^ ~low10(~0), d);
  }
  else if (lo == 0) {
    if (Assembler::is_simm13(hi)) {
      or3(G0, hi, d);
    } else {
      Assembler::sethi(hi, d); // hardware version zero-extends to upper 32
      if (low10(hi) != 0)
        or3(d, low10(hi), d);
    }
    sllx(d, 32, d);
  }
  else {
    Assembler::sethi(hi, tmp);
    Assembler::sethi(lo, d);   // macro assembler version sign-extends
    if (low10(hi) != 0)
      or3(tmp, low10(hi), tmp);
    if (low10(lo) != 0)
      or3(d, low10(lo), d);
    sllx(tmp, 32, tmp);
    or3(d, tmp, d);
  }
}
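
// Worked examples (added for illustration) of the cases above:
//   set64(0x0000000000000fff, d, tmp)  // lo is simm13 and equals value:
//                                      //   or3(G0, 0xfff, d)
//   set64(0x0000000100000000, d, tmp)  // lo == 0 and hi fits simm13:
//                                      //   or3(G0, 1, d); sllx(d, 32, d)
//   set64(0xffffffff80000000, d, tmp)  // hi == -1:
//                                      //   sethi(~lo, d) followed by xor3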
int MacroAssembler::insts_for_set64(jlong value) {
  v9_dep();

  int hi = (int) (value >> 32);
  int lo = (int) (value & ~0);
  int count = 0;

  // (Matcher::isSimpleConstant64 knows about the following optimizations.)
  if (Assembler::is_simm13(lo) && value == lo) {
    count++;
  } else if (hi == 0) {
    count++;
    if (low10(lo) != 0)
      count++;
  }
  else if (hi == -1) {
    count += 2;
  }
  else if (lo == 0) {
    if (Assembler::is_simm13(hi)) {
      count++;
    } else {
      count++;
      if (low10(hi) != 0)
        count++;
    }
    count++;
  }
  else {
    count += 2;
    if (low10(hi) != 0)
      count++;
    if (low10(lo) != 0)
      count++;
    count += 2;
  }
  return count;
}
// compute size in bytes of sparc frame, given
// number of extraWords
int MacroAssembler::total_frame_size_in_bytes(int extraWords) {

  int nWords = frame::memory_parameter_word_sp_offset;

  nWords += extraWords;

  if (nWords & 1)  ++nWords; // round up to double-word

  return nWords * BytesPerWord;
}
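
// Illustrative arithmetic (added; the offset value is hypothetical): if
// frame::memory_parameter_word_sp_offset were 23 and extraWords == 4,
// then nWords = 27, which is odd, so it is rounded up to 28 and the
// result is 28 * BytesPerWord bytes.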
// save_frame: given number of "extra" words in frame,
// issue approp. save instruction (p 200, v8 manual)

void MacroAssembler::save_frame(int extraWords) {
  int delta = -total_frame_size_in_bytes(extraWords);
  if (is_simm13(delta)) {
    save(SP, delta, SP);
  } else {
    set(delta, G3_scratch);
    save(SP, G3_scratch, SP);
  }
}

void MacroAssembler::save_frame_c1(int size_in_bytes) {
  if (is_simm13(-size_in_bytes)) {
    save(SP, -size_in_bytes, SP);
  } else {
    set(-size_in_bytes, G3_scratch);
    save(SP, G3_scratch, SP);
  }
}
void MacroAssembler::save_frame_and_mov(int extraWords,
                                        Register s1, Register d1,
                                        Register s2, Register d2) {
  assert_not_delayed();

  // The trick here is to use precisely the same memory word
  // that trap handlers also use to save the register.
  // This word cannot be used for any other purpose, but
  // it works fine to save the register's value, whether or not
  // an interrupt flushes register windows at any given moment!
  Address s1_addr;
  if (s1->is_valid() && (s1->is_in() || s1->is_local())) {
    s1_addr = s1->address_in_saved_window();
    st_ptr(s1, s1_addr);
  }

  Address s2_addr;
  if (s2->is_valid() && (s2->is_in() || s2->is_local())) {
    s2_addr = s2->address_in_saved_window();
    st_ptr(s2, s2_addr);
  }

  save_frame(extraWords);

  if (s1_addr.base() == SP) {
    ld_ptr(s1_addr.after_save(), d1);
  } else if (s1->is_valid()) {
    mov(s1->after_save(), d1);
  }

  if (s2_addr.base() == SP) {
    ld_ptr(s2_addr.after_save(), d2);
  } else if (s2->is_valid()) {
    mov(s2->after_save(), d2);
  }
}
AddressLiteral MacroAssembler::allocate_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->allocate_metadata_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}

AddressLiteral MacroAssembler::constant_metadata_address(Metadata* obj) {
  assert(oop_recorder() != NULL, "this assembler needs a Recorder");
  int index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = metadata_Relocation::spec(index);
  return AddressLiteral((address)obj, rspec);
}

AddressLiteral MacroAssembler::constant_oop_address(jobject obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  assert(Universe::heap()->is_in_reserved(JNIHandles::resolve(obj)), "not an oop");
  int oop_index = oop_recorder()->find_index(obj);
  return AddressLiteral(obj, oop_Relocation::spec(oop_index));
}
void MacroAssembler::set_narrow_oop(jobject obj, Register d) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);

  assert_not_delayed();
  // Relocation with special format (see relocInfo_sparc.hpp).
  relocate(rspec, 1);
  // Assembler::sethi(0x3fffff, d);
  emit_int32( op(branch_op) | rd(d) | op2(sethi_op2) | hi22(0x3fffff) );
  // Don't add relocation for 'add'. Do patching during 'sethi' processing.
  add(d, 0x3ff, d);
}

void MacroAssembler::set_narrow_klass(Klass* k, Register d) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int klass_index = oop_recorder()->find_index(k);
  RelocationHolder rspec = metadata_Relocation::spec(klass_index);
  narrowOop encoded_k = oopDesc::encode_klass(k);

  assert_not_delayed();
  // Relocation with special format (see relocInfo_sparc.hpp).
  relocate(rspec, 1);
  // Assembler::sethi(encoded_k, d);
  emit_int32( op(branch_op) | rd(d) | op2(sethi_op2) | hi22(encoded_k) );
  // Don't add relocation for 'add'. Do patching during 'sethi' processing.
  add(d, low10(encoded_k), d);
}

void MacroAssembler::align(int modulus) {
  while (offset() % modulus != 0)  nop();
}
void RegistersForDebugging::print(outputStream* s) {
  FlagSetting fs(Debugging, true);
  int j;
  for (j = 0; j < 8; ++j) {
    if (j != 6) { s->print("i%d = ", j); os::print_location(s, i[j]); }
    else        { s->print("fp = "   ); os::print_location(s, i[j]); }
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    s->print("l%d = ", j); os::print_location(s, l[j]);
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    if (j != 6) { s->print("o%d = ", j); os::print_location(s, o[j]); }
    else        { s->print("sp = "   ); os::print_location(s, o[j]); }
  }
  s->cr();

  for (j = 0; j < 8; ++j) {
    s->print("g%d = ", j); os::print_location(s, g[j]);
  }
  s->cr();

  // print out floats with compression
  for (j = 0; j < 32; ) {
    jfloat val = f[j];
    int last = j;
    for ( ; last + 1 < 32; ++last) {
      char b1[1024], b2[1024];
      sprintf(b1, "%f", val);
      sprintf(b2, "%f", f[last + 1]);
      if (strcmp(b1, b2))
        break;
    }
    s->print("f%d", j);
    if (j != last)  s->print(" - f%d", last);
    s->print(" = %f", val);
    s->fill_to(25);
    s->print_cr(" (0x%x)", *(int*)&val);  // print the raw bits, not the float
    j = last + 1;
  }
  s->cr();

  // and doubles (evens only)
  for (j = 0; j < 32; ) {
    jdouble val = d[j];
    int last = j;
    for ( ; last + 1 < 32; ++last) {
      char b1[1024], b2[1024];
      sprintf(b1, "%f", val);
      sprintf(b2, "%f", d[last + 1]);
      if (strcmp(b1, b2))
        break;
    }
    s->print("d%d", 2 * j);
    if (j != last)  s->print(" - d%d", 2 * last);  // double registers are even-numbered
    s->print(" = %f", val);
    s->fill_to(30);
    s->print("(0x%x)", *(int*)&val);
    s->fill_to(42);
    s->print_cr("(0x%x)", *(1 + (int*)&val));
    j = last + 1;
  }
  s->cr();
}
void RegistersForDebugging::save_registers(MacroAssembler* a) {
  a->sub(FP, round_to(sizeof(RegistersForDebugging), sizeof(jdouble)) - STACK_BIAS, O0);
  a->flushw();
  int i;
  for (i = 0; i < 8; ++i) {
    a->ld_ptr(as_iRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr(L1, O0, i_offset(i));
    a->ld_ptr(as_lRegister(i)->address_in_saved_window().after_save(), L1);  a->st_ptr(L1, O0, l_offset(i));
    a->st_ptr(as_oRegister(i)->after_save(), O0, o_offset(i));
    a->st_ptr(as_gRegister(i)->after_save(), O0, g_offset(i));
  }
  for (i = 0; i < 32; ++i) {
    a->stf(FloatRegisterImpl::S, as_FloatRegister(i), O0, f_offset(i));
  }
  for (i = 0; i < 64; i += 2) {
    a->stf(FloatRegisterImpl::D, as_FloatRegister(i), O0, d_offset(i));
  }
}
void RegistersForDebugging::restore_registers(MacroAssembler* a, Register r) {
  for (int i = 1; i < 8; ++i) {
    a->ld_ptr(r, g_offset(i), as_gRegister(i));
  }
  for (int j = 0; j < 32; ++j) {
    a->ldf(FloatRegisterImpl::S, O0, f_offset(j), as_FloatRegister(j));
  }
  for (int k = 0; k < 64; k += 2) {
    a->ldf(FloatRegisterImpl::D, O0, d_offset(k), as_FloatRegister(k));
  }
}
// pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
void MacroAssembler::push_fTOS() {
  // %%%%%% need to implement this
}

// pops double TOS element from CPU stack and pushes on FPU stack
void MacroAssembler::pop_fTOS() {
  // %%%%%% need to implement this
}

void MacroAssembler::empty_FPU_stack() {
  // %%%%%% need to implement this
}
void MacroAssembler::_verify_oop(Register reg, const char* msg, const char* file, int line) {
  // plausibility check for oops
  if (!VerifyOops) return;

  if (reg == G0)  return;  // always NULL, which is always an oop

  BLOCK_COMMENT("verify_oop {");
  char buffer[64];
#ifdef COMPILER1
  if (CommentedAssembly) {
    snprintf(buffer, sizeof(buffer), "verify_oop at %d", offset());
    block_comment(buffer);
  }
#endif

  const char* real_msg = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("%s at offset %d (%s:%d)", msg, offset(), file, line);
    real_msg = code_string(ss.as_string());
  }

  // Call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());

  // Make some space on stack above the current register window.
  // Enough to hold 8 64-bit registers.
  add(SP, -8*8, SP);

  // Save some 64-bit registers; a normal 'save' chops the heads off
  // of 64-bit longs in the 32-bit build.
  stx(O0, SP, frame::register_save_words*wordSize+STACK_BIAS+0*8);
  stx(O1, SP, frame::register_save_words*wordSize+STACK_BIAS+1*8);
  mov(reg, O0); // Move arg into O0; arg might be in O7 which is about to be crushed
  stx(O7, SP, frame::register_save_words*wordSize+STACK_BIAS+7*8);

  // Size of set() should stay the same
  patchable_set((intptr_t)real_msg, O1);
  // Load address to call to into O7
  load_ptr_contents(a, O7);
  // Register call to verify_oop_subroutine
  callr(O7, G0);
  delayed()->nop();
  // recover frame size
  add(SP, 8*8, SP);
  BLOCK_COMMENT("} verify_oop");
}
void MacroAssembler::_verify_oop_addr(Address addr, const char* msg, const char* file, int line) {
  // plausibility check for oops
  if (!VerifyOops) return;

  const char* real_msg = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("%s at SP+%d (%s:%d)", msg, addr.disp(), file, line);
    real_msg = code_string(ss.as_string());
  }

  // Call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::verify_oop_subroutine_entry_address());

  // Make some space on stack above the current register window.
  // Enough to hold 8 64-bit registers.
  add(SP, -8*8, SP);

  // Save some 64-bit registers; a normal 'save' chops the heads off
  // of 64-bit longs in the 32-bit build.
  stx(O0, SP, frame::register_save_words*wordSize+STACK_BIAS+0*8);
  stx(O1, SP, frame::register_save_words*wordSize+STACK_BIAS+1*8);
  ld_ptr(addr.base(), addr.disp() + 8*8, O0); // Load arg into O0; arg might be in O7 which is about to be crushed
  stx(O7, SP, frame::register_save_words*wordSize+STACK_BIAS+7*8);

  // Size of set() should stay the same
  patchable_set((intptr_t)real_msg, O1);
  // Load address to call to into O7
  load_ptr_contents(a, O7);
  // Register call to verify_oop_subroutine
  callr(O7, G0);
  delayed()->nop();
  // recover frame size
  add(SP, 8*8, SP);
}
// side-door communication with signalHandler in os_solaris.cpp
address MacroAssembler::_verify_oop_implicit_branch[3] = { NULL };

// This macro is expanded just once; it creates shared code.  Contract:
// receives an oop in O0.  Must restore O0 & O7 from TLS.  Must not smash ANY
// registers, including flags.  May not use a register 'save', as this blows
// the high bits of the O-regs if they contain Long values.  Acts as a 'leaf'
// call.
void MacroAssembler::verify_oop_subroutine() {
  // Leaf call; no frame.
  Label succeed, fail, null_or_fail;

  // O0 and O7 were saved already (O0 in O0's TLS home, O7 in O5's TLS home).
  // O0 is now the oop to be checked.  O7 is the return address.
  Register O0_obj = O0;

  // Save some more registers for temps.
  stx(O2, SP, frame::register_save_words*wordSize+STACK_BIAS+2*8);
  stx(O3, SP, frame::register_save_words*wordSize+STACK_BIAS+3*8);
  stx(O4, SP, frame::register_save_words*wordSize+STACK_BIAS+4*8);
  stx(O5, SP, frame::register_save_words*wordSize+STACK_BIAS+5*8);

  // Save flags
  Register O5_save_flags = O5;
  rdccr(O5_save_flags);

  { // count number of verifies
    Register O2_adr   = O2;
    Register O3_accum = O3;
    inc_counter(StubRoutines::verify_oop_count_addr(), O2_adr, O3_accum);
  }

  Register O2_mask = O2;
  Register O3_bits = O3;
  Register O4_temp = O4;

  // mark lower end of faulting range
  assert(_verify_oop_implicit_branch[0] == NULL, "set once");
  _verify_oop_implicit_branch[0] = pc();

  // We can't check the mark oop because it could be in the process of
  // locking or unlocking while this is running.
  set(Universe::verify_oop_mask (), O2_mask);
  set(Universe::verify_oop_bits (), O3_bits);

  // assert((obj & oop_mask) == oop_bits);
  and3(O0_obj, O2_mask, O4_temp);
  cmp_and_brx_short(O4_temp, O3_bits, notEqual, pn, null_or_fail);

  if ((NULL_WORD & Universe::verify_oop_mask()) == Universe::verify_oop_bits()) {
    // the null_or_fail case is useless; must test for null separately
    br_null_short(O0_obj, pn, succeed);
  }

  // Check the Klass* of this object for being in the right area of memory.
  // Cannot do the load in the delay slot above in case O0 is null
  load_klass(O0_obj, O0_obj);
  // assert((klass != NULL)
  br_null_short(O0_obj, pn, fail);
  // TODO: Future assert that klass is lower 4g memory for UseCompressedKlassPointers

  wrccr(O5_save_flags); // Restore CCR's

  // mark upper end of faulting range
  _verify_oop_implicit_branch[1] = pc();

  //-----------------------
  // all tests pass
  bind(succeed);

  // Restore prior 64-bit registers
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+0*8, O0);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+1*8, O1);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+2*8, O2);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+3*8, O3);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+4*8, O4);
  ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+5*8, O5);

  retl(); // Leaf return; restore prior O7 in delay slot
  delayed()->ldx(SP, frame::register_save_words*wordSize+STACK_BIAS+7*8, O7);

  //-----------------------
  bind(null_or_fail); // nulls are less common but OK
  br_null(O0_obj, false, pt, succeed);
  delayed()->wrccr(O5_save_flags); // Restore CCR's

  //-----------------------
  // report failure:
  bind(fail);
  _verify_oop_implicit_branch[2] = pc();

  wrccr(O5_save_flags); // Restore CCR's

  save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));

  // stop_subroutine expects message pointer in I1.
  mov(I1, O1);

  // Restore prior 64-bit registers
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+0*8, I0);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+1*8, I1);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+2*8, I2);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+3*8, I3);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+4*8, I4);
  ldx(FP, frame::register_save_words*wordSize+STACK_BIAS+5*8, I5);

  // factor long stop-sequence into subroutine to save space
  assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");

  // call indirectly to solve generation ordering problem
  AddressLiteral al(StubRoutines::Sparc::stop_subroutine_entry_address());
  load_ptr_contents(al, O5);
  jmpl(O5, 0, O7);
  delayed()->nop();
}
void MacroAssembler::stop(const char* msg) {
  // save frame first to get O7 for return address
  // add one word to size in case struct is odd number of words long
  // It must be doubleword-aligned for storing doubles into it.

  save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));

  // stop_subroutine expects message pointer in I1.
  // Size of set() should stay the same
  patchable_set((intptr_t)msg, O1);

  // factor long stop-sequence into subroutine to save space
  assert(StubRoutines::Sparc::stop_subroutine_entry_address(), "hasn't been generated yet");

  // call indirectly to solve generation ordering problem
  AddressLiteral a(StubRoutines::Sparc::stop_subroutine_entry_address());
  load_ptr_contents(a, O5);
  jmpl(O5, 0, O7);
  delayed()->nop();

  breakpoint_trap();  // make stop actually stop rather than writing
                      // unnoticeable results in the output files.

  // restore(); done in callee to save space!
}
void MacroAssembler::warn(const char* msg) {
  save_frame(::round_to(sizeof(RegistersForDebugging) / BytesPerWord, 2));
  RegistersForDebugging::save_registers(this);
  mov(O0, L0);
  // Size of set() should stay the same
  patchable_set((intptr_t)msg, O0);
  call(CAST_FROM_FN_PTR(address, warning));
  delayed()->nop();
//  ret();
//  delayed()->restore();
  RegistersForDebugging::restore_registers(this, L0);
  restore();
}
void MacroAssembler::untested(const char* what) {
  // We must be able to turn interactive prompting off
  // in order to run automated test scripts on the VM.
  // Use the flag ShowMessageBoxOnError.

  const char* b = NULL;
  {
    ResourceMark rm;
    stringStream ss;
    ss.print("untested: %s", what);
    b = code_string(ss.as_string());
  }
  if (ShowMessageBoxOnError) { STOP(b); }
  else                       { warn(b); }
}
void MacroAssembler::stop_subroutine() {
  RegistersForDebugging::save_registers(this);

  // for the sake of the debugger, stick a PC on the current frame
  // (this assumes that the caller has performed an extra "save")
  mov(I7, L7);
  add(O7, -7 * BytesPerInt, I7);

  save_frame(); // one more save to free up another O7 register
  mov(I0, O1);  // addr of reg save area

  // We expect pointer to message in I1. Caller must set it up in O1
  mov(I1, O0);  // get msg
  call(CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type);
  delayed()->nop();

  restore();

  RegistersForDebugging::restore_registers(this, O0);

  save_frame(0);
  call(CAST_FROM_FN_PTR(address, breakpoint));
  delayed()->nop();
  restore();

  mov(L7, I7);
  retl();
  delayed()->restore(); // see stop above
}
void MacroAssembler::debug(char* msg, RegistersForDebugging* regs) {
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
    {
      // In order to make locks work, we need to fake an in_VM state
      ttyLocker ttyl;
      ::tty->print_cr("EXECUTION STOPPED: %s\n", msg);
      if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
        BytecodeCounter::print();
      }
      if (os::message_box(msg, "Execution stopped, print registers?"))
        regs->print(::tty);
    }
    BREAKPOINT;
    ThreadStateTransition::transition(JavaThread::current(), _thread_in_vm, saved_state);
  }
  else {
    ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
  }
  assert(false, err_msg("DEBUG MESSAGE: %s", msg));
}
void MacroAssembler::calc_mem_param_words(Register Rparam_words, Register Rresult) {
  subcc(Rparam_words, Argument::n_register_parameters, Rresult); // how many mem words?
  Label no_extras;
  br(negative, true, pt, no_extras); // if neg, clear reg
  delayed()->set(0, Rresult);        // annulled, so only if taken
  bind(no_extras);
}
void MacroAssembler::calc_frame_size(Register Rextra_words, Register Rresult) {
#ifdef _LP64
  add(Rextra_words, frame::memory_parameter_word_sp_offset, Rresult);
#else
  add(Rextra_words, frame::memory_parameter_word_sp_offset + 1, Rresult);
#endif
  bclr(1, Rresult);
  sll(Rresult, LogBytesPerWord, Rresult); // Rresult has total frame bytes
}
void MacroAssembler::calc_frame_size_and_save(Register Rextra_words, Register Rresult) {
  calc_frame_size(Rextra_words, Rresult);
  neg(Rresult);
  save(SP, Rresult, SP);
}
// ---------------------------------------------------------
Assembler::RCondition cond2rcond(Assembler::Condition c) {
  switch (c) {
    /*case zero: */
    case Assembler::equal:        return Assembler::rc_z;
    case Assembler::lessEqual:    return Assembler::rc_lez;
    case Assembler::less:         return Assembler::rc_lz;
    /*case notZero:*/
    case Assembler::notEqual:     return Assembler::rc_nz;
    case Assembler::greater:      return Assembler::rc_gz;
    case Assembler::greaterEqual: return Assembler::rc_gez;
  }
  ShouldNotReachHere();
  return Assembler::rc_z;
}
// compares (32 bit) register with zero and branches.  NOT FOR USE WITH 64-bit POINTERS
void MacroAssembler::cmp_zero_and_br(Condition c, Register s1, Label& L, bool a, Predict p) {
  tst(s1);
  br(c, a, p, L);
}

// Compares a pointer register with zero and branches on null.
// Does a test & branch on 32-bit systems and a register-branch on 64-bit.
void MacroAssembler::br_null(Register s1, bool a, Predict p, Label& L) {
  assert_not_delayed();
#ifdef _LP64
  bpr(rc_z, a, p, s1, L);
#else
  tst(s1);
  br(zero, a, p, L);
#endif
}

void MacroAssembler::br_notnull(Register s1, bool a, Predict p, Label& L) {
  assert_not_delayed();
#ifdef _LP64
  bpr(rc_nz, a, p, s1, L);
#else
  tst(s1);
  br(notZero, a, p, L);
#endif
}
// Compare registers and branch with nop in delay slot or cbcond without delay slot.

// Compare integer (32 bit) values (icc only).
void MacroAssembler::cmp_and_br_short(Register s1, Register s2, Condition c,
                                      Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(c, icc, s1, s2, L);
  } else {
    cmp(s1, s2);
    br(c, false, p, L);
    delayed()->nop();
  }
}

// Compare integer (32 bit) values (icc only).
void MacroAssembler::cmp_and_br_short(Register s1, int simm13a, Condition c,
                                      Predict p, Label& L) {
  assert_not_delayed();
  if (is_simm(simm13a, 5) && use_cbcond(L)) {
    Assembler::cbcond(c, icc, s1, simm13a, L);
  } else {
    cmp(s1, simm13a);
    br(c, false, p, L);
    delayed()->nop();
  }
}

// Branch that tests xcc in LP64 and icc in !LP64
void MacroAssembler::cmp_and_brx_short(Register s1, Register s2, Condition c,
                                       Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(c, ptr_cc, s1, s2, L);
  } else {
    cmp(s1, s2);
    brx(c, false, p, L);
    delayed()->nop();
  }
}

// Branch that tests xcc in LP64 and icc in !LP64
void MacroAssembler::cmp_and_brx_short(Register s1, int simm13a, Condition c,
                                       Predict p, Label& L) {
  assert_not_delayed();
  if (is_simm(simm13a, 5) && use_cbcond(L)) {
    Assembler::cbcond(c, ptr_cc, s1, simm13a, L);
  } else {
    cmp(s1, simm13a);
    brx(c, false, p, L);
    delayed()->nop();
  }
}
// Short branch version for compares a pointer with zero.

void MacroAssembler::br_null_short(Register s1, Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(zero, ptr_cc, s1, 0, L);
    return;
  }
  br_null(s1, false, p, L);
  delayed()->nop();
}

void MacroAssembler::br_notnull_short(Register s1, Predict p, Label& L) {
  assert_not_delayed();
  if (use_cbcond(L)) {
    Assembler::cbcond(notZero, ptr_cc, s1, 0, L);
    return;
  }
  br_notnull(s1, false, p, L);
  delayed()->nop();
}
1742 // Unconditional short branch
1743 void MacroAssembler::ba_short(Label& L) {
1744 if (use_cbcond(L)) {
1745 Assembler::cbcond(equal, icc, G0, G0, L);
1746 return;
1747 }
1748 br(always, false, pt, L);
1749 delayed()->nop();
1750 }
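// Note: cbcond(equal, icc, G0, G0, L) compares %g0 with itself and is
// therefore always taken - an unconditional short branch that, unlike
// br(always, ...), needs no delay-slot nop.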
1752 // Instruction sequences factored across the compiler and interpreter.
1755 void MacroAssembler::lcmp( Register Ra_hi, Register Ra_low,
1756 Register Rb_hi, Register Rb_low,
1757 Register Rresult) {
1759 Label check_low_parts, done;
1761 cmp(Ra_hi, Rb_hi ); // compare hi parts
1762 br(equal, true, pt, check_low_parts);
1763 delayed()->cmp(Ra_low, Rb_low); // test low parts
1765 // The low parts are compared unsigned, so it does not matter whether
1766 // the numbers are negative or not.
1767 // E.g., -2 cmp -1: the low parts are 0xfffffffe and 0xffffffff.
1768 // The second one is bigger (unsignedly).
1770 // Other notes: The first move in each triplet can be unconditional
1771 // (and therefore probably prefetchable).
1772 // And the equals case for the high part does not need testing,
1773 // since that triplet is reached only after finding the high halves differ.
1775 mov(-1, Rresult);
1776 ba(done);
1777 delayed()->movcc(greater, false, icc, 1, Rresult);
1779 bind(check_low_parts);
1781 mov( -1, Rresult);
1782 movcc(equal, false, icc, 0, Rresult);
1783 movcc(greaterUnsigned, false, icc, 1, Rresult);
1785 bind(done);
1786 }
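// Worked example (illustrative): comparing -2 with -1 as 64-bit values,
// the high halves are both 0xffffffff, so control reaches check_low_parts.
// There 0xfffffffe is unsigned-less-than 0xffffffff, neither movcc fires,
// and Rresult keeps -1 - the correct answer for -2 < -1.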
1788 void MacroAssembler::lneg( Register Rhi, Register Rlow ) {
1789 subcc( G0, Rlow, Rlow );
1790 subc( G0, Rhi, Rhi );
1791 }
1793 void MacroAssembler::lshl( Register Rin_high, Register Rin_low,
1794 Register Rcount,
1795 Register Rout_high, Register Rout_low,
1796 Register Rtemp ) {
1799 Register Ralt_count = Rtemp;
1800 Register Rxfer_bits = Rtemp;
1802 assert( Ralt_count != Rin_high
1803 && Ralt_count != Rin_low
1804 && Ralt_count != Rcount
1805 && Rxfer_bits != Rin_low
1806 && Rxfer_bits != Rin_high
1807 && Rxfer_bits != Rcount
1808 && Rxfer_bits != Rout_low
1809 && Rout_low != Rin_high,
1810 "register alias checks");
1812 Label big_shift, done;
1814 // This code can be optimized to use the 64 bit shifts in V9.
1815 // Here we use the 32 bit shifts.
1817 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits
1818 subcc(Rcount, 31, Ralt_count);
1819 br(greater, true, pn, big_shift);
1820 delayed()->dec(Ralt_count);
1822 // shift < 32 bits, Ralt_count = Rcount-31
1824 // We get the transfer bits by shifting the low register right by
1825 // 32-count. This is done by shifting right by 31-count and then by one
1826 // more to take care of the special (rare) case where count is zero
1827 // (shifting by 32 would not work).
1829 neg(Ralt_count);
1831 // The order of the next two instructions is critical in the case where
1832 // Rin and Rout are the same and should not be reversed.
1834 srl(Rin_low, Ralt_count, Rxfer_bits); // shift right by 31-count
1835 if (Rcount != Rout_low) {
1836 sll(Rin_low, Rcount, Rout_low); // low half
1837 }
1838 sll(Rin_high, Rcount, Rout_high);
1839 if (Rcount == Rout_low) {
1840 sll(Rin_low, Rcount, Rout_low); // low half
1841 }
1842 srl(Rxfer_bits, 1, Rxfer_bits ); // shift right by one more
1843 ba(done);
1844 delayed()->or3(Rout_high, Rxfer_bits, Rout_high); // new hi value: or in shifted old hi part and xfer from low
1846 // shift >= 32 bits, Ralt_count = Rcount-32
1847 bind(big_shift);
1848 sll(Rin_low, Ralt_count, Rout_high );
1849 clr(Rout_low);
1851 bind(done);
1852 }
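// Worked example (illustrative): for hi:lo = 0x00000000:0x80000000 and
// count = 1 (the small-shift path), Rxfer_bits = (lo >> 30) >> 1 = 1,
// Rout_low = lo << 1 = 0, and Rout_high = (hi << 1) | 1 = 1, giving
// 0x00000001:0x00000000 - bit 31 of the low half carried into bit 0 of
// the high half, as a 64-bit left shift requires.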
1855 void MacroAssembler::lshr( Register Rin_high, Register Rin_low,
1856 Register Rcount,
1857 Register Rout_high, Register Rout_low,
1858 Register Rtemp ) {
1860 Register Ralt_count = Rtemp;
1861 Register Rxfer_bits = Rtemp;
1863 assert( Ralt_count != Rin_high
1864 && Ralt_count != Rin_low
1865 && Ralt_count != Rcount
1866 && Rxfer_bits != Rin_low
1867 && Rxfer_bits != Rin_high
1868 && Rxfer_bits != Rcount
1869 && Rxfer_bits != Rout_high
1870 && Rout_high != Rin_low,
1871 "register alias checks");
1873 Label big_shift, done;
1875 // This code can be optimized to use the 64 bit shifts in V9.
1876 // Here we use the 32 bit shifts.
1878 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits
1879 subcc(Rcount, 31, Ralt_count);
1880 br(greater, true, pn, big_shift);
1881 delayed()->dec(Ralt_count);
1883 // shift < 32 bits, Ralt_count = Rcount-31
1885 // We get the transfer bits by shifting the high register left by
1886 // 32-count. This is done by shifting left by 31-count and then by one
1887 // more to take care of the special (rare) case where count is zero
1888 // (shifting by 32 would not work).
1890 neg(Ralt_count);
1891 if (Rcount != Rout_low) {
1892 srl(Rin_low, Rcount, Rout_low);
1893 }
1895 // The order of the next two instructions is critical in the case where
1896 // Rin and Rout are the same and should not be reversed.
1898 sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count
1899 sra(Rin_high, Rcount, Rout_high ); // high half
1900 sll(Rxfer_bits, 1, Rxfer_bits); // shift left by one more
1901 if (Rcount == Rout_low) {
1902 srl(Rin_low, Rcount, Rout_low);
1903 }
1904 ba(done);
1905 delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high
1907 // shift >= 32 bits, Ralt_count = Rcount-32
1908 bind(big_shift);
1910 sra(Rin_high, Ralt_count, Rout_low);
1911 sra(Rin_high, 31, Rout_high); // sign into hi
1913 bind( done );
1914 }
1918 void MacroAssembler::lushr( Register Rin_high, Register Rin_low,
1919 Register Rcount,
1920 Register Rout_high, Register Rout_low,
1921 Register Rtemp ) {
1923 Register Ralt_count = Rtemp;
1924 Register Rxfer_bits = Rtemp;
1926 assert( Ralt_count != Rin_high
1927 && Ralt_count != Rin_low
1928 && Ralt_count != Rcount
1929 && Rxfer_bits != Rin_low
1930 && Rxfer_bits != Rin_high
1931 && Rxfer_bits != Rcount
1932 && Rxfer_bits != Rout_high
1933 && Rout_high != Rin_low,
1934 "register alias checks");
1936 Label big_shift, done;
1938 // This code can be optimized to use the 64 bit shifts in V9.
1939 // Here we use the 32 bit shifts.
1941 and3( Rcount, 0x3f, Rcount); // take least significant 6 bits
1942 subcc(Rcount, 31, Ralt_count);
1943 br(greater, true, pn, big_shift);
1944 delayed()->dec(Ralt_count);
1946 // shift < 32 bits, Ralt_count = Rcount-31
1948 // We get the transfer bits by shifting the high register left by
1949 // 32-count. This is done by shifting left by 31-count and then by one
1950 // more to take care of the special (rare) case where count is zero
1951 // (shifting by 32 would not work).
1953 neg(Ralt_count);
1954 if (Rcount != Rout_low) {
1955 srl(Rin_low, Rcount, Rout_low);
1956 }
1958 // The order of the next two instructions is critical in the case where
1959 // Rin and Rout are the same and should not be reversed.
1961 sll(Rin_high, Ralt_count, Rxfer_bits); // shift left by 31-count
1962 srl(Rin_high, Rcount, Rout_high ); // high half
1963 sll(Rxfer_bits, 1, Rxfer_bits); // shift left by one more
1964 if (Rcount == Rout_low) {
1965 srl(Rin_low, Rcount, Rout_low);
1966 }
1967 ba(done);
1968 delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high
1970 // shift >= 32 bits, Ralt_count = Rcount-32
1971 bind(big_shift);
1973 srl(Rin_high, Ralt_count, Rout_low);
1974 clr(Rout_high);
1976 bind( done );
1977 }
1979 #ifdef _LP64
1980 void MacroAssembler::lcmp( Register Ra, Register Rb, Register Rresult) {
1981 cmp(Ra, Rb);
1982 mov(-1, Rresult);
1983 movcc(equal, false, xcc, 0, Rresult);
1984 movcc(greater, false, xcc, 1, Rresult);
1985 }
1986 #endif
1989 void MacroAssembler::load_sized_value(Address src, Register dst, size_t size_in_bytes, bool is_signed) {
1990 switch (size_in_bytes) {
1991 case 8: ld_long(src, dst); break;
1992 case 4: ld( src, dst); break;
1993 case 2: is_signed ? ldsh(src, dst) : lduh(src, dst); break;
1994 case 1: is_signed ? ldsb(src, dst) : ldub(src, dst); break;
1995 default: ShouldNotReachHere();
1996 }
1997 }
1999 void MacroAssembler::store_sized_value(Register src, Address dst, size_t size_in_bytes) {
2000 switch (size_in_bytes) {
2001 case 8: st_long(src, dst); break;
2002 case 4: st( src, dst); break;
2003 case 2: sth( src, dst); break;
2004 case 1: stb( src, dst); break;
2005 default: ShouldNotReachHere();
2006 }
2007 }
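// Usage sketch (illustrative): the size/signedness pair selects the SPARC
// load directly, e.g.
//   load_sized_value(src, dst, 2, true);   // emits ldsh (sign-extending)
//   load_sized_value(src, dst, 2, false);  // emits lduh (zero-extending)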
2010 void MacroAssembler::float_cmp( bool is_float, int unordered_result,
2011 FloatRegister Fa, FloatRegister Fb,
2012 Register Rresult) {
2013 if (is_float) {
2014 fcmp(FloatRegisterImpl::S, fcc0, Fa, Fb);
2015 } else {
2016 fcmp(FloatRegisterImpl::D, fcc0, Fa, Fb);
2017 }
2019 if (unordered_result == 1) {
2020 mov( -1, Rresult);
2021 movcc(f_equal, true, fcc0, 0, Rresult);
2022 movcc(f_unorderedOrGreater, true, fcc0, 1, Rresult);
2023 } else {
2024 mov( -1, Rresult);
2025 movcc(f_equal, true, fcc0, 0, Rresult);
2026 movcc(f_greater, true, fcc0, 1, Rresult);
2027 }
2028 }
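// Note: unordered_result selects what an unordered (NaN) comparison yields.
// With unordered_result == 1, f_unorderedOrGreater forces Rresult to 1;
// otherwise neither movcc fires and Rresult stays -1. This matches the
// Java fcmpg/fcmpl bytecodes, which push 1 and -1 respectively on NaN.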
2031 void MacroAssembler::save_all_globals_into_locals() {
2032 mov(G1,L1);
2033 mov(G2,L2);
2034 mov(G3,L3);
2035 mov(G4,L4);
2036 mov(G5,L5);
2037 mov(G6,L6);
2038 mov(G7,L7);
2039 }
2041 void MacroAssembler::restore_globals_from_locals() {
2042 mov(L1,G1);
2043 mov(L2,G2);
2044 mov(L3,G3);
2045 mov(L4,G4);
2046 mov(L5,G5);
2047 mov(L6,G6);
2048 mov(L7,G7);
2049 }
2051 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
2052 Register tmp,
2053 int offset) {
2054 intptr_t value = *delayed_value_addr;
2055 if (value != 0)
2056 return RegisterOrConstant(value + offset);
2058 // load indirectly to solve generation ordering problem
2059 AddressLiteral a(delayed_value_addr);
2060 load_ptr_contents(a, tmp);
2062 #ifdef ASSERT
2063 tst(tmp);
2064 breakpoint_trap(zero, xcc);
2065 #endif
2067 if (offset != 0)
2068 add(tmp, offset, tmp);
2070 return RegisterOrConstant(tmp);
2071 }
2074 RegisterOrConstant MacroAssembler::regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
2075 assert(d.register_or_noreg() != G0, "lost side effect");
2076 if ((s2.is_constant() && s2.as_constant() == 0) ||
2077 (s2.is_register() && s2.as_register() == G0)) {
2078 // Do nothing, just move value.
2079 if (s1.is_register()) {
2080 if (d.is_constant()) d = temp;
2081 mov(s1.as_register(), d.as_register());
2082 return d;
2083 } else {
2084 return s1;
2085 }
2086 }
2088 if (s1.is_register()) {
2089 assert_different_registers(s1.as_register(), temp);
2090 if (d.is_constant()) d = temp;
2091 andn(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
2092 return d;
2093 } else {
2094 if (s2.is_register()) {
2095 assert_different_registers(s2.as_register(), temp);
2096 if (d.is_constant()) d = temp;
2097 set(s1.as_constant(), temp);
2098 andn(temp, s2.as_register(), d.as_register());
2099 return d;
2100 } else {
2101 intptr_t res = s1.as_constant() & ~s2.as_constant();
2102 return res;
2103 }
2104 }
2105 }
2107 RegisterOrConstant MacroAssembler::regcon_inc_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
2108 assert(d.register_or_noreg() != G0, "lost side effect");
2109 if ((s2.is_constant() && s2.as_constant() == 0) ||
2110 (s2.is_register() && s2.as_register() == G0)) {
2111 // Do nothing, just move value.
2112 if (s1.is_register()) {
2113 if (d.is_constant()) d = temp;
2114 mov(s1.as_register(), d.as_register());
2115 return d;
2116 } else {
2117 return s1;
2118 }
2119 }
2121 if (s1.is_register()) {
2122 assert_different_registers(s1.as_register(), temp);
2123 if (d.is_constant()) d = temp;
2124 add(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
2125 return d;
2126 } else {
2127 if (s2.is_register()) {
2128 assert_different_registers(s2.as_register(), temp);
2129 if (d.is_constant()) d = temp;
2130 add(s2.as_register(), ensure_simm13_or_reg(s1, temp), d.as_register());
2131 return d;
2132 } else {
2133 intptr_t res = s1.as_constant() + s2.as_constant();
2134 return res;
2135 }
2136 }
2137 }
2139 RegisterOrConstant MacroAssembler::regcon_sll_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp) {
2140 assert(d.register_or_noreg() != G0, "lost side effect");
2141 if (!is_simm13(s2.constant_or_zero()))
2142 s2 = (s2.as_constant() & 0xFF);
2143 if ((s2.is_constant() && s2.as_constant() == 0) ||
2144 (s2.is_register() && s2.as_register() == G0)) {
2145 // Do nothing, just move value.
2146 if (s1.is_register()) {
2147 if (d.is_constant()) d = temp;
2148 mov(s1.as_register(), d.as_register());
2149 return d;
2150 } else {
2151 return s1;
2152 }
2153 }
2155 if (s1.is_register()) {
2156 assert_different_registers(s1.as_register(), temp);
2157 if (d.is_constant()) d = temp;
2158 sll_ptr(s1.as_register(), ensure_simm13_or_reg(s2, temp), d.as_register());
2159 return d;
2160 } else {
2161 if (s2.is_register()) {
2162 assert_different_registers(s2.as_register(), temp);
2163 if (d.is_constant()) d = temp;
2164 set(s1.as_constant(), temp);
2165 sll_ptr(temp, s2.as_register(), d.as_register());
2166 return d;
2167 } else {
2168 intptr_t res = s1.as_constant() << s2.as_constant();
2169 return res;
2170 }
2171 }
2172 }
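// Note: all three regcon_* helpers fold at assembly time when both operands
// are constants and emit no instructions at all; e.g. regcon_sll_ptr on the
// constants 5 and 3 simply returns the RegisterOrConstant 40 (5 << 3).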
2175 // Look up the method for a megamorphic invokeinterface call.
2176 // The target method is determined by <intf_klass, itable_index>.
2177 // The receiver klass is in recv_klass.
2178 // On success, the result will be in method_result, and execution falls through.
2179 // On failure, execution transfers to the given label.
2180 void MacroAssembler::lookup_interface_method(Register recv_klass,
2181 Register intf_klass,
2182 RegisterOrConstant itable_index,
2183 Register method_result,
2184 Register scan_temp,
2185 Register sethi_temp,
2186 Label& L_no_such_interface) {
2187 assert_different_registers(recv_klass, intf_klass, method_result, scan_temp);
2188 assert(itable_index.is_constant() || itable_index.as_register() == method_result,
2189 "caller must use same register for non-constant itable index as for method");
2191 Label L_no_such_interface_restore;
2192 bool did_save = false;
2193 if (scan_temp == noreg || sethi_temp == noreg) {
2194 Register recv_2 = recv_klass->is_global() ? recv_klass : L0;
2195 Register intf_2 = intf_klass->is_global() ? intf_klass : L1;
2196 assert(method_result->is_global(), "must be able to return value");
2197 scan_temp = L2;
2198 sethi_temp = L3;
2199 save_frame_and_mov(0, recv_klass, recv_2, intf_klass, intf_2);
2200 recv_klass = recv_2;
2201 intf_klass = intf_2;
2202 did_save = true;
2203 }
2205 // Compute start of first itableOffsetEntry (which is at the end of the vtable)
2206 int vtable_base = InstanceKlass::vtable_start_offset() * wordSize;
2207 int scan_step = itableOffsetEntry::size() * wordSize;
2208 int vte_size = vtableEntry::size() * wordSize;
2210 lduw(recv_klass, InstanceKlass::vtable_length_offset() * wordSize, scan_temp);
2211 // %%% We should store the aligned, prescaled offset in the klassoop.
2212 // Then the next several instructions would fold away.
2214 int round_to_unit = ((HeapWordsPerLong > 1) ? BytesPerLong : 0);
2215 int itb_offset = vtable_base;
2216 if (round_to_unit != 0) {
2217 // hoist first instruction of round_to(scan_temp, BytesPerLong):
2218 itb_offset += round_to_unit - wordSize;
2219 }
2220 int itb_scale = exact_log2(vtableEntry::size() * wordSize);
2221 sll(scan_temp, itb_scale, scan_temp);
2222 add(scan_temp, itb_offset, scan_temp);
2223 if (round_to_unit != 0) {
2224 // Round up to align_object_offset boundary
2225 // see code for InstanceKlass::start_of_itable!
2226 // Was: round_to(scan_temp, BytesPerLong);
2227 // Hoisted: add(scan_temp, BytesPerLong-1, scan_temp);
2228 and3(scan_temp, -round_to_unit, scan_temp);
2229 }
2230 add(recv_klass, scan_temp, scan_temp);
2232 // Adjust recv_klass by scaled itable_index, so we can free itable_index.
2233 RegisterOrConstant itable_offset = itable_index;
2234 itable_offset = regcon_sll_ptr(itable_index, exact_log2(itableMethodEntry::size() * wordSize), itable_offset);
2235 itable_offset = regcon_inc_ptr(itable_offset, itableMethodEntry::method_offset_in_bytes(), itable_offset);
2236 add(recv_klass, ensure_simm13_or_reg(itable_offset, sethi_temp), recv_klass);
2238 // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
2239 // if (scan->interface() == intf) {
2240 // result = (klass + scan->offset() + itable_index);
2241 // }
2242 // }
2243 Label L_search, L_found_method;
2245 for (int peel = 1; peel >= 0; peel--) {
2246 // %%%% Could load both offset and interface in one ldx, if they were
2247 // in the opposite order. This would save a load.
2248 ld_ptr(scan_temp, itableOffsetEntry::interface_offset_in_bytes(), method_result);
2250 // Check that this entry is non-null. A null entry means that
2251 // the receiver class doesn't implement the interface, and wasn't the
2252 // same as when the caller was compiled.
2253 bpr(Assembler::rc_z, false, Assembler::pn, method_result, did_save ? L_no_such_interface_restore : L_no_such_interface);
2254 delayed()->cmp(method_result, intf_klass);
2256 if (peel) {
2257 brx(Assembler::equal, false, Assembler::pt, L_found_method);
2258 } else {
2259 brx(Assembler::notEqual, false, Assembler::pn, L_search);
2260 // (invert the test to fall through to found_method...)
2261 }
2262 delayed()->add(scan_temp, scan_step, scan_temp);
2264 if (!peel) break;
2266 bind(L_search);
2267 }
2269 bind(L_found_method);
2271 // Got a hit.
2272 int ito_offset = itableOffsetEntry::offset_offset_in_bytes();
2273 // scan_temp[-scan_step] points to the vtable offset we need
2274 ito_offset -= scan_step;
2275 lduw(scan_temp, ito_offset, scan_temp);
2276 ld_ptr(recv_klass, scan_temp, method_result);
2278 if (did_save) {
2279 Label L_done;
2280 ba(L_done);
2281 delayed()->restore();
2283 bind(L_no_such_interface_restore);
2284 ba(L_no_such_interface);
2285 delayed()->restore();
2287 bind(L_done);
2288 }
2289 }
2292 // virtual method calling
2293 void MacroAssembler::lookup_virtual_method(Register recv_klass,
2294 RegisterOrConstant vtable_index,
2295 Register method_result) {
2296 assert_different_registers(recv_klass, method_result, vtable_index.register_or_noreg());
2297 Register sethi_temp = method_result;
2298 const int base = (InstanceKlass::vtable_start_offset() * wordSize +
2299 // method pointer offset within the vtable entry:
2300 vtableEntry::method_offset_in_bytes());
2301 RegisterOrConstant vtable_offset = vtable_index;
2302 // Each of the following three lines potentially generates an instruction.
2303 // But the total number of address formation instructions will always be
2304 // at most two, and will often be zero. In any case, it will be optimal.
2305 // If vtable_index is a register, we will have (sll_ptr N,x; inc_ptr B,x; ld_ptr k,x).
2306 // If vtable_index is a constant, we will have at most (set B+X<<N,t; ld_ptr k,t).
2307 vtable_offset = regcon_sll_ptr(vtable_index, exact_log2(vtableEntry::size() * wordSize), vtable_offset);
2308 vtable_offset = regcon_inc_ptr(vtable_offset, base, vtable_offset, sethi_temp);
2309 Address vtable_entry_addr(recv_klass, ensure_simm13_or_reg(vtable_offset, sethi_temp));
2310 ld_ptr(vtable_entry_addr, method_result);
2311 }
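// Sizing sketch (assuming LP64 and a one-word vtableEntry, i.e. 8 bytes
// per entry): for a constant vtable_index of 2 the address computation
// folds to Address(recv_klass, base + (2 << 3)), and the lookup is a
// single ld_ptr when that displacement fits in simm13.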
2314 void MacroAssembler::check_klass_subtype(Register sub_klass,
2315 Register super_klass,
2316 Register temp_reg,
2317 Register temp2_reg,
2318 Label& L_success) {
2319 Register sub_2 = sub_klass;
2320 Register sup_2 = super_klass;
2321 if (!sub_2->is_global()) sub_2 = L0;
2322 if (!sup_2->is_global()) sup_2 = L1;
2323 bool did_save = false;
2324 if (temp_reg == noreg || temp2_reg == noreg) {
2325 temp_reg = L2;
2326 temp2_reg = L3;
2327 save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2);
2328 sub_klass = sub_2;
2329 super_klass = sup_2;
2330 did_save = true;
2331 }
2332 Label L_failure, L_pop_to_failure, L_pop_to_success;
2333 check_klass_subtype_fast_path(sub_klass, super_klass,
2334 temp_reg, temp2_reg,
2335 (did_save ? &L_pop_to_success : &L_success),
2336 (did_save ? &L_pop_to_failure : &L_failure), NULL);
2338 if (!did_save)
2339 save_frame_and_mov(0, sub_klass, sub_2, super_klass, sup_2);
2340 check_klass_subtype_slow_path(sub_2, sup_2,
2341 L2, L3, L4, L5,
2342 NULL, &L_pop_to_failure);
2344 // on success:
2345 bind(L_pop_to_success);
2346 restore();
2347 ba_short(L_success);
2349 // on failure:
2350 bind(L_pop_to_failure);
2351 restore();
2352 bind(L_failure);
2353 }
2356 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
2357 Register super_klass,
2358 Register temp_reg,
2359 Register temp2_reg,
2360 Label* L_success,
2361 Label* L_failure,
2362 Label* L_slow_path,
2363 RegisterOrConstant super_check_offset) {
2364 int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
2365 int sco_offset = in_bytes(Klass::super_check_offset_offset());
2367 bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
2368 bool need_slow_path = (must_load_sco ||
2369 super_check_offset.constant_or_zero() == sco_offset);
2371 assert_different_registers(sub_klass, super_klass, temp_reg);
2372 if (super_check_offset.is_register()) {
2373 assert_different_registers(sub_klass, super_klass, temp_reg,
2374 super_check_offset.as_register());
2375 } else if (must_load_sco) {
2376 assert(temp2_reg != noreg, "supply either a temp or a register offset");
2377 }
2379 Label L_fallthrough;
2380 int label_nulls = 0;
2381 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
2382 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
2383 if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
2384 assert(label_nulls <= 1 ||
2385 (L_slow_path == &L_fallthrough && label_nulls <= 2 && !need_slow_path),
2386 "at most one NULL in the batch, usually");
2388 // If the pointers are equal, we are done (e.g., String[] elements).
2389 // This self-check enables sharing of secondary supertype arrays among
2390 // non-primary types such as array-of-interface. Otherwise, each such
2391 // type would need its own customized SSA.
2392 // We move this check to the front of the fast path because many
2393 // type checks are in fact trivially successful in this manner,
2394 // so we get a nicely predicted branch right at the start of the check.
2395 cmp(super_klass, sub_klass);
2396 brx(Assembler::equal, false, Assembler::pn, *L_success);
2397 delayed()->nop();
2399 // Check the supertype display:
2400 if (must_load_sco) {
2401 // The super check offset is always positive...
2402 lduw(super_klass, sco_offset, temp2_reg);
2403 super_check_offset = RegisterOrConstant(temp2_reg);
2404 // super_check_offset is now in a register.
2405 assert_different_registers(sub_klass, super_klass, temp_reg, super_check_offset.as_register());
2406 }
2407 ld_ptr(sub_klass, super_check_offset, temp_reg);
2408 cmp(super_klass, temp_reg);
2410 // This check has worked decisively for primary supers.
2411 // Secondary supers are sought in the super_cache ('super_cache_addr').
2412 // (Secondary supers are interfaces and very deeply nested subtypes.)
2413 // This works in the same check above because of a tricky aliasing
2414 // between the super_cache and the primary super display elements.
2415 // (The 'super_check_addr' can address either, as the case requires.)
2416 // Note that the cache is updated below if it does not help us find
2417 // what we need immediately.
2418 // So if it was a primary super, we can just fail immediately.
2419 // Otherwise, it's the slow path for us (no success at this point).
2421 // Hacked ba(), which may only be used just before L_fallthrough.
2422 #define FINAL_JUMP(label) \
2423 if (&(label) != &L_fallthrough) { \
2424 ba(label); delayed()->nop(); \
2425 }
2427 if (super_check_offset.is_register()) {
2428 brx(Assembler::equal, false, Assembler::pn, *L_success);
2429 delayed()->cmp(super_check_offset.as_register(), sc_offset);
2431 if (L_failure == &L_fallthrough) {
2432 brx(Assembler::equal, false, Assembler::pt, *L_slow_path);
2433 delayed()->nop();
2434 } else {
2435 brx(Assembler::notEqual, false, Assembler::pn, *L_failure);
2436 delayed()->nop();
2437 FINAL_JUMP(*L_slow_path);
2438 }
2439 } else if (super_check_offset.as_constant() == sc_offset) {
2440 // Need a slow path; fast failure is impossible.
2441 if (L_slow_path == &L_fallthrough) {
2442 brx(Assembler::equal, false, Assembler::pt, *L_success);
2443 delayed()->nop();
2444 } else {
2445 brx(Assembler::notEqual, false, Assembler::pn, *L_slow_path);
2446 delayed()->nop();
2447 FINAL_JUMP(*L_success);
2448 }
2449 } else {
2450 // No slow path; it's a fast decision.
2451 if (L_failure == &L_fallthrough) {
2452 brx(Assembler::equal, false, Assembler::pt, *L_success);
2453 delayed()->nop();
2454 } else {
2455 brx(Assembler::notEqual, false, Assembler::pn, *L_failure);
2456 delayed()->nop();
2457 FINAL_JUMP(*L_success);
2458 }
2459 }
2461 bind(L_fallthrough);
2463 #undef FINAL_JUMP
2464 }
2467 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
2468 Register super_klass,
2469 Register count_temp,
2470 Register scan_temp,
2471 Register scratch_reg,
2472 Register coop_reg,
2473 Label* L_success,
2474 Label* L_failure) {
2475 assert_different_registers(sub_klass, super_klass,
2476 count_temp, scan_temp, scratch_reg, coop_reg);
2478 Label L_fallthrough, L_loop;
2479 int label_nulls = 0;
2480 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
2481 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
2482 assert(label_nulls <= 1, "at most one NULL in the batch");
2484 // a couple of useful fields in sub_klass:
2485 int ss_offset = in_bytes(Klass::secondary_supers_offset());
2486 int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
2488 // Do a linear scan of the secondary super-klass chain.
2489 // This code is rarely used, so simplicity is a virtue here.
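// Conceptually, the scan below implements (a sketch):
//   for (i = 0; i < secondary_supers->length(); i++)
//     if (secondary_supers->at(i) == super_klass)
//       { sub_klass->secondary_super_cache = super_klass; goto success; }
//   goto failure;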
2491 #ifndef PRODUCT
2492 int* pst_counter = &SharedRuntime::_partial_subtype_ctr;
2493 inc_counter((address) pst_counter, count_temp, scan_temp);
2494 #endif
2496 // We will consult the secondary-super array.
2497 ld_ptr(sub_klass, ss_offset, scan_temp);
2499 Register search_key = super_klass;
2501 // Load the array length. (The unsigned lduw does the right thing on LP64.)
2502 lduw(scan_temp, Array<Klass*>::length_offset_in_bytes(), count_temp);
2504 // Check for empty secondary super list
2505 tst(count_temp);
2507 // In the array of super classes, elements are pointer-sized.
2508 int element_size = wordSize;
2510 // Top of search loop
2511 bind(L_loop);
2512 br(Assembler::equal, false, Assembler::pn, *L_failure);
2513 delayed()->add(scan_temp, element_size, scan_temp);
2515 // Skip the array header in all array accesses.
2516 int elem_offset = Array<Klass*>::base_offset_in_bytes();
2517 elem_offset -= element_size; // the scan pointer was pre-incremented also
2519 // Load next super to check
2520 ld_ptr( scan_temp, elem_offset, scratch_reg );
2522 // Look for Rsuper_klass on Rsub_klass's secondary super-class-overflow list
2523 cmp(scratch_reg, search_key);
2525 // A miss means we are NOT a subtype and need to keep looping
2526 brx(Assembler::notEqual, false, Assembler::pn, L_loop);
2527 delayed()->deccc(count_temp); // decrement trip counter in delay slot
2529 // Success. Cache the super we found and proceed in triumph.
2530 st_ptr(super_klass, sub_klass, sc_offset);
2532 if (L_success != &L_fallthrough) {
2533 ba(*L_success);
2534 delayed()->nop();
2535 }
2537 bind(L_fallthrough);
2538 }
2541 RegisterOrConstant MacroAssembler::argument_offset(RegisterOrConstant arg_slot,
2542 Register temp_reg,
2543 int extra_slot_offset) {
2544 // cf. TemplateTable::prepare_invoke(), if (load_receiver).
2545 int stackElementSize = Interpreter::stackElementSize;
2546 int offset = extra_slot_offset * stackElementSize;
2547 if (arg_slot.is_constant()) {
2548 offset += arg_slot.as_constant() * stackElementSize;
2549 return offset;
2550 } else {
2551 assert(temp_reg != noreg, "must specify");
2552 sll_ptr(arg_slot.as_register(), exact_log2(stackElementSize), temp_reg);
2553 if (offset != 0)
2554 add(temp_reg, offset, temp_reg);
2555 return temp_reg;
2556 }
2557 }
2560 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
2561 Register temp_reg,
2562 int extra_slot_offset) {
2563 return Address(Gargs, argument_offset(arg_slot, temp_reg, extra_slot_offset));
2564 }
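// Usage sketch (illustrative, assuming LP64's 8-byte stackElementSize):
//   argument_address(RegisterOrConstant(1), noreg, 0)
// folds to Address(Gargs, 8) with no instructions emitted, while a register
// arg_slot costs one sll_ptr (plus an add when extra_slot_offset != 0).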
2567 void MacroAssembler::biased_locking_enter(Register obj_reg, Register mark_reg,
2568 Register temp_reg,
2569 Label& done, Label* slow_case,
2570 BiasedLockingCounters* counters) {
2571 assert(UseBiasedLocking, "why call this otherwise?");
2573 if (PrintBiasedLockingStatistics) {
2574 assert_different_registers(obj_reg, mark_reg, temp_reg, O7);
2575 if (counters == NULL)
2576 counters = BiasedLocking::counters();
2577 }
2579 Label cas_label;
2581 // Biased locking
2582 // See whether the lock is currently biased toward our thread and
2583 // whether the epoch is still valid
2584 // Note that the runtime guarantees sufficient alignment of JavaThread
2585 // pointers to allow age to be placed into low bits
2586 assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
2587 and3(mark_reg, markOopDesc::biased_lock_mask_in_place, temp_reg);
2588 cmp_and_brx_short(temp_reg, markOopDesc::biased_lock_pattern, Assembler::notEqual, Assembler::pn, cas_label);
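// For reference (per markOop.hpp of this era): a biased mark word is laid
// out, low bits first, as [lock(2) : biased_lock(1) : age(4) : epoch(2) :
// JavaThread*], and biased_lock_pattern is 0b101 - the biased_lock bit set
// with lock bits 01.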
2590 load_klass(obj_reg, temp_reg);
2591 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
2592 or3(G2_thread, temp_reg, temp_reg);
2593 xor3(mark_reg, temp_reg, temp_reg);
2594 andcc(temp_reg, ~((int) markOopDesc::age_mask_in_place), temp_reg);
2595 if (counters != NULL) {
2596 cond_inc(Assembler::equal, (address) counters->biased_lock_entry_count_addr(), mark_reg, temp_reg);
2597 // Reload mark_reg as we may need it later
2598 ld_ptr(Address(obj_reg, oopDesc::mark_offset_in_bytes()), mark_reg);
2599 }
2600 brx(Assembler::equal, true, Assembler::pt, done);
2601 delayed()->nop();
2603 Label try_revoke_bias;
2604 Label try_rebias;
2605 Address mark_addr = Address(obj_reg, oopDesc::mark_offset_in_bytes());
2606 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
2608 // At this point we know that the header has the bias pattern and
2609 // that we are not the bias owner in the current epoch. We need to
2610 // figure out more details about the state of the header in order to
2611 // know what operations can be legally performed on the object's
2612 // header.
2614 // If the low three bits in the xor result aren't clear, that means
2615 // the prototype header is no longer biased and we have to revoke
2616 // the bias on this object.
2617 btst(markOopDesc::biased_lock_mask_in_place, temp_reg);
2618 brx(Assembler::notZero, false, Assembler::pn, try_revoke_bias);
2620 // Biasing is still enabled for this data type. See whether the
2621 // epoch of the current bias is still valid, meaning that the epoch
2622 // bits of the mark word are equal to the epoch bits of the
2623 // prototype header. (Note that the prototype header's epoch bits
2624 // only change at a safepoint.) If not, attempt to rebias the object
2625 // toward the current thread. Note that we must be absolutely sure
2626 // that the current epoch is invalid in order to do this because
2627 // otherwise the manipulations it performs on the mark word are
2628 // illegal.
2629 delayed()->btst(markOopDesc::epoch_mask_in_place, temp_reg);
2630 brx(Assembler::notZero, false, Assembler::pn, try_rebias);
2632 // The epoch of the current bias is still valid but we know nothing
2633 // about the owner; it might be set or it might be clear. Try to
2634 // acquire the bias of the object using an atomic operation. If this
2635 // fails we will go in to the runtime to revoke the object's bias.
2636 // Note that we first construct the presumed unbiased header so we
2637 // don't accidentally blow away another thread's valid bias.
2638 delayed()->and3(mark_reg,
2639 markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place,
2640 mark_reg);
2641 or3(G2_thread, mark_reg, temp_reg);
2642 cas_ptr(mark_addr.base(), mark_reg, temp_reg);
2643 // If the biasing toward our thread failed, this means that
2644 // another thread succeeded in biasing it toward itself and we
2645 // need to revoke that bias. The revocation will occur in the
2646 // interpreter runtime in the slow case.
2647 cmp(mark_reg, temp_reg);
2648 if (counters != NULL) {
2649 cond_inc(Assembler::zero, (address) counters->anonymously_biased_lock_entry_count_addr(), mark_reg, temp_reg);
2650 }
2651 if (slow_case != NULL) {
2652 brx(Assembler::notEqual, true, Assembler::pn, *slow_case);
2653 delayed()->nop();
2654 }
2655 ba_short(done);
2657 bind(try_rebias);
2658 // At this point we know the epoch has expired, meaning that the
2659 // current "bias owner", if any, is actually invalid. Under these
2660 // circumstances _only_, we are allowed to use the current header's
2661 // value as the comparison value when doing the cas to acquire the
2662 // bias in the current epoch. In other words, we allow transfer of
2663 // the bias from one thread to another directly in this situation.
2664 //
2665 // FIXME: due to a lack of registers we currently blow away the age
2666 // bits in this situation. Should attempt to preserve them.
2667 load_klass(obj_reg, temp_reg);
2668 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
2669 or3(G2_thread, temp_reg, temp_reg);
2670 cas_ptr(mark_addr.base(), mark_reg, temp_reg);
2671 // If the biasing toward our thread failed, this means that
2672 // another thread succeeded in biasing it toward itself and we
2673 // need to revoke that bias. The revocation will occur in the
2674 // interpreter runtime in the slow case.
2675 cmp(mark_reg, temp_reg);
2676 if (counters != NULL) {
2677 cond_inc(Assembler::zero, (address) counters->rebiased_lock_entry_count_addr(), mark_reg, temp_reg);
2678 }
2679 if (slow_case != NULL) {
2680 brx(Assembler::notEqual, true, Assembler::pn, *slow_case);
2681 delayed()->nop();
2682 }
2683 ba_short(done);
2685 bind(try_revoke_bias);
2686 // The prototype mark in the klass doesn't have the bias bit set any
2687 // more, indicating that objects of this data type are not supposed
2688 // to be biased any more. We are going to try to reset the mark of
2689 // this object to the prototype value and fall through to the
2690 // CAS-based locking scheme. Note that if our CAS fails, it means
2691 // that another thread raced us for the privilege of revoking the
2692 // bias of this particular object, so it's okay to continue in the
2693 // normal locking code.
2694 //
2695 // FIXME: due to a lack of registers we currently blow away the age
2696 // bits in this situation. Should attempt to preserve them.
2697 load_klass(obj_reg, temp_reg);
2698 ld_ptr(Address(temp_reg, Klass::prototype_header_offset()), temp_reg);
2699 cas_ptr(mark_addr.base(), mark_reg, temp_reg);
2700 // Fall through to the normal CAS-based lock, because no matter what
2701 // the result of the above CAS, some thread must have succeeded in
2702 // removing the bias bit from the object's header.
2703 if (counters != NULL) {
2704 cmp(mark_reg, temp_reg);
2705 cond_inc(Assembler::zero, (address) counters->revoked_lock_entry_count_addr(), mark_reg, temp_reg);
2706 }
2708 bind(cas_label);
2709 }
2711 void MacroAssembler::biased_locking_exit (Address mark_addr, Register temp_reg, Label& done,
2712 bool allow_delay_slot_filling) {
2713 // Check for biased locking unlock case, which is a no-op
2714 // Note: we do not have to check the thread ID for two reasons.
2715 // First, the interpreter checks for IllegalMonitorStateException at
2716 // a higher level. Second, if the bias was revoked while we held the
2717 // lock, the object could not be rebiased toward another thread, so
2718 // the bias bit would be clear.
2719 ld_ptr(mark_addr, temp_reg);
2720 and3(temp_reg, markOopDesc::biased_lock_mask_in_place, temp_reg);
2721 cmp(temp_reg, markOopDesc::biased_lock_pattern);
2722 brx(Assembler::equal, allow_delay_slot_filling, Assembler::pt, done);
2723 delayed();
2724 if (!allow_delay_slot_filling) {
2725 nop();
2726 }
2727 }
2730 // compiler_lock_object() and compiler_unlock_object() are direct transliterations
2731 // of i486.ad fast_lock() and fast_unlock(). See those methods for detailed comments.
2732 // The code could be tightened up considerably.
2733 //
2734 // box->dhw disposition - post-conditions at DONE_LABEL.
2735 // - Successful inflated lock: box->dhw != 0.
2736 // Any non-zero value suffices.
2737 // Consider G2_thread, rsp, boxReg, or unused_mark()
2738 // - Successful Stack-lock: box->dhw == mark.
2739 // box->dhw must contain the displaced mark word value
2740 // - Failure -- icc.ZFlag == 0 and box->dhw is undefined.
2741 // The slow-path fast_enter() and slow_enter() operators
2742 // are responsible for setting box->dhw = NonZero (typically ::unused_mark).
2743 // - Biased: box->dhw is undefined
2744 //
2745 // SPARC refworkload performance - specifically jetstream and scimark - is
2746 // extremely sensitive to the size of the code emitted by compiler_lock_object
2747 // and compiler_unlock_object. Critically, the key factor is code size, not path
2748 // length. (Simple experiments padding CLO with unexecuted NOPs demonstrate the
2749 // effect.)
2752 void MacroAssembler::compiler_lock_object(Register Roop, Register Rmark,
2753 Register Rbox, Register Rscratch,
2754 BiasedLockingCounters* counters,
2755 bool try_bias) {
2756 Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
2758 verify_oop(Roop);
2759 Label done ;
2761 if (counters != NULL) {
2762 inc_counter((address) counters->total_entry_count_addr(), Rmark, Rscratch);
2763 }
2765 if (EmitSync & 1) {
2766 mov(3, Rscratch);
2767 st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
2768 cmp(SP, G0);
2769 return ;
2770 }
2772 if (EmitSync & 2) {
2774 // Fetch object's markword
2775 ld_ptr(mark_addr, Rmark);
2777 if (try_bias) {
2778 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
2779 }
2781 // Save Rbox in Rscratch to be used for the cas operation
2782 mov(Rbox, Rscratch);
2784 // set Rmark to markOop | markOopDesc::unlocked_value
2785 or3(Rmark, markOopDesc::unlocked_value, Rmark);
2787 // Initialize the box. (Must happen before we update the object mark!)
2788 st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
2790 // compare object markOop with Rmark and if equal exchange Rscratch with object markOop
2791 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
2792 cas_ptr(mark_addr.base(), Rmark, Rscratch);
2794 // if compare/exchange succeeded we found an unlocked object and we now have locked it
2795 // hence we are done
2796 cmp(Rmark, Rscratch);
2797 #ifdef _LP64
2798 sub(Rscratch, STACK_BIAS, Rscratch);
2799 #endif
2800 brx(Assembler::equal, false, Assembler::pt, done);
2801 delayed()->sub(Rscratch, SP, Rscratch); // pull next instruction into delay slot
2803 // we did not find an unlocked object so see if this is a recursive case
2804 // sub(Rscratch, SP, Rscratch);
2805 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
2806 andcc(Rscratch, 0xfffff003, Rscratch);
2807 st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
2808 bind (done);
2809 return ;
2810 }
2812 Label Egress ;
2814 if (EmitSync & 256) {
2815 Label IsInflated ;
2817 ld_ptr(mark_addr, Rmark); // fetch obj->mark
2818 // Triage: biased, stack-locked, neutral, inflated
2819 if (try_bias) {
2820 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
2821 // Invariant: if control reaches this point in the emitted stream
2822 // then Rmark has not been modified.
2823 }
2825 // Store mark into displaced mark field in the on-stack basic-lock "box"
2826 // Critically, this must happen before the CAS
2827 // Maximize the ST-CAS distance to minimize the ST-before-CAS penalty.
2828 st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
2829 andcc(Rmark, 2, G0);
2830 brx(Assembler::notZero, false, Assembler::pn, IsInflated);
2831 delayed()->
2833 // Try stack-lock acquisition.
2834 // Beware: the 1st instruction is in a delay slot
2835 mov(Rbox, Rscratch);
2836 or3(Rmark, markOopDesc::unlocked_value, Rmark);
2837 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
2838 cas_ptr(mark_addr.base(), Rmark, Rscratch);
2839 cmp(Rmark, Rscratch);
2840 brx(Assembler::equal, false, Assembler::pt, done);
2841 delayed()->sub(Rscratch, SP, Rscratch);
2843 // Stack-lock attempt failed - check for recursive stack-lock.
2844 // See the comments below about how we might remove this case.
2845 #ifdef _LP64
2846 sub(Rscratch, STACK_BIAS, Rscratch);
2847 #endif
2848 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
2849 andcc(Rscratch, 0xfffff003, Rscratch);
2850 br(Assembler::always, false, Assembler::pt, done);
2851 delayed()-> st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
2853 bind(IsInflated);
2854 if (EmitSync & 64) {
2855 // If m->owner != null goto IsLocked
2856 // Pessimistic form: Test-and-CAS vs CAS
2857 // The optimistic form avoids RTS->RTO cache line upgrades.
2858 ld_ptr(Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
2859 andcc(Rscratch, Rscratch, G0);
2860 brx(Assembler::notZero, false, Assembler::pn, done);
2861 delayed()->nop();
2862 // m->owner == null : it's unlocked.
2863 }
2865 // Try to CAS m->owner from null to Self
2866 // Invariant: if we acquire the lock then _recursions should be 0.
2867 add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
2868 mov(G2_thread, Rscratch);
2869 cas_ptr(Rmark, G0, Rscratch);
2870 cmp(Rscratch, G0);
2871 // Intentional fall-through into done
2872 } else {
2873 // Aggressively avoid the Store-before-CAS penalty
2874 // Defer the store into box->dhw until after the CAS
2875 Label IsInflated, Recursive ;
2877 // Anticipate CAS -- Avoid RTS->RTO upgrade
2878 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads);
2880 ld_ptr(mark_addr, Rmark); // fetch obj->mark
2881 // Triage: biased, stack-locked, neutral, inflated
2883 if (try_bias) {
2884 biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters);
2885 // Invariant: if control reaches this point in the emitted stream
2886 // then Rmark has not been modified.
2887 }
2888 andcc(Rmark, 2, G0);
2889 brx(Assembler::notZero, false, Assembler::pn, IsInflated);
2890 delayed()-> // Beware - dangling delay-slot
2892 // Try stack-lock acquisition.
2893 // Transiently install BUSY (0) encoding in the mark word.
2894 // if the CAS of 0 into the mark was successful then we execute:
2895 // ST box->dhw = mark -- save fetched mark in on-stack basiclock box
2896 // ST obj->mark = box -- overwrite transient 0 value
2897 // This presumes TSO, of course.
2899 mov(0, Rscratch);
2900 or3(Rmark, markOopDesc::unlocked_value, Rmark);
2901 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
2902 cas_ptr(mark_addr.base(), Rmark, Rscratch);
2903 // prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads);
2904 cmp(Rscratch, Rmark);
2905 brx(Assembler::notZero, false, Assembler::pn, Recursive);
2906 delayed()->st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());
2907 if (counters != NULL) {
2908 cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch);
2909 }
2910 ba(done);
2911 delayed()->st_ptr(Rbox, mark_addr);
2913 bind(Recursive);
2914 // Stack-lock attempt failed - check for recursive stack-lock.
2915 // Tests show that we can remove the recursive case with no impact
2916 // on refworkload 0.83. If we need to reduce the size of the code
2917 // emitted by compiler_lock_object() the recursive case is perfect
2918 // candidate.
2919 //
2920 // A more extreme idea is to always inflate on stack-lock recursion.
2921 // This lets us eliminate the recursive checks in compiler_lock_object
2922 // and compiler_unlock_object and the (box->dhw == 0) encoding.
2923 // A brief experiment - requiring changes to synchronizer.cpp and the
2924 // interpreter - showed a performance *increase*. In the same experiment I eliminated
2925 // the fast-path stack-lock code from the interpreter and always passed
2926 // control to the "slow" operators in synchronizer.cpp.
2928 // RScratch contains the fetched obj->mark value from the failed CAS.
2929 #ifdef _LP64
2930 sub(Rscratch, STACK_BIAS, Rscratch);
2931 #endif
2932 sub(Rscratch, SP, Rscratch);
2933 assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
2934 andcc(Rscratch, 0xfffff003, Rscratch);
2935 if (counters != NULL) {
2936 // Accounting needs the Rscratch register
2937 st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
2938 cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch);
2939 ba_short(done);
2940 } else {
2941 ba(done);
2942 delayed()->st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
2943 }
2945 bind (IsInflated);
2946 if (EmitSync & 64) {
2947 // If m->owner != null goto IsLocked
2948 // Test-and-CAS vs CAS
2949 // Pessimistic form avoids futile (doomed) CAS attempts
2950 // The optimistic form avoids RTS->RTO cache line upgrades.
2951 ld_ptr(Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
2952 andcc(Rscratch, Rscratch, G0);
2953 brx(Assembler::notZero, false, Assembler::pn, done);
2954 delayed()->nop();
2955 // m->owner == null : it's unlocked.
2956 }
2958 // Try to CAS m->owner from null to Self
2959 // Invariant: if we acquire the lock then _recursions should be 0.
2960 add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
2961 mov(G2_thread, Rscratch);
2962 cas_ptr(Rmark, G0, Rscratch);
2963 cmp(Rscratch, G0);
2964 // ST box->displaced_header = NonZero.
2965 // Any non-zero value suffices:
2966 // unused_mark(), G2_thread, RBox, RScratch, rsp, etc.
2967 st_ptr(Rbox, Rbox, BasicLock::displaced_header_offset_in_bytes());
2968 // Intentional fall-through into done
2969 }
2971 bind (done);
2972 }
2974 void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark,
2975 Register Rbox, Register Rscratch,
2976 bool try_bias) {
2977 Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
2979 Label done ;
2981 if (EmitSync & 4) {
2982 cmp(SP, G0);
2983 return ;
2984 }
2986 if (EmitSync & 8) {
2987 if (try_bias) {
2988 biased_locking_exit(mark_addr, Rscratch, done);
2989 }
2991 // Test first if it is a fast recursive unlock
2992 ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rmark);
2993 br_null_short(Rmark, Assembler::pt, done);
2995 // Check if it is still a lightweight lock; this is true if we see
2996 // the stack address of the basicLock in the markOop of the object
2997 assert(mark_addr.disp() == 0, "cas must take a zero displacement");
2998 cas_ptr(mark_addr.base(), Rbox, Rmark);
2999 ba(done);
3000 delayed()->cmp(Rbox, Rmark);
3001 bind(done);
3002 return ;
3003 }
3005 // Beware ... If the aggregate size of the code emitted by CLO and CUO
3006 // is too large, performance rolls abruptly off a cliff.
3007 // This could be related to inlining policies, code cache management, or
3008 // I$ effects.
3009 Label LStacked ;
3011 if (try_bias) {
3012 // TODO: eliminate redundant LDs of obj->mark
3013 biased_locking_exit(mark_addr, Rscratch, done);
3014 }
3016 ld_ptr(Roop, oopDesc::mark_offset_in_bytes(), Rmark);
3017 ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rscratch);
3018 andcc(Rscratch, Rscratch, G0);
3019 brx(Assembler::zero, false, Assembler::pn, done);
3020 delayed()->nop(); // consider: relocate fetch of mark, above, into this DS
3021 andcc(Rmark, 2, G0);
3022 brx(Assembler::zero, false, Assembler::pt, LStacked);
3023 delayed()->nop();
3025 // It's inflated
3026 // Conceptually we need a #loadstore|#storestore "release" MEMBAR before
3027 // the ST of 0 into _owner which releases the lock. This prevents loads
3028 // and stores within the critical section from reordering (floating)
3029 // past the store that releases the lock. But TSO is a strong memory model
3030 // and that particular flavor of barrier is a noop, so we can safely elide it.
3031 // Note that we use 1-0 locking by default for the inflated case. We
3032 // close the resultant (and rare) race by having contended threads in
3033 // monitorenter periodically poll _owner.
3034 ld_ptr(Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch);
3035 ld_ptr(Rmark, ObjectMonitor::recursions_offset_in_bytes() - 2, Rbox);
3036 xor3(Rscratch, G2_thread, Rscratch);
3037 orcc(Rbox, Rscratch, Rbox);
3038 brx(Assembler::notZero, false, Assembler::pn, done);
3039 delayed()->
3040 ld_ptr(Rmark, ObjectMonitor::EntryList_offset_in_bytes() - 2, Rscratch);
3041 ld_ptr(Rmark, ObjectMonitor::cxq_offset_in_bytes() - 2, Rbox);
3042 orcc(Rbox, Rscratch, G0);
3043 if (EmitSync & 65536) {
3044 Label LSucc ;
3045 brx(Assembler::notZero, false, Assembler::pn, LSucc);
3046 delayed()->nop();
3047 ba(done);
3048 delayed()->st_ptr(G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
3050 bind(LSucc);
3051 st_ptr(G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
3052 if (os::is_MP()) { membar (StoreLoad); }
3053 ld_ptr(Rmark, ObjectMonitor::succ_offset_in_bytes() - 2, Rscratch);
3054 andcc(Rscratch, Rscratch, G0);
3055 brx(Assembler::notZero, false, Assembler::pt, done);
3056 delayed()->andcc(G0, G0, G0);
3057 add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark);
3058 mov(G2_thread, Rscratch);
3059 cas_ptr(Rmark, G0, Rscratch);
3060 // invert icc.zf and goto done
3061 br_notnull(Rscratch, false, Assembler::pt, done);
3062 delayed()->cmp(G0, G0);
3063 ba(done);
3064 delayed()->cmp(G0, 1);
3065 } else {
3066 brx(Assembler::notZero, false, Assembler::pn, done);
3067 delayed()->nop();
3068 ba(done);
3069 delayed()->st_ptr(G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2);
3070 }
3072 bind (LStacked);
3073 // Consider: we could replace the expensive CAS in the exit
3074 // path with a simple ST of the displaced mark value fetched from
3075 // the on-stack basiclock box. That admits a race where a thread T2
3076 // in the slow lock path -- inflating with monitor M -- could race a
3077 // thread T1 in the fast unlock path, resulting in a missed wakeup for T2.
3078 // More precisely T1 in the stack-lock unlock path could "stomp" the
3079 // inflated mark value M installed by T2, resulting in an orphan
3080 // object monitor M and T2 becoming stranded. We can remedy that situation
3081 // by having T2 periodically poll the object's mark word using timed wait
3082 // operations. If T2 discovers that a stomp has occurred it vacates
3083 // the monitor M and wakes any other threads stranded on the now-orphan M.
3084 // In addition the monitor scavenger, which performs deflation,
3085 // would also need to check for orphan monitors and stranded threads.
3086 //
3087 // Finally, inflation is also used when T2 needs to assign a hashCode
3088 // to O and O is stack-locked by T1. The "stomp" race could cause
3089 // an assigned hashCode value to be lost. We can avoid that condition
3090 // and provide the necessary hashCode stability invariants by ensuring
3091 // that hashCode generation is idempotent between copying GCs.
3092 // For example we could compute the hashCode of an object O as
3093 // O's heap address XOR some high quality RNG value that is refreshed
3094 // at GC-time. The monitor scavenger would install the hashCode
3095 // found in any orphan monitors. Again, the mechanism admits a
3096 // lost-update "stomp" WAW race but detects and recovers as needed.
3097 //
3098 // A prototype implementation showed excellent results, although
3099 // the scavenger and timeout code was rather involved.
3101 cas_ptr(mark_addr.base(), Rbox, Rscratch);
3102 cmp(Rbox, Rscratch);
3103 // Intentional fall through into done ...
3105 bind(done);
3106 }
3110 void MacroAssembler::print_CPU_state() {
3111 // %%%%% need to implement this
3112 }
3114 void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
3115 // %%%%% need to implement this
3116 }
3118 void MacroAssembler::push_IU_state() {
3119 // %%%%% need to implement this
3120 }
3123 void MacroAssembler::pop_IU_state() {
3124 // %%%%% need to implement this
3125 }
3128 void MacroAssembler::push_FPU_state() {
3129 // %%%%% need to implement this
3130 }
3133 void MacroAssembler::pop_FPU_state() {
3134 // %%%%% need to implement this
3135 }
3138 void MacroAssembler::push_CPU_state() {
3139 // %%%%% need to implement this
3140 }
3143 void MacroAssembler::pop_CPU_state() {
3144 // %%%%% need to implement this
3145 }
3149 void MacroAssembler::verify_tlab() {
3150 #ifdef ASSERT
3151 if (UseTLAB && VerifyOops) {
3152 Label next, next2, ok;
3153 Register t1 = L0;
3154 Register t2 = L1;
3155 Register t3 = L2;
3157 save_frame(0);
3158 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1);
3159 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2);
3160 or3(t1, t2, t3);
3161 cmp_and_br_short(t1, t2, Assembler::greaterEqual, Assembler::pn, next);
3162 STOP("assert(top >= start)");
3163 should_not_reach_here();
3165 bind(next);
3166 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1);
3167 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t2);
3168 or3(t3, t2, t3);
3169 cmp_and_br_short(t1, t2, Assembler::lessEqual, Assembler::pn, next2);
3170 STOP("assert(top <= end)");
3171 should_not_reach_here();
3173 bind(next2);
3174 and3(t3, MinObjAlignmentInBytesMask, t3);
3175 cmp_and_br_short(t3, 0, Assembler::lessEqual, Assembler::pn, ok);
3176 STOP("assert(aligned)");
3177 should_not_reach_here();
3179 bind(ok);
3180 restore();
3181 }
3182 #endif
3183 }
3186 void MacroAssembler::eden_allocate(
3187 Register obj, // result: pointer to object after successful allocation
3188 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
3189 int con_size_in_bytes, // object size in bytes if known at compile time
3190 Register t1, // temp register
3191 Register t2, // temp register
3192 Label& slow_case // continuation point if fast allocation fails
3193 ){
3194 // make sure arguments make sense
3195 assert_different_registers(obj, var_size_in_bytes, t1, t2);
3196 assert(0 <= con_size_in_bytes && Assembler::is_simm13(con_size_in_bytes), "illegal object size");
3197 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment");
3199 if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
3200 // No allocation in the shared eden.
3201 ba(slow_case);
3202 delayed()->nop();
3203 } else {
3204 // get eden boundaries
3205 // note: we need both top & top_addr!
3206 const Register top_addr = t1;
3207 const Register end = t2;
3209 CollectedHeap* ch = Universe::heap();
3210 set((intx)ch->top_addr(), top_addr);
3211 intx delta = (intx)ch->end_addr() - (intx)ch->top_addr();
3212 ld_ptr(top_addr, delta, end);
3213 ld_ptr(top_addr, 0, obj);
3215 // try to allocate
3216 Label retry;
3217 bind(retry);
3218 #ifdef ASSERT
3219 // make sure eden top is properly aligned
3220 {
3221 Label L;
3222 btst(MinObjAlignmentInBytesMask, obj);
3223 br(Assembler::zero, false, Assembler::pt, L);
3224 delayed()->nop();
3225 STOP("eden top is not properly aligned");
3226 bind(L);
3227 }
3228 #endif // ASSERT
3229 const Register free = end;
3230 sub(end, obj, free); // compute amount of free space
3231 if (var_size_in_bytes->is_valid()) {
3232 // size is unknown at compile time
3233 cmp(free, var_size_in_bytes);
3234 br(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go to the slow case
3235 delayed()->add(obj, var_size_in_bytes, end);
3236 } else {
3237 // size is known at compile time
3238 cmp(free, con_size_in_bytes);
3239 br(Assembler::lessUnsigned, false, Assembler::pn, slow_case); // if there is not enough space go to the slow case
3240 delayed()->add(obj, con_size_in_bytes, end);
3241 }
3242 // Compare obj with the value at top_addr; if still equal, swap the value of
3243 // end with the value at top_addr. If not equal, read the value at top_addr
3244 // into end.
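// In effect, atomically:
//   { old = *top_addr; if (old == obj) *top_addr = end; end = old; }
// so the cmp/brx below retries whenever another thread moved top first.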
3245 cas_ptr(top_addr, obj, end);
3246 // if someone beat us on the allocation, try again, otherwise continue
3247 cmp(obj, end);
3248 brx(Assembler::notEqual, false, Assembler::pn, retry);
3249 delayed()->mov(end, obj); // nop if successful since obj == end
3251 #ifdef ASSERT
3252 // make sure eden top is properly aligned
3253 {
3254 Label L;
3255 const Register top_addr = t1;
3257 set((intx)ch->top_addr(), top_addr);
3258 ld_ptr(top_addr, 0, top_addr);
3259 btst(MinObjAlignmentInBytesMask, top_addr);
3260 br(Assembler::zero, false, Assembler::pt, L);
3261 delayed()->nop();
3262 STOP("eden top is not properly aligned");
3263 bind(L);
3264 }
3265 #endif // ASSERT
3266 }
3267 }
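// For reference, a C-style sketch (commentary only; helper names here are
// illustrative, not real HotSpot API) of the lock-free bump-pointer loop
// that eden_allocate() emits above:
//
//   char* obj;
//   for (;;) {
//     obj = *top_addr;                          // current eden top
//     char* new_top = obj + size_in_bytes;      // proposed new top
//     if ((size_t)(eden_end - obj) < size_in_bytes)
//       goto slow_case;                         // not enough free space
//     if (cmpxchg(top_addr, obj, new_top) == obj)
//       break;                                  // we own [obj, new_top)
//     // another thread won the race: retry with the refreshed top
//   }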
3270 void MacroAssembler::tlab_allocate(
3271 Register obj, // result: pointer to object after successful allocation
3272 Register var_size_in_bytes, // object size in bytes if unknown at compile time; invalid otherwise
3273 int con_size_in_bytes, // object size in bytes if known at compile time
3274 Register t1, // temp register
3275 Label& slow_case // continuation point if fast allocation fails
3276 ){
3277 // make sure arguments make sense
3278 assert_different_registers(obj, var_size_in_bytes, t1);
3279 assert(0 <= con_size_in_bytes && is_simm13(con_size_in_bytes), "illegal object size");
3280 assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "object size is not multiple of alignment");
3282 const Register free = t1;
3284 verify_tlab();
3286 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), obj);
3288 // calculate amount of free space
3289 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), free);
3290 sub(free, obj, free);
3292 Label done;
3293 if (var_size_in_bytes == noreg) {
3294 cmp(free, con_size_in_bytes);
3295 } else {
3296 cmp(free, var_size_in_bytes);
3297 }
3298 br(Assembler::less, false, Assembler::pn, slow_case);
3299 // calculate the new top pointer
3300 if (var_size_in_bytes == noreg) {
3301 delayed()->add(obj, con_size_in_bytes, free);
3302 } else {
3303 delayed()->add(obj, var_size_in_bytes, free);
3304 }
3306 bind(done);
3308 #ifdef ASSERT
3309 // make sure new free pointer is properly aligned
3310 {
3311 Label L;
3312 btst(MinObjAlignmentInBytesMask, free);
3313 br(Assembler::zero, false, Assembler::pt, L);
3314 delayed()->nop();
3315 STOP("updated TLAB free is not properly aligned");
3316 bind(L);
3317 }
3318 #endif // ASSERT
3320 // update the tlab top pointer
3321 st_ptr(free, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
3322 verify_tlab();
3323 }
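// In outline (commentary only; accessor names are illustrative), the TLAB
// fast path above is a purely thread-local bump, so no atomics are needed:
//
//   char* obj = thread->tlab_top();
//   if ((size_t)(thread->tlab_end() - obj) < size_in_bytes) goto slow_case;
//   thread->set_tlab_top(obj + size_in_bytes);  // 'free' above doubles as the new top
//   return obj;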
3326 void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case) {
3327 Register top = O0;
3328 Register t1 = G1;
3329 Register t2 = G3;
3330 Register t3 = O1;
3331 assert_different_registers(top, t1, t2, t3, G4, G5 /* preserve G4 and G5 */);
3332 Label do_refill, discard_tlab;
3334 if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
3335 // No allocation in the shared eden.
3336 ba_short(slow_case);
3337 }
3339 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), top);
3340 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t1);
3341 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), t2);
3343 // calculate amount of free space
3344 sub(t1, top, t1);
3345 srl_ptr(t1, LogHeapWordSize, t1);
3347 // Retain tlab and allocate object in shared space if
3348 // the amount free in the tlab is too large to discard.
3349 cmp(t1, t2);
3350 brx(Assembler::lessEqual, false, Assembler::pt, discard_tlab);
3352 // increment waste limit to prevent getting stuck on this slow path
3353 delayed()->add(t2, ThreadLocalAllocBuffer::refill_waste_limit_increment(), t2);
3354 st_ptr(t2, G2_thread, in_bytes(JavaThread::tlab_refill_waste_limit_offset()));
3355 if (TLABStats) {
3356 // increment number of slow_allocations
3357 ld(G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()), t2);
3358 add(t2, 1, t2);
3359 stw(t2, G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset()));
3360 }
3361 ba_short(try_eden);
3363 bind(discard_tlab);
3364 if (TLABStats) {
3365 // increment number of refills
3366 ld(G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()), t2);
3367 add(t2, 1, t2);
3368 stw(t2, G2_thread, in_bytes(JavaThread::tlab_number_of_refills_offset()));
3369 // accumulate wastage
3370 ld(G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()), t2);
3371 add(t2, t1, t2);
3372 stw(t2, G2_thread, in_bytes(JavaThread::tlab_fast_refill_waste_offset()));
3373 }
3375 // if the tlab is currently allocated (top or end != null) then
3376 // fill [top, end + alignment_reserve) with an array object
3377 br_null_short(top, Assembler::pn, do_refill);
3379 set((intptr_t)markOopDesc::prototype()->copy_set_hash(0x2), t2);
3380 st_ptr(t2, top, oopDesc::mark_offset_in_bytes()); // set up the mark word
3381 // set klass to intArrayKlass
3382 sub(t1, typeArrayOopDesc::header_size(T_INT), t1);
3383 add(t1, ThreadLocalAllocBuffer::alignment_reserve(), t1);
3384 sll_ptr(t1, log2_intptr(HeapWordSize/sizeof(jint)), t1);
3385 st(t1, top, arrayOopDesc::length_offset_in_bytes());
3386 set((intptr_t)Universe::intArrayKlassObj_addr(), t2);
3387 ld_ptr(t2, 0, t2);
3388 // store klass last; concurrent GCs assume the length is valid if the
3389 // klass field is not null.
3390 store_klass(t2, top);
3391 verify_oop(top);
3393 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t1);
3394 sub(top, t1, t1); // size of tlab's allocated portion
3395 incr_allocated_bytes(t1, t2, t3);
3397 // refill the tlab with an eden allocation
3398 bind(do_refill);
3399 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t1);
3400 sll_ptr(t1, LogHeapWordSize, t1);
3401 // allocate new tlab, address returned in top
3402 eden_allocate(top, t1, 0, t2, t3, slow_case);
3404 st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_start_offset()));
3405 st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_top_offset()));
3406 #ifdef ASSERT
3407 // check that tlab_size (t1) is still valid
3408 {
3409 Label ok;
3410 ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t2);
3411 sll_ptr(t2, LogHeapWordSize, t2);
3412 cmp_and_br_short(t1, t2, Assembler::equal, Assembler::pt, ok);
3413 STOP("assert(t1 == tlab_size)");
3414 should_not_reach_here();
3416 bind(ok);
3417 }
3418 #endif // ASSERT
3419 add(top, t1, top); // t1 is tlab_size
3420 sub(top, ThreadLocalAllocBuffer::alignment_reserve_in_bytes(), top);
3421 st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_end_offset()));
3422 verify_tlab();
3423 ba_short(retry);
3424 }
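// The refill policy above, sketched in C style (commentary only; field
// names are illustrative):
//
//   size_t free_words = tlab_end - tlab_top;
//   if (free_words > refill_waste_limit) {
//     // Too much free space to throw away: keep the TLAB and allocate this
//     // one object directly in eden; bump the limit so a thread cannot get
//     // stuck on this path forever.
//     refill_waste_limit += refill_waste_limit_increment;
//     goto try_eden;
//   }
//   // Otherwise discard: overwrite the leftover space with a filler int[]
//   // so heap walkers still see a parseable object, then grab a fresh TLAB
//   // from eden (do_refill).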
3426 void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes,
3427 Register t1, Register t2) {
3428 // Bump total bytes allocated by this thread
3429 assert(t1->is_global(), "must be global reg"); // so all 64 bits are saved on a context switch
3430 assert_different_registers(size_in_bytes.register_or_noreg(), t1, t2);
3431 // v8 support has gone the way of the dodo
3432 ldx(G2_thread, in_bytes(JavaThread::allocated_bytes_offset()), t1);
3433 add(t1, ensure_simm13_or_reg(size_in_bytes, t2), t1);
3434 stx(t1, G2_thread, in_bytes(JavaThread::allocated_bytes_offset()));
3435 }
3437 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
3438 switch (cond) {
3439 // Note some conditions are synonyms for others
3440 case Assembler::never: return Assembler::always;
3441 case Assembler::zero: return Assembler::notZero;
3442 case Assembler::lessEqual: return Assembler::greater;
3443 case Assembler::less: return Assembler::greaterEqual;
3444 case Assembler::lessEqualUnsigned: return Assembler::greaterUnsigned;
3445 case Assembler::lessUnsigned: return Assembler::greaterEqualUnsigned;
3446 case Assembler::negative: return Assembler::positive;
3447 case Assembler::overflowSet: return Assembler::overflowClear;
3448 case Assembler::always: return Assembler::never;
3449 case Assembler::notZero: return Assembler::zero;
3450 case Assembler::greater: return Assembler::lessEqual;
3451 case Assembler::greaterEqual: return Assembler::less;
3452 case Assembler::greaterUnsigned: return Assembler::lessEqualUnsigned;
3453 case Assembler::greaterEqualUnsigned: return Assembler::lessUnsigned;
3454 case Assembler::positive: return Assembler::negative;
3455 case Assembler::overflowClear: return Assembler::overflowSet;
3456 }
3458 ShouldNotReachHere(); return Assembler::overflowClear;
3459 }
3461 void MacroAssembler::cond_inc(Assembler::Condition cond, address counter_ptr,
3462 Register Rtmp1, Register Rtmp2 /*, Register Rtmp3, Register Rtmp4 */) {
3463 Condition negated_cond = negate_condition(cond);
3464 Label L;
3465 brx(negated_cond, false, Assembler::pt, L);
3466 delayed()->nop();
3467 inc_counter(counter_ptr, Rtmp1, Rtmp2);
3468 bind(L);
3469 }
3471 void MacroAssembler::inc_counter(address counter_addr, Register Rtmp1, Register Rtmp2) {
3472 AddressLiteral addrlit(counter_addr);
3473 sethi(addrlit, Rtmp1); // Move hi22 bits into temporary register.
3474 Address addr(Rtmp1, addrlit.low10()); // Build an address with low10 bits.
3475 ld(addr, Rtmp2);
3476 inc(Rtmp2);
3477 st(Rtmp2, addr);
3478 }
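// The sethi/low10 split above, by example (commentary only): a 32-bit
// address is built from its high 22 bits via sethi, with the low 10 bits
// folded into the memory operand as an immediate displacement. For
// counter_addr == 0x12345678:
//
//   sethi %hi(0x12345678), Rtmp1     // Rtmp1 = 0x12345400 (low 10 bits clear)
//   ld    [Rtmp1 + 0x278], Rtmp2     // 0x278 == low10(0x12345678)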
3480 void MacroAssembler::inc_counter(int* counter_addr, Register Rtmp1, Register Rtmp2) {
3481 inc_counter((address) counter_addr, Rtmp1, Rtmp2);
3482 }
3484 SkipIfEqual::SkipIfEqual(
3485 MacroAssembler* masm, Register temp, const bool* flag_addr,
3486 Assembler::Condition condition) {
3487 _masm = masm;
3488 AddressLiteral flag(flag_addr);
3489 _masm->sethi(flag, temp);
3490 _masm->ldub(temp, flag.low10(), temp);
3491 _masm->tst(temp);
3492 _masm->br(condition, false, Assembler::pt, _label);
3493 _masm->delayed()->nop();
3494 }
3496 SkipIfEqual::~SkipIfEqual() {
3497 _masm->bind(_label);
3498 }
3501 // Writes to successive stack pages until the given offset is reached, to
3502 // check for stack overflow + shadow pages. This clobbers Rtsp and Rscratch.
3503 void MacroAssembler::bang_stack_size(Register Rsize, Register Rtsp,
3504 Register Rscratch) {
3505 // Use the current stack pointer as the temp stack pointer
3506 mov(SP, Rtsp);
3508 // Bang stack for total size given plus stack shadow page size.
3509 // Bang one page at a time because a large size can overflow yellow and
3510 // red zones (the bang will fail but stack overflow handling can't tell that
3511 // it was a stack overflow bang vs a regular segv).
3512 int offset = os::vm_page_size();
3513 Register Roffset = Rscratch;
3515 Label loop;
3516 bind(loop);
3517 set((-offset)+STACK_BIAS, Rscratch);
3518 st(G0, Rtsp, Rscratch);
3519 set(offset, Roffset);
3520 sub(Rsize, Roffset, Rsize);
3521 cmp(Rsize, G0);
3522 br(Assembler::greater, false, Assembler::pn, loop);
3523 delayed()->sub(Rtsp, Roffset, Rtsp);
3525 // Bang down shadow pages too.
3526 // The -1 because we already subtracted 1 page.
3527 for (int i = 0; i < StackShadowPages-1; i++) {
3528 set((-i*offset)+STACK_BIAS, Rscratch);
3529 st(G0, Rtsp, Rscratch);
3530 }
3531 }
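// Equivalent C-style outline of the banging loops above (commentary only):
//
//   char* tsp  = sp;
//   int   page = os::vm_page_size();
//   for (intptr_t remaining = size; remaining > 0; remaining -= page) {
//     *(tsp - page + STACK_BIAS) = 0;          // touch one word per page
//     tsp -= page;
//   }
//   for (int i = 0; i < StackShadowPages - 1; i++)
//     *(tsp - i*page + STACK_BIAS) = 0;        // and the shadow pages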
3533 ///////////////////////////////////////////////////////////////////////////////////
3534 #if INCLUDE_ALL_GCS
3536 static address satb_log_enqueue_with_frame = NULL;
3537 static u_char* satb_log_enqueue_with_frame_end = NULL;
3539 static address satb_log_enqueue_frameless = NULL;
3540 static u_char* satb_log_enqueue_frameless_end = NULL;
3542 static int EnqueueCodeSize = 128 DEBUG_ONLY( + 256); // Instructions?
3544 static void generate_satb_log_enqueue(bool with_frame) {
3545 BufferBlob* bb = BufferBlob::create("enqueue_with_frame", EnqueueCodeSize);
3546 CodeBuffer buf(bb);
3547 MacroAssembler masm(&buf);
3549 #define __ masm.
3551 address start = __ pc();
3552 Register pre_val;
3554 Label refill, restart;
3555 if (with_frame) {
3556 __ save_frame(0);
3557 pre_val = I0; // Was O0 before the save.
3558 } else {
3559 pre_val = O0;
3560 }
3562 int satb_q_index_byte_offset =
3563 in_bytes(JavaThread::satb_mark_queue_offset() +
3564 PtrQueue::byte_offset_of_index());
3566 int satb_q_buf_byte_offset =
3567 in_bytes(JavaThread::satb_mark_queue_offset() +
3568 PtrQueue::byte_offset_of_buf());
3570 assert(in_bytes(PtrQueue::byte_width_of_index()) == sizeof(intptr_t) &&
3571 in_bytes(PtrQueue::byte_width_of_buf()) == sizeof(intptr_t),
3572 "check sizes in assembly below");
3574 __ bind(restart);
3576 // Load the index into the SATB buffer. PtrQueue::_index is a size_t
3577 // so ld_ptr is appropriate.
3578 __ ld_ptr(G2_thread, satb_q_index_byte_offset, L0);
3580 // index == 0?
3581 __ cmp_and_brx_short(L0, G0, Assembler::equal, Assembler::pn, refill);
3583 __ ld_ptr(G2_thread, satb_q_buf_byte_offset, L1);
3584 __ sub(L0, oopSize, L0);
3586 __ st_ptr(pre_val, L1, L0); // [_buf + index] := pre_val
3587 if (!with_frame) {
3588 // Use return-from-leaf
3589 __ retl();
3590 __ delayed()->st_ptr(L0, G2_thread, satb_q_index_byte_offset);
3591 } else {
3592 // Not delayed.
3593 __ st_ptr(L0, G2_thread, satb_q_index_byte_offset);
3594 }
3595 if (with_frame) {
3596 __ ret();
3597 __ delayed()->restore();
3598 }
3599 __ bind(refill);
3601 address handle_zero =
3602 CAST_FROM_FN_PTR(address,
3603 &SATBMarkQueueSet::handle_zero_index_for_thread);
3604 // This should be rare enough that we can afford to save all the
3605 // scratch registers that the calling context might be using.
3606 __ mov(G1_scratch, L0);
3607 __ mov(G3_scratch, L1);
3608 __ mov(G4, L2);
3609 // We need the value of O0 above (for the write into the buffer), so we
3610 // save and restore it.
3611 __ mov(O0, L3);
3612 // Since the call will overwrite O7, we save and restore that, as well.
3613 __ mov(O7, L4);
3614 __ call_VM_leaf(L5, handle_zero, G2_thread);
3615 __ mov(L0, G1_scratch);
3616 __ mov(L1, G3_scratch);
3617 __ mov(L2, G4);
3618 __ mov(L3, O0);
3619 __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
3620 __ delayed()->mov(L4, O7);
3622 if (with_frame) {
3623 satb_log_enqueue_with_frame = start;
3624 satb_log_enqueue_with_frame_end = __ pc();
3625 } else {
3626 satb_log_enqueue_frameless = start;
3627 satb_log_enqueue_frameless_end = __ pc();
3628 }
3630 #undef __
3631 }
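// What the generated stub does, in C-style outline (commentary only;
// accessor names are illustrative):
//
//   retry:
//     size_t index = thread->satb_index();     // PtrQueue::_index, in bytes
//     if (index == 0) {                        // buffer is full
//       SATBMarkQueueSet::handle_zero_index_for_thread(thread);
//       goto retry;                            // a fresh buffer was installed
//     }
//     index -= oopSize;
//     *(oop*)((char*)thread->satb_buf() + index) = pre_val;
//     thread->set_satb_index(index);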
3633 static inline void generate_satb_log_enqueue_if_necessary(bool with_frame) {
3634 if (with_frame) {
3635 if (satb_log_enqueue_with_frame == 0) {
3636 generate_satb_log_enqueue(with_frame);
3637 assert(satb_log_enqueue_with_frame != 0, "postcondition.");
3638 if (G1SATBPrintStubs) {
3639 tty->print_cr("Generated with-frame satb enqueue:");
3640 Disassembler::decode((u_char*)satb_log_enqueue_with_frame,
3641 satb_log_enqueue_with_frame_end,
3642 tty);
3643 }
3644 }
3645 } else {
3646 if (satb_log_enqueue_frameless == 0) {
3647 generate_satb_log_enqueue(with_frame);
3648 assert(satb_log_enqueue_frameless != 0, "postcondition.");
3649 if (G1SATBPrintStubs) {
3650 tty->print_cr("Generated frameless satb enqueue:");
3651 Disassembler::decode((u_char*)satb_log_enqueue_frameless,
3652 satb_log_enqueue_frameless_end,
3653 tty);
3654 }
3655 }
3656 }
3657 }
3659 void MacroAssembler::g1_write_barrier_pre(Register obj,
3660 Register index,
3661 int offset,
3662 Register pre_val,
3663 Register tmp,
3664 bool preserve_o_regs) {
3665 Label filtered;
3667 if (obj == noreg) {
3668 // We are not loading the previous value so make
3669 // sure that we don't trash the value in pre_val
3670 // with the code below.
3671 assert_different_registers(pre_val, tmp);
3672 } else {
3673 // We will be loading the previous value
3674 // in this code so...
3675 assert(offset == 0 || index == noreg, "choose one");
3676 assert(pre_val == noreg, "check this code");
3677 }
3679 // Load the flag indicating whether marking is active.
3680 if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
3681 ld(G2,
3682 in_bytes(JavaThread::satb_mark_queue_offset() +
3683 PtrQueue::byte_offset_of_active()),
3684 tmp);
3685 } else {
3686 guarantee(in_bytes(PtrQueue::byte_width_of_active()) == 1,
3687 "Assumption");
3688 ldsb(G2,
3689 in_bytes(JavaThread::satb_mark_queue_offset() +
3690 PtrQueue::byte_offset_of_active()),
3691 tmp);
3692 }
3694 // Is marking active?
3695 cmp_and_br_short(tmp, G0, Assembler::equal, Assembler::pt, filtered);
3697 // Do we need to load the previous value?
3698 if (obj != noreg) {
3699 // Load the previous value...
3700 if (index == noreg) {
3701 if (Assembler::is_simm13(offset)) {
3702 load_heap_oop(obj, offset, tmp);
3703 } else {
3704 set(offset, tmp);
3705 load_heap_oop(obj, tmp, tmp);
3706 }
3707 } else {
3708 load_heap_oop(obj, index, tmp);
3709 }
3710 // Previous value has been loaded into tmp
3711 pre_val = tmp;
3712 }
3714 assert(pre_val != noreg, "must have a real register");
3716 // Is the previous value null?
3717 cmp_and_brx_short(pre_val, G0, Assembler::equal, Assembler::pt, filtered);
3719 // OK, it's not filtered, so we'll need to call enqueue. In the normal
3720 // case, pre_val will be a scratch G-reg, but there are some cases in
3721 // which it's an O-reg. In the former case, do a normal call. In the
3722 // latter, do a save here and call the frameless version.
3724 guarantee(pre_val->is_global() || pre_val->is_out(),
3725 "Or we need to think harder.");
3727 if (pre_val->is_global() && !preserve_o_regs) {
3728 generate_satb_log_enqueue_if_necessary(true); // with frame
3730 call(satb_log_enqueue_with_frame);
3731 delayed()->mov(pre_val, O0);
3732 } else {
3733 generate_satb_log_enqueue_if_necessary(false); // frameless
3735 save_frame(0);
3736 call(satb_log_enqueue_frameless);
3737 delayed()->mov(pre_val->after_save(), O0);
3738 restore();
3739 }
3741 bind(filtered);
3742 }
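// The filtering above, in outline (commentary only; 'field' stands for the
// location obj+offset or obj+index):
//
//   if (!thread->satb_queue_active()) return;   // marking off: filter
//   if (obj != NULL) pre_val = *field;          // load the previous value
//   if (pre_val == NULL) return;                // null: filter
//   satb_log_enqueue(pre_val);                  // stub generated above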
3744 static address dirty_card_log_enqueue = 0;
3745 static u_char* dirty_card_log_enqueue_end = 0;
3747 // This stub gets to assume that O0 contains the object address.
3748 static void generate_dirty_card_log_enqueue(jbyte* byte_map_base) {
3749 BufferBlob* bb = BufferBlob::create("dirty_card_enqueue", EnqueueCodeSize*2);
3750 CodeBuffer buf(bb);
3751 MacroAssembler masm(&buf);
3752 #define __ masm.
3753 address start = __ pc();
3755 Label not_already_dirty, restart, refill;
3757 #ifdef _LP64
3758 __ srlx(O0, CardTableModRefBS::card_shift, O0);
3759 #else
3760 __ srl(O0, CardTableModRefBS::card_shift, O0);
3761 #endif
3762 AddressLiteral addrlit(byte_map_base);
3763 __ set(addrlit, O1); // O1 := <card table base>
3764 __ ldub(O0, O1, O2); // O2 := [O0 + O1]
3766 assert(CardTableModRefBS::dirty_card_val() == 0, "otherwise check this code");
3767 __ cmp_and_br_short(O2, G0, Assembler::notEqual, Assembler::pt, not_already_dirty);
3769 // We didn't take the branch, so we're already dirty: return.
3770 // Use return-from-leaf
3771 __ retl();
3772 __ delayed()->nop();
3774 // Not dirty.
3775 __ bind(not_already_dirty);
3777 // Get O0 + O1 into a reg by itself
3778 __ add(O0, O1, O3);
3780 // First, dirty it.
3781 __ stb(G0, O3, G0); // [cardPtr] := 0 (i.e., dirty).
3783 int dirty_card_q_index_byte_offset =
3784 in_bytes(JavaThread::dirty_card_queue_offset() +
3785 PtrQueue::byte_offset_of_index());
3786 int dirty_card_q_buf_byte_offset =
3787 in_bytes(JavaThread::dirty_card_queue_offset() +
3788 PtrQueue::byte_offset_of_buf());
3789 __ bind(restart);
3791 // Load the index into the update buffer. PtrQueue::_index is
3792 // a size_t so ld_ptr is appropriate here.
3793 __ ld_ptr(G2_thread, dirty_card_q_index_byte_offset, L0);
3795 // index == 0?
3796 __ cmp_and_brx_short(L0, G0, Assembler::equal, Assembler::pn, refill);
3798 __ ld_ptr(G2_thread, dirty_card_q_buf_byte_offset, L1);
3799 __ sub(L0, oopSize, L0);
3801 __ st_ptr(O3, L1, L0); // [_buf + index] := O3 (the card address)
3802 // Use return-from-leaf
3803 __ retl();
3804 __ delayed()->st_ptr(L0, G2_thread, dirty_card_q_index_byte_offset);
3806 __ bind(refill);
3807 address handle_zero =
3808 CAST_FROM_FN_PTR(address,
3809 &DirtyCardQueueSet::handle_zero_index_for_thread);
3810 // This should be rare enough that we can afford to save all the
3811 // scratch registers that the calling context might be using.
3812 __ mov(G1_scratch, L3);
3813 __ mov(G3_scratch, L5);
3814 // We need the value of O3 above (for the write into the buffer), so we
3815 // save and restore it.
3816 __ mov(O3, L6);
3817 // Since the call will overwrite O7, we save and restore that, as well.
3818 __ mov(O7, L4);
3820 __ call_VM_leaf(L7_thread_cache, handle_zero, G2_thread);
3821 __ mov(L3, G1_scratch);
3822 __ mov(L5, G3_scratch);
3823 __ mov(L6, O3);
3824 __ br(Assembler::always, /*annul*/false, Assembler::pt, restart);
3825 __ delayed()->mov(L4, O7);
3827 dirty_card_log_enqueue = start;
3828 dirty_card_log_enqueue_end = __ pc();
3829 // XXX Should have a guarantee here about not going off the end!
3830 // Does it already do so? Do an experiment...
3832 #undef __
3834 }
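// C-style outline of this stub (commentary only):
//
//   jbyte* card = byte_map_base + ((uintptr_t)store_addr >> card_shift);
//   if (*card != dirty_card_val()) {            // not already dirty
//     *card = dirty_card_val();                 // i.e. 0
//     // then enqueue 'card' on thread->dirty_card_queue(), refilling on
//     // index == 0 exactly as in the SATB stub above.
//   }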
3836 static inline void
3837 generate_dirty_card_log_enqueue_if_necessary(jbyte* byte_map_base) {
3838 if (dirty_card_log_enqueue == 0) {
3839 generate_dirty_card_log_enqueue(byte_map_base);
3840 assert(dirty_card_log_enqueue != 0, "postcondition.");
3841 if (G1SATBPrintStubs) {
3842 tty->print_cr("Generated dirty_card enqueue:");
3843 Disassembler::decode((u_char*)dirty_card_log_enqueue,
3844 dirty_card_log_enqueue_end,
3845 tty);
3846 }
3847 }
3848 }
3851 void MacroAssembler::g1_write_barrier_post(Register store_addr, Register new_val, Register tmp) {
3853 Label filtered;
3854 MacroAssembler* post_filter_masm = this;
3856 if (new_val == G0) return;
3858 G1SATBCardTableModRefBS* bs = (G1SATBCardTableModRefBS*) Universe::heap()->barrier_set();
3859 assert(bs->kind() == BarrierSet::G1SATBCT ||
3860 bs->kind() == BarrierSet::G1SATBCTLogging, "wrong barrier");
3862 if (G1RSBarrierRegionFilter) {
3863 xor3(store_addr, new_val, tmp);
3864 #ifdef _LP64
3865 srlx(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
3866 #else
3867 srl(tmp, HeapRegion::LogOfHRGrainBytes, tmp);
3868 #endif
3870 // XXX Should I predict this taken or not? Does it matter?
3871 cmp_and_brx_short(tmp, G0, Assembler::equal, Assembler::pt, filtered);
3872 }
3874 // If the "store_addr" register is an "in" or "local" register, move it to
3875 // a scratch reg so we can pass it as an argument.
3876 bool use_scr = !(store_addr->is_global() || store_addr->is_out());
3877 // Pick a scratch register different from "tmp".
3878 Register scr = (tmp == G1_scratch ? G3_scratch : G1_scratch);
3879 // Make sure we use up the delay slot!
3880 if (use_scr) {
3881 post_filter_masm->mov(store_addr, scr);
3882 } else {
3883 post_filter_masm->nop();
3884 }
3885 generate_dirty_card_log_enqueue_if_necessary(bs->byte_map_base);
3886 save_frame(0);
3887 call(dirty_card_log_enqueue);
3888 if (use_scr) {
3889 delayed()->mov(scr, O0);
3890 } else {
3891 delayed()->mov(store_addr->after_save(), O0);
3892 }
3893 restore();
3895 bind(filtered);
3896 }
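// The G1RSBarrierRegionFilter test above, spelled out (commentary only):
// a store needs a remembered-set update only when it crosses a heap-region
// boundary, i.e. the barrier is skipped when
//
//   (((uintptr_t)store_addr ^ (uintptr_t)new_val)
//       >> HeapRegion::LogOfHRGrainBytes) == 0   // same region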
3898 #endif // INCLUDE_ALL_GCS
3899 ///////////////////////////////////////////////////////////////////////////////////
3901 void MacroAssembler::card_write_barrier_post(Register store_addr, Register new_val, Register tmp) {
3902 // If we're writing constant NULL, we can skip the write barrier.
3903 if (new_val == G0) return;
3904 CardTableModRefBS* bs = (CardTableModRefBS*) Universe::heap()->barrier_set();
3905 assert(bs->kind() == BarrierSet::CardTableModRef ||
3906 bs->kind() == BarrierSet::CardTableExtension, "wrong barrier");
3907 card_table_write(bs->byte_map_base, tmp, store_addr);
3908 }
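// For the plain card-table barrier (commentary only), the generated code
// amounts to a single byte store:
//
//   byte_map_base[(uintptr_t)store_addr >> CardTableModRefBS::card_shift] = 0;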
3910 void MacroAssembler::load_klass(Register src_oop, Register klass) {
3911 // The number of bytes in this code is used by
3912 // MachCallDynamicJavaNode::ret_addr_offset();
3913 // if this changes, change that.
3914 if (UseCompressedKlassPointers) {
3915 lduw(src_oop, oopDesc::klass_offset_in_bytes(), klass);
3916 decode_klass_not_null(klass);
3917 } else {
3918 ld_ptr(src_oop, oopDesc::klass_offset_in_bytes(), klass);
3919 }
3920 }
3922 void MacroAssembler::store_klass(Register klass, Register dst_oop) {
3923 if (UseCompressedKlassPointers) {
3924 assert(dst_oop != klass, "not enough registers");
3925 encode_klass_not_null(klass);
3926 st(klass, dst_oop, oopDesc::klass_offset_in_bytes());
3927 } else {
3928 st_ptr(klass, dst_oop, oopDesc::klass_offset_in_bytes());
3929 }
3930 }
3932 void MacroAssembler::store_klass_gap(Register s, Register d) {
3933 if (UseCompressedKlassPointers) {
3934 assert(s != d, "not enough registers");
3935 st(s, d, oopDesc::klass_gap_offset_in_bytes());
3936 }
3937 }
3939 void MacroAssembler::load_heap_oop(const Address& s, Register d) {
3940 if (UseCompressedOops) {
3941 lduw(s, d);
3942 decode_heap_oop(d);
3943 } else {
3944 ld_ptr(s, d);
3945 }
3946 }
3948 void MacroAssembler::load_heap_oop(Register s1, Register s2, Register d) {
3949 if (UseCompressedOops) {
3950 lduw(s1, s2, d);
3951 decode_heap_oop(d, d);
3952 } else {
3953 ld_ptr(s1, s2, d);
3954 }
3955 }
3957 void MacroAssembler::load_heap_oop(Register s1, int simm13a, Register d) {
3958 if (UseCompressedOops) {
3959 lduw(s1, simm13a, d);
3960 decode_heap_oop(d, d);
3961 } else {
3962 ld_ptr(s1, simm13a, d);
3963 }
3964 }
3966 void MacroAssembler::load_heap_oop(Register s1, RegisterOrConstant s2, Register d) {
3967 if (s2.is_constant()) load_heap_oop(s1, s2.as_constant(), d);
3968 else load_heap_oop(s1, s2.as_register(), d);
3969 }
3971 void MacroAssembler::store_heap_oop(Register d, Register s1, Register s2) {
3972 if (UseCompressedOops) {
3973 assert(s1 != d && s2 != d, "not enough registers");
3974 encode_heap_oop(d);
3975 st(d, s1, s2);
3976 } else {
3977 st_ptr(d, s1, s2);
3978 }
3979 }
3981 void MacroAssembler::store_heap_oop(Register d, Register s1, int simm13a) {
3982 if (UseCompressedOops) {
3983 assert(s1 != d, "not enough registers");
3984 encode_heap_oop(d);
3985 st(d, s1, simm13a);
3986 } else {
3987 st_ptr(d, s1, simm13a);
3988 }
3989 }
3991 void MacroAssembler::store_heap_oop(Register d, const Address& a, int offset) {
3992 if (UseCompressedOops) {
3993 assert(a.base() != d, "not enough registers");
3994 encode_heap_oop(d);
3995 st(d, a, offset);
3996 } else {
3997 st_ptr(d, a, offset);
3998 }
3999 }
4002 void MacroAssembler::encode_heap_oop(Register src, Register dst) {
4003 assert (UseCompressedOops, "must be compressed");
4004 assert (Universe::heap() != NULL, "java heap should be initialized");
4005 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
4006 verify_oop(src);
4007 if (Universe::narrow_oop_base() == NULL) {
4008 srlx(src, LogMinObjAlignmentInBytes, dst);
4009 return;
4010 }
4011 Label done;
4012 if (src == dst) {
4013 // Optimize for the frequent case src == dst.
4014 bpr(rc_nz, true, Assembler::pt, src, done);
4015 delayed()->sub(src, G6_heapbase, dst); // annulled if not taken
4016 bind(done);
4017 srlx(src, LogMinObjAlignmentInBytes, dst);
4018 } else {
4019 bpr(rc_z, false, Assembler::pn, src, done);
4020 delayed()->mov(G0, dst);
4021 // This could be moved before the branch and the delay slot annulled,
4022 // but that may add some unneeded work decoding null.
4023 sub(src, G6_heapbase, dst);
4024 srlx(dst, LogMinObjAlignmentInBytes, dst);
4025 bind(done);
4026 }
4027 }
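// The compressed-oop arithmetic used above and below (commentary only):
//
//   encode: narrow = (oop == NULL) ? 0 : (oop - narrow_oop_base) >> shift
//   decode: oop    = (narrow == 0) ? NULL : narrow_oop_base + ((uintptr_t)narrow << shift)
//
// where shift == LogMinObjAlignmentInBytes and the base (kept in
// G6_heapbase) may be NULL, in which case the add/sub disappears.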
4030 void MacroAssembler::encode_heap_oop_not_null(Register r) {
4031 assert (UseCompressedOops, "must be compressed");
4032 assert (Universe::heap() != NULL, "java heap should be initialized");
4033 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
4034 verify_oop(r);
4035 if (Universe::narrow_oop_base() != NULL)
4036 sub(r, G6_heapbase, r);
4037 srlx(r, LogMinObjAlignmentInBytes, r);
4038 }
4040 void MacroAssembler::encode_heap_oop_not_null(Register src, Register dst) {
4041 assert (UseCompressedOops, "must be compressed");
4042 assert (Universe::heap() != NULL, "java heap should be initialized");
4043 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
4044 verify_oop(src);
4045 if (Universe::narrow_oop_base() == NULL) {
4046 srlx(src, LogMinObjAlignmentInBytes, dst);
4047 } else {
4048 sub(src, G6_heapbase, dst);
4049 srlx(dst, LogMinObjAlignmentInBytes, dst);
4050 }
4051 }
4053 // Same algorithm as oops.inline.hpp decode_heap_oop.
4054 void MacroAssembler::decode_heap_oop(Register src, Register dst) {
4055 assert (UseCompressedOops, "must be compressed");
4056 assert (Universe::heap() != NULL, "java heap should be initialized");
4057 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
4058 sllx(src, LogMinObjAlignmentInBytes, dst);
4059 if (Universe::narrow_oop_base() != NULL) {
4060 Label done;
4061 bpr(rc_nz, true, Assembler::pt, dst, done);
4062 delayed()->add(dst, G6_heapbase, dst); // annulled if not taken
4063 bind(done);
4064 }
4065 verify_oop(dst);
4066 }
4068 void MacroAssembler::decode_heap_oop_not_null(Register r) {
4069 // Do not add assert code to this unless you change vtableStubs_sparc.cpp
4070 // pd_code_size_limit.
4071 // Also do not verify_oop as this is called by verify_oop.
4072 assert (UseCompressedOops, "must be compressed");
4073 assert (Universe::heap() != NULL, "java heap should be initialized");
4074 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
4075 sllx(r, LogMinObjAlignmentInBytes, r);
4076 if (Universe::narrow_oop_base() != NULL)
4077 add(r, G6_heapbase, r);
4078 }
4080 void MacroAssembler::decode_heap_oop_not_null(Register src, Register dst) {
4081 // Do not add assert code to this unless you change vtableStubs_sparc.cpp
4082 // pd_code_size_limit.
4083 // Also do not verify_oop as this is called by verify_oop.
4084 assert (UseCompressedOops, "must be compressed");
4085 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
4086 sllx(src, LogMinObjAlignmentInBytes, dst);
4087 if (Universe::narrow_oop_base() != NULL)
4088 add(dst, G6_heapbase, dst);
4089 }
4091 void MacroAssembler::encode_klass_not_null(Register r) {
4092 assert(Metaspace::is_initialized(), "metaspace should be initialized");
4093 assert (UseCompressedKlassPointers, "must be compressed");
4094 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
4095 if (Universe::narrow_klass_base() != NULL)
4096 sub(r, G6_heapbase, r);
4097 srlx(r, LogKlassAlignmentInBytes, r);
4098 }
4100 void MacroAssembler::encode_klass_not_null(Register src, Register dst) {
4101 assert(Metaspace::is_initialized(), "metaspace should be initialized");
4102 assert (UseCompressedKlassPointers, "must be compressed");
4103 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
4104 if (Universe::narrow_klass_base() == NULL) {
4105 srlx(src, LogKlassAlignmentInBytes, dst);
4106 } else {
4107 sub(src, G6_heapbase, dst);
4108 srlx(dst, LogKlassAlignmentInBytes, dst);
4109 }
4110 }
4112 void MacroAssembler::decode_klass_not_null(Register r) {
4113 assert(Metaspace::is_initialized(), "metaspace should be initialized");
4114 // Do not add assert code to this unless you change vtableStubs_sparc.cpp
4115 // pd_code_size_limit.
4116 assert (UseCompressedKlassPointers, "must be compressed");
4117 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
4118 sllx(r, LogKlassAlignmentInBytes, r);
4119 if (Universe::narrow_klass_base() != NULL)
4120 add(r, G6_heapbase, r);
4121 }
4123 void MacroAssembler::decode_klass_not_null(Register src, Register dst) {
4124 assert(Metaspace::is_initialized(), "metaspace should be initialized");
4125 // Do not add assert code to this unless you change vtableStubs_sparc.cpp
4126 // pd_code_size_limit.
4127 assert (UseCompressedKlassPointers, "must be compressed");
4128 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
4129 sllx(src, LogKlassAlignmentInBytes, dst);
4130 if (Universe::narrow_klass_base() != NULL)
4131 add(dst, G6_heapbase, dst);
4132 }
4134 void MacroAssembler::reinit_heapbase() {
4135 if (UseCompressedOops || UseCompressedKlassPointers) {
4136 AddressLiteral base(Universe::narrow_ptrs_base_addr());
4137 load_ptr_contents(base, G6_heapbase);
4138 }
4139 }
4141 // Compare char[] arrays aligned to 4 bytes.
4142 void MacroAssembler::char_arrays_equals(Register ary1, Register ary2,
4143 Register limit, Register result,
4144 Register chr1, Register chr2, Label& Ldone) {
4145 Label Lvector, Lloop;
4146 assert(chr1 == result, "should be the same");
4148 // Note: limit contains number of bytes (2*char_elements) != 0.
4149 andcc(limit, 0x2, chr1); // trailing character ?
4150 br(Assembler::zero, false, Assembler::pt, Lvector);
4151 delayed()->nop();
4153 // compare the trailing char
4154 sub(limit, sizeof(jchar), limit);
4155 lduh(ary1, limit, chr1);
4156 lduh(ary2, limit, chr2);
4157 cmp(chr1, chr2);
4158 br(Assembler::notEqual, true, Assembler::pt, Ldone);
4159 delayed()->mov(G0, result); // not equal
4161 // only one char ?
4162 cmp_zero_and_br(zero, limit, Ldone, true, Assembler::pn);
4163 delayed()->add(G0, 1, result); // zero-length arrays are equal
4165 // word-by-word compare; don't need an alignment check
4166 bind(Lvector);
4167 // Shift ary1 and ary2 to the end of the arrays, negate limit
4168 add(ary1, limit, ary1);
4169 add(ary2, limit, ary2);
4170 neg(limit, limit);
4172 lduw(ary1, limit, chr1);
4173 bind(Lloop);
4174 lduw(ary2, limit, chr2);
4175 cmp(chr1, chr2);
4176 br(Assembler::notEqual, true, Assembler::pt, Ldone);
4177 delayed()->mov(G0, result); // not equal
4178 inccc(limit, 2*sizeof(jchar));
4179 // annul LDUW if branch is not taken to prevent access past end of array
4180 br(Assembler::notZero, true, Assembler::pt, Lloop);
4181 delayed()->lduw(ary1, limit, chr1); // hoisted
4183 // Caller should set it:
4184 // add(G0, 1, result); // equals
4185 }
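// The comparison above, in C-style outline (commentary only; 'limit' is in
// bytes and nonzero on entry):
//
//   if (limit & 2) {                            // odd number of chars?
//     limit -= sizeof(jchar);                   // compare the trailing char
//     if (((jchar*)ary1)[limit/2] != ((jchar*)ary2)[limit/2]) return 0;
//     if (limit == 0) return 1;                 // it was the only char
//   }
//   char* e1 = (char*)ary1 + limit, *e2 = (char*)ary2 + limit;
//   for (intptr_t off = -limit; off != 0; off += 2*sizeof(jchar))
//     if (*(jint*)(e1 + off) != *(jint*)(e2 + off)) return 0;  // 2 chars/load
//   // equal: caller stores 1 into 'result'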
4187 // Use BIS for zeroing (count is in bytes).
4188 void MacroAssembler::bis_zeroing(Register to, Register count, Register temp, Label& Ldone) {
4189 assert(UseBlockZeroing && VM_Version::has_block_zeroing(), "only works with BIS zeroing");
4190 Register end = count;
4191 int cache_line_size = VM_Version::prefetch_data_size();
4192 // Minimum count for which BIS zeroing can be used, since
4193 // it needs a membar, which is expensive.
4194 int block_zero_size = MAX2(cache_line_size*3, (int)BlockZeroingLowLimit);
4196 Label small_loop;
4197 // Check if count is negative (dead code) or zero.
4198 // Note: count is 64-bit in a 64-bit VM.
4199 cmp_and_brx_short(count, 0, Assembler::lessEqual, Assembler::pn, Ldone);
4201 // Use BIS zeroing only for big arrays since it requires membar.
4202 if (Assembler::is_simm13(block_zero_size)) { // < 4096
4203 cmp(count, block_zero_size);
4204 } else {
4205 set(block_zero_size, temp);
4206 cmp(count, temp);
4207 }
4208 br(Assembler::lessUnsigned, false, Assembler::pt, small_loop);
4209 delayed()->add(to, count, end);
4211 // Note: size is at least three (32-byte) cache lines.
4213 // Clean the beginning of space up to next cache line.
4214 for (int offs = 0; offs < cache_line_size; offs += 8) {
4215 stx(G0, to, offs);
4216 }
4218 // align to next cache line
4219 add(to, cache_line_size, to);
4220 and3(to, -cache_line_size, to);
4222 // Note: size left is at least two (32-byte) cache lines.
4224 // BIS should not be used to zero the tail (64 bytes),
4225 // to avoid zeroing the header of the following object.
4226 sub(end, (cache_line_size*2)-8, end);
4228 Label bis_loop;
4229 bind(bis_loop);
4230 stxa(G0, to, G0, Assembler::ASI_ST_BLKINIT_PRIMARY);
4231 add(to, cache_line_size, to);
4232 cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, bis_loop);
4234 // BIS needs membar.
4235 membar(Assembler::StoreLoad);
4237 add(end, (cache_line_size*2)-8, end); // restore end
4238 cmp_and_brx_short(to, end, Assembler::greaterEqualUnsigned, Assembler::pn, Ldone);
4240 // Clean the tail.
4241 bind(small_loop);
4242 stx(G0, to, 0);
4243 add(to, 8, to);
4244 cmp_and_brx_short(to, end, Assembler::lessUnsigned, Assembler::pt, small_loop);
4245 nop(); // Separate short branches
4246 }
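// Shape of the routine above (commentary only):
//
//   if (count <= 0) return;                    // nothing to zero
//   if (count < block_zero_size) goto small;   // BIS not worth the membar
//   // zero with ordinary stx up to the next cache-line boundary, then
//   // BIS-zero whole cache lines, stopping roughly two lines short of the
//   // end so the following object's header is never block-initialized;
//   membar(StoreLoad);                         // order the BIS stores
//   small:
//   // zero the remainder 8 bytes at a time with stx.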