Fri, 25 Apr 2014 12:48:34 +0200
8029302: Performance regression in Math.pow intrinsic
Summary: Added special case for x^y where y == 2
Reviewed-by: kvn
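
The change in one line: the Math.pow intrinsic now tests for an exponent of
exactly 2 and reduces pow(x, 2) to a single multiply instead of running the
general power computation. A minimal sketch of that special case (illustrative
only, not the HotSpot code; pow_general is a hypothetical stand-in for the
existing general path):

    static double pow_sketch(double x, double y) {
      if (y == 2.0) {
        return x * x;             // one multiply instead of the general path
      }
      return pow_general(x, y);   // hypothetical fallback: full algorithm
    }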
/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "compiler/disassembler.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_ALL_GCS
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#endif // INCLUDE_ALL_GCS

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")

#ifdef ASSERT
bool AbstractAssembler::pd_check_instruction_mark() { return true; }
#endif

static Assembler::Condition reverse[] = {
    Assembler::noOverflow   /* overflow      = 0x0 */ ,
    Assembler::overflow     /* noOverflow    = 0x1 */ ,
    Assembler::aboveEqual   /* carrySet      = 0x2, below      = 0x2 */ ,
    Assembler::below        /* aboveEqual    = 0x3, carryClear = 0x3 */ ,
    Assembler::notZero      /* zero          = 0x4, equal      = 0x4 */ ,
    Assembler::zero         /* notZero       = 0x5, notEqual   = 0x5 */ ,
    Assembler::above        /* belowEqual    = 0x6 */ ,
    Assembler::belowEqual   /* above         = 0x7 */ ,
    Assembler::positive     /* negative      = 0x8 */ ,
    Assembler::negative     /* positive      = 0x9 */ ,
    Assembler::noParity     /* parity        = 0xa */ ,
    Assembler::parity       /* noParity      = 0xb */ ,
    Assembler::greaterEqual /* less          = 0xc */ ,
    Assembler::less         /* greaterEqual  = 0xd */ ,
    Assembler::greater      /* lessEqual     = 0xe */ ,
    Assembler::lessEqual    /* greater       = 0xf */
};

// Implementation of MacroAssembler

// First all the versions that have distinct versions depending on 32/64 bit
// Unless the difference is trivial (1 line or so).

#ifndef _LP64

// 32bit versions

Address MacroAssembler::as_Address(AddressLiteral adr) {
  return Address(adr.target(), adr.rspec());
}

Address MacroAssembler::as_Address(ArrayAddress adr) {
  return Address::make_array(adr);
}

void MacroAssembler::call_VM_leaf_base(address entry_point,
                                       int number_of_arguments) {
  call(RuntimeAddress(entry_point));
  increment(rsp, number_of_arguments * wordSize);
}

void MacroAssembler::cmpklass(Address src1, Metadata* obj) {
  cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::cmpklass(Register src1, Metadata* obj) {
  cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::cmpoop(Address src1, jobject obj) {
  cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::cmpoop(Register src1, jobject obj) {
  cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::extend_sign(Register hi, Register lo) {
  // According to Intel Doc. AP-526, "Integer Divide", p. 18.
  if (VM_Version::is_P6() && hi == rdx && lo == rax) {
    cdql();
  } else {
    movl(hi, lo);
    sarl(hi, 31);
  }
}

void MacroAssembler::jC2(Register tmp, Label& L) {
  // set parity bit if FPU flag C2 is set (via rax)
  save_rax(tmp);
  fwait(); fnstsw_ax();
  sahf();
  restore_rax(tmp);
  // branch
  jcc(Assembler::parity, L);
}

void MacroAssembler::jnC2(Register tmp, Label& L) {
  // set parity bit if FPU flag C2 is set (via rax)
  save_rax(tmp);
  fwait(); fnstsw_ax();
  sahf();
  restore_rax(tmp);
  // branch
  jcc(Assembler::noParity, L);
}

// 32bit can do a case table jump in one instruction but we no longer allow the base
// to be installed in the Address class
void MacroAssembler::jump(ArrayAddress entry) {
  jmp(as_Address(entry));
}

// Note: y_lo will be destroyed
void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
  // Long compare for Java (semantics as described in JVM spec.)
  Label high, low, done;

  cmpl(x_hi, y_hi);
  jcc(Assembler::less, low);
  jcc(Assembler::greater, high);
  // x_hi is the return register
  xorl(x_hi, x_hi);
  cmpl(x_lo, y_lo);
  jcc(Assembler::below, low);
  jcc(Assembler::equal, done);

  bind(high);
  xorl(x_hi, x_hi);
  increment(x_hi);
  jmp(done);

  bind(low);
  xorl(x_hi, x_hi);
  decrementl(x_hi);

  bind(done);
}

void MacroAssembler::lea(Register dst, AddressLiteral src) {
  mov_literal32(dst, (int32_t)src.target(), src.rspec());
}

void MacroAssembler::lea(Address dst, AddressLiteral adr) {
  // leal(dst, as_Address(adr));
  // see note in movl as to why we must use a move
  mov_literal32(dst, (int32_t)adr.target(), adr.rspec());
}

void MacroAssembler::leave() {
  mov(rsp, rbp);
  pop(rbp);
}

void MacroAssembler::lmul(int x_rsp_offset, int y_rsp_offset) {
  // Multiplication of two Java long values stored on the stack
  // as illustrated below. Result is in rdx:rax.
  //
  // rsp ---> [  ??  ] \               \
  //            ....    | y_rsp_offset  |
  //          [ y_lo ] /  (in bytes)    | x_rsp_offset
  //          [ y_hi ]                  | (in bytes)
  //            ....                    |
  //          [ x_lo ]                 /
  //          [ x_hi ]
  //            ....
  //
  // Basic idea: lo(result) = lo(x_lo * y_lo)
  //             hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) + lo(x_lo * y_hi)
  Address x_hi(rsp, x_rsp_offset + wordSize); Address x_lo(rsp, x_rsp_offset);
  Address y_hi(rsp, y_rsp_offset + wordSize); Address y_lo(rsp, y_rsp_offset);
  Label quick;
  // load x_hi, y_hi and check if quick
  // multiplication is possible
  movl(rbx, x_hi);
  movl(rcx, y_hi);
  movl(rax, rbx);
  orl(rbx, rcx);                                 // rbx = 0 <=> x_hi = 0 and y_hi = 0
  jcc(Assembler::zero, quick);                   // if rbx = 0 do quick multiply
  // do full multiplication
  // 1st step
  mull(y_lo);                                    // x_hi * y_lo
  movl(rbx, rax);                                // save lo(x_hi * y_lo) in rbx
  // 2nd step
  movl(rax, x_lo);
  mull(rcx);                                     // x_lo * y_hi
  addl(rbx, rax);                                // add lo(x_lo * y_hi) to rbx
  // 3rd step
  bind(quick);                                   // note: rbx = 0 if quick multiply!
  movl(rax, x_lo);
  mull(y_lo);                                    // x_lo * y_lo
  addl(rdx, rbx);                                // correct hi(x_lo * y_lo)
}
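
// Illustrative sketch (not part of the VM code): the decomposition in the
// comment above, written over 32-bit halves in plain C++. Only the low 32
// bits of the two cross products matter, because their high halves (and all
// of x_hi * y_hi) land beyond bit 63 of the result:
//
//   static unsigned long long lmul_sketch(unsigned long long x,
//                                         unsigned long long y) {
//     unsigned x_lo = (unsigned)x, x_hi = (unsigned)(x >> 32);
//     unsigned y_lo = (unsigned)y, y_hi = (unsigned)(y >> 32);
//     unsigned long long lo_prod = (unsigned long long)x_lo * y_lo; // rdx:rax
//     unsigned hi_corr = x_hi * y_lo + x_lo * y_hi;  // lo() parts only (rbx)
//     return lo_prod + ((unsigned long long)hi_corr << 32); // addl(rdx, rbx)
//   }
//
// e.g. x = y = 0x100000001: lo_prod = 1, hi_corr = 2, result = 0x200000001,
// which is the low 64 bits of the true product, as Java's long multiply
// requires.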

void MacroAssembler::lneg(Register hi, Register lo) {
  negl(lo);
  adcl(hi, 0);
  negl(hi);
}

void MacroAssembler::lshl(Register hi, Register lo) {
  // Java shift left long support (semantics as described in JVM spec., p.305)
  // (basic idea for shift counts s >= n: x << s == (x << n) << (s - n))
  // shift value is in rcx !
  assert(hi != rcx, "must not use rcx");
  assert(lo != rcx, "must not use rcx");
  const Register s = rcx;                        // shift count
  const int      n = BitsPerWord;
  Label L;
  andl(s, 0x3f);                                 // s := s & 0x3f (s < 0x40)
  cmpl(s, n);                                    // if (s < n)
  jcc(Assembler::less, L);                       // else (s >= n)
  movl(hi, lo);                                  // x := x << n
  xorl(lo, lo);
  // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
  bind(L);                                       // s (mod n) < n
  shldl(hi, lo);                                 // x := x << s
  shll(lo);
}
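
// Illustrative sketch (not part of the VM code): the s >= 32 decomposition
// for the left shift above, in plain C++ over two 32-bit halves:
//
//   static void lshl_sketch(unsigned& hi, unsigned& lo, unsigned s) {
//     s &= 0x3f;                                    // Java masks long shifts
//     if (s >= 32) { hi = lo; lo = 0; s -= 32; }    // x << 32, then the rest
//     hi = (hi << s) | (s ? (lo >> (32 - s)) : 0);  // shldl(hi, lo)
//     lo <<= s;                                     // shll(lo)
//   }
//
// The emitted code never subtracts 32 from rcx; the hardware shifts already
// use the count mod 32, which is why the subl(s, n) noted above is unneeded.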

void MacroAssembler::lshr(Register hi, Register lo, bool sign_extension) {
  // Java shift right long support (semantics as described in JVM spec., p.306 & p.310)
  // (basic idea for shift counts s >= n: x >> s == (x >> n) >> (s - n))
  assert(hi != rcx, "must not use rcx");
  assert(lo != rcx, "must not use rcx");
  const Register s = rcx;                        // shift count
  const int      n = BitsPerWord;
  Label L;
  andl(s, 0x3f);                                 // s := s & 0x3f (s < 0x40)
  cmpl(s, n);                                    // if (s < n)
  jcc(Assembler::less, L);                       // else (s >= n)
  movl(lo, hi);                                  // x := x >> n
  if (sign_extension) sarl(hi, 31);
  else                xorl(hi, hi);
  // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
  bind(L);                                       // s (mod n) < n
  shrdl(lo, hi);                                 // x := x >> s
  if (sign_extension) sarl(hi);
  else                shrl(hi);
}
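
// Illustrative sketch (not part of the VM code): the matching right-shift
// decomposition. sign_extension selects Java's >> (high word refills with the
// sign bit) versus >>> (refills with zero):
//
//   static void lshr_sketch(unsigned& hi, unsigned& lo, unsigned s, bool sign) {
//     s &= 0x3f;
//     if (s >= 32) { lo = hi; hi = sign ? (unsigned)((int)hi >> 31) : 0; s -= 32; }
//     lo = (lo >> s) | (s ? (hi << (32 - s)) : 0);       // shrdl(lo, hi)
//     hi = sign ? (unsigned)((int)hi >> s) : (hi >> s);  // sarl / shrl
//   }
//
// e.g. 0xFFFFFFFF00000000 >> 36 yields hi = lo = 0xFFFFFFFF (value -1), while
// the logical >>> 36 yields hi = 0, lo = 0x0FFFFFFF.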

void MacroAssembler::movoop(Register dst, jobject obj) {
  mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::movoop(Address dst, jobject obj) {
  mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
  mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::mov_metadata(Address dst, Metadata* obj) {
  mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::movptr(Register dst, AddressLiteral src, Register scratch) {
  // scratch register is not used,
  // it is defined to match parameters of 64-bit version of this method.
  if (src.is_lval()) {
    mov_literal32(dst, (intptr_t)src.target(), src.rspec());
  } else {
    movl(dst, as_Address(src));
  }
}

void MacroAssembler::movptr(ArrayAddress dst, Register src) {
  movl(as_Address(dst), src);
}

void MacroAssembler::movptr(Register dst, ArrayAddress src) {
  movl(dst, as_Address(src));
}

// src should NEVER be a real pointer. Use AddressLiteral for true pointers
void MacroAssembler::movptr(Address dst, intptr_t src) {
  movl(dst, src);
}

void MacroAssembler::pop_callee_saved_registers() {
  pop(rcx);
  pop(rdx);
  pop(rdi);
  pop(rsi);
}

void MacroAssembler::pop_fTOS() {
  fld_d(Address(rsp, 0));
  addl(rsp, 2 * wordSize);
}

void MacroAssembler::push_callee_saved_registers() {
  push(rsi);
  push(rdi);
  push(rdx);
  push(rcx);
}

void MacroAssembler::push_fTOS() {
  subl(rsp, 2 * wordSize);
  fstp_d(Address(rsp, 0));
}

void MacroAssembler::pushoop(jobject obj) {
  push_literal32((int32_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::pushklass(Metadata* obj) {
  push_literal32((int32_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::pushptr(AddressLiteral src) {
  if (src.is_lval()) {
    push_literal32((int32_t)src.target(), src.rspec());
  } else {
    pushl(as_Address(src));
  }
}

void MacroAssembler::set_word_if_not_zero(Register dst) {
  xorl(dst, dst);
  set_byte_if_not_zero(dst);
}

static void pass_arg0(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}

#ifndef PRODUCT
extern "C" void findpc(intptr_t x);
#endif

void MacroAssembler::debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg) {
  // In order to get locks to work, we need to fake an in_VM state
  JavaThread* thread = JavaThread::current();
  JavaThreadState saved_state = thread->thread_state();
  thread->set_thread_state(_thread_in_vm);
  if (ShowMessageBoxOnError) {
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      ttyLocker ttyl;
      BytecodeCounter::print();
    }
    // To see where a verify_oop failed, get $ebx+40/X for this frame.
    // This is the value of eip which points to where verify_oop will return.
    if (os::message_box(msg, "Execution stopped, print registers?")) {
      print_state32(rdi, rsi, rbp, rsp, rbx, rdx, rcx, rax, eip);
      BREAKPOINT;
    }
  } else {
    ttyLocker ttyl;
    ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
  }
  // Don't assert holding the ttyLock
  assert(false, err_msg("DEBUG MESSAGE: %s", msg));
  ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
}

void MacroAssembler::print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip) {
  ttyLocker ttyl;
  FlagSetting fs(Debugging, true);
  tty->print_cr("eip = 0x%08x", eip);
#ifndef PRODUCT
  if ((WizardMode || Verbose) && PrintMiscellaneous) {
    tty->cr();
    findpc(eip);
    tty->cr();
  }
#endif
#define PRINT_REG(rax) \
  { tty->print("%s = ", #rax); os::print_location(tty, rax); }
  PRINT_REG(rax);
  PRINT_REG(rbx);
  PRINT_REG(rcx);
  PRINT_REG(rdx);
  PRINT_REG(rdi);
  PRINT_REG(rsi);
  PRINT_REG(rbp);
  PRINT_REG(rsp);
#undef PRINT_REG
  // Print some words near the top of the stack.
  int* dump_sp = (int*) rsp;
  for (int col1 = 0; col1 < 8; col1++) {
    tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    os::print_location(tty, *dump_sp++);
  }
  for (int row = 0; row < 16; row++) {
    tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
    for (int col = 0; col < 8; col++) {
      tty->print(" 0x%08x", *dump_sp++);
    }
    tty->cr();
  }
  // Print some instructions around pc:
  Disassembler::decode((address)eip-64, (address)eip);
  tty->print_cr("--------");
  Disassembler::decode((address)eip, (address)eip+32);
}

void MacroAssembler::stop(const char* msg) {
  ExternalAddress message((address)msg);
  // push address of message
  pushptr(message.addr());
  { Label L; call(L, relocInfo::none); bind(L); }     // push eip
  pusha();                                            // push registers
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32)));
  hlt();
}

void MacroAssembler::warn(const char* msg) {
  push_CPU_state();

  ExternalAddress message((address) msg);
  // push address of message
  pushptr(message.addr());

  call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));
  addl(rsp, wordSize);       // discard argument
  pop_CPU_state();
}

void MacroAssembler::print_state() {
  { Label L; call(L, relocInfo::none); bind(L); }     // push eip
  pusha();                                            // push registers

  push_CPU_state();
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::print_state32)));
  pop_CPU_state();

  popa();
  addl(rsp, wordSize);
}

#else // _LP64

// 64 bit versions

Address MacroAssembler::as_Address(AddressLiteral adr) {
  // amd64 always does this as a pc-rel
  // we can be absolute or disp based on the instruction type
  // jmp/call are displacements others are absolute
  assert(!adr.is_lval(), "must be rval");
  assert(reachable(adr), "must be");
  return Address((int32_t)(intptr_t)(adr.target() - pc()), adr.target(), adr.reloc());
}

Address MacroAssembler::as_Address(ArrayAddress adr) {
  AddressLiteral base = adr.base();
  lea(rscratch1, base);
  Address index = adr.index();
  assert(index._disp == 0, "must not have disp"); // maybe it can?
  Address array(rscratch1, index._index, index._scale, index._disp);
  return array;
}

void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) {
  Label L, E;

#ifdef _WIN64
  // Windows always allocates space for its register args
  assert(num_args <= 4, "only register arguments supported");
  subq(rsp, frame::arg_reg_save_area_bytes);
#endif

  // Align stack if necessary
  testl(rsp, 15);
  jcc(Assembler::zero, L);

  subq(rsp, 8);
  {
    call(RuntimeAddress(entry_point));
  }
  addq(rsp, 8);
  jmp(E);

  bind(L);
  {
    call(RuntimeAddress(entry_point));
  }

  bind(E);

#ifdef _WIN64
  // restore stack pointer
  addq(rsp, frame::arg_reg_save_area_bytes);
#endif
}
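
// Illustrative sketch (not part of the VM code): why a single subq(rsp, 8)
// suffices above. The x86-64 ABI requires rsp to be 16-byte aligned at a
// call, and rsp on this path is assumed to be 8-byte aligned, so it is off
// by either 0 or exactly 8 bytes:
//
//   static unsigned long long align_for_call(unsigned long long rsp) {
//     if (rsp & 15)    // testl(rsp, 15); jcc(zero, L)
//       rsp -= 8;      // 8 mod 16 becomes 0 mod 16
//     return rsp;      // now rsp % 16 == 0
//   }
//
// e.g. 0x7ffc1008 becomes 0x7ffc1000, while 0x7ffc1000 is left untouched.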

void MacroAssembler::cmp64(Register src1, AddressLiteral src2) {
  assert(!src2.is_lval(), "should use cmpptr");

  if (reachable(src2)) {
    cmpq(src1, as_Address(src2));
  } else {
    lea(rscratch1, src2);
    Assembler::cmpq(src1, Address(rscratch1, 0));
  }
}

int MacroAssembler::corrected_idivq(Register reg) {
  // Full implementation of Java ldiv and lrem; checks for special
  // case as described in JVM spec., p.243 & p.271. The function
  // returns the (pc) offset of the idivl instruction - may be needed
  // for implicit exceptions.
  //
  //         normal case                           special case
  //
  // input : rax: dividend                         min_long
  //         reg: divisor (may not be eax/edx)     -1
  //
  // output: rax: quotient  (= rax idiv reg)       min_long
  //         rdx: remainder (= rax irem reg)       0
  assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
  static const int64_t min_long = 0x8000000000000000;
  Label normal_case, special_case;

  // check for special case
  cmp64(rax, ExternalAddress((address) &min_long));
  jcc(Assembler::notEqual, normal_case);
  xorl(rdx, rdx); // prepare rdx for possible special case (where
                  // remainder = 0)
  cmpq(reg, -1);
  jcc(Assembler::equal, special_case);

  // handle normal case
  bind(normal_case);
  cdqq();
  int idivq_offset = offset();
  idivq(reg);

  // normal and special case exit
  bind(special_case);

  return idivq_offset;
}
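
// Illustrative sketch (not part of the VM code): the one special case above
// exists because idivq raises a divide error on min_long / -1 (the true
// quotient, 2^63, does not fit in a signed 64-bit register), while the JVM
// spec defines the result as min_long with remainder 0:
//
//   static void java_ldiv_sketch(long long dividend, long long divisor,
//                                long long& quot, long long& rem) {
//     const long long min_long = -0x7FFFFFFFFFFFFFFFLL - 1;
//     if (dividend == min_long && divisor == -1) {
//       quot = min_long;             // wraps around, per JVM spec
//       rem  = 0;
//     } else {
//       quot = dividend / divisor;   // cdqq(); idivq(reg)
//       rem  = dividend % divisor;
//     }
//   }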

void MacroAssembler::decrementq(Register reg, int value) {
  if (value == min_jint) { subq(reg, value); return; }
  if (value <  0) { incrementq(reg, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { decq(reg) ; return; }
  /* else */      { subq(reg, value)       ; return; }
}

void MacroAssembler::decrementq(Address dst, int value) {
  if (value == min_jint) { subq(dst, value); return; }
  if (value <  0) { incrementq(dst, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { decq(dst) ; return; }
  /* else */      { subq(dst, value)       ; return; }
}

void MacroAssembler::incrementq(AddressLiteral dst) {
  if (reachable(dst)) {
    incrementq(as_Address(dst));
  } else {
    lea(rscratch1, dst);
    incrementq(Address(rscratch1, 0));
  }
}

void MacroAssembler::incrementq(Register reg, int value) {
  if (value == min_jint) { addq(reg, value); return; }
  if (value <  0) { decrementq(reg, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { incq(reg) ; return; }
  /* else */      { addq(reg, value)       ; return; }
}

void MacroAssembler::incrementq(Address dst, int value) {
  if (value == min_jint) { addq(dst, value); return; }
  if (value <  0) { decrementq(dst, -value); return; }
  if (value == 0) {                        ; return; }
  if (value == 1 && UseIncDec) { incq(dst) ; return; }
  /* else */      { addq(dst, value)       ; return; }
}

// 32bit can do a case table jump in one instruction but we no longer allow the base
// to be installed in the Address class
void MacroAssembler::jump(ArrayAddress entry) {
  lea(rscratch1, entry.base());
  Address dispatch = entry.index();
  assert(dispatch._base == noreg, "must be");
  dispatch._base = rscratch1;
  jmp(dispatch);
}

void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
  ShouldNotReachHere(); // 64bit doesn't use two regs
  cmpq(x_lo, y_lo);
}

void MacroAssembler::lea(Register dst, AddressLiteral src) {
  mov_literal64(dst, (intptr_t)src.target(), src.rspec());
}

void MacroAssembler::lea(Address dst, AddressLiteral adr) {
  mov_literal64(rscratch1, (intptr_t)adr.target(), adr.rspec());
  movptr(dst, rscratch1);
}

void MacroAssembler::leave() {
  // %%% is this really better? Why not on 32bit too?
  emit_int8((unsigned char)0xC9); // LEAVE
}

void MacroAssembler::lneg(Register hi, Register lo) {
  ShouldNotReachHere(); // 64bit doesn't use two regs
  negq(lo);
}

void MacroAssembler::movoop(Register dst, jobject obj) {
  mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate());
}

void MacroAssembler::movoop(Address dst, jobject obj) {
  mov_literal64(rscratch1, (intptr_t)obj, oop_Relocation::spec_for_immediate());
  movq(dst, rscratch1);
}

void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
  mov_literal64(dst, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
}

void MacroAssembler::mov_metadata(Address dst, Metadata* obj) {
  mov_literal64(rscratch1, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
  movq(dst, rscratch1);
}

void MacroAssembler::movptr(Register dst, AddressLiteral src, Register scratch) {
  if (src.is_lval()) {
    mov_literal64(dst, (intptr_t)src.target(), src.rspec());
  } else {
    if (reachable(src)) {
      movq(dst, as_Address(src));
    } else {
      lea(scratch, src);
      movq(dst, Address(scratch, 0));
    }
  }
}

void MacroAssembler::movptr(ArrayAddress dst, Register src) {
  movq(as_Address(dst), src);
}

void MacroAssembler::movptr(Register dst, ArrayAddress src) {
  movq(dst, as_Address(src));
}

// src should NEVER be a real pointer. Use AddressLiteral for true pointers
void MacroAssembler::movptr(Address dst, intptr_t src) {
  mov64(rscratch1, src);
  movq(dst, rscratch1);
}

// These are mostly for initializing NULL
void MacroAssembler::movptr(Address dst, int32_t src) {
  movslq(dst, src);
}

void MacroAssembler::movptr(Register dst, int32_t src) {
  mov64(dst, (intptr_t)src);
}

void MacroAssembler::pushoop(jobject obj) {
  movoop(rscratch1, obj);
  push(rscratch1);
}

void MacroAssembler::pushklass(Metadata* obj) {
  mov_metadata(rscratch1, obj);
  push(rscratch1);
}

void MacroAssembler::pushptr(AddressLiteral src) {
  lea(rscratch1, src);
  if (src.is_lval()) {
    push(rscratch1);
  } else {
    pushq(Address(rscratch1, 0));
  }
}

void MacroAssembler::reset_last_Java_frame(bool clear_fp,
                                           bool clear_pc) {
  // we must set sp to zero to clear frame
  movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
  }

  if (clear_pc) {
    movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
  }
}

void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address  last_java_pc) {
  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = rsp;
  }

  // last_java_fp is optional
  if (last_java_fp->is_valid()) {
    movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()),
           last_java_fp);
  }

  // last_java_pc is optional
  if (last_java_pc != NULL) {
    Address java_pc(r15_thread,
                    JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
    lea(rscratch1, InternalAddress(last_java_pc));
    movptr(java_pc, rscratch1);
  }

  movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
}

static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg) {
    masm->mov(c_rarg0, arg);
  }
}

static void pass_arg1(MacroAssembler* masm, Register arg) {
  if (c_rarg1 != arg) {
    masm->mov(c_rarg1, arg);
  }
}

static void pass_arg2(MacroAssembler* masm, Register arg) {
  if (c_rarg2 != arg) {
    masm->mov(c_rarg2, arg);
  }
}

static void pass_arg3(MacroAssembler* masm, Register arg) {
  if (c_rarg3 != arg) {
    masm->mov(c_rarg3, arg);
  }
}

void MacroAssembler::stop(const char* msg) {
  address rip = pc();
  pusha(); // get regs on stack
  lea(c_rarg0, ExternalAddress((address) msg));
  lea(c_rarg1, InternalAddress(rip));
  movq(c_rarg2, rsp); // pass pointer to regs array
  andq(rsp, -16);     // align stack as required by ABI
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
  hlt();
}

void MacroAssembler::warn(const char* msg) {
  push(rbp);
  movq(rbp, rsp);
  andq(rsp, -16);     // align stack as required by push_CPU_state and call
  push_CPU_state();   // keeps alignment at 16 bytes
  lea(c_rarg0, ExternalAddress((address) msg));
  call_VM_leaf(CAST_FROM_FN_PTR(address, warning), c_rarg0);
  pop_CPU_state();
  mov(rsp, rbp);
  pop(rbp);
}

void MacroAssembler::print_state() {
  address rip = pc();
  pusha();            // get regs on stack
  push(rbp);
  movq(rbp, rsp);
  andq(rsp, -16);     // align stack as required by push_CPU_state and call
  push_CPU_state();   // keeps alignment at 16 bytes

  lea(c_rarg0, InternalAddress(rip));
  lea(c_rarg1, Address(rbp, wordSize)); // pass pointer to regs array
  call_VM_leaf(CAST_FROM_FN_PTR(address, MacroAssembler::print_state64), c_rarg0, c_rarg1);

  pop_CPU_state();
  mov(rsp, rbp);
  pop(rbp);
  popa();
}

#ifndef PRODUCT
extern "C" void findpc(intptr_t x);
#endif

void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
  // In order to get locks to work, we need to fake an in_VM state
  if (ShowMessageBoxOnError) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
#ifndef PRODUCT
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      ttyLocker ttyl;
      BytecodeCounter::print();
    }
#endif
    // To see where a verify_oop failed, get $ebx+40/X for this frame.
    // XXX correct this offset for amd64
    // This is the value of eip which points to where verify_oop will return.
    if (os::message_box(msg, "Execution stopped, print registers?")) {
      print_state64(pc, regs);
      BREAKPOINT;
      assert(false, "start up GDB");
    }
    ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
  } else {
    ttyLocker ttyl;
    ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n",
                    msg);
    assert(false, err_msg("DEBUG MESSAGE: %s", msg));
  }
}

void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) {
  ttyLocker ttyl;
  FlagSetting fs(Debugging, true);
  tty->print_cr("rip = 0x%016lx", pc);
#ifndef PRODUCT
  tty->cr();
  findpc(pc);
  tty->cr();
#endif
#define PRINT_REG(rax, value) \
  { tty->print("%s = ", #rax); os::print_location(tty, value); }
  PRINT_REG(rax, regs[15]);
  PRINT_REG(rbx, regs[12]);
  PRINT_REG(rcx, regs[14]);
  PRINT_REG(rdx, regs[13]);
  PRINT_REG(rdi, regs[8]);
  PRINT_REG(rsi, regs[9]);
  PRINT_REG(rbp, regs[10]);
  PRINT_REG(rsp, regs[11]);
  PRINT_REG(r8 , regs[7]);
  PRINT_REG(r9 , regs[6]);
  PRINT_REG(r10, regs[5]);
  PRINT_REG(r11, regs[4]);
  PRINT_REG(r12, regs[3]);
  PRINT_REG(r13, regs[2]);
  PRINT_REG(r14, regs[1]);
  PRINT_REG(r15, regs[0]);
#undef PRINT_REG
  // Print some words near the top of the stack.
  int64_t* rsp = (int64_t*) regs[11];
  int64_t* dump_sp = rsp;
  for (int col1 = 0; col1 < 8; col1++) {
    tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (int64_t)dump_sp);
    os::print_location(tty, *dump_sp++);
  }
  for (int row = 0; row < 25; row++) {
    tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (int64_t)dump_sp);
    for (int col = 0; col < 4; col++) {
      tty->print(" 0x%016lx", *dump_sp++);
    }
    tty->cr();
  }
  // Print some instructions around pc:
  Disassembler::decode((address)pc-64, (address)pc);
  tty->print_cr("--------");
  Disassembler::decode((address)pc, (address)pc+32);
}

#endif // _LP64

// Now versions that are common to 32/64 bit

void MacroAssembler::addptr(Register dst, int32_t imm32) {
  LP64_ONLY(addq(dst, imm32)) NOT_LP64(addl(dst, imm32));
}

void MacroAssembler::addptr(Register dst, Register src) {
  LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
}

void MacroAssembler::addptr(Address dst, Register src) {
  LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
}

void MacroAssembler::addsd(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    Assembler::addsd(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::addsd(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::addss(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    addss(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    addss(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::align(int modulus) {
  if (offset() % modulus != 0) {
    nop(modulus - (offset() % modulus));
  }
}

void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src) {
  // Used in sign-masking with aligned address.
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  if (reachable(src)) {
    Assembler::andpd(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::andpd(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::andps(XMMRegister dst, AddressLiteral src) {
  // Used in sign-masking with aligned address.
  assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
  if (reachable(src)) {
    Assembler::andps(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::andps(dst, Address(rscratch1, 0));
  }
}

void MacroAssembler::andptr(Register dst, int32_t imm32) {
  LP64_ONLY(andq(dst, imm32)) NOT_LP64(andl(dst, imm32));
}

void MacroAssembler::atomic_incl(Address counter_addr) {
  if (os::is_MP())
    lock();
  incrementl(counter_addr);
}

void MacroAssembler::atomic_incl(AddressLiteral counter_addr, Register scr) {
  if (reachable(counter_addr)) {
    atomic_incl(as_Address(counter_addr));
  } else {
    lea(scr, counter_addr);
    atomic_incl(Address(scr, 0));
  }
}

#ifdef _LP64
void MacroAssembler::atomic_incq(Address counter_addr) {
  if (os::is_MP())
    lock();
  incrementq(counter_addr);
}

void MacroAssembler::atomic_incq(AddressLiteral counter_addr, Register scr) {
  if (reachable(counter_addr)) {
    atomic_incq(as_Address(counter_addr));
  } else {
    lea(scr, counter_addr);
    atomic_incq(Address(scr, 0));
  }
}
#endif

// Writes to successive stack pages until the given offset is reached, to
// check for stack overflow + shadow pages. This clobbers tmp.
void MacroAssembler::bang_stack_size(Register size, Register tmp) {
  movptr(tmp, rsp);
  // Bang stack for total size given plus shadow page size.
  // Bang one page at a time because large size can bang beyond yellow and
  // red zones.
  Label loop;
  bind(loop);
  movl(Address(tmp, (-os::vm_page_size())), size );
  subptr(tmp, os::vm_page_size());
  subl(size, os::vm_page_size());
  jcc(Assembler::greater, loop);

  // Bang down shadow pages too.
  // At this point, (tmp-0) is the last address touched, so don't
  // touch it again.  (It was touched as (tmp-pagesize) but then tmp
  // was post-decremented.)  Skip this address by starting at i=1, and
  // touch a few more pages below.  N.B.  It is important to touch all
  // the way down to and including i=StackShadowPages.
  for (int i = 1; i <= StackShadowPages; i++) {
    // this could be any sized move but this can be a debugging crumb
    // so the bigger the better.
    movptr(Address(tmp, (-i*os::vm_page_size())), size );
  }
}
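
// Illustrative sketch (not part of the VM code): the banging pattern above in
// plain C. One store per page guarantees that a guard page anywhere in the
// new frame's range (plus the shadow area) faults now, not later:
//
//   static void bang_stack_sketch(volatile char* sp, long frame_size,
//                                 long page, int shadow_pages) {
//     volatile char* tmp = sp;
//     for (long sz = frame_size; sz > 0; sz -= page) {
//       tmp -= page;
//       *tmp = 0;                   // movl(Address(tmp, -page_size), size)
//     }
//     for (int i = 1; i <= shadow_pages; i++) {
//       tmp[-i * page] = 0;         // keep banging below the frame
//     }
//   }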

int MacroAssembler::biased_locking_enter(Register lock_reg,
                                         Register obj_reg,
                                         Register swap_reg,
                                         Register tmp_reg,
                                         bool swap_reg_contains_mark,
                                         Label& done,
                                         Label* slow_case,
                                         BiasedLockingCounters* counters) {
  assert(UseBiasedLocking, "why call this otherwise?");
  assert(swap_reg == rax, "swap_reg must be rax for cmpxchgq");
  LP64_ONLY( assert(tmp_reg != noreg, "tmp_reg must be supplied"); )
  bool need_tmp_reg = false;
  if (tmp_reg == noreg) {
    need_tmp_reg = true;
    tmp_reg = lock_reg;
    assert_different_registers(lock_reg, obj_reg, swap_reg);
  } else {
    assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
  }
  assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
  Address mark_addr      (obj_reg, oopDesc::mark_offset_in_bytes());
  Address saved_mark_addr(lock_reg, 0);

  if (PrintBiasedLockingStatistics && counters == NULL) {
    counters = BiasedLocking::counters();
  }
  // Biased locking
  // See whether the lock is currently biased toward our thread and
  // whether the epoch is still valid
  // Note that the runtime guarantees sufficient alignment of JavaThread
  // pointers to allow age to be placed into low bits
  // First check to see whether biasing is even enabled for this object
  Label cas_label;
  int null_check_offset = -1;
  if (!swap_reg_contains_mark) {
    null_check_offset = offset();
    movptr(swap_reg, mark_addr);
  }
  if (need_tmp_reg) {
    push(tmp_reg);
  }
  movptr(tmp_reg, swap_reg);
  andptr(tmp_reg, markOopDesc::biased_lock_mask_in_place);
  cmpptr(tmp_reg, markOopDesc::biased_lock_pattern);
  if (need_tmp_reg) {
    pop(tmp_reg);
  }
  jcc(Assembler::notEqual, cas_label);
  // The bias pattern is present in the object's header. Need to check
  // whether the bias owner and the epoch are both still current.
#ifndef _LP64
  // Note that because there is no current thread register on x86_32 we
  // need to store off the mark word we read out of the object to
  // avoid reloading it and needing to recheck invariants below. This
  // store is unfortunate but it makes the overall code shorter and
  // simpler.
  movptr(saved_mark_addr, swap_reg);
#endif
  if (need_tmp_reg) {
    push(tmp_reg);
  }
  if (swap_reg_contains_mark) {
    null_check_offset = offset();
  }
  load_prototype_header(tmp_reg, obj_reg);
#ifdef _LP64
  orptr(tmp_reg, r15_thread);
  xorptr(tmp_reg, swap_reg);
  Register header_reg = tmp_reg;
#else
  xorptr(tmp_reg, swap_reg);
  get_thread(swap_reg);
  xorptr(swap_reg, tmp_reg);
  Register header_reg = swap_reg;
#endif
  andptr(header_reg, ~((int) markOopDesc::age_mask_in_place));
  if (need_tmp_reg) {
    pop(tmp_reg);
  }
  if (counters != NULL) {
    cond_inc32(Assembler::zero,
               ExternalAddress((address) counters->biased_lock_entry_count_addr()));
  }
  jcc(Assembler::equal, done);

  Label try_revoke_bias;
  Label try_rebias;

  // At this point we know that the header has the bias pattern and
  // that we are not the bias owner in the current epoch. We need to
  // figure out more details about the state of the header in order to
  // know what operations can be legally performed on the object's
  // header.

  // If the low three bits in the xor result aren't clear, that means
  // the prototype header is no longer biased and we have to revoke
  // the bias on this object.
  testptr(header_reg, markOopDesc::biased_lock_mask_in_place);
  jccb(Assembler::notZero, try_revoke_bias);

  // Biasing is still enabled for this data type. See whether the
  // epoch of the current bias is still valid, meaning that the epoch
  // bits of the mark word are equal to the epoch bits of the
  // prototype header. (Note that the prototype header's epoch bits
  // only change at a safepoint.) If not, attempt to rebias the object
  // toward the current thread. Note that we must be absolutely sure
  // that the current epoch is invalid in order to do this because
  // otherwise the manipulations it performs on the mark word are
  // illegal.
  testptr(header_reg, markOopDesc::epoch_mask_in_place);
  jccb(Assembler::notZero, try_rebias);

  // The epoch of the current bias is still valid but we know nothing
  // about the owner; it might be set or it might be clear. Try to
  // acquire the bias of the object using an atomic operation. If this
  // fails we will go in to the runtime to revoke the object's bias.
  // Note that we first construct the presumed unbiased header so we
  // don't accidentally blow away another thread's valid bias.
  NOT_LP64( movptr(swap_reg, saved_mark_addr); )
  andptr(swap_reg,
         markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
  if (need_tmp_reg) {
    push(tmp_reg);
  }
#ifdef _LP64
  movptr(tmp_reg, swap_reg);
  orptr(tmp_reg, r15_thread);
#else
  get_thread(tmp_reg);
  orptr(tmp_reg, swap_reg);
#endif
  if (os::is_MP()) {
    lock();
  }
  cmpxchgptr(tmp_reg, mark_addr); // compare tmp_reg and swap_reg
  if (need_tmp_reg) {
    pop(tmp_reg);
  }
  // If the biasing toward our thread failed, this means that
  // another thread succeeded in biasing it toward itself and we
  // need to revoke that bias. The revocation will occur in the
  // interpreter runtime in the slow case.
  if (counters != NULL) {
    cond_inc32(Assembler::zero,
               ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr()));
  }
  if (slow_case != NULL) {
    jcc(Assembler::notZero, *slow_case);
  }
  jmp(done);

  bind(try_rebias);
  // At this point we know the epoch has expired, meaning that the
  // current "bias owner", if any, is actually invalid. Under these
  // circumstances _only_, we are allowed to use the current header's
  // value as the comparison value when doing the cas to acquire the
  // bias in the current epoch. In other words, we allow transfer of
  // the bias from one thread to another directly in this situation.
  //
  // FIXME: due to a lack of registers we currently blow away the age
  // bits in this situation. Should attempt to preserve them.
  if (need_tmp_reg) {
    push(tmp_reg);
  }
  load_prototype_header(tmp_reg, obj_reg);
#ifdef _LP64
  orptr(tmp_reg, r15_thread);
#else
  get_thread(swap_reg);
  orptr(tmp_reg, swap_reg);
  movptr(swap_reg, saved_mark_addr);
#endif
  if (os::is_MP()) {
    lock();
  }
  cmpxchgptr(tmp_reg, mark_addr); // compare tmp_reg and swap_reg
  if (need_tmp_reg) {
    pop(tmp_reg);
  }
  // If the biasing toward our thread failed, then another thread
  // succeeded in biasing it toward itself and we need to revoke that
  // bias. The revocation will occur in the runtime in the slow case.
  if (counters != NULL) {
    cond_inc32(Assembler::zero,
               ExternalAddress((address) counters->rebiased_lock_entry_count_addr()));
  }
  if (slow_case != NULL) {
    jcc(Assembler::notZero, *slow_case);
  }
  jmp(done);

  bind(try_revoke_bias);
  // The prototype mark in the klass doesn't have the bias bit set any
  // more, indicating that objects of this data type are not supposed
  // to be biased any more. We are going to try to reset the mark of
  // this object to the prototype value and fall through to the
  // CAS-based locking scheme. Note that if our CAS fails, it means
  // that another thread raced us for the privilege of revoking the
  // bias of this particular object, so it's okay to continue in the
  // normal locking code.
  //
  // FIXME: due to a lack of registers we currently blow away the age
  // bits in this situation. Should attempt to preserve them.
  NOT_LP64( movptr(swap_reg, saved_mark_addr); )
  if (need_tmp_reg) {
    push(tmp_reg);
  }
  load_prototype_header(tmp_reg, obj_reg);
  if (os::is_MP()) {
    lock();
  }
  cmpxchgptr(tmp_reg, mark_addr); // compare tmp_reg and swap_reg
  if (need_tmp_reg) {
    pop(tmp_reg);
  }
  // Fall through to the normal CAS-based lock, because no matter what
  // the result of the above CAS, some thread must have succeeded in
  // removing the bias bit from the object's header.
  if (counters != NULL) {
    cond_inc32(Assembler::zero,
               ExternalAddress((address) counters->revoked_lock_entry_count_addr()));
  }

  bind(cas_label);

  return null_check_offset;
}
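
// Illustrative sketch (not part of the VM code): the header tests performed
// above, with the markOopDesc bit layout spelled out. The low 3 bits are the
// lock/bias pattern (101 = biasable), the next 4 are the object age, which
// is deliberately ignored by masking:
//
//   static bool biased_toward_us(unsigned long mark, unsigned long prototype,
//                                unsigned long thread) {
//     const unsigned long biased_lock_mask    = 0x7;
//     const unsigned long biased_lock_pattern = 0x5;   // 0b101
//     const unsigned long age_mask            = 0xfUL << 3;
//     if ((mark & biased_lock_mask) != biased_lock_pattern)
//       return false;                        // not biasable: go to cas_label
//     unsigned long xor_result = (prototype | thread) ^ mark;
//     return (xor_result & ~age_mask) == 0;  // owner and epoch both current
//   }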

void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) {
  assert(UseBiasedLocking, "why call this otherwise?");

  // Check for biased locking unlock case, which is a no-op
  // Note: we do not have to check the thread ID for two reasons.
  // First, the interpreter checks for IllegalMonitorStateException at
  // a higher level. Second, if the bias was revoked while we held the
  // lock, the object could not be rebiased toward another thread, so
  // the bias bit would be clear.
  movptr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
  andptr(temp_reg, markOopDesc::biased_lock_mask_in_place);
  cmpptr(temp_reg, markOopDesc::biased_lock_pattern);
  jcc(Assembler::equal, done);
}

#ifdef COMPILER2

#if INCLUDE_RTM_OPT

// Update rtm_counters based on abort status
// input: abort_status
//        rtm_counters (RTMLockingCounters*)
// flags are killed
void MacroAssembler::rtm_counters_update(Register abort_status, Register rtm_counters) {

  atomic_incptr(Address(rtm_counters, RTMLockingCounters::abort_count_offset()));
  if (PrintPreciseRTMLockingStatistics) {
    for (int i = 0; i < RTMLockingCounters::ABORT_STATUS_LIMIT; i++) {
      Label check_abort;
      testl(abort_status, (1<<i));
      jccb(Assembler::equal, check_abort);
      atomic_incptr(Address(rtm_counters, RTMLockingCounters::abortX_count_offset() + (i * sizeof(uintx))));
      bind(check_abort);
    }
  }
}

// Branch if ((random & (count-1)) != 0), count is 2^n
// tmp, scr and flags are killed
void MacroAssembler::branch_on_random_using_rdtsc(Register tmp, Register scr, int count, Label& brLabel) {
  assert(tmp == rax, "");
  assert(scr == rdx, "");
  rdtsc(); // modifies EDX:EAX
  andptr(tmp, count-1);
  jccb(Assembler::notZero, brLabel);
}
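
// Illustrative sketch (not part of the VM code): because count is a power of
// two, "tsc & (count - 1)" keeps the low log2(count) bits of the timestamp
// counter, which are zero for roughly 1 in count calls. This is used above to
// sample expensive counter updates instead of paying for them on every lock:
//
//   static bool skip_update(unsigned long long tsc, int count /* 2^n */) {
//     return (tsc & (unsigned long long)(count - 1)) != 0;
//   }
//
// e.g. with count = 64, only about 1 in 64 calls falls through to the update.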

// Perform abort ratio calculation, set no_rtm bit if high ratio
// input:  rtm_counters_Reg (RTMLockingCounters* address)
// tmpReg, rtm_counters_Reg and flags are killed
void MacroAssembler::rtm_abort_ratio_calculation(Register tmpReg,
                                                 Register rtm_counters_Reg,
                                                 RTMLockingCounters* rtm_counters,
                                                 Metadata* method_data) {
  Label L_done, L_check_always_rtm1, L_check_always_rtm2;

  if (RTMLockingCalculationDelay > 0) {
    // Delay calculation
    movptr(tmpReg, ExternalAddress((address) RTMLockingCounters::rtm_calculation_flag_addr()), tmpReg);
    testptr(tmpReg, tmpReg);
    jccb(Assembler::equal, L_done);
  }
  // Abort ratio calculation only if abort_count > RTMAbortThreshold
  //   Aborted transactions = abort_count * 100
  //   All transactions = total_count *  RTMTotalCountIncrRate
  //   Set no_rtm bit if (Aborted transactions >= All transactions * RTMAbortRatio)

  movptr(tmpReg, Address(rtm_counters_Reg, RTMLockingCounters::abort_count_offset()));
  cmpptr(tmpReg, RTMAbortThreshold);
  jccb(Assembler::below, L_check_always_rtm2);
  imulptr(tmpReg, tmpReg, 100);

  Register scrReg = rtm_counters_Reg;
  movptr(scrReg, Address(rtm_counters_Reg, RTMLockingCounters::total_count_offset()));
  imulptr(scrReg, scrReg, RTMTotalCountIncrRate);
  imulptr(scrReg, scrReg, RTMAbortRatio);
  cmpptr(tmpReg, scrReg);
  jccb(Assembler::below, L_check_always_rtm1);
  if (method_data != NULL) {
    // set rtm_state to "no rtm" in MDO
    mov_metadata(tmpReg, method_data);
    if (os::is_MP()) {
      lock();
    }
    orl(Address(tmpReg, MethodData::rtm_state_offset_in_bytes()), NoRTM);
  }
  jmpb(L_done);
  bind(L_check_always_rtm1);
  // Reload RTMLockingCounters* address
  lea(rtm_counters_Reg, ExternalAddress((address)rtm_counters));
  bind(L_check_always_rtm2);
  movptr(tmpReg, Address(rtm_counters_Reg, RTMLockingCounters::total_count_offset()));
  cmpptr(tmpReg, RTMLockingThreshold / RTMTotalCountIncrRate);
  jccb(Assembler::below, L_done);
  if (method_data != NULL) {
    // set rtm_state to "always rtm" in MDO
    mov_metadata(tmpReg, method_data);
    if (os::is_MP()) {
      lock();
    }
    orl(Address(tmpReg, MethodData::rtm_state_offset_in_bytes()), UseRTM);
  }
  bind(L_done);
}
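
// Illustrative sketch (not part of the VM code): the ratio test above as
// straight-line arithmetic, only reached once abort_count has passed
// RTMAbortThreshold. Because total_count is only sampled 1 in
// RTMTotalCountIncrRate times, it is scaled back up before comparing, and
// both sides stay integral by multiplying the abort count by 100 instead of
// dividing:
//
//   static bool should_disable_rtm(long abort_count, long total_count,
//                                  long incr_rate, long abort_ratio_pct) {
//     long aborted = abort_count * 100;        // percent numerator
//     long all     = total_count * incr_rate;  // rescaled transaction count
//     return aborted >= all * abort_ratio_pct; // e.g. >= 50% aborts
//   }
//
// e.g. abort_count = 1200, total_count = 16, incr_rate = 64, ratio = 50:
// 120000 >= 1024 * 50 = 51200, so rtm_state is set to NoRTM.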

// Update counters and perform abort ratio calculation
// input:  abort_status_Reg
// rtm_counters_Reg, flags are killed
void MacroAssembler::rtm_profiling(Register abort_status_Reg,
                                   Register rtm_counters_Reg,
                                   RTMLockingCounters* rtm_counters,
                                   Metadata* method_data,
                                   bool profile_rtm) {

  assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
  // update rtm counters based on rax value at abort
  // reads abort_status_Reg, updates flags
  lea(rtm_counters_Reg, ExternalAddress((address)rtm_counters));
  rtm_counters_update(abort_status_Reg, rtm_counters_Reg);
  if (profile_rtm) {
    // Save abort status because abort_status_Reg is used by following code.
    if (RTMRetryCount > 0) {
      push(abort_status_Reg);
    }
    assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
    rtm_abort_ratio_calculation(abort_status_Reg, rtm_counters_Reg, rtm_counters, method_data);
    // restore abort status
    if (RTMRetryCount > 0) {
      pop(abort_status_Reg);
    }
  }
}

// Retry on abort if abort's status is 0x6: can retry (0x2) | memory conflict (0x4)
// inputs: retry_count_Reg
//       : abort_status_Reg
// output: retry_count_Reg decremented by 1
// flags are killed
void MacroAssembler::rtm_retry_lock_on_abort(Register retry_count_Reg, Register abort_status_Reg, Label& retryLabel) {
  Label doneRetry;
  assert(abort_status_Reg == rax, "");
  // The abort reason bits are in eax (see all states in rtmLocking.hpp)
  // 0x6 = conflict on which we can retry (0x2) | memory conflict (0x4)
  // if reason is in 0x6 and retry count != 0 then retry
  andptr(abort_status_Reg, 0x6);
  jccb(Assembler::zero, doneRetry);
  testl(retry_count_Reg, retry_count_Reg);
  jccb(Assembler::zero, doneRetry);
  pause();
  decrementl(retry_count_Reg);
  jmp(retryLabel);
  bind(doneRetry);
}
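
// Illustrative sketch (not part of the VM code): the retry predicate above.
// xbegin leaves an abort-status bitmask in eax; bit 1 (0x2) means the
// processor suggests the transaction may succeed on retry, and bit 2 (0x4)
// means a memory conflict, so either one is worth retrying:
//
//   static bool should_retry(unsigned abort_status, int retry_count) {
//     return (abort_status & 0x6) != 0 && retry_count != 0;
//   }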

// Spin and retry if lock is busy.
// inputs: box_Reg (monitor address)
//       : retry_count_Reg
// output: retry_count_Reg decremented by 1
//       : clear z flag if retry count exceeded
// tmp_Reg, scr_Reg, flags are killed
void MacroAssembler::rtm_retry_lock_on_busy(Register retry_count_Reg, Register box_Reg,
                                            Register tmp_Reg, Register scr_Reg, Label& retryLabel) {
  Label SpinLoop, SpinExit, doneRetry;
  // Clean monitor_value bit to get valid pointer
  int owner_offset = ObjectMonitor::owner_offset_in_bytes() - markOopDesc::monitor_value;

  testl(retry_count_Reg, retry_count_Reg);
  jccb(Assembler::zero, doneRetry);
  decrementl(retry_count_Reg);
  movptr(scr_Reg, RTMSpinLoopCount);

  bind(SpinLoop);
  pause();
  decrementl(scr_Reg);
  jccb(Assembler::lessEqual, SpinExit);
  movptr(tmp_Reg, Address(box_Reg, owner_offset));
  testptr(tmp_Reg, tmp_Reg);
  jccb(Assembler::notZero, SpinLoop);

  bind(SpinExit);
  jmp(retryLabel);
  bind(doneRetry);
  incrementl(retry_count_Reg); // clear z flag
}

// Use RTM for normal stack locks
// Input: objReg (object to lock)
void MacroAssembler::rtm_stack_locking(Register objReg, Register tmpReg, Register scrReg,
                                       Register retry_on_abort_count_Reg,
                                       RTMLockingCounters* stack_rtm_counters,
                                       Metadata* method_data, bool profile_rtm,
                                       Label& DONE_LABEL, Label& IsInflated) {
  assert(UseRTMForStackLocks, "why call this otherwise?");
  assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking");
  assert(tmpReg == rax, "");
  assert(scrReg == rdx, "");
  Label L_rtm_retry, L_decrement_retry, L_on_abort;

  if (RTMRetryCount > 0) {
    movl(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort
    bind(L_rtm_retry);
  }
  movptr(tmpReg, Address(objReg, 0));
  testptr(tmpReg, markOopDesc::monitor_value);  // inflated vs stack-locked|neutral|biased
  jcc(Assembler::notZero, IsInflated);

  if (PrintPreciseRTMLockingStatistics || profile_rtm) {
    Label L_noincrement;
    if (RTMTotalCountIncrRate > 1) {
      // tmpReg, scrReg and flags are killed
      branch_on_random_using_rdtsc(tmpReg, scrReg, (int)RTMTotalCountIncrRate, L_noincrement);
    }
    assert(stack_rtm_counters != NULL, "should not be NULL when profiling RTM");
    atomic_incptr(ExternalAddress((address)stack_rtm_counters->total_count_addr()), scrReg);
    bind(L_noincrement);
  }
  xbegin(L_on_abort);
  movptr(tmpReg, Address(objReg, 0));                     // fetch markword
  andptr(tmpReg, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
  cmpptr(tmpReg, markOopDesc::unlocked_value);            // bits = 001 unlocked
  jcc(Assembler::equal, DONE_LABEL);                      // all done if unlocked

  Register abort_status_Reg = tmpReg; // status of abort is stored in RAX
  if (UseRTMXendForLockBusy) {
    xend();
    movptr(abort_status_Reg, 0x2);   // Set the abort status to 2 (so we can retry)
    jmp(L_decrement_retry);
  }
  else {
    xabort(0);
  }
  bind(L_on_abort);
  if (PrintPreciseRTMLockingStatistics || profile_rtm) {
    rtm_profiling(abort_status_Reg, scrReg, stack_rtm_counters, method_data, profile_rtm);
  }
  bind(L_decrement_retry);
  if (RTMRetryCount > 0) {
    // retry on lock abort if abort status is 'can retry' (0x2) or 'memory conflict' (0x4)
    rtm_retry_lock_on_abort(retry_on_abort_count_Reg, abort_status_Reg, L_rtm_retry);
  }
}
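
// Illustrative sketch (not part of the VM code): the lock-elision shape used
// above, expressed with the compiler RTM intrinsics from <immintrin.h>. The
// lock word is only read inside the transaction; if it is unlocked, execution
// proceeds with no store at all, and any conflicting writer aborts us back to
// the xbegin target:
//
//   // unsigned status = _xbegin();               // xbegin(L_on_abort)
//   // if (status == _XBEGIN_STARTED) {
//   //   if ((mark & 0x7) == 0x1 /* unlocked */)  // 3 low lock bits
//   //     goto done;                             // lock elided
//   //   _xabort(0);                              // locked: take slow path
//   // }
//   // ... profiling and the retry policy run here, outside the transaction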

// Use RTM for inflating locks
// inputs: objReg (object to lock)
//         boxReg (on-stack box address (displaced header location) - KILLED)
//         tmpReg (ObjectMonitor address + 2(monitor_value))
void MacroAssembler::rtm_inflated_locking(Register objReg, Register boxReg, Register tmpReg,
                                          Register scrReg, Register retry_on_busy_count_Reg,
                                          Register retry_on_abort_count_Reg,
                                          RTMLockingCounters* rtm_counters,
                                          Metadata* method_data, bool profile_rtm,
                                          Label& DONE_LABEL) {
  assert(UseRTMLocking, "why call this otherwise?");
  assert(tmpReg == rax, "");
  assert(scrReg == rdx, "");
  Label L_rtm_retry, L_decrement_retry, L_on_abort;
  // Clean monitor_value bit to get valid pointer
  int owner_offset = ObjectMonitor::owner_offset_in_bytes() - markOopDesc::monitor_value;

  // Without cast to int32_t a movptr will destroy r10 which is typically obj
  movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
  movptr(boxReg, tmpReg); // Save ObjectMonitor address

  if (RTMRetryCount > 0) {
    movl(retry_on_busy_count_Reg, RTMRetryCount);  // Retry on lock busy
    movl(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort
    bind(L_rtm_retry);
  }
  if (PrintPreciseRTMLockingStatistics || profile_rtm) {
    Label L_noincrement;
    if (RTMTotalCountIncrRate > 1) {
      // tmpReg, scrReg and flags are killed
      branch_on_random_using_rdtsc(tmpReg, scrReg, (int)RTMTotalCountIncrRate, L_noincrement);
    }
    assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
    atomic_incptr(ExternalAddress((address)rtm_counters->total_count_addr()), scrReg);
    bind(L_noincrement);
  }
  xbegin(L_on_abort);
  movptr(tmpReg, Address(objReg, 0));
  movptr(tmpReg, Address(tmpReg, owner_offset));
  testptr(tmpReg, tmpReg);
  jcc(Assembler::zero, DONE_LABEL);
  if (UseRTMXendForLockBusy) {
    xend();
    jmp(L_decrement_retry);
  }
  else {
    xabort(0);
  }
  bind(L_on_abort);
  Register abort_status_Reg = tmpReg; // status of abort is stored in RAX
  if (PrintPreciseRTMLockingStatistics || profile_rtm) {
    rtm_profiling(abort_status_Reg, scrReg, rtm_counters, method_data, profile_rtm);
  }
  if (RTMRetryCount > 0) {
    // retry on lock abort if abort status is 'can retry' (0x2) or 'memory conflict' (0x4)
    rtm_retry_lock_on_abort(retry_on_abort_count_Reg, abort_status_Reg, L_rtm_retry);
  }

  movptr(tmpReg, Address(boxReg, owner_offset));
  testptr(tmpReg, tmpReg);
  jccb(Assembler::notZero, L_decrement_retry);

  // Appears unlocked - try to swing _owner from null to non-null.
  // Invariant: tmpReg == 0. tmpReg is EAX which is the implicit cmpxchg comparand.
#ifdef _LP64
  Register threadReg = r15_thread;
#else
  get_thread(scrReg);
  Register threadReg = scrReg;
#endif
  if (os::is_MP()) {
    lock();
  }
  cmpxchgptr(threadReg, Address(boxReg, owner_offset)); // Updates tmpReg

  if (RTMRetryCount > 0) {
    // success done else retry
    jccb(Assembler::equal, DONE_LABEL);
    bind(L_decrement_retry);
    // Spin and retry if lock is busy.
    rtm_retry_lock_on_busy(retry_on_busy_count_Reg, boxReg, tmpReg, scrReg, L_rtm_retry);
  }
  else {
    bind(L_decrement_retry);
  }
}
1618 #endif // INCLUDE_RTM_OPT
1620 // Fast_Lock and Fast_Unlock used by C2
1622 // Because the transitions from emitted code to the runtime
1623 // monitorenter/exit helper stubs are so slow it's critical that
1624 // we inline both the stack-locking fast-path and the inflated fast path.
1625 //
1626 // See also: cmpFastLock and cmpFastUnlock.
1627 //
1628 // What follows is a specialized inline transliteration of the code
1629 // in slow_enter() and slow_exit(). If we're concerned about I$ bloat
1630 // another option would be to emit TrySlowEnter and TrySlowExit methods
1631 // at startup-time. These methods would accept arguments as
1632 // (rax,=Obj, rbx=Self, rcx=box, rdx=Scratch) and return success-failure
1633 // indications in the icc.ZFlag. Fast_Lock and Fast_Unlock would simply
1634 // marshal the arguments and emit calls to TrySlowEnter and TrySlowExit.
1635 // In practice, however, the # of lock sites is bounded and is usually small.
1636 // Besides the call overhead, TrySlowEnter and TrySlowExit might suffer
1637 // if the processor uses simple bimodal branch predictors keyed by EIP
1638 // Since the helper routines would be called from multiple synchronization
1639 // sites.
1640 //
1641 // An even better approach would be write "MonitorEnter()" and "MonitorExit()"
1642 // in java - using j.u.c and unsafe - and just bind the lock and unlock sites
1643 // to those specialized methods. That'd give us a mostly platform-independent
1644 // implementation that the JITs could optimize and inline at their pleasure.
1645 // Done correctly, the only time we'd need to cross to native code would be
1646 // to park() or unpark() threads. We'd also need a few more unsafe operators
1647 // to (a) prevent compiler-JIT reordering of non-volatile accesses, and
1648 // (b) provide explicit barriers or fence operations.
1649 //
1650 // TODO:
1651 //
1652 // * Arrange for C2 to pass "Self" into Fast_Lock and Fast_Unlock in one of the registers (scr).
1653 // This avoids manifesting the Self pointer in the Fast_Lock and Fast_Unlock terminals.
1654 // Given TLAB allocation, Self is usually manifested in a register, so passing it into
1655 // the lock operators would typically be faster than reifying Self.
1656 //
1657 // * Ideally I'd define the primitives as:
1658 // fast_lock (nax Obj, nax box, EAX tmp, nax scr) where box, tmp and scr are KILLED.
1659 // fast_unlock (nax Obj, EAX box, nax tmp) where box and tmp are KILLED
1660 // Unfortunately ADLC bugs prevent us from expressing the ideal form.
1661 // Instead, we're stuck with the rather awkward and brittle register assignments below.
1662 // Furthermore the register assignments are overconstrained, possibly resulting in
1663 // sub-optimal code near the synchronization site.
1664 //
1665 // * Eliminate the sp-proximity tests and just use "== Self" tests instead.
1666 // Alternately, use a better sp-proximity test.
1667 //
1668 // * Currently ObjectMonitor._Owner can hold either an sp value or a (THREAD *) value.
1669 // Either one is sufficient to uniquely identify a thread.
1670 // TODO: eliminate use of sp in _owner and use get_thread(tr) instead.
1671 //
1672 // * Intrinsify notify() and notifyAll() for the common cases where the
1673 // object is locked by the calling thread but the waitlist is empty.
1674 // This avoids the expensive JNI call to JVM_Notify() and JVM_NotifyAll().
1675 //
1676 // * use jccb and jmpb instead of jcc and jmp to improve code density.
1677 // But beware of excessive branch density on AMD Opterons.
1678 //
1679 // * Both Fast_Lock and Fast_Unlock set the ICC.ZF to indicate success
1680 // or failure of the fast-path. If the fast-path fails then we pass
1681 // control to the slow-path, typically in C. In Fast_Lock and
1682 // Fast_Unlock we often branch to DONE_LABEL, just to find that C2
1683 // will emit a conditional branch immediately after the node.
1684 // So we have branches to branches and lots of ICC.ZF games.
1685 // Instead, it might be better to have C2 pass a "FailureLabel"
1686 // into Fast_Lock and Fast_Unlock. In the case of success, control
1687 // will drop through the node. ICC.ZF is undefined at exit.
1688 // In the case of failure, the node will branch directly to the
1689 // FailureLabel.
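//
// Illustrative sketch, not a quote of the C2 back end: how compiled code
// consumes the ICC.ZF success/failure protocol described above. The label
// names are hypothetical.
//
//   fast_lock(obj, box, rax, scr, ...);    // ZF == 1 <=> lock acquired
//   jcc(Assembler::notZero, SLOW_ENTER);   // ZF == 0 -> runtime monitorenter
//   ...                                    // synchronized region
//   fast_unlock(obj, rax, tmp);            // ZF == 1 <=> lock released
//   jcc(Assembler::notZero, SLOW_EXIT);    // ZF == 0 -> runtime monitorexit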
1692 // obj: object to lock
1693 // box: on-stack box address (displaced header location) - KILLED
1694 // rax,: tmp -- KILLED
1695 // scr: tmp -- KILLED
1696 void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg,
1697 Register scrReg, Register cx1Reg, Register cx2Reg,
1698 BiasedLockingCounters* counters,
1699 RTMLockingCounters* rtm_counters,
1700 RTMLockingCounters* stack_rtm_counters,
1701 Metadata* method_data,
1702 bool use_rtm, bool profile_rtm) {
1703 // Ensure the register assignments are disjoint
1704 assert(tmpReg == rax, "");
1706 if (use_rtm) {
1707 assert_different_registers(objReg, boxReg, tmpReg, scrReg, cx1Reg, cx2Reg);
1708 } else {
1709 assert(cx1Reg == noreg, "");
1710 assert(cx2Reg == noreg, "");
1711 assert_different_registers(objReg, boxReg, tmpReg, scrReg);
1712 }
1714 if (counters != NULL) {
1715 atomic_incl(ExternalAddress((address)counters->total_entry_count_addr()), scrReg);
1716 }
1717 if (EmitSync & 1) {
1718 // set box->dhw = unused_mark (3)
1719 // Force all sync thru slow-path: slow_enter() and slow_exit()
1720 movptr (Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
1721 cmpptr (rsp, (int32_t)NULL_WORD);
1722 } else
1723 if (EmitSync & 2) {
1724 Label DONE_LABEL ;
1725 if (UseBiasedLocking) {
1726 // Note: tmpReg maps to the swap_reg argument and scrReg to the tmp_reg argument.
1727 biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, counters);
1728 }
1730 movptr(tmpReg, Address(objReg, 0)); // fetch markword
1731 orptr (tmpReg, 0x1);
1732 movptr(Address(boxReg, 0), tmpReg); // Anticipate successful CAS
1733 if (os::is_MP()) {
1734 lock();
1735 }
1736 cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg
1737 jccb(Assembler::equal, DONE_LABEL);
1738 // Recursive locking
1739 subptr(tmpReg, rsp);
1740 andptr(tmpReg, (int32_t) (NOT_LP64(0xFFFFF003) LP64_ONLY(7 - os::vm_page_size())) );
1741 movptr(Address(boxReg, 0), tmpReg);
1742 bind(DONE_LABEL);
1743 } else {
1744 // Possible cases that we'll encounter in fast_lock
1745 // ------------------------------------------------
1746 // * Inflated
1747 // -- unlocked
1748 // -- Locked
1749 // = by self
1750 // = by other
1751 // * biased
1752 // -- by Self
1753 // -- by other
1754 // * neutral
1755 // * stack-locked
1756 // -- by self
1757 // = sp-proximity test hits
1758 // = sp-proximity test generates false-negative
1759 // -- by other
1760 //
1762 Label IsInflated, DONE_LABEL;
1764 // it's stack-locked, biased or neutral
1765 // TODO: optimize away redundant LDs of obj->mark and improve the markword triage
1766 // order to reduce the number of conditional branches in the most common cases.
1767 // Beware -- there's a subtle invariant that fetch of the markword
1768 // at [FETCH], below, will never observe a biased encoding (*101b).
1769 // If this invariant is not held we risk exclusion (safety) failure.
1770 if (UseBiasedLocking && !UseOptoBiasInlining) {
1771 biased_locking_enter(boxReg, objReg, tmpReg, scrReg, true, DONE_LABEL, NULL, counters);
1772 }
1774 #if INCLUDE_RTM_OPT
1775 if (UseRTMForStackLocks && use_rtm) {
1776 rtm_stack_locking(objReg, tmpReg, scrReg, cx2Reg,
1777 stack_rtm_counters, method_data, profile_rtm,
1778 DONE_LABEL, IsInflated);
1779 }
1780 #endif // INCLUDE_RTM_OPT
1782 movptr(tmpReg, Address(objReg, 0)); // [FETCH]
1783 testptr(tmpReg, markOopDesc::monitor_value); // inflated vs stack-locked|neutral|biased
1784 jccb(Assembler::notZero, IsInflated);
1786 // Attempt stack-locking ...
1787 orptr (tmpReg, markOopDesc::unlocked_value);
1788 movptr(Address(boxReg, 0), tmpReg); // Anticipate successful CAS
1789 if (os::is_MP()) {
1790 lock();
1791 }
1792 cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg
1793 if (counters != NULL) {
1794 cond_inc32(Assembler::equal,
1795 ExternalAddress((address)counters->fast_path_entry_count_addr()));
1796 }
1797 jcc(Assembler::equal, DONE_LABEL); // Success
1799 // Recursive locking.
1800 // The object is stack-locked: markword contains stack pointer to BasicLock.
1801 // Locked by current thread if difference with current SP is less than one page.
1802 subptr(tmpReg, rsp);
1803 // The next instruction sets ZFlag == 1 (Success) if the difference is less than one page.
1804 andptr(tmpReg, (int32_t) (NOT_LP64(0xFFFFF003) LP64_ONLY(7 - os::vm_page_size())) );
1805 movptr(Address(boxReg, 0), tmpReg);
1806 if (counters != NULL) {
1807 cond_inc32(Assembler::equal,
1808 ExternalAddress((address)counters->fast_path_entry_count_addr()));
1809 }
1810 jmp(DONE_LABEL);
1812 bind(IsInflated);
1813 // The object is inflated. tmpReg contains pointer to ObjectMonitor* + 2(monitor_value)
1815 #if INCLUDE_RTM_OPT
1816 // Use the same RTM locking code in 32- and 64-bit VM.
1817 if (use_rtm) {
1818 rtm_inflated_locking(objReg, boxReg, tmpReg, scrReg, cx1Reg, cx2Reg,
1819 rtm_counters, method_data, profile_rtm, DONE_LABEL);
1820 } else {
1821 #endif // INCLUDE_RTM_OPT
1823 #ifndef _LP64
1824 // The object is inflated.
1825 //
1826 // TODO-FIXME: eliminate the ugly use of manifest constants:
1827 // Use markOopDesc::monitor_value instead of "2".
1828 // use markOop::unused_mark() instead of "3".
1829 // The tmpReg value is an objectMonitor reference ORed with
1830 // markOopDesc::monitor_value (2). We can either convert tmpReg to an
1831 // objectmonitor pointer by masking off the "2" bit or we can just
1832 // use tmpReg as an objectmonitor pointer but bias the objectmonitor
1833 // field offsets with "-2" to compensate for and annul the low-order tag bit.
1834 //
1835 // I use the latter as it avoids AGI stalls.
1836 // As such, we write "mov r, [tmpReg+OFFSETOF(Owner)-2]"
1837 // instead of "mov r, [tmpReg+OFFSETOF(Owner)]".
1838 //
1839 #define OFFSET_SKEWED(f) ((ObjectMonitor::f ## _offset_in_bytes())-2)
1841 // boxReg refers to the on-stack BasicLock in the current frame.
1842 // We'd like to write:
1843 // set box->_displaced_header = markOop::unused_mark(). Any non-0 value suffices.
1844 // This is convenient but results in an ST-before-CAS penalty. The following CAS suffers
1845 // additional latency as we have another ST in the store buffer that must drain.
1847 if (EmitSync & 8192) {
1848 movptr(Address(boxReg, 0), 3); // results in ST-before-CAS penalty
1849 get_thread (scrReg);
1850 movptr(boxReg, tmpReg); // consider: LEA box, [tmp-2]
1851 movptr(tmpReg, NULL_WORD); // consider: xor vs mov
1852 if (os::is_MP()) {
1853 lock();
1854 }
1855 cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
1856 } else
1857 if ((EmitSync & 128) == 0) { // avoid ST-before-CAS
1858 movptr(scrReg, boxReg);
1859 movptr(boxReg, tmpReg); // consider: LEA box, [tmp-2]
1861 // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes
1862 if ((EmitSync & 2048) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
1863 // prefetchw [eax + Offset(_owner)-2]
1864 prefetchw(Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
1865 }
1867 if ((EmitSync & 64) == 0) {
1868 // Optimistic form: consider XORL tmpReg,tmpReg
1869 movptr(tmpReg, NULL_WORD);
1870 } else {
1871 // Can suffer RTS->RTO upgrades on shared or cold $ lines
1872 // Test-And-CAS instead of CAS
1873 movptr(tmpReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)); // rax, = m->_owner
1874 testptr(tmpReg, tmpReg); // Locked ?
1875 jccb (Assembler::notZero, DONE_LABEL);
1876 }
1878 // Appears unlocked - try to swing _owner from null to non-null.
1879 // Ideally, I'd manifest "Self" with get_thread and then attempt
1880 // to CAS the register containing Self into m->Owner.
1881 // But we don't have enough registers, so instead we can either try to CAS
1882 // rsp or the address of the box (in scr) into &m->owner. If the CAS succeeds
1883 // we later store "Self" into m->Owner. Transiently storing a stack address
1884 // (rsp or the address of the box) into m->owner is harmless.
1885 // Invariant: tmpReg == 0. tmpReg is EAX which is the implicit cmpxchg comparand.
1886 if (os::is_MP()) {
1887 lock();
1888 }
1889 cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
1890 movptr(Address(scrReg, 0), 3); // box->_displaced_header = 3
1891 jccb (Assembler::notZero, DONE_LABEL);
1892 get_thread (scrReg); // beware: clobbers ICCs
1893 movptr(Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2), scrReg);
1894 xorptr(boxReg, boxReg); // set icc.ZFlag = 1 to indicate success
1896 // If the CAS fails we can either retry or pass control to the slow-path.
1897 // We use the latter tactic.
1898 // Pass the CAS result in the icc.ZFlag into DONE_LABEL
1899 // If the CAS was successful ...
1900 // Self has acquired the lock
1901 // Invariant: m->_recursions should already be 0, so we don't need to explicitly set it.
1902 // Intentional fall-through into DONE_LABEL ...
1903 } else {
1904 movptr(Address(boxReg, 0), intptr_t(markOopDesc::unused_mark())); // results in ST-before-CAS penalty
1905 movptr(boxReg, tmpReg);
1907 // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes
1908 if ((EmitSync & 2048) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
1909 // prefetchw [eax + Offset(_owner)-2]
1910 prefetchw(Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
1911 }
1913 if ((EmitSync & 64) == 0) {
1914 // Optimistic form
1915 xorptr (tmpReg, tmpReg);
1916 } else {
1917 // Can suffer RTS->RTO upgrades on shared or cold $ lines
1918 movptr(tmpReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)); // rax, = m->_owner
1919 testptr(tmpReg, tmpReg); // Locked ?
1920 jccb (Assembler::notZero, DONE_LABEL);
1921 }
1923 // Appears unlocked - try to swing _owner from null to non-null.
1924 // Use either "Self" (in scr) or rsp as thread identity in _owner.
1925 // Invariant: tmpReg == 0. tmpReg is EAX which is the implicit cmpxchg comparand.
1926 get_thread (scrReg);
1927 if (os::is_MP()) {
1928 lock();
1929 }
1930 cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
1932 // If the CAS fails we can either retry or pass control to the slow-path.
1933 // We use the latter tactic.
1934 // Pass the CAS result in the icc.ZFlag into DONE_LABEL
1935 // If the CAS was successful ...
1936 // Self has acquired the lock
1937 // Invariant: m->_recursions should already be 0, so we don't need to explicitly set it.
1938 // Intentional fall-through into DONE_LABEL ...
1939 }
1940 #else // _LP64
1941 // It's inflated
1943 // TODO: someday avoid the ST-before-CAS penalty by
1944 // relocating (deferring) the following ST.
1945 // We should also think about trying a CAS without having
1946 // fetched _owner. If the CAS is successful we may
1947 // avoid an RTO->RTS upgrade on the $line.
1949 // Without a cast to int32_t, movptr will destroy r10, which typically holds obj
1950 movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
1952 movptr (boxReg, tmpReg);
1953 movptr (tmpReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
1954 testptr(tmpReg, tmpReg);
1955 jccb (Assembler::notZero, DONE_LABEL);
1957 // It's inflated and appears unlocked
1958 if (os::is_MP()) {
1959 lock();
1960 }
1961 cmpxchgptr(r15_thread, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
1962 // Intentional fall-through into DONE_LABEL ...
1963 #endif // _LP64
1965 #if INCLUDE_RTM_OPT
1966 } // use_rtm()
1967 #endif
1968 // DONE_LABEL is a hot target - we'd really like to place it at the
1969 // start of cache line by padding with NOPs.
1970 // See the AMD and Intel software optimization manuals for the
1971 // most efficient "long" NOP encodings.
1972 // Unfortunately none of our alignment mechanisms suffice.
1973 bind(DONE_LABEL);
1975 // At DONE_LABEL the icc ZFlag is set as follows ...
1976 // Fast_Unlock uses the same protocol.
1977 // ZFlag == 1 -> Success
1978 // ZFlag == 0 -> Failure - force control through the slow-path
1979 }
1980 }
1982 // obj: object to unlock
1983 // box: box address (displaced header location), killed. Must be EAX.
1984 // tmp: killed, cannot be obj nor box.
1985 //
1986 // Some commentary on balanced locking:
1987 //
1988 // Fast_Lock and Fast_Unlock are emitted only for provably balanced lock sites.
1989 // Methods that don't have provably balanced locking are forced to run in the
1990 // interpreter - such methods won't be compiled to use fast_lock and fast_unlock.
1991 // The interpreter provides two properties:
1992 // I1: At return-time the interpreter automatically and quietly unlocks any
1993 // objects acquired by the current activation (frame). Recall that the
1994 // interpreter maintains an on-stack list of locks currently held by
1995 // a frame.
1996 // I2: If a method attempts to unlock an object that is not held by
1997 // the frame, the interpreter throws IMSX.
1998 //
1999 // Let's say A(), which has provably balanced locking, acquires O and then calls B().
2000 // B() doesn't have provably balanced locking so it runs in the interpreter.
2001 // Control returns to A() and A() unlocks O. By I1 and I2, above, we know that O
2002 // is still locked by A().
2003 //
2004 // The only other source of unbalanced locking would be JNI. The "Java Native Interface:
2005 // Programmer's Guide and Specification" claims that an object locked by jni_monitorenter
2006 // should not be unlocked by "normal" java-level locking and vice-versa. The specification
2007 // doesn't specify what will occur if a program engages in such mixed-mode locking, however.
2009 void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpReg, bool use_rtm) {
2010 assert(boxReg == rax, "");
2011 assert_different_registers(objReg, boxReg, tmpReg);
2013 if (EmitSync & 4) {
2014 // Disable - inhibit all inlining. Force control through the slow-path
2015 cmpptr (rsp, 0);
2016 } else
2017 if (EmitSync & 8) {
2018 Label DONE_LABEL;
2019 if (UseBiasedLocking) {
2020 biased_locking_exit(objReg, tmpReg, DONE_LABEL);
2021 }
2022 // Classic stack-locking code ...
2023 // Check whether the displaced header is 0
2024 //(=> recursive unlock)
2025 movptr(tmpReg, Address(boxReg, 0));
2026 testptr(tmpReg, tmpReg);
2027 jccb(Assembler::zero, DONE_LABEL);
2028 // If not recursive lock, reset the header to displaced header
2029 if (os::is_MP()) {
2030 lock();
2031 }
2032 cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
2033 bind(DONE_LABEL);
2034 } else {
2035 Label DONE_LABEL, Stacked, CheckSucc;
2037 // Critically, the biased locking test must have precedence over
2038 // and appear before the (box->dhw == 0) recursive stack-lock test.
2039 if (UseBiasedLocking && !UseOptoBiasInlining) {
2040 biased_locking_exit(objReg, tmpReg, DONE_LABEL);
2041 }
2043 #if INCLUDE_RTM_OPT
2044 if (UseRTMForStackLocks && use_rtm) {
2045 assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking");
2046 Label L_regular_unlock;
2047 movptr(tmpReg, Address(objReg, 0)); // fetch markword
2048 andptr(tmpReg, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
2049 cmpptr(tmpReg, markOopDesc::unlocked_value); // bits = 001 unlocked
2050 jccb(Assembler::notEqual, L_regular_unlock); // if !HLE RegularLock
2051 xend(); // otherwise end...
2052 jmp(DONE_LABEL); // ... and we're done
2053 bind(L_regular_unlock);
2054 }
2055 #endif
2057 cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD); // Examine the displaced header
2058 jcc (Assembler::zero, DONE_LABEL); // 0 indicates recursive stack-lock
2059 movptr(tmpReg, Address(objReg, 0)); // Examine the object's markword
2060 testptr(tmpReg, markOopDesc::monitor_value); // Inflated?
2061 jccb (Assembler::zero, Stacked);
2063 // It's inflated.
2064 #if INCLUDE_RTM_OPT
2065 if (use_rtm) {
2066 Label L_regular_inflated_unlock;
2067 // Clean monitor_value bit to get valid pointer
2068 int owner_offset = ObjectMonitor::owner_offset_in_bytes() - markOopDesc::monitor_value;
2069 movptr(boxReg, Address(tmpReg, owner_offset));
2070 testptr(boxReg, boxReg);
2071 jccb(Assembler::notZero, L_regular_inflated_unlock);
2072 xend();
2073 jmpb(DONE_LABEL);
2074 bind(L_regular_inflated_unlock);
2075 }
2076 #endif
2078 // Despite our balanced locking property we still check that m->_owner == Self
2079 // as java routines or native JNI code called by this thread might
2080 // have released the lock.
2081 // Refer to the comments in synchronizer.cpp for how we might encode extra
2082 // state in _succ so we can avoid fetching EntryList|cxq.
2083 //
2084 // I'd like to add more cases in fast_lock() and fast_unlock() --
2085 // such as recursive enter and exit -- but we have to be wary of
2086 // I$ bloat, T$ effects and BP$ effects.
2087 //
2088 // If there's no contention try a 1-0 exit. That is, exit without
2089 // a costly MEMBAR or CAS. See synchronizer.cpp for details on how
2090 // we detect and recover from the race that the 1-0 exit admits.
2091 //
2092 // Conceptually Fast_Unlock() must execute a STST|LDST "release" barrier
2093 // before it STs null into _owner, releasing the lock. Updates
2094 // to data protected by the critical section must be visible before
2095 // we drop the lock (and thus before any other thread could acquire
2096 // the lock and observe the fields protected by the lock).
2097 // IA32's memory-model is TSO, so STs are ordered with respect to
2098 // each other and there's no need for an explicit barrier (fence).
2099 // See also http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
2100 #ifndef _LP64
2101 get_thread (boxReg);
2102 if ((EmitSync & 4096) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
2103 // prefetchw [ebx + Offset(_owner)-2]
2104 prefetchw(Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
2105 }
2107 // Note that we could employ various encoding schemes to reduce
2108 // the number of loads below (currently 4) to just 2 or 3.
2109 // Refer to the comments in synchronizer.cpp.
2110 // In practice the chain of fetches doesn't seem to impact performance, however.
2111 if ((EmitSync & 65536) == 0 && (EmitSync & 256)) {
2112 // Attempt to reduce branch density - AMD's branch predictor.
2113 xorptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
2114 orptr(boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2));
2115 orptr(boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2));
2116 orptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2));
2117 jccb (Assembler::notZero, DONE_LABEL);
2118 movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), NULL_WORD);
2119 jmpb (DONE_LABEL);
2120 } else {
2121 xorptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
2122 orptr(boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2));
2123 jccb (Assembler::notZero, DONE_LABEL);
2124 movptr(boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2));
2125 orptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2));
2126 jccb (Assembler::notZero, CheckSucc);
2127 movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), NULL_WORD);
2128 jmpb (DONE_LABEL);
2129 }
2131 // The following code fragment (EmitSync & 65536) improves the performance of
2132 // contended applications and contended synchronization microbenchmarks.
2133 // Unfortunately the emission of the code - even though not executed - causes regressions
2134 // in scimark and jetstream, evidently because of $ effects. Replacing the code
2135 // with an equal number of never-executed NOPs results in the same regression.
2136 // We leave it off by default.
2138 if ((EmitSync & 65536) != 0) {
2139 Label LSuccess, LGoSlowPath ;
2141 bind (CheckSucc);
2143 // Optional pre-test ... it's safe to elide this
2144 if ((EmitSync & 16) == 0) {
2145 cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD);
2146 jccb (Assembler::zero, LGoSlowPath);
2147 }
2149 // We have a classic Dekker-style idiom:
2150 // ST m->_owner = 0 ; MEMBAR; LD m->_succ
2151 // There are a number of ways to implement the barrier:
2152 // (1) lock:andl &m->_owner, 0
2153 // is fast, but masm doesn't currently support the "ANDL M,IMM32" form.
2154 // LOCK: ANDL [ebx+Offset(_Owner)-2], 0
2155 // Encodes as 81 31 OFF32 IMM32 or 83 63 OFF8 IMM8
2156 // (2) If supported, an explicit MFENCE is appealing.
2157 // In older IA32 processors MFENCE is slower than lock:add or xchg
2158 // particularly if the write-buffer is full, as might be the case if
2159 // stores closely precede the fence or fence-equivalent instruction.
2160 // In more modern implementations MFENCE appears faster, however.
2161 // (3) In lieu of an explicit fence, use lock:addl to the top-of-stack
2162 // The $lines underlying the top-of-stack should be in M-state.
2163 // The locked add instruction is serializing, of course.
2164 // (4) Use xchg, which is serializing
2165 // mov boxReg, 0; xchgl boxReg, [tmpReg + Offset(_owner)-2] also works
2166 // (5) ST m->_owner = 0 and then execute lock:orl &m->_succ, 0.
2167 // The integer condition codes will tell us if succ was 0.
2168 // Since _succ and _owner should reside in the same $line and
2169 // we just stored into _owner, it's likely that the $line
2170 // remains in M-state for the lock:orl.
2171 //
2172 // We currently use (3), although it's likely that switching to (2)
2173 // will be the right choice in the future.
2175 movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), NULL_WORD);
2176 if (os::is_MP()) {
2177 if (VM_Version::supports_sse2() && 1 == FenceInstruction) {
2178 mfence();
2179 } else {
2180 lock (); addptr(Address(rsp, 0), 0);
2181 }
2182 }
2183 // Ratify _succ remains non-null
2184 cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), 0);
2185 jccb (Assembler::notZero, LSuccess);
2187 xorptr(boxReg, boxReg); // box is really EAX
2188 if (os::is_MP()) { lock(); }
2189 cmpxchgptr(rsp, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
2190 jccb (Assembler::notEqual, LSuccess);
2191 // Since we're low on registers we installed rsp as a placeholder in _owner.
2192 // Now install Self over rsp. This is safe as we're transitioning from
2193 // non-null to non-null.
2194 get_thread (boxReg);
2195 movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), boxReg);
2196 // Intentional fall-through into LGoSlowPath ...
2198 bind (LGoSlowPath);
2199 orptr(boxReg, 1); // set ICC.ZF=0 to indicate failure
2200 jmpb (DONE_LABEL);
2202 bind (LSuccess);
2203 xorptr(boxReg, boxReg); // set ICC.ZF=1 to indicate success
2204 jmpb (DONE_LABEL);
2205 }
2207 bind (Stacked);
2208 // It's not inflated and it's not recursively stack-locked and it's not biased.
2209 // It must be stack-locked.
2210 // Try to reset the header to displaced header.
2211 // The "box" value on the stack is stable, so we can reload
2212 // and be assured we observe the same value as above.
2213 movptr(tmpReg, Address(boxReg, 0));
2214 if (os::is_MP()) {
2215 lock();
2216 }
2217 cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
2218 // Intentional fall-through into DONE_LABEL
2220 // DONE_LABEL is a hot target - we'd really like to place it at the
2221 // start of cache line by padding with NOPs.
2222 // See the AMD and Intel software optimization manuals for the
2223 // most efficient "long" NOP encodings.
2224 // Unfortunately none of our alignment mechanisms suffice.
2225 if ((EmitSync & 65536) == 0) {
2226 bind (CheckSucc);
2227 }
2228 #else // _LP64
2229 // It's inflated
2230 movptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
2231 xorptr(boxReg, r15_thread);
2232 orptr (boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2));
2233 jccb (Assembler::notZero, DONE_LABEL);
2234 movptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2));
2235 orptr (boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2));
2236 jccb (Assembler::notZero, CheckSucc);
2237 movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD);
2238 jmpb (DONE_LABEL);
2240 if ((EmitSync & 65536) == 0) {
2241 Label LSuccess, LGoSlowPath ;
2242 bind (CheckSucc);
2243 cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD);
2244 jccb (Assembler::zero, LGoSlowPath);
2246 // I'd much rather use lock:andl m->_owner, 0 as it's faster than
2247 // the explicit ST;MEMBAR combination, but masm doesn't currently support
2248 // "ANDQ M,IMM". Don't use MFENCE here. lock:add to TOS, xchg, etc
2249 // are all faster when the write buffer is populated.
2250 movptr (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD);
2251 if (os::is_MP()) {
2252 lock (); addl (Address(rsp, 0), 0);
2253 }
2254 cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD);
2255 jccb (Assembler::notZero, LSuccess);
2257 movptr (boxReg, (int32_t)NULL_WORD); // box is really EAX
2258 if (os::is_MP()) { lock(); }
2259 cmpxchgptr(r15_thread, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
2260 jccb (Assembler::notEqual, LSuccess);
2261 // Intentional fall-through into slow-path
2263 bind (LGoSlowPath);
2264 orl (boxReg, 1); // set ICC.ZF=0 to indicate failure
2265 jmpb (DONE_LABEL);
2267 bind (LSuccess);
2268 testl (boxReg, 0); // set ICC.ZF=1 to indicate success
2269 jmpb (DONE_LABEL);
2270 }
2272 bind (Stacked);
2273 movptr(tmpReg, Address (boxReg, 0)); // re-fetch
2274 if (os::is_MP()) { lock(); }
2275 cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
2277 if (EmitSync & 65536) {
2278 bind (CheckSucc);
2279 }
2280 #endif
2281 bind(DONE_LABEL);
2282 // Avoid branch to branch on AMD processors
2283 if (EmitSync & 32768) {
2284 nop();
2285 }
2286 }
2287 }
2288 #endif // COMPILER2
2290 void MacroAssembler::c2bool(Register x) {
2291 // implements x == 0 ? 0 : 1
2292 // note: must only look at the least-significant byte of x
2293 // since C-style booleans are stored in one byte
2294 // only! (was bug)
2295 andl(x, 0xFF);
2296 setb(Assembler::notZero, x);
2297 }
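// A minimal sketch (not VM code) of the value c2bool computes, assuming a
// C-style boolean whose upper three bytes may contain garbage:
static inline int c2bool_semantics(int x) {
  return ((x & 0xFF) != 0) ? 1 : 0;  // normalize the low byte to exactly 0 or 1
}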
2299 // Wouldn't need if AddressLiteral version had new name
2300 void MacroAssembler::call(Label& L, relocInfo::relocType rtype) {
2301 Assembler::call(L, rtype);
2302 }
2304 void MacroAssembler::call(Register entry) {
2305 Assembler::call(entry);
2306 }
2308 void MacroAssembler::call(AddressLiteral entry) {
2309 if (reachable(entry)) {
2310 Assembler::call_literal(entry.target(), entry.rspec());
2311 } else {
2312 lea(rscratch1, entry);
2313 Assembler::call(rscratch1);
2314 }
2315 }
2317 void MacroAssembler::ic_call(address entry) {
2318 RelocationHolder rh = virtual_call_Relocation::spec(pc());
2319 movptr(rax, (intptr_t)Universe::non_oop_word());
2320 call(AddressLiteral(entry, rh));
2321 }
2323 // Implementation of call_VM versions
2325 void MacroAssembler::call_VM(Register oop_result,
2326 address entry_point,
2327 bool check_exceptions) {
2328 Label C, E;
2329 call(C, relocInfo::none);
2330 jmp(E);
2332 bind(C);
2333 call_VM_helper(oop_result, entry_point, 0, check_exceptions);
2334 ret(0);
2336 bind(E);
2337 }
2339 void MacroAssembler::call_VM(Register oop_result,
2340 address entry_point,
2341 Register arg_1,
2342 bool check_exceptions) {
2343 Label C, E;
2344 call(C, relocInfo::none);
2345 jmp(E);
2347 bind(C);
2348 pass_arg1(this, arg_1);
2349 call_VM_helper(oop_result, entry_point, 1, check_exceptions);
2350 ret(0);
2352 bind(E);
2353 }
2355 void MacroAssembler::call_VM(Register oop_result,
2356 address entry_point,
2357 Register arg_1,
2358 Register arg_2,
2359 bool check_exceptions) {
2360 Label C, E;
2361 call(C, relocInfo::none);
2362 jmp(E);
2364 bind(C);
2366 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2368 pass_arg2(this, arg_2);
2369 pass_arg1(this, arg_1);
2370 call_VM_helper(oop_result, entry_point, 2, check_exceptions);
2371 ret(0);
2373 bind(E);
2374 }
2376 void MacroAssembler::call_VM(Register oop_result,
2377 address entry_point,
2378 Register arg_1,
2379 Register arg_2,
2380 Register arg_3,
2381 bool check_exceptions) {
2382 Label C, E;
2383 call(C, relocInfo::none);
2384 jmp(E);
2386 bind(C);
2388 LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
2389 LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
2390 pass_arg3(this, arg_3);
2392 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2393 pass_arg2(this, arg_2);
2395 pass_arg1(this, arg_1);
2396 call_VM_helper(oop_result, entry_point, 3, check_exceptions);
2397 ret(0);
2399 bind(E);
2400 }
2402 void MacroAssembler::call_VM(Register oop_result,
2403 Register last_java_sp,
2404 address entry_point,
2405 int number_of_arguments,
2406 bool check_exceptions) {
2407 Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
2408 call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
2409 }
2411 void MacroAssembler::call_VM(Register oop_result,
2412 Register last_java_sp,
2413 address entry_point,
2414 Register arg_1,
2415 bool check_exceptions) {
2416 pass_arg1(this, arg_1);
2417 call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
2418 }
2420 void MacroAssembler::call_VM(Register oop_result,
2421 Register last_java_sp,
2422 address entry_point,
2423 Register arg_1,
2424 Register arg_2,
2425 bool check_exceptions) {
2427 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2428 pass_arg2(this, arg_2);
2429 pass_arg1(this, arg_1);
2430 call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
2431 }
2433 void MacroAssembler::call_VM(Register oop_result,
2434 Register last_java_sp,
2435 address entry_point,
2436 Register arg_1,
2437 Register arg_2,
2438 Register arg_3,
2439 bool check_exceptions) {
2440 LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
2441 LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
2442 pass_arg3(this, arg_3);
2443 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2444 pass_arg2(this, arg_2);
2445 pass_arg1(this, arg_1);
2446 call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
2447 }
2449 void MacroAssembler::super_call_VM(Register oop_result,
2450 Register last_java_sp,
2451 address entry_point,
2452 int number_of_arguments,
2453 bool check_exceptions) {
2454 Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
2455 MacroAssembler::call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
2456 }
2458 void MacroAssembler::super_call_VM(Register oop_result,
2459 Register last_java_sp,
2460 address entry_point,
2461 Register arg_1,
2462 bool check_exceptions) {
2463 pass_arg1(this, arg_1);
2464 super_call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
2465 }
2467 void MacroAssembler::super_call_VM(Register oop_result,
2468 Register last_java_sp,
2469 address entry_point,
2470 Register arg_1,
2471 Register arg_2,
2472 bool check_exceptions) {
2474 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2475 pass_arg2(this, arg_2);
2476 pass_arg1(this, arg_1);
2477 super_call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
2478 }
2480 void MacroAssembler::super_call_VM(Register oop_result,
2481 Register last_java_sp,
2482 address entry_point,
2483 Register arg_1,
2484 Register arg_2,
2485 Register arg_3,
2486 bool check_exceptions) {
2487 LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
2488 LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
2489 pass_arg3(this, arg_3);
2490 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2491 pass_arg2(this, arg_2);
2492 pass_arg1(this, arg_1);
2493 super_call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
2494 }
2496 void MacroAssembler::call_VM_base(Register oop_result,
2497 Register java_thread,
2498 Register last_java_sp,
2499 address entry_point,
2500 int number_of_arguments,
2501 bool check_exceptions) {
2502 // determine java_thread register
2503 if (!java_thread->is_valid()) {
2504 #ifdef _LP64
2505 java_thread = r15_thread;
2506 #else
2507 java_thread = rdi;
2508 get_thread(java_thread);
2509 #endif // LP64
2510 }
2511 // determine last_java_sp register
2512 if (!last_java_sp->is_valid()) {
2513 last_java_sp = rsp;
2514 }
2515 // debugging support
2516 assert(number_of_arguments >= 0 , "cannot have negative number of arguments");
2517 LP64_ONLY(assert(java_thread == r15_thread, "unexpected register"));
2518 #ifdef ASSERT
2519 // TraceBytecodes does not use r12 but saves it over the call, so don't verify
2520 // r12 is the heapbase.
2521 LP64_ONLY(if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
2522 #endif // ASSERT
2524 assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result");
2525 assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");
2527 // push java thread (becomes first argument of C function)
2529 NOT_LP64(push(java_thread); number_of_arguments++);
2530 LP64_ONLY(mov(c_rarg0, r15_thread));
2532 // set last Java frame before call
2533 assert(last_java_sp != rbp, "can't use ebp/rbp");
2535 // Only interpreter should have to set fp
2536 set_last_Java_frame(java_thread, last_java_sp, rbp, NULL);
2538 // do the call, remove parameters
2539 MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
2541 // restore the thread (cannot use the pushed argument since arguments
2542 // may be overwritten by C code generated by an optimizing compiler);
2543 // however, we can use the register value directly if it is callee saved.
2544 if (LP64_ONLY(true ||) java_thread == rdi || java_thread == rsi) {
2545 // rdi & rsi (also r15) are callee saved -> nothing to do
2546 #ifdef ASSERT
2547 guarantee(java_thread != rax, "change this code");
2548 push(rax);
2549 { Label L;
2550 get_thread(rax);
2551 cmpptr(java_thread, rax);
2552 jcc(Assembler::equal, L);
2553 STOP("MacroAssembler::call_VM_base: rdi not callee saved?");
2554 bind(L);
2555 }
2556 pop(rax);
2557 #endif
2558 } else {
2559 get_thread(java_thread);
2560 }
2561 // reset last Java frame
2562 // Only interpreter should have to clear fp
2563 reset_last_Java_frame(java_thread, true, false);
2565 #ifndef CC_INTERP
2566 // C++ interp handles this in the interpreter
2567 check_and_handle_popframe(java_thread);
2568 check_and_handle_earlyret(java_thread);
2569 #endif /* CC_INTERP */
2571 if (check_exceptions) {
2572 // check for pending exceptions (java_thread is set upon return)
2573 cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
2574 #ifndef _LP64
2575 jump_cc(Assembler::notEqual,
2576 RuntimeAddress(StubRoutines::forward_exception_entry()));
2577 #else
2578 // This used to jump conditionally to forward_exception; however, it is
2579 // possible, if we relocate, that the branch will not reach. So we must jump
2580 // around it so we can always reach the target.
2582 Label ok;
2583 jcc(Assembler::equal, ok);
2584 jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2585 bind(ok);
2586 #endif // LP64
2587 }
2589 // get oop result if there is one and reset the value in the thread
2590 if (oop_result->is_valid()) {
2591 get_vm_result(oop_result, java_thread);
2592 }
2593 }
2595 void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
2597 // Calculate the value for last_Java_sp.
2598 // This is somewhat subtle: call_VM does an intermediate call
2599 // which places a return address on the stack just under the
2600 // stack pointer as the user finished with it. This allows
2601 // us to retrieve last_Java_pc from last_Java_sp[-1].
2602 // On 32bit we then have to push additional args on the stack to accomplish
2603 // the actual requested call. On 64bit call_VM only can use register args
2604 // so the only extra space is the return address that call_VM created.
2605 // This hopefully explains the calculations here.
2607 #ifdef _LP64
2608 // We've pushed one address, correct last_Java_sp
2609 lea(rax, Address(rsp, wordSize));
2610 #else
2611 lea(rax, Address(rsp, (1 + number_of_arguments) * wordSize));
2612 #endif // LP64
2614 call_VM_base(oop_result, noreg, rax, entry_point, number_of_arguments, check_exceptions);
2616 }
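// A sketch of the stack layout the lea above assumes (illustrative, not VM
// code). The stack grows toward lower addresses:
//
//   high addresses:  ...caller's frame...    <- last_Java_sp points here
//                    [ return address ]         (== last_Java_sp[-1], the last_Java_pc)
//                    [ arg_1 ] ... [ arg_n ]    (32-bit only; 64-bit passes args in registers)
//   low addresses:                            <- rsp
//
// Hence last_Java_sp is rsp + wordSize on 64-bit and
// rsp + (1 + number_of_arguments) * wordSize on 32-bit.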
2618 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
2619 call_VM_leaf_base(entry_point, number_of_arguments);
2620 }
2622 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
2623 pass_arg0(this, arg_0);
2624 call_VM_leaf(entry_point, 1);
2625 }
2627 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
2629 LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
2630 pass_arg1(this, arg_1);
2631 pass_arg0(this, arg_0);
2632 call_VM_leaf(entry_point, 2);
2633 }
2635 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
2636 LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
2637 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2638 pass_arg2(this, arg_2);
2639 LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
2640 pass_arg1(this, arg_1);
2641 pass_arg0(this, arg_0);
2642 call_VM_leaf(entry_point, 3);
2643 }
2645 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
2646 pass_arg0(this, arg_0);
2647 MacroAssembler::call_VM_leaf_base(entry_point, 1);
2648 }
2650 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
2652 LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
2653 pass_arg1(this, arg_1);
2654 pass_arg0(this, arg_0);
2655 MacroAssembler::call_VM_leaf_base(entry_point, 2);
2656 }
2658 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
2659 LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
2660 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2661 pass_arg2(this, arg_2);
2662 LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
2663 pass_arg1(this, arg_1);
2664 pass_arg0(this, arg_0);
2665 MacroAssembler::call_VM_leaf_base(entry_point, 3);
2666 }
2668 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
2669 LP64_ONLY(assert(arg_0 != c_rarg3, "smashed arg"));
2670 LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
2671 LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
2672 pass_arg3(this, arg_3);
2673 LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
2674 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2675 pass_arg2(this, arg_2);
2676 LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
2677 pass_arg1(this, arg_1);
2678 pass_arg0(this, arg_0);
2679 MacroAssembler::call_VM_leaf_base(entry_point, 4);
2680 }
2682 void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
2683 movptr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
2684 movptr(Address(java_thread, JavaThread::vm_result_offset()), NULL_WORD);
2685 verify_oop(oop_result, "broken oop in call_VM_base");
2686 }
2688 void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
2689 movptr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
2690 movptr(Address(java_thread, JavaThread::vm_result_2_offset()), NULL_WORD);
2691 }
2693 void MacroAssembler::check_and_handle_earlyret(Register java_thread) {
2694 }
2696 void MacroAssembler::check_and_handle_popframe(Register java_thread) {
2697 }
2699 void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm) {
2700 if (reachable(src1)) {
2701 cmpl(as_Address(src1), imm);
2702 } else {
2703 lea(rscratch1, src1);
2704 cmpl(Address(rscratch1, 0), imm);
2705 }
2706 }
2708 void MacroAssembler::cmp32(Register src1, AddressLiteral src2) {
2709 assert(!src2.is_lval(), "use cmpptr");
2710 if (reachable(src2)) {
2711 cmpl(src1, as_Address(src2));
2712 } else {
2713 lea(rscratch1, src2);
2714 cmpl(src1, Address(rscratch1, 0));
2715 }
2716 }
2718 void MacroAssembler::cmp32(Register src1, int32_t imm) {
2719 Assembler::cmpl(src1, imm);
2720 }
2722 void MacroAssembler::cmp32(Register src1, Address src2) {
2723 Assembler::cmpl(src1, src2);
2724 }
2726 void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
2727 ucomisd(opr1, opr2);
2729 Label L;
2730 if (unordered_is_less) {
2731 movl(dst, -1);
2732 jcc(Assembler::parity, L);
2733 jcc(Assembler::below , L);
2734 movl(dst, 0);
2735 jcc(Assembler::equal , L);
2736 increment(dst);
2737 } else { // unordered is greater
2738 movl(dst, 1);
2739 jcc(Assembler::parity, L);
2740 jcc(Assembler::above , L);
2741 movl(dst, 0);
2742 jcc(Assembler::equal , L);
2743 decrementl(dst);
2744 }
2745 bind(L);
2746 }
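// A sketch (not VM code) of the three-way compare cmpsd2int materializes,
// assuming IEEE-754 semantics where any comparison involving NaN is
// unordered; this is the dcmpl/dcmpg-style mapping the compiler expects:
static inline int cmp2int_semantics(double a, double b, bool unordered_is_less) {
  if (a != a || b != b) return unordered_is_less ? -1 : 1;  // NaN operand -> unordered
  if (a < b)  return -1;
  if (a == b) return  0;
  return 1;
}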
2748 void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
2749 ucomiss(opr1, opr2);
2751 Label L;
2752 if (unordered_is_less) {
2753 movl(dst, -1);
2754 jcc(Assembler::parity, L);
2755 jcc(Assembler::below , L);
2756 movl(dst, 0);
2757 jcc(Assembler::equal , L);
2758 increment(dst);
2759 } else { // unordered is greater
2760 movl(dst, 1);
2761 jcc(Assembler::parity, L);
2762 jcc(Assembler::above , L);
2763 movl(dst, 0);
2764 jcc(Assembler::equal , L);
2765 decrementl(dst);
2766 }
2767 bind(L);
2768 }
2771 void MacroAssembler::cmp8(AddressLiteral src1, int imm) {
2772 if (reachable(src1)) {
2773 cmpb(as_Address(src1), imm);
2774 } else {
2775 lea(rscratch1, src1);
2776 cmpb(Address(rscratch1, 0), imm);
2777 }
2778 }
2780 void MacroAssembler::cmpptr(Register src1, AddressLiteral src2) {
2781 #ifdef _LP64
2782 if (src2.is_lval()) {
2783 movptr(rscratch1, src2);
2784 Assembler::cmpq(src1, rscratch1);
2785 } else if (reachable(src2)) {
2786 cmpq(src1, as_Address(src2));
2787 } else {
2788 lea(rscratch1, src2);
2789 Assembler::cmpq(src1, Address(rscratch1, 0));
2790 }
2791 #else
2792 if (src2.is_lval()) {
2793 cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
2794 } else {
2795 cmpl(src1, as_Address(src2));
2796 }
2797 #endif // _LP64
2798 }
2800 void MacroAssembler::cmpptr(Address src1, AddressLiteral src2) {
2801 assert(src2.is_lval(), "not a mem-mem compare");
2802 #ifdef _LP64
2803 // moves src2's literal address
2804 movptr(rscratch1, src2);
2805 Assembler::cmpq(src1, rscratch1);
2806 #else
2807 cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
2808 #endif // _LP64
2809 }
2811 void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr) {
2812 if (reachable(adr)) {
2813 if (os::is_MP())
2814 lock();
2815 cmpxchgptr(reg, as_Address(adr));
2816 } else {
2817 lea(rscratch1, adr);
2818 if (os::is_MP())
2819 lock();
2820 cmpxchgptr(reg, Address(rscratch1, 0));
2821 }
2822 }
2824 void MacroAssembler::cmpxchgptr(Register reg, Address adr) {
2825 LP64_ONLY(cmpxchgq(reg, adr)) NOT_LP64(cmpxchgl(reg, adr));
2826 }
2828 void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src) {
2829 if (reachable(src)) {
2830 Assembler::comisd(dst, as_Address(src));
2831 } else {
2832 lea(rscratch1, src);
2833 Assembler::comisd(dst, Address(rscratch1, 0));
2834 }
2835 }
2837 void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src) {
2838 if (reachable(src)) {
2839 Assembler::comiss(dst, as_Address(src));
2840 } else {
2841 lea(rscratch1, src);
2842 Assembler::comiss(dst, Address(rscratch1, 0));
2843 }
2844 }
2847 void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr) {
2848 Condition negated_cond = negate_condition(cond);
2849 Label L;
2850 jcc(negated_cond, L);
2851 pushf(); // Preserve flags
2852 atomic_incl(counter_addr);
2853 popf();
2854 bind(L);
2855 }
2857 int MacroAssembler::corrected_idivl(Register reg) {
2858 // Full implementation of Java idiv and irem; checks for
2859 // special case as described in JVM spec., p.243 & p.271.
2860 // The function returns the (pc) offset of the idivl
2861 // instruction - may be needed for implicit exceptions.
2862 //
2863 // normal case special case
2864 //
2865 // input : rax,: dividend min_int
2866 // reg: divisor (may not be rax,/rdx) -1
2867 //
2868 // output: rax,: quotient (= rax, idiv reg) min_int
2869 // rdx: remainder (= rax, irem reg) 0
2870 assert(reg != rax && reg != rdx, "reg cannot be rax, or rdx register");
2871 const int min_int = 0x80000000;
2872 Label normal_case, special_case;
2874 // check for special case
2875 cmpl(rax, min_int);
2876 jcc(Assembler::notEqual, normal_case);
2877 xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0)
2878 cmpl(reg, -1);
2879 jcc(Assembler::equal, special_case);
2881 // handle normal case
2882 bind(normal_case);
2883 cdql();
2884 int idivl_offset = offset();
2885 idivl(reg);
2887 // normal and special case exit
2888 bind(special_case);
2890 return idivl_offset;
2891 }
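// A sketch (not VM code) of the semantics corrected_idivl implements. The
// hardware idiv faults on min_int / -1, so that case is special-cased to
// return (min_int, 0) as the JVM spec requires:
static inline void corrected_idivl_semantics(int dividend, int divisor,
                                             int* quot, int* rem) {
  if (dividend == (int)0x80000000 && divisor == -1) {
    *quot = dividend;  // quotient wraps back to min_int
    *rem  = 0;         // remainder is 0
  } else {
    *quot = dividend / divisor;  // normal case: rax <- quotient
    *rem  = dividend % divisor;  // rdx <- remainder
  }
}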
2895 void MacroAssembler::decrementl(Register reg, int value) {
2896 if (value == min_jint) {subl(reg, value) ; return; }
2897 if (value < 0) { incrementl(reg, -value); return; }
2898 if (value == 0) { ; return; }
2899 if (value == 1 && UseIncDec) { decl(reg) ; return; }
2900 /* else */ { subl(reg, value) ; return; }
2901 }
2903 void MacroAssembler::decrementl(Address dst, int value) {
2904 if (value == min_jint) {subl(dst, value) ; return; }
2905 if (value < 0) { incrementl(dst, -value); return; }
2906 if (value == 0) { ; return; }
2907 if (value == 1 && UseIncDec) { decl(dst) ; return; }
2908 /* else */ { subl(dst, value) ; return; }
2909 }
2911 void MacroAssembler::division_with_shift (Register reg, int shift_value) {
2912 assert (shift_value > 0, "illegal shift value");
2913 Label _is_positive;
2914 testl (reg, reg);
2915 jcc (Assembler::positive, _is_positive);
2916 int offset = (1 << shift_value) - 1 ;
2918 if (offset == 1) {
2919 incrementl(reg);
2920 } else {
2921 addl(reg, offset);
2922 }
2924 bind (_is_positive);
2925 sarl(reg, shift_value);
2926 }
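// A sketch (not VM code) of the rounding correction above: an arithmetic
// right shift alone rounds toward negative infinity, so negative inputs are
// biased by (2^shift - 1) first to make the shift round toward zero, as Java
// division requires. Assumes an arithmetic right shift of signed ints:
static inline int division_with_shift_semantics(int reg, int shift_value) {
  if (reg < 0) {
    reg += (1 << shift_value) - 1;  // bias so truncation goes toward zero
  }
  return reg >> shift_value;        // divide by 2^shift_value
}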
2928 void MacroAssembler::divsd(XMMRegister dst, AddressLiteral src) {
2929 if (reachable(src)) {
2930 Assembler::divsd(dst, as_Address(src));
2931 } else {
2932 lea(rscratch1, src);
2933 Assembler::divsd(dst, Address(rscratch1, 0));
2934 }
2935 }
2937 void MacroAssembler::divss(XMMRegister dst, AddressLiteral src) {
2938 if (reachable(src)) {
2939 Assembler::divss(dst, as_Address(src));
2940 } else {
2941 lea(rscratch1, src);
2942 Assembler::divss(dst, Address(rscratch1, 0));
2943 }
2944 }
2946 // !defined(COMPILER2) is because of stupid core builds
2947 #if !defined(_LP64) || defined(COMPILER1) || !defined(COMPILER2)
2948 void MacroAssembler::empty_FPU_stack() {
2949 if (VM_Version::supports_mmx()) {
2950 emms();
2951 } else {
2952 for (int i = 8; i-- > 0; ) ffree(i);
2953 }
2954 }
2955 #endif // !LP64 || C1 || !C2
2958 // Defines obj, preserves var_size_in_bytes
2959 void MacroAssembler::eden_allocate(Register obj,
2960 Register var_size_in_bytes,
2961 int con_size_in_bytes,
2962 Register t1,
2963 Label& slow_case) {
2964 assert(obj == rax, "obj must be in rax, for cmpxchg");
2965 assert_different_registers(obj, var_size_in_bytes, t1);
2966 if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
2967 jmp(slow_case);
2968 } else {
2969 Register end = t1;
2970 Label retry;
2971 bind(retry);
2972 ExternalAddress heap_top((address) Universe::heap()->top_addr());
2973 movptr(obj, heap_top);
2974 if (var_size_in_bytes == noreg) {
2975 lea(end, Address(obj, con_size_in_bytes));
2976 } else {
2977 lea(end, Address(obj, var_size_in_bytes, Address::times_1));
2978 }
2979 // if end < obj then we wrapped around => object too long => slow case
2980 cmpptr(end, obj);
2981 jcc(Assembler::below, slow_case);
2982 cmpptr(end, ExternalAddress((address) Universe::heap()->end_addr()));
2983 jcc(Assembler::above, slow_case);
2984 // Compare obj with the top addr, and if still equal, store the new top addr in
2985 // end at the address of the top addr pointer. Sets ZF if they were equal, and
2986 // clears it otherwise. Use lock prefix for atomicity on MPs.
2987 locked_cmpxchgptr(end, heap_top);
2988 jcc(Assembler::notEqual, retry);
2989 }
2990 }
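// A sketch (not VM code) of the lock-free bump-pointer protocol emitted
// above; 'top' and 'heap_end' stand in for Universe::heap()->top_addr() and
// end_addr(), and the GCC __sync builtin stands in for the locked cmpxchg:
static inline char* eden_allocate_semantics(char* volatile* top,
                                            char* heap_end, size_t size) {
  for (;;) {
    char* obj     = *top;
    char* new_top = obj + size;
    if (new_top < obj || new_top > heap_end) {
      return NULL;  // wrapped around or heap full -> slow case
    }
    // Publish new_top only if no other thread advanced top in the meantime.
    if (__sync_bool_compare_and_swap(top, obj, new_top)) {
      return obj;
    }
    // CAS failed: another thread allocated first; retry.
  }
}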
2992 void MacroAssembler::enter() {
2993 push(rbp);
2994 mov(rbp, rsp);
2995 }
2997 // A 5 byte nop that is safe for patching (see patch_verified_entry)
2998 void MacroAssembler::fat_nop() {
2999 if (UseAddressNop) {
3000 addr_nop_5();
3001 } else {
3002 emit_int8(0x26); // es:
3003 emit_int8(0x2e); // cs:
3004 emit_int8(0x64); // fs:
3005 emit_int8(0x65); // gs:
3006 emit_int8((unsigned char)0x90);
3007 }
3008 }
3010 void MacroAssembler::fcmp(Register tmp) {
3011 fcmp(tmp, 1, true, true);
3012 }
3014 void MacroAssembler::fcmp(Register tmp, int index, bool pop_left, bool pop_right) {
3015 assert(!pop_right || pop_left, "usage error");
3016 if (VM_Version::supports_cmov()) {
3017 assert(tmp == noreg, "unneeded temp");
3018 if (pop_left) {
3019 fucomip(index);
3020 } else {
3021 fucomi(index);
3022 }
3023 if (pop_right) {
3024 fpop();
3025 }
3026 } else {
3027 assert(tmp != noreg, "need temp");
3028 if (pop_left) {
3029 if (pop_right) {
3030 fcompp();
3031 } else {
3032 fcomp(index);
3033 }
3034 } else {
3035 fcom(index);
3036 }
3037 // convert FPU condition into eflags condition via rax,
3038 save_rax(tmp);
3039 fwait(); fnstsw_ax();
3040 sahf();
3041 restore_rax(tmp);
3042 }
3043 // condition codes set as follows:
3044 //
3045 // CF (corresponds to C0) if x < y
3046 // PF (corresponds to C2) if unordered
3047 // ZF (corresponds to C3) if x = y
3048 }
3050 void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less) {
3051 fcmp2int(dst, unordered_is_less, 1, true, true);
3052 }
3054 void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right) {
3055 fcmp(VM_Version::supports_cmov() ? noreg : dst, index, pop_left, pop_right);
3056 Label L;
3057 if (unordered_is_less) {
3058 movl(dst, -1);
3059 jcc(Assembler::parity, L);
3060 jcc(Assembler::below , L);
3061 movl(dst, 0);
3062 jcc(Assembler::equal , L);
3063 increment(dst);
3064 } else { // unordered is greater
3065 movl(dst, 1);
3066 jcc(Assembler::parity, L);
3067 jcc(Assembler::above , L);
3068 movl(dst, 0);
3069 jcc(Assembler::equal , L);
3070 decrementl(dst);
3071 }
3072 bind(L);
3073 }
3075 void MacroAssembler::fld_d(AddressLiteral src) {
3076 fld_d(as_Address(src));
3077 }
3079 void MacroAssembler::fld_s(AddressLiteral src) {
3080 fld_s(as_Address(src));
3081 }
3083 void MacroAssembler::fld_x(AddressLiteral src) {
3084 Assembler::fld_x(as_Address(src));
3085 }
3087 void MacroAssembler::fldcw(AddressLiteral src) {
3088 Assembler::fldcw(as_Address(src));
3089 }
3091 void MacroAssembler::pow_exp_core_encoding() {
3092 // kills rax, rcx, rdx
3093 subptr(rsp,sizeof(jdouble));
3094 // computes 2^X. Stack: X ...
3095 // f2xm1 computes 2^X-1 but only operates on -1<=X<=1. Get int(X) and
3096 // keep it on the thread's stack to compute 2^int(X) later
3097 // then compute 2^(X-int(X)) as (2^(X-int(X))-1)+1
3098 // final result is obtained with: 2^X = 2^int(X) * 2^(X-int(X))
3099 fld_s(0); // Stack: X X ...
3100 frndint(); // Stack: int(X) X ...
3101 fsuba(1); // Stack: int(X) X-int(X) ...
3102 fistp_s(Address(rsp,0)); // move int(X) as integer to thread's stack. Stack: X-int(X) ...
3103 f2xm1(); // Stack: 2^(X-int(X))-1 ...
3104 fld1(); // Stack: 1 2^(X-int(X))-1 ...
3105 faddp(1); // Stack: 2^(X-int(X))
3106 // computes 2^(int(X)): add exponent bias (1023) to int(X), then
3107 // shift int(X)+1023 to exponent position.
3108 // The exponent is limited to 11 bits: if int(X)+1023 does not fit in 11
3109 // bits, set the result to NaN. 0x000 and 0x7FF are reserved exponent
3110 // values, so detect them and set the result to NaN as well.
3111 movl(rax,Address(rsp,0));
3112 movl(rcx, -2048); // 11 bit mask and valid NaN binary encoding
3113 addl(rax, 1023);
3114 movl(rdx,rax);
3115 shll(rax,20);
3116 // Check that 0 < int(X)+1023 < 2047. Otherwise set rax to NaN.
3117 addl(rdx,1);
3118 // Check that 1 < int(X)+1023+1 < 2048
3119 // in 3 steps:
3120 // 1- (int(X)+1023+1)&-2048 == 0 => 0 <= int(X)+1023+1 < 2048
3121 // 2- (int(X)+1023+1)&-2048 != 0
3122 // 3- (int(X)+1023+1)&-2048 != 1
3123 // Do 2- first because addl just updated the flags.
3124 cmov32(Assembler::equal,rax,rcx);
3125 cmpl(rdx,1);
3126 cmov32(Assembler::equal,rax,rcx);
3127 testl(rdx,rcx);
3128 cmov32(Assembler::notEqual,rax,rcx);
3129 movl(Address(rsp,4),rax);
3130 movl(Address(rsp,0),0);
3131 fmul_d(Address(rsp,0)); // Stack: 2^X ...
3132 addptr(rsp,sizeof(jdouble));
3133 }
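// A sketch (not VM code) of the scaling step above: 2^n is built directly in
// IEEE-754 double bits by writing n+1023 into the 11-bit exponent field
// (bits 62..52, i.e. starting at bit 20 of the high 32-bit word):
static inline double two_to_the_n_semantics(int n) {
  // only valid for -1022 <= n <= 1023, matching the range checks above
  unsigned long long bits = ((unsigned long long)(n + 1023)) << 52;
  double result;
  __builtin_memcpy(&result, &bits, sizeof(result));  // type-pun via memcpy
  return result;
}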
3135 void MacroAssembler::increase_precision() {
3136 subptr(rsp, BytesPerWord);
3137 fnstcw(Address(rsp, 0));
3138 movl(rax, Address(rsp, 0));
3139 orl(rax, 0x300);
3140 push(rax);
3141 fldcw(Address(rsp, 0));
3142 pop(rax);
3143 }
3145 void MacroAssembler::restore_precision() {
3146 fldcw(Address(rsp, 0));
3147 addptr(rsp, BytesPerWord);
3148 }
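// Context for the 0x300 constant used in increase_precision (a sketch, not
// VM code): bits 8..9 of the x87 control word form the precision-control
// field, and setting both bits selects a 64-bit significand, which the
// pow/exp sequences need for their intermediate steps. Names are hypothetical:
enum x87_precision_control {
  x87_pc_single   = 0x000,  // 24-bit significand
  x87_pc_double   = 0x200,  // 53-bit significand
  x87_pc_extended = 0x300   // 64-bit significand (what 0x300 selects)
};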
3150 void MacroAssembler::fast_pow() {
3151 // computes X^Y = 2^(Y * log2(X))
3152 // if fast computation is not possible, result is NaN. Requires
3153 // fallback from user of this macro.
3154 // increase precision for intermediate steps of the computation
3155 BLOCK_COMMENT("fast_pow {");
3156 increase_precision();
3157 fyl2x(); // Stack: (Y*log2(X)) ...
3158 pow_exp_core_encoding(); // Stack: X^Y ...
3159 restore_precision();
3160 BLOCK_COMMENT("} fast_pow");
3161 }
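// The identity fast_pow relies on, as a sketch (not VM code): for x > 0,
// x^y == 2^(y * log2(x)); fyl2x produces y*log2(x) and pow_exp_core_encoding
// exponentiates it. Negative x yields NaN here, forcing the caller's
// slow-path fallback:
static inline double fast_pow_semantics(double x, double y) {
  return __builtin_exp2(y * __builtin_log2(x));
}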
3163 void MacroAssembler::fast_exp() {
3164 // computes exp(X) = 2^(X * log2(e))
3165 // if fast computation is not possible, result is NaN. Requires
3166 // fallback from user of this macro.
3167 // increase precision for intermediate steps of the computation
3168 increase_precision();
3169 fldl2e(); // Stack: log2(e) X ...
3170 fmulp(1); // Stack: (X*log2(e)) ...
3171 pow_exp_core_encoding(); // Stack: exp(X) ...
3172 restore_precision();
3173 }
3175 void MacroAssembler::pow_or_exp(bool is_exp, int num_fpu_regs_in_use) {
3176 // kills rax, rcx, rdx
3177 // pow and exp needs 2 extra registers on the fpu stack.
3178 Label slow_case, done;
3179 Register tmp = noreg;
3180 if (!VM_Version::supports_cmov()) {
3181 // fcmp needs a temporary so preserve rdx,
3182 tmp = rdx;
3183 }
3184 Register tmp2 = rax;
3185 Register tmp3 = rcx;
3187 if (is_exp) {
3188 // Stack: X
3189 fld_s(0); // duplicate argument for runtime call. Stack: X X
3190 fast_exp(); // Stack: exp(X) X
3191 fcmp(tmp, 0, false, false); // Stack: exp(X) X
3192 // exp(X) not equal to itself: exp(X) is NaN, go to slow case.
3193 jcc(Assembler::parity, slow_case);
3194 // get rid of duplicate argument. Stack: exp(X)
3195 if (num_fpu_regs_in_use > 0) {
3196 fxch();
3197 fpop();
3198 } else {
3199 ffree(1);
3200 }
3201 jmp(done);
3202 } else {
3203 // Stack: X Y
3204 Label x_negative, y_odd;
3206 fldz(); // Stack: 0 X Y
3207 fcmp(tmp, 1, true, false); // Stack: X Y
3208 jcc(Assembler::above, x_negative);
3210 // X >= 0
3212 fld_s(1); // duplicate arguments for runtime call. Stack: Y X Y
3213 fld_s(1); // Stack: X Y X Y
3214 fast_pow(); // Stack: X^Y X Y
3215 fcmp(tmp, 0, false, false); // Stack: X^Y X Y
3216 // X^Y not equal to itself: X^Y is NaN, go to slow case.
3217 jcc(Assembler::parity, slow_case);
3218 // get rid of duplicate arguments. Stack: X^Y
3219 if (num_fpu_regs_in_use > 0) {
3220 fxch(); fpop();
3221 fxch(); fpop();
3222 } else {
3223 ffree(2);
3224 ffree(1);
3225 }
3226 jmp(done);
3228 // X <= 0
3229 bind(x_negative);
3231 fld_s(1); // Stack: Y X Y
3232 frndint(); // Stack: int(Y) X Y
3233 fcmp(tmp, 2, false, false); // Stack: int(Y) X Y
3234 jcc(Assembler::notEqual, slow_case);
3236 subptr(rsp, 8);
3238 // For X^Y, when X < 0, Y has to be an integer and the final
3239 // result depends on whether it's odd or even. We just checked
3240 // that int(Y) == Y. We move int(Y) to gp registers as a 64 bit
3241 // integer to test its parity. If int(Y) is huge and doesn't fit
3242 // in the 64 bit integer range, the integer indefinite value will
3243 // end up in the gp registers. Huge numbers are all even, and the
3244 // integer indefinite value is even too, so it's fine.
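// e.g. (illustrative) above 2^53 the spacing between adjacent doubles is
// at least 2, so every representable |Y| >= 2^53 is an even integer, and
// the x87 integer indefinite encoding (0x8000000000000000 = -2^63) is
// even as well.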
3246 #ifdef ASSERT
3247 // Let's check we don't end up with an integer indefinite number
3248 // when not expected. First test for huge numbers: check whether
3249 // int(Y)+1 == int(Y) which is true for very large numbers and
3250 // those are all even. A 64 bit integer is guaranteed to not
3251 // overflow for numbers where y+1 != y (when precision is set to
3252 // double precision).
3253 Label y_not_huge;
3255 fld1(); // Stack: 1 int(Y) X Y
3256 fadd(1); // Stack: 1+int(Y) int(Y) X Y
3258 #ifdef _LP64
3259 // trip to memory to force the precision down from double extended
3260 // precision
3261 fstp_d(Address(rsp, 0));
3262 fld_d(Address(rsp, 0));
3263 #endif
3265 fcmp(tmp, 1, true, false); // Stack: int(Y) X Y
3266 #endif
3268 // move int(Y) as 64 bit integer to thread's stack
3269 fistp_d(Address(rsp,0)); // Stack: X Y
3271 #ifdef ASSERT
3272 jcc(Assembler::notEqual, y_not_huge);
3274 // Y is huge so we know it's even. It may not fit in a 64 bit
3275 // integer and we don't want the debug code below to see the
3276 // integer indefinite value so overwrite int(Y) on the thread's
3277 // stack with 0.
3278 movl(Address(rsp, 0), 0);
3279 movl(Address(rsp, 4), 0);
3281 bind(y_not_huge);
3282 #endif
3284 fld_s(1); // duplicate arguments for runtime call. Stack: Y X Y
3285 fld_s(1); // Stack: X Y X Y
3286 fabs(); // Stack: abs(X) Y X Y
3287 fast_pow(); // Stack: abs(X)^Y X Y
3288 fcmp(tmp, 0, false, false); // Stack: abs(X)^Y X Y
3289 // abs(X)^Y not equal to itself: abs(X)^Y is NaN, go to slow case.
3291 pop(tmp2);
3292 NOT_LP64(pop(tmp3));
3293 jcc(Assembler::parity, slow_case);
3295 #ifdef ASSERT
3296 // Check that int(Y) is not the integer indefinite value (integer
3297 // overflow). Shouldn't happen because for values that would
3298 // overflow, 1+int(Y)==int(Y), which was tested earlier.
3299 #ifndef _LP64
3300 {
3301 Label integer;
3302 testl(tmp2, tmp2);
3303 jcc(Assembler::notZero, integer);
3304 cmpl(tmp3, 0x80000000);
3305 jcc(Assembler::notZero, integer);
3306 STOP("integer indefinite value shouldn't be seen here");
3307 bind(integer);
3308 }
3309 #else
3310 {
3311 Label integer;
3312 mov(tmp3, tmp2); // preserve tmp2 for parity check below
3313 shlq(tmp3, 1);
3314 jcc(Assembler::carryClear, integer);
3315 jcc(Assembler::notZero, integer);
3316 STOP("integer indefinite value shouldn't be seen here");
3317 bind(integer);
3318 }
3319 #endif
3320 #endif
3322 // get rid of duplicate arguments. Stack: X^Y
3323 if (num_fpu_regs_in_use > 0) {
3324 fxch(); fpop();
3325 fxch(); fpop();
3326 } else {
3327 ffree(2);
3328 ffree(1);
3329 }
3331 testl(tmp2, 1);
3332 jcc(Assembler::zero, done); // X <= 0, Y even: X^Y = abs(X)^Y
3333 // X <= 0, Y odd: X^Y = -abs(X)^Y
3335 fchs(); // Stack: -abs(X)^Y Y
3336 jmp(done);
3337 }
3339 // slow case: runtime call
3340 bind(slow_case);
3342 fpop(); // pop incorrect result or int(Y)
3344 fp_runtime_fallback(is_exp ? CAST_FROM_FN_PTR(address, SharedRuntime::dexp) : CAST_FROM_FN_PTR(address, SharedRuntime::dpow),
3345 is_exp ? 1 : 2, num_fpu_regs_in_use);
3347 // Come here with result in F-TOS
3348 bind(done);
3349 }
3351 void MacroAssembler::fpop() {
3352 ffree();
3353 fincstp();
3354 }
3356 void MacroAssembler::fremr(Register tmp) {
3357 save_rax(tmp);
3358 { Label L;
3359 bind(L);
3360 fprem();
3361 fwait(); fnstsw_ax();
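// The x87 C2 flag (bit 0x400 of the status word) stays set while fprem's
// partial reduction is incomplete; on 32-bit, sahf maps C2 into PF, hence
// the parity branch below.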
3362 #ifdef _LP64
3363 testl(rax, 0x400);
3364 jcc(Assembler::notEqual, L);
3365 #else
3366 sahf();
3367 jcc(Assembler::parity, L);
3368 #endif // _LP64
3369 }
3370 restore_rax(tmp);
3371 // Result is in ST0.
3372 // Note: fxch & fpop to get rid of ST1
3373 // (otherwise FPU stack could overflow eventually)
3374 fxch(1);
3375 fpop();
3376 }
3379 void MacroAssembler::incrementl(AddressLiteral dst) {
3380 if (reachable(dst)) {
3381 incrementl(as_Address(dst));
3382 } else {
3383 lea(rscratch1, dst);
3384 incrementl(Address(rscratch1, 0));
3385 }
3386 }
3388 void MacroAssembler::incrementl(ArrayAddress dst) {
3389 incrementl(as_Address(dst));
3390 }
3392 void MacroAssembler::incrementl(Register reg, int value) {
3393 if (value == min_jint) {addl(reg, value) ; return; }
3394 if (value < 0) { decrementl(reg, -value); return; }
3395 if (value == 0) { ; return; }
3396 if (value == 1 && UseIncDec) { incl(reg) ; return; }
3397 /* else */ { addl(reg, value) ; return; }
3398 }
3400 void MacroAssembler::incrementl(Address dst, int value) {
3401 if (value == min_jint) {addl(dst, value) ; return; }
3402 if (value < 0) { decrementl(dst, -value); return; }
3403 if (value == 0) { ; return; }
3404 if (value == 1 && UseIncDec) { incl(dst) ; return; }
3405 /* else */ { addl(dst, value) ; return; }
3406 }
3408 void MacroAssembler::jump(AddressLiteral dst) {
3409 if (reachable(dst)) {
3410 jmp_literal(dst.target(), dst.rspec());
3411 } else {
3412 lea(rscratch1, dst);
3413 jmp(rscratch1);
3414 }
3415 }
3417 void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst) {
3418 if (reachable(dst)) {
3419 InstructionMark im(this);
3420 relocate(dst.reloc());
3421 const int short_size = 2;
3422 const int long_size = 6;
3423 int offs = (intptr_t)dst.target() - ((intptr_t)pc());
3424 if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) {
3425 // 0111 tttn #8-bit disp
3426 emit_int8(0x70 | cc);
3427 emit_int8((offs - short_size) & 0xFF);
3428 } else {
3429 // 0000 1111 1000 tttn #32-bit disp
3430 emit_int8(0x0F);
3431 emit_int8((unsigned char)(0x80 | cc));
3432 emit_int32(offs - long_size);
3433 }
3434 } else {
3435 #ifdef ASSERT
3436 warning("reversing conditional branch");
3437 #endif /* ASSERT */
3438 Label skip;
3439 jccb(reverse[cc], skip);
3440 lea(rscratch1, dst);
3441 Assembler::jmp(rscratch1);
3442 bind(skip);
3443 }
3444 }
3446 void MacroAssembler::ldmxcsr(AddressLiteral src) {
3447 if (reachable(src)) {
3448 Assembler::ldmxcsr(as_Address(src));
3449 } else {
3450 lea(rscratch1, src);
3451 Assembler::ldmxcsr(Address(rscratch1, 0));
3452 }
3453 }
3455 int MacroAssembler::load_signed_byte(Register dst, Address src) {
3456 int off;
3457 if (LP64_ONLY(true ||) VM_Version::is_P6()) {
3458 off = offset();
3459 movsbl(dst, src); // movsxb
3460 } else {
3461 off = load_unsigned_byte(dst, src);
3462 shll(dst, 24);
3463 sarl(dst, 24);
3464 }
3465 return off;
3466 }
3468 // Note: load_signed_short used to be called load_signed_word.
3469 // Although the 'w' in x86 opcodes refers to the term "word" in the assembler
3470 // manual, which means 16 bits, that usage is found nowhere in HotSpot code.
3471 // The term "word" in HotSpot means a 32- or 64-bit machine word.
3472 int MacroAssembler::load_signed_short(Register dst, Address src) {
3473 int off;
3474 if (LP64_ONLY(true ||) VM_Version::is_P6()) {
3475 // This is dubious to me since it seems safe to do a signed 16 => 64 bit
3476 // version but this is what 64bit has always done. This seems to imply
3477 // that users are only using 32bits worth.
3478 off = offset();
3479 movswl(dst, src); // movsxw
3480 } else {
3481 off = load_unsigned_short(dst, src);
3482 shll(dst, 16);
3483 sarl(dst, 16);
3484 }
3485 return off;
3486 }
3488 int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
3489 // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
3490 // and "3.9 Partial Register Penalties", p. 22.
3491 int off;
3492 if (LP64_ONLY(true || ) VM_Version::is_P6() || src.uses(dst)) {
3493 off = offset();
3494 movzbl(dst, src); // movzxb
3495 } else {
3496 xorl(dst, dst);
3497 off = offset();
3498 movb(dst, src);
3499 }
3500 return off;
3501 }
3503 // Note: load_unsigned_short used to be called load_unsigned_word.
3504 int MacroAssembler::load_unsigned_short(Register dst, Address src) {
3505 // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
3506 // and "3.9 Partial Register Penalties", p. 22.
3507 int off;
3508 if (LP64_ONLY(true ||) VM_Version::is_P6() || src.uses(dst)) {
3509 off = offset();
3510 movzwl(dst, src); // movzxw
3511 } else {
3512 xorl(dst, dst);
3513 off = offset();
3514 movw(dst, src);
3515 }
3516 return off;
3517 }
3519 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) {
3520 switch (size_in_bytes) {
3521 #ifndef _LP64
3522 case 8:
3523 assert(dst2 != noreg, "second dest register required");
3524 movl(dst, src);
3525 movl(dst2, src.plus_disp(BytesPerInt));
3526 break;
3527 #else
3528 case 8: movq(dst, src); break;
3529 #endif
3530 case 4: movl(dst, src); break;
3531 case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
3532 case 1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
3533 default: ShouldNotReachHere();
3534 }
3535 }
3537 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) {
3538 switch (size_in_bytes) {
3539 #ifndef _LP64
3540 case 8:
3541 assert(src2 != noreg, "second source register required");
3542 movl(dst, src);
3543 movl(dst.plus_disp(BytesPerInt), src2);
3544 break;
3545 #else
3546 case 8: movq(dst, src); break;
3547 #endif
3548 case 4: movl(dst, src); break;
3549 case 2: movw(dst, src); break;
3550 case 1: movb(dst, src); break;
3551 default: ShouldNotReachHere();
3552 }
3553 }
3555 void MacroAssembler::mov32(AddressLiteral dst, Register src) {
3556 if (reachable(dst)) {
3557 movl(as_Address(dst), src);
3558 } else {
3559 lea(rscratch1, dst);
3560 movl(Address(rscratch1, 0), src);
3561 }
3562 }
3564 void MacroAssembler::mov32(Register dst, AddressLiteral src) {
3565 if (reachable(src)) {
3566 movl(dst, as_Address(src));
3567 } else {
3568 lea(rscratch1, src);
3569 movl(dst, Address(rscratch1, 0));
3570 }
3571 }
3573 // C++ bool manipulation
3575 void MacroAssembler::movbool(Register dst, Address src) {
3576 if(sizeof(bool) == 1)
3577 movb(dst, src);
3578 else if(sizeof(bool) == 2)
3579 movw(dst, src);
3580 else if(sizeof(bool) == 4)
3581 movl(dst, src);
3582 else
3583 // unsupported
3584 ShouldNotReachHere();
3585 }
3587 void MacroAssembler::movbool(Address dst, bool boolconst) {
3588 if(sizeof(bool) == 1)
3589 movb(dst, (int) boolconst);
3590 else if(sizeof(bool) == 2)
3591 movw(dst, (int) boolconst);
3592 else if(sizeof(bool) == 4)
3593 movl(dst, (int) boolconst);
3594 else
3595 // unsupported
3596 ShouldNotReachHere();
3597 }
3599 void MacroAssembler::movbool(Address dst, Register src) {
3600 if(sizeof(bool) == 1)
3601 movb(dst, src);
3602 else if(sizeof(bool) == 2)
3603 movw(dst, src);
3604 else if(sizeof(bool) == 4)
3605 movl(dst, src);
3606 else
3607 // unsupported
3608 ShouldNotReachHere();
3609 }
3611 void MacroAssembler::movbyte(ArrayAddress dst, int src) {
3612 movb(as_Address(dst), src);
3613 }
3615 void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src) {
3616 if (reachable(src)) {
3617 movdl(dst, as_Address(src));
3618 } else {
3619 lea(rscratch1, src);
3620 movdl(dst, Address(rscratch1, 0));
3621 }
3622 }
3624 void MacroAssembler::movq(XMMRegister dst, AddressLiteral src) {
3625 if (reachable(src)) {
3626 movq(dst, as_Address(src));
3627 } else {
3628 lea(rscratch1, src);
3629 movq(dst, Address(rscratch1, 0));
3630 }
3631 }
3633 void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src) {
3634 if (reachable(src)) {
3635 if (UseXmmLoadAndClearUpper) {
3636 movsd (dst, as_Address(src));
3637 } else {
3638 movlpd(dst, as_Address(src));
3639 }
3640 } else {
3641 lea(rscratch1, src);
3642 if (UseXmmLoadAndClearUpper) {
3643 movsd (dst, Address(rscratch1, 0));
3644 } else {
3645 movlpd(dst, Address(rscratch1, 0));
3646 }
3647 }
3648 }
3650 void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src) {
3651 if (reachable(src)) {
3652 movss(dst, as_Address(src));
3653 } else {
3654 lea(rscratch1, src);
3655 movss(dst, Address(rscratch1, 0));
3656 }
3657 }
3659 void MacroAssembler::movptr(Register dst, Register src) {
3660 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
3661 }
3663 void MacroAssembler::movptr(Register dst, Address src) {
3664 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
3665 }
3667 // src should NEVER be a real pointer. Use AddressLiteral for true pointers
3668 void MacroAssembler::movptr(Register dst, intptr_t src) {
3669 LP64_ONLY(mov64(dst, src)) NOT_LP64(movl(dst, src));
3670 }
3672 void MacroAssembler::movptr(Address dst, Register src) {
3673 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
3674 }
3676 void MacroAssembler::movdqu(XMMRegister dst, AddressLiteral src) {
3677 if (reachable(src)) {
3678 Assembler::movdqu(dst, as_Address(src));
3679 } else {
3680 lea(rscratch1, src);
3681 Assembler::movdqu(dst, Address(rscratch1, 0));
3682 }
3683 }
3685 void MacroAssembler::movdqa(XMMRegister dst, AddressLiteral src) {
3686 if (reachable(src)) {
3687 Assembler::movdqa(dst, as_Address(src));
3688 } else {
3689 lea(rscratch1, src);
3690 Assembler::movdqa(dst, Address(rscratch1, 0));
3691 }
3692 }
3694 void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src) {
3695 if (reachable(src)) {
3696 Assembler::movsd(dst, as_Address(src));
3697 } else {
3698 lea(rscratch1, src);
3699 Assembler::movsd(dst, Address(rscratch1, 0));
3700 }
3701 }
3703 void MacroAssembler::movss(XMMRegister dst, AddressLiteral src) {
3704 if (reachable(src)) {
3705 Assembler::movss(dst, as_Address(src));
3706 } else {
3707 lea(rscratch1, src);
3708 Assembler::movss(dst, Address(rscratch1, 0));
3709 }
3710 }
3712 void MacroAssembler::mulsd(XMMRegister dst, AddressLiteral src) {
3713 if (reachable(src)) {
3714 Assembler::mulsd(dst, as_Address(src));
3715 } else {
3716 lea(rscratch1, src);
3717 Assembler::mulsd(dst, Address(rscratch1, 0));
3718 }
3719 }
3721 void MacroAssembler::mulss(XMMRegister dst, AddressLiteral src) {
3722 if (reachable(src)) {
3723 Assembler::mulss(dst, as_Address(src));
3724 } else {
3725 lea(rscratch1, src);
3726 Assembler::mulss(dst, Address(rscratch1, 0));
3727 }
3728 }
3730 void MacroAssembler::null_check(Register reg, int offset) {
3731 if (needs_explicit_null_check(offset)) {
3732 // provoke OS NULL exception if reg = NULL by
3733 // accessing M[reg] w/o changing any (non-CC) registers
3734 // NOTE: cmpl is plenty here to provoke a segv
3735 cmpptr(rax, Address(reg, 0));
3736 // Note: should probably use testl(rax, Address(reg, 0));
3737 // may be shorter code (however, this version of
3738 // testl needs to be implemented first)
3739 } else {
3740 // nothing to do, (later) access of M[reg + offset]
3741 // will provoke OS NULL exception if reg = NULL
3742 }
3743 }
3745 void MacroAssembler::os_breakpoint() {
3746 // instead of directly emitting a breakpoint, call os::breakpoint for better debuggability
3747 // (e.g., MSVC can't call ps() otherwise)
3748 call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
3749 }
3751 void MacroAssembler::pop_CPU_state() {
3752 pop_FPU_state();
3753 pop_IU_state();
3754 }
3756 void MacroAssembler::pop_FPU_state() {
3757 NOT_LP64(frstor(Address(rsp, 0));)
3758 LP64_ONLY(fxrstor(Address(rsp, 0));)
3759 addptr(rsp, FPUStateSizeInWords * wordSize);
3760 }
3762 void MacroAssembler::pop_IU_state() {
3763 popa();
3764 LP64_ONLY(addq(rsp, 8));
3765 popf();
3766 }
3768 // Save Integer and Float state
3769 // Warning: Stack must be 16 byte aligned (64bit)
3770 void MacroAssembler::push_CPU_state() {
3771 push_IU_state();
3772 push_FPU_state();
3773 }
3775 void MacroAssembler::push_FPU_state() {
3776 subptr(rsp, FPUStateSizeInWords * wordSize);
3777 #ifndef _LP64
3778 fnsave(Address(rsp, 0));
3779 fwait();
3780 #else
3781 fxsave(Address(rsp, 0));
3782 #endif // LP64
3783 }
3785 void MacroAssembler::push_IU_state() {
3786 // Push flags first because pusha kills them
3787 pushf();
3788 // Make sure rsp stays 16-byte aligned
3789 LP64_ONLY(subq(rsp, 8));
3790 pusha();
3791 }
3793 void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp, bool clear_pc) {
3794 // determine java_thread register
3795 if (!java_thread->is_valid()) {
3796 java_thread = rdi;
3797 get_thread(java_thread);
3798 }
3799 // we must set sp to zero to clear frame
3800 movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
3801 if (clear_fp) {
3802 movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
3803 }
3805 if (clear_pc)
3806 movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
3808 }
3810 void MacroAssembler::restore_rax(Register tmp) {
3811 if (tmp == noreg) pop(rax);
3812 else if (tmp != rax) mov(rax, tmp);
3813 }
3815 void MacroAssembler::round_to(Register reg, int modulus) {
3816 addptr(reg, modulus - 1);
3817 andptr(reg, -modulus);
3818 }
3820 void MacroAssembler::save_rax(Register tmp) {
3821 if (tmp == noreg) push(rax);
3822 else if (tmp != rax) mov(tmp, rax);
3823 }
3825 // Write serialization page so VM thread can do a pseudo remote membar.
3826 // We use the current thread pointer to calculate a thread specific
3827 // offset to write to within the page. This minimizes bus traffic
3828 // due to cache line collision.
3829 void MacroAssembler::serialize_memory(Register thread, Register tmp) {
3830 movl(tmp, thread);
3831 shrl(tmp, os::get_serialize_page_shift_count());
3832 andl(tmp, (os::vm_page_size() - sizeof(int)));
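// e.g. (illustrative, assuming the common 4K page and sizeof(int) == 4)
// the mask is 0xFFC, so each thread gets its own int-aligned slot within
// the serialization page.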
3834 Address index(noreg, tmp, Address::times_1);
3835 ExternalAddress page(os::get_memory_serialize_page());
3837 // Size of store must match masking code above
3838 movl(as_Address(ArrayAddress(page, index)), tmp);
3839 }
3841 // Calls to C land
3842 //
3843 // When entering C land, the rbp and rsp of the last Java frame have to be recorded
3844 // in the (thread-local) JavaThread object. When leaving C land, the last Java fp
3845 // has to be reset to 0. This is required to allow proper stack traversal.
3846 void MacroAssembler::set_last_Java_frame(Register java_thread,
3847 Register last_java_sp,
3848 Register last_java_fp,
3849 address last_java_pc) {
3850 // determine java_thread register
3851 if (!java_thread->is_valid()) {
3852 java_thread = rdi;
3853 get_thread(java_thread);
3854 }
3855 // determine last_java_sp register
3856 if (!last_java_sp->is_valid()) {
3857 last_java_sp = rsp;
3858 }
3860 // last_java_fp is optional
3862 if (last_java_fp->is_valid()) {
3863 movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), last_java_fp);
3864 }
3866 // last_java_pc is optional
3868 if (last_java_pc != NULL) {
3869 lea(Address(java_thread,
3870 JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()),
3871 InternalAddress(last_java_pc));
3873 }
3874 movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
3875 }
3877 void MacroAssembler::shlptr(Register dst, int imm8) {
3878 LP64_ONLY(shlq(dst, imm8)) NOT_LP64(shll(dst, imm8));
3879 }
3881 void MacroAssembler::shrptr(Register dst, int imm8) {
3882 LP64_ONLY(shrq(dst, imm8)) NOT_LP64(shrl(dst, imm8));
3883 }
3885 void MacroAssembler::sign_extend_byte(Register reg) {
3886 if (LP64_ONLY(true ||) (VM_Version::is_P6() && reg->has_byte_register())) {
3887 movsbl(reg, reg); // movsxb
3888 } else {
3889 shll(reg, 24);
3890 sarl(reg, 24);
3891 }
3892 }
3894 void MacroAssembler::sign_extend_short(Register reg) {
3895 if (LP64_ONLY(true ||) VM_Version::is_P6()) {
3896 movswl(reg, reg); // movsxw
3897 } else {
3898 shll(reg, 16);
3899 sarl(reg, 16);
3900 }
3901 }
3903 void MacroAssembler::testl(Register dst, AddressLiteral src) {
3904 assert(reachable(src), "Address should be reachable");
3905 testl(dst, as_Address(src));
3906 }
3908 void MacroAssembler::sqrtsd(XMMRegister dst, AddressLiteral src) {
3909 if (reachable(src)) {
3910 Assembler::sqrtsd(dst, as_Address(src));
3911 } else {
3912 lea(rscratch1, src);
3913 Assembler::sqrtsd(dst, Address(rscratch1, 0));
3914 }
3915 }
3917 void MacroAssembler::sqrtss(XMMRegister dst, AddressLiteral src) {
3918 if (reachable(src)) {
3919 Assembler::sqrtss(dst, as_Address(src));
3920 } else {
3921 lea(rscratch1, src);
3922 Assembler::sqrtss(dst, Address(rscratch1, 0));
3923 }
3924 }
3926 void MacroAssembler::subsd(XMMRegister dst, AddressLiteral src) {
3927 if (reachable(src)) {
3928 Assembler::subsd(dst, as_Address(src));
3929 } else {
3930 lea(rscratch1, src);
3931 Assembler::subsd(dst, Address(rscratch1, 0));
3932 }
3933 }
3935 void MacroAssembler::subss(XMMRegister dst, AddressLiteral src) {
3936 if (reachable(src)) {
3937 Assembler::subss(dst, as_Address(src));
3938 } else {
3939 lea(rscratch1, src);
3940 Assembler::subss(dst, Address(rscratch1, 0));
3941 }
3942 }
3944 void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src) {
3945 if (reachable(src)) {
3946 Assembler::ucomisd(dst, as_Address(src));
3947 } else {
3948 lea(rscratch1, src);
3949 Assembler::ucomisd(dst, Address(rscratch1, 0));
3950 }
3951 }
3953 void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src) {
3954 if (reachable(src)) {
3955 Assembler::ucomiss(dst, as_Address(src));
3956 } else {
3957 lea(rscratch1, src);
3958 Assembler::ucomiss(dst, Address(rscratch1, 0));
3959 }
3960 }
3962 void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src) {
3963 // Used in sign-bit flipping with aligned address.
3964 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
3965 if (reachable(src)) {
3966 Assembler::xorpd(dst, as_Address(src));
3967 } else {
3968 lea(rscratch1, src);
3969 Assembler::xorpd(dst, Address(rscratch1, 0));
3970 }
3971 }
3973 void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src) {
3974 // Used in sign-bit flipping with aligned address.
3975 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
3976 if (reachable(src)) {
3977 Assembler::xorps(dst, as_Address(src));
3978 } else {
3979 lea(rscratch1, src);
3980 Assembler::xorps(dst, Address(rscratch1, 0));
3981 }
3982 }
3984 void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src) {
3985 // Used in sign-bit flipping with aligned address.
3986 bool aligned_adr = (((intptr_t)src.target() & 15) == 0);
3987 assert((UseAVX > 0) || aligned_adr, "SSE mode requires address alignment 16 bytes");
3988 if (reachable(src)) {
3989 Assembler::pshufb(dst, as_Address(src));
3990 } else {
3991 lea(rscratch1, src);
3992 Assembler::pshufb(dst, Address(rscratch1, 0));
3993 }
3994 }
3996 // AVX 3-operands instructions
3998 void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
3999 if (reachable(src)) {
4000 vaddsd(dst, nds, as_Address(src));
4001 } else {
4002 lea(rscratch1, src);
4003 vaddsd(dst, nds, Address(rscratch1, 0));
4004 }
4005 }
4007 void MacroAssembler::vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
4008 if (reachable(src)) {
4009 vaddss(dst, nds, as_Address(src));
4010 } else {
4011 lea(rscratch1, src);
4012 vaddss(dst, nds, Address(rscratch1, 0));
4013 }
4014 }
4016 void MacroAssembler::vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256) {
4017 if (reachable(src)) {
4018 vandpd(dst, nds, as_Address(src), vector256);
4019 } else {
4020 lea(rscratch1, src);
4021 vandpd(dst, nds, Address(rscratch1, 0), vector256);
4022 }
4023 }
4025 void MacroAssembler::vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256) {
4026 if (reachable(src)) {
4027 vandps(dst, nds, as_Address(src), vector256);
4028 } else {
4029 lea(rscratch1, src);
4030 vandps(dst, nds, Address(rscratch1, 0), vector256);
4031 }
4032 }
4034 void MacroAssembler::vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
4035 if (reachable(src)) {
4036 vdivsd(dst, nds, as_Address(src));
4037 } else {
4038 lea(rscratch1, src);
4039 vdivsd(dst, nds, Address(rscratch1, 0));
4040 }
4041 }
4043 void MacroAssembler::vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
4044 if (reachable(src)) {
4045 vdivss(dst, nds, as_Address(src));
4046 } else {
4047 lea(rscratch1, src);
4048 vdivss(dst, nds, Address(rscratch1, 0));
4049 }
4050 }
4052 void MacroAssembler::vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
4053 if (reachable(src)) {
4054 vmulsd(dst, nds, as_Address(src));
4055 } else {
4056 lea(rscratch1, src);
4057 vmulsd(dst, nds, Address(rscratch1, 0));
4058 }
4059 }
4061 void MacroAssembler::vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
4062 if (reachable(src)) {
4063 vmulss(dst, nds, as_Address(src));
4064 } else {
4065 lea(rscratch1, src);
4066 vmulss(dst, nds, Address(rscratch1, 0));
4067 }
4068 }
4070 void MacroAssembler::vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
4071 if (reachable(src)) {
4072 vsubsd(dst, nds, as_Address(src));
4073 } else {
4074 lea(rscratch1, src);
4075 vsubsd(dst, nds, Address(rscratch1, 0));
4076 }
4077 }
4079 void MacroAssembler::vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
4080 if (reachable(src)) {
4081 vsubss(dst, nds, as_Address(src));
4082 } else {
4083 lea(rscratch1, src);
4084 vsubss(dst, nds, Address(rscratch1, 0));
4085 }
4086 }
4088 void MacroAssembler::vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256) {
4089 if (reachable(src)) {
4090 vxorpd(dst, nds, as_Address(src), vector256);
4091 } else {
4092 lea(rscratch1, src);
4093 vxorpd(dst, nds, Address(rscratch1, 0), vector256);
4094 }
4095 }
4097 void MacroAssembler::vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256) {
4098 if (reachable(src)) {
4099 vxorps(dst, nds, as_Address(src), vector256);
4100 } else {
4101 lea(rscratch1, src);
4102 vxorps(dst, nds, Address(rscratch1, 0), vector256);
4103 }
4104 }
4107 //////////////////////////////////////////////////////////////////////////////////
4108 #if INCLUDE_ALL_GCS
4110 void MacroAssembler::g1_write_barrier_pre(Register obj,
4111 Register pre_val,
4112 Register thread,
4113 Register tmp,
4114 bool tosca_live,
4115 bool expand_call) {
4117 // If expand_call is true then we expand the call_VM_leaf macro
4118 // directly to skip generating the check by
4119 // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
4121 #ifdef _LP64
4122 assert(thread == r15_thread, "must be");
4123 #endif // _LP64
4125 Label done;
4126 Label runtime;
4128 assert(pre_val != noreg, "check this code");
4130 if (obj != noreg) {
4131 assert_different_registers(obj, pre_val, tmp);
4132 assert(pre_val != rax, "check this code");
4133 }
4135 Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
4136 PtrQueue::byte_offset_of_active()));
4137 Address index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
4138 PtrQueue::byte_offset_of_index()));
4139 Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
4140 PtrQueue::byte_offset_of_buf()));
4143 // Is marking active?
4144 if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
4145 cmpl(in_progress, 0);
4146 } else {
4147 assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
4148 cmpb(in_progress, 0);
4149 }
4150 jcc(Assembler::equal, done);
4152 // Do we need to load the previous value?
4153 if (obj != noreg) {
4154 load_heap_oop(pre_val, Address(obj, 0));
4155 }
4157 // Is the previous value null?
4158 cmpptr(pre_val, (int32_t) NULL_WORD);
4159 jcc(Assembler::equal, done);
4161 // Can we store original value in the thread's buffer?
4162 // Is index == 0?
4163 // (The index field is typed as size_t.)
4165 movptr(tmp, index); // tmp := *index_adr
4166 cmpptr(tmp, 0); // tmp == 0?
4167 jcc(Assembler::equal, runtime); // If yes, goto runtime
4169 subptr(tmp, wordSize); // tmp := tmp - wordSize
4170 movptr(index, tmp); // *index_adr := tmp
4171 addptr(tmp, buffer); // tmp := tmp + *buffer_adr
4173 // Record the previous value
4174 movptr(Address(tmp, 0), pre_val);
4175 jmp(done);
4177 bind(runtime);
4178 // save the live input values
4179 if(tosca_live) push(rax);
4181 if (obj != noreg && obj != rax)
4182 push(obj);
4184 if (pre_val != rax)
4185 push(pre_val);
4187 // Calling the runtime using the regular call_VM_leaf mechanism generates
4188 // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
4189 // that checks that the *(ebp+frame::interpreter_frame_last_sp) == NULL.
4190 //
4191 // If we are generating the pre-barrier without a frame (e.g. in the
4192 // intrinsified Reference.get() routine) then ebp might be pointing to
4193 // the caller frame and so this check will most likely fail at runtime.
4194 //
4195 // Expanding the call directly bypasses the generation of the check.
4196 // So when we do not have a full interpreter frame on the stack
4197 // expand_call should be passed true.
4199 NOT_LP64( push(thread); )
4201 if (expand_call) {
4202 LP64_ONLY( assert(pre_val != c_rarg1, "smashed arg"); )
4203 pass_arg1(this, thread);
4204 pass_arg0(this, pre_val);
4205 MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), 2);
4206 } else {
4207 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
4208 }
4210 NOT_LP64( pop(thread); )
4212 // save the live input values
4213 if (pre_val != rax)
4214 pop(pre_val);
4216 if (obj != noreg && obj != rax)
4217 pop(obj);
4219 if(tosca_live) pop(rax);
4221 bind(done);
4222 }
4224 void MacroAssembler::g1_write_barrier_post(Register store_addr,
4225 Register new_val,
4226 Register thread,
4227 Register tmp,
4228 Register tmp2) {
4229 #ifdef _LP64
4230 assert(thread == r15_thread, "must be");
4231 #endif // _LP64
4233 Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
4234 PtrQueue::byte_offset_of_index()));
4235 Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
4236 PtrQueue::byte_offset_of_buf()));
4238 BarrierSet* bs = Universe::heap()->barrier_set();
4239 CardTableModRefBS* ct = (CardTableModRefBS*)bs;
4240 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
4242 Label done;
4243 Label runtime;
4245 // Does store cross heap regions?
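// (store_addr ^ new_val) >> LogOfHRGrainBytes is zero exactly when both
// addresses fall within the same heap region.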
4247 movptr(tmp, store_addr);
4248 xorptr(tmp, new_val);
4249 shrptr(tmp, HeapRegion::LogOfHRGrainBytes);
4250 jcc(Assembler::equal, done);
4252 // crosses regions, storing NULL?
4254 cmpptr(new_val, (int32_t) NULL_WORD);
4255 jcc(Assembler::equal, done);
4257 // storing region crossing non-NULL, is card already dirty?
4259 const Register card_addr = tmp;
4260 const Register cardtable = tmp2;
4262 movptr(card_addr, store_addr);
4263 shrptr(card_addr, CardTableModRefBS::card_shift);
4264 // Do not use ExternalAddress to load 'byte_map_base', since 'byte_map_base' is NOT
4265 // a valid address and therefore is not properly handled by the relocation code.
4266 movptr(cardtable, (intptr_t)ct->byte_map_base);
4267 addptr(card_addr, cardtable);
4269 cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val());
4270 jcc(Assembler::equal, done);
4272 membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
4273 cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
4274 jcc(Assembler::equal, done);
4277 // storing a region crossing, non-NULL oop, card is clean.
4278 // dirty card and log.
4280 movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
4282 cmpl(queue_index, 0);
4283 jcc(Assembler::equal, runtime);
4284 subl(queue_index, wordSize);
4285 movptr(tmp2, buffer);
4286 #ifdef _LP64
4287 movslq(rscratch1, queue_index);
4288 addq(tmp2, rscratch1);
4289 movq(Address(tmp2, 0), card_addr);
4290 #else
4291 addl(tmp2, queue_index);
4292 movl(Address(tmp2, 0), card_addr);
4293 #endif
4294 jmp(done);
4296 bind(runtime);
4297 // save the live input values
4298 push(store_addr);
4299 push(new_val);
4300 #ifdef _LP64
4301 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, r15_thread);
4302 #else
4303 push(thread);
4304 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
4305 pop(thread);
4306 #endif
4307 pop(new_val);
4308 pop(store_addr);
4310 bind(done);
4311 }
4313 #endif // INCLUDE_ALL_GCS
4314 //////////////////////////////////////////////////////////////////////////////////
4317 void MacroAssembler::store_check(Register obj) {
4318 // Does a store check for the oop in register obj. The content of
4319 // register obj is destroyed afterwards.
4320 store_check_part_1(obj);
4321 store_check_part_2(obj);
4322 }
4324 void MacroAssembler::store_check(Register obj, Address dst) {
4325 store_check(obj);
4326 }
4329 // split the store check operation so that other instructions can be scheduled in between
4330 void MacroAssembler::store_check_part_1(Register obj) {
4331 BarrierSet* bs = Universe::heap()->barrier_set();
4332 assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
4333 shrptr(obj, CardTableModRefBS::card_shift);
4334 }
4336 void MacroAssembler::store_check_part_2(Register obj) {
4337 BarrierSet* bs = Universe::heap()->barrier_set();
4338 assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
4339 CardTableModRefBS* ct = (CardTableModRefBS*)bs;
4340 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
4342 // The calculation for byte_map_base is as follows:
4343 // byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
4344 // So this essentially converts an address to a displacement and it will
4345 // never need to be relocated. On 64bit however the value may be too
4346 // large for a 32bit displacement.
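// e.g. (illustrative, assuming the default 512-byte cards, card_shift == 9)
// a store to address A dirties the byte at byte_map_base + (A >> 9); when
// that base fits in a signed 32-bit displacement, the movb below performs
// the whole card mark in a single instruction.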
4347 intptr_t disp = (intptr_t) ct->byte_map_base;
4348 if (is_simm32(disp)) {
4349 Address cardtable(noreg, obj, Address::times_1, disp);
4350 movb(cardtable, 0);
4351 } else {
4352 // By doing it as an ExternalAddress 'disp' could be converted to a rip-relative
4353 // displacement and done in a single instruction given favorable mapping and a
4354 // smarter version of as_Address. However, 'ExternalAddress' generates a relocation
4355 // entry and that entry is not properly handled by the relocation code.
4356 AddressLiteral cardtable((address)ct->byte_map_base, relocInfo::none);
4357 Address index(noreg, obj, Address::times_1);
4358 movb(as_Address(ArrayAddress(cardtable, index)), 0);
4359 }
4360 }
4362 void MacroAssembler::subptr(Register dst, int32_t imm32) {
4363 LP64_ONLY(subq(dst, imm32)) NOT_LP64(subl(dst, imm32));
4364 }
4366 // Force generation of a 4 byte immediate value even if it fits into 8bit
4367 void MacroAssembler::subptr_imm32(Register dst, int32_t imm32) {
4368 LP64_ONLY(subq_imm32(dst, imm32)) NOT_LP64(subl_imm32(dst, imm32));
4369 }
4371 void MacroAssembler::subptr(Register dst, Register src) {
4372 LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src));
4373 }
4375 // C++ bool manipulation
4376 void MacroAssembler::testbool(Register dst) {
4377 if(sizeof(bool) == 1)
4378 testb(dst, 0xff);
4379 else if(sizeof(bool) == 2) {
4380 // testw implementation needed for two byte bools
4381 ShouldNotReachHere();
4382 } else if(sizeof(bool) == 4)
4383 testl(dst, dst);
4384 else
4385 // unsupported
4386 ShouldNotReachHere();
4387 }
4389 void MacroAssembler::testptr(Register dst, Register src) {
4390 LP64_ONLY(testq(dst, src)) NOT_LP64(testl(dst, src));
4391 }
4393 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
4394 void MacroAssembler::tlab_allocate(Register obj,
4395 Register var_size_in_bytes,
4396 int con_size_in_bytes,
4397 Register t1,
4398 Register t2,
4399 Label& slow_case) {
4400 assert_different_registers(obj, t1, t2);
4401 assert_different_registers(obj, var_size_in_bytes, t1);
4402 Register end = t2;
4403 Register thread = NOT_LP64(t1) LP64_ONLY(r15_thread);
4405 verify_tlab();
4407 NOT_LP64(get_thread(thread));
4409 movptr(obj, Address(thread, JavaThread::tlab_top_offset()));
4410 if (var_size_in_bytes == noreg) {
4411 lea(end, Address(obj, con_size_in_bytes));
4412 } else {
4413 lea(end, Address(obj, var_size_in_bytes, Address::times_1));
4414 }
4415 cmpptr(end, Address(thread, JavaThread::tlab_end_offset()));
4416 jcc(Assembler::above, slow_case);
4418 // update the tlab top pointer
4419 movptr(Address(thread, JavaThread::tlab_top_offset()), end);
4421 // recover var_size_in_bytes if necessary
4422 if (var_size_in_bytes == end) {
4423 subptr(var_size_in_bytes, obj);
4424 }
4425 verify_tlab();
4426 }
4428 // Preserves rbx and rdx.
4429 Register MacroAssembler::tlab_refill(Label& retry,
4430 Label& try_eden,
4431 Label& slow_case) {
4432 Register top = rax;
4433 Register t1 = rcx;
4434 Register t2 = rsi;
4435 Register thread_reg = NOT_LP64(rdi) LP64_ONLY(r15_thread);
4436 assert_different_registers(top, thread_reg, t1, t2, /* preserve: */ rbx, rdx);
4437 Label do_refill, discard_tlab;
4439 if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
4440 // No allocation in the shared eden.
4441 jmp(slow_case);
4442 }
4444 NOT_LP64(get_thread(thread_reg));
4446 movptr(top, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
4447 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));
4449 // calculate amount of free space
4450 subptr(t1, top);
4451 shrptr(t1, LogHeapWordSize);
4453 // Retain tlab and allocate object in shared space if
4454 // the amount free in the tlab is too large to discard.
4455 cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())));
4456 jcc(Assembler::lessEqual, discard_tlab);
4458 // Retain
4459 // %%% yuck as movptr...
4460 movptr(t2, (int32_t) ThreadLocalAllocBuffer::refill_waste_limit_increment());
4461 addptr(Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())), t2);
4462 if (TLABStats) {
4463 // increment number of slow_allocations
4464 addl(Address(thread_reg, in_bytes(JavaThread::tlab_slow_allocations_offset())), 1);
4465 }
4466 jmp(try_eden);
4468 bind(discard_tlab);
4469 if (TLABStats) {
4470 // increment number of refills
4471 addl(Address(thread_reg, in_bytes(JavaThread::tlab_number_of_refills_offset())), 1);
4472 // accumulate wastage -- t1 is amount free in tlab
4473 addl(Address(thread_reg, in_bytes(JavaThread::tlab_fast_refill_waste_offset())), t1);
4474 }
4476 // if tlab is currently allocated (top or end != null) then
4477 // fill [top, end + alignment_reserve) with array object
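// (The filler int[] keeps the heap parsable: heap walkers can then step
// over the dead TLAB tail as an ordinary object.)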
4478 testptr(top, top);
4479 jcc(Assembler::zero, do_refill);
4481 // set up the mark word
4482 movptr(Address(top, oopDesc::mark_offset_in_bytes()), (intptr_t)markOopDesc::prototype()->copy_set_hash(0x2));
4483 // set the length to the remaining space
4484 subptr(t1, typeArrayOopDesc::header_size(T_INT));
4485 addptr(t1, (int32_t)ThreadLocalAllocBuffer::alignment_reserve());
4486 shlptr(t1, log2_intptr(HeapWordSize/sizeof(jint)));
4487 movl(Address(top, arrayOopDesc::length_offset_in_bytes()), t1);
4488 // set klass to intArrayKlass
4489 // dubious reloc: why not an oop reloc?
4490 movptr(t1, ExternalAddress((address)Universe::intArrayKlassObj_addr()));
4491 // store klass last: concurrent GCs assume the length is valid if the
4492 // klass field is not null.
4493 store_klass(top, t1);
4495 movptr(t1, top);
4496 subptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())));
4497 incr_allocated_bytes(thread_reg, t1, 0);
4499 // refill the tlab with an eden allocation
4500 bind(do_refill);
4501 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
4502 shlptr(t1, LogHeapWordSize);
4503 // allocate new tlab, address returned in top
4504 eden_allocate(top, t1, 0, t2, slow_case);
4506 // Check that t1 was preserved in eden_allocate.
4507 #ifdef ASSERT
4508 if (UseTLAB) {
4509 Label ok;
4510 Register tsize = rsi;
4511 assert_different_registers(tsize, thread_reg, t1);
4512 push(tsize);
4513 movptr(tsize, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
4514 shlptr(tsize, LogHeapWordSize);
4515 cmpptr(t1, tsize);
4516 jcc(Assembler::equal, ok);
4517 STOP("assert(t1 != tlab size)");
4518 should_not_reach_here();
4520 bind(ok);
4521 pop(tsize);
4522 }
4523 #endif
4524 movptr(Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())), top);
4525 movptr(Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())), top);
4526 addptr(top, t1);
4527 subptr(top, (int32_t)ThreadLocalAllocBuffer::alignment_reserve_in_bytes());
4528 movptr(Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())), top);
4529 verify_tlab();
4530 jmp(retry);
4532 return thread_reg; // for use by caller
4533 }
4535 void MacroAssembler::incr_allocated_bytes(Register thread,
4536 Register var_size_in_bytes,
4537 int con_size_in_bytes,
4538 Register t1) {
4539 if (!thread->is_valid()) {
4540 #ifdef _LP64
4541 thread = r15_thread;
4542 #else
4543 assert(t1->is_valid(), "need temp reg");
4544 thread = t1;
4545 get_thread(thread);
4546 #endif
4547 }
4549 #ifdef _LP64
4550 if (var_size_in_bytes->is_valid()) {
4551 addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
4552 } else {
4553 addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
4554 }
4555 #else
4556 if (var_size_in_bytes->is_valid()) {
4557 addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
4558 } else {
4559 addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
4560 }
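// propagate the carry into the high 32 bits so the 64-bit counter stays exact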
4561 adcl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())+4), 0);
4562 #endif
4563 }
4565 void MacroAssembler::fp_runtime_fallback(address runtime_entry, int nb_args, int num_fpu_regs_in_use) {
4566 pusha();
4568 // if we are coming from c1, xmm registers may be live
4569 int off = 0;
4570 if (UseSSE == 1) {
4571 subptr(rsp, sizeof(jdouble)*8);
4572 movflt(Address(rsp,off++*sizeof(jdouble)),xmm0);
4573 movflt(Address(rsp,off++*sizeof(jdouble)),xmm1);
4574 movflt(Address(rsp,off++*sizeof(jdouble)),xmm2);
4575 movflt(Address(rsp,off++*sizeof(jdouble)),xmm3);
4576 movflt(Address(rsp,off++*sizeof(jdouble)),xmm4);
4577 movflt(Address(rsp,off++*sizeof(jdouble)),xmm5);
4578 movflt(Address(rsp,off++*sizeof(jdouble)),xmm6);
4579 movflt(Address(rsp,off++*sizeof(jdouble)),xmm7);
4580 } else if (UseSSE >= 2) {
4581 #ifdef COMPILER2
4582 if (MaxVectorSize > 16) {
4583 assert(UseAVX > 0, "256bit vectors are supported only with AVX");
4584 // Save upper half of YMM registers
4585 subptr(rsp, 16 * LP64_ONLY(16) NOT_LP64(8));
4586 vextractf128h(Address(rsp, 0),xmm0);
4587 vextractf128h(Address(rsp, 16),xmm1);
4588 vextractf128h(Address(rsp, 32),xmm2);
4589 vextractf128h(Address(rsp, 48),xmm3);
4590 vextractf128h(Address(rsp, 64),xmm4);
4591 vextractf128h(Address(rsp, 80),xmm5);
4592 vextractf128h(Address(rsp, 96),xmm6);
4593 vextractf128h(Address(rsp,112),xmm7);
4594 #ifdef _LP64
4595 vextractf128h(Address(rsp,128),xmm8);
4596 vextractf128h(Address(rsp,144),xmm9);
4597 vextractf128h(Address(rsp,160),xmm10);
4598 vextractf128h(Address(rsp,176),xmm11);
4599 vextractf128h(Address(rsp,192),xmm12);
4600 vextractf128h(Address(rsp,208),xmm13);
4601 vextractf128h(Address(rsp,224),xmm14);
4602 vextractf128h(Address(rsp,240),xmm15);
4603 #endif
4604 }
4605 #endif
4606 // Save whole 128-bit (16 bytes) XMM registers
4607 subptr(rsp, 16 * LP64_ONLY(16) NOT_LP64(8));
4608 movdqu(Address(rsp,off++*16),xmm0);
4609 movdqu(Address(rsp,off++*16),xmm1);
4610 movdqu(Address(rsp,off++*16),xmm2);
4611 movdqu(Address(rsp,off++*16),xmm3);
4612 movdqu(Address(rsp,off++*16),xmm4);
4613 movdqu(Address(rsp,off++*16),xmm5);
4614 movdqu(Address(rsp,off++*16),xmm6);
4615 movdqu(Address(rsp,off++*16),xmm7);
4616 #ifdef _LP64
4617 movdqu(Address(rsp,off++*16),xmm8);
4618 movdqu(Address(rsp,off++*16),xmm9);
4619 movdqu(Address(rsp,off++*16),xmm10);
4620 movdqu(Address(rsp,off++*16),xmm11);
4621 movdqu(Address(rsp,off++*16),xmm12);
4622 movdqu(Address(rsp,off++*16),xmm13);
4623 movdqu(Address(rsp,off++*16),xmm14);
4624 movdqu(Address(rsp,off++*16),xmm15);
4625 #endif
4626 }
4628 // Preserve registers across runtime call
4629 int incoming_argument_and_return_value_offset = -1;
4630 if (num_fpu_regs_in_use > 1) {
4631 // Must preserve all other FPU regs (could alternatively convert
4632 // SharedRuntime::dsin, dcos etc. into assembly routines known not to trash
4633 // FPU state, but cannot trust the C compiler)
4634 NEEDS_CLEANUP;
4635 // NOTE that in this case we also push the incoming argument(s) to
4636 // the stack and restore it later; we also use this stack slot to
4637 // hold the return value from dsin, dcos etc.
4638 for (int i = 0; i < num_fpu_regs_in_use; i++) {
4639 subptr(rsp, sizeof(jdouble));
4640 fstp_d(Address(rsp, 0));
4641 }
4642 incoming_argument_and_return_value_offset = sizeof(jdouble)*(num_fpu_regs_in_use-1);
4643 for (int i = nb_args-1; i >= 0; i--) {
4644 fld_d(Address(rsp, incoming_argument_and_return_value_offset-i*sizeof(jdouble)));
4645 }
4646 }
4648 subptr(rsp, nb_args*sizeof(jdouble));
4649 for (int i = 0; i < nb_args; i++) {
4650 fstp_d(Address(rsp, i*sizeof(jdouble)));
4651 }
4653 #ifdef _LP64
4654 if (nb_args > 0) {
4655 movdbl(xmm0, Address(rsp, 0));
4656 }
4657 if (nb_args > 1) {
4658 movdbl(xmm1, Address(rsp, sizeof(jdouble)));
4659 }
4660 assert(nb_args <= 2, "unsupported number of args");
4661 #endif // _LP64
4663 // NOTE: we must not use call_VM_leaf here because that requires a
4664 // complete interpreter frame in debug mode -- same bug as 4387334
4665 // MacroAssembler::call_VM_leaf_base is perfectly safe and will
4666 // do proper 64bit abi
4668 NEEDS_CLEANUP;
4669 // Need to add stack banging before this runtime call if it needs to
4670 // be taken; however, there is no generic stack banging routine at
4671 // the MacroAssembler level
4673 MacroAssembler::call_VM_leaf_base(runtime_entry, 0);
4675 #ifdef _LP64
4676 movsd(Address(rsp, 0), xmm0);
4677 fld_d(Address(rsp, 0));
4678 #endif // _LP64
4679 addptr(rsp, sizeof(jdouble) * nb_args);
4680 if (num_fpu_regs_in_use > 1) {
4681 // Must save return value to stack and then restore entire FPU
4682 // stack except incoming arguments
4683 fstp_d(Address(rsp, incoming_argument_and_return_value_offset));
4684 for (int i = 0; i < num_fpu_regs_in_use - nb_args; i++) {
4685 fld_d(Address(rsp, 0));
4686 addptr(rsp, sizeof(jdouble));
4687 }
4688 fld_d(Address(rsp, (nb_args-1)*sizeof(jdouble)));
4689 addptr(rsp, sizeof(jdouble) * nb_args);
4690 }
4692 off = 0;
4693 if (UseSSE == 1) {
4694 movflt(xmm0, Address(rsp,off++*sizeof(jdouble)));
4695 movflt(xmm1, Address(rsp,off++*sizeof(jdouble)));
4696 movflt(xmm2, Address(rsp,off++*sizeof(jdouble)));
4697 movflt(xmm3, Address(rsp,off++*sizeof(jdouble)));
4698 movflt(xmm4, Address(rsp,off++*sizeof(jdouble)));
4699 movflt(xmm5, Address(rsp,off++*sizeof(jdouble)));
4700 movflt(xmm6, Address(rsp,off++*sizeof(jdouble)));
4701 movflt(xmm7, Address(rsp,off++*sizeof(jdouble)));
4702 addptr(rsp, sizeof(jdouble)*8);
4703 } else if (UseSSE >= 2) {
4704 // Restore whole 128-bit (16 bytes) XMM registers
4705 movdqu(xmm0, Address(rsp,off++*16));
4706 movdqu(xmm1, Address(rsp,off++*16));
4707 movdqu(xmm2, Address(rsp,off++*16));
4708 movdqu(xmm3, Address(rsp,off++*16));
4709 movdqu(xmm4, Address(rsp,off++*16));
4710 movdqu(xmm5, Address(rsp,off++*16));
4711 movdqu(xmm6, Address(rsp,off++*16));
4712 movdqu(xmm7, Address(rsp,off++*16));
4713 #ifdef _LP64
4714 movdqu(xmm8, Address(rsp,off++*16));
4715 movdqu(xmm9, Address(rsp,off++*16));
4716 movdqu(xmm10, Address(rsp,off++*16));
4717 movdqu(xmm11, Address(rsp,off++*16));
4718 movdqu(xmm12, Address(rsp,off++*16));
4719 movdqu(xmm13, Address(rsp,off++*16));
4720 movdqu(xmm14, Address(rsp,off++*16));
4721 movdqu(xmm15, Address(rsp,off++*16));
4722 #endif
4723 addptr(rsp, 16 * LP64_ONLY(16) NOT_LP64(8));
4724 #ifdef COMPILER2
4725 if (MaxVectorSize > 16) {
4726 // Restore upper half of YMM registers.
4727 vinsertf128h(xmm0, Address(rsp, 0));
4728 vinsertf128h(xmm1, Address(rsp, 16));
4729 vinsertf128h(xmm2, Address(rsp, 32));
4730 vinsertf128h(xmm3, Address(rsp, 48));
4731 vinsertf128h(xmm4, Address(rsp, 64));
4732 vinsertf128h(xmm5, Address(rsp, 80));
4733 vinsertf128h(xmm6, Address(rsp, 96));
4734 vinsertf128h(xmm7, Address(rsp,112));
4735 #ifdef _LP64
4736 vinsertf128h(xmm8, Address(rsp,128));
4737 vinsertf128h(xmm9, Address(rsp,144));
4738 vinsertf128h(xmm10, Address(rsp,160));
4739 vinsertf128h(xmm11, Address(rsp,176));
4740 vinsertf128h(xmm12, Address(rsp,192));
4741 vinsertf128h(xmm13, Address(rsp,208));
4742 vinsertf128h(xmm14, Address(rsp,224));
4743 vinsertf128h(xmm15, Address(rsp,240));
4744 #endif
4745 addptr(rsp, 16 * LP64_ONLY(16) NOT_LP64(8));
4746 }
4747 #endif
4748 }
4749 popa();
4750 }
4752 static const double pi_4 = 0.7853981633974483;
4754 void MacroAssembler::trigfunc(char trig, int num_fpu_regs_in_use) {
4755 // A hand-coded argument reduction for values with fabs(x) in (pi/4, pi/2)
4756 // was attempted in this code; unfortunately it appears that the
4757 // switch to 80-bit precision and back causes this to be
4758 // unprofitable compared with simply performing a runtime call if
4759 // the argument is out of the (-pi/4, pi/4) range.
4761 Register tmp = noreg;
4762 if (!VM_Version::supports_cmov()) {
4763 // fcmp needs a temporary, so preserve rbx
4764 tmp = rbx;
4765 push(tmp);
4766 }
4768 Label slow_case, done;
4770 ExternalAddress pi4_adr = (address)&pi_4;
4771 if (reachable(pi4_adr)) {
4772 // x ?<= pi/4
4773 fld_d(pi4_adr);
4774 fld_s(1); // Stack: X PI/4 X
4775 fabs(); // Stack: |X| PI/4 X
4776 fcmp(tmp);
4777 jcc(Assembler::above, slow_case);
4779 // fastest case: -pi/4 <= x <= pi/4
4780 switch(trig) {
4781 case 's':
4782 fsin();
4783 break;
4784 case 'c':
4785 fcos();
4786 break;
4787 case 't':
4788 ftan();
4789 break;
4790 default:
4791 assert(false, "bad intrinsic");
4792 break;
4793 }
4794 jmp(done);
4795 }
4797 // slow case: runtime call
4798 bind(slow_case);
4800 switch(trig) {
4801 case 's':
4802 {
4803 fp_runtime_fallback(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), 1, num_fpu_regs_in_use);
4804 }
4805 break;
4806 case 'c':
4807 {
4808 fp_runtime_fallback(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), 1, num_fpu_regs_in_use);
4809 }
4810 break;
4811 case 't':
4812 {
4813 fp_runtime_fallback(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), 1, num_fpu_regs_in_use);
4814 }
4815 break;
4816 default:
4817 assert(false, "bad intrinsic");
4818 break;
4819 }
4821 // Come here with result in F-TOS
4822 bind(done);
4824 if (tmp != noreg) {
4825 pop(tmp);
4826 }
4827 }
4830 // Look up the method for a megamorphic invokeinterface call.
4831 // The target method is determined by <intf_klass, itable_index>.
4832 // The receiver klass is in recv_klass.
4833 // On success, the result will be in method_result, and execution falls through.
4834 // On failure, execution transfers to the given label.
4835 void MacroAssembler::lookup_interface_method(Register recv_klass,
4836 Register intf_klass,
4837 RegisterOrConstant itable_index,
4838 Register method_result,
4839 Register scan_temp,
4840 Label& L_no_such_interface) {
4841 assert_different_registers(recv_klass, intf_klass, method_result, scan_temp);
4842 assert(itable_index.is_constant() || itable_index.as_register() == method_result,
4843 "caller must use same register for non-constant itable index as for method");
4845 // Compute start of first itableOffsetEntry (which is at the end of the vtable)
4846 int vtable_base = InstanceKlass::vtable_start_offset() * wordSize;
4847 int itentry_off = itableMethodEntry::method_offset_in_bytes();
4848 int scan_step = itableOffsetEntry::size() * wordSize;
4849 int vte_size = vtableEntry::size() * wordSize;
4850 Address::ScaleFactor times_vte_scale = Address::times_ptr;
4851 assert(vte_size == wordSize, "else adjust times_vte_scale");
4853 movl(scan_temp, Address(recv_klass, InstanceKlass::vtable_length_offset() * wordSize));
4855 // %%% Could store the aligned, prescaled offset in the klassoop.
4856 lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
4857 if (HeapWordsPerLong > 1) {
4858 // Round up to align_object_offset boundary
4859 // see code for InstanceKlass::start_of_itable!
4860 round_to(scan_temp, BytesPerLong);
4861 }
4863 // Adjust recv_klass by scaled itable_index, so we can free itable_index.
4864 assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
4865 lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
4867 // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
4868 // if (scan->interface() == intf) {
4869 // result = (klass + scan->offset() + itable_index);
4870 // }
4871 // }
4872 Label search, found_method;
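// The first loop iteration is peeled (peel == 1): it can branch straight
// to found_method on a hit, while the rolled iteration inverts the test
// so a hit falls through to found_method.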
4874 for (int peel = 1; peel >= 0; peel--) {
4875 movptr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset_in_bytes()));
4876 cmpptr(intf_klass, method_result);
4878 if (peel) {
4879 jccb(Assembler::equal, found_method);
4880 } else {
4881 jccb(Assembler::notEqual, search);
4882 // (invert the test to fall through to found_method...)
4883 }
4885 if (!peel) break;
4887 bind(search);
4889 // Check that the previous entry is non-null. A null entry means that
4890 // the receiver class doesn't implement the interface, and wasn't the
4891 // same as when the caller was compiled.
4892 testptr(method_result, method_result);
4893 jcc(Assembler::zero, L_no_such_interface);
4894 addptr(scan_temp, scan_step);
4895 }
4897 bind(found_method);
4899 // Got a hit.
4900 movl(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset_in_bytes()));
4901 movptr(method_result, Address(recv_klass, scan_temp, Address::times_1));
4902 }
4905 // virtual method calling
4906 void MacroAssembler::lookup_virtual_method(Register recv_klass,
4907 RegisterOrConstant vtable_index,
4908 Register method_result) {
4909 const int base = InstanceKlass::vtable_start_offset() * wordSize;
4910 assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below");
4911 Address vtable_entry_addr(recv_klass,
4912 vtable_index, Address::times_ptr,
4913 base + vtableEntry::method_offset_in_bytes());
4914 movptr(method_result, vtable_entry_addr);
4915 }
4918 void MacroAssembler::check_klass_subtype(Register sub_klass,
4919 Register super_klass,
4920 Register temp_reg,
4921 Label& L_success) {
4922 Label L_failure;
4923 check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, NULL);
4924 check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, NULL);
4925 bind(L_failure);
4926 }
4929 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
4930 Register super_klass,
4931 Register temp_reg,
4932 Label* L_success,
4933 Label* L_failure,
4934 Label* L_slow_path,
4935 RegisterOrConstant super_check_offset) {
4936 assert_different_registers(sub_klass, super_klass, temp_reg);
4937 bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
4938 if (super_check_offset.is_register()) {
4939 assert_different_registers(sub_klass, super_klass,
4940 super_check_offset.as_register());
4941 } else if (must_load_sco) {
4942 assert(temp_reg != noreg, "supply either a temp or a register offset");
4943 }
4945 Label L_fallthrough;
4946 int label_nulls = 0;
4947 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
4948 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
4949 if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
4950 assert(label_nulls <= 1, "at most one NULL in the batch");
4952 int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
4953 int sco_offset = in_bytes(Klass::super_check_offset_offset());
4954 Address super_check_offset_addr(super_klass, sco_offset);
4956 // Hacked jcc, which "knows" that L_fallthrough, at least, is in
4957 // range of a jccb. If this routine grows larger, reconsider at
4958 // least some of these.
4959 #define local_jcc(assembler_cond, label) \
4960 if (&(label) == &L_fallthrough) jccb(assembler_cond, label); \
4961 else jcc( assembler_cond, label) /*omit semi*/
4963 // Hacked jmp, which may only be used just before L_fallthrough.
4964 #define final_jmp(label) \
4965 if (&(label) == &L_fallthrough) { /*do nothing*/ } \
4966 else jmp(label) /*omit semi*/
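// Usage sketch (illustrative): when a label argument is L_fallthrough,
//   local_jcc(Assembler::equal, *L_success);   // emits a 2-byte jccb
//   final_jmp(*L_slow_path);                   // emits nothing at all
// and when it is a caller-supplied label, the full-range jcc/jmp is used.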
4968 // If the pointers are equal, we are done (e.g., String[] elements).
4969 // This self-check enables sharing of secondary supertype arrays among
4970 // non-primary types such as array-of-interface. Otherwise, each such
4971 // type would need its own customized SSA.
4972 // We move this check to the front of the fast path because many
4973 // type checks are in fact trivially successful in this manner,
4974 // so we get a nicely predicted branch right at the start of the check.
4975 cmpptr(sub_klass, super_klass);
4976 local_jcc(Assembler::equal, *L_success);
4978 // Check the supertype display:
4979 if (must_load_sco) {
4980 // Positive movl does right thing on LP64.
4981 movl(temp_reg, super_check_offset_addr);
4982 super_check_offset = RegisterOrConstant(temp_reg);
4983 }
4984 Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0);
4985 cmpptr(super_klass, super_check_addr); // load displayed supertype
4987 // This check has worked decisively for primary supers.
4988 // Secondary supers are sought in the super_cache ('super_cache_addr').
4989 // (Secondary supers are interfaces and very deeply nested subtypes.)
4990 // This works in the same check above because of a tricky aliasing
4991 // between the super_cache and the primary super display elements.
4992 // (The 'super_check_addr' can address either, as the case requires.)
4993 // Note that the cache is updated below if it does not help us find
4994 // what we need immediately.
4995 // So if it was a primary super, we can just fail immediately.
4996 // Otherwise, it's the slow path for us (no success at this point).
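// A hedged summary of the dispatch below, in C-like pseudocode (field
// names abbreviated; this mirrors rather than replaces the emitted code):
//
//   sco = super_klass->super_check_offset;             // may be a constant
//   if (*(Klass**)((address)sub_klass + sco) == super_klass)
//     return success;                                  // display or cache hit
//   if (sco == secondary_super_cache_offset) return slow_path;
//   return failure;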
4998 if (super_check_offset.is_register()) {
4999 local_jcc(Assembler::equal, *L_success);
5000 cmpl(super_check_offset.as_register(), sc_offset);
5001 if (L_failure == &L_fallthrough) {
5002 local_jcc(Assembler::equal, *L_slow_path);
5003 } else {
5004 local_jcc(Assembler::notEqual, *L_failure);
5005 final_jmp(*L_slow_path);
5006 }
5007 } else if (super_check_offset.as_constant() == sc_offset) {
5008 // Need a slow path; fast failure is impossible.
5009 if (L_slow_path == &L_fallthrough) {
5010 local_jcc(Assembler::equal, *L_success);
5011 } else {
5012 local_jcc(Assembler::notEqual, *L_slow_path);
5013 final_jmp(*L_success);
5014 }
5015 } else {
5016 // No slow path; it's a fast decision.
5017 if (L_failure == &L_fallthrough) {
5018 local_jcc(Assembler::equal, *L_success);
5019 } else {
5020 local_jcc(Assembler::notEqual, *L_failure);
5021 final_jmp(*L_success);
5022 }
5023 }
5025 bind(L_fallthrough);
5027 #undef local_jcc
5028 #undef final_jmp
5029 }
5032 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
5033 Register super_klass,
5034 Register temp_reg,
5035 Register temp2_reg,
5036 Label* L_success,
5037 Label* L_failure,
5038 bool set_cond_codes) {
5039 assert_different_registers(sub_klass, super_klass, temp_reg);
5040 if (temp2_reg != noreg)
5041 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg);
5042 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)
5044 Label L_fallthrough;
5045 int label_nulls = 0;
5046 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
5047 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
5048 assert(label_nulls <= 1, "at most one NULL in the batch");
5050 // a couple of useful fields in sub_klass:
5051 int ss_offset = in_bytes(Klass::secondary_supers_offset());
5052 int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
5053 Address secondary_supers_addr(sub_klass, ss_offset);
5054 Address super_cache_addr( sub_klass, sc_offset);
5056 // Do a linear scan of the secondary super-klass chain.
5057 // This code is rarely used, so simplicity is a virtue here.
5058 // The repne_scan instruction uses fixed registers, which we must spill.
5059 // Don't worry too much about pre-existing connections with the input regs.
5061 assert(sub_klass != rax, "killed reg"); // killed by mov(rax, super)
5062 assert(sub_klass != rcx, "killed reg"); // killed by lea(rcx, &pst_counter)
5064 // Get super_klass value into rax (even if it was in rdi or rcx).
5065 bool pushed_rax = false, pushed_rcx = false, pushed_rdi = false;
5066 if (super_klass != rax || UseCompressedOops) {
5067 if (!IS_A_TEMP(rax)) { push(rax); pushed_rax = true; }
5068 mov(rax, super_klass);
5069 }
5070 if (!IS_A_TEMP(rcx)) { push(rcx); pushed_rcx = true; }
5071 if (!IS_A_TEMP(rdi)) { push(rdi); pushed_rdi = true; }
5073 #ifndef PRODUCT
5074 int* pst_counter = &SharedRuntime::_partial_subtype_ctr;
5075 ExternalAddress pst_counter_addr((address) pst_counter);
5076 NOT_LP64( incrementl(pst_counter_addr) );
5077 LP64_ONLY( lea(rcx, pst_counter_addr) );
5078 LP64_ONLY( incrementl(Address(rcx, 0)) );
5079 #endif //PRODUCT
5081 // We will consult the secondary-super array.
5082 movptr(rdi, secondary_supers_addr);
5083 // Load the array length. (Positive movl does right thing on LP64.)
5084 movl(rcx, Address(rdi, Array<Klass*>::length_offset_in_bytes()));
5085 // Skip to start of data.
5086 addptr(rdi, Array<Klass*>::base_offset_in_bytes());
5088 // Scan RCX words at [RDI] for an occurrence of RAX.
5089 // Set NZ/Z based on last compare.
5090 // The Z flag will not be set by 'repne' if RCX == 0, since 'repne' itself
5091 // does not change flags (only the repeated scas instruction sets them).
5092 // Set Z = 0 (not equal) before 'repne' to indicate that class was not found.
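// A hedged C sketch of the repne_scan semantics relied on here (pointer-
// sized elements; the flags come from the repeated scas compare itself):
//
//   while (rcx != 0) {
//     rcx--;
//     ZF = (*(intptr_t*)rdi == rax);
//     rdi += wordSize;
//     if (ZF) break;                  // repne stops on a match
//   }
//
// The testptr below pre-clears ZF (rax holds a non-null Klass*) so that a
// zero-trip scan still reports "not found".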
5094 testptr(rax,rax); // Set Z = 0
5095 repne_scan();
5097 // Unspill the temp. registers:
5098 if (pushed_rdi) pop(rdi);
5099 if (pushed_rcx) pop(rcx);
5100 if (pushed_rax) pop(rax);
5102 if (set_cond_codes) {
5103 // Special hack for the AD files: rdi is guaranteed non-zero.
5104 assert(!pushed_rdi, "rdi must be left non-NULL");
5105 // Also, the condition codes are properly set Z/NZ on success/failure.
5106 }
5108 if (L_failure == &L_fallthrough)
5109 jccb(Assembler::notEqual, *L_failure);
5110 else jcc(Assembler::notEqual, *L_failure);
5112 // Success. Cache the super we found and proceed in triumph.
5113 movptr(super_cache_addr, super_klass);
5115 if (L_success != &L_fallthrough) {
5116 jmp(*L_success);
5117 }
5119 #undef IS_A_TEMP
5121 bind(L_fallthrough);
5122 }
5125 void MacroAssembler::cmov32(Condition cc, Register dst, Address src) {
5126 if (VM_Version::supports_cmov()) {
5127 cmovl(cc, dst, src);
5128 } else {
5129 Label L;
5130 jccb(negate_condition(cc), L);
5131 movl(dst, src);
5132 bind(L);
5133 }
5134 }
5136 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) {
5137 if (VM_Version::supports_cmov()) {
5138 cmovl(cc, dst, src);
5139 } else {
5140 Label L;
5141 jccb(negate_condition(cc), L);
5142 movl(dst, src);
5143 bind(L);
5144 }
5145 }
5147 void MacroAssembler::verify_oop(Register reg, const char* s) {
5148 if (!VerifyOops) return;
5150 // Pass register number to verify_oop_subroutine
5151 const char* b = NULL;
5152 {
5153 ResourceMark rm;
5154 stringStream ss;
5155 ss.print("verify_oop: %s: %s", reg->name(), s);
5156 b = code_string(ss.as_string());
5157 }
5158 BLOCK_COMMENT("verify_oop {");
5159 #ifdef _LP64
5160 push(rscratch1); // save r10, trashed by movptr()
5161 #endif
5162 push(rax); // save rax
5163 push(reg); // pass register argument
5164 ExternalAddress buffer((address) b);
5165 // avoid using pushptr, as it modifies scratch registers
5166 // and our contract is not to modify anything
5167 movptr(rax, buffer.addr());
5168 push(rax);
5169 // call indirectly to solve generation ordering problem
5170 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
5171 call(rax);
5172 // Caller pops the arguments (oop, message) and restores rax, r10
5173 BLOCK_COMMENT("} verify_oop");
5174 }
5177 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
5178 Register tmp,
5179 int offset) {
5180 intptr_t value = *delayed_value_addr;
5181 if (value != 0)
5182 return RegisterOrConstant(value + offset);
5184 // load indirectly to solve generation ordering problem
5185 movptr(tmp, ExternalAddress((address) delayed_value_addr));
5187 #ifdef ASSERT
5188 { Label L;
5189 testptr(tmp, tmp);
5190 if (WizardMode) {
5191 const char* buf = NULL;
5192 {
5193 ResourceMark rm;
5194 stringStream ss;
5195 ss.print("DelayedValue=" INTPTR_FORMAT, delayed_value_addr[1]);
5196 buf = code_string(ss.as_string());
5197 }
5198 jcc(Assembler::notZero, L);
5199 STOP(buf);
5200 } else {
5201 jccb(Assembler::notZero, L);
5202 hlt();
5203 }
5204 bind(L);
5205 }
5206 #endif
5208 if (offset != 0)
5209 addptr(tmp, offset);
5211 return RegisterOrConstant(tmp);
5212 }
5215 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
5216 int extra_slot_offset) {
5217 // cf. TemplateTable::prepare_invoke(), if (load_receiver).
5218 int stackElementSize = Interpreter::stackElementSize;
5219 int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
5220 #ifdef ASSERT
5221 int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
5222 assert(offset1 - offset == stackElementSize, "correct arithmetic");
5223 #endif
5224 Register scale_reg = noreg;
5225 Address::ScaleFactor scale_factor = Address::no_scale;
5226 if (arg_slot.is_constant()) {
5227 offset += arg_slot.as_constant() * stackElementSize;
5228 } else {
5229 scale_reg = arg_slot.as_register();
5230 scale_factor = Address::times(stackElementSize);
5231 }
5232 offset += wordSize; // return PC is on stack
5233 return Address(rsp, scale_reg, scale_factor, offset);
5234 }
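// Worked example (illustrative, LP64, assuming expr_offset_in_bytes(i)
// is i * stackElementSize with stackElementSize == 8): a constant
// arg_slot of 2 and extra_slot_offset of 0 yield
//   Address(rsp, noreg, no_scale, 2*8 + wordSize)   // i.e. [rsp + 24]
// where the extra wordSize skips the return PC on the stack.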
5237 void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
5238 if (!VerifyOops) return;
5240 // Address adjust(addr.base(), addr.index(), addr.scale(), addr.disp() + BytesPerWord);
5241 // Pass register number to verify_oop_subroutine
5242 const char* b = NULL;
5243 {
5244 ResourceMark rm;
5245 stringStream ss;
5246 ss.print("verify_oop_addr: %s", s);
5247 b = code_string(ss.as_string());
5248 }
5249 #ifdef _LP64
5250 push(rscratch1); // save r10, trashed by movptr()
5251 #endif
5252 push(rax); // save rax
5253 // addr may contain rsp so we will have to adjust it based on the push
5254 // we just did (and on 64 bit we do two pushes)
5255 // NOTE: the 64-bit code seemed to have had a bug in that it did movq(addr, rax),
5256 // which stores rax into addr, the reverse of what was intended.
5257 if (addr.uses(rsp)) {
5258 lea(rax, addr);
5259 pushptr(Address(rax, LP64_ONLY(2 *) BytesPerWord));
5260 } else {
5261 pushptr(addr);
5262 }
5264 ExternalAddress buffer((address) b);
5265 // pass msg argument
5266 // avoid using pushptr, as it modifies scratch registers
5267 // and our contract is not to modify anything
5268 movptr(rax, buffer.addr());
5269 push(rax);
5271 // call indirectly to solve generation ordering problem
5272 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
5273 call(rax);
5274 // Caller pops the arguments (addr, message) and restores rax, r10.
5275 }
5277 void MacroAssembler::verify_tlab() {
5278 #ifdef ASSERT
5279 if (UseTLAB && VerifyOops) {
5280 Label next, ok;
5281 Register t1 = rsi;
5282 Register thread_reg = NOT_LP64(rbx) LP64_ONLY(r15_thread);
5284 push(t1);
5285 NOT_LP64(push(thread_reg));
5286 NOT_LP64(get_thread(thread_reg));
5288 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
5289 cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())));
5290 jcc(Assembler::aboveEqual, next);
5291 STOP("assert(top >= start)");
5292 should_not_reach_here();
5294 bind(next);
5295 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));
5296 cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
5297 jcc(Assembler::aboveEqual, ok);
5298 STOP("assert(top <= end)");
5299 should_not_reach_here();
5301 bind(ok);
5302 NOT_LP64(pop(thread_reg));
5303 pop(t1);
5304 }
5305 #endif
5306 }
5308 class ControlWord {
5309 public:
5310 int32_t _value;
5312 int rounding_control() const { return (_value >> 10) & 3 ; }
5313 int precision_control() const { return (_value >> 8) & 3 ; }
5314 bool precision() const { return ((_value >> 5) & 1) != 0; }
5315 bool underflow() const { return ((_value >> 4) & 1) != 0; }
5316 bool overflow() const { return ((_value >> 3) & 1) != 0; }
5317 bool zero_divide() const { return ((_value >> 2) & 1) != 0; }
5318 bool denormalized() const { return ((_value >> 1) & 1) != 0; }
5319 bool invalid() const { return ((_value >> 0) & 1) != 0; }
5321 void print() const {
5322 // rounding control
5323 const char* rc;
5324 switch (rounding_control()) {
5325 case 0: rc = "round near"; break;
5326 case 1: rc = "round down"; break;
5327 case 2: rc = "round up "; break;
5328 case 3: rc = "chop "; break;
5329 };
5330 // precision control
5331 const char* pc;
5332 switch (precision_control()) {
5333 case 0: pc = "24 bits "; break;
5334 case 1: pc = "reserved"; break;
5335 case 2: pc = "53 bits "; break;
5336 case 3: pc = "64 bits "; break;
5337 };
5338 // flags
5339 char f[9];
5340 f[0] = ' ';
5341 f[1] = ' ';
5342 f[2] = (precision ()) ? 'P' : 'p';
5343 f[3] = (underflow ()) ? 'U' : 'u';
5344 f[4] = (overflow ()) ? 'O' : 'o';
5345 f[5] = (zero_divide ()) ? 'Z' : 'z';
5346 f[6] = (denormalized()) ? 'D' : 'd';
5347 f[7] = (invalid ()) ? 'I' : 'i';
5348 f[8] = '\x0';
5349 // output
5350 printf("%04x masks = %s, %s, %s", _value & 0xFFFF, f, rc, pc);
5351 }
5353 };
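// Worked example (illustrative): decoding the value 0x027f with the
// accessors above gives rounding_control() == 0 ("round near"),
// precision_control() == 2 ("53 bits "), and exception-mask bits 0..5 all
// set, so print() would show the flag string "  PUOZDI".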
5355 class StatusWord {
5356 public:
5357 int32_t _value;
5359 bool busy() const { return ((_value >> 15) & 1) != 0; }
5360 bool C3() const { return ((_value >> 14) & 1) != 0; }
5361 bool C2() const { return ((_value >> 10) & 1) != 0; }
5362 bool C1() const { return ((_value >> 9) & 1) != 0; }
5363 bool C0() const { return ((_value >> 8) & 1) != 0; }
5364 int top() const { return (_value >> 11) & 7 ; }
5365 bool error_status() const { return ((_value >> 7) & 1) != 0; }
5366 bool stack_fault() const { return ((_value >> 6) & 1) != 0; }
5367 bool precision() const { return ((_value >> 5) & 1) != 0; }
5368 bool underflow() const { return ((_value >> 4) & 1) != 0; }
5369 bool overflow() const { return ((_value >> 3) & 1) != 0; }
5370 bool zero_divide() const { return ((_value >> 2) & 1) != 0; }
5371 bool denormalized() const { return ((_value >> 1) & 1) != 0; }
5372 bool invalid() const { return ((_value >> 0) & 1) != 0; }
5374 void print() const {
5375 // condition codes
5376 char c[5];
5377 c[0] = (C3()) ? '3' : '-';
5378 c[1] = (C2()) ? '2' : '-';
5379 c[2] = (C1()) ? '1' : '-';
5380 c[3] = (C0()) ? '0' : '-';
5381 c[4] = '\x0';
5382 // flags
5383 char f[9];
5384 f[0] = (error_status()) ? 'E' : '-';
5385 f[1] = (stack_fault ()) ? 'S' : '-';
5386 f[2] = (precision ()) ? 'P' : '-';
5387 f[3] = (underflow ()) ? 'U' : '-';
5388 f[4] = (overflow ()) ? 'O' : '-';
5389 f[5] = (zero_divide ()) ? 'Z' : '-';
5390 f[6] = (denormalized()) ? 'D' : '-';
5391 f[7] = (invalid ()) ? 'I' : '-';
5392 f[8] = '\x0';
5393 // output
5394 printf("%04x flags = %s, cc = %s, top = %d", _value & 0xFFFF, f, c, top());
5395 }
5397 };
5399 class TagWord {
5400 public:
5401 int32_t _value;
5403 int tag_at(int i) const { return (_value >> (i*2)) & 3; }
5405 void print() const {
5406 printf("%04x", _value & 0xFFFF);
5407 }
5409 };
5411 class FPU_Register {
5412 public:
5413 int32_t _m0;
5414 int32_t _m1;
5415 int16_t _ex;
5417 bool is_indefinite() const {
5418 return _ex == -1 && _m1 == (int32_t)0xC0000000 && _m0 == 0;
5419 }
5421 void print() const {
5422 char sign = (_ex < 0) ? '-' : '+';
5423 const char* kind = (_ex == 0x7FFF || _ex == (int16_t)-1) ? "NaN" : " ";
5424 printf("%c%04hx.%08x%08x %s", sign, _ex, _m1, _m0, kind);
5425 };
5427 };
5429 class FPU_State {
5430 public:
5431 enum {
5432 register_size = 10,
5433 number_of_registers = 8,
5434 register_mask = 7
5435 };
5437 ControlWord _control_word;
5438 StatusWord _status_word;
5439 TagWord _tag_word;
5440 int32_t _error_offset;
5441 int32_t _error_selector;
5442 int32_t _data_offset;
5443 int32_t _data_selector;
5444 int8_t _register[register_size * number_of_registers];
5446 int tag_for_st(int i) const { return _tag_word.tag_at((_status_word.top() + i) & register_mask); }
5447 FPU_Register* st(int i) const { return (FPU_Register*)&_register[register_size * i]; }
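// Example (illustrative): with _status_word.top() == 5, the logical slot
// ST(0) is physical register 5 and ST(3) wraps around to register 0, so
// tag_for_st(3) == _tag_word.tag_at((5 + 3) & register_mask) == tag_at(0).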
5449 const char* tag_as_string(int tag) const {
5450 switch (tag) {
5451 case 0: return "valid";
5452 case 1: return "zero";
5453 case 2: return "special";
5454 case 3: return "empty";
5455 }
5456 ShouldNotReachHere();
5457 return NULL;
5458 }
5460 void print() const {
5461 // print computation registers
5462 { int t = _status_word.top();
5463 for (int i = 0; i < number_of_registers; i++) {
5464 int j = (i - t) & register_mask;
5465 printf("%c r%d = ST%d = ", (j == 0 ? '*' : ' '), i, j);
5466 st(j)->print();
5467 printf(" %s\n", tag_as_string(_tag_word.tag_at(i)));
5468 }
5469 }
5470 printf("\n");
5471 // print control registers
5472 printf("ctrl = "); _control_word.print(); printf("\n");
5473 printf("stat = "); _status_word .print(); printf("\n");
5474 printf("tags = "); _tag_word .print(); printf("\n");
5475 }
5477 };
5479 class Flag_Register {
5480 public:
5481 int32_t _value;
5483 bool overflow() const { return ((_value >> 11) & 1) != 0; }
5484 bool direction() const { return ((_value >> 10) & 1) != 0; }
5485 bool sign() const { return ((_value >> 7) & 1) != 0; }
5486 bool zero() const { return ((_value >> 6) & 1) != 0; }
5487 bool auxiliary_carry() const { return ((_value >> 4) & 1) != 0; }
5488 bool parity() const { return ((_value >> 2) & 1) != 0; }
5489 bool carry() const { return ((_value >> 0) & 1) != 0; }
5491 void print() const {
5492 // flags
5493 char f[8];
5494 f[0] = (overflow ()) ? 'O' : '-';
5495 f[1] = (direction ()) ? 'D' : '-';
5496 f[2] = (sign ()) ? 'S' : '-';
5497 f[3] = (zero ()) ? 'Z' : '-';
5498 f[4] = (auxiliary_carry()) ? 'A' : '-';
5499 f[5] = (parity ()) ? 'P' : '-';
5500 f[6] = (carry ()) ? 'C' : '-';
5501 f[7] = '\x0';
5502 // output
5503 printf("%08x flags = %s", _value, f);
5504 }
5506 };
5508 class IU_Register {
5509 public:
5510 int32_t _value;
5512 void print() const {
5513 printf("%08x %11d", _value, _value);
5514 }
5516 };
5518 class IU_State {
5519 public:
5520 Flag_Register _eflags;
5521 IU_Register _rdi;
5522 IU_Register _rsi;
5523 IU_Register _rbp;
5524 IU_Register _rsp;
5525 IU_Register _rbx;
5526 IU_Register _rdx;
5527 IU_Register _rcx;
5528 IU_Register _rax;
5530 void print() const {
5531 // computation registers
5532 printf("rax, = "); _rax.print(); printf("\n");
5533 printf("rbx, = "); _rbx.print(); printf("\n");
5534 printf("rcx = "); _rcx.print(); printf("\n");
5535 printf("rdx = "); _rdx.print(); printf("\n");
5536 printf("rdi = "); _rdi.print(); printf("\n");
5537 printf("rsi = "); _rsi.print(); printf("\n");
5538 printf("rbp, = "); _rbp.print(); printf("\n");
5539 printf("rsp = "); _rsp.print(); printf("\n");
5540 printf("\n");
5541 // control registers
5542 printf("flgs = "); _eflags.print(); printf("\n");
5543 }
5544 };
5547 class CPU_State {
5548 public:
5549 FPU_State _fpu_state;
5550 IU_State _iu_state;
5552 void print() const {
5553 printf("--------------------------------------------------\n");
5554 _iu_state .print();
5555 printf("\n");
5556 _fpu_state.print();
5557 printf("--------------------------------------------------\n");
5558 }
5560 };
5563 static void _print_CPU_state(CPU_State* state) {
5564 state->print();
5565 };
5568 void MacroAssembler::print_CPU_state() {
5569 push_CPU_state();
5570 push(rsp); // pass CPU state
5571 call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state)));
5572 addptr(rsp, wordSize); // discard argument
5573 pop_CPU_state();
5574 }
5577 static bool _verify_FPU(int stack_depth, char* s, CPU_State* state) {
5578 static int counter = 0;
5579 FPU_State* fs = &state->_fpu_state;
5580 counter++;
5581 // For leaf calls, only verify that the top few elements remain empty.
5582 // We only need 1 empty at the top for C2 code.
5583 if( stack_depth < 0 ) {
5584 if( fs->tag_for_st(7) != 3 ) {
5585 printf("FPR7 not empty\n");
5586 state->print();
5587 assert(false, "error");
5588 return false;
5589 }
5590 return true; // All other stack states do not matter
5591 }
5593 assert((fs->_control_word._value & 0xffff) == StubRoutines::_fpu_cntrl_wrd_std,
5594 "bad FPU control word");
5596 // compute stack depth
5597 int i = 0;
5598 while (i < FPU_State::number_of_registers && fs->tag_for_st(i) < 3) i++;
5599 int d = i;
5600 while (i < FPU_State::number_of_registers && fs->tag_for_st(i) == 3) i++;
5601 // verify findings
5602 if (i != FPU_State::number_of_registers) {
5603 // stack not contiguous
5604 printf("%s: stack not contiguous at ST%d\n", s, i);
5605 state->print();
5606 assert(false, "error");
5607 return false;
5608 }
5609 // check if computed stack depth corresponds to expected stack depth
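// (Note: given the early return for stack_depth < 0 above, the negative
// branch below is effectively dead code.)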
5610 if (stack_depth < 0) {
5611 // expected stack depth is -stack_depth or less
5612 if (d > -stack_depth) {
5613 // too many elements on the stack
5614 printf("%s: <= %d stack elements expected but found %d\n", s, -stack_depth, d);
5615 state->print();
5616 assert(false, "error");
5617 return false;
5618 }
5619 } else {
5620 // expected stack depth is stack_depth
5621 if (d != stack_depth) {
5622 // wrong stack depth
5623 printf("%s: %d stack elements expected but found %d\n", s, stack_depth, d);
5624 state->print();
5625 assert(false, "error");
5626 return false;
5627 }
5628 }
5629 // everything is cool
5630 return true;
5631 }
5634 void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
5635 if (!VerifyFPU) return;
5636 push_CPU_state();
5637 push(rsp); // pass CPU state
5638 ExternalAddress msg((address) s);
5639 // pass message string s
5640 pushptr(msg.addr());
5641 push(stack_depth); // pass stack depth
5642 call(RuntimeAddress(CAST_FROM_FN_PTR(address, _verify_FPU)));
5643 addptr(rsp, 3 * wordSize); // discard arguments
5644 // check for error
5645 { Label L;
5646 testl(rax, rax);
5647 jcc(Assembler::notZero, L);
5648 int3(); // break if error condition
5649 bind(L);
5650 }
5651 pop_CPU_state();
5652 }
5654 void MacroAssembler::restore_cpu_control_state_after_jni() {
5655 // Either restore the MXCSR register after returning from the JNI Call
5656 // or verify that it wasn't changed (with -Xcheck:jni flag).
5657 if (VM_Version::supports_sse()) {
5658 if (RestoreMXCSROnJNICalls) {
5659 ldmxcsr(ExternalAddress(StubRoutines::addr_mxcsr_std()));
5660 } else if (CheckJNICalls) {
5661 call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
5662 }
5663 }
5664 if (VM_Version::supports_avx()) {
5665 // Clear upper bits of YMM registers to avoid SSE <-> AVX transition penalty.
5666 vzeroupper();
5667 }
5669 #ifndef _LP64
5670 // Either restore the x87 floating point control word after returning
5671 // from the JNI call or verify that it wasn't changed.
5672 if (CheckJNICalls) {
5673 call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry()));
5674 }
5675 #endif // _LP64
5676 }
5679 void MacroAssembler::load_klass(Register dst, Register src) {
5680 #ifdef _LP64
5681 if (UseCompressedClassPointers) {
5682 movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
5683 decode_klass_not_null(dst);
5684 } else
5685 #endif
5686 movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
5687 }
5689 void MacroAssembler::load_prototype_header(Register dst, Register src) {
5690 load_klass(dst, src);
5691 movptr(dst, Address(dst, Klass::prototype_header_offset()));
5692 }
5694 void MacroAssembler::store_klass(Register dst, Register src) {
5695 #ifdef _LP64
5696 if (UseCompressedClassPointers) {
5697 encode_klass_not_null(src);
5698 movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
5699 } else
5700 #endif
5701 movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
5702 }
5704 void MacroAssembler::load_heap_oop(Register dst, Address src) {
5705 #ifdef _LP64
5706 // FIXME: Must change all places where we try to load the klass.
5707 if (UseCompressedOops) {
5708 movl(dst, src);
5709 decode_heap_oop(dst);
5710 } else
5711 #endif
5712 movptr(dst, src);
5713 }
5715 // Doesn't do verification, generates fixed-size code
5716 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src) {
5717 #ifdef _LP64
5718 if (UseCompressedOops) {
5719 movl(dst, src);
5720 decode_heap_oop_not_null(dst);
5721 } else
5722 #endif
5723 movptr(dst, src);
5724 }
5726 void MacroAssembler::store_heap_oop(Address dst, Register src) {
5727 #ifdef _LP64
5728 if (UseCompressedOops) {
5729 assert(!dst.uses(src), "not enough registers");
5730 encode_heap_oop(src);
5731 movl(dst, src);
5732 } else
5733 #endif
5734 movptr(dst, src);
5735 }
5737 void MacroAssembler::cmp_heap_oop(Register src1, Address src2, Register tmp) {
5738 assert_different_registers(src1, tmp);
5739 #ifdef _LP64
5740 if (UseCompressedOops) {
5741 bool did_push = false;
5742 if (tmp == noreg) {
5743 tmp = rax;
5744 push(tmp);
5745 did_push = true;
5746 assert(!src2.uses(rsp), "can't push");
5747 }
5748 load_heap_oop(tmp, src2);
5749 cmpptr(src1, tmp);
5750 if (did_push) pop(tmp);
5751 } else
5752 #endif
5753 cmpptr(src1, src2);
5754 }
5756 // Used for storing NULLs.
5757 void MacroAssembler::store_heap_oop_null(Address dst) {
5758 #ifdef _LP64
5759 if (UseCompressedOops) {
5760 movl(dst, (int32_t)NULL_WORD);
5761 } else {
5762 movslq(dst, (int32_t)NULL_WORD);
5763 }
5764 #else
5765 movl(dst, (int32_t)NULL_WORD);
5766 #endif
5767 }
5769 #ifdef _LP64
5770 void MacroAssembler::store_klass_gap(Register dst, Register src) {
5771 if (UseCompressedClassPointers) {
5772 // Store to klass gap in destination
5773 movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
5774 }
5775 }
5777 #ifdef ASSERT
5778 void MacroAssembler::verify_heapbase(const char* msg) {
5779 assert (UseCompressedOops, "should be compressed");
5780 assert (Universe::heap() != NULL, "java heap should be initialized");
5781 if (CheckCompressedOops) {
5782 Label ok;
5783 push(rscratch1); // cmpptr trashes rscratch1
5784 cmpptr(r12_heapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
5785 jcc(Assembler::equal, ok);
5786 STOP(msg);
5787 bind(ok);
5788 pop(rscratch1);
5789 }
5790 }
5791 #endif
5793 // Algorithm must match oop.inline.hpp encode_heap_oop.
5794 void MacroAssembler::encode_heap_oop(Register r) {
5795 #ifdef ASSERT
5796 verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
5797 #endif
5798 verify_oop(r, "broken oop in encode_heap_oop");
5799 if (Universe::narrow_oop_base() == NULL) {
5800 if (Universe::narrow_oop_shift() != 0) {
5801 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
5802 shrq(r, LogMinObjAlignmentInBytes);
5803 }
5804 return;
5805 }
5806 testq(r, r);
5807 cmovq(Assembler::equal, r, r12_heapbase);
5808 subq(r, r12_heapbase);
5809 shrq(r, LogMinObjAlignmentInBytes);
5810 }
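// Worked example (illustrative values): with narrow_oop_base ==
// 0x0000000800000000 and narrow_oop_shift == 3, the oop 0x0000000800000040
// encodes as (0x800000040 - 0x800000000) >> 3 == 0x8. A NULL oop is first
// cmov'ed to the base itself, so (base - base) >> 3 == 0 and NULL stays 0.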
5812 void MacroAssembler::encode_heap_oop_not_null(Register r) {
5813 #ifdef ASSERT
5814 verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?");
5815 if (CheckCompressedOops) {
5816 Label ok;
5817 testq(r, r);
5818 jcc(Assembler::notEqual, ok);
5819 STOP("null oop passed to encode_heap_oop_not_null");
5820 bind(ok);
5821 }
5822 #endif
5823 verify_oop(r, "broken oop in encode_heap_oop_not_null");
5824 if (Universe::narrow_oop_base() != NULL) {
5825 subq(r, r12_heapbase);
5826 }
5827 if (Universe::narrow_oop_shift() != 0) {
5828 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
5829 shrq(r, LogMinObjAlignmentInBytes);
5830 }
5831 }
5833 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
5834 #ifdef ASSERT
5835 verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?");
5836 if (CheckCompressedOops) {
5837 Label ok;
5838 testq(src, src);
5839 jcc(Assembler::notEqual, ok);
5840 STOP("null oop passed to encode_heap_oop_not_null2");
5841 bind(ok);
5842 }
5843 #endif
5844 verify_oop(src, "broken oop in encode_heap_oop_not_null2");
5845 if (dst != src) {
5846 movq(dst, src);
5847 }
5848 if (Universe::narrow_oop_base() != NULL) {
5849 subq(dst, r12_heapbase);
5850 }
5851 if (Universe::narrow_oop_shift() != 0) {
5852 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
5853 shrq(dst, LogMinObjAlignmentInBytes);
5854 }
5855 }
5857 void MacroAssembler::decode_heap_oop(Register r) {
5858 #ifdef ASSERT
5859 verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
5860 #endif
5861 if (Universe::narrow_oop_base() == NULL) {
5862 if (Universe::narrow_oop_shift() != 0) {
5863 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
5864 shlq(r, LogMinObjAlignmentInBytes);
5865 }
5866 } else {
5867 Label done;
5868 shlq(r, LogMinObjAlignmentInBytes);
5869 jccb(Assembler::equal, done);
5870 addq(r, r12_heapbase);
5871 bind(done);
5872 }
5873 verify_oop(r, "broken oop in decode_heap_oop");
5874 }
5876 void MacroAssembler::decode_heap_oop_not_null(Register r) {
5877 // Note: it will change flags
5878 assert (UseCompressedOops, "should only be used for compressed headers");
5879 assert (Universe::heap() != NULL, "java heap should be initialized");
5880 // Cannot assert, unverified entry point counts instructions (see .ad file)
5881 // vtableStubs also counts instructions in pd_code_size_limit.
5882 // Also do not verify_oop as this is called by verify_oop.
5883 if (Universe::narrow_oop_shift() != 0) {
5884 assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
5885 shlq(r, LogMinObjAlignmentInBytes);
5886 if (Universe::narrow_oop_base() != NULL) {
5887 addq(r, r12_heapbase);
5888 }
5889 } else {
5890 assert (Universe::narrow_oop_base() == NULL, "sanity");
5891 }
5892 }
5894 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
5895 // Note: it will change flags
5896 assert (UseCompressedOops, "should only be used for compressed headers");
5897 assert (Universe::heap() != NULL, "java heap should be initialized");
5898 // Cannot assert, unverified entry point counts instructions (see .ad file)
5899 // vtableStubs also counts instructions in pd_code_size_limit.
5900 // Also do not verify_oop as this is called by verify_oop.
5901 if (Universe::narrow_oop_shift() != 0) {
5902 assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
5903 if (LogMinObjAlignmentInBytes == Address::times_8) {
5904 leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
5905 } else {
5906 if (dst != src) {
5907 movq(dst, src);
5908 }
5909 shlq(dst, LogMinObjAlignmentInBytes);
5910 if (Universe::narrow_oop_base() != NULL) {
5911 addq(dst, r12_heapbase);
5912 }
5913 }
5914 } else {
5915 assert (Universe::narrow_oop_base() == NULL, "sanity");
5916 if (dst != src) {
5917 movq(dst, src);
5918 }
5919 }
5920 }
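// Design note: when the shift equals Address::times_8, the whole decode
// above collapses into a single leaq computing
//   dst = r12_heapbase + (src << 3)
// which, unlike the shlq/addq sequence, also leaves the flags untouched.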
5922 void MacroAssembler::encode_klass_not_null(Register r) {
5923 if (Universe::narrow_klass_base() != NULL) {
5924 // Use r12 as a scratch register in which to temporarily load the narrow_klass_base.
5925 assert(r != r12_heapbase, "Encoding a klass in r12");
5926 mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base());
5927 subq(r, r12_heapbase);
5928 }
5929 if (Universe::narrow_klass_shift() != 0) {
5930 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
5931 shrq(r, LogKlassAlignmentInBytes);
5932 }
5933 if (Universe::narrow_klass_base() != NULL) {
5934 reinit_heapbase();
5935 }
5936 }
5938 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
5939 if (dst == src) {
5940 encode_klass_not_null(src);
5941 } else {
5942 if (Universe::narrow_klass_base() != NULL) {
5943 mov64(dst, (int64_t)Universe::narrow_klass_base());
5944 negq(dst);
5945 addq(dst, src);
5946 } else {
5947 movptr(dst, src);
5948 }
5949 if (Universe::narrow_klass_shift() != 0) {
5950 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
5951 shrq(dst, LogKlassAlignmentInBytes);
5952 }
5953 }
5954 }
5956 // Function instr_size_for_decode_klass_not_null() counts the instructions
5957 // generated by decode_klass_not_null(register r) and reinit_heapbase(),
5958 // when (Universe::heap() != NULL). Hence, if the instructions they
5959 // generate change, then this method needs to be updated.
5960 int MacroAssembler::instr_size_for_decode_klass_not_null() {
5961 assert (UseCompressedClassPointers, "only for compressed klass ptrs");
5962 if (Universe::narrow_klass_base() != NULL) {
5963 // mov64 + addq + shlq? + mov64 (for reinit_heapbase()).
5964 return (Universe::narrow_klass_shift() == 0 ? 20 : 24);
5965 } else {
5966 // longest load decode klass function, mov64, leaq
5967 return 16;
5968 }
5969 }
5971 // !!! If the instructions that get generated here change then function
5972 // instr_size_for_decode_klass_not_null() needs to get updated.
5973 void MacroAssembler::decode_klass_not_null(Register r) {
5974 // Note: it will change flags
5975 assert (UseCompressedClassPointers, "should only be used for compressed headers");
5976 assert(r != r12_heapbase, "Decoding a klass in r12");
5977 // Cannot assert, unverified entry point counts instructions (see .ad file)
5978 // vtableStubs also counts instructions in pd_code_size_limit.
5979 // Also do not verify_oop as this is called by verify_oop.
5980 if (Universe::narrow_klass_shift() != 0) {
5981 assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
5982 shlq(r, LogKlassAlignmentInBytes);
5983 }
5984 // Use r12 as a scratch register in which to temporarily load the narrow_klass_base.
5985 if (Universe::narrow_klass_base() != NULL) {
5986 mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base());
5987 addq(r, r12_heapbase);
5988 reinit_heapbase();
5989 }
5990 }
5992 void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
5993 // Note: it will change flags
5994 assert (UseCompressedClassPointers, "should only be used for compressed headers");
5995 if (dst == src) {
5996 decode_klass_not_null(dst);
5997 } else {
5998 // Cannot assert, unverified entry point counts instructions (see .ad file)
5999 // vtableStubs also counts instructions in pd_code_size_limit.
6000 // Also do not verify_oop as this is called by verify_oop.
6001 mov64(dst, (int64_t)Universe::narrow_klass_base());
6002 if (Universe::narrow_klass_shift() != 0) {
6003 assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
6004 assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?");
6005 leaq(dst, Address(dst, src, Address::times_8, 0));
6006 } else {
6007 addq(dst, src);
6008 }
6009 }
6010 }
6012 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
6013 assert (UseCompressedOops, "should only be used for compressed headers");
6014 assert (Universe::heap() != NULL, "java heap should be initialized");
6015 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
6016 int oop_index = oop_recorder()->find_index(obj);
6017 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6018 mov_narrow_oop(dst, oop_index, rspec);
6019 }
6021 void MacroAssembler::set_narrow_oop(Address dst, jobject obj) {
6022 assert (UseCompressedOops, "should only be used for compressed headers");
6023 assert (Universe::heap() != NULL, "java heap should be initialized");
6024 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
6025 int oop_index = oop_recorder()->find_index(obj);
6026 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6027 mov_narrow_oop(dst, oop_index, rspec);
6028 }
6030 void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
6031 assert (UseCompressedClassPointers, "should only be used for compressed headers");
6032 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
6033 int klass_index = oop_recorder()->find_index(k);
6034 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6035 mov_narrow_oop(dst, Klass::encode_klass(k), rspec);
6036 }
6038 void MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
6039 assert (UseCompressedClassPointers, "should only be used for compressed headers");
6040 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
6041 int klass_index = oop_recorder()->find_index(k);
6042 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6043 mov_narrow_oop(dst, Klass::encode_klass(k), rspec);
6044 }
6046 void MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) {
6047 assert (UseCompressedOops, "should only be used for compressed headers");
6048 assert (Universe::heap() != NULL, "java heap should be initialized");
6049 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
6050 int oop_index = oop_recorder()->find_index(obj);
6051 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6052 Assembler::cmp_narrow_oop(dst, oop_index, rspec);
6053 }
6055 void MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) {
6056 assert (UseCompressedOops, "should only be used for compressed headers");
6057 assert (Universe::heap() != NULL, "java heap should be initialized");
6058 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
6059 int oop_index = oop_recorder()->find_index(obj);
6060 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6061 Assembler::cmp_narrow_oop(dst, oop_index, rspec);
6062 }
6064 void MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) {
6065 assert (UseCompressedClassPointers, "should only be used for compressed headers");
6066 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
6067 int klass_index = oop_recorder()->find_index(k);
6068 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6069 Assembler::cmp_narrow_oop(dst, Klass::encode_klass(k), rspec);
6070 }
6072 void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
6073 assert (UseCompressedClassPointers, "should only be used for compressed headers");
6074 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
6075 int klass_index = oop_recorder()->find_index(k);
6076 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6077 Assembler::cmp_narrow_oop(dst, Klass::encode_klass(k), rspec);
6078 }
6080 void MacroAssembler::reinit_heapbase() {
6081 if (UseCompressedOops || UseCompressedClassPointers) {
6082 if (Universe::heap() != NULL) {
6083 if (Universe::narrow_oop_base() == NULL) {
6084 MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
6085 } else {
6086 mov64(r12_heapbase, (int64_t)Universe::narrow_ptrs_base());
6087 }
6088 } else {
6089 movptr(r12_heapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
6090 }
6091 }
6092 }
6094 #endif // _LP64
6097 // C2 compiled method's prolog code.
6098 void MacroAssembler::verified_entry(int framesize, bool stack_bang, bool fp_mode_24b) {
6100 // WARNING: Initial instruction MUST be 5 bytes or longer so that
6101 // NativeJump::patch_verified_entry will be able to patch out the entry
6102 // code safely. The push to verify stack depth is ok at 5 bytes,
6103 // the frame allocation can be either 3 or 6 bytes. So if we don't do
6104 // stack bang then we must use the 6 byte frame allocation even if
6105 // we have no frame. :-(
6107 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
6108 // Remove word for return addr
6109 framesize -= wordSize;
6111 // Calls to C2R adapters often do not accept exceptional returns.
6112 // We require that their callers must bang for them. But be careful, because
6113 // some VM calls (such as call site linkage) can use several kilobytes of
6114 // stack. But the stack safety zone should account for that.
6115 // See bugs 4446381, 4468289, 4497237.
6116 if (stack_bang) {
6117 generate_stack_overflow_check(framesize);
6119 // We always push rbp so that on return to the interpreter rbp will be
6120 // restored correctly and we can correct the stack.
6121 push(rbp);
6122 // Remove word for ebp
6123 framesize -= wordSize;
6125 // Create frame
6126 if (framesize) {
6127 subptr(rsp, framesize);
6128 }
6129 } else {
6130 // Create frame (force generation of a 4 byte immediate value)
6131 subptr_imm32(rsp, framesize);
6133 // Save RBP register now.
6134 framesize -= wordSize;
6135 movptr(Address(rsp, framesize), rbp);
6136 }
6138 if (VerifyStackAtCalls) { // Majik cookie to verify stack depth
6139 framesize -= wordSize;
6140 movptr(Address(rsp, framesize), (int32_t)0xbadb100d);
6141 }
6143 #ifndef _LP64
6144 // If method sets FPU control word do it now
6145 if (fp_mode_24b) {
6146 fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24()));
6147 }
6148 if (UseSSE >= 2 && VerifyFPU) {
6149 verify_FPU(0, "FPU stack must be clean on entry");
6150 }
6151 #endif
6153 #ifdef ASSERT
6154 if (VerifyStackAtCalls) {
6155 Label L;
6156 push(rax);
6157 mov(rax, rsp);
6158 andptr(rax, StackAlignmentInBytes-1);
6159 cmpptr(rax, StackAlignmentInBytes-wordSize);
6160 pop(rax);
6161 jcc(Assembler::equal, L);
6162 STOP("Stack is not properly aligned!");
6163 bind(L);
6164 }
6165 #endif
6167 }
6169 void MacroAssembler::clear_mem(Register base, Register cnt, Register tmp) {
6170 // cnt - number of qwords (8-byte words).
6171 // base - start address, qword aligned.
6172 assert(base==rdi, "base register must be edi for rep stos");
6173 assert(tmp==rax, "tmp register must be eax for rep stos");
6174 assert(cnt==rcx, "cnt register must be ecx for rep stos");
6176 xorptr(tmp, tmp);
6177 if (UseFastStosb) {
6178 shlptr(cnt,3); // convert to number of bytes
6179 rep_stosb();
6180 } else {
6181 NOT_LP64(shlptr(cnt,1);) // convert to number of dwords for 32-bit VM
6182 rep_stos();
6183 }
6184 }
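// A hedged C equivalent of the generated loop (cnt counts 8-byte qwords):
//
//   void clear_mem_equiv(int64_t* base, size_t cnt) {
//     memset(base, 0, cnt * 8);   // rep stosb path when UseFastStosb,
//   }                             // otherwise rep stos qword/dword stores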
6186 // IndexOf for constant substrings with size >= 8 chars
6187 // which don't need to be loaded through stack.
6188 void MacroAssembler::string_indexofC8(Register str1, Register str2,
6189 Register cnt1, Register cnt2,
6190 int int_cnt2, Register result,
6191 XMMRegister vec, Register tmp) {
6192 ShortBranchVerifier sbv(this);
6193 assert(UseSSE42Intrinsics, "SSE4.2 is required");
6195 // This method uses the pcmpestri instruction with bound registers
6196 // inputs:
6197 // xmm - substring
6198 // rax - substring length (elements count)
6199 // mem - scanned string
6200 // rdx - string length (elements count)
6201 // 0xd - mode: 1100 (substring search) + 01 (unsigned shorts)
6202 // outputs:
6203 // rcx - matched index in string
6204 assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri");
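// A hedged decode of the imm8 0x0d used with pcmpestri below: bits 1:0 =
// 01 select unsigned 16-bit elements, bits 3:2 = 11 select "equal ordered"
// (substring search), and the zero upper bits select positive polarity and
// the least-significant match index. Afterwards CF == 1 means some match
// was found (its index is in rcx) and OF == 1 means a match at offset 0.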
6206 Label RELOAD_SUBSTR, SCAN_TO_SUBSTR, SCAN_SUBSTR,
6207 RET_FOUND, RET_NOT_FOUND, EXIT, FOUND_SUBSTR,
6208 MATCH_SUBSTR_HEAD, RELOAD_STR, FOUND_CANDIDATE;
6210 // Note, inline_string_indexOf() generates checks:
6211 // if (substr.count > string.count) return -1;
6212 // if (substr.count == 0) return 0;
6213 assert(int_cnt2 >= 8, "this code is used only for cnt2 >= 8 chars");
6215 // Load substring.
6216 movdqu(vec, Address(str2, 0));
6217 movl(cnt2, int_cnt2);
6218 movptr(result, str1); // string addr
6220 if (int_cnt2 > 8) {
6221 jmpb(SCAN_TO_SUBSTR);
6223 // Reload substr for rescan, this code
6224 // is executed only for large substrings (> 8 chars)
6225 bind(RELOAD_SUBSTR);
6226 movdqu(vec, Address(str2, 0));
6227 negptr(cnt2); // Jumped here with negative cnt2, convert to positive
6229 bind(RELOAD_STR);
6230 // We came here after the beginning of the substring was
6231 // matched but the rest of it was not, so we need to search
6232 // again. Start from the next element after the previous match.
6234 // cnt2 is the number of remaining substring elements and
6235 // cnt1 is the number of remaining string elements when the cmp failed.
6236 // Restored cnt1 = cnt1 - cnt2 + int_cnt2
6237 subl(cnt1, cnt2);
6238 addl(cnt1, int_cnt2);
6239 movl(cnt2, int_cnt2); // Now restore cnt2
6241 decrementl(cnt1); // Shift to next element
6242 cmpl(cnt1, cnt2);
6243 jccb(Assembler::negative, RET_NOT_FOUND); // Left less than substring
6245 addptr(result, 2);
6247 } // (int_cnt2 > 8)
6249 // Scan string for start of substr in 16-byte vectors
6250 bind(SCAN_TO_SUBSTR);
6251 pcmpestri(vec, Address(result, 0), 0x0d);
6252 jccb(Assembler::below, FOUND_CANDIDATE); // CF == 1
6253 subl(cnt1, 8);
6254 jccb(Assembler::lessEqual, RET_NOT_FOUND); // Scanned full string
6255 cmpl(cnt1, cnt2);
6256 jccb(Assembler::negative, RET_NOT_FOUND); // Left less than substring
6257 addptr(result, 16);
6258 jmpb(SCAN_TO_SUBSTR);
6260 // Found a potential substr
6261 bind(FOUND_CANDIDATE);
6262 // Matched whole vector if first element matched (tmp(rcx) == 0).
6263 if (int_cnt2 == 8) {
6264 jccb(Assembler::overflow, RET_FOUND); // OF == 1
6265 } else { // int_cnt2 > 8
6266 jccb(Assembler::overflow, FOUND_SUBSTR);
6267 }
6268 // After pcmpestri tmp(rcx) contains matched element index
6269 // Compute start addr of substr
6270 lea(result, Address(result, tmp, Address::times_2));
6272 // Make sure string is still long enough
6273 subl(cnt1, tmp);
6274 cmpl(cnt1, cnt2);
6275 if (int_cnt2 == 8) {
6276 jccb(Assembler::greaterEqual, SCAN_TO_SUBSTR);
6277 } else { // int_cnt2 > 8
6278 jccb(Assembler::greaterEqual, MATCH_SUBSTR_HEAD);
6279 }
6280 // Left less than substring.
6282 bind(RET_NOT_FOUND);
6283 movl(result, -1);
6284 jmpb(EXIT);
6286 if (int_cnt2 > 8) {
6287 // This code is optimized for the case when whole substring
6288 // is matched if its head is matched.
6289 bind(MATCH_SUBSTR_HEAD);
6290 pcmpestri(vec, Address(result, 0), 0x0d);
6291 // Reload only the string if it does not match
6292 jccb(Assembler::noOverflow, RELOAD_STR); // OF == 0
6294 Label CONT_SCAN_SUBSTR;
6295 // Compare the rest of substring (> 8 chars).
6296 bind(FOUND_SUBSTR);
6297 // First 8 chars are already matched.
6298 negptr(cnt2);
6299 addptr(cnt2, 8);
6301 bind(SCAN_SUBSTR);
6302 subl(cnt1, 8);
6303 cmpl(cnt2, -8); // Do not read beyond substring
6304 jccb(Assembler::lessEqual, CONT_SCAN_SUBSTR);
6305 // Back-up strings to avoid reading beyond substring:
6306 // cnt1 = cnt1 - cnt2 + 8
6307 addl(cnt1, cnt2); // cnt2 is negative
6308 addl(cnt1, 8);
6309 movl(cnt2, 8); negptr(cnt2);
6310 bind(CONT_SCAN_SUBSTR);
6311 if (int_cnt2 < (int)G) {
6312 movdqu(vec, Address(str2, cnt2, Address::times_2, int_cnt2*2));
6313 pcmpestri(vec, Address(result, cnt2, Address::times_2, int_cnt2*2), 0x0d);
6314 } else {
6315 // calculate index in register to avoid integer overflow (int_cnt2*2)
6316 movl(tmp, int_cnt2);
6317 addptr(tmp, cnt2);
6318 movdqu(vec, Address(str2, tmp, Address::times_2, 0));
6319 pcmpestri(vec, Address(result, tmp, Address::times_2, 0), 0x0d);
6320 }
6321 // Need to reload string pointers if we did not match the whole vector
6322 jcc(Assembler::noOverflow, RELOAD_SUBSTR); // OF == 0
6323 addptr(cnt2, 8);
6324 jcc(Assembler::negative, SCAN_SUBSTR);
6325 // Fall through if found full substring
6327 } // (int_cnt2 > 8)
6329 bind(RET_FOUND);
6330 // Found result if we matched full small substring.
6331 // Compute substr offset
6332 subptr(result, str1);
6333 shrl(result, 1); // index
6334 bind(EXIT);
6336 } // string_indexofC8
6338 // Small strings are loaded through stack if they cross page boundary.
6339 void MacroAssembler::string_indexof(Register str1, Register str2,
6340 Register cnt1, Register cnt2,
6341 int int_cnt2, Register result,
6342 XMMRegister vec, Register tmp) {
6343 ShortBranchVerifier sbv(this);
6344 assert(UseSSE42Intrinsics, "SSE4.2 is required");
6345 //
6346 // int_cnt2 is length of small (< 8 chars) constant substring
6347 // or (-1) for non constant substring in which case its length
6348 // is in cnt2 register.
6349 //
6350 // Note, inline_string_indexOf() generates checks:
6351 // if (substr.count > string.count) return -1;
6352 // if (substr.count == 0) return 0;
6353 //
6354 assert(int_cnt2 == -1 || (0 < int_cnt2 && int_cnt2 < 8), "should be != 0");
6356 // This method uses the pcmpestri instruction with bound registers
6357 // inputs:
6358 // xmm - substring
6359 // rax - substring length (elements count)
6360 // mem - scanned string
6361 // rdx - string length (elements count)
6362 // 0xd - mode: 1100 (substring search) + 01 (unsigned shorts)
6363 // outputs:
6364 // rcx - matched index in string
6365 assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri");
6367 Label RELOAD_SUBSTR, SCAN_TO_SUBSTR, SCAN_SUBSTR, ADJUST_STR,
6368 RET_FOUND, RET_NOT_FOUND, CLEANUP, FOUND_SUBSTR,
6369 FOUND_CANDIDATE;
6371 { //========================================================
6372 // We don't know where these strings are located
6373 // and we can't read beyond them. Load them through the stack.
6374 Label BIG_STRINGS, CHECK_STR, COPY_SUBSTR, COPY_STR;
6376 movptr(tmp, rsp); // save old SP
6378 if (int_cnt2 > 0) { // small (< 8 chars) constant substring
6379 if (int_cnt2 == 1) { // One char
6380 load_unsigned_short(result, Address(str2, 0));
6381 movdl(vec, result); // move 32 bits
6382 } else if (int_cnt2 == 2) { // Two chars
6383 movdl(vec, Address(str2, 0)); // move 32 bits
6384 } else if (int_cnt2 == 4) { // Four chars
6385 movq(vec, Address(str2, 0)); // move 64 bits
6386 } else { // cnt2 = { 3, 5, 6, 7 }
6387 // Array header size is 12 bytes in 32-bit VM
6388 // + 6 bytes for 3 chars == 18 bytes,
6389 // enough space to load vec and shift.
6390 assert(HeapWordSize*TypeArrayKlass::header_size() >= 12,"sanity");
6391 movdqu(vec, Address(str2, (int_cnt2*2)-16));
6392 psrldq(vec, 16-(int_cnt2*2));
6393 }
6394 } else { // not constant substring
6395 cmpl(cnt2, 8);
6396 jccb(Assembler::aboveEqual, BIG_STRINGS); // Both strings are big enough
6398 // We can read beyond the string if str+16 does not cross a page boundary
6399 // since heaps are aligned and mapped by pages.
6400 assert(os::vm_page_size() < (int)G, "default page should be small");
6401 movl(result, str2); // We need only low 32 bits
6402 andl(result, (os::vm_page_size()-1));
6403 cmpl(result, (os::vm_page_size()-16));
6404 jccb(Assembler::belowEqual, CHECK_STR);
6406 // Move small strings to the stack to allow loading 16 bytes into vec.
6407 subptr(rsp, 16);
6408 int stk_offset = wordSize-2;
6409 push(cnt2);
6411 bind(COPY_SUBSTR);
6412 load_unsigned_short(result, Address(str2, cnt2, Address::times_2, -2));
6413 movw(Address(rsp, cnt2, Address::times_2, stk_offset), result);
6414 decrement(cnt2);
6415 jccb(Assembler::notZero, COPY_SUBSTR);
6417 pop(cnt2);
6418 movptr(str2, rsp); // New substring address
6419 } // non constant
6421 bind(CHECK_STR);
6422 cmpl(cnt1, 8);
6423 jccb(Assembler::aboveEqual, BIG_STRINGS);
6425 // Check cross page boundary.
6426 movl(result, str1); // We need only low 32 bits
6427 andl(result, (os::vm_page_size()-1));
6428 cmpl(result, (os::vm_page_size()-16));
6429 jccb(Assembler::belowEqual, BIG_STRINGS);
6431 subptr(rsp, 16);
6432 int stk_offset = -2;
6433 if (int_cnt2 < 0) { // not constant
6434 push(cnt2);
6435 stk_offset += wordSize;
6436 }
6437 movl(cnt2, cnt1);
6439 bind(COPY_STR);
6440 load_unsigned_short(result, Address(str1, cnt2, Address::times_2, -2));
6441 movw(Address(rsp, cnt2, Address::times_2, stk_offset), result);
6442 decrement(cnt2);
6443 jccb(Assembler::notZero, COPY_STR);
6445 if (int_cnt2 < 0) { // not constant
6446 pop(cnt2);
6447 }
6448 movptr(str1, rsp); // New string address
6450 bind(BIG_STRINGS);
6451 // Load substring.
6452 if (int_cnt2 < 0) { // -1
6453 movdqu(vec, Address(str2, 0));
6454 push(cnt2); // substr count
6455 push(str2); // substr addr
6456 push(str1); // string addr
6457 } else {
6458 // Small (< 8 chars) constant substrings are loaded already.
6459 movl(cnt2, int_cnt2);
6460 }
6461 push(tmp); // original SP
6463 } // Finished loading
6465 //========================================================
6466 // Start search
6467 //
6469 movptr(result, str1); // string addr
6471 if (int_cnt2 < 0) { // Only for non constant substring
6472 jmpb(SCAN_TO_SUBSTR);
6474 // SP saved at sp+0
6475 // String saved at sp+1*wordSize
6476 // Substr saved at sp+2*wordSize
6477 // Substr count saved at sp+3*wordSize
6479 // Reload substr for rescan, this code
6480 // is executed only for large substrings (> 8 chars)
6481 bind(RELOAD_SUBSTR);
6482 movptr(str2, Address(rsp, 2*wordSize));
6483 movl(cnt2, Address(rsp, 3*wordSize));
6484 movdqu(vec, Address(str2, 0));
6485 // We came here after the beginning of the substring was
6486 // matched but the rest of it was not, so we need to search
6487 // again. Start from the next element after the previous match.
6488 subptr(str1, result); // Restore counter
6489 shrl(str1, 1);
6490 addl(cnt1, str1);
6491 decrementl(cnt1); // Shift to next element
6492 cmpl(cnt1, cnt2);
6493 jccb(Assembler::negative, RET_NOT_FOUND); // Left less than substring
6495 addptr(result, 2);
6496 } // non constant
6498 // Scan string for start of substr in 16-byte vectors
6499 bind(SCAN_TO_SUBSTR);
6500 assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri");
6501 pcmpestri(vec, Address(result, 0), 0x0d);
6502 jccb(Assembler::below, FOUND_CANDIDATE); // CF == 1
6503 subl(cnt1, 8);
6504 jccb(Assembler::lessEqual, RET_NOT_FOUND); // Scanned full string
6505 cmpl(cnt1, cnt2);
6506 jccb(Assembler::negative, RET_NOT_FOUND); // Left less than substring
6507 addptr(result, 16);
6509 bind(ADJUST_STR);
6510 cmpl(cnt1, 8); // Do not read beyond string
6511 jccb(Assembler::greaterEqual, SCAN_TO_SUBSTR);
6512 // Back-up string to avoid reading beyond string.
6513 lea(result, Address(result, cnt1, Address::times_2, -16));
6514 movl(cnt1, 8);
6515 jmpb(SCAN_TO_SUBSTR);
6517 // Found a potential substr
6518 bind(FOUND_CANDIDATE);
6519 // After pcmpestri tmp(rcx) contains matched element index
6521 // Make sure string is still long enough
6522 subl(cnt1, tmp);
6523 cmpl(cnt1, cnt2);
6524 jccb(Assembler::greaterEqual, FOUND_SUBSTR);
6525 // Left less than substring.
6527 bind(RET_NOT_FOUND);
6528 movl(result, -1);
6529 jmpb(CLEANUP);
6531 bind(FOUND_SUBSTR);
6532 // Compute start addr of substr
6533 lea(result, Address(result, tmp, Address::times_2));
6535 if (int_cnt2 > 0) { // Constant substring
6536 // Repeat search for small substring (< 8 chars)
6537 // from new point without reloading substring.
6538 // Have to check that we don't read beyond string.
6539 cmpl(tmp, 8-int_cnt2);
6540 jccb(Assembler::greater, ADJUST_STR);
6541 // Fall through if matched whole substring.
6542 } else { // non constant
6543 assert(int_cnt2 == -1, "should be != 0");
6545 addl(tmp, cnt2);
6546 // Found result if we matched whole substring.
6547 cmpl(tmp, 8);
6548 jccb(Assembler::lessEqual, RET_FOUND);
6550 // Repeat search for small substring (<= 8 chars)
6551 // from new point 'str1' without reloading substring.
6552 cmpl(cnt2, 8);
6553 // Have to check that we don't read beyond string.
6554 jccb(Assembler::lessEqual, ADJUST_STR);
6556 Label CHECK_NEXT, CONT_SCAN_SUBSTR, RET_FOUND_LONG;
6557 // Compare the rest of substring (> 8 chars).
6558 movptr(str1, result);
6560 cmpl(tmp, cnt2);
6561 // First 8 chars are already matched.
6562 jccb(Assembler::equal, CHECK_NEXT);
6564 bind(SCAN_SUBSTR);
6565 pcmpestri(vec, Address(str1, 0), 0x0d);
6566 // Need to reload string pointers if we did not match the whole vector
6567 jcc(Assembler::noOverflow, RELOAD_SUBSTR); // OF == 0
6569 bind(CHECK_NEXT);
6570 subl(cnt2, 8);
6571 jccb(Assembler::lessEqual, RET_FOUND_LONG); // Found full substring
6572 addptr(str1, 16);
6573 addptr(str2, 16);
6574 subl(cnt1, 8);
6575 cmpl(cnt2, 8); // Do not read beyond substring
6576 jccb(Assembler::greaterEqual, CONT_SCAN_SUBSTR);
6577 // Back-up strings to avoid reading beyond substring.
6578 lea(str2, Address(str2, cnt2, Address::times_2, -16));
6579 lea(str1, Address(str1, cnt2, Address::times_2, -16));
6580 subl(cnt1, cnt2);
6581 movl(cnt2, 8);
6582 addl(cnt1, 8);
6583 bind(CONT_SCAN_SUBSTR);
6584 movdqu(vec, Address(str2, 0));
6585 jmpb(SCAN_SUBSTR);
6587 bind(RET_FOUND_LONG);
6588 movptr(str1, Address(rsp, wordSize));
6589 } // non constant
6591 bind(RET_FOUND);
6592 // Compute substr offset
6593 subptr(result, str1);
6594 shrl(result, 1); // index
6596 bind(CLEANUP);
6597 pop(rsp); // restore SP
6599 } // string_indexof
6601 // Compare strings.
6602 void MacroAssembler::string_compare(Register str1, Register str2,
6603 Register cnt1, Register cnt2, Register result,
6604 XMMRegister vec1) {
6605 ShortBranchVerifier sbv(this);
6606 Label LENGTH_DIFF_LABEL, POP_LABEL, DONE_LABEL, WHILE_HEAD_LABEL;
6608 // Compute the minimum of the string lengths and the
6609 // difference of the string lengths (stack).
6610 // using conditional moves.
6611 movl(result, cnt1);
6612 subl(cnt1, cnt2);
6613 push(cnt1);
6614 cmov32(Assembler::lessEqual, cnt2, result);
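// Rough scalar equivalent of the setup above (a sketch; len1/len2 are
// hypothetical names for the incoming cnt1/cnt2 values):
//   int diff = len1 - len2;               // pushed on the stack
//   int min  = (diff <= 0) ? len1 : len2; // left in cnt2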
6616 // Is the minimum length zero?
6617 testl(cnt2, cnt2);
6618 jcc(Assembler::zero, LENGTH_DIFF_LABEL);
6620 // Compare first characters
6621 load_unsigned_short(result, Address(str1, 0));
6622 load_unsigned_short(cnt1, Address(str2, 0));
6623 subl(result, cnt1);
6624 jcc(Assembler::notZero, POP_LABEL);
6625 cmpl(cnt2, 1);
6626 jcc(Assembler::equal, LENGTH_DIFF_LABEL);
6628 // Check if the strings start at the same location.
6629 cmpptr(str1, str2);
6630 jcc(Assembler::equal, LENGTH_DIFF_LABEL);
6632 Address::ScaleFactor scale = Address::times_2;
6633 int stride = 8;
6635 if (UseAVX >= 2 && UseSSE42Intrinsics) {
6636 Label COMPARE_WIDE_VECTORS, VECTOR_NOT_EQUAL, COMPARE_WIDE_TAIL, COMPARE_SMALL_STR;
6637 Label COMPARE_WIDE_VECTORS_LOOP, COMPARE_16_CHARS, COMPARE_INDEX_CHAR;
6638 Label COMPARE_TAIL_LONG;
6639 int pcmpmask = 0x19;
6641 // Set up to compare 16-char (32-byte) vectors,
6642 // start from first character again because it has aligned address.
6643 int stride2 = 16;
6644 int adr_stride = stride << scale;
6645 int adr_stride2 = stride2 << scale;
6647 assert(result == rax && cnt2 == rdx && cnt1 == rcx, "pcmpestri");
6648 // rax and rdx are used by pcmpestri as element counters
6649 movl(result, cnt2);
6650 andl(cnt2, ~(stride2-1)); // cnt2 holds the vector count
6651 jcc(Assembler::zero, COMPARE_TAIL_LONG);
6653 // Fast path: compare the first two 8-char vectors.
6654 bind(COMPARE_16_CHARS);
6655 movdqu(vec1, Address(str1, 0));
6656 pcmpestri(vec1, Address(str2, 0), pcmpmask);
6657 jccb(Assembler::below, COMPARE_INDEX_CHAR);
6659 movdqu(vec1, Address(str1, adr_stride));
6660 pcmpestri(vec1, Address(str2, adr_stride), pcmpmask);
6661 jccb(Assembler::aboveEqual, COMPARE_WIDE_VECTORS);
6662 addl(cnt1, stride);
6664 // Compare the characters at index in cnt1
6665 bind(COMPARE_INDEX_CHAR); //cnt1 has the offset of the mismatching character
6666 load_unsigned_short(result, Address(str1, cnt1, scale));
6667 load_unsigned_short(cnt2, Address(str2, cnt1, scale));
6668 subl(result, cnt2);
6669 jmp(POP_LABEL);
6671 // Set up the registers to start the vector comparison loop
6672 bind(COMPARE_WIDE_VECTORS);
6673 lea(str1, Address(str1, result, scale));
6674 lea(str2, Address(str2, result, scale));
6675 subl(result, stride2);
6676 subl(cnt2, stride2);
6677 jccb(Assembler::zero, COMPARE_WIDE_TAIL);
6678 negptr(result);
6680 // In a loop, compare 16 chars (32 bytes) at a time using vpxor+vptest
6681 bind(COMPARE_WIDE_VECTORS_LOOP);
6682 vmovdqu(vec1, Address(str1, result, scale));
6683 vpxor(vec1, Address(str2, result, scale));
6684 vptest(vec1, vec1);
6685 jccb(Assembler::notZero, VECTOR_NOT_EQUAL);
6686 addptr(result, stride2);
6687 subl(cnt2, stride2);
6688 jccb(Assembler::notZero, COMPARE_WIDE_VECTORS_LOOP);
6689 // clean upper bits of YMM registers
6690 vzeroupper();
6692 // compare wide vectors tail
6693 bind(COMPARE_WIDE_TAIL);
6694 testptr(result, result);
6695 jccb(Assembler::zero, LENGTH_DIFF_LABEL);
6697 movl(result, stride2);
6698 movl(cnt2, result);
6699 negptr(result);
6700 jmpb(COMPARE_WIDE_VECTORS_LOOP);
6702 // Identify the mismatching (upper or lower) 16-byte half of the 32-byte vectors.
6703 bind(VECTOR_NOT_EQUAL);
6704 // clean upper bits of YMM registers
6705 vzeroupper();
6706 lea(str1, Address(str1, result, scale));
6707 lea(str2, Address(str2, result, scale));
6708 jmp(COMPARE_16_CHARS);
6710 // Compare tail chars, length between 1 and 15 chars
6711 bind(COMPARE_TAIL_LONG);
6712 movl(cnt2, result);
6713 cmpl(cnt2, stride);
6714 jccb(Assembler::less, COMPARE_SMALL_STR);
6716 movdqu(vec1, Address(str1, 0));
6717 pcmpestri(vec1, Address(str2, 0), pcmpmask);
6718 jcc(Assembler::below, COMPARE_INDEX_CHAR);
6719 subptr(cnt2, stride);
6720 jccb(Assembler::zero, LENGTH_DIFF_LABEL);
6721 lea(str1, Address(str1, result, scale));
6722 lea(str2, Address(str2, result, scale));
6723 negptr(cnt2);
6724 jmpb(WHILE_HEAD_LABEL);
6726 bind(COMPARE_SMALL_STR);
6727 } else if (UseSSE42Intrinsics) {
6728 Label COMPARE_WIDE_VECTORS, VECTOR_NOT_EQUAL, COMPARE_TAIL;
6729 int pcmpmask = 0x19;
6730 // Set up to compare 8-char (16-byte) vectors,
6731 // start from first character again because it has aligned address.
6732 movl(result, cnt2);
6733 andl(cnt2, ~(stride - 1)); // cnt2 holds the vector count
6734 jccb(Assembler::zero, COMPARE_TAIL);
6736 lea(str1, Address(str1, result, scale));
6737 lea(str2, Address(str2, result, scale));
6738 negptr(result);
6740 // pcmpestri
6741 // inputs:
6742 // vec1 - substring
6743 // rax - negative string length (element count)
6744 // mem - scanned string
6745 // rdx - string length (elements count)
6746 // pcmpmask - cmp mode: 11000 (string compare with negated result)
6747 // + 00 (unsigned bytes) or + 01 (unsigned shorts)
6748 // outputs:
6749 // rcx - first mismatched element index
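// For reference, the 0x19 immediate decomposes as follows under the
// SSE4.2 imm8 encoding (a sketch from the instruction reference, not
// emitted code):
//   const int fmt      = 0x01; // bits 1:0 - unsigned 16-bit elements
//   const int agg      = 0x08; // bits 3:2 - "equal each" (string compare)
//   const int polarity = 0x10; // bits 5:4 - negate the comparison result
//   const int pcmpmask = fmt | agg | polarity; // == 0x19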
6750 assert(result == rax && cnt2 == rdx && cnt1 == rcx, "pcmpestri");
6752 bind(COMPARE_WIDE_VECTORS);
6753 movdqu(vec1, Address(str1, result, scale));
6754 pcmpestri(vec1, Address(str2, result, scale), pcmpmask);
6755 // After pcmpestri cnt1(rcx) contains mismatched element index
6757 jccb(Assembler::below, VECTOR_NOT_EQUAL); // CF==1
6758 addptr(result, stride);
6759 subptr(cnt2, stride);
6760 jccb(Assembler::notZero, COMPARE_WIDE_VECTORS);
6762 // compare wide vectors tail
6763 testptr(result, result);
6764 jccb(Assembler::zero, LENGTH_DIFF_LABEL);
6766 movl(cnt2, stride);
6767 movl(result, stride);
6768 negptr(result);
6769 movdqu(vec1, Address(str1, result, scale));
6770 pcmpestri(vec1, Address(str2, result, scale), pcmpmask);
6771 jccb(Assembler::aboveEqual, LENGTH_DIFF_LABEL);
6773 // Mismatched characters in the vectors
6774 bind(VECTOR_NOT_EQUAL);
6775 addptr(cnt1, result);
6776 load_unsigned_short(result, Address(str1, cnt1, scale));
6777 load_unsigned_short(cnt2, Address(str2, cnt1, scale));
6778 subl(result, cnt2);
6779 jmpb(POP_LABEL);
6781 bind(COMPARE_TAIL); // the vector count is zero
6782 movl(cnt2, result);
6783 // Fallthru to tail compare
6784 }
6785 // Shift str2 and str1 to the end of the arrays, negate min
6786 lea(str1, Address(str1, cnt2, scale));
6787 lea(str2, Address(str2, cnt2, scale));
6788 decrementl(cnt2); // first character was compared already
6789 negptr(cnt2);
6791 // Compare the rest of the elements
6792 bind(WHILE_HEAD_LABEL);
6793 load_unsigned_short(result, Address(str1, cnt2, scale, 0));
6794 load_unsigned_short(cnt1, Address(str2, cnt2, scale, 0));
6795 subl(result, cnt1);
6796 jccb(Assembler::notZero, POP_LABEL);
6797 increment(cnt2);
6798 jccb(Assembler::notZero, WHILE_HEAD_LABEL);
6800 // Strings are equal up to min length. Return the length difference.
6801 bind(LENGTH_DIFF_LABEL);
6802 pop(result);
6803 jmpb(DONE_LABEL);
6805 // Discard the stored length difference
6806 bind(POP_LABEL);
6807 pop(cnt1);
6809 // That's it
6810 bind(DONE_LABEL);
6811 }
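// Scalar sketch of the contract implemented above (hypothetical names):
// return the difference of the first mismatching chars, else len1 - len2:
//   int compare(const jchar* s1, int len1, const jchar* s2, int len2) {
//     int min = len1 < len2 ? len1 : len2;
//     for (int i = 0; i < min; i++) {
//       if (s1[i] != s2[i]) return s1[i] - s2[i];
//     }
//     return len1 - len2;
//   }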
6813 // Compare char[] arrays aligned to 4 bytes or substrings.
6814 void MacroAssembler::char_arrays_equals(bool is_array_equ, Register ary1, Register ary2,
6815 Register limit, Register result, Register chr,
6816 XMMRegister vec1, XMMRegister vec2) {
6817 ShortBranchVerifier sbv(this);
6818 Label TRUE_LABEL, FALSE_LABEL, DONE, COMPARE_VECTORS, COMPARE_CHAR;
6820 int length_offset = arrayOopDesc::length_offset_in_bytes();
6821 int base_offset = arrayOopDesc::base_offset_in_bytes(T_CHAR);
6823 // Check the input args
6824 cmpptr(ary1, ary2);
6825 jcc(Assembler::equal, TRUE_LABEL);
6827 if (is_array_equ) {
6828 // Need additional checks for arrays_equals.
6829 testptr(ary1, ary1);
6830 jcc(Assembler::zero, FALSE_LABEL);
6831 testptr(ary2, ary2);
6832 jcc(Assembler::zero, FALSE_LABEL);
6834 // Check the lengths
6835 movl(limit, Address(ary1, length_offset));
6836 cmpl(limit, Address(ary2, length_offset));
6837 jcc(Assembler::notEqual, FALSE_LABEL);
6838 }
6840 // count == 0
6841 testl(limit, limit);
6842 jcc(Assembler::zero, TRUE_LABEL);
6844 if (is_array_equ) {
6845 // Load array address
6846 lea(ary1, Address(ary1, base_offset));
6847 lea(ary2, Address(ary2, base_offset));
6848 }
6850 shll(limit, 1); // byte count != 0
6851 movl(result, limit); // copy
6853 if (UseAVX >= 2) {
6854 // With AVX2, use 32-byte vector compare
6855 Label COMPARE_WIDE_VECTORS, COMPARE_TAIL;
6857 // Compare 32-byte vectors
6858 andl(result, 0x0000001e); // tail count (in bytes)
6859 andl(limit, 0xffffffe0); // vector count (in bytes)
6860 jccb(Assembler::zero, COMPARE_TAIL);
6862 lea(ary1, Address(ary1, limit, Address::times_1));
6863 lea(ary2, Address(ary2, limit, Address::times_1));
6864 negptr(limit);
6866 bind(COMPARE_WIDE_VECTORS);
6867 vmovdqu(vec1, Address(ary1, limit, Address::times_1));
6868 vmovdqu(vec2, Address(ary2, limit, Address::times_1));
6869 vpxor(vec1, vec2);
6871 vptest(vec1, vec1);
6872 jccb(Assembler::notZero, FALSE_LABEL);
6873 addptr(limit, 32);
6874 jcc(Assembler::notZero, COMPARE_WIDE_VECTORS);
6876 testl(result, result);
6877 jccb(Assembler::zero, TRUE_LABEL);
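// The last 32 bytes are compared with a load that may overlap bytes
// already checked; that is safe because all earlier bytes matched.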
6879 vmovdqu(vec1, Address(ary1, result, Address::times_1, -32));
6880 vmovdqu(vec2, Address(ary2, result, Address::times_1, -32));
6881 vpxor(vec1, vec2);
6883 vptest(vec1, vec1);
6884 jccb(Assembler::notZero, FALSE_LABEL);
6885 jmpb(TRUE_LABEL);
6887 bind(COMPARE_TAIL); // limit is zero
6888 movl(limit, result);
6889 // Fallthru to tail compare
6890 } else if (UseSSE42Intrinsics) {
6891 // With SSE4.2, use double quad vector compare
6892 Label COMPARE_WIDE_VECTORS, COMPARE_TAIL;
6894 // Compare 16-byte vectors
6895 andl(result, 0x0000000e); // tail count (in bytes)
6896 andl(limit, 0xfffffff0); // vector count (in bytes)
6897 jccb(Assembler::zero, COMPARE_TAIL);
6899 lea(ary1, Address(ary1, limit, Address::times_1));
6900 lea(ary2, Address(ary2, limit, Address::times_1));
6901 negptr(limit);
6903 bind(COMPARE_WIDE_VECTORS);
6904 movdqu(vec1, Address(ary1, limit, Address::times_1));
6905 movdqu(vec2, Address(ary2, limit, Address::times_1));
6906 pxor(vec1, vec2);
6908 ptest(vec1, vec1);
6909 jccb(Assembler::notZero, FALSE_LABEL);
6910 addptr(limit, 16);
6911 jcc(Assembler::notZero, COMPARE_WIDE_VECTORS);
6913 testl(result, result);
6914 jccb(Assembler::zero, TRUE_LABEL);
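// As in the AVX2 path, the final 16-byte load may overlap bytes that
// were already compared equal.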
6916 movdqu(vec1, Address(ary1, result, Address::times_1, -16));
6917 movdqu(vec2, Address(ary2, result, Address::times_1, -16));
6918 pxor(vec1, vec2);
6920 ptest(vec1, vec1);
6921 jccb(Assembler::notZero, FALSE_LABEL);
6922 jmpb(TRUE_LABEL);
6924 bind(COMPARE_TAIL); // limit is zero
6925 movl(limit, result);
6926 // Fallthru to tail compare
6927 }
6929 // Compare 4-byte vectors
6930 andl(limit, 0xfffffffc); // vector count (in bytes)
6931 jccb(Assembler::zero, COMPARE_CHAR);
6933 lea(ary1, Address(ary1, limit, Address::times_1));
6934 lea(ary2, Address(ary2, limit, Address::times_1));
6935 negptr(limit);
6937 bind(COMPARE_VECTORS);
6938 movl(chr, Address(ary1, limit, Address::times_1));
6939 cmpl(chr, Address(ary2, limit, Address::times_1));
6940 jccb(Assembler::notEqual, FALSE_LABEL);
6941 addptr(limit, 4);
6942 jcc(Assembler::notZero, COMPARE_VECTORS);
6944 // Compare trailing char (final 2 bytes), if any
6945 bind(COMPARE_CHAR);
6946 testl(result, 0x2); // tail char
6947 jccb(Assembler::zero, TRUE_LABEL);
6948 load_unsigned_short(chr, Address(ary1, 0));
6949 load_unsigned_short(limit, Address(ary2, 0));
6950 cmpl(chr, limit);
6951 jccb(Assembler::notEqual, FALSE_LABEL);
6953 bind(TRUE_LABEL);
6954 movl(result, 1); // return true
6955 jmpb(DONE);
6957 bind(FALSE_LABEL);
6958 xorl(result, result); // return false
6960 // That's it
6961 bind(DONE);
6962 if (UseAVX >= 2) {
6963 // clean upper bits of YMM registers
6964 vzeroupper();
6965 }
6966 }
6968 void MacroAssembler::generate_fill(BasicType t, bool aligned,
6969 Register to, Register value, Register count,
6970 Register rtmp, XMMRegister xtmp) {
6971 ShortBranchVerifier sbv(this);
6972 assert_different_registers(to, value, count, rtmp);
6973 Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
6974 Label L_fill_2_bytes, L_fill_4_bytes;
6976 int shift = -1;
6977 switch (t) {
6978 case T_BYTE:
6979 shift = 2;
6980 break;
6981 case T_SHORT:
6982 shift = 1;
6983 break;
6984 case T_INT:
6985 shift = 0;
6986 break;
6987 default: ShouldNotReachHere();
6988 }
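// Note: shift is chosen so that (2 << shift) equals the number of
// elements in 8 bytes, e.g. 2 << 2 == 8 one-byte elements.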
6990 if (t == T_BYTE) {
6991 andl(value, 0xff);
6992 movl(rtmp, value);
6993 shll(rtmp, 8);
6994 orl(value, rtmp);
6995 }
6996 if (t == T_SHORT) {
6997 andl(value, 0xffff);
6998 }
6999 if (t == T_BYTE || t == T_SHORT) {
7000 movl(rtmp, value);
7001 shll(rtmp, 16);
7002 orl(value, rtmp);
7003 }
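// The steps above replicate the element value across all 32 bits of the
// register; e.g. for a T_BYTE value of 0xAB (sketch of the computed
// value, not emitted code):
//   uint32_t v = 0xAB;
//   v |= v << 8;   // 0x0000ABAB
//   v |= v << 16;  // 0xABABABAB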
7005 cmpl(count, 2<<shift); // Short arrays (< 8 bytes) fill by element
7006 jcc(Assembler::below, L_fill_4_bytes); // use unsigned cmp
7007 if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) {
7008 // align the fill destination address on a 4-byte boundary
7009 if (t == T_BYTE) {
7010 // One-byte misalignment happens only for byte arrays
7011 testptr(to, 1);
7012 jccb(Assembler::zero, L_skip_align1);
7013 movb(Address(to, 0), value);
7014 increment(to);
7015 decrement(count);
7016 BIND(L_skip_align1);
7017 }
7018 // Two-byte misalignment happens only for byte and short (char) arrays
7019 testptr(to, 2);
7020 jccb(Assembler::zero, L_skip_align2);
7021 movw(Address(to, 0), value);
7022 addptr(to, 2);
7023 subl(count, 1<<(shift-1));
7024 BIND(L_skip_align2);
7025 }
7026 if (UseSSE < 2) {
7027 Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
7028 // Fill 32-byte chunks
7029 subl(count, 8 << shift);
7030 jcc(Assembler::less, L_check_fill_8_bytes);
7031 align(16);
7033 BIND(L_fill_32_bytes_loop);
7035 for (int i = 0; i < 32; i += 4) {
7036 movl(Address(to, i), value);
7037 }
7039 addptr(to, 32);
7040 subl(count, 8 << shift);
7041 jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
7042 BIND(L_check_fill_8_bytes);
7043 addl(count, 8 << shift);
7044 jccb(Assembler::zero, L_exit);
7045 jmpb(L_fill_8_bytes);
7047 //
7048 // length is too short, just fill qwords
7049 //
7050 BIND(L_fill_8_bytes_loop);
7051 movl(Address(to, 0), value);
7052 movl(Address(to, 4), value);
7053 addptr(to, 8);
7054 BIND(L_fill_8_bytes);
7055 subl(count, 1 << (shift + 1));
7056 jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
7057 // fall through to fill 4 bytes
7058 } else {
7059 Label L_fill_32_bytes;
7060 if (!UseUnalignedLoadStores) {
7061 // align to 8 bytes; we know we are 4-byte aligned to start
7062 testptr(to, 4);
7063 jccb(Assembler::zero, L_fill_32_bytes);
7064 movl(Address(to, 0), value);
7065 addptr(to, 4);
7066 subl(count, 1<<shift);
7067 }
7068 BIND(L_fill_32_bytes);
7069 {
7070 assert(UseSSE >= 2, "supported cpu only");
7071 Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
7072 movdl(xtmp, value);
7073 if (UseAVX >= 2 && UseUnalignedLoadStores) {
7074 // Fill 64-byte chunks
7075 Label L_fill_64_bytes_loop, L_check_fill_32_bytes;
7076 vpbroadcastd(xtmp, xtmp);
7078 subl(count, 16 << shift);
7079 jcc(Assembler::less, L_check_fill_32_bytes);
7080 align(16);
7082 BIND(L_fill_64_bytes_loop);
7083 vmovdqu(Address(to, 0), xtmp);
7084 vmovdqu(Address(to, 32), xtmp);
7085 addptr(to, 64);
7086 subl(count, 16 << shift);
7087 jcc(Assembler::greaterEqual, L_fill_64_bytes_loop);
7089 BIND(L_check_fill_32_bytes);
7090 addl(count, 8 << shift);
7091 jccb(Assembler::less, L_check_fill_8_bytes);
7092 vmovdqu(Address(to, 0), xtmp);
7093 addptr(to, 32);
7094 subl(count, 8 << shift);
7096 BIND(L_check_fill_8_bytes);
7097 // clean upper bits of YMM registers
7098 vzeroupper();
7099 } else {
7100 // Fill 32-byte chunks
7101 pshufd(xtmp, xtmp, 0);
7103 subl(count, 8 << shift);
7104 jcc(Assembler::less, L_check_fill_8_bytes);
7105 align(16);
7107 BIND(L_fill_32_bytes_loop);
7109 if (UseUnalignedLoadStores) {
7110 movdqu(Address(to, 0), xtmp);
7111 movdqu(Address(to, 16), xtmp);
7112 } else {
7113 movq(Address(to, 0), xtmp);
7114 movq(Address(to, 8), xtmp);
7115 movq(Address(to, 16), xtmp);
7116 movq(Address(to, 24), xtmp);
7117 }
7119 addptr(to, 32);
7120 subl(count, 8 << shift);
7121 jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
7123 BIND(L_check_fill_8_bytes);
7124 }
7125 addl(count, 8 << shift);
7126 jccb(Assembler::zero, L_exit);
7127 jmpb(L_fill_8_bytes);
7129 //
7130 // length is too short, just fill qwords
7131 //
7132 BIND(L_fill_8_bytes_loop);
7133 movq(Address(to, 0), xtmp);
7134 addptr(to, 8);
7135 BIND(L_fill_8_bytes);
7136 subl(count, 1 << (shift + 1));
7137 jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
7138 }
7139 }
7140 // fill trailing 4 bytes
7141 BIND(L_fill_4_bytes);
7142 testl(count, 1<<shift);
7143 jccb(Assembler::zero, L_fill_2_bytes);
7144 movl(Address(to, 0), value);
7145 if (t == T_BYTE || t == T_SHORT) {
7146 addptr(to, 4);
7147 BIND(L_fill_2_bytes);
7148 // fill trailing 2 bytes
7149 testl(count, 1<<(shift-1));
7150 jccb(Assembler::zero, L_fill_byte);
7151 movw(Address(to, 0), value);
7152 if (t == T_BYTE) {
7153 addptr(to, 2);
7154 BIND(L_fill_byte);
7155 // fill trailing byte
7156 testl(count, 1);
7157 jccb(Assembler::zero, L_exit);
7158 movb(Address(to, 0), value);
7159 } else {
7160 BIND(L_fill_byte);
7161 }
7162 } else {
7163 BIND(L_fill_2_bytes);
7164 }
7165 BIND(L_exit);
7166 }
7168 // encode char[] to byte[] in ISO_8859_1
7169 void MacroAssembler::encode_iso_array(Register src, Register dst, Register len,
7170 XMMRegister tmp1Reg, XMMRegister tmp2Reg,
7171 XMMRegister tmp3Reg, XMMRegister tmp4Reg,
7172 Register tmp5, Register result) {
7173 // rsi: src
7174 // rdi: dst
7175 // rdx: len
7176 // rcx: tmp5
7177 // rax: result
7178 ShortBranchVerifier sbv(this);
7179 assert_different_registers(src, dst, len, tmp5, result);
7180 Label L_done, L_copy_1_char, L_copy_1_char_exit;
7182 // set result
7183 xorl(result, result);
7184 // check for zero length
7185 testl(len, len);
7186 jcc(Assembler::zero, L_done);
7187 movl(result, len);
7189 // Setup pointers
7190 lea(src, Address(src, len, Address::times_2)); // char[]
7191 lea(dst, Address(dst, len, Address::times_1)); // byte[]
7192 negptr(len);
7194 if (UseSSE42Intrinsics || UseAVX >= 2) {
7195 Label L_chars_8_check, L_copy_8_chars, L_copy_8_chars_exit;
7196 Label L_chars_16_check, L_copy_16_chars, L_copy_16_chars_exit;
7198 if (UseAVX >= 2) {
7199 Label L_chars_32_check, L_copy_32_chars, L_copy_32_chars_exit;
7200 movl(tmp5, 0xff00ff00); // create mask to test for Unicode chars in vector
7201 movdl(tmp1Reg, tmp5);
7202 vpbroadcastd(tmp1Reg, tmp1Reg);
7203 jmpb(L_chars_32_check);
7205 bind(L_copy_32_chars);
7206 vmovdqu(tmp3Reg, Address(src, len, Address::times_2, -64));
7207 vmovdqu(tmp4Reg, Address(src, len, Address::times_2, -32));
7208 vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector256 */ true);
7209 vptest(tmp2Reg, tmp1Reg); // check for Unicode chars in vector
7210 jccb(Assembler::notZero, L_copy_32_chars_exit);
7211 vpackuswb(tmp3Reg, tmp3Reg, tmp4Reg, /* vector256 */ true);
7212 vpermq(tmp4Reg, tmp3Reg, 0xD8, /* vector256 */ true);
7213 vmovdqu(Address(dst, len, Address::times_1, -32), tmp4Reg);
7215 bind(L_chars_32_check);
7216 addptr(len, 32);
7217 jccb(Assembler::lessEqual, L_copy_32_chars);
7219 bind(L_copy_32_chars_exit);
7220 subptr(len, 16);
7221 jccb(Assembler::greater, L_copy_16_chars_exit);
7223 } else if (UseSSE42Intrinsics) {
7224 movl(tmp5, 0xff00ff00); // create mask to test for Unicode chars in vector
7225 movdl(tmp1Reg, tmp5);
7226 pshufd(tmp1Reg, tmp1Reg, 0);
7227 jmpb(L_chars_16_check);
7228 }
7230 bind(L_copy_16_chars);
7231 if (UseAVX >= 2) {
7232 vmovdqu(tmp2Reg, Address(src, len, Address::times_2, -32));
7233 vptest(tmp2Reg, tmp1Reg);
7234 jccb(Assembler::notZero, L_copy_16_chars_exit);
7235 vpackuswb(tmp2Reg, tmp2Reg, tmp1Reg, /* vector256 */ true);
7236 vpermq(tmp3Reg, tmp2Reg, 0xD8, /* vector256 */ true);
7237 } else {
7238 if (UseAVX > 0) {
7239 movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
7240 movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
7241 vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector256 */ false);
7242 } else {
7243 movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
7244 por(tmp2Reg, tmp3Reg);
7245 movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
7246 por(tmp2Reg, tmp4Reg);
7247 }
7248 ptest(tmp2Reg, tmp1Reg); // check for Unicode chars in vector
7249 jccb(Assembler::notZero, L_copy_16_chars_exit);
7250 packuswb(tmp3Reg, tmp4Reg);
7251 }
7252 movdqu(Address(dst, len, Address::times_1, -16), tmp3Reg);
7254 bind(L_chars_16_check);
7255 addptr(len, 16);
7256 jccb(Assembler::lessEqual, L_copy_16_chars);
7258 bind(L_copy_16_chars_exit);
7259 if (UseAVX >= 2) {
7260 // clean upper bits of YMM registers
7261 vzeroupper();
7262 }
7263 subptr(len, 8);
7264 jccb(Assembler::greater, L_copy_8_chars_exit);
7266 bind(L_copy_8_chars);
7267 movdqu(tmp3Reg, Address(src, len, Address::times_2, -16));
7268 ptest(tmp3Reg, tmp1Reg);
7269 jccb(Assembler::notZero, L_copy_8_chars_exit);
7270 packuswb(tmp3Reg, tmp1Reg);
7271 movq(Address(dst, len, Address::times_1, -8), tmp3Reg);
7272 addptr(len, 8);
7273 jccb(Assembler::lessEqual, L_copy_8_chars);
7275 bind(L_copy_8_chars_exit);
7276 subptr(len, 8);
7277 jccb(Assembler::zero, L_done);
7278 }
7280 bind(L_copy_1_char);
7281 load_unsigned_short(tmp5, Address(src, len, Address::times_2, 0));
7282 testl(tmp5, 0xff00); // check if Unicode char
7283 jccb(Assembler::notZero, L_copy_1_char_exit);
7284 movb(Address(dst, len, Address::times_1, 0), tmp5);
7285 addptr(len, 1);
7286 jccb(Assembler::less, L_copy_1_char);
7288 bind(L_copy_1_char_exit);
7289 addptr(result, len); // len is the negative count of unprocessed elements
7290 bind(L_done);
7291 }
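// A scalar sketch of the contract implemented above (names hypothetical;
// the SSE4.2/AVX2 paths only accelerate this loop):
//   int encode_iso(const jchar* src, jbyte* dst, int len) {
//     int i = 0;
//     for (; i < len; i++) {
//       jchar c = src[i];
//       if (c > 0xff) break;   // first non-ISO-8859-1 char stops encoding
//       dst[i] = (jbyte)c;
//     }
//     return i;                // number of chars actually encoded
//   }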
7293 /**
7294 * Emits code to update CRC-32 with a byte value according to constants in table
7295 *
7296 * @param [in,out] crc   Register containing the crc.
7297 * @param [in]     val   Register containing the byte to fold into the CRC.
7298 * @param [in]     table Register containing the table of crc constants.
7299 *
7300 * uint32_t crc;
7301 * val = crc_table[(val ^ crc) & 0xFF];
7302 * crc = val ^ (crc >> 8);
7303 *
7304 */
7305 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
7306 xorl(val, crc);
7307 andl(val, 0xFF);
7308 shrl(crc, 8); // unsigned shift
7309 xorl(crc, Address(table, val, Address::times_4, 0));
7310 }
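// For context, the byte update above is one step of the classic
// table-driven CRC-32 loop (a sketch; crc_table stands for the table
// whose address the caller loads into 'table'):
//   uint32_t crc32_update(uint32_t crc, const uint8_t* buf, size_t n) {
//     for (size_t i = 0; i < n; i++) {
//       crc = crc_table[(buf[i] ^ crc) & 0xFF] ^ (crc >> 8);
//     }
//     return crc;
//   }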
7312 /**
7313 * Fold 128-bit data chunk
7314 */
7315 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) {
7316 vpclmulhdq(xtmp, xK, xcrc); // [123:64]
7317 vpclmulldq(xcrc, xK, xcrc); // [63:0]
7318 vpxor(xcrc, xcrc, Address(buf, offset), false /* vector256 */);
7319 pxor(xcrc, xtmp);
7320 }
7322 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf) {
7323 vpclmulhdq(xtmp, xK, xcrc);
7324 vpclmulldq(xcrc, xK, xcrc);
7325 pxor(xcrc, xbuf);
7326 pxor(xcrc, xtmp);
7327 }
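// Folding background (illustrative): xK holds two 64-bit constants of the
// form x^N mod P(x) for the CRC polynomial P. One fold step computes
//   crc' = clmul(crc_hi, K_hi) ^ clmul(crc_lo, K_lo) ^ next_chunk
// so the running remainder stays 128 bits wide while consuming 16 bytes
// per step; the actual constants come from the StubRoutines tables.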
7329 /**
7330 * 8-bit folds to compute 32-bit CRC
7331 *
7332 * uint64_t xcrc;
7333 * timesXtoThe32[xcrc & 0xFF] ^ (xcrc >> 8);
7334 */
7335 void MacroAssembler::fold_8bit_crc32(XMMRegister xcrc, Register table, XMMRegister xtmp, Register tmp) {
7336 movdl(tmp, xcrc);
7337 andl(tmp, 0xFF);
7338 movdl(xtmp, Address(table, tmp, Address::times_4, 0));
7339 psrldq(xcrc, 1); // unsigned shift one byte
7340 pxor(xcrc, xtmp);
7341 }
7343 /**
7344 * uint32_t crc;
7345 * timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
7346 */
7347 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) {
7348 movl(tmp, crc);
7349 andl(tmp, 0xFF);
7350 shrl(crc, 8);
7351 xorl(crc, Address(table, tmp, Address::times_4, 0));
7352 }
7354 /**
7355 * @param crc register containing existing CRC (32-bit)
7356 * @param buf register pointing to input byte buffer (byte*)
7357 * @param len register containing number of bytes
7358 * @param table register that will contain address of CRC table
7359 * @param tmp scratch register
7360 */
7361 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp) {
7362 assert_different_registers(crc, buf, len, table, tmp, rax);
7364 Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned;
7365 Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop;
7367 lea(table, ExternalAddress(StubRoutines::crc_table_addr()));
7368 notl(crc); // ~crc
7369 cmpl(len, 16);
7370 jcc(Assembler::less, L_tail);
7372 // Align buffer to 16 bytes
7373 movl(tmp, buf);
7374 andl(tmp, 0xF);
7375 jccb(Assembler::zero, L_aligned);
7376 subl(tmp, 16);
7377 addl(len, tmp);
7379 align(4);
7380 BIND(L_align_loop);
7381 movsbl(rax, Address(buf, 0)); // load byte with sign extension
7382 update_byte_crc32(crc, rax, table);
7383 increment(buf);
7384 incrementl(tmp);
7385 jccb(Assembler::less, L_align_loop);
7387 BIND(L_aligned);
7388 movl(tmp, len); // save
7389 shrl(len, 4);
7390 jcc(Assembler::zero, L_tail_restore);
7392 // Fold crc into first bytes of vector
7393 movdqa(xmm1, Address(buf, 0));
7394 movdl(rax, xmm1);
7395 xorl(crc, rax);
7396 pinsrd(xmm1, crc, 0);
7397 addptr(buf, 16);
7398 subl(len, 4); // len > 0
7399 jcc(Assembler::less, L_fold_tail);
7401 movdqa(xmm2, Address(buf, 0));
7402 movdqa(xmm3, Address(buf, 16));
7403 movdqa(xmm4, Address(buf, 32));
7404 addptr(buf, 48);
7405 subl(len, 3);
7406 jcc(Assembler::lessEqual, L_fold_512b);
7408 // Fold total 512 bits of polynomial on each iteration,
7409 // 128 bits for each of 4 parallel streams.
7410 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32));
7412 align(32);
7413 BIND(L_fold_512b_loop);
7414 fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0);
7415 fold_128bit_crc32(xmm2, xmm0, xmm5, buf, 16);
7416 fold_128bit_crc32(xmm3, xmm0, xmm5, buf, 32);
7417 fold_128bit_crc32(xmm4, xmm0, xmm5, buf, 48);
7418 addptr(buf, 64);
7419 subl(len, 4);
7420 jcc(Assembler::greater, L_fold_512b_loop);
7422 // Fold 512 bits to 128 bits.
7423 BIND(L_fold_512b);
7424 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16));
7425 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm2);
7426 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm3);
7427 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm4);
7429 // Fold the remaining 128-bit data chunks
7430 BIND(L_fold_tail);
7431 addl(len, 3);
7432 jccb(Assembler::lessEqual, L_fold_128b);
7433 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16));
7435 BIND(L_fold_tail_loop);
7436 fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0);
7437 addptr(buf, 16);
7438 decrementl(len);
7439 jccb(Assembler::greater, L_fold_tail_loop);
7441 // Fold 128 bits in xmm1 down into 32 bits in crc register.
7442 BIND(L_fold_128b);
7443 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr()));
7444 vpclmulqdq(xmm2, xmm0, xmm1, 0x1);
7445 vpand(xmm3, xmm0, xmm2, false /* vector256 */);
7446 vpclmulqdq(xmm0, xmm0, xmm3, 0x1);
7447 psrldq(xmm1, 8);
7448 psrldq(xmm2, 4);
7449 pxor(xmm0, xmm1);
7450 pxor(xmm0, xmm2);
7452 // 8 8-bit folds to compute 32-bit CRC.
7453 for (int j = 0; j < 4; j++) {
7454 fold_8bit_crc32(xmm0, table, xmm1, rax);
7455 }
7456 movdl(crc, xmm0); // mov 32 bits to general register
7457 for (int j = 0; j < 4; j++) {
7458 fold_8bit_crc32(crc, table, rax);
7459 }
7461 BIND(L_tail_restore);
7462 movl(len, tmp); // restore
7463 BIND(L_tail);
7464 andl(len, 0xf);
7465 jccb(Assembler::zero, L_exit);
7467 // Fold the remaining bytes
7468 align(4);
7469 BIND(L_tail_loop);
7470 movsbl(rax, Address(buf, 0)); // load byte with sign extension
7471 update_byte_crc32(crc, rax, table);
7472 increment(buf);
7473 decrementl(len);
7474 jccb(Assembler::greater, L_tail_loop);
7476 BIND(L_exit);
7477 notl(crc); // ~crc
7478 }
7480 #undef BIND
7481 #undef BLOCK_COMMENT
7484 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
7485 switch (cond) {
7486 // Note some conditions are synonyms for others
7487 case Assembler::zero: return Assembler::notZero;
7488 case Assembler::notZero: return Assembler::zero;
7489 case Assembler::less: return Assembler::greaterEqual;
7490 case Assembler::lessEqual: return Assembler::greater;
7491 case Assembler::greater: return Assembler::lessEqual;
7492 case Assembler::greaterEqual: return Assembler::less;
7493 case Assembler::below: return Assembler::aboveEqual;
7494 case Assembler::belowEqual: return Assembler::above;
7495 case Assembler::above: return Assembler::belowEqual;
7496 case Assembler::aboveEqual: return Assembler::below;
7497 case Assembler::overflow: return Assembler::noOverflow;
7498 case Assembler::noOverflow: return Assembler::overflow;
7499 case Assembler::negative: return Assembler::positive;
7500 case Assembler::positive: return Assembler::negative;
7501 case Assembler::parity: return Assembler::noParity;
7502 case Assembler::noParity: return Assembler::parity;
7503 }
7504 ShouldNotReachHere(); return Assembler::overflow;
7505 }
7507 SkipIfEqual::SkipIfEqual(
7508 MacroAssembler* masm, const bool* flag_addr, bool value) {
7509 _masm = masm;
7510 _masm->cmp8(ExternalAddress((address)flag_addr), value);
7511 _masm->jcc(Assembler::equal, _label);
7512 }
7514 SkipIfEqual::~SkipIfEqual() {
7515 _masm->bind(_label);
7516 }
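// Typical use of SkipIfEqual (a sketch; 'SomeFlag' is hypothetical): the
// constructor emits the compare-and-jump, the destructor binds the target,
// so the enclosed code runs only when *flag_addr != value at runtime:
//   {
//     SkipIfEqual skip_if(masm, &SomeFlag, true);
//     // ... generated code executed only when SomeFlag != true ...
//   }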