Thu, 14 Jun 2018 09:15:08 -0700
8081202: Hotspot compile warning: "Invalid suffix on literal; C++11 requires a space between literal and identifier"
Summary: Need to add a space between macro identifier and string literal
Reviewed-by: bpittore, stefank, dholmes, kbarrett
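For context, a minimal sketch of the warning being fixed (illustrative code, not a hunk from this patch): under C++11, a string literal immediately followed by an identifier is parsed as a user-defined literal with a suffix, so format macros such as HotSpot's PTR_FORMAT must be separated from adjacent literals by a space.

  tty->print_cr("ptr = "PTR_FORMAT, p);   // C++11 warning: PTR_FORMAT parsed as a literal suffix
  tty->print_cr("ptr = " PTR_FORMAT, p);  // fixed: space between literal and identifier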
1 /*
2 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "asm/assembler.hpp"
27 #include "asm/assembler.inline.hpp"
28 #include "compiler/disassembler.hpp"
29 #include "gc_interface/collectedHeap.inline.hpp"
30 #include "interpreter/interpreter.hpp"
31 #include "memory/cardTableModRefBS.hpp"
32 #include "memory/resourceArea.hpp"
33 #include "memory/universe.hpp"
34 #include "prims/methodHandles.hpp"
35 #include "runtime/biasedLocking.hpp"
36 #include "runtime/interfaceSupport.hpp"
37 #include "runtime/objectMonitor.hpp"
38 #include "runtime/os.hpp"
39 #include "runtime/sharedRuntime.hpp"
40 #include "runtime/stubRoutines.hpp"
41 #include "utilities/macros.hpp"
42 #if INCLUDE_ALL_GCS
43 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
44 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
45 #include "gc_implementation/g1/heapRegion.hpp"
46 #endif // INCLUDE_ALL_GCS
48 #ifdef PRODUCT
49 #define BLOCK_COMMENT(str) /* nothing */
50 #define STOP(error) stop(error)
51 #else
52 #define BLOCK_COMMENT(str) block_comment(str)
53 #define STOP(error) block_comment(error); stop(error)
54 #endif
56 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
58 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
60 #ifdef ASSERT
61 bool AbstractAssembler::pd_check_instruction_mark() { return true; }
62 #endif
64 static Assembler::Condition reverse[] = {
65 Assembler::noOverflow /* overflow = 0x0 */ ,
66 Assembler::overflow /* noOverflow = 0x1 */ ,
67 Assembler::aboveEqual /* carrySet = 0x2, below = 0x2 */ ,
68 Assembler::below /* aboveEqual = 0x3, carryClear = 0x3 */ ,
69 Assembler::notZero /* zero = 0x4, equal = 0x4 */ ,
70 Assembler::zero /* notZero = 0x5, notEqual = 0x5 */ ,
71 Assembler::above /* belowEqual = 0x6 */ ,
72 Assembler::belowEqual /* above = 0x7 */ ,
73 Assembler::positive /* negative = 0x8 */ ,
74 Assembler::negative /* positive = 0x9 */ ,
75 Assembler::noParity /* parity = 0xa */ ,
76 Assembler::parity /* noParity = 0xb */ ,
77 Assembler::greaterEqual /* less = 0xc */ ,
78 Assembler::less /* greaterEqual = 0xd */ ,
79 Assembler::greater /* lessEqual = 0xe */ ,
80 Assembler::lessEqual /* greater = 0xf */
82 };
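// Added note: each entry of reverse[] is the negation of the condition whose x86
// encoding indexes it (e.g. reverse[0x0 /* overflow */] == noOverflow), so callers
// can invert a branch condition with a single table lookup.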
85 // Implementation of MacroAssembler
87 // First, all the versions that differ between 32- and 64-bit,
88 // unless the difference is trivial (a line or so).
90 #ifndef _LP64
92 // 32bit versions
94 Address MacroAssembler::as_Address(AddressLiteral adr) {
95 return Address(adr.target(), adr.rspec());
96 }
98 Address MacroAssembler::as_Address(ArrayAddress adr) {
99 return Address::make_array(adr);
100 }
102 void MacroAssembler::call_VM_leaf_base(address entry_point,
103 int number_of_arguments) {
104 call(RuntimeAddress(entry_point));
105 increment(rsp, number_of_arguments * wordSize);
106 }
108 void MacroAssembler::cmpklass(Address src1, Metadata* obj) {
109 cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
110 }
112 void MacroAssembler::cmpklass(Register src1, Metadata* obj) {
113 cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
114 }
116 void MacroAssembler::cmpoop(Address src1, jobject obj) {
117 cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
118 }
120 void MacroAssembler::cmpoop(Register src1, jobject obj) {
121 cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
122 }
124 void MacroAssembler::extend_sign(Register hi, Register lo) {
125 // According to Intel Doc. AP-526, "Integer Divide", p.18.
126 if (VM_Version::is_P6() && hi == rdx && lo == rax) {
127 cdql();
128 } else {
129 movl(hi, lo);
130 sarl(hi, 31);
131 }
132 }
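// Added illustration (hypothetical values, not part of the file): both paths above
// compute the same thing -- cdq copies bit 31 of rax into every bit of rdx, exactly
// like the explicit copy-and-shift:
//   int32_t lo = -5;
//   int32_t hi = lo >> 31;   // arithmetic shift replicates the sign bit: hi == -1
// The cdql() form is just the cheaper encoding on P6 when the registers line up.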
134 void MacroAssembler::jC2(Register tmp, Label& L) {
135 // set parity bit if FPU flag C2 is set (via rax)
136 save_rax(tmp);
137 fwait(); fnstsw_ax();
138 sahf();
139 restore_rax(tmp);
140 // branch
141 jcc(Assembler::parity, L);
142 }
144 void MacroAssembler::jnC2(Register tmp, Label& L) {
145 // set parity bit if FPU flag C2 is set (via rax)
146 save_rax(tmp);
147 fwait(); fnstsw_ax();
148 sahf();
149 restore_rax(tmp);
150 // branch
151 jcc(Assembler::noParity, L);
152 }
154 // 32bit can do a case table jump in one instruction but we no longer allow the base
155 // to be installed in the Address class
156 void MacroAssembler::jump(ArrayAddress entry) {
157 jmp(as_Address(entry));
158 }
160 // Note: y_lo will be destroyed
161 void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
162 // Long compare for Java (semantics as described in JVM spec.)
163 Label high, low, done;
165 cmpl(x_hi, y_hi);
166 jcc(Assembler::less, low);
167 jcc(Assembler::greater, high);
168 // x_hi is the return register
169 xorl(x_hi, x_hi);
170 cmpl(x_lo, y_lo);
171 jcc(Assembler::below, low);
172 jcc(Assembler::equal, done);
174 bind(high);
175 xorl(x_hi, x_hi);
176 increment(x_hi);
177 jmp(done);
179 bind(low);
180 xorl(x_hi, x_hi);
181 decrementl(x_hi);
183 bind(done);
184 }
186 void MacroAssembler::lea(Register dst, AddressLiteral src) {
187 mov_literal32(dst, (int32_t)src.target(), src.rspec());
188 }
190 void MacroAssembler::lea(Address dst, AddressLiteral adr) {
191 // leal(dst, as_Address(adr));
192 // see note in movl as to why we must use a move
193 mov_literal32(dst, (int32_t) adr.target(), adr.rspec());
194 }
196 void MacroAssembler::leave() {
197 mov(rsp, rbp);
198 pop(rbp);
199 }
201 void MacroAssembler::lmul(int x_rsp_offset, int y_rsp_offset) {
202 // Multiplication of two Java long values stored on the stack
203 // as illustrated below. Result is in rdx:rax.
204 //
205 // rsp ---> [ ?? ] \ \
206 // .... | y_rsp_offset |
207 // [ y_lo ] / (in bytes) | x_rsp_offset
208 // [ y_hi ] | (in bytes)
209 // .... |
210 // [ x_lo ] /
211 // [ x_hi ]
212 // ....
213 //
214 // Basic idea: lo(result) = lo(x_lo * y_lo)
215 // hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) + lo(x_lo * y_hi)
216 Address x_hi(rsp, x_rsp_offset + wordSize); Address x_lo(rsp, x_rsp_offset);
217 Address y_hi(rsp, y_rsp_offset + wordSize); Address y_lo(rsp, y_rsp_offset);
218 Label quick;
219 // load x_hi, y_hi and check if quick
220 // multiplication is possible
221 movl(rbx, x_hi);
222 movl(rcx, y_hi);
223 movl(rax, rbx);
224 orl(rbx, rcx); // rbx = 0 <=> x_hi = 0 and y_hi = 0
225 jcc(Assembler::zero, quick); // if rbx = 0 do quick multiply
226 // do full multiplication
227 // 1st step
228 mull(y_lo); // x_hi * y_lo
229 movl(rbx, rax); // save lo(x_hi * y_lo) in rbx
230 // 2nd step
231 movl(rax, x_lo);
232 mull(rcx); // x_lo * y_hi
233 addl(rbx, rax); // add lo(x_lo * y_hi) to rbx
234 // 3rd step
235 bind(quick); // note: rbx = 0 if quick multiply!
236 movl(rax, x_lo);
237 mull(y_lo); // x_lo * y_lo
238 addl(rdx, rbx); // correct hi(x_lo * y_lo)
239 }
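// Added sketch of the identity used above (hypothetical C, not part of the build):
// with x == ((uint64_t)x_hi << 32 | x_lo) and y likewise, the low 64 bits of x*y are
//   (uint64_t)x_lo * y_lo + ((uint64_t)(uint32_t)(x_hi*y_lo + x_lo*y_hi) << 32)
// since the x_hi*y_hi term and the upper halves of the cross products only affect
// bits 64 and up -- hence the three mull steps and the final addl(rdx, rbx).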
241 void MacroAssembler::lneg(Register hi, Register lo) {
242 negl(lo);
243 adcl(hi, 0);
244 negl(hi);
245 }
247 void MacroAssembler::lshl(Register hi, Register lo) {
248 // Java shift left long support (semantics as described in JVM spec., p.305)
249 // (basic idea for shift counts s >= n: x << s == (x << n) << (s - n))
250 // shift value is in rcx !
251 assert(hi != rcx, "must not use rcx");
252 assert(lo != rcx, "must not use rcx");
253 const Register s = rcx; // shift count
254 const int n = BitsPerWord;
255 Label L;
256 andl(s, 0x3f); // s := s & 0x3f (s < 0x40)
257 cmpl(s, n); // if (s < n)
258 jcc(Assembler::less, L); // else (s >= n)
259 movl(hi, lo); // x := x << n
260 xorl(lo, lo);
261 // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
262 bind(L); // s (mod n) < n
263 shldl(hi, lo); // x := x << s
264 shll(lo);
265 }
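// Added illustration of the s >= n case above: shifting left by s in [32, 63] first
// does movl(hi, lo); xorl(lo, lo) -- that is, x << 32 -- and then relies on the
// hardware masking the shld/shl count to s mod 32, which supplies the remaining
// s - 32 bits; that is why the subl(s, n) can be omitted.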
268 void MacroAssembler::lshr(Register hi, Register lo, bool sign_extension) {
269 // Java shift right long support (semantics as described in JVM spec., p.306 & p.310)
270 // (basic idea for shift counts s >= n: x >> s == (x >> n) >> (s - n))
271 assert(hi != rcx, "must not use rcx");
272 assert(lo != rcx, "must not use rcx");
273 const Register s = rcx; // shift count
274 const int n = BitsPerWord;
275 Label L;
276 andl(s, 0x3f); // s := s & 0x3f (s < 0x40)
277 cmpl(s, n); // if (s < n)
278 jcc(Assembler::less, L); // else (s >= n)
279 movl(lo, hi); // x := x >> n
280 if (sign_extension) sarl(hi, 31);
281 else xorl(hi, hi);
282 // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
283 bind(L); // s (mod n) < n
284 shrdl(lo, hi); // x := x >> s
285 if (sign_extension) sarl(hi);
286 else shrl(hi);
287 }
289 void MacroAssembler::movoop(Register dst, jobject obj) {
290 mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
291 }
293 void MacroAssembler::movoop(Address dst, jobject obj) {
294 mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
295 }
297 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
298 mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
299 }
301 void MacroAssembler::mov_metadata(Address dst, Metadata* obj) {
302 mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
303 }
305 void MacroAssembler::movptr(Register dst, AddressLiteral src, Register scratch) {
306 // scratch register is not used,
307 // it is defined to match parameters of 64-bit version of this method.
308 if (src.is_lval()) {
309 mov_literal32(dst, (intptr_t)src.target(), src.rspec());
310 } else {
311 movl(dst, as_Address(src));
312 }
313 }
315 void MacroAssembler::movptr(ArrayAddress dst, Register src) {
316 movl(as_Address(dst), src);
317 }
319 void MacroAssembler::movptr(Register dst, ArrayAddress src) {
320 movl(dst, as_Address(src));
321 }
323 // src should NEVER be a real pointer. Use AddressLiteral for true pointers
324 void MacroAssembler::movptr(Address dst, intptr_t src) {
325 movl(dst, src);
326 }
329 void MacroAssembler::pop_callee_saved_registers() {
330 pop(rcx);
331 pop(rdx);
332 pop(rdi);
333 pop(rsi);
334 }
336 void MacroAssembler::pop_fTOS() {
337 fld_d(Address(rsp, 0));
338 addl(rsp, 2 * wordSize);
339 }
341 void MacroAssembler::push_callee_saved_registers() {
342 push(rsi);
343 push(rdi);
344 push(rdx);
345 push(rcx);
346 }
348 void MacroAssembler::push_fTOS() {
349 subl(rsp, 2 * wordSize);
350 fstp_d(Address(rsp, 0));
351 }
354 void MacroAssembler::pushoop(jobject obj) {
355 push_literal32((int32_t)obj, oop_Relocation::spec_for_immediate());
356 }
358 void MacroAssembler::pushklass(Metadata* obj) {
359 push_literal32((int32_t)obj, metadata_Relocation::spec_for_immediate());
360 }
362 void MacroAssembler::pushptr(AddressLiteral src) {
363 if (src.is_lval()) {
364 push_literal32((int32_t)src.target(), src.rspec());
365 } else {
366 pushl(as_Address(src));
367 }
368 }
370 void MacroAssembler::set_word_if_not_zero(Register dst) {
371 xorl(dst, dst);
372 set_byte_if_not_zero(dst);
373 }
375 static void pass_arg0(MacroAssembler* masm, Register arg) {
376 masm->push(arg);
377 }
379 static void pass_arg1(MacroAssembler* masm, Register arg) {
380 masm->push(arg);
381 }
383 static void pass_arg2(MacroAssembler* masm, Register arg) {
384 masm->push(arg);
385 }
387 static void pass_arg3(MacroAssembler* masm, Register arg) {
388 masm->push(arg);
389 }
391 #ifndef PRODUCT
392 extern "C" void findpc(intptr_t x);
393 #endif
395 void MacroAssembler::debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg) {
396 // In order to get locks to work, we need to fake an in_VM state
397 JavaThread* thread = JavaThread::current();
398 JavaThreadState saved_state = thread->thread_state();
399 thread->set_thread_state(_thread_in_vm);
400 if (ShowMessageBoxOnError) {
401 JavaThread* thread = JavaThread::current();
402 JavaThreadState saved_state = thread->thread_state();
403 thread->set_thread_state(_thread_in_vm);
404 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
405 ttyLocker ttyl;
406 BytecodeCounter::print();
407 }
408 // To see where a verify_oop failed, get $ebx+40/X for this frame.
409 // This is the value of eip which points to where verify_oop will return.
410 if (os::message_box(msg, "Execution stopped, print registers?")) {
411 print_state32(rdi, rsi, rbp, rsp, rbx, rdx, rcx, rax, eip);
412 BREAKPOINT;
413 }
414 } else {
415 ttyLocker ttyl;
416 ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
417 }
418 // Don't assert holding the ttyLock
419 assert(false, err_msg("DEBUG MESSAGE: %s", msg));
420 ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
421 }
423 void MacroAssembler::print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip) {
424 ttyLocker ttyl;
425 FlagSetting fs(Debugging, true);
426 tty->print_cr("eip = 0x%08x", eip);
427 #ifndef PRODUCT
428 if ((WizardMode || Verbose) && PrintMiscellaneous) {
429 tty->cr();
430 findpc(eip);
431 tty->cr();
432 }
433 #endif
434 #define PRINT_REG(rax) \
435 { tty->print("%s = ", #rax); os::print_location(tty, rax); }
436 PRINT_REG(rax);
437 PRINT_REG(rbx);
438 PRINT_REG(rcx);
439 PRINT_REG(rdx);
440 PRINT_REG(rdi);
441 PRINT_REG(rsi);
442 PRINT_REG(rbp);
443 PRINT_REG(rsp);
444 #undef PRINT_REG
445 // Print some words near the top of the stack.
446 int* dump_sp = (int*) rsp;
447 for (int col1 = 0; col1 < 8; col1++) {
448 tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
449 os::print_location(tty, *dump_sp++);
450 }
451 for (int row = 0; row < 16; row++) {
452 tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
453 for (int col = 0; col < 8; col++) {
454 tty->print(" 0x%08x", *dump_sp++);
455 }
456 tty->cr();
457 }
458 // Print some instructions around pc:
459 Disassembler::decode((address)eip-64, (address)eip);
460 tty->print_cr("--------");
461 Disassembler::decode((address)eip, (address)eip+32);
462 }
464 void MacroAssembler::stop(const char* msg) {
465 ExternalAddress message((address)msg);
466 // push address of message
467 pushptr(message.addr());
468 { Label L; call(L, relocInfo::none); bind(L); } // push eip
469 pusha(); // push registers
470 call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32)));
471 hlt();
472 }
474 void MacroAssembler::warn(const char* msg) {
475 push_CPU_state();
477 ExternalAddress message((address) msg);
478 // push address of message
479 pushptr(message.addr());
481 call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));
482 addl(rsp, wordSize); // discard argument
483 pop_CPU_state();
484 }
486 void MacroAssembler::print_state() {
487 { Label L; call(L, relocInfo::none); bind(L); } // push eip
488 pusha(); // push registers
490 push_CPU_state();
491 call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::print_state32)));
492 pop_CPU_state();
494 popa();
495 addl(rsp, wordSize);
496 }
498 #else // _LP64
500 // 64 bit versions
502 Address MacroAssembler::as_Address(AddressLiteral adr) {
503 // amd64 always does this as a pc-rel
504 // we can be absolute or disp based on the instruction type
505 // jmp/call are displacements; others are absolute
506 assert(!adr.is_lval(), "must be rval");
507 assert(reachable(adr), "must be");
508 return Address((int32_t)(intptr_t)(adr.target() - pc()), adr.target(), adr.reloc());
510 }
512 Address MacroAssembler::as_Address(ArrayAddress adr) {
513 AddressLiteral base = adr.base();
514 lea(rscratch1, base);
515 Address index = adr.index();
516 assert(index._disp == 0, "must not have disp"); // maybe it can?
517 Address array(rscratch1, index._index, index._scale, index._disp);
518 return array;
519 }
521 void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) {
522 Label L, E;
524 #ifdef _WIN64
525 // Windows always allocates space for its register args
526 assert(num_args <= 4, "only register arguments supported");
527 subq(rsp, frame::arg_reg_save_area_bytes);
528 #endif
530 // Align stack if necessary
531 testl(rsp, 15);
532 jcc(Assembler::zero, L);
534 subq(rsp, 8);
535 {
536 call(RuntimeAddress(entry_point));
537 }
538 addq(rsp, 8);
539 jmp(E);
541 bind(L);
542 {
543 call(RuntimeAddress(entry_point));
544 }
546 bind(E);
548 #ifdef _WIN64
549 // restore stack pointer
550 addq(rsp, frame::arg_reg_save_area_bytes);
551 #endif
553 }
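// Added note on the alignment dance above: testl(rsp, 15) tests 16-byte alignment.
// If rsp is already aligned we jump to L and call directly; otherwise (typically off
// by 8) the subq(rsp, 8)/addq(rsp, 8) pair around the first call gives the x86-64
// ABI its required 16-byte stack alignment at the call instruction.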
555 void MacroAssembler::cmp64(Register src1, AddressLiteral src2) {
556 assert(!src2.is_lval(), "should use cmpptr");
558 if (reachable(src2)) {
559 cmpq(src1, as_Address(src2));
560 } else {
561 lea(rscratch1, src2);
562 Assembler::cmpq(src1, Address(rscratch1, 0));
563 }
564 }
566 int MacroAssembler::corrected_idivq(Register reg) {
567 // Full implementation of Java ldiv and lrem; checks for special
568 // case as described in JVM spec., p.243 & p.271. The function
569 // returns the (pc) offset of the idivl instruction - may be needed
570 // for implicit exceptions.
571 //
572 // normal case special case
573 //
574 // input : rax: dividend min_long
575 // reg: divisor (may not be eax/edx) -1
576 //
577 // output: rax: quotient (= rax idiv reg) min_long
578 // rdx: remainder (= rax irem reg) 0
579 assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
580 static const int64_t min_long = 0x8000000000000000;
581 Label normal_case, special_case;
583 // check for special case
584 cmp64(rax, ExternalAddress((address) &min_long));
585 jcc(Assembler::notEqual, normal_case);
586 xorl(rdx, rdx); // prepare rdx for possible special case (where
587 // remainder = 0)
588 cmpq(reg, -1);
589 jcc(Assembler::equal, special_case);
591 // handle normal case
592 bind(normal_case);
593 cdqq();
594 int idivq_offset = offset();
595 idivq(reg);
597 // normal and special case exit
598 bind(special_case);
600 return idivq_offset;
601 }
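// Added reasoning for the special case: Java defines Long.MIN_VALUE / -1 as
// Long.MIN_VALUE with remainder 0, but the hardware idivq raises a divide error for
// that operand pair (the true quotient +2^63 is unrepresentable). Since rax already
// holds min_long, only rdx needs zeroing before the divide is skipped.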
603 void MacroAssembler::decrementq(Register reg, int value) {
604 if (value == min_jint) { subq(reg, value); return; }
605 if (value < 0) { incrementq(reg, -value); return; }
606 if (value == 0) { ; return; }
607 if (value == 1 && UseIncDec) { decq(reg) ; return; }
608 /* else */ { subq(reg, value) ; return; }
609 }
611 void MacroAssembler::decrementq(Address dst, int value) {
612 if (value == min_jint) { subq(dst, value); return; }
613 if (value < 0) { incrementq(dst, -value); return; }
614 if (value == 0) { ; return; }
615 if (value == 1 && UseIncDec) { decq(dst) ; return; }
616 /* else */ { subq(dst, value) ; return; }
617 }
619 void MacroAssembler::incrementq(AddressLiteral dst) {
620 if (reachable(dst)) {
621 incrementq(as_Address(dst));
622 } else {
623 lea(rscratch1, dst);
624 incrementq(Address(rscratch1, 0));
625 }
626 }
628 void MacroAssembler::incrementq(Register reg, int value) {
629 if (value == min_jint) { addq(reg, value); return; }
630 if (value < 0) { decrementq(reg, -value); return; }
631 if (value == 0) { ; return; }
632 if (value == 1 && UseIncDec) { incq(reg) ; return; }
633 /* else */ { addq(reg, value) ; return; }
634 }
636 void MacroAssembler::incrementq(Address dst, int value) {
637 if (value == min_jint) { addq(dst, value); return; }
638 if (value < 0) { decrementq(dst, -value); return; }
639 if (value == 0) { ; return; }
640 if (value == 1 && UseIncDec) { incq(dst) ; return; }
641 /* else */ { addq(dst, value) ; return; }
642 }
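// Added note on the min_jint guard in the four helpers above: negating min_jint
// overflows a 32-bit int, so that value is handed straight to addq/subq before the
// "if (value < 0) use the opposite helper with -value" rewrite is attempted.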
644 // 32bit can do a case table jump in one instruction but we no longer allow the base
645 // to be installed in the Address class
646 void MacroAssembler::jump(ArrayAddress entry) {
647 lea(rscratch1, entry.base());
648 Address dispatch = entry.index();
649 assert(dispatch._base == noreg, "must be");
650 dispatch._base = rscratch1;
651 jmp(dispatch);
652 }
654 void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
655 ShouldNotReachHere(); // 64bit doesn't use two regs
656 cmpq(x_lo, y_lo);
657 }
659 void MacroAssembler::lea(Register dst, AddressLiteral src) {
660 mov_literal64(dst, (intptr_t)src.target(), src.rspec());
661 }
663 void MacroAssembler::lea(Address dst, AddressLiteral adr) {
664 mov_literal64(rscratch1, (intptr_t)adr.target(), adr.rspec());
665 movptr(dst, rscratch1);
666 }
668 void MacroAssembler::leave() {
669 // %%% is this really better? Why not on 32bit too?
670 emit_int8((unsigned char)0xC9); // LEAVE
671 }
673 void MacroAssembler::lneg(Register hi, Register lo) {
674 ShouldNotReachHere(); // 64bit doesn't use two regs
675 negq(lo);
676 }
678 void MacroAssembler::movoop(Register dst, jobject obj) {
679 mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate());
680 }
682 void MacroAssembler::movoop(Address dst, jobject obj) {
683 mov_literal64(rscratch1, (intptr_t)obj, oop_Relocation::spec_for_immediate());
684 movq(dst, rscratch1);
685 }
687 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
688 mov_literal64(dst, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
689 }
691 void MacroAssembler::mov_metadata(Address dst, Metadata* obj) {
692 mov_literal64(rscratch1, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
693 movq(dst, rscratch1);
694 }
696 void MacroAssembler::movptr(Register dst, AddressLiteral src, Register scratch) {
697 if (src.is_lval()) {
698 mov_literal64(dst, (intptr_t)src.target(), src.rspec());
699 } else {
700 if (reachable(src)) {
701 movq(dst, as_Address(src));
702 } else {
703 lea(scratch, src);
704 movq(dst, Address(scratch, 0));
705 }
706 }
707 }
709 void MacroAssembler::movptr(ArrayAddress dst, Register src) {
710 movq(as_Address(dst), src);
711 }
713 void MacroAssembler::movptr(Register dst, ArrayAddress src) {
714 movq(dst, as_Address(src));
715 }
717 // src should NEVER be a real pointer. Use AddressLiteral for true pointers
718 void MacroAssembler::movptr(Address dst, intptr_t src) {
719 mov64(rscratch1, src);
720 movq(dst, rscratch1);
721 }
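// Added note: x86-64 has no store of a full 64-bit immediate to memory (movq with
// an immediate sign-extends a 32-bit value), so the constant is materialized in
// rscratch1 with mov64 and then stored.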
723 // These are mostly for initializing NULL
724 void MacroAssembler::movptr(Address dst, int32_t src) {
725 movslq(dst, src);
726 }
728 void MacroAssembler::movptr(Register dst, int32_t src) {
729 mov64(dst, (intptr_t)src);
730 }
732 void MacroAssembler::pushoop(jobject obj) {
733 movoop(rscratch1, obj);
734 push(rscratch1);
735 }
737 void MacroAssembler::pushklass(Metadata* obj) {
738 mov_metadata(rscratch1, obj);
739 push(rscratch1);
740 }
742 void MacroAssembler::pushptr(AddressLiteral src) {
743 lea(rscratch1, src);
744 if (src.is_lval()) {
745 push(rscratch1);
746 } else {
747 pushq(Address(rscratch1, 0));
748 }
749 }
751 void MacroAssembler::reset_last_Java_frame(bool clear_fp) {
752 // we must set sp to zero to clear frame
753 movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
754 // must clear fp, so that compiled frames are not confused; it is
755 // possible that we need it only for debugging
756 if (clear_fp) {
757 movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
758 }
760 // Always clear the pc because it could have been set by make_walkable()
761 movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
762 }
764 void MacroAssembler::set_last_Java_frame(Register last_java_sp,
765 Register last_java_fp,
766 address last_java_pc) {
767 // determine last_java_sp register
768 if (!last_java_sp->is_valid()) {
769 last_java_sp = rsp;
770 }
772 // last_java_fp is optional
773 if (last_java_fp->is_valid()) {
774 movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()),
775 last_java_fp);
776 }
778 // last_java_pc is optional
779 if (last_java_pc != NULL) {
780 Address java_pc(r15_thread,
781 JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
782 lea(rscratch1, InternalAddress(last_java_pc));
783 movptr(java_pc, rscratch1);
784 }
786 movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
787 }
789 static void pass_arg0(MacroAssembler* masm, Register arg) {
790 if (c_rarg0 != arg ) {
791 masm->mov(c_rarg0, arg);
792 }
793 }
795 static void pass_arg1(MacroAssembler* masm, Register arg) {
796 if (c_rarg1 != arg ) {
797 masm->mov(c_rarg1, arg);
798 }
799 }
801 static void pass_arg2(MacroAssembler* masm, Register arg) {
802 if (c_rarg2 != arg ) {
803 masm->mov(c_rarg2, arg);
804 }
805 }
807 static void pass_arg3(MacroAssembler* masm, Register arg) {
808 if (c_rarg3 != arg ) {
809 masm->mov(c_rarg3, arg);
810 }
811 }
813 void MacroAssembler::stop(const char* msg) {
814 address rip = pc();
815 pusha(); // get regs on stack
816 lea(c_rarg0, ExternalAddress((address) msg));
817 lea(c_rarg1, InternalAddress(rip));
818 movq(c_rarg2, rsp); // pass pointer to regs array
819 andq(rsp, -16); // align stack as required by ABI
820 call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
821 hlt();
822 }
824 void MacroAssembler::warn(const char* msg) {
825 push(rbp);
826 movq(rbp, rsp);
827 andq(rsp, -16); // align stack as required by push_CPU_state and call
828 push_CPU_state(); // keeps alignment at 16 bytes
829 lea(c_rarg0, ExternalAddress((address) msg));
830 call_VM_leaf(CAST_FROM_FN_PTR(address, warning), c_rarg0);
831 pop_CPU_state();
832 mov(rsp, rbp);
833 pop(rbp);
834 }
836 void MacroAssembler::print_state() {
837 address rip = pc();
838 pusha(); // get regs on stack
839 push(rbp);
840 movq(rbp, rsp);
841 andq(rsp, -16); // align stack as required by push_CPU_state and call
842 push_CPU_state(); // keeps alignment at 16 bytes
844 lea(c_rarg0, InternalAddress(rip));
845 lea(c_rarg1, Address(rbp, wordSize)); // pass pointer to regs array
846 call_VM_leaf(CAST_FROM_FN_PTR(address, MacroAssembler::print_state64), c_rarg0, c_rarg1);
848 pop_CPU_state();
849 mov(rsp, rbp);
850 pop(rbp);
851 popa();
852 }
854 #ifndef PRODUCT
855 extern "C" void findpc(intptr_t x);
856 #endif
858 void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
859 // In order to get locks to work, we need to fake an in_VM state
860 if (ShowMessageBoxOnError) {
861 JavaThread* thread = JavaThread::current();
862 JavaThreadState saved_state = thread->thread_state();
863 thread->set_thread_state(_thread_in_vm);
864 #ifndef PRODUCT
865 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
866 ttyLocker ttyl;
867 BytecodeCounter::print();
868 }
869 #endif
870 // To see where a verify_oop failed, get $ebx+40/X for this frame.
871 // XXX correct this offset for amd64
872 // This is the value of eip which points to where verify_oop will return.
873 if (os::message_box(msg, "Execution stopped, print registers?")) {
874 print_state64(pc, regs);
875 BREAKPOINT;
876 assert(false, "start up GDB");
877 }
878 ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
879 } else {
880 ttyLocker ttyl;
881 ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n",
882 msg);
883 assert(false, err_msg("DEBUG MESSAGE: %s", msg));
884 }
885 }
887 void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) {
888 ttyLocker ttyl;
889 FlagSetting fs(Debugging, true);
890 tty->print_cr("rip = 0x%016lx", pc);
891 #ifndef PRODUCT
892 tty->cr();
893 findpc(pc);
894 tty->cr();
895 #endif
896 #define PRINT_REG(rax, value) \
897 { tty->print("%s = ", #rax); os::print_location(tty, value); }
898 PRINT_REG(rax, regs[15]);
899 PRINT_REG(rbx, regs[12]);
900 PRINT_REG(rcx, regs[14]);
901 PRINT_REG(rdx, regs[13]);
902 PRINT_REG(rdi, regs[8]);
903 PRINT_REG(rsi, regs[9]);
904 PRINT_REG(rbp, regs[10]);
905 PRINT_REG(rsp, regs[11]);
906 PRINT_REG(r8 , regs[7]);
907 PRINT_REG(r9 , regs[6]);
908 PRINT_REG(r10, regs[5]);
909 PRINT_REG(r11, regs[4]);
910 PRINT_REG(r12, regs[3]);
911 PRINT_REG(r13, regs[2]);
912 PRINT_REG(r14, regs[1]);
913 PRINT_REG(r15, regs[0]);
914 #undef PRINT_REG
915 // Print some words near the top of the stack.
916 int64_t* rsp = (int64_t*) regs[11];
917 int64_t* dump_sp = rsp;
918 for (int col1 = 0; col1 < 8; col1++) {
919 tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (int64_t)dump_sp);
920 os::print_location(tty, *dump_sp++);
921 }
922 for (int row = 0; row < 25; row++) {
923 tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (int64_t)dump_sp);
924 for (int col = 0; col < 4; col++) {
925 tty->print(" 0x%016lx", *dump_sp++);
926 }
927 tty->cr();
928 }
929 // Print some instructions around pc:
930 Disassembler::decode((address)pc-64, (address)pc);
931 tty->print_cr("--------");
932 Disassembler::decode((address)pc, (address)pc+32);
933 }
935 #endif // _LP64
937 // Now versions that are common to 32/64 bit
939 void MacroAssembler::addptr(Register dst, int32_t imm32) {
940 LP64_ONLY(addq(dst, imm32)) NOT_LP64(addl(dst, imm32));
941 }
943 void MacroAssembler::addptr(Register dst, Register src) {
944 LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
945 }
947 void MacroAssembler::addptr(Address dst, Register src) {
948 LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
949 }
951 void MacroAssembler::addsd(XMMRegister dst, AddressLiteral src) {
952 if (reachable(src)) {
953 Assembler::addsd(dst, as_Address(src));
954 } else {
955 lea(rscratch1, src);
956 Assembler::addsd(dst, Address(rscratch1, 0));
957 }
958 }
960 void MacroAssembler::addss(XMMRegister dst, AddressLiteral src) {
961 if (reachable(src)) {
962 addss(dst, as_Address(src));
963 } else {
964 lea(rscratch1, src);
965 addss(dst, Address(rscratch1, 0));
966 }
967 }
969 void MacroAssembler::align(int modulus) {
970 if (offset() % modulus != 0) {
971 nop(modulus - (offset() % modulus));
972 }
973 }
975 void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src) {
976 // Used in sign-masking with aligned address.
977 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
978 if (reachable(src)) {
979 Assembler::andpd(dst, as_Address(src));
980 } else {
981 lea(rscratch1, src);
982 Assembler::andpd(dst, Address(rscratch1, 0));
983 }
984 }
986 void MacroAssembler::andps(XMMRegister dst, AddressLiteral src) {
987 // Used in sign-masking with aligned address.
988 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
989 if (reachable(src)) {
990 Assembler::andps(dst, as_Address(src));
991 } else {
992 lea(rscratch1, src);
993 Assembler::andps(dst, Address(rscratch1, 0));
994 }
995 }
997 void MacroAssembler::andptr(Register dst, int32_t imm32) {
998 LP64_ONLY(andq(dst, imm32)) NOT_LP64(andl(dst, imm32));
999 }
1001 void MacroAssembler::atomic_incl(Address counter_addr) {
1002 if (os::is_MP())
1003 lock();
1004 incrementl(counter_addr);
1005 }
1007 void MacroAssembler::atomic_incl(AddressLiteral counter_addr, Register scr) {
1008 if (reachable(counter_addr)) {
1009 atomic_incl(as_Address(counter_addr));
1010 } else {
1011 lea(scr, counter_addr);
1012 atomic_incl(Address(scr, 0));
1013 }
1014 }
1016 #ifdef _LP64
1017 void MacroAssembler::atomic_incq(Address counter_addr) {
1018 if (os::is_MP())
1019 lock();
1020 incrementq(counter_addr);
1021 }
1023 void MacroAssembler::atomic_incq(AddressLiteral counter_addr, Register scr) {
1024 if (reachable(counter_addr)) {
1025 atomic_incq(as_Address(counter_addr));
1026 } else {
1027 lea(scr, counter_addr);
1028 atomic_incq(Address(scr, 0));
1029 }
1030 }
1031 #endif
1033 // Writes to successive stack pages until the given offset is reached, checking
1034 // for stack overflow plus shadow pages. This clobbers tmp.
1035 void MacroAssembler::bang_stack_size(Register size, Register tmp) {
1036 movptr(tmp, rsp);
1037 // Bang stack for total size given plus shadow page size.
1038 // Bang one page at a time because large size can bang beyond yellow and
1039 // red zones.
1040 Label loop;
1041 bind(loop);
1042 movl(Address(tmp, (-os::vm_page_size())), size );
1043 subptr(tmp, os::vm_page_size());
1044 subl(size, os::vm_page_size());
1045 jcc(Assembler::greater, loop);
1047 // Bang down shadow pages too.
1048 // At this point, (tmp-0) is the last address touched, so don't
1049 // touch it again. (It was touched as (tmp-pagesize) but then tmp
1050 // was post-decremented.) Skip this address by starting at i=1, and
1051 // touch a few more pages below. N.B. It is important to touch all
1052 // the way down to and including i=StackShadowPages.
1053 for (int i = 1; i < StackShadowPages; i++) {
1054 // this could be any sized move, but this can be a debugging crumb,
1055 // so the bigger the better.
1056 movptr(Address(tmp, (-i*os::vm_page_size())), size );
1057 }
1058 }
1060 int MacroAssembler::biased_locking_enter(Register lock_reg,
1061 Register obj_reg,
1062 Register swap_reg,
1063 Register tmp_reg,
1064 bool swap_reg_contains_mark,
1065 Label& done,
1066 Label* slow_case,
1067 BiasedLockingCounters* counters) {
1068 assert(UseBiasedLocking, "why call this otherwise?");
1069 assert(swap_reg == rax, "swap_reg must be rax for cmpxchgq");
1070 LP64_ONLY( assert(tmp_reg != noreg, "tmp_reg must be supplied"); )
1071 bool need_tmp_reg = false;
1072 if (tmp_reg == noreg) {
1073 need_tmp_reg = true;
1074 tmp_reg = lock_reg;
1075 assert_different_registers(lock_reg, obj_reg, swap_reg);
1076 } else {
1077 assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
1078 }
1079 assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
1080 Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes());
1081 Address saved_mark_addr(lock_reg, 0);
1083 if (PrintBiasedLockingStatistics && counters == NULL) {
1084 counters = BiasedLocking::counters();
1085 }
1086 // Biased locking
1087 // See whether the lock is currently biased toward our thread and
1088 // whether the epoch is still valid
1089 // Note that the runtime guarantees sufficient alignment of JavaThread
1090 // pointers to allow age to be placed into low bits
1091 // First check to see whether biasing is even enabled for this object
1092 Label cas_label;
1093 int null_check_offset = -1;
1094 if (!swap_reg_contains_mark) {
1095 null_check_offset = offset();
1096 movptr(swap_reg, mark_addr);
1097 }
1098 if (need_tmp_reg) {
1099 push(tmp_reg);
1100 }
1101 movptr(tmp_reg, swap_reg);
1102 andptr(tmp_reg, markOopDesc::biased_lock_mask_in_place);
1103 cmpptr(tmp_reg, markOopDesc::biased_lock_pattern);
1104 if (need_tmp_reg) {
1105 pop(tmp_reg);
1106 }
1107 jcc(Assembler::notEqual, cas_label);
1108 // The bias pattern is present in the object's header. Need to check
1109 // whether the bias owner and the epoch are both still current.
1110 #ifndef _LP64
1111 // Note that because there is no current thread register on x86_32 we
1112 // need to store off the mark word we read out of the object to
1113 // avoid reloading it and needing to recheck invariants below. This
1114 // store is unfortunate but it makes the overall code shorter and
1115 // simpler.
1116 movptr(saved_mark_addr, swap_reg);
1117 #endif
1118 if (need_tmp_reg) {
1119 push(tmp_reg);
1120 }
1121 if (swap_reg_contains_mark) {
1122 null_check_offset = offset();
1123 }
1124 load_prototype_header(tmp_reg, obj_reg);
1125 #ifdef _LP64
1126 orptr(tmp_reg, r15_thread);
1127 xorptr(tmp_reg, swap_reg);
1128 Register header_reg = tmp_reg;
1129 #else
1130 xorptr(tmp_reg, swap_reg);
1131 get_thread(swap_reg);
1132 xorptr(swap_reg, tmp_reg);
1133 Register header_reg = swap_reg;
1134 #endif
1135 andptr(header_reg, ~((int) markOopDesc::age_mask_in_place));
1136 if (need_tmp_reg) {
1137 pop(tmp_reg);
1138 }
1139 if (counters != NULL) {
1140 cond_inc32(Assembler::zero,
1141 ExternalAddress((address) counters->biased_lock_entry_count_addr()));
1142 }
1143 jcc(Assembler::equal, done);
1145 Label try_revoke_bias;
1146 Label try_rebias;
1148 // At this point we know that the header has the bias pattern and
1149 // that we are not the bias owner in the current epoch. We need to
1150 // figure out more details about the state of the header in order to
1151 // know what operations can be legally performed on the object's
1152 // header.
1154 // If the low three bits in the xor result aren't clear, that means
1155 // the prototype header is no longer biased and we have to revoke
1156 // the bias on this object.
1157 testptr(header_reg, markOopDesc::biased_lock_mask_in_place);
1158 jccb(Assembler::notZero, try_revoke_bias);
1160 // Biasing is still enabled for this data type. See whether the
1161 // epoch of the current bias is still valid, meaning that the epoch
1162 // bits of the mark word are equal to the epoch bits of the
1163 // prototype header. (Note that the prototype header's epoch bits
1164 // only change at a safepoint.) If not, attempt to rebias the object
1165 // toward the current thread. Note that we must be absolutely sure
1166 // that the current epoch is invalid in order to do this because
1167 // otherwise the manipulations it performs on the mark word are
1168 // illegal.
1169 testptr(header_reg, markOopDesc::epoch_mask_in_place);
1170 jccb(Assembler::notZero, try_rebias);
1172 // The epoch of the current bias is still valid but we know nothing
1173 // about the owner; it might be set or it might be clear. Try to
1174 // acquire the bias of the object using an atomic operation. If this
1175 // fails we will go in to the runtime to revoke the object's bias.
1176 // Note that we first construct the presumed unbiased header so we
1177 // don't accidentally blow away another thread's valid bias.
1178 NOT_LP64( movptr(swap_reg, saved_mark_addr); )
1179 andptr(swap_reg,
1180 markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
1181 if (need_tmp_reg) {
1182 push(tmp_reg);
1183 }
1184 #ifdef _LP64
1185 movptr(tmp_reg, swap_reg);
1186 orptr(tmp_reg, r15_thread);
1187 #else
1188 get_thread(tmp_reg);
1189 orptr(tmp_reg, swap_reg);
1190 #endif
1191 if (os::is_MP()) {
1192 lock();
1193 }
1194 cmpxchgptr(tmp_reg, mark_addr); // compare tmp_reg and swap_reg
1195 if (need_tmp_reg) {
1196 pop(tmp_reg);
1197 }
1198 // If the biasing toward our thread failed, this means that
1199 // another thread succeeded in biasing it toward itself and we
1200 // need to revoke that bias. The revocation will occur in the
1201 // interpreter runtime in the slow case.
1202 if (counters != NULL) {
1203 cond_inc32(Assembler::zero,
1204 ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr()));
1205 }
1206 if (slow_case != NULL) {
1207 jcc(Assembler::notZero, *slow_case);
1208 }
1209 jmp(done);
1211 bind(try_rebias);
1212 // At this point we know the epoch has expired, meaning that the
1213 // current "bias owner", if any, is actually invalid. Under these
1214 // circumstances _only_, we are allowed to use the current header's
1215 // value as the comparison value when doing the cas to acquire the
1216 // bias in the current epoch. In other words, we allow transfer of
1217 // the bias from one thread to another directly in this situation.
1218 //
1219 // FIXME: due to a lack of registers we currently blow away the age
1220 // bits in this situation. Should attempt to preserve them.
1221 if (need_tmp_reg) {
1222 push(tmp_reg);
1223 }
1224 load_prototype_header(tmp_reg, obj_reg);
1225 #ifdef _LP64
1226 orptr(tmp_reg, r15_thread);
1227 #else
1228 get_thread(swap_reg);
1229 orptr(tmp_reg, swap_reg);
1230 movptr(swap_reg, saved_mark_addr);
1231 #endif
1232 if (os::is_MP()) {
1233 lock();
1234 }
1235 cmpxchgptr(tmp_reg, mark_addr); // compare tmp_reg and swap_reg
1236 if (need_tmp_reg) {
1237 pop(tmp_reg);
1238 }
1239 // If the biasing toward our thread failed, then another thread
1240 // succeeded in biasing it toward itself and we need to revoke that
1241 // bias. The revocation will occur in the runtime in the slow case.
1242 if (counters != NULL) {
1243 cond_inc32(Assembler::zero,
1244 ExternalAddress((address) counters->rebiased_lock_entry_count_addr()));
1245 }
1246 if (slow_case != NULL) {
1247 jcc(Assembler::notZero, *slow_case);
1248 }
1249 jmp(done);
1251 bind(try_revoke_bias);
1252 // The prototype mark in the klass doesn't have the bias bit set any
1253 // more, indicating that objects of this data type are not supposed
1254 // to be biased any more. We are going to try to reset the mark of
1255 // this object to the prototype value and fall through to the
1256 // CAS-based locking scheme. Note that if our CAS fails, it means
1257 // that another thread raced us for the privilege of revoking the
1258 // bias of this particular object, so it's okay to continue in the
1259 // normal locking code.
1260 //
1261 // FIXME: due to a lack of registers we currently blow away the age
1262 // bits in this situation. Should attempt to preserve them.
1263 NOT_LP64( movptr(swap_reg, saved_mark_addr); )
1264 if (need_tmp_reg) {
1265 push(tmp_reg);
1266 }
1267 load_prototype_header(tmp_reg, obj_reg);
1268 if (os::is_MP()) {
1269 lock();
1270 }
1271 cmpxchgptr(tmp_reg, mark_addr); // compare tmp_reg and swap_reg
1272 if (need_tmp_reg) {
1273 pop(tmp_reg);
1274 }
1275 // Fall through to the normal CAS-based lock, because no matter what
1276 // the result of the above CAS, some thread must have succeeded in
1277 // removing the bias bit from the object's header.
1278 if (counters != NULL) {
1279 cond_inc32(Assembler::zero,
1280 ExternalAddress((address) counters->revoked_lock_entry_count_addr()));
1281 }
1283 bind(cas_label);
1285 return null_check_offset;
1286 }
1288 void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) {
1289 assert(UseBiasedLocking, "why call this otherwise?");
1291 // Check for biased locking unlock case, which is a no-op
1292 // Note: we do not have to check the thread ID for two reasons.
1293 // First, the interpreter checks for IllegalMonitorStateException at
1294 // a higher level. Second, if the bias was revoked while we held the
1295 // lock, the object could not be rebiased toward another thread, so
1296 // the bias bit would be clear.
1297 movptr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
1298 andptr(temp_reg, markOopDesc::biased_lock_mask_in_place);
1299 cmpptr(temp_reg, markOopDesc::biased_lock_pattern);
1300 jcc(Assembler::equal, done);
1301 }
1303 #ifdef COMPILER2
1305 #if INCLUDE_RTM_OPT
1307 // Update rtm_counters based on abort status
1308 // input: abort_status
1309 // rtm_counters (RTMLockingCounters*)
1310 // flags are killed
1311 void MacroAssembler::rtm_counters_update(Register abort_status, Register rtm_counters) {
1313 atomic_incptr(Address(rtm_counters, RTMLockingCounters::abort_count_offset()));
1314 if (PrintPreciseRTMLockingStatistics) {
1315 for (int i = 0; i < RTMLockingCounters::ABORT_STATUS_LIMIT; i++) {
1316 Label check_abort;
1317 testl(abort_status, (1<<i));
1318 jccb(Assembler::equal, check_abort);
1319 atomic_incptr(Address(rtm_counters, RTMLockingCounters::abortX_count_offset() + (i * sizeof(uintx))));
1320 bind(check_abort);
1321 }
1322 }
1323 }
1325 // Branch if (random & (count-1) != 0), count is 2^n
1326 // tmp, scr and flags are killed
1327 void MacroAssembler::branch_on_random_using_rdtsc(Register tmp, Register scr, int count, Label& brLabel) {
1328 assert(tmp == rax, "");
1329 assert(scr == rdx, "");
1330 rdtsc(); // modifies EDX:EAX
1331 andptr(tmp, count-1);
1332 jccb(Assembler::notZero, brLabel);
1333 }
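// Added illustration: with count == 64, say, 'tsc & 63' is nonzero for roughly 63
// of every 64 calls, so the branch is taken that often and the fall-through path --
// the counter increment at the call site -- runs at about a 1/64 sampling rate.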
1335 // Perform abort ratio calculation, set no_rtm bit if high ratio
1336 // input: rtm_counters_Reg (RTMLockingCounters* address)
1337 // tmpReg, rtm_counters_Reg and flags are killed
1338 void MacroAssembler::rtm_abort_ratio_calculation(Register tmpReg,
1339 Register rtm_counters_Reg,
1340 RTMLockingCounters* rtm_counters,
1341 Metadata* method_data) {
1342 Label L_done, L_check_always_rtm1, L_check_always_rtm2;
1344 if (RTMLockingCalculationDelay > 0) {
1345 // Delay calculation
1346 movptr(tmpReg, ExternalAddress((address) RTMLockingCounters::rtm_calculation_flag_addr()), tmpReg);
1347 testptr(tmpReg, tmpReg);
1348 jccb(Assembler::equal, L_done);
1349 }
1350 // Abort ratio calculation only if abort_count > RTMAbortThreshold
1351 // Aborted transactions = abort_count * 100
1352 // All transactions = total_count * RTMTotalCountIncrRate
1353 // Set no_rtm bit if (Aborted transactions >= All transactions * RTMAbortRatio)
1355 movptr(tmpReg, Address(rtm_counters_Reg, RTMLockingCounters::abort_count_offset()));
1356 cmpptr(tmpReg, RTMAbortThreshold);
1357 jccb(Assembler::below, L_check_always_rtm2);
1358 imulptr(tmpReg, tmpReg, 100);
1360 Register scrReg = rtm_counters_Reg;
1361 movptr(scrReg, Address(rtm_counters_Reg, RTMLockingCounters::total_count_offset()));
1362 imulptr(scrReg, scrReg, RTMTotalCountIncrRate);
1363 imulptr(scrReg, scrReg, RTMAbortRatio);
1364 cmpptr(tmpReg, scrReg);
1365 jccb(Assembler::below, L_check_always_rtm1);
1366 if (method_data != NULL) {
1367 // set rtm_state to "no rtm" in MDO
1368 mov_metadata(tmpReg, method_data);
1369 if (os::is_MP()) {
1370 lock();
1371 }
1372 orl(Address(tmpReg, MethodData::rtm_state_offset_in_bytes()), NoRTM);
1373 }
1374 jmpb(L_done);
1375 bind(L_check_always_rtm1);
1376 // Reload RTMLockingCounters* address
1377 lea(rtm_counters_Reg, ExternalAddress((address)rtm_counters));
1378 bind(L_check_always_rtm2);
1379 movptr(tmpReg, Address(rtm_counters_Reg, RTMLockingCounters::total_count_offset()));
1380 cmpptr(tmpReg, RTMLockingThreshold / RTMTotalCountIncrRate);
1381 jccb(Assembler::below, L_done);
1382 if (method_data != NULL) {
1383 // set rtm_state to "always rtm" in MDO
1384 mov_metadata(tmpReg, method_data);
1385 if (os::is_MP()) {
1386 lock();
1387 }
1388 orl(Address(tmpReg, MethodData::rtm_state_offset_in_bytes()), UseRTM);
1389 }
1390 bind(L_done);
1391 }
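// Added worked example (hypothetical values): with RTMTotalCountIncrRate == 64,
// RTMAbortRatio == 50, abort_count == 1000 and total_count == 30, the comparison is
//   1000 * 100 == 100000  versus  30 * 64 * 50 == 96000;
// 100000 >= 96000, so the no_rtm bit is set in the MDO and RTM is disabled for
// this lock site.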
1393 // Update counters and perform abort ratio calculation
1394 // input: abort_status_Reg
1395 // rtm_counters_Reg, flags are killed
1396 void MacroAssembler::rtm_profiling(Register abort_status_Reg,
1397 Register rtm_counters_Reg,
1398 RTMLockingCounters* rtm_counters,
1399 Metadata* method_data,
1400 bool profile_rtm) {
1402 assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
1403 // update rtm counters based on rax value at abort
1404 // reads abort_status_Reg, updates flags
1405 lea(rtm_counters_Reg, ExternalAddress((address)rtm_counters));
1406 rtm_counters_update(abort_status_Reg, rtm_counters_Reg);
1407 if (profile_rtm) {
1408 // Save abort status because abort_status_Reg is used by following code.
1409 if (RTMRetryCount > 0) {
1410 push(abort_status_Reg);
1411 }
1412 assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
1413 rtm_abort_ratio_calculation(abort_status_Reg, rtm_counters_Reg, rtm_counters, method_data);
1414 // restore abort status
1415 if (RTMRetryCount > 0) {
1416 pop(abort_status_Reg);
1417 }
1418 }
1419 }
1421 // Retry on abort if abort's status is 0x6: can retry (0x2) | memory conflict (0x4)
1422 // inputs: retry_count_Reg
1423 // : abort_status_Reg
1424 // output: retry_count_Reg decremented by 1
1425 // flags are killed
1426 void MacroAssembler::rtm_retry_lock_on_abort(Register retry_count_Reg, Register abort_status_Reg, Label& retryLabel) {
1427 Label doneRetry;
1428 assert(abort_status_Reg == rax, "");
1429 // The abort reason bits are in eax (see all states in rtmLocking.hpp)
1430 // 0x6 = conflict on which we can retry (0x2) | memory conflict (0x4)
1431 // if reason is in 0x6 and retry count != 0 then retry
1432 andptr(abort_status_Reg, 0x6);
1433 jccb(Assembler::zero, doneRetry);
1434 testl(retry_count_Reg, retry_count_Reg);
1435 jccb(Assembler::zero, doneRetry);
1436 pause();
1437 decrementl(retry_count_Reg);
1438 jmp(retryLabel);
1439 bind(doneRetry);
1440 }
1442 // Spin and retry if lock is busy,
1443 // inputs: box_Reg (monitor address)
1444 // : retry_count_Reg
1445 // output: retry_count_Reg decremented by 1
1446 // : clear z flag if retry count exceeded
1447 // tmp_Reg, scr_Reg, flags are killed
1448 void MacroAssembler::rtm_retry_lock_on_busy(Register retry_count_Reg, Register box_Reg,
1449 Register tmp_Reg, Register scr_Reg, Label& retryLabel) {
1450 Label SpinLoop, SpinExit, doneRetry;
1451 // Clean monitor_value bit to get valid pointer
1452 int owner_offset = ObjectMonitor::owner_offset_in_bytes() - markOopDesc::monitor_value;
1454 testl(retry_count_Reg, retry_count_Reg);
1455 jccb(Assembler::zero, doneRetry);
1456 decrementl(retry_count_Reg);
1457 movptr(scr_Reg, RTMSpinLoopCount);
1459 bind(SpinLoop);
1460 pause();
1461 decrementl(scr_Reg);
1462 jccb(Assembler::lessEqual, SpinExit);
1463 movptr(tmp_Reg, Address(box_Reg, owner_offset));
1464 testptr(tmp_Reg, tmp_Reg);
1465 jccb(Assembler::notZero, SpinLoop);
1467 bind(SpinExit);
1468 jmp(retryLabel);
1469 bind(doneRetry);
1470 incrementl(retry_count_Reg); // clear z flag
1471 }
1473 // Use RTM for normal stack locks
1474 // Input: objReg (object to lock)
1475 void MacroAssembler::rtm_stack_locking(Register objReg, Register tmpReg, Register scrReg,
1476 Register retry_on_abort_count_Reg,
1477 RTMLockingCounters* stack_rtm_counters,
1478 Metadata* method_data, bool profile_rtm,
1479 Label& DONE_LABEL, Label& IsInflated) {
1480 assert(UseRTMForStackLocks, "why call this otherwise?");
1481 assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking");
1482 assert(tmpReg == rax, "");
1483 assert(scrReg == rdx, "");
1484 Label L_rtm_retry, L_decrement_retry, L_on_abort;
1486 if (RTMRetryCount > 0) {
1487 movl(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort
1488 bind(L_rtm_retry);
1489 }
1490 movptr(tmpReg, Address(objReg, 0));
1491 testptr(tmpReg, markOopDesc::monitor_value); // inflated vs stack-locked|neutral|biased
1492 jcc(Assembler::notZero, IsInflated);
1494 if (PrintPreciseRTMLockingStatistics || profile_rtm) {
1495 Label L_noincrement;
1496 if (RTMTotalCountIncrRate > 1) {
1497 // tmpReg, scrReg and flags are killed
1498 branch_on_random_using_rdtsc(tmpReg, scrReg, (int)RTMTotalCountIncrRate, L_noincrement);
1499 }
1500 assert(stack_rtm_counters != NULL, "should not be NULL when profiling RTM");
1501 atomic_incptr(ExternalAddress((address)stack_rtm_counters->total_count_addr()), scrReg);
1502 bind(L_noincrement);
1503 }
1504 xbegin(L_on_abort);
1505 movptr(tmpReg, Address(objReg, 0)); // fetch markword
1506 andptr(tmpReg, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
1507 cmpptr(tmpReg, markOopDesc::unlocked_value); // bits = 001 unlocked
1508 jcc(Assembler::equal, DONE_LABEL); // all done if unlocked
1510 Register abort_status_Reg = tmpReg; // status of abort is stored in RAX
1511 if (UseRTMXendForLockBusy) {
1512 xend();
1513 movptr(abort_status_Reg, 0x2); // Set the abort status to 2 (so we can retry)
1514 jmp(L_decrement_retry);
1515 }
1516 else {
1517 xabort(0);
1518 }
1519 bind(L_on_abort);
1520 if (PrintPreciseRTMLockingStatistics || profile_rtm) {
1521 rtm_profiling(abort_status_Reg, scrReg, stack_rtm_counters, method_data, profile_rtm);
1522 }
1523 bind(L_decrement_retry);
1524 if (RTMRetryCount > 0) {
1525 // retry on lock abort if abort status is 'can retry' (0x2) or 'memory conflict' (0x4)
1526 rtm_retry_lock_on_abort(retry_on_abort_count_Reg, abort_status_Reg, L_rtm_retry);
1527 }
1528 }
1530 // Use RTM for inflating locks
1531 // inputs: objReg (object to lock)
1532 // boxReg (on-stack box address (displaced header location) - KILLED)
1533 // tmpReg (ObjectMonitor address + 2(monitor_value))
1534 void MacroAssembler::rtm_inflated_locking(Register objReg, Register boxReg, Register tmpReg,
1535 Register scrReg, Register retry_on_busy_count_Reg,
1536 Register retry_on_abort_count_Reg,
1537 RTMLockingCounters* rtm_counters,
1538 Metadata* method_data, bool profile_rtm,
1539 Label& DONE_LABEL) {
1540 assert(UseRTMLocking, "why call this otherwise?");
1541 assert(tmpReg == rax, "");
1542 assert(scrReg == rdx, "");
1543 Label L_rtm_retry, L_decrement_retry, L_on_abort;
1544 // Clean monitor_value bit to get valid pointer
1545 int owner_offset = ObjectMonitor::owner_offset_in_bytes() - markOopDesc::monitor_value;
1547 // Without the cast to int32_t, a movptr would destroy r10, which typically holds obj
1548 movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
1549 movptr(boxReg, tmpReg); // Save ObjectMonitor address
1551 if (RTMRetryCount > 0) {
1552 movl(retry_on_busy_count_Reg, RTMRetryCount); // Retry on lock busy
1553 movl(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort
1554 bind(L_rtm_retry);
1555 }
1556 if (PrintPreciseRTMLockingStatistics || profile_rtm) {
1557 Label L_noincrement;
1558 if (RTMTotalCountIncrRate > 1) {
1559 // tmpReg, scrReg and flags are killed
1560 branch_on_random_using_rdtsc(tmpReg, scrReg, (int)RTMTotalCountIncrRate, L_noincrement);
1561 }
1562 assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
1563 atomic_incptr(ExternalAddress((address)rtm_counters->total_count_addr()), scrReg);
1564 bind(L_noincrement);
1565 }
1566 xbegin(L_on_abort);
1567 movptr(tmpReg, Address(objReg, 0));
1568 movptr(tmpReg, Address(tmpReg, owner_offset));
1569 testptr(tmpReg, tmpReg);
1570 jcc(Assembler::zero, DONE_LABEL);
1571 if (UseRTMXendForLockBusy) {
1572 xend();
1573 jmp(L_decrement_retry);
1574 }
1575 else {
1576 xabort(0);
1577 }
1578 bind(L_on_abort);
1579 Register abort_status_Reg = tmpReg; // status of abort is stored in RAX
1580 if (PrintPreciseRTMLockingStatistics || profile_rtm) {
1581 rtm_profiling(abort_status_Reg, scrReg, rtm_counters, method_data, profile_rtm);
1582 }
1583 if (RTMRetryCount > 0) {
1584 // retry on lock abort if abort status is 'can retry' (0x2) or 'memory conflict' (0x4)
1585 rtm_retry_lock_on_abort(retry_on_abort_count_Reg, abort_status_Reg, L_rtm_retry);
1586 }
1588 movptr(tmpReg, Address(boxReg, owner_offset));
1589 testptr(tmpReg, tmpReg);
1590 jccb(Assembler::notZero, L_decrement_retry);
1592 // Appears unlocked - try to swing _owner from null to non-null.
1593 // Invariant: tmpReg == 0. tmpReg is EAX which is the implicit cmpxchg comparand.
1594 #ifdef _LP64
1595 Register threadReg = r15_thread;
1596 #else
1597 get_thread(scrReg);
1598 Register threadReg = scrReg;
1599 #endif
1600 if (os::is_MP()) {
1601 lock();
1602 }
1603 cmpxchgptr(threadReg, Address(boxReg, owner_offset)); // Updates tmpReg
1605 if (RTMRetryCount > 0) {
1606 // if success, done; else retry
1607 jccb(Assembler::equal, DONE_LABEL);
1608 bind(L_decrement_retry);
1609 // Spin and retry if lock is busy.
1610 rtm_retry_lock_on_busy(retry_on_busy_count_Reg, boxReg, tmpReg, scrReg, L_rtm_retry);
1611 }
1612 else {
1613 bind(L_decrement_retry);
1614 }
1615 }
1617 #endif // INCLUDE_RTM_OPT
1619 // Fast_Lock and Fast_Unlock used by C2
1621 // Because the transitions from emitted code to the runtime
1622 // monitorenter/exit helper stubs are so slow it's critical that
1623 // we inline both the stack-locking fast-path and the inflated fast path.
1624 //
1625 // See also: cmpFastLock and cmpFastUnlock.
1626 //
1627 // What follows is a specialized inline transliteration of the code
1628 // in slow_enter() and slow_exit(). If we're concerned about I$ bloat
1629 // another option would be to emit TrySlowEnter and TrySlowExit methods
1630 // at startup-time. These methods would accept arguments as
1631 // (rax,=Obj, rbx=Self, rcx=box, rdx=Scratch) and return success-failure
1632 // indications in the icc.ZFlag. Fast_Lock and Fast_Unlock would simply
1633 // marshal the arguments and emit calls to TrySlowEnter and TrySlowExit.
1634 // In practice, however, the # of lock sites is bounded and is usually small.
1635 // Besides the call overhead, TrySlowEnter and TrySlowExit might suffer
1636 // if the processor uses simple bimodal branch predictors keyed by EIP
1637 // Since the helper routines would be called from multiple synchronization
1638 // sites.
1639 //
1640 // An even better approach would be to write "MonitorEnter()" and "MonitorExit()"
1641 // in java - using j.u.c and unsafe - and just bind the lock and unlock sites
1642 // to those specialized methods. That'd give us a mostly platform-independent
1643 // implementation that the JITs could optimize and inline at their pleasure.
1644 // Done correctly, the only time we'd need to cross to native code would be
1645 // to park() or unpark() threads. We'd also need a few more unsafe operators
1646 // to (a) prevent compiler-JIT reordering of non-volatile accesses, and
1647 // (b) explicit barriers or fence operations.
1648 //
1649 // TODO:
1650 //
1651 // * Arrange for C2 to pass "Self" into Fast_Lock and Fast_Unlock in one of the registers (scr).
1652 // This avoids manifesting the Self pointer in the Fast_Lock and Fast_Unlock terminals.
1653 // Given TLAB allocation, Self is usually manifested in a register, so passing it into
1654 // the lock operators would typically be faster than reifying Self.
1655 //
1656 // * Ideally I'd define the primitives as:
1657 // fast_lock (nax Obj, nax box, EAX tmp, nax scr) where box, tmp and scr are KILLED.
1658 // fast_unlock (nax Obj, EAX box, nax tmp) where box and tmp are KILLED
1659 // Unfortunately ADLC bugs prevent us from expressing the ideal form.
1660 // Instead, we're stuck with the rather awkward and brittle register assignments below.
1661 // Furthermore the register assignments are overconstrained, possibly resulting in
1662 // sub-optimal code near the synchronization site.
1663 //
1664 // * Eliminate the sp-proximity tests and just use "== Self" tests instead.
1665 // Alternately, use a better sp-proximity test.
1666 //
1667 // * Currently ObjectMonitor._Owner can hold either an sp value or a (THREAD *) value.
1668 // Either one is sufficient to uniquely identify a thread.
1669 // TODO: eliminate use of sp in _owner and use get_thread(tr) instead.
1670 //
1671 // * Intrinsify notify() and notifyAll() for the common cases where the
1672 // object is locked by the calling thread but the waitlist is empty,
1673 // avoiding the expensive JNI calls to JVM_Notify() and JVM_NotifyAll().
1674 //
1675 // * use jccb and jmpb instead of jcc and jmp to improve code density.
1676 // But beware of excessive branch density on AMD Opterons.
1677 //
1678 // * Both Fast_Lock and Fast_Unlock set the ICC.ZF to indicate success
1679 // or failure of the fast-path. If the fast-path fails then we pass
1680 // control to the slow-path, typically in C. In Fast_Lock and
1681 // Fast_Unlock we often branch to DONE_LABEL, just to find that C2
1682 // will emit a conditional branch immediately after the node.
1683 // So we have branches to branches and lots of ICC.ZF games.
1684 // Instead, it might be better to have C2 pass a "FailureLabel"
1685 // into Fast_Lock and Fast_Unlock. In the case of success, control
1686 // will drop through the node. ICC.ZF is undefined at exit.
1687 // In the case of failure, the node will branch directly to the
1688 // FailureLabel
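// As an illustration of the ZF protocol described above, the code C2 emits
// around a lock node has roughly this shape (a sketch, not the actual sequence):
//   <Fast_Lock body>      // leaves ZF == 1 on success, ZF == 0 on failure
//   jne  slow_path        // ZF == 0 -> branch to the runtime monitorenter path
//   ...                   // ZF == 1 -> lock held, continue in compiled code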
1691 // obj: object to lock
1692 // box: on-stack box address (displaced header location) - KILLED
1693 // rax: tmp -- KILLED
1694 // scr: tmp -- KILLED
1695 void MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmpReg,
1696 Register scrReg, Register cx1Reg, Register cx2Reg,
1697 BiasedLockingCounters* counters,
1698 RTMLockingCounters* rtm_counters,
1699 RTMLockingCounters* stack_rtm_counters,
1700 Metadata* method_data,
1701 bool use_rtm, bool profile_rtm) {
1702   // Ensure the register assignments are disjoint
1703 assert(tmpReg == rax, "");
1705 if (use_rtm) {
1706 assert_different_registers(objReg, boxReg, tmpReg, scrReg, cx1Reg, cx2Reg);
1707 } else {
1708 assert(cx1Reg == noreg, "");
1709 assert(cx2Reg == noreg, "");
1710 assert_different_registers(objReg, boxReg, tmpReg, scrReg);
1711 }
1713 if (counters != NULL) {
1714 atomic_incl(ExternalAddress((address)counters->total_entry_count_addr()), scrReg);
1715 }
1716 if (EmitSync & 1) {
1717 // set box->dhw = unused_mark (3)
1718 // Force all sync thru slow-path: slow_enter() and slow_exit()
1719 movptr (Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
1720 cmpptr (rsp, (int32_t)NULL_WORD);
1721 } else
1722 if (EmitSync & 2) {
1723 Label DONE_LABEL ;
1724 if (UseBiasedLocking) {
1725 // Note: tmpReg maps to the swap_reg argument and scrReg to the tmp_reg argument.
1726 biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, counters);
1727 }
1729 movptr(tmpReg, Address(objReg, 0)); // fetch markword
1730 orptr (tmpReg, 0x1);
1731 movptr(Address(boxReg, 0), tmpReg); // Anticipate successful CAS
1732 if (os::is_MP()) {
1733 lock();
1734 }
1735 cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg
1736 jccb(Assembler::equal, DONE_LABEL);
1737 // Recursive locking
1738 subptr(tmpReg, rsp);
1739 andptr(tmpReg, (int32_t) (NOT_LP64(0xFFFFF003) LP64_ONLY(7 - os::vm_page_size())) );
1740 movptr(Address(boxReg, 0), tmpReg);
1741 bind(DONE_LABEL);
1742 } else {
1743 // Possible cases that we'll encounter in fast_lock
1744 // ------------------------------------------------
1745 // * Inflated
1746 // -- unlocked
1747 // -- Locked
1748 // = by self
1749 // = by other
1750 // * biased
1751 // -- by Self
1752 // -- by other
1753 // * neutral
1754 // * stack-locked
1755 // -- by self
1756 // = sp-proximity test hits
1757 // = sp-proximity test generates false-negative
1758 // -- by other
1759 //
1761 Label IsInflated, DONE_LABEL;
1763 // it's stack-locked, biased or neutral
1764 // TODO: optimize away redundant LDs of obj->mark and improve the markword triage
1765 // order to reduce the number of conditional branches in the most common cases.
1766 // Beware -- there's a subtle invariant that fetch of the markword
1767 // at [FETCH], below, will never observe a biased encoding (*101b).
1768 // If this invariant is not held we risk exclusion (safety) failure.
1769 if (UseBiasedLocking && !UseOptoBiasInlining) {
1770 biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, counters);
1771 }
1773 #if INCLUDE_RTM_OPT
1774 if (UseRTMForStackLocks && use_rtm) {
1775 rtm_stack_locking(objReg, tmpReg, scrReg, cx2Reg,
1776 stack_rtm_counters, method_data, profile_rtm,
1777 DONE_LABEL, IsInflated);
1778 }
1779 #endif // INCLUDE_RTM_OPT
1781 movptr(tmpReg, Address(objReg, 0)); // [FETCH]
1782 testptr(tmpReg, markOopDesc::monitor_value); // inflated vs stack-locked|neutral|biased
1783 jccb(Assembler::notZero, IsInflated);
1785 // Attempt stack-locking ...
1786 orptr (tmpReg, markOopDesc::unlocked_value);
1787 movptr(Address(boxReg, 0), tmpReg); // Anticipate successful CAS
1788 if (os::is_MP()) {
1789 lock();
1790 }
1791 cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg
1792 if (counters != NULL) {
1793 cond_inc32(Assembler::equal,
1794 ExternalAddress((address)counters->fast_path_entry_count_addr()));
1795 }
1796 jcc(Assembler::equal, DONE_LABEL); // Success
1798 // Recursive locking.
1799 // The object is stack-locked: markword contains stack pointer to BasicLock.
1800 // Locked by current thread if difference with current SP is less than one page.
1801 subptr(tmpReg, rsp);
1802     // The next instruction sets ZFlag == 1 (success) if the difference is less than one page.
1803 andptr(tmpReg, (int32_t) (NOT_LP64(0xFFFFF003) LP64_ONLY(7 - os::vm_page_size())) );
1804 movptr(Address(boxReg, 0), tmpReg);
1805 if (counters != NULL) {
1806 cond_inc32(Assembler::equal,
1807 ExternalAddress((address)counters->fast_path_entry_count_addr()));
1808 }
1809 jmp(DONE_LABEL);
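// Worked sketch of the recursion test above (LP64 shown, 4K pages assumed):
//   tmpReg  = markword - rsp       ; small delta if the stack-lock is in our frame
//   tmpReg &= (7 - page_size)      ; == 0x...FFFFF007: keeps tag bits and bits >= 12
//   ZF == 1 (success) iff the delta is below one page and the low tag bits are clear;
//   the resulting 0 stored into box->dhw is what marks the lock as recursive.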
1811 bind(IsInflated);
1812     // The object is inflated. tmpReg contains the ObjectMonitor* ORed with markOopDesc::monitor_value (2).
1814 #if INCLUDE_RTM_OPT
1815 // Use the same RTM locking code in 32- and 64-bit VM.
1816 if (use_rtm) {
1817 rtm_inflated_locking(objReg, boxReg, tmpReg, scrReg, cx1Reg, cx2Reg,
1818 rtm_counters, method_data, profile_rtm, DONE_LABEL);
1819 } else {
1820 #endif // INCLUDE_RTM_OPT
1822 #ifndef _LP64
1823 // The object is inflated.
1824 //
1825 // TODO-FIXME: eliminate the ugly use of manifest constants:
1826 // Use markOopDesc::monitor_value instead of "2".
1827     //   Use markOopDesc::unused_mark() instead of "3".
1828 // The tmpReg value is an objectMonitor reference ORed with
1829 // markOopDesc::monitor_value (2). We can either convert tmpReg to an
1830 // objectmonitor pointer by masking off the "2" bit or we can just
1831 // use tmpReg as an objectmonitor pointer but bias the objectmonitor
1832 // field offsets with "-2" to compensate for and annul the low-order tag bit.
1833 //
1834 // I use the latter as it avoids AGI stalls.
1835 // As such, we write "mov r, [tmpReg+OFFSETOF(Owner)-2]"
1836 // instead of "mov r, [tmpReg+OFFSETOF(Owner)]".
1837 //
1838 #define OFFSET_SKEWED(f) ((ObjectMonitor::f ## _offset_in_bytes())-2)
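// Illustrative use of the macro (the code below mostly spells the -2 out directly):
//   movptr(tmpReg, Address(tmpReg, OFFSET_SKEWED(owner)));  // tmpReg = m->_owner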
1840 // boxReg refers to the on-stack BasicLock in the current frame.
1841 // We'd like to write:
1842     //   set box->_displaced_header = markOopDesc::unused_mark(). Any non-0 value suffices.
1843     // This is convenient but results in an ST-before-CAS penalty. The following CAS suffers
1844 // additional latency as we have another ST in the store buffer that must drain.
1846 if (EmitSync & 8192) {
1847 movptr(Address(boxReg, 0), 3); // results in ST-before-CAS penalty
1848 get_thread (scrReg);
1849 movptr(boxReg, tmpReg); // consider: LEA box, [tmp-2]
1850 movptr(tmpReg, NULL_WORD); // consider: xor vs mov
1851 if (os::is_MP()) {
1852 lock();
1853 }
1854 cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
1855 } else
1856 if ((EmitSync & 128) == 0) { // avoid ST-before-CAS
1857 movptr(scrReg, boxReg);
1858 movptr(boxReg, tmpReg); // consider: LEA box, [tmp-2]
1860 // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes
1861 if ((EmitSync & 2048) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
1862 // prefetchw [eax + Offset(_owner)-2]
1863 prefetchw(Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
1864 }
1866 if ((EmitSync & 64) == 0) {
1867 // Optimistic form: consider XORL tmpReg,tmpReg
1868 movptr(tmpReg, NULL_WORD);
1869 } else {
1870 // Can suffer RTS->RTO upgrades on shared or cold $ lines
1871 // Test-And-CAS instead of CAS
1872 movptr(tmpReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)); // rax, = m->_owner
1873 testptr(tmpReg, tmpReg); // Locked ?
1874 jccb (Assembler::notZero, DONE_LABEL);
1875 }
1877 // Appears unlocked - try to swing _owner from null to non-null.
1878 // Ideally, I'd manifest "Self" with get_thread and then attempt
1879 // to CAS the register containing Self into m->Owner.
1880 // But we don't have enough registers, so instead we can either try to CAS
1881 // rsp or the address of the box (in scr) into &m->owner. If the CAS succeeds
1882 // we later store "Self" into m->Owner. Transiently storing a stack address
1883 // (rsp or the address of the box) into m->owner is harmless.
1884 // Invariant: tmpReg == 0. tmpReg is EAX which is the implicit cmpxchg comparand.
1885 if (os::is_MP()) {
1886 lock();
1887 }
1888 cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
1889 movptr(Address(scrReg, 0), 3); // box->_displaced_header = 3
1890 jccb (Assembler::notZero, DONE_LABEL);
1891 get_thread (scrReg); // beware: clobbers ICCs
1892 movptr(Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2), scrReg);
1893 xorptr(boxReg, boxReg); // set icc.ZFlag = 1 to indicate success
1895 // If the CAS fails we can either retry or pass control to the slow-path.
1896 // We use the latter tactic.
1897 // Pass the CAS result in the icc.ZFlag into DONE_LABEL
1898 // If the CAS was successful ...
1899 // Self has acquired the lock
1900 // Invariant: m->_recursions should already be 0, so we don't need to explicitly set it.
1901 // Intentional fall-through into DONE_LABEL ...
1902 } else {
1903 movptr(Address(boxReg, 0), intptr_t(markOopDesc::unused_mark())); // results in ST-before-CAS penalty
1904 movptr(boxReg, tmpReg);
1906 // Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes
1907 if ((EmitSync & 2048) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
1908 // prefetchw [eax + Offset(_owner)-2]
1909 prefetchw(Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
1910 }
1912 if ((EmitSync & 64) == 0) {
1913 // Optimistic form
1914 xorptr (tmpReg, tmpReg);
1915 } else {
1916 // Can suffer RTS->RTO upgrades on shared or cold $ lines
1917 movptr(tmpReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)); // rax, = m->_owner
1918 testptr(tmpReg, tmpReg); // Locked ?
1919 jccb (Assembler::notZero, DONE_LABEL);
1920 }
1922 // Appears unlocked - try to swing _owner from null to non-null.
1923 // Use either "Self" (in scr) or rsp as thread identity in _owner.
1924 // Invariant: tmpReg == 0. tmpReg is EAX which is the implicit cmpxchg comparand.
1925 get_thread (scrReg);
1926 if (os::is_MP()) {
1927 lock();
1928 }
1929 cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
1931 // If the CAS fails we can either retry or pass control to the slow-path.
1932 // We use the latter tactic.
1933 // Pass the CAS result in the icc.ZFlag into DONE_LABEL
1934 // If the CAS was successful ...
1935 // Self has acquired the lock
1936 // Invariant: m->_recursions should already be 0, so we don't need to explicitly set it.
1937 // Intentional fall-through into DONE_LABEL ...
1938 }
1939 #else // _LP64
1940 // It's inflated
1942 // TODO: someday avoid the ST-before-CAS penalty by
1943 // relocating (deferring) the following ST.
1944 // We should also think about trying a CAS without having
1945 // fetched _owner. If the CAS is successful we may
1946 // avoid an RTO->RTS upgrade on the $line.
1948 // Without cast to int32_t a movptr will destroy r10 which is typically obj
1949 movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
1951 movptr (boxReg, tmpReg);
1952 movptr (tmpReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
1953 testptr(tmpReg, tmpReg);
1954 jccb (Assembler::notZero, DONE_LABEL);
1956 // It's inflated and appears unlocked
1957 if (os::is_MP()) {
1958 lock();
1959 }
1960 cmpxchgptr(r15_thread, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2));
1961 // Intentional fall-through into DONE_LABEL ...
1962 #endif // _LP64
1964 #if INCLUDE_RTM_OPT
1965 } // use_rtm()
1966 #endif
1967 // DONE_LABEL is a hot target - we'd really like to place it at the
1968 // start of cache line by padding with NOPs.
1969 // See the AMD and Intel software optimization manuals for the
1970 // most efficient "long" NOP encodings.
1971 // Unfortunately none of our alignment mechanisms suffice.
1972 bind(DONE_LABEL);
1974 // At DONE_LABEL the icc ZFlag is set as follows ...
1975 // Fast_Unlock uses the same protocol.
1976 // ZFlag == 1 -> Success
1977 // ZFlag == 0 -> Failure - force control through the slow-path
1978 }
1979 }
1981 // obj: object to unlock
1982 // box: box address (displaced header location), killed. Must be EAX.
1983 // tmp: killed, cannot be obj nor box.
1984 //
1985 // Some commentary on balanced locking:
1986 //
1987 // Fast_Lock and Fast_Unlock are emitted only for provably balanced lock sites.
1988 // Methods that don't have provably balanced locking are forced to run in the
1989 // interpreter - such methods won't be compiled to use fast_lock and fast_unlock.
1990 // The interpreter provides two properties:
1991 // I1: At return-time the interpreter automatically and quietly unlocks any
1992 // I1:  At return-time the interpreter automatically and quietly unlocks any
1993 // interpreter maintains an on-stack list of locks currently held by
1994 // a frame.
1995 // I2:  If a method attempts to unlock an object that is not held by
1996 //      the frame, the interpreter throws IMSX.
1997 //
1998 // Let's say A(), which has provably balanced locking, acquires O and then calls B().
1999 // B() doesn't have provably balanced locking so it runs in the interpreter.
2000 // Control returns to A() and A() unlocks O. By I1 and I2, above, we know that O
2001 // is still locked by A().
2002 //
2003 // The only other source of unbalanced locking would be JNI. The "Java Native Interface:
2004 // Programmer's Guide and Specification" claims that an object locked by jni_monitorenter
2005 // should not be unlocked by "normal" java-level locking and vice-versa. The specification
2006 // doesn't specify what will occur if a program engages in such mixed-mode locking, however.
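// A concrete shape of the A()/B() scenario above (illustrative Java, not from this code):
//   synchronized void a() { b(); }  // provably balanced -> compiled, uses fast_lock/fast_unlock
//   void b() { ... }                // not provably balanced -> runs interpreted
// By I1 and I2 the interpreter unwinds anything b() leaves locked, so when control
// returns to a() the object a() locked is still locked by a().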
2008 void MacroAssembler::fast_unlock(Register objReg, Register boxReg, Register tmpReg, bool use_rtm) {
2009 assert(boxReg == rax, "");
2010 assert_different_registers(objReg, boxReg, tmpReg);
2012 if (EmitSync & 4) {
2013 // Disable - inhibit all inlining. Force control through the slow-path
2014 cmpptr (rsp, 0);
2015 } else
2016 if (EmitSync & 8) {
2017 Label DONE_LABEL;
2018 if (UseBiasedLocking) {
2019 biased_locking_exit(objReg, tmpReg, DONE_LABEL);
2020 }
2021 // Classic stack-locking code ...
2022 // Check whether the displaced header is 0
2023 //(=> recursive unlock)
2024 movptr(tmpReg, Address(boxReg, 0));
2025 testptr(tmpReg, tmpReg);
2026 jccb(Assembler::zero, DONE_LABEL);
2027 // If not recursive lock, reset the header to displaced header
2028 if (os::is_MP()) {
2029 lock();
2030 }
2031 cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
2032 bind(DONE_LABEL);
2033 } else {
2034 Label DONE_LABEL, Stacked, CheckSucc;
2036 // Critically, the biased locking test must have precedence over
2037 // and appear before the (box->dhw == 0) recursive stack-lock test.
2038 if (UseBiasedLocking && !UseOptoBiasInlining) {
2039 biased_locking_exit(objReg, tmpReg, DONE_LABEL);
2040 }
2042 #if INCLUDE_RTM_OPT
2043 if (UseRTMForStackLocks && use_rtm) {
2044 assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking");
2045 Label L_regular_unlock;
2046 movptr(tmpReg, Address(objReg, 0)); // fetch markword
2047 andptr(tmpReg, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
2048 cmpptr(tmpReg, markOopDesc::unlocked_value); // bits = 001 unlocked
2049 jccb(Assembler::notEqual, L_regular_unlock); // if !HLE RegularLock
2050 xend(); // otherwise end...
2051 jmp(DONE_LABEL); // ... and we're done
2052 bind(L_regular_unlock);
2053 }
2054 #endif
2056 cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD); // Examine the displaced header
2057 jcc (Assembler::zero, DONE_LABEL); // 0 indicates recursive stack-lock
2058 movptr(tmpReg, Address(objReg, 0)); // Examine the object's markword
2059 testptr(tmpReg, markOopDesc::monitor_value); // Inflated?
2060 jccb (Assembler::zero, Stacked);
2062 // It's inflated.
2063 #if INCLUDE_RTM_OPT
2064 if (use_rtm) {
2065 Label L_regular_inflated_unlock;
2066 // Clean monitor_value bit to get valid pointer
2067 int owner_offset = ObjectMonitor::owner_offset_in_bytes() - markOopDesc::monitor_value;
2068 movptr(boxReg, Address(tmpReg, owner_offset));
2069 testptr(boxReg, boxReg);
2070 jccb(Assembler::notZero, L_regular_inflated_unlock);
2071 xend();
2072 jmpb(DONE_LABEL);
2073 bind(L_regular_inflated_unlock);
2074 }
2075 #endif
2077 // Despite our balanced locking property we still check that m->_owner == Self
2078 // as java routines or native JNI code called by this thread might
2079 // have released the lock.
2080 // Refer to the comments in synchronizer.cpp for how we might encode extra
2081 // state in _succ so we can avoid fetching EntryList|cxq.
2082 //
2083 // I'd like to add more cases in fast_lock() and fast_unlock() --
2084 // such as recursive enter and exit -- but we have to be wary of
2085 // I$ bloat, T$ effects and BP$ effects.
2086 //
2087 // If there's no contention try a 1-0 exit. That is, exit without
2088 // a costly MEMBAR or CAS. See synchronizer.cpp for details on how
2089 // we detect and recover from the race that the 1-0 exit admits.
2090 //
2091 // Conceptually Fast_Unlock() must execute a STST|LDST "release" barrier
2092 // before it STs null into _owner, releasing the lock. Updates
2093 // to data protected by the critical section must be visible before
2094 // we drop the lock (and thus before any other thread could acquire
2095 // the lock and observe the fields protected by the lock).
2096     // IA32's memory model is TSO, so STs are ordered with respect to
2097 // each other and there's no need for an explicit barrier (fence).
2098 // See also http://gee.cs.oswego.edu/dl/jmm/cookbook.html.
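// The resulting 1-0 exit has the shape discussed in synchronizer.cpp (sketch; on
// x86 the barrier can be realized with a lock:add to the top of stack, as below):
//   ST  m->_owner = 0      ; drop the lock
//   MEMBAR (StoreLoad)
//   LD  m->_succ           ; ratify a successor exists, else CAS ourselves back in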
2099 #ifndef _LP64
2100 get_thread (boxReg);
2101 if ((EmitSync & 4096) && VM_Version::supports_3dnow_prefetch() && os::is_MP()) {
2102 // prefetchw [ebx + Offset(_owner)-2]
2103 prefetchw(Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
2104 }
2106 // Note that we could employ various encoding schemes to reduce
2107 // the number of loads below (currently 4) to just 2 or 3.
2108 // Refer to the comments in synchronizer.cpp.
2109 // In practice the chain of fetches doesn't seem to impact performance, however.
2110 if ((EmitSync & 65536) == 0 && (EmitSync & 256)) {
2111        // Attempt to reduce branch density to help AMD's branch predictor.
2112 xorptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
2113 orptr(boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2));
2114 orptr(boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2));
2115 orptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2));
2116 jccb (Assembler::notZero, DONE_LABEL);
2117 movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), NULL_WORD);
2118 jmpb (DONE_LABEL);
2119 } else {
2120 xorptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
2121 orptr(boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2));
2122 jccb (Assembler::notZero, DONE_LABEL);
2123 movptr(boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2));
2124 orptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2));
2125 jccb (Assembler::notZero, CheckSucc);
2126 movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), NULL_WORD);
2127 jmpb (DONE_LABEL);
2128 }
2130     // The following code fragment (EmitSync & 65536) improves the performance of
2131 // contended applications and contended synchronization microbenchmarks.
2132 // Unfortunately the emission of the code - even though not executed - causes regressions
2133 // in scimark and jetstream, evidently because of $ effects. Replacing the code
2134 // with an equal number of never-executed NOPs results in the same regression.
2135 // We leave it off by default.
2137 if ((EmitSync & 65536) != 0) {
2138 Label LSuccess, LGoSlowPath ;
2140 bind (CheckSucc);
2142 // Optional pre-test ... it's safe to elide this
2143 if ((EmitSync & 16) == 0) {
2144 cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD);
2145 jccb (Assembler::zero, LGoSlowPath);
2146 }
2148 // We have a classic Dekker-style idiom:
2149 // ST m->_owner = 0 ; MEMBAR; LD m->_succ
2150 // There are a number of ways to implement the barrier:
2151 // (1) lock:andl &m->_owner, 0
2152        //     is fast, but masm doesn't currently support the "ANDL M,IMM32" form.
2153 // LOCK: ANDL [ebx+Offset(_Owner)-2], 0
2154 // Encodes as 81 31 OFF32 IMM32 or 83 63 OFF8 IMM8
2155 // (2) If supported, an explicit MFENCE is appealing.
2156 // In older IA32 processors MFENCE is slower than lock:add or xchg
2157        //     particularly if the write-buffer is full, as might be the case when
2158        //     stores closely precede the fence or fence-equivalent instruction.
2159 // In more modern implementations MFENCE appears faster, however.
2160 // (3) In lieu of an explicit fence, use lock:addl to the top-of-stack
2161 // The $lines underlying the top-of-stack should be in M-state.
2162 // The locked add instruction is serializing, of course.
2163 // (4) Use xchg, which is serializing
2164 // mov boxReg, 0; xchgl boxReg, [tmpReg + Offset(_owner)-2] also works
2165 // (5) ST m->_owner = 0 and then execute lock:orl &m->_succ, 0.
2166 // The integer condition codes will tell us if succ was 0.
2167 // Since _succ and _owner should reside in the same $line and
2168 // we just stored into _owner, it's likely that the $line
2169 // remains in M-state for the lock:orl.
2170 //
2171 // We currently use (3), although it's likely that switching to (2)
2172 // is correct for the future.
2174 movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), NULL_WORD);
2175 if (os::is_MP()) {
2176 if (VM_Version::supports_sse2() && 1 == FenceInstruction) {
2177 mfence();
2178 } else {
2179 lock (); addptr(Address(rsp, 0), 0);
2180 }
2181 }
2182 // Ratify _succ remains non-null
2183 cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), 0);
2184 jccb (Assembler::notZero, LSuccess);
2186 xorptr(boxReg, boxReg); // box is really EAX
2187 if (os::is_MP()) { lock(); }
2188 cmpxchgptr(rsp, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
2189 jccb (Assembler::notEqual, LSuccess);
2190        // Since we're low on registers we installed rsp as a placeholder in _owner.
2191        // Now install Self over rsp. This is safe as we're transitioning from
2192        // non-null to non-null
2193 get_thread (boxReg);
2194 movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), boxReg);
2195 // Intentional fall-through into LGoSlowPath ...
2197 bind (LGoSlowPath);
2198 orptr(boxReg, 1); // set ICC.ZF=0 to indicate failure
2199 jmpb (DONE_LABEL);
2201 bind (LSuccess);
2202 xorptr(boxReg, boxReg); // set ICC.ZF=1 to indicate success
2203 jmpb (DONE_LABEL);
2204 }
2206 bind (Stacked);
2207 // It's not inflated and it's not recursively stack-locked and it's not biased.
2208 // It must be stack-locked.
2209 // Try to reset the header to displaced header.
2210 // The "box" value on the stack is stable, so we can reload
2211 // and be assured we observe the same value as above.
2212 movptr(tmpReg, Address(boxReg, 0));
2213 if (os::is_MP()) {
2214 lock();
2215 }
2216 cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
2217     // Intentional fall-through into DONE_LABEL
2219 // DONE_LABEL is a hot target - we'd really like to place it at the
2220 // start of cache line by padding with NOPs.
2221 // See the AMD and Intel software optimization manuals for the
2222 // most efficient "long" NOP encodings.
2223 // Unfortunately none of our alignment mechanisms suffice.
2224 if ((EmitSync & 65536) == 0) {
2225 bind (CheckSucc);
2226 }
2227 #else // _LP64
2228 // It's inflated
2229 movptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
2230 xorptr(boxReg, r15_thread);
2231 orptr (boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2));
2232 jccb (Assembler::notZero, DONE_LABEL);
2233 movptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2));
2234 orptr (boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2));
2235 jccb (Assembler::notZero, CheckSucc);
2236 movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD);
2237 jmpb (DONE_LABEL);
2239 if ((EmitSync & 65536) == 0) {
2240 Label LSuccess, LGoSlowPath ;
2241 bind (CheckSucc);
2242 cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD);
2243 jccb (Assembler::zero, LGoSlowPath);
2245        // I'd much rather use lock:andl m->_owner, 0 as it's faster than the
2246        // explicit ST;MEMBAR combination, but masm doesn't currently support
2247 // "ANDQ M,IMM". Don't use MFENCE here. lock:add to TOS, xchg, etc
2248 // are all faster when the write buffer is populated.
2249 movptr (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD);
2250 if (os::is_MP()) {
2251 lock (); addl (Address(rsp, 0), 0);
2252 }
2253 cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD);
2254 jccb (Assembler::notZero, LSuccess);
2256 movptr (boxReg, (int32_t)NULL_WORD); // box is really EAX
2257 if (os::is_MP()) { lock(); }
2258 cmpxchgptr(r15_thread, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
2259 jccb (Assembler::notEqual, LSuccess);
2260 // Intentional fall-through into slow-path
2262 bind (LGoSlowPath);
2263 orl (boxReg, 1); // set ICC.ZF=0 to indicate failure
2264 jmpb (DONE_LABEL);
2266 bind (LSuccess);
2267 testl (boxReg, 0); // set ICC.ZF=1 to indicate success
2268 jmpb (DONE_LABEL);
2269 }
2271 bind (Stacked);
2272 movptr(tmpReg, Address (boxReg, 0)); // re-fetch
2273 if (os::is_MP()) { lock(); }
2274 cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
2276 if (EmitSync & 65536) {
2277 bind (CheckSucc);
2278 }
2279 #endif
2280 bind(DONE_LABEL);
2281 // Avoid branch to branch on AMD processors
2282 if (EmitSync & 32768) {
2283 nop();
2284 }
2285 }
2286 }
2287 #endif // COMPILER2
2289 void MacroAssembler::c2bool(Register x) {
2290 // implements x == 0 ? 0 : 1
2291 // note: must only look at least-significant byte of x
2292 // since C-style booleans are stored in one byte
2293 // only! (was bug)
2294 andl(x, 0xFF);
2295 setb(Assembler::notZero, x);
2296 }
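// Worked example: x == 0x0100 -> andl leaves 0x00, ZF == 1, setb(notZero) writes 0;
//                 x == 0x0001 -> andl leaves 0x01, ZF == 0, setb(notZero) writes 1.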
2298 // Wouldn't need if AddressLiteral version had new name
2299 void MacroAssembler::call(Label& L, relocInfo::relocType rtype) {
2300 Assembler::call(L, rtype);
2301 }
2303 void MacroAssembler::call(Register entry) {
2304 Assembler::call(entry);
2305 }
2307 void MacroAssembler::call(AddressLiteral entry) {
2308 if (reachable(entry)) {
2309 Assembler::call_literal(entry.target(), entry.rspec());
2310 } else {
2311 lea(rscratch1, entry);
2312 Assembler::call(rscratch1);
2313 }
2314 }
2316 void MacroAssembler::ic_call(address entry) {
2317 RelocationHolder rh = virtual_call_Relocation::spec(pc());
2318 movptr(rax, (intptr_t)Universe::non_oop_word());
2319 call(AddressLiteral(entry, rh));
2320 }
2322 // Implementation of call_VM versions
2324 void MacroAssembler::call_VM(Register oop_result,
2325 address entry_point,
2326 bool check_exceptions) {
2327 Label C, E;
2328 call(C, relocInfo::none);
2329 jmp(E);
2331 bind(C);
2332 call_VM_helper(oop_result, entry_point, 0, check_exceptions);
2333 ret(0);
2335 bind(E);
2336 }
2338 void MacroAssembler::call_VM(Register oop_result,
2339 address entry_point,
2340 Register arg_1,
2341 bool check_exceptions) {
2342 Label C, E;
2343 call(C, relocInfo::none);
2344 jmp(E);
2346 bind(C);
2347 pass_arg1(this, arg_1);
2348 call_VM_helper(oop_result, entry_point, 1, check_exceptions);
2349 ret(0);
2351 bind(E);
2352 }
2354 void MacroAssembler::call_VM(Register oop_result,
2355 address entry_point,
2356 Register arg_1,
2357 Register arg_2,
2358 bool check_exceptions) {
2359 Label C, E;
2360 call(C, relocInfo::none);
2361 jmp(E);
2363 bind(C);
2365 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2367 pass_arg2(this, arg_2);
2368 pass_arg1(this, arg_1);
2369 call_VM_helper(oop_result, entry_point, 2, check_exceptions);
2370 ret(0);
2372 bind(E);
2373 }
2375 void MacroAssembler::call_VM(Register oop_result,
2376 address entry_point,
2377 Register arg_1,
2378 Register arg_2,
2379 Register arg_3,
2380 bool check_exceptions) {
2381 Label C, E;
2382 call(C, relocInfo::none);
2383 jmp(E);
2385 bind(C);
2387 LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
2388 LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
2389 pass_arg3(this, arg_3);
2391 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2392 pass_arg2(this, arg_2);
2394 pass_arg1(this, arg_1);
2395 call_VM_helper(oop_result, entry_point, 3, check_exceptions);
2396 ret(0);
2398 bind(E);
2399 }
2401 void MacroAssembler::call_VM(Register oop_result,
2402 Register last_java_sp,
2403 address entry_point,
2404 int number_of_arguments,
2405 bool check_exceptions) {
2406 Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
2407 call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
2408 }
2410 void MacroAssembler::call_VM(Register oop_result,
2411 Register last_java_sp,
2412 address entry_point,
2413 Register arg_1,
2414 bool check_exceptions) {
2415 pass_arg1(this, arg_1);
2416 call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
2417 }
2419 void MacroAssembler::call_VM(Register oop_result,
2420 Register last_java_sp,
2421 address entry_point,
2422 Register arg_1,
2423 Register arg_2,
2424 bool check_exceptions) {
2426 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2427 pass_arg2(this, arg_2);
2428 pass_arg1(this, arg_1);
2429 call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
2430 }
2432 void MacroAssembler::call_VM(Register oop_result,
2433 Register last_java_sp,
2434 address entry_point,
2435 Register arg_1,
2436 Register arg_2,
2437 Register arg_3,
2438 bool check_exceptions) {
2439 LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
2440 LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
2441 pass_arg3(this, arg_3);
2442 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2443 pass_arg2(this, arg_2);
2444 pass_arg1(this, arg_1);
2445 call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
2446 }
2448 void MacroAssembler::super_call_VM(Register oop_result,
2449 Register last_java_sp,
2450 address entry_point,
2451 int number_of_arguments,
2452 bool check_exceptions) {
2453 Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
2454 MacroAssembler::call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
2455 }
2457 void MacroAssembler::super_call_VM(Register oop_result,
2458 Register last_java_sp,
2459 address entry_point,
2460 Register arg_1,
2461 bool check_exceptions) {
2462 pass_arg1(this, arg_1);
2463 super_call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
2464 }
2466 void MacroAssembler::super_call_VM(Register oop_result,
2467 Register last_java_sp,
2468 address entry_point,
2469 Register arg_1,
2470 Register arg_2,
2471 bool check_exceptions) {
2473 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2474 pass_arg2(this, arg_2);
2475 pass_arg1(this, arg_1);
2476 super_call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
2477 }
2479 void MacroAssembler::super_call_VM(Register oop_result,
2480 Register last_java_sp,
2481 address entry_point,
2482 Register arg_1,
2483 Register arg_2,
2484 Register arg_3,
2485 bool check_exceptions) {
2486 LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
2487 LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
2488 pass_arg3(this, arg_3);
2489 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2490 pass_arg2(this, arg_2);
2491 pass_arg1(this, arg_1);
2492 super_call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
2493 }
2495 void MacroAssembler::call_VM_base(Register oop_result,
2496 Register java_thread,
2497 Register last_java_sp,
2498 address entry_point,
2499 int number_of_arguments,
2500 bool check_exceptions) {
2501 // determine java_thread register
2502 if (!java_thread->is_valid()) {
2503 #ifdef _LP64
2504 java_thread = r15_thread;
2505 #else
2506 java_thread = rdi;
2507 get_thread(java_thread);
2508 #endif // LP64
2509 }
2510 // determine last_java_sp register
2511 if (!last_java_sp->is_valid()) {
2512 last_java_sp = rsp;
2513 }
2514 // debugging support
2515 assert(number_of_arguments >= 0 , "cannot have negative number of arguments");
2516 LP64_ONLY(assert(java_thread == r15_thread, "unexpected register"));
2517 #ifdef ASSERT
2518 // TraceBytecodes does not use r12 but saves it over the call, so don't verify
2519 // r12 is the heapbase.
2520 LP64_ONLY(if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
2521 #endif // ASSERT
2523 assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result");
2524 assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");
2526 // push java thread (becomes first argument of C function)
2528 NOT_LP64(push(java_thread); number_of_arguments++);
2529 LP64_ONLY(mov(c_rarg0, r15_thread));
2531 // set last Java frame before call
2532 assert(last_java_sp != rbp, "can't use ebp/rbp");
2534 // Only interpreter should have to set fp
2535 set_last_Java_frame(java_thread, last_java_sp, rbp, NULL);
2537 // do the call, remove parameters
2538 MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
2540 // restore the thread (cannot use the pushed argument since arguments
2541 // may be overwritten by C code generated by an optimizing compiler);
2542   // however we can use the register value directly if it is callee saved.
2543 if (LP64_ONLY(true ||) java_thread == rdi || java_thread == rsi) {
2544 // rdi & rsi (also r15) are callee saved -> nothing to do
2545 #ifdef ASSERT
2546 guarantee(java_thread != rax, "change this code");
2547 push(rax);
2548 { Label L;
2549 get_thread(rax);
2550 cmpptr(java_thread, rax);
2551 jcc(Assembler::equal, L);
2552 STOP("MacroAssembler::call_VM_base: rdi not callee saved?");
2553 bind(L);
2554 }
2555 pop(rax);
2556 #endif
2557 } else {
2558 get_thread(java_thread);
2559 }
2560 // reset last Java frame
2561 // Only interpreter should have to clear fp
2562 reset_last_Java_frame(java_thread, true);
2564 #ifndef CC_INTERP
2565 // C++ interp handles this in the interpreter
2566 check_and_handle_popframe(java_thread);
2567 check_and_handle_earlyret(java_thread);
2568 #endif /* CC_INTERP */
2570 if (check_exceptions) {
2571 // check for pending exceptions (java_thread is set upon return)
2572 cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
2573 #ifndef _LP64
2574 jump_cc(Assembler::notEqual,
2575 RuntimeAddress(StubRoutines::forward_exception_entry()));
2576 #else
2577     // This used to conditionally jump to forward_exception; however, it is
2578     // possible, if we relocate, that the branch will not reach. So we must jump
2579     // around it so we can always reach the target.
2581 Label ok;
2582 jcc(Assembler::equal, ok);
2583 jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
2584 bind(ok);
2585 #endif // LP64
2586 }
2588 // get oop result if there is one and reset the value in the thread
2589 if (oop_result->is_valid()) {
2590 get_vm_result(oop_result, java_thread);
2591 }
2592 }
2594 void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
2596 // Calculate the value for last_Java_sp
2597 // somewhat subtle. call_VM does an intermediate call
2598 // which places a return address on the stack just under the
2599   // stack pointer, as if the user had finished with it. This allows
2600   // us to retrieve last_Java_pc from last_Java_sp[-1].
2601   // On 32bit we then have to push additional args on the stack to accomplish
2602   // the actual requested call. On 64bit call_VM can only use register args
2603 // so the only extra space is the return address that call_VM created.
2604 // This hopefully explains the calculations here.
2606 #ifdef _LP64
2607 // We've pushed one address, correct last_Java_sp
2608 lea(rax, Address(rsp, wordSize));
2609 #else
2610 lea(rax, Address(rsp, (1 + number_of_arguments) * wordSize));
2611 #endif // LP64
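// Illustrative stack picture at this point on 64-bit (sketch):
//   [rsp + 0] : return address pushed by the call(C, ...) in the call_VM wrappers
//   rax = rsp + wordSize  -> becomes last_Java_sp, so last_Java_pc = last_Java_sp[-1]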
2613 call_VM_base(oop_result, noreg, rax, entry_point, number_of_arguments, check_exceptions);
2615 }
2617 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
2618 call_VM_leaf_base(entry_point, number_of_arguments);
2619 }
2621 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
2622 pass_arg0(this, arg_0);
2623 call_VM_leaf(entry_point, 1);
2624 }
2626 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
2628 LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
2629 pass_arg1(this, arg_1);
2630 pass_arg0(this, arg_0);
2631 call_VM_leaf(entry_point, 2);
2632 }
2634 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
2635 LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
2636 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2637 pass_arg2(this, arg_2);
2638 LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
2639 pass_arg1(this, arg_1);
2640 pass_arg0(this, arg_0);
2641 call_VM_leaf(entry_point, 3);
2642 }
2644 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
2645 pass_arg0(this, arg_0);
2646 MacroAssembler::call_VM_leaf_base(entry_point, 1);
2647 }
2649 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
2651 LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
2652 pass_arg1(this, arg_1);
2653 pass_arg0(this, arg_0);
2654 MacroAssembler::call_VM_leaf_base(entry_point, 2);
2655 }
2657 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
2658 LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
2659 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2660 pass_arg2(this, arg_2);
2661 LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
2662 pass_arg1(this, arg_1);
2663 pass_arg0(this, arg_0);
2664 MacroAssembler::call_VM_leaf_base(entry_point, 3);
2665 }
2667 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
2668 LP64_ONLY(assert(arg_0 != c_rarg3, "smashed arg"));
2669 LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
2670 LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
2671 pass_arg3(this, arg_3);
2672 LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
2673 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
2674 pass_arg2(this, arg_2);
2675 LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
2676 pass_arg1(this, arg_1);
2677 pass_arg0(this, arg_0);
2678 MacroAssembler::call_VM_leaf_base(entry_point, 4);
2679 }
2681 void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
2682 movptr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
2683 movptr(Address(java_thread, JavaThread::vm_result_offset()), NULL_WORD);
2684 verify_oop(oop_result, "broken oop in call_VM_base");
2685 }
2687 void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
2688 movptr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
2689 movptr(Address(java_thread, JavaThread::vm_result_2_offset()), NULL_WORD);
2690 }
2692 void MacroAssembler::check_and_handle_earlyret(Register java_thread) {
2693 }
2695 void MacroAssembler::check_and_handle_popframe(Register java_thread) {
2696 }
2698 void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm) {
2699 if (reachable(src1)) {
2700 cmpl(as_Address(src1), imm);
2701 } else {
2702 lea(rscratch1, src1);
2703 cmpl(Address(rscratch1, 0), imm);
2704 }
2705 }
2707 void MacroAssembler::cmp32(Register src1, AddressLiteral src2) {
2708 assert(!src2.is_lval(), "use cmpptr");
2709 if (reachable(src2)) {
2710 cmpl(src1, as_Address(src2));
2711 } else {
2712 lea(rscratch1, src2);
2713 cmpl(src1, Address(rscratch1, 0));
2714 }
2715 }
2717 void MacroAssembler::cmp32(Register src1, int32_t imm) {
2718 Assembler::cmpl(src1, imm);
2719 }
2721 void MacroAssembler::cmp32(Register src1, Address src2) {
2722 Assembler::cmpl(src1, src2);
2723 }
2725 void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
2726 ucomisd(opr1, opr2);
2728 Label L;
2729 if (unordered_is_less) {
2730 movl(dst, -1);
2731 jcc(Assembler::parity, L);
2732 jcc(Assembler::below , L);
2733 movl(dst, 0);
2734 jcc(Assembler::equal , L);
2735 increment(dst);
2736 } else { // unordered is greater
2737 movl(dst, 1);
2738 jcc(Assembler::parity, L);
2739 jcc(Assembler::above , L);
2740 movl(dst, 0);
2741 jcc(Assembler::equal , L);
2742 decrementl(dst);
2743 }
2744 bind(L);
2745 }
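// For reference, ucomisd sets ZF/PF/CF as follows, which the branches above fold
// into {-1, 0, +1} (unordered_is_less case shown; the other case mirrors it, and
// cmpss2int below is identical for single precision):
//   unordered (NaN operand) : ZF = PF = CF = 1  -> dst = -1
//   opr1 <  opr2            : CF = 1            -> dst = -1
//   opr1 == opr2            : ZF = 1            -> dst =  0
//   opr1 >  opr2            : all clear         -> dst = +1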
2747 void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
2748 ucomiss(opr1, opr2);
2750 Label L;
2751 if (unordered_is_less) {
2752 movl(dst, -1);
2753 jcc(Assembler::parity, L);
2754 jcc(Assembler::below , L);
2755 movl(dst, 0);
2756 jcc(Assembler::equal , L);
2757 increment(dst);
2758 } else { // unordered is greater
2759 movl(dst, 1);
2760 jcc(Assembler::parity, L);
2761 jcc(Assembler::above , L);
2762 movl(dst, 0);
2763 jcc(Assembler::equal , L);
2764 decrementl(dst);
2765 }
2766 bind(L);
2767 }
2770 void MacroAssembler::cmp8(AddressLiteral src1, int imm) {
2771 if (reachable(src1)) {
2772 cmpb(as_Address(src1), imm);
2773 } else {
2774 lea(rscratch1, src1);
2775 cmpb(Address(rscratch1, 0), imm);
2776 }
2777 }
2779 void MacroAssembler::cmpptr(Register src1, AddressLiteral src2) {
2780 #ifdef _LP64
2781 if (src2.is_lval()) {
2782 movptr(rscratch1, src2);
2783 Assembler::cmpq(src1, rscratch1);
2784 } else if (reachable(src2)) {
2785 cmpq(src1, as_Address(src2));
2786 } else {
2787 lea(rscratch1, src2);
2788 Assembler::cmpq(src1, Address(rscratch1, 0));
2789 }
2790 #else
2791 if (src2.is_lval()) {
2792 cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
2793 } else {
2794 cmpl(src1, as_Address(src2));
2795 }
2796 #endif // _LP64
2797 }
2799 void MacroAssembler::cmpptr(Address src1, AddressLiteral src2) {
2800 assert(src2.is_lval(), "not a mem-mem compare");
2801 #ifdef _LP64
2802 // moves src2's literal address
2803 movptr(rscratch1, src2);
2804 Assembler::cmpq(src1, rscratch1);
2805 #else
2806 cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
2807 #endif // _LP64
2808 }
2810 void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr) {
2811 if (reachable(adr)) {
2812 if (os::is_MP())
2813 lock();
2814 cmpxchgptr(reg, as_Address(adr));
2815 } else {
2816 lea(rscratch1, adr);
2817 if (os::is_MP())
2818 lock();
2819 cmpxchgptr(reg, Address(rscratch1, 0));
2820 }
2821 }
2823 void MacroAssembler::cmpxchgptr(Register reg, Address adr) {
2824 LP64_ONLY(cmpxchgq(reg, adr)) NOT_LP64(cmpxchgl(reg, adr));
2825 }
2827 void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src) {
2828 if (reachable(src)) {
2829 Assembler::comisd(dst, as_Address(src));
2830 } else {
2831 lea(rscratch1, src);
2832 Assembler::comisd(dst, Address(rscratch1, 0));
2833 }
2834 }
2836 void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src) {
2837 if (reachable(src)) {
2838 Assembler::comiss(dst, as_Address(src));
2839 } else {
2840 lea(rscratch1, src);
2841 Assembler::comiss(dst, Address(rscratch1, 0));
2842 }
2843 }
2846 void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr) {
2847 Condition negated_cond = negate_condition(cond);
2848 Label L;
2849 jcc(negated_cond, L);
2850 pushf(); // Preserve flags
2851 atomic_incl(counter_addr);
2852 popf();
2853 bind(L);
2854 }
2856 int MacroAssembler::corrected_idivl(Register reg) {
2857 // Full implementation of Java idiv and irem; checks for
2858 // special case as described in JVM spec., p.243 & p.271.
2859 // The function returns the (pc) offset of the idivl
2860 // instruction - may be needed for implicit exceptions.
2861 //
2862 // normal case special case
2863 //
2864   // input : rax: dividend                       min_int
2865   //         reg: divisor (may not be rax/rdx)   -1
2866   //
2867   // output: rax: quotient  (= rax idiv reg)     min_int
2868   //         rdx: remainder (= rax irem reg)     0
2869   assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
2870 const int min_int = 0x80000000;
2871 Label normal_case, special_case;
2873 // check for special case
2874 cmpl(rax, min_int);
2875 jcc(Assembler::notEqual, normal_case);
2876 xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0)
2877 cmpl(reg, -1);
2878 jcc(Assembler::equal, special_case);
2880 // handle normal case
2881 bind(normal_case);
2882 cdql();
2883 int idivl_offset = offset();
2884 idivl(reg);
2886 // normal and special case exit
2887 bind(special_case);
2889 return idivl_offset;
2890 }
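// Worked example of the special case: rax = 0x80000000 (min_int), reg = -1.
// A raw idivl would raise #DE because +2^31 is unrepresentable; the code above
// instead yields quotient min_int and remainder 0, as the JVM spec requires.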
2894 void MacroAssembler::decrementl(Register reg, int value) {
2895 if (value == min_jint) {subl(reg, value) ; return; }
2896 if (value < 0) { incrementl(reg, -value); return; }
2897 if (value == 0) { ; return; }
2898 if (value == 1 && UseIncDec) { decl(reg) ; return; }
2899 /* else */ { subl(reg, value) ; return; }
2900 }
2902 void MacroAssembler::decrementl(Address dst, int value) {
2903 if (value == min_jint) {subl(dst, value) ; return; }
2904 if (value < 0) { incrementl(dst, -value); return; }
2905 if (value == 0) { ; return; }
2906 if (value == 1 && UseIncDec) { decl(dst) ; return; }
2907 /* else */ { subl(dst, value) ; return; }
2908 }
2910 void MacroAssembler::division_with_shift (Register reg, int shift_value) {
2911 assert (shift_value > 0, "illegal shift value");
2912 Label _is_positive;
2913 testl (reg, reg);
2914 jcc (Assembler::positive, _is_positive);
2915 int offset = (1 << shift_value) - 1 ;
2917 if (offset == 1) {
2918 incrementl(reg);
2919 } else {
2920 addl(reg, offset);
2921 }
2923 bind (_is_positive);
2924 sarl(reg, shift_value);
2925 }
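// Worked example (shift_value = 2, i.e. divide by 4): reg = -5 -> add 3 -> -2,
// sar 2 -> -1, rounding toward zero; a bare sar would give floor(-5/4) = -2.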
2927 void MacroAssembler::divsd(XMMRegister dst, AddressLiteral src) {
2928 if (reachable(src)) {
2929 Assembler::divsd(dst, as_Address(src));
2930 } else {
2931 lea(rscratch1, src);
2932 Assembler::divsd(dst, Address(rscratch1, 0));
2933 }
2934 }
2936 void MacroAssembler::divss(XMMRegister dst, AddressLiteral src) {
2937 if (reachable(src)) {
2938 Assembler::divss(dst, as_Address(src));
2939 } else {
2940 lea(rscratch1, src);
2941 Assembler::divss(dst, Address(rscratch1, 0));
2942 }
2943 }
2945 // !defined(COMPILER2) is because of stupid core builds
2946 #if !defined(_LP64) || defined(COMPILER1) || !defined(COMPILER2)
2947 void MacroAssembler::empty_FPU_stack() {
2948 if (VM_Version::supports_mmx()) {
2949 emms();
2950 } else {
2951 for (int i = 8; i-- > 0; ) ffree(i);
2952 }
2953 }
2954 #endif // !LP64 || C1 || !C2
2957 // Defines obj, preserves var_size_in_bytes
2958 void MacroAssembler::eden_allocate(Register obj,
2959 Register var_size_in_bytes,
2960 int con_size_in_bytes,
2961 Register t1,
2962 Label& slow_case) {
2963 assert(obj == rax, "obj must be in rax, for cmpxchg");
2964 assert_different_registers(obj, var_size_in_bytes, t1);
2965 if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
2966 jmp(slow_case);
2967 } else {
2968 Register end = t1;
2969 Label retry;
2970 bind(retry);
2971 ExternalAddress heap_top((address) Universe::heap()->top_addr());
2972 movptr(obj, heap_top);
2973 if (var_size_in_bytes == noreg) {
2974 lea(end, Address(obj, con_size_in_bytes));
2975 } else {
2976 lea(end, Address(obj, var_size_in_bytes, Address::times_1));
2977 }
2978 // if end < obj then we wrapped around => object too long => slow case
2979 cmpptr(end, obj);
2980 jcc(Assembler::below, slow_case);
2981 cmpptr(end, ExternalAddress((address) Universe::heap()->end_addr()));
2982 jcc(Assembler::above, slow_case);
2983 // Compare obj with the top addr, and if still equal, store the new top addr in
2984 // end at the address of the top addr pointer. Sets ZF if was equal, and clears
2985 // it otherwise. Use lock prefix for atomicity on MPs.
2986 locked_cmpxchgptr(end, heap_top);
2987 jcc(Assembler::notEqual, retry);
2988 }
2989 }
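// The loop above is classic bump-the-pointer allocation; in pseudo-code:
//   do {
//     obj = *heap_top;  end = obj + size;
//     if (end wrapped around || end > heap_end) goto slow_case;
//   } while (!CAS(heap_top, obj, end));   // ZF from cmpxchg drives the retry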
2991 void MacroAssembler::enter() {
2992 push(rbp);
2993 mov(rbp, rsp);
2994 }
2996 // A 5 byte nop that is safe for patching (see patch_verified_entry)
2997 void MacroAssembler::fat_nop() {
2998 if (UseAddressNop) {
2999 addr_nop_5();
3000 } else {
3001 emit_int8(0x26); // es:
3002 emit_int8(0x2e); // cs:
3003 emit_int8(0x64); // fs:
3004 emit_int8(0x65); // gs:
3005 emit_int8((unsigned char)0x90);
3006 }
3007 }
3009 void MacroAssembler::fcmp(Register tmp) {
3010 fcmp(tmp, 1, true, true);
3011 }
3013 void MacroAssembler::fcmp(Register tmp, int index, bool pop_left, bool pop_right) {
3014 assert(!pop_right || pop_left, "usage error");
3015 if (VM_Version::supports_cmov()) {
3016 assert(tmp == noreg, "unneeded temp");
3017 if (pop_left) {
3018 fucomip(index);
3019 } else {
3020 fucomi(index);
3021 }
3022 if (pop_right) {
3023 fpop();
3024 }
3025 } else {
3026 assert(tmp != noreg, "need temp");
3027 if (pop_left) {
3028 if (pop_right) {
3029 fcompp();
3030 } else {
3031 fcomp(index);
3032 }
3033 } else {
3034 fcom(index);
3035 }
3036     // convert FPU condition into eflags condition via rax
3037 save_rax(tmp);
3038 fwait(); fnstsw_ax();
3039 sahf();
3040 restore_rax(tmp);
3041 }
3042 // condition codes set as follows:
3043 //
3044 // CF (corresponds to C0) if x < y
3045 // PF (corresponds to C2) if unordered
3046 // ZF (corresponds to C3) if x = y
3047 }
3049 void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less) {
3050 fcmp2int(dst, unordered_is_less, 1, true, true);
3051 }
3053 void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right) {
3054 fcmp(VM_Version::supports_cmov() ? noreg : dst, index, pop_left, pop_right);
3055 Label L;
3056 if (unordered_is_less) {
3057 movl(dst, -1);
3058 jcc(Assembler::parity, L);
3059 jcc(Assembler::below , L);
3060 movl(dst, 0);
3061 jcc(Assembler::equal , L);
3062 increment(dst);
3063 } else { // unordered is greater
3064 movl(dst, 1);
3065 jcc(Assembler::parity, L);
3066 jcc(Assembler::above , L);
3067 movl(dst, 0);
3068 jcc(Assembler::equal , L);
3069 decrementl(dst);
3070 }
3071 bind(L);
3072 }
3074 void MacroAssembler::fld_d(AddressLiteral src) {
3075 fld_d(as_Address(src));
3076 }
3078 void MacroAssembler::fld_s(AddressLiteral src) {
3079 fld_s(as_Address(src));
3080 }
3082 void MacroAssembler::fld_x(AddressLiteral src) {
3083 Assembler::fld_x(as_Address(src));
3084 }
3086 void MacroAssembler::fldcw(AddressLiteral src) {
3087 Assembler::fldcw(as_Address(src));
3088 }
3090 void MacroAssembler::pow_exp_core_encoding() {
3091 // kills rax, rcx, rdx
3092 subptr(rsp,sizeof(jdouble));
3093 // computes 2^X. Stack: X ...
3094 // f2xm1 computes 2^X-1 but only operates on -1<=X<=1. Get int(X) and
3095 // keep it on the thread's stack to compute 2^int(X) later
3096   // then compute 2^(X-int(X)) as (2^(X-int(X))-1)+1
3097 // final result is obtained with: 2^X = 2^int(X) * 2^(X-int(X))
3098 fld_s(0); // Stack: X X ...
3099 frndint(); // Stack: int(X) X ...
3100 fsuba(1); // Stack: int(X) X-int(X) ...
3101 fistp_s(Address(rsp,0)); // move int(X) as integer to thread's stack. Stack: X-int(X) ...
3102 f2xm1(); // Stack: 2^(X-int(X))-1 ...
3103 fld1(); // Stack: 1 2^(X-int(X))-1 ...
3104 faddp(1); // Stack: 2^(X-int(X))
3105 // computes 2^(int(X)): add exponent bias (1023) to int(X), then
3106 // shift int(X)+1023 to exponent position.
3107 // Exponent is limited to 11 bits if int(X)+1023 does not fit in 11
3108 // bits, set result to NaN. 0x000 and 0x7FF are reserved exponent
3109 // values so detect them and set result to NaN.
3110 movl(rax,Address(rsp,0));
3111 movl(rcx, -2048); // 11 bit mask and valid NaN binary encoding
3112 addl(rax, 1023);
3113 movl(rdx,rax);
3114 shll(rax,20);
3115 // Check that 0 < int(X)+1023 < 2047. Otherwise set rax to NaN.
3116 addl(rdx,1);
3117 // Check that 1 < int(X)+1023+1 < 2048
3118 // in 3 steps:
3119 // 1- (int(X)+1023+1)&-2048 == 0 => 0 <= int(X)+1023+1 < 2048
3120 // 2- (int(X)+1023+1)&-2048 != 0
3121 // 3- (int(X)+1023+1)&-2048 != 1
3122 // Do 2- first because addl just updated the flags.
3123 cmov32(Assembler::equal,rax,rcx);
3124 cmpl(rdx,1);
3125 cmov32(Assembler::equal,rax,rcx);
3126 testl(rdx,rcx);
3127 cmov32(Assembler::notEqual,rax,rcx);
3128 movl(Address(rsp,4),rax);
3129 movl(Address(rsp,0),0);
3130 fmul_d(Address(rsp,0)); // Stack: 2^X ...
3131 addptr(rsp,sizeof(jdouble));
3132 }
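// The exponent store above relies on the IEEE-754 double layout (for reference:
// bit 63 = sign, bits 62..52 = biased exponent, bits 51..0 = mantissa). With a
// zero mantissa, biased exponent e+1023 encodes exactly 2^e, so writing
// (int(X)+1023) << 20 into the high word and 0 into the low word builds 2^int(X).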
3134 void MacroAssembler::increase_precision() {
3135 subptr(rsp, BytesPerWord);
3136 fnstcw(Address(rsp, 0));
3137 movl(rax, Address(rsp, 0));
3138 orl(rax, 0x300);
3139 push(rax);
3140 fldcw(Address(rsp, 0));
3141 pop(rax);
3142 }
3144 void MacroAssembler::restore_precision() {
3145 fldcw(Address(rsp, 0));
3146 addptr(rsp, BytesPerWord);
3147 }
3149 void MacroAssembler::fast_pow() {
3150 // computes X^Y = 2^(Y * log2(X))
3151 // if fast computation is not possible, result is NaN. Requires
3152 // fallback from user of this macro.
3153 // increase precision for intermediate steps of the computation
3154 BLOCK_COMMENT("fast_pow {");
3155 increase_precision();
3156 fyl2x(); // Stack: (Y*log2(X)) ...
3157 pow_exp_core_encoding(); // Stack: exp(X) ...
3158 restore_precision();
3159 BLOCK_COMMENT("} fast_pow");
3160 }
3162 void MacroAssembler::fast_exp() {
3163 // computes exp(X) = 2^(X * log2(e))
3164 // if fast computation is not possible, result is NaN. Requires
3165 // fallback from user of this macro.
3166 // increase precision for intermediate steps of the computation
3167 increase_precision();
3168 fldl2e(); // Stack: log2(e) X ...
3169 fmulp(1); // Stack: (X*log2(e)) ...
3170 pow_exp_core_encoding(); // Stack: exp(X) ...
3171 restore_precision();
3172 }
3174 void MacroAssembler::pow_or_exp(bool is_exp, int num_fpu_regs_in_use) {
3175 // kills rax, rcx, rdx
3176   // pow and exp need 2 extra registers on the fpu stack.
3177 Label slow_case, done;
3178 Register tmp = noreg;
3179 if (!VM_Version::supports_cmov()) {
3180     // fcmp needs a temporary, so preserve rdx
3181 tmp = rdx;
3182 }
3183 Register tmp2 = rax;
3184 Register tmp3 = rcx;
3186 if (is_exp) {
3187 // Stack: X
3188 fld_s(0); // duplicate argument for runtime call. Stack: X X
3189 fast_exp(); // Stack: exp(X) X
3190 fcmp(tmp, 0, false, false); // Stack: exp(X) X
3191 // exp(X) not equal to itself: exp(X) is NaN, so go to the slow case.
3192 jcc(Assembler::parity, slow_case);
3193 // get rid of duplicate argument. Stack: exp(X)
3194 if (num_fpu_regs_in_use > 0) {
3195 fxch();
3196 fpop();
3197 } else {
3198 ffree(1);
3199 }
3200 jmp(done);
3201 } else {
3202 // Stack: X Y
3203 Label x_negative, y_not_2;
3205 static double two = 2.0;
3206 ExternalAddress two_addr((address)&two);
3208 // the constant may be too far away on 64 bit
3209 lea(tmp2, two_addr);
3210 fld_d(Address(tmp2, 0)); // Stack: 2 X Y
3211 fcmp(tmp, 2, true, false); // Stack: X Y
3212 jcc(Assembler::parity, y_not_2);
3213 jcc(Assembler::notEqual, y_not_2);
3215 fxch(); fpop(); // Stack: X
3216 fmul(0); // Stack: X*X
3218 jmp(done);
3220 bind(y_not_2);
3222 fldz(); // Stack: 0 X Y
3223 fcmp(tmp, 1, true, false); // Stack: X Y
3224 jcc(Assembler::above, x_negative);
3226 // X >= 0
3228 fld_s(1); // duplicate arguments for runtime call. Stack: Y X Y
3229 fld_s(1); // Stack: X Y X Y
3230 fast_pow(); // Stack: X^Y X Y
3231 fcmp(tmp, 0, false, false); // Stack: X^Y X Y
3232 // X^Y not equal to itself: X^Y is NaN, so go to the slow case.
3233 jcc(Assembler::parity, slow_case);
3234 // get rid of duplicate arguments. Stack: X^Y
3235 if (num_fpu_regs_in_use > 0) {
3236 fxch(); fpop();
3237 fxch(); fpop();
3238 } else {
3239 ffree(2);
3240 ffree(1);
3241 }
3242 jmp(done);
3244 // X <= 0
3245 bind(x_negative);
3247 fld_s(1); // Stack: Y X Y
3248 frndint(); // Stack: int(Y) X Y
3249 fcmp(tmp, 2, false, false); // Stack: int(Y) X Y
3250 jcc(Assembler::notEqual, slow_case);
3252 subptr(rsp, 8);
3254 // For X^Y, when X < 0, Y has to be an integer and the final
3255 // result depends on whether it's odd or even. We just checked
3256 // that int(Y) == Y. We move int(Y) to gp registers as a 64 bit
3257 // integer to test its parity. If int(Y) is huge and doesn't fit
3258 // in the 64 bit integer range, the integer indefinite value will
3259 // end up in the gp registers. Huge numbers are all even, and the
3260 // integer indefinite value is even as well, so it's fine.
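// Parity examples (illustrative): for X == -2, Y == 3, int(Y) == Y and the
// low bit of int(Y) is 1, so the fchs below negates abs(-2)^3 == 8 to give
// -8; for Y == 4 the low bit is 0 and 16 is returned as is. For a huge even
// Y such as 2^64, fistp_d stores the integer indefinite value
// 0x8000000000000000, whose low bit is also 0, so the even path is taken,
// which is the correct answer.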
3262 #ifdef ASSERT
3263 // Let's check we don't end up with an integer indefinite number
3264 // when not expected. First test for huge numbers: check whether
3265 // int(Y)+1 == int(Y) which is true for very large numbers and
3266 // those are all even. A 64 bit integer is guaranteed to not
3267 // overflow for numbers where y+1 != y (when precision is set to
3268 // double precision).
3269 Label y_not_huge;
3271 fld1(); // Stack: 1 int(Y) X Y
3272 fadd(1); // Stack: 1+int(Y) int(Y) X Y
3274 #ifdef _LP64
3275 // trip to memory to force the precision down from double extended
3276 // precision
3277 fstp_d(Address(rsp, 0));
3278 fld_d(Address(rsp, 0));
3279 #endif
3281 fcmp(tmp, 1, true, false); // Stack: int(Y) X Y
3282 #endif
3284 // move int(Y) as 64 bit integer to thread's stack
3285 fistp_d(Address(rsp,0)); // Stack: X Y
3287 #ifdef ASSERT
3288 jcc(Assembler::notEqual, y_not_huge);
3290 // Y is huge so we know it's even. It may not fit in a 64 bit
3291 // integer and we don't want the debug code below to see the
3292 // integer indefinite value so overwrite int(Y) on the thread's
3293 // stack with 0.
3294 movl(Address(rsp, 0), 0);
3295 movl(Address(rsp, 4), 0);
3297 bind(y_not_huge);
3298 #endif
3300 fld_s(1); // duplicate arguments for runtime call. Stack: Y X Y
3301 fld_s(1); // Stack: X Y X Y
3302 fabs(); // Stack: abs(X) Y X Y
3303 fast_pow(); // Stack: abs(X)^Y X Y
3304 fcmp(tmp, 0, false, false); // Stack: abs(X)^Y X Y
3305 // abs(X)^Y not equal to itself: abs(X)^Y is NaN, so go to the slow case.
3307 pop(tmp2);
3308 NOT_LP64(pop(tmp3));
3309 jcc(Assembler::parity, slow_case);
3311 #ifdef ASSERT
3312 // Check that int(Y) is not integer indefinite value (int
3313 // overflow). Shouldn't happen because for values that would
3314 // overflow, 1+int(Y)==Y which was tested earlier.
3315 #ifndef _LP64
3316 {
3317 Label integer;
3318 testl(tmp2, tmp2);
3319 jcc(Assembler::notZero, integer);
3320 cmpl(tmp3, 0x80000000);
3321 jcc(Assembler::notZero, integer);
3322 STOP("integer indefinite value shouldn't be seen here");
3323 bind(integer);
3324 }
3325 #else
3326 {
3327 Label integer;
3328 mov(tmp3, tmp2); // preserve tmp2 for parity check below
3329 shlq(tmp3, 1);
3330 jcc(Assembler::carryClear, integer);
3331 jcc(Assembler::notZero, integer);
3332 STOP("integer indefinite value shouldn't be seen here");
3333 bind(integer);
3334 }
3335 #endif
3336 #endif
3338 // get rid of duplicate arguments. Stack: X^Y
3339 if (num_fpu_regs_in_use > 0) {
3340 fxch(); fpop();
3341 fxch(); fpop();
3342 } else {
3343 ffree(2);
3344 ffree(1);
3345 }
3347 testl(tmp2, 1);
3348 jcc(Assembler::zero, done); // X <= 0, Y even: X^Y = abs(X)^Y
3349 // X <= 0, Y odd: X^Y = -abs(X)^Y
3351 fchs(); // Stack: -abs(X)^Y Y
3352 jmp(done);
3353 }
3355 // slow case: runtime call
3356 bind(slow_case);
3358 fpop(); // pop incorrect result or int(Y)
3360 fp_runtime_fallback(is_exp ? CAST_FROM_FN_PTR(address, SharedRuntime::dexp) : CAST_FROM_FN_PTR(address, SharedRuntime::dpow),
3361 is_exp ? 1 : 2, num_fpu_regs_in_use);
3363 // Come here with result in F-TOS
3364 bind(done);
3365 }
3367 void MacroAssembler::fpop() {
3368 ffree();
3369 fincstp();
3370 }
3372 void MacroAssembler::fremr(Register tmp) {
3373 save_rax(tmp);
3374 { Label L;
3375 bind(L);
3376 fprem();
3377 fwait(); fnstsw_ax();
3378 #ifdef _LP64
3379 testl(rax, 0x400);
3380 jcc(Assembler::notEqual, L);
3381 #else
3382 sahf();
3383 jcc(Assembler::parity, L);
3384 #endif // _LP64
3385 }
3386 restore_rax(tmp);
3387 // Result is in ST0.
3388 // Note: fxch & fpop to get rid of ST1
3389 // (otherwise FPU stack could overflow eventually)
3390 fxch(1);
3391 fpop();
3392 }
3395 void MacroAssembler::incrementl(AddressLiteral dst) {
3396 if (reachable(dst)) {
3397 incrementl(as_Address(dst));
3398 } else {
3399 lea(rscratch1, dst);
3400 incrementl(Address(rscratch1, 0));
3401 }
3402 }
3404 void MacroAssembler::incrementl(ArrayAddress dst) {
3405 incrementl(as_Address(dst));
3406 }
3408 void MacroAssembler::incrementl(Register reg, int value) {
3409 if (value == min_jint) {addl(reg, value) ; return; }
3410 if (value < 0) { decrementl(reg, -value); return; }
3411 if (value == 0) { ; return; }
3412 if (value == 1 && UseIncDec) { incl(reg) ; return; }
3413 /* else */ { addl(reg, value) ; return; }
3414 }
3416 void MacroAssembler::incrementl(Address dst, int value) {
3417 if (value == min_jint) {addl(dst, value) ; return; }
3418 if (value < 0) { decrementl(dst, -value); return; }
3419 if (value == 0) { ; return; }
3420 if (value == 1 && UseIncDec) { incl(dst) ; return; }
3421 /* else */ { addl(dst, value) ; return; }
3422 }
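// The min_jint check exists because the value < 0 path negates value, and
// -min_jint overflows a 32-bit int, so that one case must fall through to a
// plain addl. Dispatch summary (illustrative):
//   incrementl(reg, min_jint) -> addl(reg, min_jint)
//   incrementl(reg, -5)       -> decrementl(reg, 5)
//   incrementl(reg, 0)        -> nothing emitted
//   incrementl(reg, 1)        -> incl(reg) if UseIncDec, else addl(reg, 1)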
3424 void MacroAssembler::jump(AddressLiteral dst) {
3425 if (reachable(dst)) {
3426 jmp_literal(dst.target(), dst.rspec());
3427 } else {
3428 lea(rscratch1, dst);
3429 jmp(rscratch1);
3430 }
3431 }
3433 void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst) {
3434 if (reachable(dst)) {
3435 InstructionMark im(this);
3436 relocate(dst.reloc());
3437 const int short_size = 2;
3438 const int long_size = 6;
3439 int offs = (intptr_t)dst.target() - ((intptr_t)pc());
3440 if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) {
3441 // 0111 tttn #8-bit disp
3442 emit_int8(0x70 | cc);
3443 emit_int8((offs - short_size) & 0xFF);
3444 } else {
3445 // 0000 1111 1000 tttn #32-bit disp
3446 emit_int8(0x0F);
3447 emit_int8((unsigned char)(0x80 | cc));
3448 emit_int32(offs - long_size);
3449 }
3450 } else {
3451 #ifdef ASSERT
3452 warning("reversing conditional branch");
3453 #endif /* ASSERT */
3454 Label skip;
3455 jccb(reverse[cc], skip);
3456 lea(rscratch1, dst);
3457 Assembler::jmp(rscratch1);
3458 bind(skip);
3459 }
3460 }
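// Encoding note (illustrative): for cc == Assembler::equal (0x4) the short
// form above emits 0x74 rel8 (0x70 | cc) and the long form emits
// 0x0F 0x84 rel32 (0x80 | cc). The displacement is relative to the end of
// the instruction, hence the short_size/long_size corrections to offs. In
// the unreachable case the condition is inverted through the reverse[]
// table so a one-byte jccb can skip the lea + jmp pair that performs the
// far jump.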
3462 void MacroAssembler::ldmxcsr(AddressLiteral src) {
3463 if (reachable(src)) {
3464 Assembler::ldmxcsr(as_Address(src));
3465 } else {
3466 lea(rscratch1, src);
3467 Assembler::ldmxcsr(Address(rscratch1, 0));
3468 }
3469 }
3471 int MacroAssembler::load_signed_byte(Register dst, Address src) {
3472 int off;
3473 if (LP64_ONLY(true ||) VM_Version::is_P6()) {
3474 off = offset();
3475 movsbl(dst, src); // movsxb
3476 } else {
3477 off = load_unsigned_byte(dst, src);
3478 shll(dst, 24);
3479 sarl(dst, 24);
3480 }
3481 return off;
3482 }
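// Worked example of the pre-P6 fallback (illustrative): loading the byte
// 0x80 (-128) with load_unsigned_byte leaves 0x00000080 in dst;
// shll(dst, 24) gives 0x80000000 and sarl(dst, 24) shifts copies of the
// sign bit back in, leaving 0xFFFFFF80 == -128, exactly what movsbl
// produces in one instruction.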
3484 // Note: load_signed_short used to be called load_signed_word.
3485 // Although the 'w' in x86 opcodes refers to the term "word" in the assembler
3486 // manual, which means 16 bits, that usage is found nowhere in HotSpot code.
3487 // The term "word" in HotSpot means a 32- or 64-bit machine word.
3488 int MacroAssembler::load_signed_short(Register dst, Address src) {
3489 int off;
3490 if (LP64_ONLY(true ||) VM_Version::is_P6()) {
3491 // This is dubious to me since it seems safe to do a signed 16 => 64 bit
3492 // version but this is what 64bit has always done. This seems to imply
3493 // that users are only using 32bits worth.
3494 off = offset();
3495 movswl(dst, src); // movsxw
3496 } else {
3497 off = load_unsigned_short(dst, src);
3498 shll(dst, 16);
3499 sarl(dst, 16);
3500 }
3501 return off;
3502 }
3504 int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
3505 // See Intel Doc. AP-526, "Zero-Extension of Short", p. 16,
3506 // and "3.9 Partial Register Penalties", p. 22.
3507 int off;
3508 if (LP64_ONLY(true || ) VM_Version::is_P6() || src.uses(dst)) {
3509 off = offset();
3510 movzbl(dst, src); // movzxb
3511 } else {
3512 xorl(dst, dst);
3513 off = offset();
3514 movb(dst, src);
3515 }
3516 return off;
3517 }
3519 // Note: load_unsigned_short used to be called load_unsigned_word.
3520 int MacroAssembler::load_unsigned_short(Register dst, Address src) {
3521 // See Intel Doc. AP-526, "Zero-Extension of Short", p. 16,
3522 // and "3.9 Partial Register Penalties", p. 22.
3523 int off;
3524 if (LP64_ONLY(true ||) VM_Version::is_P6() || src.uses(dst)) {
3525 off = offset();
3526 movzwl(dst, src); // movzxw
3527 } else {
3528 xorl(dst, dst);
3529 off = offset();
3530 movw(dst, src);
3531 }
3532 return off;
3533 }
3535 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) {
3536 switch (size_in_bytes) {
3537 #ifndef _LP64
3538 case 8:
3539 assert(dst2 != noreg, "second dest register required");
3540 movl(dst, src);
3541 movl(dst2, src.plus_disp(BytesPerInt));
3542 break;
3543 #else
3544 case 8: movq(dst, src); break;
3545 #endif
3546 case 4: movl(dst, src); break;
3547 case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
3548 case 1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
3549 default: ShouldNotReachHere();
3550 }
3551 }
3553 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) {
3554 switch (size_in_bytes) {
3555 #ifndef _LP64
3556 case 8:
3557 assert(src2 != noreg, "second source register required");
3558 movl(dst, src);
3559 movl(dst.plus_disp(BytesPerInt), src2);
3560 break;
3561 #else
3562 case 8: movq(dst, src); break;
3563 #endif
3564 case 4: movl(dst, src); break;
3565 case 2: movw(dst, src); break;
3566 case 1: movb(dst, src); break;
3567 default: ShouldNotReachHere();
3568 }
3569 }
3571 void MacroAssembler::mov32(AddressLiteral dst, Register src) {
3572 if (reachable(dst)) {
3573 movl(as_Address(dst), src);
3574 } else {
3575 lea(rscratch1, dst);
3576 movl(Address(rscratch1, 0), src);
3577 }
3578 }
3580 void MacroAssembler::mov32(Register dst, AddressLiteral src) {
3581 if (reachable(src)) {
3582 movl(dst, as_Address(src));
3583 } else {
3584 lea(rscratch1, src);
3585 movl(dst, Address(rscratch1, 0));
3586 }
3587 }
3589 // C++ bool manipulation
3591 void MacroAssembler::movbool(Register dst, Address src) {
3592 if(sizeof(bool) == 1)
3593 movb(dst, src);
3594 else if(sizeof(bool) == 2)
3595 movw(dst, src);
3596 else if(sizeof(bool) == 4)
3597 movl(dst, src);
3598 else
3599 // unsupported
3600 ShouldNotReachHere();
3601 }
3603 void MacroAssembler::movbool(Address dst, bool boolconst) {
3604 if(sizeof(bool) == 1)
3605 movb(dst, (int) boolconst);
3606 else if(sizeof(bool) == 2)
3607 movw(dst, (int) boolconst);
3608 else if(sizeof(bool) == 4)
3609 movl(dst, (int) boolconst);
3610 else
3611 // unsupported
3612 ShouldNotReachHere();
3613 }
3615 void MacroAssembler::movbool(Address dst, Register src) {
3616 if(sizeof(bool) == 1)
3617 movb(dst, src);
3618 else if(sizeof(bool) == 2)
3619 movw(dst, src);
3620 else if(sizeof(bool) == 4)
3621 movl(dst, src);
3622 else
3623 // unsupported
3624 ShouldNotReachHere();
3625 }
3627 void MacroAssembler::movbyte(ArrayAddress dst, int src) {
3628 movb(as_Address(dst), src);
3629 }
3631 void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src) {
3632 if (reachable(src)) {
3633 movdl(dst, as_Address(src));
3634 } else {
3635 lea(rscratch1, src);
3636 movdl(dst, Address(rscratch1, 0));
3637 }
3638 }
3640 void MacroAssembler::movq(XMMRegister dst, AddressLiteral src) {
3641 if (reachable(src)) {
3642 movq(dst, as_Address(src));
3643 } else {
3644 lea(rscratch1, src);
3645 movq(dst, Address(rscratch1, 0));
3646 }
3647 }
3649 void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src) {
3650 if (reachable(src)) {
3651 if (UseXmmLoadAndClearUpper) {
3652 movsd (dst, as_Address(src));
3653 } else {
3654 movlpd(dst, as_Address(src));
3655 }
3656 } else {
3657 lea(rscratch1, src);
3658 if (UseXmmLoadAndClearUpper) {
3659 movsd (dst, Address(rscratch1, 0));
3660 } else {
3661 movlpd(dst, Address(rscratch1, 0));
3662 }
3663 }
3664 }
3666 void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src) {
3667 if (reachable(src)) {
3668 movss(dst, as_Address(src));
3669 } else {
3670 lea(rscratch1, src);
3671 movss(dst, Address(rscratch1, 0));
3672 }
3673 }
3675 void MacroAssembler::movptr(Register dst, Register src) {
3676 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
3677 }
3679 void MacroAssembler::movptr(Register dst, Address src) {
3680 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
3681 }
3683 // src should NEVER be a real pointer. Use AddressLiteral for true pointers
3684 void MacroAssembler::movptr(Register dst, intptr_t src) {
3685 LP64_ONLY(mov64(dst, src)) NOT_LP64(movl(dst, src));
3686 }
3688 void MacroAssembler::movptr(Address dst, Register src) {
3689 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
3690 }
3692 void MacroAssembler::movdqu(XMMRegister dst, AddressLiteral src) {
3693 if (reachable(src)) {
3694 Assembler::movdqu(dst, as_Address(src));
3695 } else {
3696 lea(rscratch1, src);
3697 Assembler::movdqu(dst, Address(rscratch1, 0));
3698 }
3699 }
3701 void MacroAssembler::movdqa(XMMRegister dst, AddressLiteral src) {
3702 if (reachable(src)) {
3703 Assembler::movdqa(dst, as_Address(src));
3704 } else {
3705 lea(rscratch1, src);
3706 Assembler::movdqa(dst, Address(rscratch1, 0));
3707 }
3708 }
3710 void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src) {
3711 if (reachable(src)) {
3712 Assembler::movsd(dst, as_Address(src));
3713 } else {
3714 lea(rscratch1, src);
3715 Assembler::movsd(dst, Address(rscratch1, 0));
3716 }
3717 }
3719 void MacroAssembler::movss(XMMRegister dst, AddressLiteral src) {
3720 if (reachable(src)) {
3721 Assembler::movss(dst, as_Address(src));
3722 } else {
3723 lea(rscratch1, src);
3724 Assembler::movss(dst, Address(rscratch1, 0));
3725 }
3726 }
3728 void MacroAssembler::mulsd(XMMRegister dst, AddressLiteral src) {
3729 if (reachable(src)) {
3730 Assembler::mulsd(dst, as_Address(src));
3731 } else {
3732 lea(rscratch1, src);
3733 Assembler::mulsd(dst, Address(rscratch1, 0));
3734 }
3735 }
3737 void MacroAssembler::mulss(XMMRegister dst, AddressLiteral src) {
3738 if (reachable(src)) {
3739 Assembler::mulss(dst, as_Address(src));
3740 } else {
3741 lea(rscratch1, src);
3742 Assembler::mulss(dst, Address(rscratch1, 0));
3743 }
3744 }
3746 void MacroAssembler::null_check(Register reg, int offset) {
3747 if (needs_explicit_null_check(offset)) {
3748 // provoke OS NULL exception if reg = NULL by
3749 // accessing M[reg] w/o changing any (non-CC) registers
3750 // NOTE: cmpl is plenty here to provoke a segv
3751 cmpptr(rax, Address(reg, 0));
3752 // Note: should probably use testl(rax, Address(reg, 0));
3753 // may be shorter code (however, this version of
3754 // testl needs to be implemented first)
3755 } else {
3756 // nothing to do, (later) access of M[reg + offset]
3757 // will provoke OS NULL exception if reg = NULL
3758 }
3759 }
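// Illustrative: for a small field offset such as 8,
// needs_explicit_null_check(8) is false because a later access of
// M[reg + 8] with reg == NULL still faults inside the protected page at
// address 0, so the OS trap does the check for free. Only offsets that
// could reach past the protected region force the explicit cmpptr probe
// above.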
3761 void MacroAssembler::os_breakpoint() {
3762 // instead of directly emitting a breakpoint, call os:breakpoint for better debugability
3763 // (e.g., MSVC can't call ps() otherwise)
3764 call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
3765 }
3767 void MacroAssembler::pop_CPU_state() {
3768 pop_FPU_state();
3769 pop_IU_state();
3770 }
3772 void MacroAssembler::pop_FPU_state() {
3773 NOT_LP64(frstor(Address(rsp, 0));)
3774 LP64_ONLY(fxrstor(Address(rsp, 0));)
3775 addptr(rsp, FPUStateSizeInWords * wordSize);
3776 }
3778 void MacroAssembler::pop_IU_state() {
3779 popa();
3780 LP64_ONLY(addq(rsp, 8));
3781 popf();
3782 }
3784 // Save Integer and Float state
3785 // Warning: Stack must be 16 byte aligned (64bit)
3786 void MacroAssembler::push_CPU_state() {
3787 push_IU_state();
3788 push_FPU_state();
3789 }
3791 void MacroAssembler::push_FPU_state() {
3792 subptr(rsp, FPUStateSizeInWords * wordSize);
3793 #ifndef _LP64
3794 fnsave(Address(rsp, 0));
3795 fwait();
3796 #else
3797 fxsave(Address(rsp, 0));
3798 #endif // LP64
3799 }
3801 void MacroAssembler::push_IU_state() {
3802 // Push flags first because pusha kills them
3803 pushf();
3804 // Make sure rsp stays 16-byte aligned
3805 LP64_ONLY(subq(rsp, 8));
3806 pusha();
3807 }
3809 void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp) {
3810 // determine java_thread register
3811 if (!java_thread->is_valid()) {
3812 java_thread = rdi;
3813 get_thread(java_thread);
3814 }
3815 // we must set sp to zero to clear frame
3816 movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
3817 if (clear_fp) {
3818 movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
3819 }
3821 // Always clear the pc because it could have been set by make_walkable()
3822 movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
3824 }
3826 void MacroAssembler::restore_rax(Register tmp) {
3827 if (tmp == noreg) pop(rax);
3828 else if (tmp != rax) mov(rax, tmp);
3829 }
3831 void MacroAssembler::round_to(Register reg, int modulus) {
3832 addptr(reg, modulus - 1);
3833 andptr(reg, -modulus);
3834 }
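// Worked example (illustrative): round_to(reg, 8) with reg == 13 computes
// (13 + 7) & -8 == 20 & 0xFFFFFFF8 == 16, the next multiple of 8; an
// already-aligned value maps to itself: (16 + 7) & -8 == 16.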
3836 void MacroAssembler::save_rax(Register tmp) {
3837 if (tmp == noreg) push(rax);
3838 else if (tmp != rax) mov(tmp, rax);
3839 }
3841 // Write serialization page so VM thread can do a pseudo remote membar.
3842 // We use the current thread pointer to calculate a thread specific
3843 // offset to write to within the page. This minimizes bus traffic
3844 // due to cache line collision.
3845 void MacroAssembler::serialize_memory(Register thread, Register tmp) {
3846 movl(tmp, thread);
3847 shrl(tmp, os::get_serialize_page_shift_count());
3848 andl(tmp, (os::vm_page_size() - sizeof(int)));
3850 Address index(noreg, tmp, Address::times_1);
3851 ExternalAddress page(os::get_memory_serialize_page());
3853 // Size of store must match masking code above
3854 movl(as_Address(ArrayAddress(page, index)), tmp);
3855 }
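// Illustrative, assuming a 4096-byte serialization page: the mask above is
// vm_page_size() - sizeof(int) == 0xFFC, so each thread writes to an
// int-aligned slot selected by bits of its own thread pointer, and two
// threads rarely contend on the same cache line within the page.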
3857 // Calls to C land
3858 //
3859 // When entering C land, the rbp and rsp of the last Java frame have to be recorded
3860 // in the (thread-local) JavaThread object. When leaving C land, the last Java fp
3861 // has to be reset to 0. This is required to allow proper stack traversal.
3862 void MacroAssembler::set_last_Java_frame(Register java_thread,
3863 Register last_java_sp,
3864 Register last_java_fp,
3865 address last_java_pc) {
3866 // determine java_thread register
3867 if (!java_thread->is_valid()) {
3868 java_thread = rdi;
3869 get_thread(java_thread);
3870 }
3871 // determine last_java_sp register
3872 if (!last_java_sp->is_valid()) {
3873 last_java_sp = rsp;
3874 }
3876 // last_java_fp is optional
3878 if (last_java_fp->is_valid()) {
3879 movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), last_java_fp);
3880 }
3882 // last_java_pc is optional
3884 if (last_java_pc != NULL) {
3885 lea(Address(java_thread,
3886 JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()),
3887 InternalAddress(last_java_pc));
3889 }
3890 movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
3891 }
3893 void MacroAssembler::shlptr(Register dst, int imm8) {
3894 LP64_ONLY(shlq(dst, imm8)) NOT_LP64(shll(dst, imm8));
3895 }
3897 void MacroAssembler::shrptr(Register dst, int imm8) {
3898 LP64_ONLY(shrq(dst, imm8)) NOT_LP64(shrl(dst, imm8));
3899 }
3901 void MacroAssembler::sign_extend_byte(Register reg) {
3902 if (LP64_ONLY(true ||) (VM_Version::is_P6() && reg->has_byte_register())) {
3903 movsbl(reg, reg); // movsxb
3904 } else {
3905 shll(reg, 24);
3906 sarl(reg, 24);
3907 }
3908 }
3910 void MacroAssembler::sign_extend_short(Register reg) {
3911 if (LP64_ONLY(true ||) VM_Version::is_P6()) {
3912 movswl(reg, reg); // movsxw
3913 } else {
3914 shll(reg, 16);
3915 sarl(reg, 16);
3916 }
3917 }
3919 void MacroAssembler::testl(Register dst, AddressLiteral src) {
3920 assert(reachable(src), "Address should be reachable");
3921 testl(dst, as_Address(src));
3922 }
3924 void MacroAssembler::sqrtsd(XMMRegister dst, AddressLiteral src) {
3925 if (reachable(src)) {
3926 Assembler::sqrtsd(dst, as_Address(src));
3927 } else {
3928 lea(rscratch1, src);
3929 Assembler::sqrtsd(dst, Address(rscratch1, 0));
3930 }
3931 }
3933 void MacroAssembler::sqrtss(XMMRegister dst, AddressLiteral src) {
3934 if (reachable(src)) {
3935 Assembler::sqrtss(dst, as_Address(src));
3936 } else {
3937 lea(rscratch1, src);
3938 Assembler::sqrtss(dst, Address(rscratch1, 0));
3939 }
3940 }
3942 void MacroAssembler::subsd(XMMRegister dst, AddressLiteral src) {
3943 if (reachable(src)) {
3944 Assembler::subsd(dst, as_Address(src));
3945 } else {
3946 lea(rscratch1, src);
3947 Assembler::subsd(dst, Address(rscratch1, 0));
3948 }
3949 }
3951 void MacroAssembler::subss(XMMRegister dst, AddressLiteral src) {
3952 if (reachable(src)) {
3953 Assembler::subss(dst, as_Address(src));
3954 } else {
3955 lea(rscratch1, src);
3956 Assembler::subss(dst, Address(rscratch1, 0));
3957 }
3958 }
3960 void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src) {
3961 if (reachable(src)) {
3962 Assembler::ucomisd(dst, as_Address(src));
3963 } else {
3964 lea(rscratch1, src);
3965 Assembler::ucomisd(dst, Address(rscratch1, 0));
3966 }
3967 }
3969 void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src) {
3970 if (reachable(src)) {
3971 Assembler::ucomiss(dst, as_Address(src));
3972 } else {
3973 lea(rscratch1, src);
3974 Assembler::ucomiss(dst, Address(rscratch1, 0));
3975 }
3976 }
3978 void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src) {
3979 // Used in sign-bit flipping with aligned address.
3980 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
3981 if (reachable(src)) {
3982 Assembler::xorpd(dst, as_Address(src));
3983 } else {
3984 lea(rscratch1, src);
3985 Assembler::xorpd(dst, Address(rscratch1, 0));
3986 }
3987 }
3989 void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src) {
3990 // Used in sign-bit flipping with aligned address.
3991 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
3992 if (reachable(src)) {
3993 Assembler::xorps(dst, as_Address(src));
3994 } else {
3995 lea(rscratch1, src);
3996 Assembler::xorps(dst, Address(rscratch1, 0));
3997 }
3998 }
4000 void MacroAssembler::pshufb(XMMRegister dst, AddressLiteral src) {
4001 // Used in sign-bit flipping with aligned address.
4002 bool aligned_adr = (((intptr_t)src.target() & 15) == 0);
4003 assert((UseAVX > 0) || aligned_adr, "SSE mode requires address alignment 16 bytes");
4004 if (reachable(src)) {
4005 Assembler::pshufb(dst, as_Address(src));
4006 } else {
4007 lea(rscratch1, src);
4008 Assembler::pshufb(dst, Address(rscratch1, 0));
4009 }
4010 }
4012 // AVX 3-operands instructions
4014 void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
4015 if (reachable(src)) {
4016 vaddsd(dst, nds, as_Address(src));
4017 } else {
4018 lea(rscratch1, src);
4019 vaddsd(dst, nds, Address(rscratch1, 0));
4020 }
4021 }
4023 void MacroAssembler::vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
4024 if (reachable(src)) {
4025 vaddss(dst, nds, as_Address(src));
4026 } else {
4027 lea(rscratch1, src);
4028 vaddss(dst, nds, Address(rscratch1, 0));
4029 }
4030 }
4032 void MacroAssembler::vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256) {
4033 if (reachable(src)) {
4034 vandpd(dst, nds, as_Address(src), vector256);
4035 } else {
4036 lea(rscratch1, src);
4037 vandpd(dst, nds, Address(rscratch1, 0), vector256);
4038 }
4039 }
4041 void MacroAssembler::vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256) {
4042 if (reachable(src)) {
4043 vandps(dst, nds, as_Address(src), vector256);
4044 } else {
4045 lea(rscratch1, src);
4046 vandps(dst, nds, Address(rscratch1, 0), vector256);
4047 }
4048 }
4050 void MacroAssembler::vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
4051 if (reachable(src)) {
4052 vdivsd(dst, nds, as_Address(src));
4053 } else {
4054 lea(rscratch1, src);
4055 vdivsd(dst, nds, Address(rscratch1, 0));
4056 }
4057 }
4059 void MacroAssembler::vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
4060 if (reachable(src)) {
4061 vdivss(dst, nds, as_Address(src));
4062 } else {
4063 lea(rscratch1, src);
4064 vdivss(dst, nds, Address(rscratch1, 0));
4065 }
4066 }
4068 void MacroAssembler::vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
4069 if (reachable(src)) {
4070 vmulsd(dst, nds, as_Address(src));
4071 } else {
4072 lea(rscratch1, src);
4073 vmulsd(dst, nds, Address(rscratch1, 0));
4074 }
4075 }
4077 void MacroAssembler::vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
4078 if (reachable(src)) {
4079 vmulss(dst, nds, as_Address(src));
4080 } else {
4081 lea(rscratch1, src);
4082 vmulss(dst, nds, Address(rscratch1, 0));
4083 }
4084 }
4086 void MacroAssembler::vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
4087 if (reachable(src)) {
4088 vsubsd(dst, nds, as_Address(src));
4089 } else {
4090 lea(rscratch1, src);
4091 vsubsd(dst, nds, Address(rscratch1, 0));
4092 }
4093 }
4095 void MacroAssembler::vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
4096 if (reachable(src)) {
4097 vsubss(dst, nds, as_Address(src));
4098 } else {
4099 lea(rscratch1, src);
4100 vsubss(dst, nds, Address(rscratch1, 0));
4101 }
4102 }
4104 void MacroAssembler::vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256) {
4105 if (reachable(src)) {
4106 vxorpd(dst, nds, as_Address(src), vector256);
4107 } else {
4108 lea(rscratch1, src);
4109 vxorpd(dst, nds, Address(rscratch1, 0), vector256);
4110 }
4111 }
4113 void MacroAssembler::vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256) {
4114 if (reachable(src)) {
4115 vxorps(dst, nds, as_Address(src), vector256);
4116 } else {
4117 lea(rscratch1, src);
4118 vxorps(dst, nds, Address(rscratch1, 0), vector256);
4119 }
4120 }
4123 //////////////////////////////////////////////////////////////////////////////////
4124 #if INCLUDE_ALL_GCS
4126 void MacroAssembler::g1_write_barrier_pre(Register obj,
4127 Register pre_val,
4128 Register thread,
4129 Register tmp,
4130 bool tosca_live,
4131 bool expand_call) {
4133 // If expand_call is true then we expand the call_VM_leaf macro
4134 // directly to skip generating the check by
4135 // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
4137 #ifdef _LP64
4138 assert(thread == r15_thread, "must be");
4139 #endif // _LP64
4141 Label done;
4142 Label runtime;
4144 assert(pre_val != noreg, "check this code");
4146 if (obj != noreg) {
4147 assert_different_registers(obj, pre_val, tmp);
4148 assert(pre_val != rax, "check this code");
4149 }
4151 Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
4152 PtrQueue::byte_offset_of_active()));
4153 Address index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
4154 PtrQueue::byte_offset_of_index()));
4155 Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
4156 PtrQueue::byte_offset_of_buf()));
4159 // Is marking active?
4160 if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
4161 cmpl(in_progress, 0);
4162 } else {
4163 assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
4164 cmpb(in_progress, 0);
4165 }
4166 jcc(Assembler::equal, done);
4168 // Do we need to load the previous value?
4169 if (obj != noreg) {
4170 load_heap_oop(pre_val, Address(obj, 0));
4171 }
4173 // Is the previous value null?
4174 cmpptr(pre_val, (int32_t) NULL_WORD);
4175 jcc(Assembler::equal, done);
4177 // Can we store original value in the thread's buffer?
4178 // Is index == 0?
4179 // (The index field is typed as size_t.)
4181 movptr(tmp, index); // tmp := *index_adr
4182 cmpptr(tmp, 0); // tmp == 0?
4183 jcc(Assembler::equal, runtime); // If yes, goto runtime
4185 subptr(tmp, wordSize); // tmp := tmp - wordSize
4186 movptr(index, tmp); // *index_adr := tmp
4187 addptr(tmp, buffer); // tmp := tmp + *buffer_adr
4189 // Record the previous value
4190 movptr(Address(tmp, 0), pre_val);
4191 jmp(done);
4193 bind(runtime);
4194 // save the live input values
4195 if(tosca_live) push(rax);
4197 if (obj != noreg && obj != rax)
4198 push(obj);
4200 if (pre_val != rax)
4201 push(pre_val);
4203 // Calling the runtime using the regular call_VM_leaf mechanism generates
4204 // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
4205 // that checks that the *(ebp+frame::interpreter_frame_last_sp) == NULL.
4206 //
4207 // If we are generating the pre-barrier without a frame (e.g. in the
4208 // intrinsified Reference.get() routine) then ebp might be pointing to
4209 // the caller frame and so this check will most likely fail at runtime.
4210 //
4211 // Expanding the call directly bypasses the generation of the check.
4212 // So when we do not have a full interpreter frame on the stack
4213 // expand_call should be passed true.
4215 NOT_LP64( push(thread); )
4217 if (expand_call) {
4218 LP64_ONLY( assert(pre_val != c_rarg1, "smashed arg"); )
4219 pass_arg1(this, thread);
4220 pass_arg0(this, pre_val);
4221 MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), 2);
4222 } else {
4223 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
4224 }
4226 NOT_LP64( pop(thread); )
4228 // restore the live input values
4229 if (pre_val != rax)
4230 pop(pre_val);
4232 if (obj != noreg && obj != rax)
4233 pop(obj);
4235 if(tosca_live) pop(rax);
4237 bind(done);
4238 }
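// The fast path above, in outline (illustrative pseudo-code, not the
// emitted instruction sequence):
//   if (thread->satb_queue_active) {
//     if (obj != NULL) pre_val = *obj;      // load previous value
//     if (pre_val != NULL) {
//       if (index == 0) goto runtime;       // queue is full
//       index -= wordSize;
//       *(buffer + index) = pre_val;        // log it for the marker
//     }
//   }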
4240 void MacroAssembler::g1_write_barrier_post(Register store_addr,
4241 Register new_val,
4242 Register thread,
4243 Register tmp,
4244 Register tmp2) {
4245 #ifdef _LP64
4246 assert(thread == r15_thread, "must be");
4247 #endif // _LP64
4249 Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
4250 PtrQueue::byte_offset_of_index()));
4251 Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
4252 PtrQueue::byte_offset_of_buf()));
4254 BarrierSet* bs = Universe::heap()->barrier_set();
4255 CardTableModRefBS* ct = (CardTableModRefBS*)bs;
4256 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
4258 Label done;
4259 Label runtime;
4261 // Does store cross heap regions?
4263 movptr(tmp, store_addr);
4264 xorptr(tmp, new_val);
4265 shrptr(tmp, HeapRegion::LogOfHRGrainBytes);
4266 jcc(Assembler::equal, done);
4268 // crosses regions, storing NULL?
4270 cmpptr(new_val, (int32_t) NULL_WORD);
4271 jcc(Assembler::equal, done);
4273 // storing region crossing non-NULL, is card already dirty?
4275 const Register card_addr = tmp;
4276 const Register cardtable = tmp2;
4278 movptr(card_addr, store_addr);
4279 shrptr(card_addr, CardTableModRefBS::card_shift);
4280 // Do not use ExternalAddress to load 'byte_map_base', since 'byte_map_base' is NOT
4281 // a valid address and therefore is not properly handled by the relocation code.
4282 movptr(cardtable, (intptr_t)ct->byte_map_base);
4283 addptr(card_addr, cardtable);
4285 cmpb(Address(card_addr, 0), (int)G1SATBCardTableModRefBS::g1_young_card_val());
4286 jcc(Assembler::equal, done);
4288 membar(Assembler::Membar_mask_bits(Assembler::StoreLoad));
4289 cmpb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
4290 jcc(Assembler::equal, done);
4293 // storing a region-crossing, non-NULL oop and the card is clean:
4294 // dirty the card and log it.
4296 movb(Address(card_addr, 0), (int)CardTableModRefBS::dirty_card_val());
4298 cmpl(queue_index, 0);
4299 jcc(Assembler::equal, runtime);
4300 subl(queue_index, wordSize);
4301 movptr(tmp2, buffer);
4302 #ifdef _LP64
4303 movslq(rscratch1, queue_index);
4304 addq(tmp2, rscratch1);
4305 movq(Address(tmp2, 0), card_addr);
4306 #else
4307 addl(tmp2, queue_index);
4308 movl(Address(tmp2, 0), card_addr);
4309 #endif
4310 jmp(done);
4312 bind(runtime);
4313 // save the live input values
4314 push(store_addr);
4315 push(new_val);
4316 #ifdef _LP64
4317 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, r15_thread);
4318 #else
4319 push(thread);
4320 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
4321 pop(thread);
4322 #endif
4323 pop(new_val);
4324 pop(store_addr);
4326 bind(done);
4327 }
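// In outline (illustrative pseudo-code):
//   if (((store_addr ^ new_val) >> LogOfHRGrainBytes) == 0) return; // same region
//   if (new_val == NULL) return;
//   card = byte_map_base + (store_addr >> card_shift);
//   if (*card == g1_young_card_val) return;
//   StoreLoad;                                   // membar
//   if (*card == dirty_card_val) return;
//   *card = dirty_card_val;
//   enqueue(card);                               // runtime call if queue full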
4329 #endif // INCLUDE_ALL_GCS
4330 //////////////////////////////////////////////////////////////////////////////////
4333 void MacroAssembler::store_check(Register obj) {
4334 // Does a store check for the oop in register obj. The content of
4335 // register obj is destroyed afterwards.
4336 store_check_part_1(obj);
4337 store_check_part_2(obj);
4338 }
4340 void MacroAssembler::store_check(Register obj, Address dst) {
4341 store_check(obj);
4342 }
4345 // split the store check operation so that other instructions can be scheduled inbetween
4346 void MacroAssembler::store_check_part_1(Register obj) {
4347 BarrierSet* bs = Universe::heap()->barrier_set();
4348 assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
4349 shrptr(obj, CardTableModRefBS::card_shift);
4350 }
4352 void MacroAssembler::store_check_part_2(Register obj) {
4353 BarrierSet* bs = Universe::heap()->barrier_set();
4354 assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
4355 CardTableModRefBS* ct = (CardTableModRefBS*)bs;
4356 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
4358 // The calculation for byte_map_base is as follows:
4359 // byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
4360 // So this essentially converts an address to a displacement and it will
4361 // never need to be relocated. On 64bit, however, the value may be too
4362 // large for a 32bit displacement.
4363 intptr_t disp = (intptr_t) ct->byte_map_base;
4364 if (is_simm32(disp)) {
4365 Address cardtable(noreg, obj, Address::times_1, disp);
4366 movb(cardtable, 0);
4367 } else {
4368 // By doing it as an ExternalAddress 'disp' could be converted to a rip-relative
4369 // displacement and done in a single instruction given favorable mapping and a
4370 // smarter version of as_Address. However, 'ExternalAddress' generates a relocation
4371 // entry and that entry is not properly handled by the relocation code.
4372 AddressLiteral cardtable((address)ct->byte_map_base, relocInfo::none);
4373 Address index(noreg, obj, Address::times_1);
4374 movb(as_Address(ArrayAddress(cardtable, index)), 0);
4375 }
4376 }
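// Worked example (illustrative, assuming the usual 512-byte cards, i.e.
// card_shift == 9): a store to 0x00007f0000001234 dirties the byte at
// byte_map_base + (0x00007f0000001234 >> 9). Because byte_map_base already
// has -(low_bound >> 9) folded in, the movb needs no relocation whenever
// that sum fits a 32-bit displacement.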
4378 void MacroAssembler::subptr(Register dst, int32_t imm32) {
4379 LP64_ONLY(subq(dst, imm32)) NOT_LP64(subl(dst, imm32));
4380 }
4382 // Force generation of a 4 byte immediate value even if it fits into 8bit
4383 void MacroAssembler::subptr_imm32(Register dst, int32_t imm32) {
4384 LP64_ONLY(subq_imm32(dst, imm32)) NOT_LP64(subl_imm32(dst, imm32));
4385 }
4387 void MacroAssembler::subptr(Register dst, Register src) {
4388 LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src));
4389 }
4391 // C++ bool manipulation
4392 void MacroAssembler::testbool(Register dst) {
4393 if(sizeof(bool) == 1)
4394 testb(dst, 0xff);
4395 else if(sizeof(bool) == 2) {
4396 // testw implementation needed for two byte bools
4397 ShouldNotReachHere();
4398 } else if(sizeof(bool) == 4)
4399 testl(dst, dst);
4400 else
4401 // unsupported
4402 ShouldNotReachHere();
4403 }
4405 void MacroAssembler::testptr(Register dst, Register src) {
4406 LP64_ONLY(testq(dst, src)) NOT_LP64(testl(dst, src));
4407 }
4409 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
4410 void MacroAssembler::tlab_allocate(Register obj,
4411 Register var_size_in_bytes,
4412 int con_size_in_bytes,
4413 Register t1,
4414 Register t2,
4415 Label& slow_case) {
4416 assert_different_registers(obj, t1, t2);
4417 assert_different_registers(obj, var_size_in_bytes, t1);
4418 Register end = t2;
4419 Register thread = NOT_LP64(t1) LP64_ONLY(r15_thread);
4421 verify_tlab();
4423 NOT_LP64(get_thread(thread));
4425 movptr(obj, Address(thread, JavaThread::tlab_top_offset()));
4426 if (var_size_in_bytes == noreg) {
4427 lea(end, Address(obj, con_size_in_bytes));
4428 } else {
4429 lea(end, Address(obj, var_size_in_bytes, Address::times_1));
4430 }
4431 cmpptr(end, Address(thread, JavaThread::tlab_end_offset()));
4432 jcc(Assembler::above, slow_case);
4434 // update the tlab top pointer
4435 movptr(Address(thread, JavaThread::tlab_top_offset()), end);
4437 // recover var_size_in_bytes if necessary
4438 if (var_size_in_bytes == end) {
4439 subptr(var_size_in_bytes, obj);
4440 }
4441 verify_tlab();
4442 }
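// Bump-pointer allocation in outline (illustrative pseudo-code):
//   obj = thread->tlab_top;
//   end = obj + size;                            // constant or register size
//   if (end > thread->tlab_end) goto slow_case;  // unsigned 'above' compare
//   thread->tlab_top = end;
//   // obj now points at the newly reserved space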
4444 // Preserves rbx and rdx.
4445 Register MacroAssembler::tlab_refill(Label& retry,
4446 Label& try_eden,
4447 Label& slow_case) {
4448 Register top = rax;
4449 Register t1 = rcx;
4450 Register t2 = rsi;
4451 Register thread_reg = NOT_LP64(rdi) LP64_ONLY(r15_thread);
4452 assert_different_registers(top, thread_reg, t1, t2, /* preserve: */ rbx, rdx);
4453 Label do_refill, discard_tlab;
4455 if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
4456 // No allocation in the shared eden.
4457 jmp(slow_case);
4458 }
4460 NOT_LP64(get_thread(thread_reg));
4462 movptr(top, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
4463 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));
4465 // calculate amount of free space
4466 subptr(t1, top);
4467 shrptr(t1, LogHeapWordSize);
4469 // Retain tlab and allocate object in shared space if
4470 // the amount free in the tlab is too large to discard.
4471 cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())));
4472 jcc(Assembler::lessEqual, discard_tlab);
4474 // Retain
4475 // %%% yuck as movptr...
4476 movptr(t2, (int32_t) ThreadLocalAllocBuffer::refill_waste_limit_increment());
4477 addptr(Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())), t2);
4478 if (TLABStats) {
4479 // increment number of slow_allocations
4480 addl(Address(thread_reg, in_bytes(JavaThread::tlab_slow_allocations_offset())), 1);
4481 }
4482 jmp(try_eden);
4484 bind(discard_tlab);
4485 if (TLABStats) {
4486 // increment number of refills
4487 addl(Address(thread_reg, in_bytes(JavaThread::tlab_number_of_refills_offset())), 1);
4488 // accumulate wastage -- t1 is amount free in tlab
4489 addl(Address(thread_reg, in_bytes(JavaThread::tlab_fast_refill_waste_offset())), t1);
4490 }
4492 // if tlab is currently allocated (top or end != null) then
4493 // fill [top, end + alignment_reserve) with array object
4494 testptr(top, top);
4495 jcc(Assembler::zero, do_refill);
4497 // set up the mark word
4498 movptr(Address(top, oopDesc::mark_offset_in_bytes()), (intptr_t)markOopDesc::prototype()->copy_set_hash(0x2));
4499 // set the length to the remaining space
4500 subptr(t1, typeArrayOopDesc::header_size(T_INT));
4501 addptr(t1, (int32_t)ThreadLocalAllocBuffer::alignment_reserve());
4502 shlptr(t1, log2_intptr(HeapWordSize/sizeof(jint)));
4503 movl(Address(top, arrayOopDesc::length_offset_in_bytes()), t1);
4504 // set klass to intArrayKlass
4505 // dubious reloc: why not an oop reloc?
4506 movptr(t1, ExternalAddress((address)Universe::intArrayKlassObj_addr()));
4507 // store klass last. concurrent gcs assume the length is valid if the
4508 // klass field is not null.
4509 store_klass(top, t1);
4511 movptr(t1, top);
4512 subptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())));
4513 incr_allocated_bytes(thread_reg, t1, 0);
4515 // refill the tlab with an eden allocation
4516 bind(do_refill);
4517 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
4518 shlptr(t1, LogHeapWordSize);
4519 // allocate new tlab, address returned in top
4520 eden_allocate(top, t1, 0, t2, slow_case);
4522 // Check that t1 was preserved in eden_allocate.
4523 #ifdef ASSERT
4524 if (UseTLAB) {
4525 Label ok;
4526 Register tsize = rsi;
4527 assert_different_registers(tsize, thread_reg, t1);
4528 push(tsize);
4529 movptr(tsize, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
4530 shlptr(tsize, LogHeapWordSize);
4531 cmpptr(t1, tsize);
4532 jcc(Assembler::equal, ok);
4533 STOP("assert(t1 != tlab size)");
4534 should_not_reach_here();
4536 bind(ok);
4537 pop(tsize);
4538 }
4539 #endif
4540 movptr(Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())), top);
4541 movptr(Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())), top);
4542 addptr(top, t1);
4543 subptr(top, (int32_t)ThreadLocalAllocBuffer::alignment_reserve_in_bytes());
4544 movptr(Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())), top);
4545 verify_tlab();
4546 jmp(retry);
4548 return thread_reg; // for use by caller
4549 }
4551 void MacroAssembler::incr_allocated_bytes(Register thread,
4552 Register var_size_in_bytes,
4553 int con_size_in_bytes,
4554 Register t1) {
4555 if (!thread->is_valid()) {
4556 #ifdef _LP64
4557 thread = r15_thread;
4558 #else
4559 assert(t1->is_valid(), "need temp reg");
4560 thread = t1;
4561 get_thread(thread);
4562 #endif
4563 }
4565 #ifdef _LP64
4566 if (var_size_in_bytes->is_valid()) {
4567 addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
4568 } else {
4569 addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
4570 }
4571 #else
4572 if (var_size_in_bytes->is_valid()) {
4573 addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
4574 } else {
4575 addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
4576 }
4577 adcl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())+4), 0);
4578 #endif
4579 }
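// On 32-bit the allocated-bytes counter is 64 bits wide, updated with an
// addl/adcl pair. Worked example (illustrative): low word 0xFFFFFF00 plus
// 0x200 wraps to 0x00000100 and sets CF, and the adcl above adds that
// carry into the high word, keeping the 64-bit total exact.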
4581 void MacroAssembler::fp_runtime_fallback(address runtime_entry, int nb_args, int num_fpu_regs_in_use) {
4582 pusha();
4584 // if we are coming from c1, xmm registers may be live
4585 int off = 0;
4586 if (UseSSE == 1) {
4587 subptr(rsp, sizeof(jdouble)*8);
4588 movflt(Address(rsp,off++*sizeof(jdouble)),xmm0);
4589 movflt(Address(rsp,off++*sizeof(jdouble)),xmm1);
4590 movflt(Address(rsp,off++*sizeof(jdouble)),xmm2);
4591 movflt(Address(rsp,off++*sizeof(jdouble)),xmm3);
4592 movflt(Address(rsp,off++*sizeof(jdouble)),xmm4);
4593 movflt(Address(rsp,off++*sizeof(jdouble)),xmm5);
4594 movflt(Address(rsp,off++*sizeof(jdouble)),xmm6);
4595 movflt(Address(rsp,off++*sizeof(jdouble)),xmm7);
4596 } else if (UseSSE >= 2) {
4597 #ifdef COMPILER2
4598 if (MaxVectorSize > 16) {
4599 assert(UseAVX > 0, "256bit vectors are supported only with AVX");
4600 // Save upper half of YMM registers
4601 subptr(rsp, 16 * LP64_ONLY(16) NOT_LP64(8));
4602 vextractf128h(Address(rsp, 0),xmm0);
4603 vextractf128h(Address(rsp, 16),xmm1);
4604 vextractf128h(Address(rsp, 32),xmm2);
4605 vextractf128h(Address(rsp, 48),xmm3);
4606 vextractf128h(Address(rsp, 64),xmm4);
4607 vextractf128h(Address(rsp, 80),xmm5);
4608 vextractf128h(Address(rsp, 96),xmm6);
4609 vextractf128h(Address(rsp,112),xmm7);
4610 #ifdef _LP64
4611 vextractf128h(Address(rsp,128),xmm8);
4612 vextractf128h(Address(rsp,144),xmm9);
4613 vextractf128h(Address(rsp,160),xmm10);
4614 vextractf128h(Address(rsp,176),xmm11);
4615 vextractf128h(Address(rsp,192),xmm12);
4616 vextractf128h(Address(rsp,208),xmm13);
4617 vextractf128h(Address(rsp,224),xmm14);
4618 vextractf128h(Address(rsp,240),xmm15);
4619 #endif
4620 }
4621 #endif
4622 // Save whole 128bit (16 bytes) XMM registers
4623 subptr(rsp, 16 * LP64_ONLY(16) NOT_LP64(8));
4624 movdqu(Address(rsp,off++*16),xmm0);
4625 movdqu(Address(rsp,off++*16),xmm1);
4626 movdqu(Address(rsp,off++*16),xmm2);
4627 movdqu(Address(rsp,off++*16),xmm3);
4628 movdqu(Address(rsp,off++*16),xmm4);
4629 movdqu(Address(rsp,off++*16),xmm5);
4630 movdqu(Address(rsp,off++*16),xmm6);
4631 movdqu(Address(rsp,off++*16),xmm7);
4632 #ifdef _LP64
4633 movdqu(Address(rsp,off++*16),xmm8);
4634 movdqu(Address(rsp,off++*16),xmm9);
4635 movdqu(Address(rsp,off++*16),xmm10);
4636 movdqu(Address(rsp,off++*16),xmm11);
4637 movdqu(Address(rsp,off++*16),xmm12);
4638 movdqu(Address(rsp,off++*16),xmm13);
4639 movdqu(Address(rsp,off++*16),xmm14);
4640 movdqu(Address(rsp,off++*16),xmm15);
4641 #endif
4642 }
4644 // Preserve registers across runtime call
4645 int incoming_argument_and_return_value_offset = -1;
4646 if (num_fpu_regs_in_use > 1) {
4647 // Must preserve all other FPU regs (could alternatively convert
4648 // SharedRuntime::dsin, dcos etc. into assembly routines known not to trash
4649 // FPU state, but cannot trust the C compiler)
4650 NEEDS_CLEANUP;
4651 // NOTE that in this case we also push the incoming argument(s) to
4652 // the stack and restore them later; we also use this stack slot to
4653 // hold the return value from dsin, dcos etc.
4654 for (int i = 0; i < num_fpu_regs_in_use; i++) {
4655 subptr(rsp, sizeof(jdouble));
4656 fstp_d(Address(rsp, 0));
4657 }
4658 incoming_argument_and_return_value_offset = sizeof(jdouble)*(num_fpu_regs_in_use-1);
4659 for (int i = nb_args-1; i >= 0; i--) {
4660 fld_d(Address(rsp, incoming_argument_and_return_value_offset-i*sizeof(jdouble)));
4661 }
4662 }
4664 subptr(rsp, nb_args*sizeof(jdouble));
4665 for (int i = 0; i < nb_args; i++) {
4666 fstp_d(Address(rsp, i*sizeof(jdouble)));
4667 }
4669 #ifdef _LP64
4670 if (nb_args > 0) {
4671 movdbl(xmm0, Address(rsp, 0));
4672 }
4673 if (nb_args > 1) {
4674 movdbl(xmm1, Address(rsp, sizeof(jdouble)));
4675 }
4676 assert(nb_args <= 2, "unsupported number of args");
4677 #endif // _LP64
4679 // NOTE: we must not use call_VM_leaf here because that requires a
4680 // complete interpreter frame in debug mode -- same bug as 4387334
4681 // MacroAssembler::call_VM_leaf_base is perfectly safe and will
4682 // use the proper 64bit ABI
4684 NEEDS_CLEANUP;
4685 // Need to add stack banging before this runtime call if it needs to
4686 // be taken; however, there is no generic stack banging routine at
4687 // the MacroAssembler level
4689 MacroAssembler::call_VM_leaf_base(runtime_entry, 0);
4691 #ifdef _LP64
4692 movsd(Address(rsp, 0), xmm0);
4693 fld_d(Address(rsp, 0));
4694 #endif // _LP64
4695 addptr(rsp, sizeof(jdouble) * nb_args);
4696 if (num_fpu_regs_in_use > 1) {
4697 // Must save return value to stack and then restore entire FPU
4698 // stack except incoming arguments
4699 fstp_d(Address(rsp, incoming_argument_and_return_value_offset));
4700 for (int i = 0; i < num_fpu_regs_in_use - nb_args; i++) {
4701 fld_d(Address(rsp, 0));
4702 addptr(rsp, sizeof(jdouble));
4703 }
4704 fld_d(Address(rsp, (nb_args-1)*sizeof(jdouble)));
4705 addptr(rsp, sizeof(jdouble) * nb_args);
4706 }
4708 off = 0;
4709 if (UseSSE == 1) {
4710 movflt(xmm0, Address(rsp,off++*sizeof(jdouble)));
4711 movflt(xmm1, Address(rsp,off++*sizeof(jdouble)));
4712 movflt(xmm2, Address(rsp,off++*sizeof(jdouble)));
4713 movflt(xmm3, Address(rsp,off++*sizeof(jdouble)));
4714 movflt(xmm4, Address(rsp,off++*sizeof(jdouble)));
4715 movflt(xmm5, Address(rsp,off++*sizeof(jdouble)));
4716 movflt(xmm6, Address(rsp,off++*sizeof(jdouble)));
4717 movflt(xmm7, Address(rsp,off++*sizeof(jdouble)));
4718 addptr(rsp, sizeof(jdouble)*8);
4719 } else if (UseSSE >= 2) {
4720 // Restore whole 128bit (16 bytes) XMM registers
4721 movdqu(xmm0, Address(rsp,off++*16));
4722 movdqu(xmm1, Address(rsp,off++*16));
4723 movdqu(xmm2, Address(rsp,off++*16));
4724 movdqu(xmm3, Address(rsp,off++*16));
4725 movdqu(xmm4, Address(rsp,off++*16));
4726 movdqu(xmm5, Address(rsp,off++*16));
4727 movdqu(xmm6, Address(rsp,off++*16));
4728 movdqu(xmm7, Address(rsp,off++*16));
4729 #ifdef _LP64
4730 movdqu(xmm8, Address(rsp,off++*16));
4731 movdqu(xmm9, Address(rsp,off++*16));
4732 movdqu(xmm10, Address(rsp,off++*16));
4733 movdqu(xmm11, Address(rsp,off++*16));
4734 movdqu(xmm12, Address(rsp,off++*16));
4735 movdqu(xmm13, Address(rsp,off++*16));
4736 movdqu(xmm14, Address(rsp,off++*16));
4737 movdqu(xmm15, Address(rsp,off++*16));
4738 #endif
4739 addptr(rsp, 16 * LP64_ONLY(16) NOT_LP64(8));
4740 #ifdef COMPILER2
4741 if (MaxVectorSize > 16) {
4742 // Restore upper half of YMM registers.
4743 vinsertf128h(xmm0, Address(rsp, 0));
4744 vinsertf128h(xmm1, Address(rsp, 16));
4745 vinsertf128h(xmm2, Address(rsp, 32));
4746 vinsertf128h(xmm3, Address(rsp, 48));
4747 vinsertf128h(xmm4, Address(rsp, 64));
4748 vinsertf128h(xmm5, Address(rsp, 80));
4749 vinsertf128h(xmm6, Address(rsp, 96));
4750 vinsertf128h(xmm7, Address(rsp,112));
4751 #ifdef _LP64
4752 vinsertf128h(xmm8, Address(rsp,128));
4753 vinsertf128h(xmm9, Address(rsp,144));
4754 vinsertf128h(xmm10, Address(rsp,160));
4755 vinsertf128h(xmm11, Address(rsp,176));
4756 vinsertf128h(xmm12, Address(rsp,192));
4757 vinsertf128h(xmm13, Address(rsp,208));
4758 vinsertf128h(xmm14, Address(rsp,224));
4759 vinsertf128h(xmm15, Address(rsp,240));
4760 #endif
4761 addptr(rsp, 16 * LP64_ONLY(16) NOT_LP64(8));
4762 }
4763 #endif
4764 }
4765 popa();
4766 }
4768 static const double pi_4 = 0.7853981633974483;
4770 void MacroAssembler::trigfunc(char trig, int num_fpu_regs_in_use) {
4771 // A hand-coded argument reduction for values in fabs(pi/4, pi/2)
4772 // was attempted in this code; unfortunately it appears that the
4773 // switch to 80-bit precision and back causes this to be
4774 // unprofitable compared with simply performing a runtime call if
4775 // the argument is out of the (-pi/4, pi/4) range.
4777 Register tmp = noreg;
4778 if (!VM_Version::supports_cmov()) {
4779 // fcmp needs a temporary, so preserve rbx
4780 tmp = rbx;
4781 push(tmp);
4782 }
4784 Label slow_case, done;
4786 ExternalAddress pi4_adr = (address)&pi_4;
4787 if (reachable(pi4_adr)) {
4788 // x ?<= pi/4
4789 fld_d(pi4_adr);
4790 fld_s(1); // Stack: X PI/4 X
4791 fabs(); // Stack: |X| PI/4 X
4792 fcmp(tmp);
4793 jcc(Assembler::above, slow_case);
4795 // fastest case: -pi/4 <= x <= pi/4
4796 switch(trig) {
4797 case 's':
4798 fsin();
4799 break;
4800 case 'c':
4801 fcos();
4802 break;
4803 case 't':
4804 ftan();
4805 break;
4806 default:
4807 assert(false, "bad intrinsic");
4808 break;
4809 }
4810 jmp(done);
4811 }
4813 // slow case: runtime call
4814 bind(slow_case);
4816 switch(trig) {
4817 case 's':
4818 {
4819 fp_runtime_fallback(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), 1, num_fpu_regs_in_use);
4820 }
4821 break;
4822 case 'c':
4823 {
4824 fp_runtime_fallback(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), 1, num_fpu_regs_in_use);
4825 }
4826 break;
4827 case 't':
4828 {
4829 fp_runtime_fallback(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), 1, num_fpu_regs_in_use);
4830 }
4831 break;
4832 default:
4833 assert(false, "bad intrinsic");
4834 break;
4835 }
4837 // Come here with result in F-TOS
4838 bind(done);
4840 if (tmp != noreg) {
4841 pop(tmp);
4842 }
4843 }
4846 // Look up the method for a megamorphic invokeinterface call.
4847 // The target method is determined by <intf_klass, itable_index>.
4848 // The receiver klass is in recv_klass.
4849 // On success, the result will be in method_result, and execution falls through.
4850 // On failure, execution transfers to the given label.
4851 void MacroAssembler::lookup_interface_method(Register recv_klass,
4852 Register intf_klass,
4853 RegisterOrConstant itable_index,
4854 Register method_result,
4855 Register scan_temp,
4856 Label& L_no_such_interface,
4857 bool return_method) {
4858 assert_different_registers(recv_klass, intf_klass, scan_temp);
4859 assert_different_registers(method_result, intf_klass, scan_temp);
4860 assert(recv_klass != method_result || !return_method,
4861 "recv_klass can be destroyed when method isn't needed");
4863 assert(itable_index.is_constant() || itable_index.as_register() == method_result,
4864 "caller must use same register for non-constant itable index as for method");
4866 // Compute start of first itableOffsetEntry (which is at the end of the vtable)
4867 int vtable_base = InstanceKlass::vtable_start_offset() * wordSize;
4868 int itentry_off = itableMethodEntry::method_offset_in_bytes();
4869 int scan_step = itableOffsetEntry::size() * wordSize;
4870 int vte_size = vtableEntry::size() * wordSize;
4871 Address::ScaleFactor times_vte_scale = Address::times_ptr;
4872 assert(vte_size == wordSize, "else adjust times_vte_scale");
4874 movl(scan_temp, Address(recv_klass, InstanceKlass::vtable_length_offset() * wordSize));
4876 // %%% Could store the aligned, prescaled offset in the klassoop.
4877 lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
4878 if (HeapWordsPerLong > 1) {
4879 // Round up to align_object_offset boundary
4880 // see code for InstanceKlass::start_of_itable!
4881 round_to(scan_temp, BytesPerLong);
4882 }
4884 if (return_method) {
4885 // Adjust recv_klass by scaled itable_index, so we can free itable_index.
4886 assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
4887 lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
4888 }
4890 // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
4891 // if (scan->interface() == intf) {
4892 // result = (klass + scan->offset() + itable_index);
4893 // }
4894 // }
4895 Label search, found_method;
4897 for (int peel = 1; peel >= 0; peel--) {
4898 movptr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset_in_bytes()));
4899 cmpptr(intf_klass, method_result);
4901 if (peel) {
4902 jccb(Assembler::equal, found_method);
4903 } else {
4904 jccb(Assembler::notEqual, search);
4905 // (invert the test to fall through to found_method...)
4906 }
4908 if (!peel) break;
4910 bind(search);
4912 // Check that the previous entry is non-null. A null entry means that
4913 // the receiver class doesn't implement the interface, and wasn't the
4914 // same as when the caller was compiled.
4915 testptr(method_result, method_result);
4916 jcc(Assembler::zero, L_no_such_interface);
4917 addptr(scan_temp, scan_step);
4918 }
4920 bind(found_method);
4922 if (return_method) {
4923 // Got a hit.
4924 movl(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset_in_bytes()));
4925 movptr(method_result, Address(recv_klass, scan_temp, Address::times_1));
4926 }
4927 }
4930 // virtual method calling
4931 void MacroAssembler::lookup_virtual_method(Register recv_klass,
4932 RegisterOrConstant vtable_index,
4933 Register method_result) {
4934 const int base = InstanceKlass::vtable_start_offset() * wordSize;
4935 assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below");
4936 Address vtable_entry_addr(recv_klass,
4937 vtable_index, Address::times_ptr,
4938 base + vtableEntry::method_offset_in_bytes());
4939 movptr(method_result, vtable_entry_addr);
4940 }
4943 void MacroAssembler::check_klass_subtype(Register sub_klass,
4944 Register super_klass,
4945 Register temp_reg,
4946 Label& L_success) {
4947 Label L_failure;
4948 check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, NULL);
4949 check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, NULL);
4950 bind(L_failure);
4951 }
4954 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
4955 Register super_klass,
4956 Register temp_reg,
4957 Label* L_success,
4958 Label* L_failure,
4959 Label* L_slow_path,
4960 RegisterOrConstant super_check_offset) {
4961 assert_different_registers(sub_klass, super_klass, temp_reg);
4962 bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
4963 if (super_check_offset.is_register()) {
4964 assert_different_registers(sub_klass, super_klass,
4965 super_check_offset.as_register());
4966 } else if (must_load_sco) {
4967 assert(temp_reg != noreg, "supply either a temp or a register offset");
4968 }
4970 Label L_fallthrough;
4971 int label_nulls = 0;
4972 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
4973 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
4974 if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
4975 assert(label_nulls <= 1, "at most one NULL in the batch");
4977 int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
4978 int sco_offset = in_bytes(Klass::super_check_offset_offset());
4979 Address super_check_offset_addr(super_klass, sco_offset);
4981 // Hacked jcc, which "knows" that L_fallthrough, at least, is in
4982 // range of a jccb. If this routine grows larger, reconsider at
4983 // least some of these.
4984 #define local_jcc(assembler_cond, label) \
4985 if (&(label) == &L_fallthrough) jccb(assembler_cond, label); \
4986 else jcc( assembler_cond, label) /*omit semi*/
4988 // Hacked jmp, which may only be used just before L_fallthrough.
4989 #define final_jmp(label) \
4990 if (&(label) == &L_fallthrough) { /*do nothing*/ } \
4991 else jmp(label) /*omit semi*/
4993 // If the pointers are equal, we are done (e.g., String[] elements).
4994 // This self-check enables sharing of secondary supertype arrays among
4995 // non-primary types such as array-of-interface. Otherwise, each such
4996 // type would need its own customized SSA.
4997 // We move this check to the front of the fast path because many
4998 // type checks are in fact trivially successful in this manner,
4999 // so we get a nicely predicted branch right at the start of the check.
5000 cmpptr(sub_klass, super_klass);
5001 local_jcc(Assembler::equal, *L_success);
5003 // Check the supertype display:
5004 if (must_load_sco) {
5005 // Positive movl does the right thing on LP64.
5006 movl(temp_reg, super_check_offset_addr);
5007 super_check_offset = RegisterOrConstant(temp_reg);
5008 }
5009 Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0);
5010 cmpptr(super_klass, super_check_addr); // load displayed supertype
5012 // This check has worked decisively for primary supers.
5013 // Secondary supers are sought in the super_cache ('super_cache_addr').
5014 // (Secondary supers are interfaces and very deeply nested subtypes.)
5015 // This works in the same check above because of a tricky aliasing
5016 // between the super_cache and the primary super display elements.
5017 // (The 'super_check_addr' can address either, as the case requires.)
5018 // Note that the cache is updated below if it does not help us find
5019 // what we need immediately.
5020 // So if it was a primary super, we can just fail immediately.
5021 // Otherwise, it's the slow path for us (no success at this point).
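// The branching below, in C-like pseudocode (a sketch, not authoritative):
//   if (*(sub_klass + super_check_offset) == super_klass) goto success;
//   if (super_check_offset != sc_offset)                  goto failure;
//   goto slow_path;   // offset hit the secondary_super_cache slot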
5023 if (super_check_offset.is_register()) {
5024 local_jcc(Assembler::equal, *L_success);
5025 cmpl(super_check_offset.as_register(), sc_offset);
5026 if (L_failure == &L_fallthrough) {
5027 local_jcc(Assembler::equal, *L_slow_path);
5028 } else {
5029 local_jcc(Assembler::notEqual, *L_failure);
5030 final_jmp(*L_slow_path);
5031 }
5032 } else if (super_check_offset.as_constant() == sc_offset) {
5033 // Need a slow path; fast failure is impossible.
5034 if (L_slow_path == &L_fallthrough) {
5035 local_jcc(Assembler::equal, *L_success);
5036 } else {
5037 local_jcc(Assembler::notEqual, *L_slow_path);
5038 final_jmp(*L_success);
5039 }
5040 } else {
5041 // No slow path; it's a fast decision.
5042 if (L_failure == &L_fallthrough) {
5043 local_jcc(Assembler::equal, *L_success);
5044 } else {
5045 local_jcc(Assembler::notEqual, *L_failure);
5046 final_jmp(*L_success);
5047 }
5048 }
5050 bind(L_fallthrough);
5052 #undef local_jcc
5053 #undef final_jmp
5054 }
5057 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
5058 Register super_klass,
5059 Register temp_reg,
5060 Register temp2_reg,
5061 Label* L_success,
5062 Label* L_failure,
5063 bool set_cond_codes) {
5064 assert_different_registers(sub_klass, super_klass, temp_reg);
5065 if (temp2_reg != noreg)
5066 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg);
5067 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)
5069 Label L_fallthrough;
5070 int label_nulls = 0;
5071 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
5072 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
5073 assert(label_nulls <= 1, "at most one NULL in the batch");
5075 // a couple of useful fields in sub_klass:
5076 int ss_offset = in_bytes(Klass::secondary_supers_offset());
5077 int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
5078 Address secondary_supers_addr(sub_klass, ss_offset);
5079 Address super_cache_addr( sub_klass, sc_offset);
5081 // Do a linear scan of the secondary super-klass chain.
5082 // This code is rarely used, so simplicity is a virtue here.
5083 // The repne_scan instruction uses fixed registers, which we must spill.
5084 // Don't worry too much about pre-existing connections with the input regs.
5086 assert(sub_klass != rax, "killed reg"); // killed by mov(rax, super)
5087 assert(sub_klass != rcx, "killed reg"); // killed by lea(rcx, &pst_counter)
5089 // Get super_klass value into rax (even if it was in rdi or rcx).
5090 bool pushed_rax = false, pushed_rcx = false, pushed_rdi = false;
5091 if (super_klass != rax || UseCompressedOops) {
5092 if (!IS_A_TEMP(rax)) { push(rax); pushed_rax = true; }
5093 mov(rax, super_klass);
5094 }
5095 if (!IS_A_TEMP(rcx)) { push(rcx); pushed_rcx = true; }
5096 if (!IS_A_TEMP(rdi)) { push(rdi); pushed_rdi = true; }
5098 #ifndef PRODUCT
5099 int* pst_counter = &SharedRuntime::_partial_subtype_ctr;
5100 ExternalAddress pst_counter_addr((address) pst_counter);
5101 NOT_LP64( incrementl(pst_counter_addr) );
5102 LP64_ONLY( lea(rcx, pst_counter_addr) );
5103 LP64_ONLY( incrementl(Address(rcx, 0)) );
5104 #endif //PRODUCT
5106 // We will consult the secondary-super array.
5107 movptr(rdi, secondary_supers_addr);
5108 // Load the array length. (Positive movl does the right thing on LP64.)
5109 movl(rcx, Address(rdi, Array<Klass*>::length_offset_in_bytes()));
5110 // Skip to start of data.
5111 addptr(rdi, Array<Klass*>::base_offset_in_bytes());
5113 // Scan RCX words at [RDI] for an occurrence of RAX.
5114 // Set NZ/Z based on last compare.
5115 // The Z flag will not be set by 'repne' if RCX == 0, since 'repne' itself
5116 // does not change flags; only the repeated scas instruction sets them.
5117 // Set Z = 0 (not equal) before 'repne' to indicate that the class was not found.
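// What repne_scan() does here, as a C-like sketch (ptr-sized elements):
//   while (rcx-- != 0) { ZF = (*(intptr_t*)rdi == rax); rdi += wordSize; if (ZF) break; }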
5119 testptr(rax,rax); // Set Z = 0
5120 repne_scan();
5122 // Unspill the temp. registers:
5123 if (pushed_rdi) pop(rdi);
5124 if (pushed_rcx) pop(rcx);
5125 if (pushed_rax) pop(rax);
5127 if (set_cond_codes) {
5128 // Special hack for the AD files: rdi is guaranteed non-zero.
5129 assert(!pushed_rdi, "rdi must be left non-NULL");
5130 // Also, the condition codes are properly set Z/NZ on success/failure.
5131 }
5133 if (L_failure == &L_fallthrough)
5134 jccb(Assembler::notEqual, *L_failure);
5135 else jcc(Assembler::notEqual, *L_failure);
5137 // Success. Cache the super we found and proceed in triumph.
5138 movptr(super_cache_addr, super_klass);
5140 if (L_success != &L_fallthrough) {
5141 jmp(*L_success);
5142 }
5144 #undef IS_A_TEMP
5146 bind(L_fallthrough);
5147 }
5150 void MacroAssembler::cmov32(Condition cc, Register dst, Address src) {
5151 if (VM_Version::supports_cmov()) {
5152 cmovl(cc, dst, src);
5153 } else {
5154 Label L;
5155 jccb(negate_condition(cc), L);
5156 movl(dst, src);
5157 bind(L);
5158 }
5159 }
5161 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) {
5162 if (VM_Version::supports_cmov()) {
5163 cmovl(cc, dst, src);
5164 } else {
5165 Label L;
5166 jccb(negate_condition(cc), L);
5167 movl(dst, src);
5168 bind(L);
5169 }
5170 }
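// Both cmov32 overloads above emulate "dst = cc ? src : dst" (a sketch of
// the equivalence): CPUs without cmov take a short branch around the move.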
5172 void MacroAssembler::verify_oop(Register reg, const char* s) {
5173 if (!VerifyOops) return;
5175 // Pass register number to verify_oop_subroutine
5176 const char* b = NULL;
5177 {
5178 ResourceMark rm;
5179 stringStream ss;
5180 ss.print("verify_oop: %s: %s", reg->name(), s);
5181 b = code_string(ss.as_string());
5182 }
5183 BLOCK_COMMENT("verify_oop {");
5184 #ifdef _LP64
5185 push(rscratch1); // save r10, trashed by movptr()
5186 #endif
5187 push(rax); // save rax
5188 push(reg); // pass register argument
5189 ExternalAddress buffer((address) b);
5190 // avoid using pushptr, as it modifies scratch registers
5191 // and our contract is not to modify anything
5192 movptr(rax, buffer.addr());
5193 push(rax);
5194 // call indirectly to solve generation ordering problem
5195 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
5196 call(rax);
5197 // Caller pops the arguments (oop, message) and restores rax, r10
5198 BLOCK_COMMENT("} verify_oop");
5199 }
5202 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
5203 Register tmp,
5204 int offset) {
5205 intptr_t value = *delayed_value_addr;
5206 if (value != 0)
5207 return RegisterOrConstant(value + offset);
5209 // load indirectly to solve generation ordering problem
5210 movptr(tmp, ExternalAddress((address) delayed_value_addr));
5212 #ifdef ASSERT
5213 { Label L;
5214 testptr(tmp, tmp);
5215 if (WizardMode) {
5216 const char* buf = NULL;
5217 {
5218 ResourceMark rm;
5219 stringStream ss;
5220 ss.print("DelayedValue=" INTPTR_FORMAT, delayed_value_addr[1]);
5221 buf = code_string(ss.as_string());
5222 }
5223 jcc(Assembler::notZero, L);
5224 STOP(buf);
5225 } else {
5226 jccb(Assembler::notZero, L);
5227 hlt();
5228 }
5229 bind(L);
5230 }
5231 #endif
5233 if (offset != 0)
5234 addptr(tmp, offset);
5236 return RegisterOrConstant(tmp);
5237 }
5240 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
5241 int extra_slot_offset) {
5242 // cf. TemplateTable::prepare_invoke(), if (load_receiver).
5243 int stackElementSize = Interpreter::stackElementSize;
5244 int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
5245 #ifdef ASSERT
5246 int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
5247 assert(offset1 - offset == stackElementSize, "correct arithmetic");
5248 #endif
5249 Register scale_reg = noreg;
5250 Address::ScaleFactor scale_factor = Address::no_scale;
5251 if (arg_slot.is_constant()) {
5252 offset += arg_slot.as_constant() * stackElementSize;
5253 } else {
5254 scale_reg = arg_slot.as_register();
5255 scale_factor = Address::times(stackElementSize);
5256 }
5257 offset += wordSize; // return PC is on stack
5258 return Address(rsp, scale_reg, scale_factor, offset);
5259 }
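// Worked example (a sketch, assuming 64-bit, stackElementSize == 8 and
// expr_offset_in_bytes(i) == i * 8): arg_slot == 2, extra_slot_offset == 0
// yields Address(rsp, 2*8 + 8), i.e. two stack elements above the return PC.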
5262 void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
5263 if (!VerifyOops) return;
5265 // Address adjust(addr.base(), addr.index(), addr.scale(), addr.disp() + BytesPerWord);
5266 // Pass register number to verify_oop_subroutine
5267 const char* b = NULL;
5268 {
5269 ResourceMark rm;
5270 stringStream ss;
5271 ss.print("verify_oop_addr: %s", s);
5272 b = code_string(ss.as_string());
5273 }
5274 #ifdef _LP64
5275 push(rscratch1); // save r10, trashed by movptr()
5276 #endif
5277 push(rax); // save rax
5278 // addr may contain rsp so we will have to adjust it based on the push
5279 // we just did (and on 64 bit we do two pushes)
5280 // NOTE: the 64-bit code seemed to have a bug in that it did movq(addr, rax),
5281 // which stores rax into addr, the reverse of what was intended.
5282 if (addr.uses(rsp)) {
5283 lea(rax, addr);
5284 pushptr(Address(rax, LP64_ONLY(2 *) BytesPerWord));
5285 } else {
5286 pushptr(addr);
5287 }
5289 ExternalAddress buffer((address) b);
5290 // pass msg argument
5291 // avoid using pushptr, as it modifies scratch registers
5292 // and our contract is not to modify anything
5293 movptr(rax, buffer.addr());
5294 push(rax);
5296 // call indirectly to solve generation ordering problem
5297 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
5298 call(rax);
5299 // Caller pops the arguments (addr, message) and restores rax, r10.
5300 }
5302 void MacroAssembler::verify_tlab() {
5303 #ifdef ASSERT
5304 if (UseTLAB && VerifyOops) {
5305 Label next, ok;
5306 Register t1 = rsi;
5307 Register thread_reg = NOT_LP64(rbx) LP64_ONLY(r15_thread);
5309 push(t1);
5310 NOT_LP64(push(thread_reg));
5311 NOT_LP64(get_thread(thread_reg));
5313 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
5314 cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())));
5315 jcc(Assembler::aboveEqual, next);
5316 STOP("assert(top >= start)");
5317 should_not_reach_here();
5319 bind(next);
5320 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));
5321 cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
5322 jcc(Assembler::aboveEqual, ok);
5323 STOP("assert(top <= end)");
5324 should_not_reach_here();
5326 bind(ok);
5327 NOT_LP64(pop(thread_reg));
5328 pop(t1);
5329 }
5330 #endif
5331 }
5333 class ControlWord {
5334 public:
5335 int32_t _value;
5337 int rounding_control() const { return (_value >> 10) & 3 ; }
5338 int precision_control() const { return (_value >> 8) & 3 ; }
5339 bool precision() const { return ((_value >> 5) & 1) != 0; }
5340 bool underflow() const { return ((_value >> 4) & 1) != 0; }
5341 bool overflow() const { return ((_value >> 3) & 1) != 0; }
5342 bool zero_divide() const { return ((_value >> 2) & 1) != 0; }
5343 bool denormalized() const { return ((_value >> 1) & 1) != 0; }
5344 bool invalid() const { return ((_value >> 0) & 1) != 0; }
5346 void print() const {
5347 // rounding control
5348 const char* rc;
5349 switch (rounding_control()) {
5350 case 0: rc = "round near"; break;
5351 case 1: rc = "round down"; break;
5352 case 2: rc = "round up "; break;
5353 case 3: rc = "chop "; break;
5354 }
5355 // precision control
5356 const char* pc;
5357 switch (precision_control()) {
5358 case 0: pc = "24 bits "; break;
5359 case 1: pc = "reserved"; break;
5360 case 2: pc = "53 bits "; break;
5361 case 3: pc = "64 bits "; break;
5362 }
5363 // flags
5364 char f[9];
5365 f[0] = ' ';
5366 f[1] = ' ';
5367 f[2] = (precision ()) ? 'P' : 'p';
5368 f[3] = (underflow ()) ? 'U' : 'u';
5369 f[4] = (overflow ()) ? 'O' : 'o';
5370 f[5] = (zero_divide ()) ? 'Z' : 'z';
5371 f[6] = (denormalized()) ? 'D' : 'd';
5372 f[7] = (invalid ()) ? 'I' : 'i';
5373 f[8] = '\x0';
5374 // output
5375 printf("%04x masks = %s, %s, %s", _value & 0xFFFF, f, rc, pc);
5376 }
5378 };
5380 class StatusWord {
5381 public:
5382 int32_t _value;
5384 bool busy() const { return ((_value >> 15) & 1) != 0; }
5385 bool C3() const { return ((_value >> 14) & 1) != 0; }
5386 bool C2() const { return ((_value >> 10) & 1) != 0; }
5387 bool C1() const { return ((_value >> 9) & 1) != 0; }
5388 bool C0() const { return ((_value >> 8) & 1) != 0; }
5389 int top() const { return (_value >> 11) & 7 ; }
5390 bool error_status() const { return ((_value >> 7) & 1) != 0; }
5391 bool stack_fault() const { return ((_value >> 6) & 1) != 0; }
5392 bool precision() const { return ((_value >> 5) & 1) != 0; }
5393 bool underflow() const { return ((_value >> 4) & 1) != 0; }
5394 bool overflow() const { return ((_value >> 3) & 1) != 0; }
5395 bool zero_divide() const { return ((_value >> 2) & 1) != 0; }
5396 bool denormalized() const { return ((_value >> 1) & 1) != 0; }
5397 bool invalid() const { return ((_value >> 0) & 1) != 0; }
5399 void print() const {
5400 // condition codes
5401 char c[5];
5402 c[0] = (C3()) ? '3' : '-';
5403 c[1] = (C2()) ? '2' : '-';
5404 c[2] = (C1()) ? '1' : '-';
5405 c[3] = (C0()) ? '0' : '-';
5406 c[4] = '\x0';
5407 // flags
5408 char f[9];
5409 f[0] = (error_status()) ? 'E' : '-';
5410 f[1] = (stack_fault ()) ? 'S' : '-';
5411 f[2] = (precision ()) ? 'P' : '-';
5412 f[3] = (underflow ()) ? 'U' : '-';
5413 f[4] = (overflow ()) ? 'O' : '-';
5414 f[5] = (zero_divide ()) ? 'Z' : '-';
5415 f[6] = (denormalized()) ? 'D' : '-';
5416 f[7] = (invalid ()) ? 'I' : '-';
5417 f[8] = '\x0';
5418 // output
5419 printf("%04x flags = %s, cc = %s, top = %d", _value & 0xFFFF, f, c, top());
5420 }
5422 };
5424 class TagWord {
5425 public:
5426 int32_t _value;
5428 int tag_at(int i) const { return (_value >> (i*2)) & 3; }
5430 void print() const {
5431 printf("%04x", _value & 0xFFFF);
5432 }
5434 };
5436 class FPU_Register {
5437 public:
5438 int32_t _m0;
5439 int32_t _m1;
5440 int16_t _ex;
5442 bool is_indefinite() const {
5443 return _ex == -1 && _m1 == (int32_t)0xC0000000 && _m0 == 0;
5444 }
5446 void print() const {
5447 char sign = (_ex < 0) ? '-' : '+';
5448 const char* kind = (_ex == 0x7FFF || _ex == (int16_t)-1) ? "NaN" : " ";
5449 printf("%c%04hx.%08x%08x %s", sign, _ex, _m1, _m0, kind);
5450 }
5452 };
5454 class FPU_State {
5455 public:
5456 enum {
5457 register_size = 10,
5458 number_of_registers = 8,
5459 register_mask = 7
5460 };
5462 ControlWord _control_word;
5463 StatusWord _status_word;
5464 TagWord _tag_word;
5465 int32_t _error_offset;
5466 int32_t _error_selector;
5467 int32_t _data_offset;
5468 int32_t _data_selector;
5469 int8_t _register[register_size * number_of_registers];
5471 int tag_for_st(int i) const { return _tag_word.tag_at((_status_word.top() + i) & register_mask); }
5472 FPU_Register* st(int i) const { return (FPU_Register*)&_register[register_size * i]; }
5474 const char* tag_as_string(int tag) const {
5475 switch (tag) {
5476 case 0: return "valid";
5477 case 1: return "zero";
5478 case 2: return "special";
5479 case 3: return "empty";
5480 }
5481 ShouldNotReachHere();
5482 return NULL;
5483 }
5485 void print() const {
5486 // print computation registers
5487 { int t = _status_word.top();
5488 for (int i = 0; i < number_of_registers; i++) {
5489 int j = (i - t) & register_mask;
5490 printf("%c r%d = ST%d = ", (j == 0 ? '*' : ' '), i, j);
5491 st(j)->print();
5492 printf(" %s\n", tag_as_string(_tag_word.tag_at(i)));
5493 }
5494 }
5495 printf("\n");
5496 // print control registers
5497 printf("ctrl = "); _control_word.print(); printf("\n");
5498 printf("stat = "); _status_word .print(); printf("\n");
5499 printf("tags = "); _tag_word .print(); printf("\n");
5500 }
5502 };
5504 class Flag_Register {
5505 public:
5506 int32_t _value;
5508 bool overflow() const { return ((_value >> 11) & 1) != 0; }
5509 bool direction() const { return ((_value >> 10) & 1) != 0; }
5510 bool sign() const { return ((_value >> 7) & 1) != 0; }
5511 bool zero() const { return ((_value >> 6) & 1) != 0; }
5512 bool auxiliary_carry() const { return ((_value >> 4) & 1) != 0; }
5513 bool parity() const { return ((_value >> 2) & 1) != 0; }
5514 bool carry() const { return ((_value >> 0) & 1) != 0; }
5516 void print() const {
5517 // flags
5518 char f[8];
5519 f[0] = (overflow ()) ? 'O' : '-';
5520 f[1] = (direction ()) ? 'D' : '-';
5521 f[2] = (sign ()) ? 'S' : '-';
5522 f[3] = (zero ()) ? 'Z' : '-';
5523 f[4] = (auxiliary_carry()) ? 'A' : '-';
5524 f[5] = (parity ()) ? 'P' : '-';
5525 f[6] = (carry ()) ? 'C' : '-';
5526 f[7] = '\x0';
5527 // output
5528 printf("%08x flags = %s", _value, f);
5529 }
5531 };
5533 class IU_Register {
5534 public:
5535 int32_t _value;
5537 void print() const {
5538 printf("%08x %11d", _value, _value);
5539 }
5541 };
5543 class IU_State {
5544 public:
5545 Flag_Register _eflags;
5546 IU_Register _rdi;
5547 IU_Register _rsi;
5548 IU_Register _rbp;
5549 IU_Register _rsp;
5550 IU_Register _rbx;
5551 IU_Register _rdx;
5552 IU_Register _rcx;
5553 IU_Register _rax;
5555 void print() const {
5556 // computation registers
5557 printf("rax, = "); _rax.print(); printf("\n");
5558 printf("rbx, = "); _rbx.print(); printf("\n");
5559 printf("rcx = "); _rcx.print(); printf("\n");
5560 printf("rdx = "); _rdx.print(); printf("\n");
5561 printf("rdi = "); _rdi.print(); printf("\n");
5562 printf("rsi = "); _rsi.print(); printf("\n");
5563 printf("rbp, = "); _rbp.print(); printf("\n");
5564 printf("rsp = "); _rsp.print(); printf("\n");
5565 printf("\n");
5566 // control registers
5567 printf("flgs = "); _eflags.print(); printf("\n");
5568 }
5569 };
5572 class CPU_State {
5573 public:
5574 FPU_State _fpu_state;
5575 IU_State _iu_state;
5577 void print() const {
5578 printf("--------------------------------------------------\n");
5579 _iu_state .print();
5580 printf("\n");
5581 _fpu_state.print();
5582 printf("--------------------------------------------------\n");
5583 }
5585 };
5588 static void _print_CPU_state(CPU_State* state) {
5589 state->print();
5590 };
5593 void MacroAssembler::print_CPU_state() {
5594 push_CPU_state();
5595 push(rsp); // pass CPU state
5596 call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state)));
5597 addptr(rsp, wordSize); // discard argument
5598 pop_CPU_state();
5599 }
5602 static bool _verify_FPU(int stack_depth, char* s, CPU_State* state) {
5603 static int counter = 0;
5604 FPU_State* fs = &state->_fpu_state;
5605 counter++;
5606 // For leaf calls, only verify that the top few elements remain empty.
5607 // We only need 1 empty at the top for C2 code.
5608 if( stack_depth < 0 ) {
5609 if( fs->tag_for_st(7) != 3 ) {
5610 printf("FPR7 not empty\n");
5611 state->print();
5612 assert(false, "error");
5613 return false;
5614 }
5615 return true; // All other stack states do not matter
5616 }
5618 assert((fs->_control_word._value & 0xffff) == StubRoutines::_fpu_cntrl_wrd_std,
5619 "bad FPU control word");
5621 // compute stack depth
5622 int i = 0;
5623 while (i < FPU_State::number_of_registers && fs->tag_for_st(i) < 3) i++;
5624 int d = i;
5625 while (i < FPU_State::number_of_registers && fs->tag_for_st(i) == 3) i++;
5626 // verify findings
5627 if (i != FPU_State::number_of_registers) {
5628 // stack not contiguous
5629 printf("%s: stack not contiguous at ST%d\n", s, i);
5630 state->print();
5631 assert(false, "error");
5632 return false;
5633 }
5634 // check if computed stack depth corresponds to expected stack depth
5635 if (stack_depth < 0) {
5636 // expected stack depth is -stack_depth or less
5637 if (d > -stack_depth) {
5638 // too many elements on the stack
5639 printf("%s: <= %d stack elements expected but found %d\n", s, -stack_depth, d);
5640 state->print();
5641 assert(false, "error");
5642 return false;
5643 }
5644 } else {
5645 // expected stack depth is stack_depth
5646 if (d != stack_depth) {
5647 // wrong stack depth
5648 printf("%s: %d stack elements expected but found %d\n", s, stack_depth, d);
5649 state->print();
5650 assert(false, "error");
5651 return false;
5652 }
5653 }
5654 // everything is cool
5655 return true;
5656 }
5659 void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
5660 if (!VerifyFPU) return;
5661 push_CPU_state();
5662 push(rsp); // pass CPU state
5663 ExternalAddress msg((address) s);
5664 // pass message string s
5665 pushptr(msg.addr());
5666 push(stack_depth); // pass stack depth
5667 call(RuntimeAddress(CAST_FROM_FN_PTR(address, _verify_FPU)));
5668 addptr(rsp, 3 * wordSize); // discard arguments
5669 // check for error
5670 { Label L;
5671 testl(rax, rax);
5672 jcc(Assembler::notZero, L);
5673 int3(); // break if error condition
5674 bind(L);
5675 }
5676 pop_CPU_state();
5677 }
5679 void MacroAssembler::restore_cpu_control_state_after_jni() {
5680 // Either restore the MXCSR register after returning from the JNI Call
5681 // or verify that it wasn't changed (with -Xcheck:jni flag).
5682 if (VM_Version::supports_sse()) {
5683 if (RestoreMXCSROnJNICalls) {
5684 ldmxcsr(ExternalAddress(StubRoutines::addr_mxcsr_std()));
5685 } else if (CheckJNICalls) {
5686 call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
5687 }
5688 }
5689 if (VM_Version::supports_avx()) {
5690 // Clear upper bits of YMM registers to avoid SSE <-> AVX transition penalty.
5691 vzeroupper();
5692 }
5694 #ifndef _LP64
5695 // Either restore the x87 floating point control word after returning
5696 // from the JNI call or verify that it wasn't changed.
5697 if (CheckJNICalls) {
5698 call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry()));
5699 }
5700 #endif // _LP64
5701 }
5704 void MacroAssembler::load_klass(Register dst, Register src) {
5705 #ifdef _LP64
5706 if (UseCompressedClassPointers) {
5707 movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
5708 decode_klass_not_null(dst);
5709 } else
5710 #endif
5711 movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
5712 }
5714 void MacroAssembler::load_prototype_header(Register dst, Register src) {
5715 load_klass(dst, src);
5716 movptr(dst, Address(dst, Klass::prototype_header_offset()));
5717 }
5719 void MacroAssembler::store_klass(Register dst, Register src) {
5720 #ifdef _LP64
5721 if (UseCompressedClassPointers) {
5722 encode_klass_not_null(src);
5723 movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
5724 } else
5725 #endif
5726 movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
5727 }
5729 void MacroAssembler::load_heap_oop(Register dst, Address src) {
5730 #ifdef _LP64
5731 // FIXME: Must change all places where we try to load the klass.
5732 if (UseCompressedOops) {
5733 movl(dst, src);
5734 decode_heap_oop(dst);
5735 } else
5736 #endif
5737 movptr(dst, src);
5738 }
5740 // Doesn't do verification; generates fixed-size code
5741 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src) {
5742 #ifdef _LP64
5743 if (UseCompressedOops) {
5744 movl(dst, src);
5745 decode_heap_oop_not_null(dst);
5746 } else
5747 #endif
5748 movptr(dst, src);
5749 }
5751 void MacroAssembler::store_heap_oop(Address dst, Register src) {
5752 #ifdef _LP64
5753 if (UseCompressedOops) {
5754 assert(!dst.uses(src), "not enough registers");
5755 encode_heap_oop(src);
5756 movl(dst, src);
5757 } else
5758 #endif
5759 movptr(dst, src);
5760 }
5762 void MacroAssembler::cmp_heap_oop(Register src1, Address src2, Register tmp) {
5763 assert_different_registers(src1, tmp);
5764 #ifdef _LP64
5765 if (UseCompressedOops) {
5766 bool did_push = false;
5767 if (tmp == noreg) {
5768 tmp = rax;
5769 push(tmp);
5770 did_push = true;
5771 assert(!src2.uses(rsp), "can't push");
5772 }
5773 load_heap_oop(tmp, src2);
5774 cmpptr(src1, tmp);
5775 if (did_push) pop(tmp);
5776 } else
5777 #endif
5778 cmpptr(src1, src2);
5779 }
5781 // Used for storing NULLs.
5782 void MacroAssembler::store_heap_oop_null(Address dst) {
5783 #ifdef _LP64
5784 if (UseCompressedOops) {
5785 movl(dst, (int32_t)NULL_WORD);
5786 } else {
5787 movslq(dst, (int32_t)NULL_WORD);
5788 }
5789 #else
5790 movl(dst, (int32_t)NULL_WORD);
5791 #endif
5792 }
5794 #ifdef _LP64
5795 void MacroAssembler::store_klass_gap(Register dst, Register src) {
5796 if (UseCompressedClassPointers) {
5797 // Store to klass gap in destination
5798 movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
5799 }
5800 }
5802 #ifdef ASSERT
5803 void MacroAssembler::verify_heapbase(const char* msg) {
5804 assert (UseCompressedOops, "should be compressed");
5805 assert (Universe::heap() != NULL, "java heap should be initialized");
5806 if (CheckCompressedOops) {
5807 Label ok;
5808 push(rscratch1); // cmpptr trashes rscratch1
5809 cmpptr(r12_heapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
5810 jcc(Assembler::equal, ok);
5811 STOP(msg);
5812 bind(ok);
5813 pop(rscratch1);
5814 }
5815 }
5816 #endif
5818 // Algorithm must match oop.inline.hpp encode_heap_oop.
5819 void MacroAssembler::encode_heap_oop(Register r) {
5820 #ifdef ASSERT
5821 verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
5822 #endif
5823 verify_oop(r, "broken oop in encode_heap_oop");
5824 if (Universe::narrow_oop_base() == NULL) {
5825 if (Universe::narrow_oop_shift() != 0) {
5826 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
5827 shrq(r, LogMinObjAlignmentInBytes);
5828 }
5829 return;
5830 }
5831 testq(r, r);
5832 cmovq(Assembler::equal, r, r12_heapbase);
5833 subq(r, r12_heapbase);
5834 shrq(r, LogMinObjAlignmentInBytes);
5835 }
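// The encoding above, as a C-like sketch (base/shift from Universe):
//   narrow = (p == NULL) ? 0 : (uint32_t)((p - base) >> shift);
// The cmov maps NULL to base first so the subtraction yields 0.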
5837 void MacroAssembler::encode_heap_oop_not_null(Register r) {
5838 #ifdef ASSERT
5839 verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?");
5840 if (CheckCompressedOops) {
5841 Label ok;
5842 testq(r, r);
5843 jcc(Assembler::notEqual, ok);
5844 STOP("null oop passed to encode_heap_oop_not_null");
5845 bind(ok);
5846 }
5847 #endif
5848 verify_oop(r, "broken oop in encode_heap_oop_not_null");
5849 if (Universe::narrow_oop_base() != NULL) {
5850 subq(r, r12_heapbase);
5851 }
5852 if (Universe::narrow_oop_shift() != 0) {
5853 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
5854 shrq(r, LogMinObjAlignmentInBytes);
5855 }
5856 }
5858 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
5859 #ifdef ASSERT
5860 verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?");
5861 if (CheckCompressedOops) {
5862 Label ok;
5863 testq(src, src);
5864 jcc(Assembler::notEqual, ok);
5865 STOP("null oop passed to encode_heap_oop_not_null2");
5866 bind(ok);
5867 }
5868 #endif
5869 verify_oop(src, "broken oop in encode_heap_oop_not_null2");
5870 if (dst != src) {
5871 movq(dst, src);
5872 }
5873 if (Universe::narrow_oop_base() != NULL) {
5874 subq(dst, r12_heapbase);
5875 }
5876 if (Universe::narrow_oop_shift() != 0) {
5877 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
5878 shrq(dst, LogMinObjAlignmentInBytes);
5879 }
5880 }
5882 void MacroAssembler::decode_heap_oop(Register r) {
5883 #ifdef ASSERT
5884 verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
5885 #endif
5886 if (Universe::narrow_oop_base() == NULL) {
5887 if (Universe::narrow_oop_shift() != 0) {
5888 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
5889 shlq(r, LogMinObjAlignmentInBytes);
5890 }
5891 } else {
5892 Label done;
5893 shlq(r, LogMinObjAlignmentInBytes);
5894 jccb(Assembler::equal, done);
5895 addq(r, r12_heapbase);
5896 bind(done);
5897 }
5898 verify_oop(r, "broken oop in decode_heap_oop");
5899 }
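// The decoding above, as a C-like sketch (inverse of encode_heap_oop):
//   p = (narrow == 0) ? NULL : base + ((uintptr_t)narrow << shift);
// The jccb(equal, done) after the shift keeps a zero narrow oop NULL.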
5901 void MacroAssembler::decode_heap_oop_not_null(Register r) {
5902 // Note: it will change flags
5903 assert (UseCompressedOops, "should only be used for compressed headers");
5904 assert (Universe::heap() != NULL, "java heap should be initialized");
5905 // Cannot assert, unverified entry point counts instructions (see .ad file)
5906 // vtableStubs also counts instructions in pd_code_size_limit.
5907 // Also do not verify_oop as this is called by verify_oop.
5908 if (Universe::narrow_oop_shift() != 0) {
5909 assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
5910 shlq(r, LogMinObjAlignmentInBytes);
5911 if (Universe::narrow_oop_base() != NULL) {
5912 addq(r, r12_heapbase);
5913 }
5914 } else {
5915 assert (Universe::narrow_oop_base() == NULL, "sanity");
5916 }
5917 }
5919 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
5920 // Note: it will change flags
5921 assert (UseCompressedOops, "should only be used for compressed headers");
5922 assert (Universe::heap() != NULL, "java heap should be initialized");
5923 // Cannot assert, unverified entry point counts instructions (see .ad file)
5924 // vtableStubs also counts instructions in pd_code_size_limit.
5925 // Also do not verify_oop as this is called by verify_oop.
5926 if (Universe::narrow_oop_shift() != 0) {
5927 assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
5928 if (LogMinObjAlignmentInBytes == Address::times_8) {
5929 leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
5930 } else {
5931 if (dst != src) {
5932 movq(dst, src);
5933 }
5934 shlq(dst, LogMinObjAlignmentInBytes);
5935 if (Universe::narrow_oop_base() != NULL) {
5936 addq(dst, r12_heapbase);
5937 }
5938 }
5939 } else {
5940 assert (Universe::narrow_oop_base() == NULL, "sanity");
5941 if (dst != src) {
5942 movq(dst, src);
5943 }
5944 }
5945 }
5947 void MacroAssembler::encode_klass_not_null(Register r) {
5948 if (Universe::narrow_klass_base() != NULL) {
5949 // Use r12 as a scratch register in which to temporarily load the narrow_klass_base.
5950 assert(r != r12_heapbase, "Encoding a klass in r12");
5951 mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base());
5952 subq(r, r12_heapbase);
5953 }
5954 if (Universe::narrow_klass_shift() != 0) {
5955 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
5956 shrq(r, LogKlassAlignmentInBytes);
5957 }
5958 if (Universe::narrow_klass_base() != NULL) {
5959 reinit_heapbase();
5960 }
5961 }
5963 void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
5964 if (dst == src) {
5965 encode_klass_not_null(src);
5966 } else {
5967 if (Universe::narrow_klass_base() != NULL) {
5968 mov64(dst, (int64_t)Universe::narrow_klass_base());
5969 negq(dst);
5970 addq(dst, src);
5971 } else {
5972 movptr(dst, src);
5973 }
5974 if (Universe::narrow_klass_shift() != 0) {
5975 assert (LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
5976 shrq(dst, LogKlassAlignmentInBytes);
5977 }
5978 }
5979 }
5981 // Function instr_size_for_decode_klass_not_null() counts the instructions
5982 // generated by decode_klass_not_null(register r) and reinit_heapbase(),
5983 // when (Universe::heap() != NULL). Hence, if the instructions they
5984 // generate change, then this method needs to be updated.
5985 int MacroAssembler::instr_size_for_decode_klass_not_null() {
5986 assert (UseCompressedClassPointers, "only for compressed klass ptrs");
5987 if (Universe::narrow_klass_base() != NULL) {
5988 // mov64 + addq + shlq? + mov64 (for reinit_heapbase()).
5989 return (Universe::narrow_klass_shift() == 0 ? 20 : 24);
5990 } else {
5991 // longest case of the decode_klass_not_null function: mov64, leaq
5992 return 16;
5993 }
5994 }
5996 // !!! If the instructions that get generated here change then function
5997 // instr_size_for_decode_klass_not_null() needs to get updated.
5998 void MacroAssembler::decode_klass_not_null(Register r) {
5999 // Note: it will change flags
6000 assert (UseCompressedClassPointers, "should only be used for compressed headers");
6001 assert(r != r12_heapbase, "Decoding a klass in r12");
6002 // Cannot assert, unverified entry point counts instructions (see .ad file)
6003 // vtableStubs also counts instructions in pd_code_size_limit.
6004 // Also do not verify_oop as this is called by verify_oop.
6005 if (Universe::narrow_klass_shift() != 0) {
6006 assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
6007 shlq(r, LogKlassAlignmentInBytes);
6008 }
6009 // Use r12 as a scratch register in which to temporarily load the narrow_klass_base.
6010 if (Universe::narrow_klass_base() != NULL) {
6011 mov64(r12_heapbase, (int64_t)Universe::narrow_klass_base());
6012 addq(r, r12_heapbase);
6013 reinit_heapbase();
6014 }
6015 }
6017 void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
6018 // Note: it will change flags
6019 assert (UseCompressedClassPointers, "should only be used for compressed headers");
6020 if (dst == src) {
6021 decode_klass_not_null(dst);
6022 } else {
6023 // Cannot assert, unverified entry point counts instructions (see .ad file)
6024 // vtableStubs also counts instructions in pd_code_size_limit.
6025 // Also do not verify_oop as this is called by verify_oop.
6026 mov64(dst, (int64_t)Universe::narrow_klass_base());
6027 if (Universe::narrow_klass_shift() != 0) {
6028 assert(LogKlassAlignmentInBytes == Universe::narrow_klass_shift(), "decode alg wrong");
6029 assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?");
6030 leaq(dst, Address(dst, src, Address::times_8, 0));
6031 } else {
6032 addq(dst, src);
6033 }
6034 }
6035 }
6037 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
6038 assert (UseCompressedOops, "should only be used for compressed headers");
6039 assert (Universe::heap() != NULL, "java heap should be initialized");
6040 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
6041 int oop_index = oop_recorder()->find_index(obj);
6042 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6043 mov_narrow_oop(dst, oop_index, rspec);
6044 }
6046 void MacroAssembler::set_narrow_oop(Address dst, jobject obj) {
6047 assert (UseCompressedOops, "should only be used for compressed headers");
6048 assert (Universe::heap() != NULL, "java heap should be initialized");
6049 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
6050 int oop_index = oop_recorder()->find_index(obj);
6051 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6052 mov_narrow_oop(dst, oop_index, rspec);
6053 }
6055 void MacroAssembler::set_narrow_klass(Register dst, Klass* k) {
6056 assert (UseCompressedClassPointers, "should only be used for compressed headers");
6057 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
6058 int klass_index = oop_recorder()->find_index(k);
6059 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6060 mov_narrow_oop(dst, Klass::encode_klass(k), rspec);
6061 }
6063 void MacroAssembler::set_narrow_klass(Address dst, Klass* k) {
6064 assert (UseCompressedClassPointers, "should only be used for compressed headers");
6065 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
6066 int klass_index = oop_recorder()->find_index(k);
6067 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6068 mov_narrow_oop(dst, Klass::encode_klass(k), rspec);
6069 }
6071 void MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) {
6072 assert (UseCompressedOops, "should only be used for compressed headers");
6073 assert (Universe::heap() != NULL, "java heap should be initialized");
6074 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
6075 int oop_index = oop_recorder()->find_index(obj);
6076 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6077 Assembler::cmp_narrow_oop(dst, oop_index, rspec);
6078 }
6080 void MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) {
6081 assert (UseCompressedOops, "should only be used for compressed headers");
6082 assert (Universe::heap() != NULL, "java heap should be initialized");
6083 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
6084 int oop_index = oop_recorder()->find_index(obj);
6085 RelocationHolder rspec = oop_Relocation::spec(oop_index);
6086 Assembler::cmp_narrow_oop(dst, oop_index, rspec);
6087 }
6089 void MacroAssembler::cmp_narrow_klass(Register dst, Klass* k) {
6090 assert (UseCompressedClassPointers, "should only be used for compressed headers");
6091 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
6092 int klass_index = oop_recorder()->find_index(k);
6093 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6094 Assembler::cmp_narrow_oop(dst, Klass::encode_klass(k), rspec);
6095 }
6097 void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
6098 assert (UseCompressedClassPointers, "should only be used for compressed headers");
6099 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
6100 int klass_index = oop_recorder()->find_index(k);
6101 RelocationHolder rspec = metadata_Relocation::spec(klass_index);
6102 Assembler::cmp_narrow_oop(dst, Klass::encode_klass(k), rspec);
6103 }
6105 void MacroAssembler::reinit_heapbase() {
6106 if (UseCompressedOops || UseCompressedClassPointers) {
6107 if (Universe::heap() != NULL) {
6108 if (Universe::narrow_oop_base() == NULL) {
6109 MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
6110 } else {
6111 mov64(r12_heapbase, (int64_t)Universe::narrow_ptrs_base());
6112 }
6113 } else {
6114 movptr(r12_heapbase, ExternalAddress((address)Universe::narrow_ptrs_base_addr()));
6115 }
6116 }
6117 }
6119 #endif // _LP64
6122 // C2 compiled method's prolog code.
6123 void MacroAssembler::verified_entry(int framesize, int stack_bang_size, bool fp_mode_24b) {
6125 // WARNING: Initial instruction MUST be 5 bytes or longer so that
6126 // NativeJump::patch_verified_entry will be able to patch out the entry
6127 // code safely. The push to verify stack depth is ok at 5 bytes,
6128 // the frame allocation can be either 3 or 6 bytes. So if we don't do
6129 // stack bang then we must use the 6 byte frame allocation even if
6130 // we have no frame. :-(
6131 assert(stack_bang_size >= framesize || stack_bang_size <= 0, "stack bang size incorrect");
6133 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
6134 // Remove word for return addr
6135 framesize -= wordSize;
6136 stack_bang_size -= wordSize;
6138 // Calls to C2R adapters often do not accept exceptional returns.
6139 // We require their callers to bang for them. But be careful, because
6140 // some VM calls (such as call site linkage) can use several kilobytes of
6141 // stack. But the stack safety zone should account for that.
6142 // See bugs 4446381, 4468289, 4497237.
6143 if (stack_bang_size > 0) {
6144 generate_stack_overflow_check(stack_bang_size);
6146 // We always push rbp so that on return to the interpreter rbp will be
6147 // restored correctly, allowing us to fix up the stack.
6148 push(rbp);
6149 // Save caller's stack pointer into RBP if the frame pointer is preserved.
6150 if (PreserveFramePointer) {
6151 mov(rbp, rsp);
6152 }
6153 // Remove word for ebp
6154 framesize -= wordSize;
6156 // Create frame
6157 if (framesize) {
6158 subptr(rsp, framesize);
6159 }
6160 } else {
6161 // Create frame (force generation of a 4 byte immediate value)
6162 subptr_imm32(rsp, framesize);
6164 // Save RBP register now.
6165 framesize -= wordSize;
6166 movptr(Address(rsp, framesize), rbp);
6167 // Save caller's stack pointer into RBP if the frame pointer is preserved.
6168 if (PreserveFramePointer) {
6169 movptr(rbp, rsp);
6170 if (framesize > 0) {
6171 addptr(rbp, framesize);
6172 }
6173 }
6174 }
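// At this point (a sketch; framesize has already been reduced by the
// return-address and saved-rbp words) the frame looks like:
//   [rsp + framesize + wordSize]  return address
//   [rsp + framesize]             saved rbp
//   [rsp .. rsp + framesize)      the new frame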
6176 if (VerifyStackAtCalls) { // Majik cookie to verify stack depth
6177 framesize -= wordSize;
6178 movptr(Address(rsp, framesize), (int32_t)0xbadb100d);
6179 }
6181 #ifndef _LP64
6182 // If method sets FPU control word do it now
6183 if (fp_mode_24b) {
6184 fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24()));
6185 }
6186 if (UseSSE >= 2 && VerifyFPU) {
6187 verify_FPU(0, "FPU stack must be clean on entry");
6188 }
6189 #endif
6191 #ifdef ASSERT
6192 if (VerifyStackAtCalls) {
6193 Label L;
6194 push(rax);
6195 mov(rax, rsp);
6196 andptr(rax, StackAlignmentInBytes-1);
6197 cmpptr(rax, StackAlignmentInBytes-wordSize);
6198 pop(rax);
6199 jcc(Assembler::equal, L);
6200 STOP("Stack is not properly aligned!");
6201 bind(L);
6202 }
6203 #endif
6205 }
6207 void MacroAssembler::clear_mem(Register base, Register cnt, Register tmp) {
6208 // cnt - number of qwords (8-byte words).
6209 // base - start address, qword aligned.
6210 assert(base==rdi, "base register must be edi for rep stos");
6211 assert(tmp==rax, "tmp register must be eax for rep stos");
6212 assert(cnt==rcx, "cnt register must be ecx for rep stos");
6214 xorptr(tmp, tmp);
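// What follows is, in effect (C-like sketch), zeroing cnt words from base.
// rep_stos() behaves like:
//   while (rcx-- != 0) { *(intptr_t*)rdi = rax; rdi += wordSize; }
// UseFastStosb converts the count to bytes and uses rep stosb instead,
// which is fast on CPUs with ERMSB support.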
6215 if (UseFastStosb) {
6216 shlptr(cnt,3); // convert to number of bytes
6217 rep_stosb();
6218 } else {
6219 NOT_LP64(shlptr(cnt,1);) // convert to number of dwords for 32-bit VM
6220 rep_stos();
6221 }
6222 }
6224 // IndexOf for constant substrings with size >= 8 chars
6225 // which don't need to be loaded through the stack.
6226 void MacroAssembler::string_indexofC8(Register str1, Register str2,
6227 Register cnt1, Register cnt2,
6228 int int_cnt2, Register result,
6229 XMMRegister vec, Register tmp) {
6230 ShortBranchVerifier sbv(this);
6231 assert(UseSSE42Intrinsics, "SSE4.2 is required");
6233 // This method uses the pcmpestri instruction with bound registers
6234 // inputs:
6235 // xmm - substring
6236 // rax - substring length (elements count)
6237 // mem - scanned string
6238 // rdx - string length (elements count)
6239 // 0xd - mode: 1100 (substring search) + 01 (unsigned shorts)
6240 // outputs:
6241 // rcx - matched index in string
6242 assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri");
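// The pcmpestri imm8 0x0d used below decodes as (SSE4.2 reference, a sketch):
//   bits 1:0 = 01 -> unsigned 16-bit elements (chars)
//   bits 3:2 = 11 -> "equal ordered" aggregation, i.e. substring search
//   bits 5:4 = 00 -> positive polarity
//   bit    6 =  0 -> rcx gets the least significant matching index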
6244 Label RELOAD_SUBSTR, SCAN_TO_SUBSTR, SCAN_SUBSTR,
6245 RET_FOUND, RET_NOT_FOUND, EXIT, FOUND_SUBSTR,
6246 MATCH_SUBSTR_HEAD, RELOAD_STR, FOUND_CANDIDATE;
6248 // Note, inline_string_indexOf() generates checks:
6249 // if (substr.count > string.count) return -1;
6250 // if (substr.count == 0) return 0;
6251 assert(int_cnt2 >= 8, "this code is used only for cnt2 >= 8 chars");
6253 // Load substring.
6254 movdqu(vec, Address(str2, 0));
6255 movl(cnt2, int_cnt2);
6256 movptr(result, str1); // string addr
6258 if (int_cnt2 > 8) {
6259 jmpb(SCAN_TO_SUBSTR);
6261 // Reload substr for rescan, this code
6262 // is executed only for large substrings (> 8 chars)
6263 bind(RELOAD_SUBSTR);
6264 movdqu(vec, Address(str2, 0));
6265 negptr(cnt2); // Jumped here with negative cnt2, convert to positive
6267 bind(RELOAD_STR);
6268 // We came here after the beginning of the substring was
6269 // matched but the rest of it was not, so we need to search
6270 // again. Start from the next element after the previous match.
6272 // cnt2 is the number of remaining substring elements and
6273 // cnt1 is the number of remaining string elements when cmp failed.
6274 // Restored cnt1 = cnt1 - cnt2 + int_cnt2
6275 subl(cnt1, cnt2);
6276 addl(cnt1, int_cnt2);
6277 movl(cnt2, int_cnt2); // Now restore cnt2
6279 decrementl(cnt1); // Shift to next element
6280 cmpl(cnt1, cnt2);
6281 jccb(Assembler::negative, RET_NOT_FOUND); // Left less than substring
6283 addptr(result, 2);
6285 } // (int_cnt2 > 8)
6287 // Scan string for start of substr in 16-byte vectors
6288 bind(SCAN_TO_SUBSTR);
6289 pcmpestri(vec, Address(result, 0), 0x0d);
6290 jccb(Assembler::below, FOUND_CANDIDATE); // CF == 1
6291 subl(cnt1, 8);
6292 jccb(Assembler::lessEqual, RET_NOT_FOUND); // Scanned full string
6293 cmpl(cnt1, cnt2);
6294 jccb(Assembler::negative, RET_NOT_FOUND); // Left less than substring
6295 addptr(result, 16);
6296 jmpb(SCAN_TO_SUBSTR);
6298 // Found a potential substr
6299 bind(FOUND_CANDIDATE);
6300 // Matched whole vector if first element matched (tmp(rcx) == 0).
6301 if (int_cnt2 == 8) {
6302 jccb(Assembler::overflow, RET_FOUND); // OF == 1
6303 } else { // int_cnt2 > 8
6304 jccb(Assembler::overflow, FOUND_SUBSTR);
6305 }
6306 // After pcmpestri tmp(rcx) contains matched element index
6307 // Compute start addr of substr
6308 lea(result, Address(result, tmp, Address::times_2));
6310 // Make sure string is still long enough
6311 subl(cnt1, tmp);
6312 cmpl(cnt1, cnt2);
6313 if (int_cnt2 == 8) {
6314 jccb(Assembler::greaterEqual, SCAN_TO_SUBSTR);
6315 } else { // int_cnt2 > 8
6316 jccb(Assembler::greaterEqual, MATCH_SUBSTR_HEAD);
6317 }
6318 // Left less than substring.
6320 bind(RET_NOT_FOUND);
6321 movl(result, -1);
6322 jmpb(EXIT);
6324 if (int_cnt2 > 8) {
6325 // This code is optimized for the case when whole substring
6326 // is matched if its head is matched.
6327 bind(MATCH_SUBSTR_HEAD);
6328 pcmpestri(vec, Address(result, 0), 0x0d);
6329 // Reload only the string if it does not match
6330 jccb(Assembler::noOverflow, RELOAD_STR); // OF == 0
6332 Label CONT_SCAN_SUBSTR;
6333 // Compare the rest of substring (> 8 chars).
6334 bind(FOUND_SUBSTR);
6335 // First 8 chars are already matched.
6336 negptr(cnt2);
6337 addptr(cnt2, 8);
6339 bind(SCAN_SUBSTR);
6340 subl(cnt1, 8);
6341 cmpl(cnt2, -8); // Do not read beyond substring
6342 jccb(Assembler::lessEqual, CONT_SCAN_SUBSTR);
6343 // Back-up strings to avoid reading beyond substring:
6344 // cnt1 = cnt1 - cnt2 + 8
6345 addl(cnt1, cnt2); // cnt2 is negative
6346 addl(cnt1, 8);
6347 movl(cnt2, 8); negptr(cnt2);
6348 bind(CONT_SCAN_SUBSTR);
6349 if (int_cnt2 < (int)G) {
6350 movdqu(vec, Address(str2, cnt2, Address::times_2, int_cnt2*2));
6351 pcmpestri(vec, Address(result, cnt2, Address::times_2, int_cnt2*2), 0x0d);
6352 } else {
6353 // calculate index in register to avoid integer overflow (int_cnt2*2)
6354 movl(tmp, int_cnt2);
6355 addptr(tmp, cnt2);
6356 movdqu(vec, Address(str2, tmp, Address::times_2, 0));
6357 pcmpestri(vec, Address(result, tmp, Address::times_2, 0), 0x0d);
6358 }
6359 // Need to reload string pointers if we did not match the whole vector
6360 jcc(Assembler::noOverflow, RELOAD_SUBSTR); // OF == 0
6361 addptr(cnt2, 8);
6362 jcc(Assembler::negative, SCAN_SUBSTR);
6363 // Fall through if found full substring
6365 } // (int_cnt2 > 8)
6367 bind(RET_FOUND);
6368 // Found result if we matched full small substring.
6369 // Compute substr offset
6370 subptr(result, str1);
6371 shrl(result, 1); // index
6372 bind(EXIT);
6374 } // string_indexofC8
6376 // Small strings are loaded through the stack if they cross a page boundary.
6377 void MacroAssembler::string_indexof(Register str1, Register str2,
6378 Register cnt1, Register cnt2,
6379 int int_cnt2, Register result,
6380 XMMRegister vec, Register tmp) {
6381 ShortBranchVerifier sbv(this);
6382 assert(UseSSE42Intrinsics, "SSE4.2 is required");
6383 //
6384 // int_cnt2 is length of small (< 8 chars) constant substring
6385 // or (-1) for non constant substring in which case its length
6386 // is in cnt2 register.
6387 //
6388 // Note, inline_string_indexOf() generates checks:
6389 // if (substr.count > string.count) return -1;
6390 // if (substr.count == 0) return 0;
6391 //
6392 assert(int_cnt2 == -1 || (0 < int_cnt2 && int_cnt2 < 8), "should be != 0");
6394 // This method uses the pcmpestri instruction with bound registers
6395 // inputs:
6396 // xmm - substring
6397 // rax - substring length (elements count)
6398 // mem - scanned string
6399 // rdx - string length (elements count)
6400 // 0xd - mode: 1100 (substring search) + 01 (unsigned shorts)
6401 // outputs:
6402 // rcx - matched index in string
6403 assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri");
6405 Label RELOAD_SUBSTR, SCAN_TO_SUBSTR, SCAN_SUBSTR, ADJUST_STR,
6406 RET_FOUND, RET_NOT_FOUND, CLEANUP, FOUND_SUBSTR,
6407 FOUND_CANDIDATE;
6409 { //========================================================
6410 // We don't know where these strings are located
6411 // and we can't read beyond them. Load them through stack.
6412 Label BIG_STRINGS, CHECK_STR, COPY_SUBSTR, COPY_STR;
6414 movptr(tmp, rsp); // save old SP
6416 if (int_cnt2 > 0) { // small (< 8 chars) constant substring
6417 if (int_cnt2 == 1) { // One char
6418 load_unsigned_short(result, Address(str2, 0));
6419 movdl(vec, result); // move 32 bits
6420 } else if (int_cnt2 == 2) { // Two chars
6421 movdl(vec, Address(str2, 0)); // move 32 bits
6422 } else if (int_cnt2 == 4) { // Four chars
6423 movq(vec, Address(str2, 0)); // move 64 bits
6424 } else { // cnt2 = { 3, 5, 6, 7 }
6425 // Array header size is 12 bytes in 32-bit VM
6426 // + 6 bytes for 3 chars == 18 bytes,
6427 // enough space to load vec and shift.
6428 assert(HeapWordSize*TypeArrayKlass::header_size() >= 12,"sanity");
6429 movdqu(vec, Address(str2, (int_cnt2*2)-16));
6430 psrldq(vec, 16-(int_cnt2*2));
6431 }
6432 } else { // not constant substring
6433 cmpl(cnt2, 8);
6434 jccb(Assembler::aboveEqual, BIG_STRINGS); // Both strings are big enough
6436 // We can read beyond the string if str+16 does not cross a page boundary
6437 // since heaps are aligned and mapped by pages.
6438 assert(os::vm_page_size() < (int)G, "default page should be small");
6439 movl(result, str2); // We need only low 32 bits
6440 andl(result, (os::vm_page_size()-1));
6441 cmpl(result, (os::vm_page_size()-16));
6442 jccb(Assembler::belowEqual, CHECK_STR);
6444 // Move small strings to the stack to allow loading 16 bytes into vec.
6445 subptr(rsp, 16);
6446 int stk_offset = wordSize-2;
6447 push(cnt2);
6449 bind(COPY_SUBSTR);
6450 load_unsigned_short(result, Address(str2, cnt2, Address::times_2, -2));
6451 movw(Address(rsp, cnt2, Address::times_2, stk_offset), result);
6452 decrement(cnt2);
6453 jccb(Assembler::notZero, COPY_SUBSTR);
6455 pop(cnt2);
6456 movptr(str2, rsp); // New substring address
6457 } // non constant
6459 bind(CHECK_STR);
6460 cmpl(cnt1, 8);
6461 jccb(Assembler::aboveEqual, BIG_STRINGS);
6463 // Check cross page boundary.
6464 movl(result, str1); // We need only low 32 bits
6465 andl(result, (os::vm_page_size()-1));
6466 cmpl(result, (os::vm_page_size()-16));
6467 jccb(Assembler::belowEqual, BIG_STRINGS);
6469 subptr(rsp, 16);
6470 int stk_offset = -2;
6471 if (int_cnt2 < 0) { // not constant
6472 push(cnt2);
6473 stk_offset += wordSize;
6474 }
6475 movl(cnt2, cnt1);
6477 bind(COPY_STR);
6478 load_unsigned_short(result, Address(str1, cnt2, Address::times_2, -2));
6479 movw(Address(rsp, cnt2, Address::times_2, stk_offset), result);
6480 decrement(cnt2);
6481 jccb(Assembler::notZero, COPY_STR);
6483 if (int_cnt2 < 0) { // not constant
6484 pop(cnt2);
6485 }
6486 movptr(str1, rsp); // New string address
6488 bind(BIG_STRINGS);
6489 // Load substring.
6490 if (int_cnt2 < 0) { // -1
6491 movdqu(vec, Address(str2, 0));
6492 push(cnt2); // substr count
6493 push(str2); // substr addr
6494 push(str1); // string addr
6495 } else {
6496 // Small (< 8 chars) constant substrings are loaded already.
6497 movl(cnt2, int_cnt2);
6498 }
6499 push(tmp); // original SP
6501 } // Finished loading
6503 //========================================================
6504 // Start search
6505 //
6507 movptr(result, str1); // string addr
6509 if (int_cnt2 < 0) { // Only for non constant substring
6510 jmpb(SCAN_TO_SUBSTR);
6512 // SP saved at sp+0
6513 // String saved at sp+1*wordSize
6514 // Substr saved at sp+2*wordSize
6515 // Substr count saved at sp+3*wordSize
6517 // Reload substr for rescan, this code
6518 // is executed only for large substrings (> 8 chars)
6519 bind(RELOAD_SUBSTR);
6520 movptr(str2, Address(rsp, 2*wordSize));
6521 movl(cnt2, Address(rsp, 3*wordSize));
6522 movdqu(vec, Address(str2, 0));
6523 // We came here after the beginning of the substring was
6524 // matched but the rest of it was not, so we need to search
6525 // again. Start from the next element after the previous match.
6526 subptr(str1, result); // Restore counter
6527 shrl(str1, 1);
6528 addl(cnt1, str1);
6529 decrementl(cnt1); // Shift to next element
6530 cmpl(cnt1, cnt2);
6531 jccb(Assembler::negative, RET_NOT_FOUND); // Left less than substring
6533 addptr(result, 2);
6534 } // non constant
6536 // Scan string for start of substr in 16-byte vectors
6537 bind(SCAN_TO_SUBSTR);
6538 assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri");
6539 pcmpestri(vec, Address(result, 0), 0x0d);
6540 jccb(Assembler::below, FOUND_CANDIDATE); // CF == 1
6541 subl(cnt1, 8);
6542 jccb(Assembler::lessEqual, RET_NOT_FOUND); // Scanned full string
6543 cmpl(cnt1, cnt2);
6544 jccb(Assembler::negative, RET_NOT_FOUND); // Left less than substring
6545 addptr(result, 16);
6547 bind(ADJUST_STR);
6548 cmpl(cnt1, 8); // Do not read beyond string
6549 jccb(Assembler::greaterEqual, SCAN_TO_SUBSTR);
6550 // Back-up string to avoid reading beyond string.
6551 lea(result, Address(result, cnt1, Address::times_2, -16));
6552 movl(cnt1, 8);
6553 jmpb(SCAN_TO_SUBSTR);
6555 // Found a potential substr
6556 bind(FOUND_CANDIDATE);
6557 // After pcmpestri tmp(rcx) contains matched element index
6559 // Make sure string is still long enough
6560 subl(cnt1, tmp);
6561 cmpl(cnt1, cnt2);
6562 jccb(Assembler::greaterEqual, FOUND_SUBSTR);
6563 // Left less than substring.
6565 bind(RET_NOT_FOUND);
6566 movl(result, -1);
6567 jmpb(CLEANUP);
6569 bind(FOUND_SUBSTR);
6570 // Compute start addr of substr
6571 lea(result, Address(result, tmp, Address::times_2));
6573 if (int_cnt2 > 0) { // Constant substring
6574 // Repeat search for small substring (< 8 chars)
6575 // from new point without reloading substring.
6576 // Have to check that we don't read beyond string.
6577 cmpl(tmp, 8-int_cnt2);
6578 jccb(Assembler::greater, ADJUST_STR);
6579 // Fall through if matched whole substring.
6580 } else { // non constant
6581 assert(int_cnt2 == -1, "should be != 0");
6583 addl(tmp, cnt2);
6584 // Found result if we matched whole substring.
6585 cmpl(tmp, 8);
6586 jccb(Assembler::lessEqual, RET_FOUND);
6588 // Repeat search for small substring (<= 8 chars)
6589 // from new point 'str1' without reloading substring.
6590 cmpl(cnt2, 8);
6591 // Have to check that we don't read beyond string.
6592 jccb(Assembler::lessEqual, ADJUST_STR);
6594 Label CHECK_NEXT, CONT_SCAN_SUBSTR, RET_FOUND_LONG;
6595 // Compare the rest of substring (> 8 chars).
6596 movptr(str1, result);
6598 cmpl(tmp, cnt2);
6599 // First 8 chars are already matched.
6600 jccb(Assembler::equal, CHECK_NEXT);
6602 bind(SCAN_SUBSTR);
6603 pcmpestri(vec, Address(str1, 0), 0x0d);
6604 // Need to reload string pointers if we did not match the whole vector
6605 jcc(Assembler::noOverflow, RELOAD_SUBSTR); // OF == 0
6607 bind(CHECK_NEXT);
6608 subl(cnt2, 8);
6609 jccb(Assembler::lessEqual, RET_FOUND_LONG); // Found full substring
6610 addptr(str1, 16);
6611 addptr(str2, 16);
6612 subl(cnt1, 8);
6613 cmpl(cnt2, 8); // Do not read beyond substring
6614 jccb(Assembler::greaterEqual, CONT_SCAN_SUBSTR);
6615 // Back-up strings to avoid reading beyond substring.
6616 lea(str2, Address(str2, cnt2, Address::times_2, -16));
6617 lea(str1, Address(str1, cnt2, Address::times_2, -16));
6618 subl(cnt1, cnt2);
6619 movl(cnt2, 8);
6620 addl(cnt1, 8);
6621 bind(CONT_SCAN_SUBSTR);
6622 movdqu(vec, Address(str2, 0));
6623 jmpb(SCAN_SUBSTR);
6625 bind(RET_FOUND_LONG);
6626 movptr(str1, Address(rsp, wordSize));
6627 } // non constant
6629 bind(RET_FOUND);
6630 // Compute substr offset
6631 subptr(result, str1);
6632 shrl(result, 1); // index
6634 bind(CLEANUP);
6635 pop(rsp); // restore SP
6637 } // string_indexof
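// Rough Java-level sketch of string_indexof above (illustrative only;
// names are ours, not part of the generated code):
//   for (int i = 0; i <= cnt1 - cnt2; i++) {
//     if (string.regionMatches(i, substr, 0, cnt2)) return i;
//   }
//   return -1;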
6639 // Compare strings.
6640 void MacroAssembler::string_compare(Register str1, Register str2,
6641 Register cnt1, Register cnt2, Register result,
6642 XMMRegister vec1) {
6643 ShortBranchVerifier sbv(this);
6644 Label LENGTH_DIFF_LABEL, POP_LABEL, DONE_LABEL, WHILE_HEAD_LABEL;
6646 // Compute the minimum of the string lengths, and push the
6647 // difference of the string lengths onto the stack.
6648 // The minimum is selected with a conditional move.
6649 movl(result, cnt1);
6650 subl(cnt1, cnt2);
6651 push(cnt1);
6652 cmov32(Assembler::lessEqual, cnt2, result);
6654 // Is the minimum length zero?
6655 testl(cnt2, cnt2);
6656 jcc(Assembler::zero, LENGTH_DIFF_LABEL);
6658 // Compare first characters
6659 load_unsigned_short(result, Address(str1, 0));
6660 load_unsigned_short(cnt1, Address(str2, 0));
6661 subl(result, cnt1);
6662 jcc(Assembler::notZero, POP_LABEL);
6663 cmpl(cnt2, 1);
6664 jcc(Assembler::equal, LENGTH_DIFF_LABEL);
6666 // Check if the strings start at the same location.
6667 cmpptr(str1, str2);
6668 jcc(Assembler::equal, LENGTH_DIFF_LABEL);
6670 Address::ScaleFactor scale = Address::times_2;
6671 int stride = 8;
6673 if (UseAVX >= 2 && UseSSE42Intrinsics) {
6674 Label COMPARE_WIDE_VECTORS, VECTOR_NOT_EQUAL, COMPARE_WIDE_TAIL, COMPARE_SMALL_STR;
6675 Label COMPARE_WIDE_VECTORS_LOOP, COMPARE_16_CHARS, COMPARE_INDEX_CHAR;
6676 Label COMPARE_TAIL_LONG;
6677 int pcmpmask = 0x19;
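// A note on the mask (sketch of the pcmpestri imm8 encoding):
// 0x19 = 0b11001 selects unsigned words, "equal each" aggregation and
// a negated result, so rcx receives the index of the first mismatched
// element; the same mask is documented in the SSE4.2 path below.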
6679 // Set up to compare 16-char (32-byte) vectors;
6680 // start from the first character again because its address is aligned.
6681 int stride2 = 16;
6682 int adr_stride = stride << scale;
6683 int adr_stride2 = stride2 << scale;
6685 assert(result == rax && cnt2 == rdx && cnt1 == rcx, "pcmpestri");
6686 // rax and rdx are used by pcmpestri as element counters
6687 movl(result, cnt2);
6688 andl(cnt2, ~(stride2-1)); // cnt2 holds the vector count
6689 jcc(Assembler::zero, COMPARE_TAIL_LONG);
6691 // Fast path: compare the first two 8-char vectors.
6692 bind(COMPARE_16_CHARS);
6693 movdqu(vec1, Address(str1, 0));
6694 pcmpestri(vec1, Address(str2, 0), pcmpmask);
6695 jccb(Assembler::below, COMPARE_INDEX_CHAR);
6697 movdqu(vec1, Address(str1, adr_stride));
6698 pcmpestri(vec1, Address(str2, adr_stride), pcmpmask);
6699 jccb(Assembler::aboveEqual, COMPARE_WIDE_VECTORS);
6700 addl(cnt1, stride);
6702 // Compare the characters at index in cnt1
6703 bind(COMPARE_INDEX_CHAR); //cnt1 has the offset of the mismatching character
6704 load_unsigned_short(result, Address(str1, cnt1, scale));
6705 load_unsigned_short(cnt2, Address(str2, cnt1, scale));
6706 subl(result, cnt2);
6707 jmp(POP_LABEL);
6709 // Set up the registers to start the vector comparison loop
6710 bind(COMPARE_WIDE_VECTORS);
6711 lea(str1, Address(str1, result, scale));
6712 lea(str2, Address(str2, result, scale));
6713 subl(result, stride2);
6714 subl(cnt2, stride2);
6715 jccb(Assembler::zero, COMPARE_WIDE_TAIL);
6716 negptr(result);
6718 // In a loop, compare 16-chars (32-bytes) at once using (vpxor+vptest)
6719 bind(COMPARE_WIDE_VECTORS_LOOP);
6720 vmovdqu(vec1, Address(str1, result, scale));
6721 vpxor(vec1, Address(str2, result, scale));
6722 vptest(vec1, vec1);
6723 jccb(Assembler::notZero, VECTOR_NOT_EQUAL);
6724 addptr(result, stride2);
6725 subl(cnt2, stride2);
6726 jccb(Assembler::notZero, COMPARE_WIDE_VECTORS_LOOP);
6727 // clean upper bits of YMM registers
6728 vpxor(vec1, vec1);
6730 // compare wide vectors tail
6731 bind(COMPARE_WIDE_TAIL);
6732 testptr(result, result);
6733 jccb(Assembler::zero, LENGTH_DIFF_LABEL);
6735 movl(result, stride2);
6736 movl(cnt2, result);
6737 negptr(result);
6738 jmpb(COMPARE_WIDE_VECTORS_LOOP);
6740 // Identify the mismatching (upper or lower) 16 bytes in the 32-byte vectors.
6741 bind(VECTOR_NOT_EQUAL);
6742 // clean upper bits of YMM registers
6743 vpxor(vec1, vec1);
6744 lea(str1, Address(str1, result, scale));
6745 lea(str2, Address(str2, result, scale));
6746 jmp(COMPARE_16_CHARS);
6748 // Compare tail chars, length between 1 and 15 chars
6749 bind(COMPARE_TAIL_LONG);
6750 movl(cnt2, result);
6751 cmpl(cnt2, stride);
6752 jccb(Assembler::less, COMPARE_SMALL_STR);
6754 movdqu(vec1, Address(str1, 0));
6755 pcmpestri(vec1, Address(str2, 0), pcmpmask);
6756 jcc(Assembler::below, COMPARE_INDEX_CHAR);
6757 subptr(cnt2, stride);
6758 jccb(Assembler::zero, LENGTH_DIFF_LABEL);
6759 lea(str1, Address(str1, result, scale));
6760 lea(str2, Address(str2, result, scale));
6761 negptr(cnt2);
6762 jmpb(WHILE_HEAD_LABEL);
6764 bind(COMPARE_SMALL_STR);
6765 } else if (UseSSE42Intrinsics) {
6766 Label COMPARE_WIDE_VECTORS, VECTOR_NOT_EQUAL, COMPARE_TAIL;
6767 int pcmpmask = 0x19;
6768 // Set up to compare 8-char (16-byte) vectors;
6769 // start from the first character again because its address is aligned.
6770 movl(result, cnt2);
6771 andl(cnt2, ~(stride - 1)); // cnt2 holds the vector count
6772 jccb(Assembler::zero, COMPARE_TAIL);
6774 lea(str1, Address(str1, result, scale));
6775 lea(str2, Address(str2, result, scale));
6776 negptr(result);
6778 // pcmpestri
6779 // inputs:
6780 // vec1- substring
6781 // rax - negative string length (elements count)
6782 // mem - scanned string
6783 // rdx - string length (elements count)
6784 // pcmpmask - cmp mode: 11000 (string compare with negated result)
6785 // + 00 (unsigned bytes) or + 01 (unsigned shorts)
6786 // outputs:
6787 // rcx - first mismatched element index
6788 assert(result == rax && cnt2 == rdx && cnt1 == rcx, "pcmpestri");
6790 bind(COMPARE_WIDE_VECTORS);
6791 movdqu(vec1, Address(str1, result, scale));
6792 pcmpestri(vec1, Address(str2, result, scale), pcmpmask);
6793 // After pcmpestri cnt1(rcx) contains mismatched element index
6795 jccb(Assembler::below, VECTOR_NOT_EQUAL); // CF==1
6796 addptr(result, stride);
6797 subptr(cnt2, stride);
6798 jccb(Assembler::notZero, COMPARE_WIDE_VECTORS);
6800 // compare wide vectors tail
6801 testptr(result, result);
6802 jccb(Assembler::zero, LENGTH_DIFF_LABEL);
6804 movl(cnt2, stride);
6805 movl(result, stride);
6806 negptr(result);
6807 movdqu(vec1, Address(str1, result, scale));
6808 pcmpestri(vec1, Address(str2, result, scale), pcmpmask);
6809 jccb(Assembler::aboveEqual, LENGTH_DIFF_LABEL);
6811 // Mismatched characters in the vectors
6812 bind(VECTOR_NOT_EQUAL);
6813 addptr(cnt1, result);
6814 load_unsigned_short(result, Address(str1, cnt1, scale));
6815 load_unsigned_short(cnt2, Address(str2, cnt1, scale));
6816 subl(result, cnt2);
6817 jmpb(POP_LABEL);
6819 bind(COMPARE_TAIL); // limit is zero
6820 movl(cnt2, result);
6821 // Fallthru to tail compare
6822 }
6823 // Shift str2 and str1 to the end of the arrays, negate min
6824 lea(str1, Address(str1, cnt2, scale));
6825 lea(str2, Address(str2, cnt2, scale));
6826 decrementl(cnt2); // first character was compared already
6827 negptr(cnt2);
6829 // Compare the rest of the elements
6830 bind(WHILE_HEAD_LABEL);
6831 load_unsigned_short(result, Address(str1, cnt2, scale, 0));
6832 load_unsigned_short(cnt1, Address(str2, cnt2, scale, 0));
6833 subl(result, cnt1);
6834 jccb(Assembler::notZero, POP_LABEL);
6835 increment(cnt2);
6836 jccb(Assembler::notZero, WHILE_HEAD_LABEL);
6838 // Strings are equal up to min length. Return the length difference.
6839 bind(LENGTH_DIFF_LABEL);
6840 pop(result);
6841 jmpb(DONE_LABEL);
6843 // Discard the stored length difference
6844 bind(POP_LABEL);
6845 pop(cnt1);
6847 // That's it
6848 bind(DONE_LABEL);
6849 }
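// Rough Java-level sketch of string_compare above (illustrative only):
//   int min = Math.min(cnt1, cnt2);
//   for (int i = 0; i < min; i++) {
//     if (str1[i] != str2[i]) return str1[i] - str2[i];
//   }
//   return cnt1 - cnt2;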
6851 // Compare char[] arrays aligned to 4 bytes or substrings.
6852 void MacroAssembler::char_arrays_equals(bool is_array_equ, Register ary1, Register ary2,
6853 Register limit, Register result, Register chr,
6854 XMMRegister vec1, XMMRegister vec2) {
6855 ShortBranchVerifier sbv(this);
6856 Label TRUE_LABEL, FALSE_LABEL, DONE, COMPARE_VECTORS, COMPARE_CHAR;
6858 int length_offset = arrayOopDesc::length_offset_in_bytes();
6859 int base_offset = arrayOopDesc::base_offset_in_bytes(T_CHAR);
6861 // Check the input args
6862 cmpptr(ary1, ary2);
6863 jcc(Assembler::equal, TRUE_LABEL);
6865 if (is_array_equ) {
6866 // Need additional checks for arrays_equals.
6867 testptr(ary1, ary1);
6868 jcc(Assembler::zero, FALSE_LABEL);
6869 testptr(ary2, ary2);
6870 jcc(Assembler::zero, FALSE_LABEL);
6872 // Check the lengths
6873 movl(limit, Address(ary1, length_offset));
6874 cmpl(limit, Address(ary2, length_offset));
6875 jcc(Assembler::notEqual, FALSE_LABEL);
6876 }
6878 // count == 0
6879 testl(limit, limit);
6880 jcc(Assembler::zero, TRUE_LABEL);
6882 if (is_array_equ) {
6883 // Load array address
6884 lea(ary1, Address(ary1, base_offset));
6885 lea(ary2, Address(ary2, base_offset));
6886 }
6888 shll(limit, 1); // byte count != 0
6889 movl(result, limit); // copy
6891 if (UseAVX >= 2) {
6892 // With AVX2, use 32-byte vector compare
6893 Label COMPARE_WIDE_VECTORS, COMPARE_TAIL;
6895 // Compare 32-byte vectors
6896 andl(result, 0x0000001e); // tail count (in bytes)
6897 andl(limit, 0xffffffe0); // vector count (in bytes)
6898 jccb(Assembler::zero, COMPARE_TAIL);
6900 lea(ary1, Address(ary1, limit, Address::times_1));
6901 lea(ary2, Address(ary2, limit, Address::times_1));
6902 negptr(limit);
6904 bind(COMPARE_WIDE_VECTORS);
6905 vmovdqu(vec1, Address(ary1, limit, Address::times_1));
6906 vmovdqu(vec2, Address(ary2, limit, Address::times_1));
6907 vpxor(vec1, vec2);
6909 vptest(vec1, vec1);
6910 jccb(Assembler::notZero, FALSE_LABEL);
6911 addptr(limit, 32);
6912 jcc(Assembler::notZero, COMPARE_WIDE_VECTORS);
6914 testl(result, result);
6915 jccb(Assembler::zero, TRUE_LABEL);
6917 vmovdqu(vec1, Address(ary1, result, Address::times_1, -32));
6918 vmovdqu(vec2, Address(ary2, result, Address::times_1, -32));
6919 vpxor(vec1, vec2);
6921 vptest(vec1, vec1);
6922 jccb(Assembler::notZero, FALSE_LABEL);
6923 jmpb(TRUE_LABEL);
6925 bind(COMPARE_TAIL); // limit is zero
6926 movl(limit, result);
6927 // Fallthru to tail compare
6928 } else if (UseSSE42Intrinsics) {
6929 // With SSE4.2, use double quad vector compare
6930 Label COMPARE_WIDE_VECTORS, COMPARE_TAIL;
6932 // Compare 16-byte vectors
6933 andl(result, 0x0000000e); // tail count (in bytes)
6934 andl(limit, 0xfffffff0); // vector count (in bytes)
6935 jccb(Assembler::zero, COMPARE_TAIL);
6937 lea(ary1, Address(ary1, limit, Address::times_1));
6938 lea(ary2, Address(ary2, limit, Address::times_1));
6939 negptr(limit);
6941 bind(COMPARE_WIDE_VECTORS);
6942 movdqu(vec1, Address(ary1, limit, Address::times_1));
6943 movdqu(vec2, Address(ary2, limit, Address::times_1));
6944 pxor(vec1, vec2);
6946 ptest(vec1, vec1);
6947 jccb(Assembler::notZero, FALSE_LABEL);
6948 addptr(limit, 16);
6949 jcc(Assembler::notZero, COMPARE_WIDE_VECTORS);
6951 testl(result, result);
6952 jccb(Assembler::zero, TRUE_LABEL);
6954 movdqu(vec1, Address(ary1, result, Address::times_1, -16));
6955 movdqu(vec2, Address(ary2, result, Address::times_1, -16));
6956 pxor(vec1, vec2);
6958 ptest(vec1, vec1);
6959 jccb(Assembler::notZero, FALSE_LABEL);
6960 jmpb(TRUE_LABEL);
6962 bind(COMPARE_TAIL); // limit is zero
6963 movl(limit, result);
6964 // Fallthru to tail compare
6965 }
6967 // Compare 4-byte vectors
6968 andl(limit, 0xfffffffc); // vector count (in bytes)
6969 jccb(Assembler::zero, COMPARE_CHAR);
6971 lea(ary1, Address(ary1, limit, Address::times_1));
6972 lea(ary2, Address(ary2, limit, Address::times_1));
6973 negptr(limit);
6975 bind(COMPARE_VECTORS);
6976 movl(chr, Address(ary1, limit, Address::times_1));
6977 cmpl(chr, Address(ary2, limit, Address::times_1));
6978 jccb(Assembler::notEqual, FALSE_LABEL);
6979 addptr(limit, 4);
6980 jcc(Assembler::notZero, COMPARE_VECTORS);
6982 // Compare trailing char (final 2 bytes), if any
6983 bind(COMPARE_CHAR);
6984 testl(result, 0x2); // tail char
6985 jccb(Assembler::zero, TRUE_LABEL);
6986 load_unsigned_short(chr, Address(ary1, 0));
6987 load_unsigned_short(limit, Address(ary2, 0));
6988 cmpl(chr, limit);
6989 jccb(Assembler::notEqual, FALSE_LABEL);
6991 bind(TRUE_LABEL);
6992 movl(result, 1); // return true
6993 jmpb(DONE);
6995 bind(FALSE_LABEL);
6996 xorl(result, result); // return false
6998 // That's it
6999 bind(DONE);
7000 if (UseAVX >= 2) {
7001 // clean upper bits of YMM registers
7002 vpxor(vec1, vec1);
7003 vpxor(vec2, vec2);
7004 }
7005 }
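// Net effect (illustrative sketch): with is_array_equ this is
// Arrays.equals(char[], char[]); otherwise it answers whether the
// 'limit' chars starting at ary1 and ary2 are pairwise equal,
// returning 1 in 'result' for equal and 0 otherwise.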
7007 void MacroAssembler::generate_fill(BasicType t, bool aligned,
7008 Register to, Register value, Register count,
7009 Register rtmp, XMMRegister xtmp) {
7010 ShortBranchVerifier sbv(this);
7011 assert_different_registers(to, value, count, rtmp);
7012 Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
7013 Label L_fill_2_bytes, L_fill_4_bytes;
7015 int shift = -1;
7016 switch (t) {
7017 case T_BYTE:
7018 shift = 2;
7019 break;
7020 case T_SHORT:
7021 shift = 1;
7022 break;
7023 case T_INT:
7024 shift = 0;
7025 break;
7026 default: ShouldNotReachHere();
7027 }
7029 if (t == T_BYTE) {
7030 andl(value, 0xff);
7031 movl(rtmp, value);
7032 shll(rtmp, 8);
7033 orl(value, rtmp);
7034 }
7035 if (t == T_SHORT) {
7036 andl(value, 0xffff);
7037 }
7038 if (t == T_BYTE || t == T_SHORT) {
7039 movl(rtmp, value);
7040 shll(rtmp, 16);
7041 orl(value, rtmp);
7042 }
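// At this point 'value' holds the fill pattern replicated to 32 bits.
// Replication sketch (illustrative), e.g. for T_BYTE value 0x4A:
//   value |= value << 8;   // 0x00004A4A (bytes only)
//   value |= value << 16;  // 0x4A4A4A4A (bytes and shorts)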
7044 cmpl(count, 2<<shift); // Short arrays (< 8 bytes) fill by element
7045 jcc(Assembler::below, L_fill_4_bytes); // use unsigned cmp
7046 if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) {
7047 // align the target address at a 4-byte boundary
7048 if (t == T_BYTE) {
7049 // One byte misalignment happens only for byte arrays
7050 testptr(to, 1);
7051 jccb(Assembler::zero, L_skip_align1);
7052 movb(Address(to, 0), value);
7053 increment(to);
7054 decrement(count);
7055 BIND(L_skip_align1);
7056 }
7057 // Two-byte misalignment happens only for byte and short (char) arrays
7058 testptr(to, 2);
7059 jccb(Assembler::zero, L_skip_align2);
7060 movw(Address(to, 0), value);
7061 addptr(to, 2);
7062 subl(count, 1<<(shift-1));
7063 BIND(L_skip_align2);
7064 }
7065 if (UseSSE < 2) {
7066 Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
7067 // Fill 32-byte chunks
7068 subl(count, 8 << shift);
7069 jcc(Assembler::less, L_check_fill_8_bytes);
7070 align(16);
7072 BIND(L_fill_32_bytes_loop);
7074 for (int i = 0; i < 32; i += 4) {
7075 movl(Address(to, i), value);
7076 }
7078 addptr(to, 32);
7079 subl(count, 8 << shift);
7080 jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
7081 BIND(L_check_fill_8_bytes);
7082 addl(count, 8 << shift);
7083 jccb(Assembler::zero, L_exit);
7084 jmpb(L_fill_8_bytes);
7086 //
7087 // length is too short, just fill qwords
7088 //
7089 BIND(L_fill_8_bytes_loop);
7090 movl(Address(to, 0), value);
7091 movl(Address(to, 4), value);
7092 addptr(to, 8);
7093 BIND(L_fill_8_bytes);
7094 subl(count, 1 << (shift + 1));
7095 jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
7096 // fall through to fill 4 bytes
7097 } else {
7098 Label L_fill_32_bytes;
7099 if (!UseUnalignedLoadStores) {
7100 // align to 8 bytes; we know we are 4-byte aligned to start
7101 testptr(to, 4);
7102 jccb(Assembler::zero, L_fill_32_bytes);
7103 movl(Address(to, 0), value);
7104 addptr(to, 4);
7105 subl(count, 1<<shift);
7106 }
7107 BIND(L_fill_32_bytes);
7108 {
7109 assert(UseSSE >= 2, "supported cpu only");
7110 Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
7111 movdl(xtmp, value);
7112 if (UseAVX >= 2 && UseUnalignedLoadStores) {
7113 // Fill 64-byte chunks
7114 Label L_fill_64_bytes_loop, L_check_fill_32_bytes;
7115 vpbroadcastd(xtmp, xtmp);
7117 subl(count, 16 << shift);
7118 jcc(Assembler::less, L_check_fill_32_bytes);
7119 align(16);
7121 BIND(L_fill_64_bytes_loop);
7122 vmovdqu(Address(to, 0), xtmp);
7123 vmovdqu(Address(to, 32), xtmp);
7124 addptr(to, 64);
7125 subl(count, 16 << shift);
7126 jcc(Assembler::greaterEqual, L_fill_64_bytes_loop);
7128 BIND(L_check_fill_32_bytes);
7129 addl(count, 8 << shift);
7130 jccb(Assembler::less, L_check_fill_8_bytes);
7131 vmovdqu(Address(to, 0), xtmp);
7132 addptr(to, 32);
7133 subl(count, 8 << shift);
7135 BIND(L_check_fill_8_bytes);
7136 // clean upper bits of YMM registers
7137 movdl(xtmp, value);
7138 pshufd(xtmp, xtmp, 0);
7139 } else {
7140 // Fill 32-byte chunks
7141 pshufd(xtmp, xtmp, 0);
7143 subl(count, 8 << shift);
7144 jcc(Assembler::less, L_check_fill_8_bytes);
7145 align(16);
7147 BIND(L_fill_32_bytes_loop);
7149 if (UseUnalignedLoadStores) {
7150 movdqu(Address(to, 0), xtmp);
7151 movdqu(Address(to, 16), xtmp);
7152 } else {
7153 movq(Address(to, 0), xtmp);
7154 movq(Address(to, 8), xtmp);
7155 movq(Address(to, 16), xtmp);
7156 movq(Address(to, 24), xtmp);
7157 }
7159 addptr(to, 32);
7160 subl(count, 8 << shift);
7161 jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
7163 BIND(L_check_fill_8_bytes);
7164 }
7165 addl(count, 8 << shift);
7166 jccb(Assembler::zero, L_exit);
7167 jmpb(L_fill_8_bytes);
7169 //
7170 // length is too short, just fill qwords
7171 //
7172 BIND(L_fill_8_bytes_loop);
7173 movq(Address(to, 0), xtmp);
7174 addptr(to, 8);
7175 BIND(L_fill_8_bytes);
7176 subl(count, 1 << (shift + 1));
7177 jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
7178 }
7179 }
7180 // fill trailing 4 bytes
7181 BIND(L_fill_4_bytes);
7182 testl(count, 1<<shift);
7183 jccb(Assembler::zero, L_fill_2_bytes);
7184 movl(Address(to, 0), value);
7185 if (t == T_BYTE || t == T_SHORT) {
7186 addptr(to, 4);
7187 BIND(L_fill_2_bytes);
7188 // fill trailing 2 bytes
7189 testl(count, 1<<(shift-1));
7190 jccb(Assembler::zero, L_fill_byte);
7191 movw(Address(to, 0), value);
7192 if (t == T_BYTE) {
7193 addptr(to, 2);
7194 BIND(L_fill_byte);
7195 // fill trailing byte
7196 testl(count, 1);
7197 jccb(Assembler::zero, L_exit);
7198 movb(Address(to, 0), value);
7199 } else {
7200 BIND(L_fill_byte);
7201 }
7202 } else {
7203 BIND(L_fill_2_bytes);
7204 }
7205 BIND(L_exit);
7206 }
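// Net effect (illustrative sketch): an Arrays.fill-style loop
//   for (int i = 0; i < count; i++) to[i] = value;
// where 'count' is in elements of type t and wider stores are used
// whenever alignment and CPU features allow.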
7208 // encode char[] to byte[] in ISO_8859_1
7209 void MacroAssembler::encode_iso_array(Register src, Register dst, Register len,
7210 XMMRegister tmp1Reg, XMMRegister tmp2Reg,
7211 XMMRegister tmp3Reg, XMMRegister tmp4Reg,
7212 Register tmp5, Register result) {
7213 // rsi: src
7214 // rdi: dst
7215 // rdx: len
7216 // rcx: tmp5
7217 // rax: result
7218 ShortBranchVerifier sbv(this);
7219 assert_different_registers(src, dst, len, tmp5, result);
7220 Label L_done, L_copy_1_char, L_copy_1_char_exit;
7222 // set result
7223 xorl(result, result);
7224 // check for zero length
7225 testl(len, len);
7226 jcc(Assembler::zero, L_done);
7227 movl(result, len);
7229 // Setup pointers
7230 lea(src, Address(src, len, Address::times_2)); // char[]
7231 lea(dst, Address(dst, len, Address::times_1)); // byte[]
7232 negptr(len);
7234 if (UseSSE42Intrinsics || UseAVX >= 2) {
7235 Label L_chars_8_check, L_copy_8_chars, L_copy_8_chars_exit;
7236 Label L_chars_16_check, L_copy_16_chars, L_copy_16_chars_exit;
7238 if (UseAVX >= 2) {
7239 Label L_chars_32_check, L_copy_32_chars, L_copy_32_chars_exit;
7240 movl(tmp5, 0xff00ff00); // create mask to test for Unicode chars in vector
7241 movdl(tmp1Reg, tmp5);
7242 vpbroadcastd(tmp1Reg, tmp1Reg);
7243 jmpb(L_chars_32_check);
7245 bind(L_copy_32_chars);
7246 vmovdqu(tmp3Reg, Address(src, len, Address::times_2, -64));
7247 vmovdqu(tmp4Reg, Address(src, len, Address::times_2, -32));
7248 vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector256 */ true);
7249 vptest(tmp2Reg, tmp1Reg); // check for Unicode chars in vector
7250 jccb(Assembler::notZero, L_copy_32_chars_exit);
7251 vpackuswb(tmp3Reg, tmp3Reg, tmp4Reg, /* vector256 */ true);
7252 vpermq(tmp4Reg, tmp3Reg, 0xD8, /* vector256 */ true);
7253 vmovdqu(Address(dst, len, Address::times_1, -32), tmp4Reg);
7255 bind(L_chars_32_check);
7256 addptr(len, 32);
7257 jccb(Assembler::lessEqual, L_copy_32_chars);
7259 bind(L_copy_32_chars_exit);
7260 subptr(len, 16);
7261 jccb(Assembler::greater, L_copy_16_chars_exit);
7263 } else if (UseSSE42Intrinsics) {
7264 movl(tmp5, 0xff00ff00); // create mask to test for Unicode chars in vector
7265 movdl(tmp1Reg, tmp5);
7266 pshufd(tmp1Reg, tmp1Reg, 0);
7267 jmpb(L_chars_16_check);
7268 }
7270 bind(L_copy_16_chars);
7271 if (UseAVX >= 2) {
7272 vmovdqu(tmp2Reg, Address(src, len, Address::times_2, -32));
7273 vptest(tmp2Reg, tmp1Reg);
7274 jccb(Assembler::notZero, L_copy_16_chars_exit);
7275 vpackuswb(tmp2Reg, tmp2Reg, tmp1Reg, /* vector256 */ true);
7276 vpermq(tmp3Reg, tmp2Reg, 0xD8, /* vector256 */ true);
7277 } else {
7278 if (UseAVX > 0) {
7279 movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
7280 movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
7281 vpor(tmp2Reg, tmp3Reg, tmp4Reg, /* vector256 */ false);
7282 } else {
7283 movdqu(tmp3Reg, Address(src, len, Address::times_2, -32));
7284 por(tmp2Reg, tmp3Reg);
7285 movdqu(tmp4Reg, Address(src, len, Address::times_2, -16));
7286 por(tmp2Reg, tmp4Reg);
7287 }
7288 ptest(tmp2Reg, tmp1Reg); // check for Unicode chars in vector
7289 jccb(Assembler::notZero, L_copy_16_chars_exit);
7290 packuswb(tmp3Reg, tmp4Reg);
7291 }
7292 movdqu(Address(dst, len, Address::times_1, -16), tmp3Reg);
7294 bind(L_chars_16_check);
7295 addptr(len, 16);
7296 jccb(Assembler::lessEqual, L_copy_16_chars);
7298 bind(L_copy_16_chars_exit);
7299 if (UseAVX >= 2) {
7300 // clean upper bits of YMM registers
7301 vpxor(tmp2Reg, tmp2Reg);
7302 vpxor(tmp3Reg, tmp3Reg);
7303 vpxor(tmp4Reg, tmp4Reg);
7304 movdl(tmp1Reg, tmp5);
7305 pshufd(tmp1Reg, tmp1Reg, 0);
7306 }
7307 subptr(len, 8);
7308 jccb(Assembler::greater, L_copy_8_chars_exit);
7310 bind(L_copy_8_chars);
7311 movdqu(tmp3Reg, Address(src, len, Address::times_2, -16));
7312 ptest(tmp3Reg, tmp1Reg);
7313 jccb(Assembler::notZero, L_copy_8_chars_exit);
7314 packuswb(tmp3Reg, tmp1Reg);
7315 movq(Address(dst, len, Address::times_1, -8), tmp3Reg);
7316 addptr(len, 8);
7317 jccb(Assembler::lessEqual, L_copy_8_chars);
7319 bind(L_copy_8_chars_exit);
7320 subptr(len, 8);
7321 jccb(Assembler::zero, L_done);
7322 }
7324 bind(L_copy_1_char);
7325 load_unsigned_short(tmp5, Address(src, len, Address::times_2, 0));
7326 testl(tmp5, 0xff00); // check if Unicode char
7327 jccb(Assembler::notZero, L_copy_1_char_exit);
7328 movb(Address(dst, len, Address::times_1, 0), tmp5);
7329 addptr(len, 1);
7330 jccb(Assembler::less, L_copy_1_char);
7332 bind(L_copy_1_char_exit);
7333 addptr(result, len); // len is the negative count of unprocessed elements
7334 bind(L_done);
7335 }
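// Java-level sketch of encode_iso_array above (illustrative only):
//   int i = 0;
//   for (; i < len; i++) {
//     char c = src[i];
//     if (c > 0xFF) break;   // non-Latin-1 char stops the copy
//     dst[i] = (byte)c;
//   }
//   return i;                // chars actually encoded, in 'result'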
7337 #ifdef _LP64
7338 /**
7339 * Helper for multiply_to_len().
7340 */
7341 void MacroAssembler::add2_with_carry(Register dest_hi, Register dest_lo, Register src1, Register src2) {
7342 addq(dest_lo, src1);
7343 adcq(dest_hi, 0);
7344 addq(dest_lo, src2);
7345 adcq(dest_hi, 0);
7346 }
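// i.e. a 128-bit accumulate (sketch):
//   dest_hi:dest_lo += src1 + src2
// with the carry out of each 64-bit add propagated into dest_hi.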
7348 /**
7349 * Multiply 64 bit by 64 bit first loop.
7350 */
7351 void MacroAssembler::multiply_64_x_64_loop(Register x, Register xstart, Register x_xstart,
7352 Register y, Register y_idx, Register z,
7353 Register carry, Register product,
7354 Register idx, Register kdx) {
7355 //
7356 // jlong carry, x[], y[], z[];
7357 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
7358 // huge_128 product = y[idx] * x[xstart] + carry;
7359 // z[kdx] = (jlong)product;
7360 // carry = (jlong)(product >>> 64);
7361 // }
7362 // z[xstart] = carry;
7363 //
7365 Label L_first_loop, L_first_loop_exit;
7366 Label L_one_x, L_one_y, L_multiply;
7368 decrementl(xstart);
7369 jcc(Assembler::negative, L_one_x);
7371 movq(x_xstart, Address(x, xstart, Address::times_4, 0));
7372 rorq(x_xstart, 32); // convert big-endian to little-endian
7374 bind(L_first_loop);
7375 decrementl(idx);
7376 jcc(Assembler::negative, L_first_loop_exit);
7377 decrementl(idx);
7378 jcc(Assembler::negative, L_one_y);
7379 movq(y_idx, Address(y, idx, Address::times_4, 0));
7380 rorq(y_idx, 32); // convert big-endian to little-endian
7381 bind(L_multiply);
7382 movq(product, x_xstart);
7383 mulq(y_idx); // product(rax) * y_idx -> rdx:rax
7384 addq(product, carry);
7385 adcq(rdx, 0);
7386 subl(kdx, 2);
7387 movl(Address(z, kdx, Address::times_4, 4), product);
7388 shrq(product, 32);
7389 movl(Address(z, kdx, Address::times_4, 0), product);
7390 movq(carry, rdx);
7391 jmp(L_first_loop);
7393 bind(L_one_y);
7394 movl(y_idx, Address(y, 0));
7395 jmp(L_multiply);
7397 bind(L_one_x);
7398 movl(x_xstart, Address(x, 0));
7399 jmp(L_first_loop);
7401 bind(L_first_loop_exit);
7402 }
7404 /**
7405 * Multiply 64 bit by 64 bit and add 128 bit.
7406 */
7407 void MacroAssembler::multiply_add_128_x_128(Register x_xstart, Register y, Register z,
7408 Register yz_idx, Register idx,
7409 Register carry, Register product, int offset) {
7410 // huge_128 product = (y[idx] * x_xstart) + z[kdx] + carry;
7411 // z[kdx] = (jlong)product;
7413 movq(yz_idx, Address(y, idx, Address::times_4, offset));
7414 rorq(yz_idx, 32); // convert big-endian to little-endian
7415 movq(product, x_xstart);
7416 mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax)
7417 movq(yz_idx, Address(z, idx, Address::times_4, offset));
7418 rorq(yz_idx, 32); // convert big-endian to little-endian
7420 add2_with_carry(rdx, product, carry, yz_idx);
7422 movl(Address(z, idx, Address::times_4, offset+4), product);
7423 shrq(product, 32);
7424 movl(Address(z, idx, Address::times_4, offset), product);
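// Note: the upper 64 bits of the product remain in rdx as the carry
// for the caller (multiply_128_x_128_loop reads rdx right after this).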
7426 }
7428 /**
7429 * Multiply 128 bit by 128 bit. Unrolled inner loop.
7430 */
7431 void MacroAssembler::multiply_128_x_128_loop(Register x_xstart, Register y, Register z,
7432 Register yz_idx, Register idx, Register jdx,
7433 Register carry, Register product,
7434 Register carry2) {
7435 // jlong carry, x[], y[], z[];
7436 // int kdx = ystart+1;
7437 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
7438 // huge_128 product = (y[idx+1] * x_xstart) + z[kdx+idx+1] + carry;
7439 // z[kdx+idx+1] = (jlong)product;
7440 // jlong carry2 = (jlong)(product >>> 64);
7441 // product = (y[idx] * x_xstart) + z[kdx+idx] + carry2;
7442 // z[kdx+idx] = (jlong)product;
7443 // carry = (jlong)(product >>> 64);
7444 // }
7445 // idx += 2;
7446 // if (idx > 0) {
7447 // product = (y[idx] * x_xstart) + z[kdx+idx] + carry;
7448 // z[kdx+idx] = (jlong)product;
7449 // carry = (jlong)(product >>> 64);
7450 // }
7451 //
7453 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
7455 movl(jdx, idx);
7456 andl(jdx, 0xFFFFFFFC);
7457 shrl(jdx, 2);
7459 bind(L_third_loop);
7460 subl(jdx, 1);
7461 jcc(Assembler::negative, L_third_loop_exit);
7462 subl(idx, 4);
7464 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 8);
7465 movq(carry2, rdx);
7467 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry2, product, 0);
7468 movq(carry, rdx);
7469 jmp(L_third_loop);
7471 bind (L_third_loop_exit);
7473 andl (idx, 0x3);
7474 jcc(Assembler::zero, L_post_third_loop_done);
7476 Label L_check_1;
7477 subl(idx, 2);
7478 jcc(Assembler::negative, L_check_1);
7480 multiply_add_128_x_128(x_xstart, y, z, yz_idx, idx, carry, product, 0);
7481 movq(carry, rdx);
7483 bind (L_check_1);
7484 addl (idx, 0x2);
7485 andl (idx, 0x1);
7486 subl(idx, 1);
7487 jcc(Assembler::negative, L_post_third_loop_done);
7489 movl(yz_idx, Address(y, idx, Address::times_4, 0));
7490 movq(product, x_xstart);
7491 mulq(yz_idx); // product(rax) * yz_idx -> rdx:product(rax)
7492 movl(yz_idx, Address(z, idx, Address::times_4, 0));
7494 add2_with_carry(rdx, product, yz_idx, carry);
7496 movl(Address(z, idx, Address::times_4, 0), product);
7497 shrq(product, 32);
7499 shlq(rdx, 32);
7500 orq(product, rdx);
7501 movq(carry, product);
7503 bind(L_post_third_loop_done);
7504 }
7506 /**
7507 * Multiply 128 bit by 128 bit using BMI2. Unrolled inner loop.
7508 *
7509 */
7510 void MacroAssembler::multiply_128_x_128_bmi2_loop(Register y, Register z,
7511 Register carry, Register carry2,
7512 Register idx, Register jdx,
7513 Register yz_idx1, Register yz_idx2,
7514 Register tmp, Register tmp3, Register tmp4) {
7515 assert(UseBMI2Instructions, "should be used only when BMI2 is available");
7517 // jlong carry, x[], y[], z[];
7518 // int kdx = ystart+1;
7519 // for (int idx=ystart-2; idx >= 0; idx -= 2) { // Third loop
7520 // huge_128 tmp3 = (y[idx+1] * rdx) + z[kdx+idx+1] + carry;
7521 // jlong carry2 = (jlong)(tmp3 >>> 64);
7522 // huge_128 tmp4 = (y[idx] * rdx) + z[kdx+idx] + carry2;
7523 // carry = (jlong)(tmp4 >>> 64);
7524 // z[kdx+idx+1] = (jlong)tmp3;
7525 // z[kdx+idx] = (jlong)tmp4;
7526 // }
7527 // idx += 2;
7528 // if (idx > 0) {
7529 // yz_idx1 = (y[idx] * rdx) + z[kdx+idx] + carry;
7530 // z[kdx+idx] = (jlong)yz_idx1;
7531 // carry = (jlong)(yz_idx1 >>> 64);
7532 // }
7533 //
7535 Label L_third_loop, L_third_loop_exit, L_post_third_loop_done;
7537 movl(jdx, idx);
7538 andl(jdx, 0xFFFFFFFC);
7539 shrl(jdx, 2);
7541 bind(L_third_loop);
7542 subl(jdx, 1);
7543 jcc(Assembler::negative, L_third_loop_exit);
7544 subl(idx, 4);
7546 movq(yz_idx1, Address(y, idx, Address::times_4, 8));
7547 rorxq(yz_idx1, yz_idx1, 32); // convert big-endian to little-endian
7548 movq(yz_idx2, Address(y, idx, Address::times_4, 0));
7549 rorxq(yz_idx2, yz_idx2, 32);
7551 mulxq(tmp4, tmp3, yz_idx1); // yz_idx1 * rdx -> tmp4:tmp3
7552 mulxq(carry2, tmp, yz_idx2); // yz_idx2 * rdx -> carry2:tmp
7554 movq(yz_idx1, Address(z, idx, Address::times_4, 8));
7555 rorxq(yz_idx1, yz_idx1, 32);
7556 movq(yz_idx2, Address(z, idx, Address::times_4, 0));
7557 rorxq(yz_idx2, yz_idx2, 32);
7559 if (VM_Version::supports_adx()) {
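// ADX sketch: adcx carries only through CF and adox only through OF,
// so the two chains below run interleaved without fighting over a
// single carry flag.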
7560 adcxq(tmp3, carry);
7561 adoxq(tmp3, yz_idx1);
7563 adcxq(tmp4, tmp);
7564 adoxq(tmp4, yz_idx2);
7566 movl(carry, 0); // does not affect flags
7567 adcxq(carry2, carry);
7568 adoxq(carry2, carry);
7569 } else {
7570 add2_with_carry(tmp4, tmp3, carry, yz_idx1);
7571 add2_with_carry(carry2, tmp4, tmp, yz_idx2);
7572 }
7573 movq(carry, carry2);
7575 movl(Address(z, idx, Address::times_4, 12), tmp3);
7576 shrq(tmp3, 32);
7577 movl(Address(z, idx, Address::times_4, 8), tmp3);
7579 movl(Address(z, idx, Address::times_4, 4), tmp4);
7580 shrq(tmp4, 32);
7581 movl(Address(z, idx, Address::times_4, 0), tmp4);
7583 jmp(L_third_loop);
7585 bind (L_third_loop_exit);
7587 andl (idx, 0x3);
7588 jcc(Assembler::zero, L_post_third_loop_done);
7590 Label L_check_1;
7591 subl(idx, 2);
7592 jcc(Assembler::negative, L_check_1);
7594 movq(yz_idx1, Address(y, idx, Address::times_4, 0));
7595 rorxq(yz_idx1, yz_idx1, 32);
7596 mulxq(tmp4, tmp3, yz_idx1); // yz_idx1 * rdx -> tmp4:tmp3
7597 movq(yz_idx2, Address(z, idx, Address::times_4, 0));
7598 rorxq(yz_idx2, yz_idx2, 32);
7600 add2_with_carry(tmp4, tmp3, carry, yz_idx2);
7602 movl(Address(z, idx, Address::times_4, 4), tmp3);
7603 shrq(tmp3, 32);
7604 movl(Address(z, idx, Address::times_4, 0), tmp3);
7605 movq(carry, tmp4);
7607 bind (L_check_1);
7608 addl (idx, 0x2);
7609 andl (idx, 0x1);
7610 subl(idx, 1);
7611 jcc(Assembler::negative, L_post_third_loop_done);
7612 movl(tmp4, Address(y, idx, Address::times_4, 0));
7613 mulxq(carry2, tmp3, tmp4); // tmp4 * rdx -> carry2:tmp3
7614 movl(tmp4, Address(z, idx, Address::times_4, 0));
7616 add2_with_carry(carry2, tmp3, tmp4, carry);
7618 movl(Address(z, idx, Address::times_4, 0), tmp3);
7619 shrq(tmp3, 32);
7621 shlq(carry2, 32);
7622 orq(tmp3, carry2);
7623 movq(carry, tmp3);
7625 bind(L_post_third_loop_done);
7626 }
7628 /**
7629 * Code for BigInteger::multiplyToLen() intrinsic.
7630 *
7631 * rdi: x
7632 * rax: xlen
7633 * rsi: y
7634 * rcx: ylen
7635 * r8: z
7636 * r11: zlen
7637 * r12: tmp1
7638 * r13: tmp2
7639 * r14: tmp3
7640 * r15: tmp4
7641 * rbx: tmp5
7642 *
7643 */
7644 void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Register ylen, Register z, Register zlen,
7645 Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5) {
7646 ShortBranchVerifier sbv(this);
7647 assert_different_registers(x, xlen, y, ylen, z, zlen, tmp1, tmp2, tmp3, tmp4, tmp5, rdx);
7649 push(tmp1);
7650 push(tmp2);
7651 push(tmp3);
7652 push(tmp4);
7653 push(tmp5);
7655 push(xlen);
7656 push(zlen);
7658 const Register idx = tmp1;
7659 const Register kdx = tmp2;
7660 const Register xstart = tmp3;
7662 const Register y_idx = tmp4;
7663 const Register carry = tmp5;
7664 const Register product = xlen;
7665 const Register x_xstart = zlen; // reuse register
7667 // First Loop.
7668 //
7669 // final static long LONG_MASK = 0xffffffffL;
7670 // int xstart = xlen - 1;
7671 // int ystart = ylen - 1;
7672 // long carry = 0;
7673 // for (int idx=ystart, kdx=ystart+1+xstart; idx >= 0; idx--, kdx--) {
7674 // long product = (y[idx] & LONG_MASK) * (x[xstart] & LONG_MASK) + carry;
7675 // z[kdx] = (int)product;
7676 // carry = product >>> 32;
7677 // }
7678 // z[xstart] = (int)carry;
7679 //
7681 movl(idx, ylen); // idx = ylen;
7682 movl(kdx, zlen); // kdx = xlen+ylen;
7683 xorq(carry, carry); // carry = 0;
7685 Label L_done;
7687 movl(xstart, xlen);
7688 decrementl(xstart);
7689 jcc(Assembler::negative, L_done);
7691 multiply_64_x_64_loop(x, xstart, x_xstart, y, y_idx, z, carry, product, idx, kdx);
7693 Label L_second_loop;
7694 testl(kdx, kdx);
7695 jcc(Assembler::zero, L_second_loop);
7697 Label L_carry;
7698 subl(kdx, 1);
7699 jcc(Assembler::zero, L_carry);
7701 movl(Address(z, kdx, Address::times_4, 0), carry);
7702 shrq(carry, 32);
7703 subl(kdx, 1);
7705 bind(L_carry);
7706 movl(Address(z, kdx, Address::times_4, 0), carry);
7708 // Second and third (nested) loops.
7709 //
7710 // for (int i = xstart-1; i >= 0; i--) { // Second loop
7711 // carry = 0;
7712 // for (int jdx=ystart, k=ystart+1+i; jdx >= 0; jdx--, k--) { // Third loop
7713 // long product = (y[jdx] & LONG_MASK) * (x[i] & LONG_MASK) +
7714 // (z[k] & LONG_MASK) + carry;
7715 // z[k] = (int)product;
7716 // carry = product >>> 32;
7717 // }
7718 // z[i] = (int)carry;
7719 // }
7720 //
7721 // i = xlen, j = tmp1, k = tmp2, carry = tmp5, x[i] = rdx
7723 const Register jdx = tmp1;
7725 bind(L_second_loop);
7726 xorl(carry, carry); // carry = 0;
7727 movl(jdx, ylen); // j = ystart+1
7729 subl(xstart, 1); // i = xstart-1;
7730 jcc(Assembler::negative, L_done);
7732 push (z);
7734 Label L_last_x;
7735 lea(z, Address(z, xstart, Address::times_4, 4)); // z = z + k - j
7736 subl(xstart, 1); // i = xstart-1;
7737 jcc(Assembler::negative, L_last_x);
7739 if (UseBMI2Instructions) {
7740 movq(rdx, Address(x, xstart, Address::times_4, 0));
7741 rorxq(rdx, rdx, 32); // convert big-endian to little-endian
7742 } else {
7743 movq(x_xstart, Address(x, xstart, Address::times_4, 0));
7744 rorq(x_xstart, 32); // convert big-endian to little-endian
7745 }
7747 Label L_third_loop_prologue;
7748 bind(L_third_loop_prologue);
7750 push (x);
7751 push (xstart);
7752 push (ylen);
7755 if (UseBMI2Instructions) {
7756 multiply_128_x_128_bmi2_loop(y, z, carry, x, jdx, ylen, product, tmp2, x_xstart, tmp3, tmp4);
7757 } else { // !UseBMI2Instructions
7758 multiply_128_x_128_loop(x_xstart, y, z, y_idx, jdx, ylen, carry, product, x);
7759 }
7761 pop(ylen);
7762 pop(xlen);
7763 pop(x);
7764 pop(z);
7766 movl(tmp3, xlen);
7767 addl(tmp3, 1);
7768 movl(Address(z, tmp3, Address::times_4, 0), carry);
7769 subl(tmp3, 1);
7770 jccb(Assembler::negative, L_done);
7772 shrq(carry, 32);
7773 movl(Address(z, tmp3, Address::times_4, 0), carry);
7774 jmp(L_second_loop);
7776 // The following infrequently executed code is placed outside the loops.
7777 bind(L_last_x);
7778 if (UseBMI2Instructions) {
7779 movl(rdx, Address(x, 0));
7780 } else {
7781 movl(x_xstart, Address(x, 0));
7782 }
7783 jmp(L_third_loop_prologue);
7785 bind(L_done);
7787 pop(zlen);
7788 pop(xlen);
7790 pop(tmp5);
7791 pop(tmp4);
7792 pop(tmp3);
7793 pop(tmp2);
7794 pop(tmp1);
7795 }
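// Caller sketch (illustrative): this is the intrinsic body for
// BigInteger.multiplyToLen(int[] x, int xlen, int[] y, int ylen, int[] z),
// where z holds xlen+ylen big-endian 32-bit words on return.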
7797 // Helper functions for square_to_len()
7799 /**
7800 * Store the squares of x[], right shifted one bit (divided by 2) into z[]
7801 * Preserves x and z and modifies rest of the registers.
7802 */
7804 void MacroAssembler::square_rshift(Register x, Register xlen, Register z, Register tmp1, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
7805 // Perform square and right shift by 1
7806 // Handle odd xlen case first, then for even xlen do the following
7807 // jlong carry = 0;
7808 // for (int j=0, i=0; j < xlen; j+=2, i+=4) {
7809 // huge_128 product = x[j:j+1] * x[j:j+1];
7810 // z[i:i+1] = (carry << 63) | (jlong)(product >>> 65);
7811 // z[i+2:i+3] = (jlong)(product >>> 1);
7812 // carry = (jlong)product;
7813 // }
7815 xorq(tmp5, tmp5); // carry
7816 xorq(rdxReg, rdxReg);
7817 xorl(tmp1, tmp1); // index for x
7818 xorl(tmp4, tmp4); // index for z
7820 Label L_first_loop, L_first_loop_exit;
7822 testl(xlen, 1);
7823 jccb(Assembler::zero, L_first_loop); //jump if xlen is even
7825 // Square and right shift by 1 the odd element using 32 bit multiply
7826 movl(raxReg, Address(x, tmp1, Address::times_4, 0));
7827 imulq(raxReg, raxReg);
7828 shrq(raxReg, 1);
7829 adcq(tmp5, 0);
7830 movq(Address(z, tmp4, Address::times_4, 0), raxReg);
7831 incrementl(tmp1);
7832 addl(tmp4, 2);
7834 // Square and right shift by 1 the rest using 64 bit multiply
7835 bind(L_first_loop);
7836 cmpptr(tmp1, xlen);
7837 jccb(Assembler::equal, L_first_loop_exit);
7839 // Square
7840 movq(raxReg, Address(x, tmp1, Address::times_4, 0));
7841 rorq(raxReg, 32); // convert big-endian to little-endian
7842 mulq(raxReg); // 64-bit multiply rax * rax -> rdx:rax
7844 // Right shift by 1 and save carry
7845 shrq(tmp5, 1); // rdx:rax:tmp5 = (tmp5:rdx:rax) >>> 1
7846 rcrq(rdxReg, 1);
7847 rcrq(raxReg, 1);
7848 adcq(tmp5, 0);
7850 // Store result in z
7851 movq(Address(z, tmp4, Address::times_4, 0), rdxReg);
7852 movq(Address(z, tmp4, Address::times_4, 8), raxReg);
7854 // Update indices for x and z
7855 addl(tmp1, 2);
7856 addl(tmp4, 4);
7857 jmp(L_first_loop);
7859 bind(L_first_loop_exit);
7860 }
7863 /**
7864 * Perform the following multiply add operation using BMI2 instructions
7865 * carry:sum = sum + op1*op2 + carry
7866 * op2 should be in rdx
7867 * op2 is preserved, all other registers are modified
7868 */
7869 void MacroAssembler::multiply_add_64_bmi2(Register sum, Register op1, Register op2, Register carry, Register tmp2) {
7870 // op2 must already be in rdx: mulxq multiplies by rdx implicitly
7871 mulxq(tmp2, op1, op1); // op1 * op2 -> tmp2:op1
7872 addq(sum, carry);
7873 adcq(tmp2, 0);
7874 addq(sum, op1);
7875 adcq(tmp2, 0);
7876 movq(carry, tmp2);
7877 }
7879 /**
7880 * Perform the following multiply add operation:
7881 * carry:sum = sum + op1*op2 + carry
7882 * Preserves op1, op2 and modifies rest of registers
7883 */
7884 void MacroAssembler::multiply_add_64(Register sum, Register op1, Register op2, Register carry, Register rdxReg, Register raxReg) {
7885 // rdx:rax = op1 * op2
7886 movq(raxReg, op2);
7887 mulq(op1);
7889 // rdx:rax = sum + carry + rdx:rax
7890 addq(sum, carry);
7891 adcq(rdxReg, 0);
7892 addq(sum, raxReg);
7893 adcq(rdxReg, 0);
7895 // carry:sum = rdx:sum
7896 movq(carry, rdxReg);
7897 }
7899 /**
7900 * Add 64 bit long carry into z[] with carry propagation.
7901 * Preserves z and carry register values and modifies rest of registers.
7902 *
7903 */
7904 void MacroAssembler::add_one_64(Register z, Register zlen, Register carry, Register tmp1) {
7905 Label L_fourth_loop, L_fourth_loop_exit;
7907 movl(tmp1, 1);
7908 subl(zlen, 2);
7909 addq(Address(z, zlen, Address::times_4, 0), carry);
7911 bind(L_fourth_loop);
7912 jccb(Assembler::carryClear, L_fourth_loop_exit);
7913 subl(zlen, 2);
7914 jccb(Assembler::negative, L_fourth_loop_exit);
7915 addq(Address(z, zlen, Address::times_4, 0), tmp1);
7916 jmp(L_fourth_loop);
7917 bind(L_fourth_loop_exit);
7918 }
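// Loop sketch (illustrative):
//   z[zlen-2 .. zlen-1] += carry;
//   while (carry out && zlen >= 2) { zlen -= 2; z[zlen .. zlen+1] += 1; }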
7920 /**
7921 * Shift z[] left by 1 bit.
7922 * Preserves x, len, z and zlen registers and modifies rest of the registers.
7923 *
7924 */
7925 void MacroAssembler::lshift_by_1(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
7927 Label L_fifth_loop, L_fifth_loop_exit;
7929 // Fifth loop
7930 // Perform primitiveLeftShift(z, zlen, 1)
7932 const Register prev_carry = tmp1;
7933 const Register new_carry = tmp4;
7934 const Register value = tmp2;
7935 const Register zidx = tmp3;
7937 // int zidx, carry;
7938 // long value;
7939 // carry = 0;
7940 // for (zidx = zlen-2; zidx >=0; zidx -= 2) {
7941 // (carry:value) = (z[i] << 1) | carry ;
7942 // z[i] = value;
7943 // }
7945 movl(zidx, zlen);
7946 xorl(prev_carry, prev_carry); // clear carry flag and prev_carry register
7948 bind(L_fifth_loop);
7949 decl(zidx); // Use decl to preserve carry flag
7950 decl(zidx);
7951 jccb(Assembler::negative, L_fifth_loop_exit);
7953 if (UseBMI2Instructions) {
7954 movq(value, Address(z, zidx, Address::times_4, 0));
7955 rclq(value, 1);
7956 rorxq(value, value, 32);
7957 movq(Address(z, zidx, Address::times_4, 0), value); // Store back in big endian form
7958 }
7959 else {
7960 // clear new_carry
7961 xorl(new_carry, new_carry);
7963 // Shift z[i] by 1, or in previous carry and save new carry
7964 movq(value, Address(z, zidx, Address::times_4, 0));
7965 shlq(value, 1);
7966 adcl(new_carry, 0);
7968 orq(value, prev_carry);
7969 rorq(value, 0x20);
7970 movq(Address(z, zidx, Address::times_4, 0), value); // Store back in big endian form
7972 // Set previous carry = new carry
7973 movl(prev_carry, new_carry);
7974 }
7975 jmp(L_fifth_loop);
7977 bind(L_fifth_loop_exit);
7978 }
7981 /**
7982 * Code for BigInteger::squareToLen() intrinsic
7983 *
7984 * rdi: x
7985 * rsi: len
7986 * r8: z
7987 * rcx: zlen
7988 * r12: tmp1
7989 * r13: tmp2
7990 * r14: tmp3
7991 * r15: tmp4
7992 * rbx: tmp5
7993 *
7994 */
7995 void MacroAssembler::square_to_len(Register x, Register len, Register z, Register zlen, Register tmp1, Register tmp2, Register tmp3, Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
7997 Label L_second_loop, L_second_loop_exit, L_third_loop, L_third_loop_exit, fifth_loop, fifth_loop_exit, L_last_x, L_multiply;
7998 push(tmp1);
7999 push(tmp2);
8000 push(tmp3);
8001 push(tmp4);
8002 push(tmp5);
8004 // First loop
8005 // Store the squares, right shifted one bit (i.e., divided by 2).
8006 square_rshift(x, len, z, tmp1, tmp3, tmp4, tmp5, rdxReg, raxReg);
8008 // Add in off-diagonal sums.
8009 //
8010 // Second, third (nested) and fourth loops.
8011 // zlen +=2;
8012 // for (int xidx=len-2,zidx=zlen-4; xidx > 0; xidx-=2,zidx-=4) {
8013 // carry = 0;
8014 // long op2 = x[xidx:xidx+1];
8015 // for (int j=xidx-2,k=zidx; j >= 0; j-=2) {
8016 // k -= 2;
8017 // long op1 = x[j:j+1];
8018 // long sum = z[k:k+1];
8019 // carry:sum = multiply_add_64(sum, op1, op2, carry, tmp_regs);
8020 // z[k:k+1] = sum;
8021 // }
8022 // add_one_64(z, k, carry, tmp_regs);
8023 // }
8025 const Register carry = tmp5;
8026 const Register sum = tmp3;
8027 const Register op1 = tmp4;
8028 Register op2 = tmp2;
8030 push(zlen);
8031 push(len);
8032 addl(zlen, 2);
8033 bind(L_second_loop);
8034 xorq(carry, carry);
8035 subl(zlen, 4);
8036 subl(len, 2);
8037 push(zlen);
8038 push(len);
8039 cmpl(len, 0);
8040 jccb(Assembler::lessEqual, L_second_loop_exit);
8042 // Multiply an array by one 64 bit long.
8043 if (UseBMI2Instructions) {
8044 op2 = rdxReg;
8045 movq(op2, Address(x, len, Address::times_4, 0));
8046 rorxq(op2, op2, 32);
8047 }
8048 else {
8049 movq(op2, Address(x, len, Address::times_4, 0));
8050 rorq(op2, 32);
8051 }
8053 bind(L_third_loop);
8054 decrementl(len);
8055 jccb(Assembler::negative, L_third_loop_exit);
8056 decrementl(len);
8057 jccb(Assembler::negative, L_last_x);
8059 movq(op1, Address(x, len, Address::times_4, 0));
8060 rorq(op1, 32);
8062 bind(L_multiply);
8063 subl(zlen, 2);
8064 movq(sum, Address(z, zlen, Address::times_4, 0));
8066 // Multiply 64 bit by 64 bit; add the lower 64 bits of the product into sum and keep the upper 64 bits as the carry.
8067 if (UseBMI2Instructions) {
8068 multiply_add_64_bmi2(sum, op1, op2, carry, tmp2);
8069 }
8070 else {
8071 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
8072 }
8074 movq(Address(z, zlen, Address::times_4, 0), sum);
8076 jmp(L_third_loop);
8077 bind(L_third_loop_exit);
8079 // Fourth loop
8080 // Add 64 bit long carry into z with carry propagation.
8081 // Uses the offset-adjusted zlen.
8082 add_one_64(z, zlen, carry, tmp1);
8084 pop(len);
8085 pop(zlen);
8086 jmp(L_second_loop);
8088 // The following infrequently executed code is placed outside the loops.
8089 bind(L_last_x);
8090 movl(op1, Address(x, 0));
8091 jmp(L_multiply);
8093 bind(L_second_loop_exit);
8094 pop(len);
8095 pop(zlen);
8096 pop(len);
8097 pop(zlen);
8099 // Fifth loop
8100 // Shift z left 1 bit.
8101 lshift_by_1(x, len, z, zlen, tmp1, tmp2, tmp3, tmp4);
8103 // z[zlen-1] |= x[len-1] & 1;
8104 movl(tmp3, Address(x, len, Address::times_4, -4));
8105 andl(tmp3, 1);
8106 orl(Address(z, zlen, Address::times_4, -4), tmp3);
8108 pop(tmp5);
8109 pop(tmp4);
8110 pop(tmp3);
8111 pop(tmp2);
8112 pop(tmp1);
8113 }
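// Overall sketch (illustrative): computes z = x * x for
// BigInteger.squareToLen using the classic optimization: sum the
// halved squares and the off-diagonal products, then double the
// whole result with the final one-bit left shift.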
8115 /**
8116 * Helper function for mul_add()
8117 * Multiply in[] by int k and add into out[] starting at offset offs, using
8118 * a 128-bit by 32-bit multiply; the carry is returned in tmp5.
8119 * Only the quad-int-aligned prefix of in[] is processed by this function.
8120 * k is in rdxReg when BMI2 instructions are used; otherwise it is in tmp2.
8121 * This function preserves the out, in and k registers.
8122 * len and offset point to the appropriate indices in "in" and "out" respectively;
8123 * tmp5 holds the carry.
8124 * The other registers are temporaries and are modified.
8125 *
8126 */
8127 void MacroAssembler::mul_add_128_x_32_loop(Register out, Register in,
8128 Register offset, Register len, Register tmp1, Register tmp2, Register tmp3,
8129 Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
8131 Label L_first_loop, L_first_loop_exit;
8133 movl(tmp1, len);
8134 shrl(tmp1, 2);
8136 bind(L_first_loop);
8137 subl(tmp1, 1);
8138 jccb(Assembler::negative, L_first_loop_exit);
8140 subl(len, 4);
8141 subl(offset, 4);
8143 Register op2 = tmp2;
8144 const Register sum = tmp3;
8145 const Register op1 = tmp4;
8146 const Register carry = tmp5;
8148 if (UseBMI2Instructions) {
8149 op2 = rdxReg;
8150 }
8152 movq(op1, Address(in, len, Address::times_4, 8));
8153 rorq(op1, 32);
8154 movq(sum, Address(out, offset, Address::times_4, 8));
8155 rorq(sum, 32);
8156 if (UseBMI2Instructions) {
8157 multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
8158 }
8159 else {
8160 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
8161 }
8162 // Store back in big endian from little endian
8163 rorq(sum, 0x20);
8164 movq(Address(out, offset, Address::times_4, 8), sum);
8166 movq(op1, Address(in, len, Address::times_4, 0));
8167 rorq(op1, 32);
8168 movq(sum, Address(out, offset, Address::times_4, 0));
8169 rorq(sum, 32);
8170 if (UseBMI2Instructions) {
8171 multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
8172 }
8173 else {
8174 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
8175 }
8176 // Store back in big endian from little endian
8177 rorq(sum, 0x20);
8178 movq(Address(out, offset, Address::times_4, 0), sum);
8180 jmp(L_first_loop);
8181 bind(L_first_loop_exit);
8182 }
8184 /**
8185 * Code for BigInteger::mulAdd() intrinsic
8186 *
8187 * rdi: out
8188 * rsi: in
8189 * r11: offs (out.length - offset)
8190 * rcx: len
8191 * r8: k
8192 * r12: tmp1
8193 * r13: tmp2
8194 * r14: tmp3
8195 * r15: tmp4
8196 * rbx: tmp5
8197 * Multiply in[] by word k and add into out[]; return the carry in rax
8198 */
8199 void MacroAssembler::mul_add(Register out, Register in, Register offs,
8200 Register len, Register k, Register tmp1, Register tmp2, Register tmp3,
8201 Register tmp4, Register tmp5, Register rdxReg, Register raxReg) {
8203 Label L_carry, L_last_in, L_done;
8205 // carry = 0;
8206 // for (int j=len-1; j >= 0; j--) {
8207 // long product = (in[j] & LONG_MASK) * kLong +
8208 // (out[offs] & LONG_MASK) + carry;
8209 // out[offs--] = (int)product;
8210 // carry = product >>> 32;
8211 // }
8212 //
8213 push(tmp1);
8214 push(tmp2);
8215 push(tmp3);
8216 push(tmp4);
8217 push(tmp5);
8219 Register op2 = tmp2;
8220 const Register sum = tmp3;
8221 const Register op1 = tmp4;
8222 const Register carry = tmp5;
8224 if (UseBMI2Instructions) {
8225 op2 = rdxReg;
8226 }
8227 movl(op2, k);
8232 xorq(carry, carry);
8234 // First loop
8236 // Multiply in[] by k in a 4-way unrolled loop using a 128-bit by 32-bit multiply
8237 // The carry is in tmp5
8238 mul_add_128_x_32_loop(out, in, offs, len, tmp1, tmp2, tmp3, tmp4, tmp5, rdxReg, raxReg);
8240 // Multiply the trailing in[] entry using a 64-bit by 32-bit multiply, if any
8241 decrementl(len);
8242 jccb(Assembler::negative, L_carry);
8243 decrementl(len);
8244 jccb(Assembler::negative, L_last_in);
8246 movq(op1, Address(in, len, Address::times_4, 0));
8247 rorq(op1, 32);
8249 subl(offs, 2);
8250 movq(sum, Address(out, offs, Address::times_4, 0));
8251 rorq(sum, 32);
8253 if (UseBMI2Instructions) {
8254 multiply_add_64_bmi2(sum, op1, op2, carry, raxReg);
8255 }
8256 else {
8257 multiply_add_64(sum, op1, op2, carry, rdxReg, raxReg);
8258 }
8260 // Store back in big endian from little endian
8261 rorq(sum, 0x20);
8262 movq(Address(out, offs, Address::times_4, 0), sum);
8264 testl(len, len);
8265 jccb(Assembler::zero, L_carry);
8267 // Multiply the last in[] entry, if any
8268 bind(L_last_in);
8269 movl(op1, Address(in, 0));
8270 movl(sum, Address(out, offs, Address::times_4, -4));
8272 movl(raxReg, k);
8273 mull(op1); // tmp4 * eax -> edx:eax
8274 addl(sum, carry);
8275 adcl(rdxReg, 0);
8276 addl(sum, raxReg);
8277 adcl(rdxReg, 0);
8278 movl(carry, rdxReg);
8280 movl(Address(out, offs, Address::times_4, -4), sum);
8282 bind(L_carry);
8283 // Return tmp5/carry as the carry in rax
8284 movl(rax, carry);
8286 bind(L_done);
8287 pop(tmp5);
8288 pop(tmp4);
8289 pop(tmp3);
8290 pop(tmp2);
8291 pop(tmp1);
8292 }
8293 #endif
8295 /**
8296 * Emits code to update CRC-32 with a byte value according to constants in table
8297 *
8298 * @param [in,out] crc Register containing the crc.
8299 * @param [in] val Register containing the byte to fold into the CRC.
8300 * @param [in] table Register containing the table of crc constants.
8301 *
8302 * uint32_t crc;
8303 * val = crc_table[(val ^ crc) & 0xFF];
8304 * crc = val ^ (crc >> 8);
8305 *
8306 */
8307 void MacroAssembler::update_byte_crc32(Register crc, Register val, Register table) {
8308 xorl(val, crc);
8309 andl(val, 0xFF);
8310 shrl(crc, 8); // unsigned shift
8311 xorl(crc, Address(table, val, Address::times_4, 0));
8312 }
8314 /**
8315 * Fold 128-bit data chunk
8316 */
8317 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, Register buf, int offset) {
8318 if (UseAVX > 0) {
8319 vpclmulhdq(xtmp, xK, xcrc); // [123:64]
8320 vpclmulldq(xcrc, xK, xcrc); // [63:0]
8321 vpxor(xcrc, xcrc, Address(buf, offset), false /* vector256 */);
8322 pxor(xcrc, xtmp);
8323 } else {
8324 movdqa(xtmp, xcrc);
8325 pclmulhdq(xtmp, xK); // [123:64]
8326 pclmulldq(xcrc, xK); // [63:0]
8327 pxor(xcrc, xtmp);
8328 movdqu(xtmp, Address(buf, offset));
8329 pxor(xcrc, xtmp);
8330 }
8331 }
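// Folding sketch (illustrative): with fold constants K packed in xK,
// crc' = clmul(hi64(crc), K.hi) ^ clmul(lo64(crc), K.lo) ^ data128,
// i.e. the running CRC is multiplied by x^128 mod P in GF(2) and
// xor-folded into the next 128 bits of the buffer.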
8333 void MacroAssembler::fold_128bit_crc32(XMMRegister xcrc, XMMRegister xK, XMMRegister xtmp, XMMRegister xbuf) {
8334 if (UseAVX > 0) {
8335 vpclmulhdq(xtmp, xK, xcrc);
8336 vpclmulldq(xcrc, xK, xcrc);
8337 pxor(xcrc, xbuf);
8338 pxor(xcrc, xtmp);
8339 } else {
8340 movdqa(xtmp, xcrc);
8341 pclmulhdq(xtmp, xK);
8342 pclmulldq(xcrc, xK);
8343 pxor(xcrc, xbuf);
8344 pxor(xcrc, xtmp);
8345 }
8346 }
8348 /**
8349 * 8-bit folds to compute 32-bit CRC
8350 *
8351 * uint64_t xcrc;
8352 * timesXtoThe32[xcrc & 0xFF] ^ (xcrc >> 8);
8353 */
8354 void MacroAssembler::fold_8bit_crc32(XMMRegister xcrc, Register table, XMMRegister xtmp, Register tmp) {
8355 movdl(tmp, xcrc);
8356 andl(tmp, 0xFF);
8357 movdl(xtmp, Address(table, tmp, Address::times_4, 0));
8358 psrldq(xcrc, 1); // unsigned shift one byte
8359 pxor(xcrc, xtmp);
8360 }
8362 /**
8363 * uint32_t crc;
8364 * timesXtoThe32[crc & 0xFF] ^ (crc >> 8);
8365 */
8366 void MacroAssembler::fold_8bit_crc32(Register crc, Register table, Register tmp) {
8367 movl(tmp, crc);
8368 andl(tmp, 0xFF);
8369 shrl(crc, 8);
8370 xorl(crc, Address(table, tmp, Address::times_4, 0));
8371 }
8373 /**
8374 * @param crc register containing existing CRC (32-bit)
8375 * @param buf register pointing to input byte buffer (byte*)
8376 * @param len register containing number of bytes
8377 * @param table register that will contain address of CRC table
8378 * @param tmp scratch register
8379 */
8380 void MacroAssembler::kernel_crc32(Register crc, Register buf, Register len, Register table, Register tmp) {
8381 assert_different_registers(crc, buf, len, table, tmp, rax);
8383 Label L_tail, L_tail_restore, L_tail_loop, L_exit, L_align_loop, L_aligned;
8384 Label L_fold_tail, L_fold_128b, L_fold_512b, L_fold_512b_loop, L_fold_tail_loop;
8386 lea(table, ExternalAddress(StubRoutines::crc_table_addr()));
8387 notl(crc); // ~crc
8388 cmpl(len, 16);
8389 jcc(Assembler::less, L_tail);
8391 // Align buffer to 16 bytes
8392 movl(tmp, buf);
8393 andl(tmp, 0xF);
8394 jccb(Assembler::zero, L_aligned);
8395 subl(tmp, 16);
8396 addl(len, tmp);
8398 align(4);
8399 BIND(L_align_loop);
8400 movsbl(rax, Address(buf, 0)); // load byte with sign extension
8401 update_byte_crc32(crc, rax, table);
8402 increment(buf);
8403 incrementl(tmp);
8404 jccb(Assembler::less, L_align_loop);
8406 BIND(L_aligned);
8407 movl(tmp, len); // save
8408 shrl(len, 4);
8409 jcc(Assembler::zero, L_tail_restore);
8411 // Fold crc into first bytes of vector
8412 movdqa(xmm1, Address(buf, 0));
8413 movdl(rax, xmm1);
8414 xorl(crc, rax);
8415 pinsrd(xmm1, crc, 0);
8416 addptr(buf, 16);
8417 subl(len, 4); // len > 0
8418 jcc(Assembler::less, L_fold_tail);
8420 movdqa(xmm2, Address(buf, 0));
8421 movdqa(xmm3, Address(buf, 16));
8422 movdqa(xmm4, Address(buf, 32));
8423 addptr(buf, 48);
8424 subl(len, 3);
8425 jcc(Assembler::lessEqual, L_fold_512b);
8427 // Fold total 512 bits of polynomial on each iteration,
8428 // 128 bits per each of 4 parallel streams.
8429 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 32));
8431 align(32);
8432 BIND(L_fold_512b_loop);
8433 fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0);
8434 fold_128bit_crc32(xmm2, xmm0, xmm5, buf, 16);
8435 fold_128bit_crc32(xmm3, xmm0, xmm5, buf, 32);
8436 fold_128bit_crc32(xmm4, xmm0, xmm5, buf, 48);
8437 addptr(buf, 64);
8438 subl(len, 4);
8439 jcc(Assembler::greater, L_fold_512b_loop);
8441 // Fold 512 bits to 128 bits.
8442 BIND(L_fold_512b);
8443 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16));
8444 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm2);
8445 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm3);
8446 fold_128bit_crc32(xmm1, xmm0, xmm5, xmm4);
8448 // Fold the rest of 128 bits data chunks
8449 BIND(L_fold_tail);
8450 addl(len, 3);
8451 jccb(Assembler::lessEqual, L_fold_128b);
8452 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr() + 16));
8454 BIND(L_fold_tail_loop);
8455 fold_128bit_crc32(xmm1, xmm0, xmm5, buf, 0);
8456 addptr(buf, 16);
8457 decrementl(len);
8458 jccb(Assembler::greater, L_fold_tail_loop);
8460 // Fold 128 bits in xmm1 down into 32 bits in crc register.
8461 BIND(L_fold_128b);
8462 movdqu(xmm0, ExternalAddress(StubRoutines::x86::crc_by128_masks_addr()));
8463 if (UseAVX > 0) {
8464 vpclmulqdq(xmm2, xmm0, xmm1, 0x1);
8465 vpand(xmm3, xmm0, xmm2, false /* vector256 */);
8466 vpclmulqdq(xmm0, xmm0, xmm3, 0x1);
8467 } else {
8468 movdqa(xmm2, xmm0);
8469 pclmulqdq(xmm2, xmm1, 0x1);
8470 movdqa(xmm3, xmm0);
8471 pand(xmm3, xmm2);
8472 pclmulqdq(xmm0, xmm3, 0x1);
8473 }
8474 psrldq(xmm1, 8);
8475 psrldq(xmm2, 4);
8476 pxor(xmm0, xmm1);
8477 pxor(xmm0, xmm2);
8479 // 8 8-bit folds to compute 32-bit CRC.
8480 for (int j = 0; j < 4; j++) {
8481 fold_8bit_crc32(xmm0, table, xmm1, rax);
8482 }
8483 movdl(crc, xmm0); // mov 32 bits to general register
8484 for (int j = 0; j < 4; j++) {
8485 fold_8bit_crc32(crc, table, rax);
8486 }
8488 BIND(L_tail_restore);
8489 movl(len, tmp); // restore
8490 BIND(L_tail);
8491 andl(len, 0xf);
8492 jccb(Assembler::zero, L_exit);
8494 // Fold the rest of bytes
8495 align(4);
8496 BIND(L_tail_loop);
8497 movsbl(rax, Address(buf, 0)); // load byte with sign extension
8498 update_byte_crc32(crc, rax, table);
8499 increment(buf);
8500 decrementl(len);
8501 jccb(Assembler::greater, L_tail_loop);
8503 BIND(L_exit);
8504 notl(crc); // ~crc
8505 }
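// Scalar C equivalent of the byte-at-a-time tail above (illustrative,
// assuming a standard zlib-style 256-entry table):
//   crc = ~crc;
//   while (len--) crc = table[(crc ^ *buf++) & 0xFF] ^ (crc >> 8);
//   crc = ~crc;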
8507 #undef BIND
8508 #undef BLOCK_COMMENT
8511 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
8512 switch (cond) {
8513 // Note some conditions are synonyms for others
8514 case Assembler::zero: return Assembler::notZero;
8515 case Assembler::notZero: return Assembler::zero;
8516 case Assembler::less: return Assembler::greaterEqual;
8517 case Assembler::lessEqual: return Assembler::greater;
8518 case Assembler::greater: return Assembler::lessEqual;
8519 case Assembler::greaterEqual: return Assembler::less;
8520 case Assembler::below: return Assembler::aboveEqual;
8521 case Assembler::belowEqual: return Assembler::above;
8522 case Assembler::above: return Assembler::belowEqual;
8523 case Assembler::aboveEqual: return Assembler::below;
8524 case Assembler::overflow: return Assembler::noOverflow;
8525 case Assembler::noOverflow: return Assembler::overflow;
8526 case Assembler::negative: return Assembler::positive;
8527 case Assembler::positive: return Assembler::negative;
8528 case Assembler::parity: return Assembler::noParity;
8529 case Assembler::noParity: return Assembler::parity;
8530 }
8531 ShouldNotReachHere(); return Assembler::overflow;
8532 }
8534 SkipIfEqual::SkipIfEqual(
8535 MacroAssembler* masm, const bool* flag_addr, bool value) {
8536 _masm = masm;
8537 _masm->cmp8(ExternalAddress((address)flag_addr), value);
8538 _masm->jcc(Assembler::equal, _label);
8539 }
8541 SkipIfEqual::~SkipIfEqual() {
8542 _masm->bind(_label);
8543 }
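// Usage sketch (illustrative; the flag name is hypothetical):
//   {
//     SkipIfEqual skip(masm, &SomeBoolFlag, false);
//     // code emitted here is branched over when SomeBoolFlag == false
//   } // destructor binds the skip target at the closing brace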