Mon, 20 Aug 2012 09:58:58 -0700
7190310: Inlining WeakReference.get(), and hoisting $referent may lead to non-terminating loops
Summary: In C2, add a software membar after the load of the Reference.referent field to prevent commoning of such loads across a safepoint, since the GC can change the field's value. In C1, always generate the Reference.get() intrinsic.
Reviewed-by: roland, twisti, dholmes, johnc
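The hazard the fix addresses can be pictured with a small C++ analogue (illustrative only; none of these names are HotSpot or JDK code). If the compiler commons the two loads of the referent slot, a loop waiting for the GC to clear a weak referent never terminates; the volatile qualifier below plays the role of the software membar that C2 now inserts after the Reference.referent load.

#include <cstddef>
// Illustrative analogue only -- not HotSpot code. 'referent_slot' stands in
// for Reference.referent; 'volatile' stands in for the membar that keeps
// the load from being commoned/hoisted across a safepoint.
static void* wait_until_collected(void* volatile* referent_slot) {
  void* v = *referent_slot;       // load of the referent
  while (v != NULL) {
    // a safepoint may occur here; the GC can clear *referent_slot
    v = *referent_slot;           // must be re-loaded, not commoned
  }
  return v;
}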
/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "assembler_x86.inline.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/cardTableModRefBS.hpp"
#include "memory/resourceArea.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/interfaceSupport.hpp"
#include "runtime/objectMonitor.hpp"
#include "runtime/os.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#ifndef SERIALGC
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#endif

#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
#define STOP(error) stop(error)
#else
#define BLOCK_COMMENT(str) block_comment(str)
#define STOP(error) block_comment(error); stop(error)
#endif

#define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
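
// A minimal, hypothetical sketch of what BIND adds in non-PRODUCT builds:
// the label identifier is stringized via #label, so disassembly listings
// carry "L_done:" style markers next to the bind. DEMO_BIND and
// demo_bind_example are illustrative names only, not part of this file.
#include <cstdio>
#define DEMO_BIND(label) printf("bound %s\n", #label ":")
static void demo_bind_example() {
  int L_done = 0;
  DEMO_BIND(L_done); // prints "bound L_done:"
  (void)L_done;
}
#undef DEMO_BIND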
// Implementation of AddressLiteral

AddressLiteral::AddressLiteral(address target, relocInfo::relocType rtype) {
  _is_lval = false;
  _target = target;
  switch (rtype) {
  case relocInfo::oop_type:
    // Oops are a special case. Normally they would be their own section
    // but in cases like icBuffer they are literals in the code stream that
    // we don't have a section for. We use none so that we get a literal address
    // which is always patchable.
    break;
  case relocInfo::external_word_type:
    _rspec = external_word_Relocation::spec(target);
    break;
  case relocInfo::internal_word_type:
    _rspec = internal_word_Relocation::spec(target);
    break;
  case relocInfo::opt_virtual_call_type:
    _rspec = opt_virtual_call_Relocation::spec();
    break;
  case relocInfo::static_call_type:
    _rspec = static_call_Relocation::spec();
    break;
  case relocInfo::runtime_call_type:
    _rspec = runtime_call_Relocation::spec();
    break;
  case relocInfo::poll_type:
  case relocInfo::poll_return_type:
    _rspec = Relocation::spec_simple(rtype);
    break;
  case relocInfo::none:
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}
// Implementation of Address

#ifdef _LP64

Address Address::make_array(ArrayAddress adr) {
  // Not implementable on 64bit machines
  // Should have been handled higher up the call chain.
  ShouldNotReachHere();
  return Address();
}

// exceedingly dangerous constructor
Address::Address(int disp, address loc, relocInfo::relocType rtype) {
  _base  = noreg;
  _index = noreg;
  _scale = no_scale;
  _disp  = disp;
  switch (rtype) {
  case relocInfo::external_word_type:
    _rspec = external_word_Relocation::spec(loc);
    break;
  case relocInfo::internal_word_type:
    _rspec = internal_word_Relocation::spec(loc);
    break;
  case relocInfo::runtime_call_type:
    // HMM
    _rspec = runtime_call_Relocation::spec();
    break;
  case relocInfo::poll_type:
  case relocInfo::poll_return_type:
    _rspec = Relocation::spec_simple(rtype);
    break;
  case relocInfo::none:
    break;
  default:
    ShouldNotReachHere();
  }
}
#else // LP64

Address Address::make_array(ArrayAddress adr) {
  AddressLiteral base = adr.base();
  Address index = adr.index();
  assert(index._disp == 0, "must not have disp"); // maybe it can?
  Address array(index._base, index._index, index._scale, (intptr_t) base.target());
  array._rspec = base._rspec;
  return array;
}

// exceedingly dangerous constructor
Address::Address(address loc, RelocationHolder spec) {
  _base  = noreg;
  _index = noreg;
  _scale = no_scale;
  _disp  = (intptr_t) loc;
  _rspec = spec;
}

#endif // _LP64


// Convert the raw encoding form into the form expected by the constructor for
// Address. An index of 4 (rsp) corresponds to having no index, so convert
// that to noreg for the Address constructor.
Address Address::make_raw(int base, int index, int scale, int disp, bool disp_is_oop) {
  RelocationHolder rspec;
  if (disp_is_oop) {
    rspec = Relocation::spec_simple(relocInfo::oop_type);
  }
  bool valid_index = index != rsp->encoding();
  if (valid_index) {
    Address madr(as_Register(base), as_Register(index), (Address::ScaleFactor)scale, in_ByteSize(disp));
    madr._rspec = rspec;
    return madr;
  } else {
    Address madr(as_Register(base), noreg, Address::no_scale, in_ByteSize(disp));
    madr._rspec = rspec;
    return madr;
  }
}
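
// A minimal sketch of the rsp-as-no-index rule above (the helper name and
// bare ints are illustrative, not HotSpot API): in raw ModRM/SIB encodings,
// an index field of 4 (rsp) means "no index register", because the SIB byte
// cannot scale rsp.
static bool raw_index_is_real(int raw_index) {
  return raw_index != 4; // 4 == rsp->encoding()
}
// e.g. make_raw(0 /*rax*/, 4 /*rsp*/, 0, 16, false) yields [rax + 16], no index.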
// Implementation of Assembler

int AbstractAssembler::code_fill_byte() {
  return (u_char)'\xF4'; // hlt
}

// make this go away someday
void Assembler::emit_data(jint data, relocInfo::relocType rtype, int format) {
  if (rtype == relocInfo::none)
    emit_long(data);
  else
    emit_data(data, Relocation::spec_simple(rtype), format);
}

void Assembler::emit_data(jint data, RelocationHolder const& rspec, int format) {
  assert(imm_operand == 0, "default format must be immediate in this file");
  assert(inst_mark() != NULL, "must be inside InstructionMark");
  if (rspec.type() != relocInfo::none) {
#ifdef ASSERT
    check_relocation(rspec, format);
#endif
    // Do not use AbstractAssembler::relocate, which is not intended for
    // embedded words. Instead, relocate to the enclosing instruction.

    // Hack: call32 is too wide for the mask, so use disp32.
    if (format == call32_operand)
      code_section()->relocate(inst_mark(), rspec, disp32_operand);
    else
      code_section()->relocate(inst_mark(), rspec, format);
  }
  emit_long(data);
}

static int encode(Register r) {
  int enc = r->encoding();
  if (enc >= 8) {
    enc -= 8;
  }
  return enc;
}

static int encode(XMMRegister r) {
  int enc = r->encoding();
  if (enc >= 8) {
    enc -= 8;
  }
  return enc;
}

void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
  assert(dst->has_byte_register(), "must have byte register");
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert(isByte(imm8), "not a byte");
  assert((op1 & 0x01) == 0, "should be 8bit operation");
  emit_byte(op1);
  emit_byte(op2 | encode(dst));
  emit_byte(imm8);
}


void Assembler::emit_arith(int op1, int op2, Register dst, int32_t imm32) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  if (is8bit(imm32)) {
    emit_byte(op1 | 0x02); // set sign bit
    emit_byte(op2 | encode(dst));
    emit_byte(imm32 & 0xFF);
  } else {
    emit_byte(op1);
    emit_byte(op2 | encode(dst));
    emit_long(imm32);
  }
}
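
// A worked sketch of the two emit_arith paths above (illustrative free
// function; is8bit_demo and the hard-coded encodings are assumptions, not
// HotSpot API). For addl rbx, imm with op1=0x81, op2=0xC0 and rbx encoding 3:
//   addl rbx, 5      -> 83 C3 05           (op1|0x02: sign-extended imm8)
//   addl rbx, 0x1234 -> 81 C3 34 12 00 00  (full little-endian imm32)
#include <cstdio>
static bool is8bit_demo(int x) { return -0x80 <= x && x < 0x80; }
static void emit_arith_demo(int imm) {
  const int op1 = 0x81, op2 = 0xC0, rbx_enc = 3;
  if (is8bit_demo(imm)) {
    printf("%02X %02X %02X\n", op1 | 0x02, op2 | rbx_enc, imm & 0xFF);
  } else {
    printf("%02X %02X %02X %02X %02X %02X\n", op1, op2 | rbx_enc,
           imm & 0xFF, (imm >> 8) & 0xFF, (imm >> 16) & 0xFF, (imm >> 24) & 0xFF);
  }
}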
// Force generation of a 4 byte immediate value even if it fits into 8bit
void Assembler::emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  emit_byte(op1);
  emit_byte(op2 | encode(dst));
  emit_long(imm32);
}

// immediate-to-memory forms
void Assembler::emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32) {
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  if (is8bit(imm32)) {
    emit_byte(op1 | 0x02); // set sign bit
    emit_operand(rm, adr, 1);
    emit_byte(imm32 & 0xFF);
  } else {
    emit_byte(op1);
    emit_operand(rm, adr, 4);
    emit_long(imm32);
  }
}

void Assembler::emit_arith(int op1, int op2, Register dst, jobject obj) {
  LP64_ONLY(ShouldNotReachHere());
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  InstructionMark im(this);
  emit_byte(op1);
  emit_byte(op2 | encode(dst));
  emit_data((intptr_t)obj, relocInfo::oop_type, 0);
}


void Assembler::emit_arith(int op1, int op2, Register dst, Register src) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  emit_byte(op1);
  emit_byte(op2 | encode(dst) << 3 | encode(src));
}


void Assembler::emit_operand(Register reg, Register base, Register index,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec,
                             int rip_relative_correction) {
  relocInfo::relocType rtype = (relocInfo::relocType) rspec.type();

  // Encode the registers as needed in the fields they are used in

  int regenc = encode(reg) << 3;
  int indexenc = index->is_valid() ? encode(index) << 3 : 0;
  int baseenc = base->is_valid() ? encode(base) : 0;

  if (base->is_valid()) {
    if (index->is_valid()) {
      assert(scale != Address::no_scale, "inconsistent address");
      // [base + index*scale + disp]
      if (disp == 0 && rtype == relocInfo::none &&
          base != rbp LP64_ONLY(&& base != r13)) {
        // [base + index*scale]
        // [00 reg 100][ss index base]
        assert(index != rsp, "illegal addressing mode");
        emit_byte(0x04 | regenc);
        emit_byte(scale << 6 | indexenc | baseenc);
      } else if (is8bit(disp) && rtype == relocInfo::none) {
        // [base + index*scale + imm8]
        // [01 reg 100][ss index base] imm8
        assert(index != rsp, "illegal addressing mode");
        emit_byte(0x44 | regenc);
        emit_byte(scale << 6 | indexenc | baseenc);
        emit_byte(disp & 0xFF);
      } else {
        // [base + index*scale + disp32]
        // [10 reg 100][ss index base] disp32
        assert(index != rsp, "illegal addressing mode");
        emit_byte(0x84 | regenc);
        emit_byte(scale << 6 | indexenc | baseenc);
        emit_data(disp, rspec, disp32_operand);
      }
    } else if (base == rsp LP64_ONLY(|| base == r12)) {
      // [rsp + disp]
      if (disp == 0 && rtype == relocInfo::none) {
        // [rsp]
        // [00 reg 100][00 100 100]
        emit_byte(0x04 | regenc);
        emit_byte(0x24);
      } else if (is8bit(disp) && rtype == relocInfo::none) {
        // [rsp + imm8]
        // [01 reg 100][00 100 100] disp8
        emit_byte(0x44 | regenc);
        emit_byte(0x24);
        emit_byte(disp & 0xFF);
      } else {
        // [rsp + imm32]
        // [10 reg 100][00 100 100] disp32
        emit_byte(0x84 | regenc);
        emit_byte(0x24);
        emit_data(disp, rspec, disp32_operand);
      }
    } else {
      // [base + disp]
      assert(base != rsp LP64_ONLY(&& base != r12), "illegal addressing mode");
      if (disp == 0 && rtype == relocInfo::none &&
          base != rbp LP64_ONLY(&& base != r13)) {
        // [base]
        // [00 reg base]
        emit_byte(0x00 | regenc | baseenc);
      } else if (is8bit(disp) && rtype == relocInfo::none) {
        // [base + disp8]
        // [01 reg base] disp8
        emit_byte(0x40 | regenc | baseenc);
        emit_byte(disp & 0xFF);
      } else {
        // [base + disp32]
        // [10 reg base] disp32
        emit_byte(0x80 | regenc | baseenc);
        emit_data(disp, rspec, disp32_operand);
      }
    }
  } else {
    if (index->is_valid()) {
      assert(scale != Address::no_scale, "inconsistent address");
      // [index*scale + disp]
      // [00 reg 100][ss index 101] disp32
      assert(index != rsp, "illegal addressing mode");
      emit_byte(0x04 | regenc);
      emit_byte(scale << 6 | indexenc | 0x05);
      emit_data(disp, rspec, disp32_operand);
    } else if (rtype != relocInfo::none) {
      // [disp] (64bit) RIP-RELATIVE (32bit) abs
      // [00 000 101] disp32

      emit_byte(0x05 | regenc);
      // Note that the RIP-rel. correction applies to the generated
      // disp field, but _not_ to the target address in the rspec.

      // disp was created by converting the target address minus the pc
      // at the start of the instruction. That needs more correction here.
      // intptr_t disp = target - next_ip;
      assert(inst_mark() != NULL, "must be inside InstructionMark");
      address next_ip = pc() + sizeof(int32_t) + rip_relative_correction;
      int64_t adjusted = disp;
      // Do rip-rel adjustment for 64bit
      LP64_ONLY(adjusted -= (next_ip - inst_mark()));
      assert(is_simm32(adjusted),
             "must be 32bit offset (RIP relative address)");
      emit_data((int32_t) adjusted, rspec, disp32_operand);

    } else {
      // 32-bit never did this; it did everything via the rip-rel/disp code above.
      // [disp] ABSOLUTE
      // [00 reg 100][00 100 101] disp32
      emit_byte(0x04 | regenc);
      emit_byte(0x25);
      emit_data(disp, rspec, disp32_operand);
    }
  }
}
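
// A worked sketch of the [base + index*scale + disp8] path above (plain-int
// demo, not HotSpot API). For an operand like [rax + rcx*4 + 8] with the reg
// field rdx (encoding 2):
#include <cstdio>
static void modrm_sib_demo() {
  const int reg = 2 /*rdx*/, base = 0 /*rax*/, index = 1 /*rcx*/;
  const int scale = 2 /*log2(4)*/, disp = 8;
  unsigned modrm = 0x44 | (reg << 3);            // [01 reg 100]: disp8 + SIB follows
  unsigned sib   = (scale << 6) | (index << 3) | base;
  printf("%02X %02X %02X\n", modrm, sib, disp);  // prints: 54 88 08
}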
void Assembler::emit_operand(XMMRegister reg, Register base, Register index,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec) {
  emit_operand((Register)reg, base, index, scale, disp, rspec);
}

// Secret local extension to Assembler::WhichOperand:
#define end_pc_operand (_WhichOperand_limit)

address Assembler::locate_operand(address inst, WhichOperand which) {
  // Decode the given instruction, and return the address of
  // an embedded 32-bit operand word.

  // If "which" is disp32_operand, selects the displacement portion
  // of an effective address specifier.
  // If "which" is imm64_operand, selects the trailing immediate constant.
  // If "which" is call32_operand, selects the displacement of a call or jump.
  // Caller is responsible for ensuring that there is such an operand,
  // and that it is 32/64 bits wide.

  // If "which" is end_pc_operand, find the end of the instruction.

  address ip = inst;
  bool is_64bit = false;

  debug_only(bool has_disp32 = false);
  int tail_size = 0; // other random bytes (#32, #16, etc.) at end of insn

  again_after_prefix:
  switch (0xFF & *ip++) {

  // These convenience macros generate groups of "case" labels for the switch.
#define REP4(x) (x)+0: case (x)+1: case (x)+2: case (x)+3
#define REP8(x) (x)+0: case (x)+1: case (x)+2: case (x)+3: \
             case (x)+4: case (x)+5: case (x)+6: case (x)+7
#define REP16(x) REP8((x)+0): \
              case REP8((x)+8)

  case CS_segment:
  case SS_segment:
  case DS_segment:
  case ES_segment:
  case FS_segment:
  case GS_segment:
    // Seems dubious
    LP64_ONLY(assert(false, "shouldn't have that prefix"));
    assert(ip == inst+1, "only one prefix allowed");
    goto again_after_prefix;

  case 0x67:
  case REX:
  case REX_B:
  case REX_X:
  case REX_XB:
  case REX_R:
  case REX_RB:
  case REX_RX:
  case REX_RXB:
    NOT_LP64(assert(false, "64bit prefixes"));
    goto again_after_prefix;

  case REX_W:
  case REX_WB:
  case REX_WX:
  case REX_WXB:
  case REX_WR:
  case REX_WRB:
  case REX_WRX:
  case REX_WRXB:
    NOT_LP64(assert(false, "64bit prefixes"));
    is_64bit = true;
    goto again_after_prefix;

  case 0xFF: // pushq a; decl a; incl a; call a; jmp a
  case 0x88: // movb a, r
  case 0x89: // movl a, r
  case 0x8A: // movb r, a
  case 0x8B: // movl r, a
  case 0x8F: // popl a
    debug_only(has_disp32 = true);
    break;

  case 0x68: // pushq #32
    if (which == end_pc_operand) {
      return ip + 4;
    }
    assert(which == imm_operand && !is_64bit, "pushl has no disp32 or 64bit immediate");
    return ip; // not produced by emit_operand

  case 0x66: // movw ... (size prefix)
    again_after_size_prefix2:
    switch (0xFF & *ip++) {
    case REX:
    case REX_B:
    case REX_X:
    case REX_XB:
    case REX_R:
    case REX_RB:
    case REX_RX:
    case REX_RXB:
    case REX_W:
    case REX_WB:
    case REX_WX:
    case REX_WXB:
    case REX_WR:
    case REX_WRB:
    case REX_WRX:
    case REX_WRXB:
      NOT_LP64(assert(false, "64bit prefix found"));
      goto again_after_size_prefix2;
    case 0x8B: // movw r, a
    case 0x89: // movw a, r
      debug_only(has_disp32 = true);
      break;
    case 0xC7: // movw a, #16
      debug_only(has_disp32 = true);
      tail_size = 2; // the imm16
      break;
    case 0x0F: // several SSE/SSE2 variants
      ip--; // reparse the 0x0F
      goto again_after_prefix;
    default:
      ShouldNotReachHere();
    }
    break;

  case REP8(0xB8): // movl/q r, #32/#64(oop?)
    if (which == end_pc_operand) return ip + (is_64bit ? 8 : 4);
    // these asserts are somewhat nonsensical
#ifndef _LP64
    assert(which == imm_operand || which == disp32_operand,
           err_msg("which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, ip));
#else
    assert((which == call32_operand || which == imm_operand) && is_64bit ||
           which == narrow_oop_operand && !is_64bit,
           err_msg("which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, ip));
#endif // _LP64
    return ip;

  case 0x69: // imul r, a, #32
  case 0xC7: // movl a, #32(oop?)
    tail_size = 4;
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x0F: // movx..., etc.
    switch (0xFF & *ip++) {
    case 0x3A: // pcmpestri
      tail_size = 1;
    case 0x38: // ptest, pmovzxbw
      ip++; // skip opcode
      debug_only(has_disp32 = true); // has both kinds of operands!
      break;

    case 0x70: // pshufd r, r/a, #8
      debug_only(has_disp32 = true); // has both kinds of operands!
    case 0x73: // psrldq r, #8
      tail_size = 1;
      break;

    case 0x12: // movlps
    case 0x28: // movaps
    case 0x2E: // ucomiss
    case 0x2F: // comiss
    case 0x54: // andps
    case 0x55: // andnps
    case 0x56: // orps
    case 0x57: // xorps
    case 0x6E: // movd
    case 0x7E: // movd
    case 0xAE: // ldmxcsr, stmxcsr, fxrstor, fxsave, clflush
      debug_only(has_disp32 = true);
      break;

    case 0xAD: // shrd r, a, %cl
    case 0xAF: // imul r, a
    case 0xBE: // movsbl r, a (movsxb)
    case 0xBF: // movswl r, a (movsxw)
    case 0xB6: // movzbl r, a (movzxb)
    case 0xB7: // movzwl r, a (movzxw)
    case REP16(0x40): // cmovl cc, r, a
    case 0xB0: // cmpxchgb
    case 0xB1: // cmpxchg
    case 0xC1: // xaddl
    case 0xC7: // cmpxchg8
    case REP16(0x90): // setcc a
      debug_only(has_disp32 = true);
      // fall out of the switch to decode the address
      break;

    case 0xC4: // pinsrw r, a, #8
      debug_only(has_disp32 = true);
    case 0xC5: // pextrw r, r, #8
      tail_size = 1; // the imm8
      break;

    case 0xAC: // shrd r, a, #8
      debug_only(has_disp32 = true);
      tail_size = 1; // the imm8
      break;

    case REP16(0x80): // jcc rdisp32
      if (which == end_pc_operand) return ip + 4;
      assert(which == call32_operand, "jcc has no disp32 or imm");
      return ip;
    default:
      ShouldNotReachHere();
    }
    break;

  case 0x81: // addl a, #32; addl r, #32
    // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
    // on 32bit in the case of cmpl, the imm might be an oop
    tail_size = 4;
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x83: // addl a, #8; addl r, #8
    // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
    debug_only(has_disp32 = true); // has both kinds of operands!
    tail_size = 1;
    break;

  case 0x9B:
    switch (0xFF & *ip++) {
    case 0xD9: // fnstcw a
      debug_only(has_disp32 = true);
      break;
    default:
      ShouldNotReachHere();
    }
    break;

  case REP4(0x00): // addb a, r; addl a, r; addb r, a; addl r, a
  case REP4(0x10): // adc...
  case REP4(0x20): // and...
  case REP4(0x30): // xor...
  case REP4(0x08): // or...
  case REP4(0x18): // sbb...
  case REP4(0x28): // sub...
  case 0xF7: // mull a
  case 0x8D: // lea r, a
  case 0x87: // xchg r, a
  case REP4(0x38): // cmp...
  case 0x85: // test r, a
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0xC1: // sal a, #8; sar a, #8; shl a, #8; shr a, #8
  case 0xC6: // movb a, #8
  case 0x80: // cmpb a, #8
  case 0x6B: // imul r, a, #8
    debug_only(has_disp32 = true); // has both kinds of operands!
    tail_size = 1; // the imm8
    break;

  case 0xC4: // VEX_3bytes
  case 0xC5: // VEX_2bytes
    assert((UseAVX > 0), "shouldn't have VEX prefix");
    assert(ip == inst+1, "no prefixes allowed");
    // C4 and C5 are also used as opcodes for the PINSRW and PEXTRW instructions,
    // but those have prefix 0x0F and are processed when 0x0F is processed above.
    //
    // In 32-bit mode the VEX first bytes C4 and C5 alias onto the LDS and LES
    // instructions (these instructions are not supported in 64-bit mode).
    // To distinguish them, bits [7:6] are set in the VEX second byte, since a
    // ModRM byte cannot be of the form 11xxxxxx in 32-bit mode. To set
    // those VEX bits, the REX and vvvv bits are inverted.
    //
    // Fortunately C2 doesn't generate these instructions, so we don't need
    // to check for them in the product version.

    // Check second byte
    NOT_LP64(assert((0xC0 & *ip) == 0xC0, "shouldn't have LDS and LES instructions"));

    // First byte
    if ((0xFF & *inst) == VEX_3bytes) {
      ip++; // third byte
      is_64bit = ((VEX_W & *ip) == VEX_W);
    }
    ip++; // opcode
    // To find the end of the instruction (which == end_pc_operand).
    switch (0xFF & *ip) {
    case 0x61: // pcmpestri r, r/a, #8
    case 0x70: // pshufd r, r/a, #8
    case 0x73: // psrldq r, #8
      tail_size = 1; // the imm8
      break;
    default:
      break;
    }
    ip++; // skip opcode
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0xD1: // sal a, 1; sar a, 1; shl a, 1; shr a, 1
  case 0xD3: // sal a, %cl; sar a, %cl; shl a, %cl; shr a, %cl
  case 0xD9: // fld_s a; fst_s a; fstp_s a; fldcw a
  case 0xDD: // fld_d a; fst_d a; fstp_d a
  case 0xDB: // fild_s a; fistp_s a; fld_x a; fstp_x a
  case 0xDF: // fild_d a; fistp_d a
  case 0xD8: // fadd_s a; fsubr_s a; fmul_s a; fdivr_s a; fcomp_s a
  case 0xDC: // fadd_d a; fsubr_d a; fmul_d a; fdivr_d a; fcomp_d a
  case 0xDE: // faddp_d a; fsubrp_d a; fmulp_d a; fdivrp_d a; fcompp_d a
    debug_only(has_disp32 = true);
    break;

  case 0xE8: // call rdisp32
  case 0xE9: // jmp rdisp32
    if (which == end_pc_operand) return ip + 4;
    assert(which == call32_operand, "call has no disp32 or imm");
    return ip;

  case 0xF0: // Lock
    assert(os::is_MP(), "only on MP");
    goto again_after_prefix;

  case 0xF3: // For SSE
  case 0xF2: // For SSE2
    switch (0xFF & *ip++) {
    case REX:
    case REX_B:
    case REX_X:
    case REX_XB:
    case REX_R:
    case REX_RB:
    case REX_RX:
    case REX_RXB:
    case REX_W:
    case REX_WB:
    case REX_WX:
    case REX_WXB:
    case REX_WR:
    case REX_WRB:
    case REX_WRX:
    case REX_WRXB:
      NOT_LP64(assert(false, "found 64bit prefix"));
      ip++;
    default:
      ip++;
    }
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  default:
    ShouldNotReachHere();

#undef REP8
#undef REP16
  }

  assert(which != call32_operand, "instruction is not a call, jmp, or jcc");
#ifdef _LP64
  assert(which != imm_operand, "instruction is not a movq reg, imm64");
#else
  // assert(which != imm_operand || has_imm32, "instruction has no imm32 field");
  assert(which != imm_operand || has_disp32, "instruction has no imm32 field");
#endif // LP64
  assert(which != disp32_operand || has_disp32, "instruction has no disp32 field");

  // parse the output of emit_operand
  int op2 = 0xFF & *ip++;
  int base = op2 & 0x07;
  int op3 = -1;
  const int b100 = 4;
  const int b101 = 5;
  if (base == b100 && (op2 >> 6) != 3) {
    op3 = 0xFF & *ip++;
    base = op3 & 0x07; // refetch the base
  }
  // now ip points at the disp (if any)

  switch (op2 >> 6) {
  case 0:
    // [00 reg 100][ss index base]
    // [00 reg 100][00 100 esp]
    // [00 reg base]
    // [00 reg 100][ss index 101][disp32]
    // [00 reg 101] [disp32]

    if (base == b101) {
      if (which == disp32_operand)
        return ip; // caller wants the disp32
      ip += 4; // skip the disp32
    }
    break;

  case 1:
    // [01 reg 100][ss index base][disp8]
    // [01 reg 100][00 100 esp][disp8]
    // [01 reg base] [disp8]
    ip += 1; // skip the disp8
    break;

  case 2:
    // [10 reg 100][ss index base][disp32]
    // [10 reg 100][00 100 esp][disp32]
    // [10 reg base] [disp32]
    if (which == disp32_operand)
      return ip; // caller wants the disp32
    ip += 4; // skip the disp32
    break;

  case 3:
    // [11 reg base] (not a memory addressing mode)
    break;
  }

  if (which == end_pc_operand) {
    return ip + tail_size;
  }

#ifdef _LP64
  assert(which == narrow_oop_operand && !is_64bit, "instruction is not a movl adr, imm32");
#else
  assert(which == imm_operand, "instruction has only an imm field");
#endif // LP64
  return ip;
}

address Assembler::locate_next_instruction(address inst) {
  // Secretly share code with locate_operand:
  return locate_operand(inst, end_pc_operand);
}
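
// A minimal sketch of the end_pc logic above for one easy opcode family
// (standalone demo, not HotSpot API): B8+rd imm32 ("movl r, #32") is one
// opcode byte plus four immediate bytes, so the next instruction starts
// five bytes in.
#include <cstdint>
#include <cstdio>
static const uint8_t* next_after_mov_imm32(const uint8_t* ip) {
  if ((*ip & 0xF8) == 0xB8) return ip + 1 + 4; // opcode + imm32
  return NULL; // other opcodes are not handled in this sketch
}
static void locate_demo() {
  uint8_t code[] = { 0xB8, 0x78, 0x56, 0x34, 0x12 }; // movl eax, 0x12345678
  printf("%d\n", (int)(next_after_mov_imm32(code) - code)); // prints 5
}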
#ifdef ASSERT
void Assembler::check_relocation(RelocationHolder const& rspec, int format) {
  address inst = inst_mark();
  assert(inst != NULL && inst < pc(), "must point to beginning of instruction");
  address opnd;

  Relocation* r = rspec.reloc();
  if (r->type() == relocInfo::none) {
    return;
  } else if (r->is_call() || format == call32_operand) {
    // assert(format == imm32_operand, "cannot specify a nonzero format");
    opnd = locate_operand(inst, call32_operand);
  } else if (r->is_data()) {
    assert(format == imm_operand || format == disp32_operand
           LP64_ONLY(|| format == narrow_oop_operand), "format ok");
    opnd = locate_operand(inst, (WhichOperand)format);
  } else {
    assert(format == imm_operand, "cannot specify a format");
    return;
  }
  assert(opnd == pc(), "must put operand where relocs can find it");
}
#endif // ASSERT

void Assembler::emit_operand32(Register reg, Address adr) {
  assert(reg->encoding() < 8, "no extended registers");
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec);
}

void Assembler::emit_operand(Register reg, Address adr,
                             int rip_relative_correction) {
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec,
               rip_relative_correction);
}

void Assembler::emit_operand(XMMRegister reg, Address adr) {
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec);
}

// MMX operations
void Assembler::emit_operand(MMXRegister reg, Address adr) {
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
}

// work around gcc (3.2.1-7a) bug
void Assembler::emit_operand(Address adr, MMXRegister reg) {
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
}


void Assembler::emit_farith(int b1, int b2, int i) {
  assert(isByte(b1) && isByte(b2), "wrong opcode");
  assert(0 <= i && i < 8, "illegal stack offset");
  emit_byte(b1);
  emit_byte(b2 + i);
}
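
// A worked sketch of the FP-stack encoding above (standalone demo, not
// HotSpot API): emit_farith(0xD8, 0xC0, i) yields "fadd st(0), st(i)",
// since the stack index is simply added to the second opcode byte.
#include <cstdio>
static void farith_demo() {
  const int b1 = 0xD8, b2 = 0xC0, i = 3; // fadd st(0), st(3)
  printf("%02X %02X\n", b1, b2 + i);     // prints: D8 C3
}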
// Now the Assembler instructions (identical for 32/64 bits)

void Assembler::adcl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rdx, dst, imm32);
}

void Assembler::adcl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_byte(0x11);
  emit_operand(src, dst);
}

void Assembler::adcl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xD0, dst, imm32);
}

void Assembler::adcl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x13);
  emit_operand(dst, src);
}

void Assembler::adcl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x13, 0xC0, dst, src);
}

void Assembler::addl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rax, dst, imm32);
}

void Assembler::addl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_byte(0x01);
  emit_operand(src, dst);
}

void Assembler::addl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xC0, dst, imm32);
}

void Assembler::addl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x03);
  emit_operand(dst, src);
}

void Assembler::addl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x03, 0xC0, dst, src);
}

void Assembler::addr_nop_4() {
  assert(UseAddressNop, "no CPU support");
  // 4 bytes: NOP DWORD PTR [EAX+0]
  emit_byte(0x0F);
  emit_byte(0x1F);
  emit_byte(0x40); // emit_rm(cbuf, 0x1, EAX_enc, EAX_enc);
  emit_byte(0);    // 8-bits offset (1 byte)
}

void Assembler::addr_nop_5() {
  assert(UseAddressNop, "no CPU support");
  // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bits offset
  emit_byte(0x0F);
  emit_byte(0x1F);
  emit_byte(0x44); // emit_rm(cbuf, 0x1, EAX_enc, 0x4);
  emit_byte(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  emit_byte(0);    // 8-bits offset (1 byte)
}

void Assembler::addr_nop_7() {
  assert(UseAddressNop, "no CPU support");
  // 7 bytes: NOP DWORD PTR [EAX+0] 32-bits offset
  emit_byte(0x0F);
  emit_byte(0x1F);
  emit_byte(0x80); // emit_rm(cbuf, 0x2, EAX_enc, EAX_enc);
  emit_long(0);    // 32-bits offset (4 bytes)
}

void Assembler::addr_nop_8() {
  assert(UseAddressNop, "no CPU support");
  // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bits offset
  emit_byte(0x0F);
  emit_byte(0x1F);
  emit_byte(0x84); // emit_rm(cbuf, 0x2, EAX_enc, 0x4);
  emit_byte(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  emit_long(0);    // 32-bits offset (4 bytes)
}

void Assembler::addsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x58, dst, src, VEX_SIMD_F2);
}

void Assembler::addsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x58, dst, src, VEX_SIMD_F2);
}

void Assembler::addss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x58, dst, src, VEX_SIMD_F3);
}

void Assembler::addss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x58, dst, src, VEX_SIMD_F3);
}

void Assembler::andl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0x81);
  emit_operand(rsp, dst, 4);
  emit_long(imm32);
}

void Assembler::andl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xE0, dst, imm32);
}

void Assembler::andl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x23);
  emit_operand(dst, src);
}

void Assembler::andl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x23, 0xC0, dst, src);
}

void Assembler::bsfl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0xBC);
  emit_byte(0xC0 | encode);
}

void Assembler::bsrl(Register dst, Register src) {
  assert(!VM_Version::supports_lzcnt(), "encoding is treated as LZCNT");
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0xBD);
  emit_byte(0xC0 | encode);
}

void Assembler::bswapl(Register reg) { // bswap
  int encode = prefix_and_encode(reg->encoding());
  emit_byte(0x0F);
  emit_byte(0xC8 | encode);
}

void Assembler::call(Label& L, relocInfo::relocType rtype) {
  // suspect disp32 is always good
  int operand = LP64_ONLY(disp32_operand) NOT_LP64(imm_operand);

  if (L.is_bound()) {
    const int long_size = 5;
    int offs = (int)( target(L) - pc() );
    assert(offs <= 0, "assembler error");
    InstructionMark im(this);
    // 1110 1000 #32-bit disp
    emit_byte(0xE8);
    emit_data(offs - long_size, rtype, operand);
  } else {
    InstructionMark im(this);
    // 1110 1000 #32-bit disp
    L.add_patch_at(code(), locator());

    emit_byte(0xE8);
    emit_data(int(0), rtype, operand);
  }
}

void Assembler::call(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xFF);
  emit_byte(0xD0 | encode);
}


void Assembler::call(Address adr) {
  InstructionMark im(this);
  prefix(adr);
  emit_byte(0xFF);
  emit_operand(rdx, adr);
}

void Assembler::call_literal(address entry, RelocationHolder const& rspec) {
  assert(entry != NULL, "call most probably wrong");
  InstructionMark im(this);
  emit_byte(0xE8);
  intptr_t disp = entry - (_code_pos + sizeof(int32_t));
  assert(is_simm32(disp), "must be 32bit offset (call2)");
  // Technically, should use call32_operand, but this format is
  // implied by the fact that we're emitting a call instruction.

  int operand = LP64_ONLY(disp32_operand) NOT_LP64(call32_operand);
  emit_data((int) disp, rspec, operand);
}

void Assembler::cdql() {
  emit_byte(0x99);
}

void Assembler::cmovl(Condition cc, Register dst, Register src) {
  NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x40 | cc);
  emit_byte(0xC0 | encode);
}


void Assembler::cmovl(Condition cc, Register dst, Address src) {
  NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x40 | cc);
  emit_operand(dst, src);
}

void Assembler::cmpb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0x80);
  emit_operand(rdi, dst, 1);
  emit_byte(imm8);
}

void Assembler::cmpl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0x81);
  emit_operand(rdi, dst, 4);
  emit_long(imm32);
}

void Assembler::cmpl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xF8, dst, imm32);
}

void Assembler::cmpl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x3B, 0xC0, dst, src);
}


void Assembler::cmpl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x3B);
  emit_operand(dst, src);
}

void Assembler::cmpw(Address dst, int imm16) {
  InstructionMark im(this);
  assert(!dst.base_needs_rex() && !dst.index_needs_rex(), "no extended registers");
  emit_byte(0x66);
  emit_byte(0x81);
  emit_operand(rdi, dst, 2);
  emit_word(imm16);
}

// The 32-bit cmpxchg compares the value at adr with the contents of rax,
// and stores reg into adr if the values are equal; otherwise, the value at
// adr is loaded into rax. The ZF is set if the compared values were equal,
// and cleared otherwise.
void Assembler::cmpxchgl(Register reg, Address adr) { // cmpxchg
  if (Atomics & 2) {
    // Caveat: no InstructionMark, so this isn't relocatable.
    // Emit a synthetic, non-atomic, CAS equivalent.
    // Beware. The synthetic form sets all ICCs, not just ZF.
    // cmpxchg r,[m] is equivalent to rax = CAS(m, rax, r)
    cmpl(rax, adr);
    movl(rax, adr);
    if (reg != rax) {
      Label L;
      jcc(Assembler::notEqual, L);
      movl(adr, reg);
      bind(L);
    }
  } else {
    InstructionMark im(this);
    prefix(adr, reg);
    emit_byte(0x0F);
    emit_byte(0xB1);
    emit_operand(reg, adr);
  }
}
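
// A sketch of the cmpxchg semantics described above, in plain C++ (the
// function and its names are illustrative, not HotSpot API). The usual
// pairing in generated code is lock() immediately before cmpxchgl(), so
// the exchange is atomic on MP systems.
static bool cmpxchg_semantics(int32_t* adr, int32_t& rax_val, int32_t reg_val) {
  if (*adr == rax_val) {
    *adr = reg_val;  // store reg into adr; ZF would be set
    return true;
  } else {
    rax_val = *adr;  // load adr into rax; ZF would be cleared
    return false;
  }
}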
void Assembler::comisd(XMMRegister dst, Address src) {
  // NOTE: dbx seems to decode this as comiss even though the
  // 0x66 is there. Strangely, ucomisd comes out correct.
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_66);
}

void Assembler::comisd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_66);
}

void Assembler::comiss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_NONE);
}

void Assembler::comiss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_NONE);
}

void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0xE6, dst, src, VEX_SIMD_F3);
}

void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x5B, dst, src, VEX_SIMD_NONE);
}

void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5A, dst, src, VEX_SIMD_F2);
}

void Assembler::cvtsd2ss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5A, dst, src, VEX_SIMD_F2);
}

void Assembler::cvtsi2sdl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2);
  emit_byte(0x2A);
  emit_byte(0xC0 | encode);
}

void Assembler::cvtsi2sdl(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x2A, dst, src, VEX_SIMD_F2);
}

void Assembler::cvtsi2ssl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3);
  emit_byte(0x2A);
  emit_byte(0xC0 | encode);
}

void Assembler::cvtsi2ssl(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x2A, dst, src, VEX_SIMD_F3);
}

void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5A, dst, src, VEX_SIMD_F3);
}

void Assembler::cvtss2sd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5A, dst, src, VEX_SIMD_F3);
}


void Assembler::cvttsd2sil(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode(dst, src, VEX_SIMD_F2);
  emit_byte(0x2C);
  emit_byte(0xC0 | encode);
}

void Assembler::cvttss2sil(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int encode = simd_prefix_and_encode(dst, src, VEX_SIMD_F3);
  emit_byte(0x2C);
  emit_byte(0xC0 | encode);
}

void Assembler::decl(Address dst) {
  // Don't use it directly. Use MacroAssembler::decrement() instead.
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0xFF);
  emit_operand(rcx, dst);
}

void Assembler::divsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5E, dst, src, VEX_SIMD_F2);
}

void Assembler::divsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x5E, dst, src, VEX_SIMD_F2);
}

void Assembler::divss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x5E, dst, src, VEX_SIMD_F3);
}

void Assembler::divss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith(0x5E, dst, src, VEX_SIMD_F3);
}

void Assembler::emms() {
  NOT_LP64(assert(VM_Version::supports_mmx(), ""));
  emit_byte(0x0F);
  emit_byte(0x77);
}

void Assembler::hlt() {
  emit_byte(0xF4);
}

void Assembler::idivl(Register src) {
  int encode = prefix_and_encode(src->encoding());
  emit_byte(0xF7);
  emit_byte(0xF8 | encode);
}

void Assembler::divl(Register src) { // Unsigned
  int encode = prefix_and_encode(src->encoding());
  emit_byte(0xF7);
  emit_byte(0xF0 | encode);
}

void Assembler::imull(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0xAF);
  emit_byte(0xC0 | encode);
}


void Assembler::imull(Register dst, Register src, int value) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  if (is8bit(value)) {
    emit_byte(0x6B);
    emit_byte(0xC0 | encode);
    emit_byte(value & 0xFF);
  } else {
    emit_byte(0x69);
    emit_byte(0xC0 | encode);
    emit_long(value);
  }
}

void Assembler::incl(Address dst) {
  // Don't use it directly. Use MacroAssembler::increment() instead.
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0xFF);
  emit_operand(rax, dst);
}

void Assembler::jcc(Condition cc, Label& L, bool maybe_short) {
  InstructionMark im(this);
  assert((0 <= cc) && (cc < 16), "illegal cc");
  if (L.is_bound()) {
    address dst = target(L);
    assert(dst != NULL, "jcc most probably wrong");

    const int short_size = 2;
    const int long_size = 6;
    intptr_t offs = (intptr_t)dst - (intptr_t)_code_pos;
    if (maybe_short && is8bit(offs - short_size)) {
      // 0111 tttn #8-bit disp
      emit_byte(0x70 | cc);
      emit_byte((offs - short_size) & 0xFF);
    } else {
      // 0000 1111 1000 tttn #32-bit disp
      assert(is_simm32(offs - long_size),
             "must be 32bit offset (call4)");
      emit_byte(0x0F);
      emit_byte(0x80 | cc);
      emit_long(offs - long_size);
    }
  } else {
    // Note: we could eliminate conditional jumps to this jump if the
    // condition is the same; however, that seems to be a rather unlikely case.
    // Note: use jccb() if the label to be bound is very close, to get
    // an 8-bit displacement.
    L.add_patch_at(code(), locator());
    emit_byte(0x0F);
    emit_byte(0x80 | cc);
    emit_long(0);
  }
}
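
// A worked sketch of the backward-branch math above (standalone demo with
// hypothetical addresses, not HotSpot API): the encoded displacement is
// relative to the end of the branch instruction, hence 'offs - short_size'.
#include <cstdio>
static void branch_offset_demo() {
  long pc = 100, target = 90;                 // label bound 10 bytes back
  const int short_size = 2;                   // 0x7x cc + disp8
  long offs = target - pc;                    // -10
  printf("disp8 = %ld\n", offs - short_size); // -12, measured from insn end
}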
void Assembler::jccb(Condition cc, Label& L) {
  if (L.is_bound()) {
    const int short_size = 2;
    address entry = target(L);
#ifdef ASSERT
    intptr_t dist = (intptr_t)entry - ((intptr_t)_code_pos + short_size);
    intptr_t delta = short_branch_delta();
    if (delta != 0) {
      dist += (dist < 0 ? (-delta) : delta);
    }
    assert(is8bit(dist), "Displacement too large for a short jmp");
#endif
    intptr_t offs = (intptr_t)entry - (intptr_t)_code_pos;
    // 0111 tttn #8-bit disp
    emit_byte(0x70 | cc);
    emit_byte((offs - short_size) & 0xFF);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    emit_byte(0x70 | cc);
    emit_byte(0);
  }
}

void Assembler::jmp(Address adr) {
  InstructionMark im(this);
  prefix(adr);
  emit_byte(0xFF);
  emit_operand(rsp, adr);
}

void Assembler::jmp(Label& L, bool maybe_short) {
  if (L.is_bound()) {
    address entry = target(L);
    assert(entry != NULL, "jmp most probably wrong");
    InstructionMark im(this);
    const int short_size = 2;
    const int long_size = 5;
    intptr_t offs = entry - _code_pos;
    if (maybe_short && is8bit(offs - short_size)) {
      emit_byte(0xEB);
      emit_byte((offs - short_size) & 0xFF);
    } else {
      emit_byte(0xE9);
      emit_long(offs - long_size);
    }
  } else {
    // By default, forward jumps are always 32-bit displacements, since
    // we can't yet know where the label will be bound. If you're sure that
    // the forward jump will not run beyond 256 bytes, use jmpb to
    // force an 8-bit displacement.
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    emit_byte(0xE9);
    emit_long(0);
  }
}

void Assembler::jmp(Register entry) {
  int encode = prefix_and_encode(entry->encoding());
  emit_byte(0xFF);
  emit_byte(0xE0 | encode);
}

void Assembler::jmp_literal(address dest, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_byte(0xE9);
  assert(dest != NULL, "must have a target");
  intptr_t disp = dest - (_code_pos + sizeof(int32_t));
  assert(is_simm32(disp), "must be 32bit offset (jmp)");
  emit_data(disp, rspec.reloc(), call32_operand);
}

void Assembler::jmpb(Label& L) {
  if (L.is_bound()) {
    const int short_size = 2;
    address entry = target(L);
    assert(entry != NULL, "jmp most probably wrong");
#ifdef ASSERT
    intptr_t dist = (intptr_t)entry - ((intptr_t)_code_pos + short_size);
    intptr_t delta = short_branch_delta();
    if (delta != 0) {
      dist += (dist < 0 ? (-delta) : delta);
    }
    assert(is8bit(dist), "Displacement too large for a short jmp");
#endif
    intptr_t offs = entry - _code_pos;
    emit_byte(0xEB);
    emit_byte((offs - short_size) & 0xFF);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    emit_byte(0xEB);
    emit_byte(0);
  }
}

void Assembler::ldmxcsr(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  prefix(src);
  emit_byte(0x0F);
  emit_byte(0xAE);
  emit_operand(as_Register(2), src);
}

void Assembler::leal(Register dst, Address src) {
  InstructionMark im(this);
#ifdef _LP64
  emit_byte(0x67); // addr32
  prefix(src, dst);
#endif // LP64
  emit_byte(0x8D);
  emit_operand(dst, src);
}

void Assembler::lock() {
  if (Atomics & 1) {
    // Emit a NOP (0x90) in place of the lock prefix.
    emit_byte(0x90);
  } else {
    emit_byte(0xF0);
  }
}

void Assembler::lzcntl(Register dst, Register src) {
  assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0xBD);
  emit_byte(0xC0 | encode);
}

// Emit mfence instruction
void Assembler::mfence() {
  NOT_LP64(assert(VM_Version::supports_sse2(), "unsupported");)
  emit_byte(0x0F);
  emit_byte(0xAE);
  emit_byte(0xF0);
}

void Assembler::mov(Register dst, Register src) {
  LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
}

void Assembler::movapd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x28, dst, src, VEX_SIMD_66);
}

void Assembler::movaps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_simd_arith_nonds(0x28, dst, src, VEX_SIMD_NONE);
}

void Assembler::movlhps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int encode = simd_prefix_and_encode(dst, src, src, VEX_SIMD_NONE);
  emit_byte(0x16);
  emit_byte(0xC0 | encode);
}

void Assembler::movb(Register dst, Address src) {
  NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
  InstructionMark im(this);
  prefix(src, dst, true);
  emit_byte(0x8A);
  emit_operand(dst, src);
}


void Assembler::movb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0xC6);
  emit_operand(rax, dst, 1);
  emit_byte(imm8);
}


void Assembler::movb(Address dst, Register src) {
  assert(src->has_byte_register(), "must have byte register");
  InstructionMark im(this);
  prefix(dst, src, true);
  emit_byte(0x88);
  emit_operand(src, dst);
}

void Assembler::movdl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = simd_prefix_and_encode(dst, src, VEX_SIMD_66);
  emit_byte(0x6E);
  emit_byte(0xC0 | encode);
}

void Assembler::movdl(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  // swap src/dst to get correct prefix
  int encode = simd_prefix_and_encode(src, dst, VEX_SIMD_66);
  emit_byte(0x7E);
  emit_byte(0xC0 | encode);
}

void Assembler::movdl(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_66);
  emit_byte(0x6E);
  emit_operand(dst, src);
}

void Assembler::movdl(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_66);
  emit_byte(0x7E);
  emit_operand(src, dst);
}

void Assembler::movdqa(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_66);
}

void Assembler::movdqu(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_F3);
}

void Assembler::movdqu(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_F3);
}

void Assembler::movdqu(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  simd_prefix(dst, src, VEX_SIMD_F3);
  emit_byte(0x7F);
  emit_operand(src, dst);
}

// Move Unaligned 256bit Vector
void Assembler::vmovdqu(XMMRegister dst, XMMRegister src) {
  assert(UseAVX, "");
  bool vector256 = true;
  int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, vector256);
  emit_byte(0x6F);
  emit_byte(0xC0 | encode);
}

void Assembler::vmovdqu(XMMRegister dst, Address src) {
  assert(UseAVX, "");
  InstructionMark im(this);
  bool vector256 = true;
  vex_prefix(dst, xnoreg, src, VEX_SIMD_F3, vector256);
  emit_byte(0x6F);
  emit_operand(dst, src);
}

void Assembler::vmovdqu(Address dst, XMMRegister src) {
  assert(UseAVX, "");
  InstructionMark im(this);
  bool vector256 = true;
  // swap src<->dst for encoding
  assert(src != xnoreg, "sanity");
  vex_prefix(src, xnoreg, dst, VEX_SIMD_F3, vector256);
  emit_byte(0x7F);
  emit_operand(src, dst);
}

// Uses zero extension on 64bit

void Assembler::movl(Register dst, int32_t imm32) {
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xB8 | encode);
  emit_long(imm32);
}

void Assembler::movl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x8B);
  emit_byte(0xC0 | encode);
}

void Assembler::movl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x8B);
  emit_operand(dst, src);
}

void Assembler::movl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0xC7);
  emit_operand(rax, dst, 4);
  emit_long(imm32);
}

void Assembler::movl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_byte(0x89);
  emit_operand(src, dst);
}

// Newer CPUs require the use of movsd and movss to avoid partial register
// stalls when loading from memory. But for old Opterons, use movlpd instead
// of movsd. The selection is done in MacroAssembler::movdbl() and movflt().
void Assembler::movlpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_simd_arith(0x12, dst, src, VEX_SIMD_66);
}
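
// A hedged sketch of the selection mentioned above. The real logic lives in
// MacroAssembler::movdbl()/movflt(); this free function and its shape are
// illustrative, though UseXmmLoadAndClearUpper is the flag HotSpot consults.
static void movdbl_selection_sketch(MacroAssembler* masm, XMMRegister dst, Address src) {
  if (UseXmmLoadAndClearUpper) {
    masm->movsd(dst, src);   // newer CPUs: no partial register stall
  } else {
    masm->movlpd(dst, src);  // old Opteron: movlpd is cheaper
  }
}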
1725 void Assembler::movq( MMXRegister dst, Address src ) {
1726 assert( VM_Version::supports_mmx(), "" );
1727 emit_byte(0x0F);
1728 emit_byte(0x6F);
1729 emit_operand(dst, src);
1730 }
1732 void Assembler::movq( Address dst, MMXRegister src ) {
1733 assert( VM_Version::supports_mmx(), "" );
1734 emit_byte(0x0F);
1735 emit_byte(0x7F);
1736 // workaround gcc (3.2.1-7a) bug
1737 // In that version of gcc with only an emit_operand(MMX, Address)
1738 // gcc will tail jump and try and reverse the parameters completely
1739 // obliterating dst in the process. By having a version available
1740 // that doesn't need to swap the args at the tail jump the bug is
1741 // avoided.
1742 emit_operand(dst, src);
1743 }
1745 void Assembler::movq(XMMRegister dst, Address src) {
1746 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1747 InstructionMark im(this);
1748 simd_prefix(dst, src, VEX_SIMD_F3);
1749 emit_byte(0x7E);
1750 emit_operand(dst, src);
1751 }
1753 void Assembler::movq(Address dst, XMMRegister src) {
1754 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1755 InstructionMark im(this);
1756 simd_prefix(dst, src, VEX_SIMD_66);
1757 emit_byte(0xD6);
1758 emit_operand(src, dst);
1759 }
1761 void Assembler::movsbl(Register dst, Address src) { // movsxb
1762 InstructionMark im(this);
1763 prefix(src, dst);
1764 emit_byte(0x0F);
1765 emit_byte(0xBE);
1766 emit_operand(dst, src);
1767 }
1769 void Assembler::movsbl(Register dst, Register src) { // movsxb
1770 NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
1771 int encode = prefix_and_encode(dst->encoding(), src->encoding(), true);
1772 emit_byte(0x0F);
1773 emit_byte(0xBE);
1774 emit_byte(0xC0 | encode);
1775 }
1777 void Assembler::movsd(XMMRegister dst, XMMRegister src) {
1778 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1779 emit_simd_arith(0x10, dst, src, VEX_SIMD_F2);
1780 }
1782 void Assembler::movsd(XMMRegister dst, Address src) {
1783 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1784 emit_simd_arith_nonds(0x10, dst, src, VEX_SIMD_F2);
1785 }
1787 void Assembler::movsd(Address dst, XMMRegister src) {
1788 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1789 InstructionMark im(this);
1790 simd_prefix(dst, src, VEX_SIMD_F2);
1791 emit_byte(0x11);
1792 emit_operand(src, dst);
1793 }
1795 void Assembler::movss(XMMRegister dst, XMMRegister src) {
1796 NOT_LP64(assert(VM_Version::supports_sse(), ""));
1797 emit_simd_arith(0x10, dst, src, VEX_SIMD_F3);
1798 }
1800 void Assembler::movss(XMMRegister dst, Address src) {
1801 NOT_LP64(assert(VM_Version::supports_sse(), ""));
1802 emit_simd_arith_nonds(0x10, dst, src, VEX_SIMD_F3);
1803 }
1805 void Assembler::movss(Address dst, XMMRegister src) {
1806 NOT_LP64(assert(VM_Version::supports_sse(), ""));
1807 InstructionMark im(this);
1808 simd_prefix(dst, src, VEX_SIMD_F3);
1809 emit_byte(0x11);
1810 emit_operand(src, dst);
1811 }
1813 void Assembler::movswl(Register dst, Address src) { // movsxw
1814 InstructionMark im(this);
1815 prefix(src, dst);
1816 emit_byte(0x0F);
1817 emit_byte(0xBF);
1818 emit_operand(dst, src);
1819 }
1821 void Assembler::movswl(Register dst, Register src) { // movsxw
1822 int encode = prefix_and_encode(dst->encoding(), src->encoding());
1823 emit_byte(0x0F);
1824 emit_byte(0xBF);
1825 emit_byte(0xC0 | encode);
1826 }
1828 void Assembler::movw(Address dst, int imm16) {
1829 InstructionMark im(this);
1831   emit_byte(0x66); // operand-size prefix: 16-bit operand
1832 prefix(dst);
1833 emit_byte(0xC7);
1834 emit_operand(rax, dst, 2);
1835 emit_word(imm16);
1836 }
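// rax supplies the /0 reg field required by the C7 opcode, and the final
// argument to emit_operand() records that a 2-byte immediate still follows
// the operand, so RIP-relative displacements are computed from the true
// end of the instruction.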
1838 void Assembler::movw(Register dst, Address src) {
1839 InstructionMark im(this);
1840 emit_byte(0x66);
1841 prefix(src, dst);
1842 emit_byte(0x8B);
1843 emit_operand(dst, src);
1844 }
1846 void Assembler::movw(Address dst, Register src) {
1847 InstructionMark im(this);
1848 emit_byte(0x66);
1849 prefix(dst, src);
1850 emit_byte(0x89);
1851 emit_operand(src, dst);
1852 }
1854 void Assembler::movzbl(Register dst, Address src) { // movzxb
1855 InstructionMark im(this);
1856 prefix(src, dst);
1857 emit_byte(0x0F);
1858 emit_byte(0xB6);
1859 emit_operand(dst, src);
1860 }
1862 void Assembler::movzbl(Register dst, Register src) { // movzxb
1863 NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
1864 int encode = prefix_and_encode(dst->encoding(), src->encoding(), true);
1865 emit_byte(0x0F);
1866 emit_byte(0xB6);
1867 emit_byte(0xC0 | encode);
1868 }
1870 void Assembler::movzwl(Register dst, Address src) { // movzxw
1871 InstructionMark im(this);
1872 prefix(src, dst);
1873 emit_byte(0x0F);
1874 emit_byte(0xB7);
1875 emit_operand(dst, src);
1876 }
1878 void Assembler::movzwl(Register dst, Register src) { // movzxw
1879 int encode = prefix_and_encode(dst->encoding(), src->encoding());
1880 emit_byte(0x0F);
1881 emit_byte(0xB7);
1882 emit_byte(0xC0 | encode);
1883 }
1885 void Assembler::mull(Address src) {
1886 InstructionMark im(this);
1887 prefix(src);
1888 emit_byte(0xF7);
1889 emit_operand(rsp, src);
1890 }
1892 void Assembler::mull(Register src) {
1893 int encode = prefix_and_encode(src->encoding());
1894 emit_byte(0xF7);
1895 emit_byte(0xE0 | encode);
1896 }
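// MUL is encoded as F7 /4 (unsigned multiply of rax/eax by the operand,
// result in rdx:rax). In the memory form, passing rsp (encoding 4) to
// emit_operand() supplies the /4 reg field; in the register form,
// 0xE0 | encode builds the equivalent mod=11, reg=4 ModRM byte.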
1898 void Assembler::mulsd(XMMRegister dst, Address src) {
1899 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1900 emit_simd_arith(0x59, dst, src, VEX_SIMD_F2);
1901 }
1903 void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
1904 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1905 emit_simd_arith(0x59, dst, src, VEX_SIMD_F2);
1906 }
1908 void Assembler::mulss(XMMRegister dst, Address src) {
1909 NOT_LP64(assert(VM_Version::supports_sse(), ""));
1910 emit_simd_arith(0x59, dst, src, VEX_SIMD_F3);
1911 }
1913 void Assembler::mulss(XMMRegister dst, XMMRegister src) {
1914 NOT_LP64(assert(VM_Version::supports_sse(), ""));
1915 emit_simd_arith(0x59, dst, src, VEX_SIMD_F3);
1916 }
1918 void Assembler::negl(Register dst) {
1919 int encode = prefix_and_encode(dst->encoding());
1920 emit_byte(0xF7);
1921 emit_byte(0xD8 | encode);
1922 }
1924 void Assembler::nop(int i) {
1925 #ifdef ASSERT
1926 assert(i > 0, " ");
1927   // The fancy nops aren't currently recognized by debuggers, making it a
1928   // pain to disassemble code while debugging. If asserts are on, speed is
1929   // clearly not an issue, so simply use the single-byte traditional nop
1930   // to do the alignment.
1932 for (; i > 0 ; i--) emit_byte(0x90);
1933 return;
1935 #endif // ASSERT
1937 if (UseAddressNop && VM_Version::is_intel()) {
1938 //
1939     // Using multi-byte nops "0x0F 0x1F [address]" for Intel
1940 // 1: 0x90
1941 // 2: 0x66 0x90
1942 // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
1943 // 4: 0x0F 0x1F 0x40 0x00
1944 // 5: 0x0F 0x1F 0x44 0x00 0x00
1945 // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00
1946 // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
1947 // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
1948 // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
1949 // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
1950 // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
1952     // The rest of the encoding is Intel-specific - don't use consecutive address nops
1954 // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
1955 // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
1956 // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
1957 // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
1959 while(i >= 15) {
1960       // For Intel, don't generate consecutive address nops (mix with regular nops)
1961 i -= 15;
1962 emit_byte(0x66); // size prefix
1963 emit_byte(0x66); // size prefix
1964 emit_byte(0x66); // size prefix
1965 addr_nop_8();
1966 emit_byte(0x66); // size prefix
1967 emit_byte(0x66); // size prefix
1968 emit_byte(0x66); // size prefix
1969 emit_byte(0x90); // nop
1970 }
1971 switch (i) {
1972 case 14:
1973 emit_byte(0x66); // size prefix
1974 case 13:
1975 emit_byte(0x66); // size prefix
1976 case 12:
1977 addr_nop_8();
1978 emit_byte(0x66); // size prefix
1979 emit_byte(0x66); // size prefix
1980 emit_byte(0x66); // size prefix
1981 emit_byte(0x90); // nop
1982 break;
1983 case 11:
1984 emit_byte(0x66); // size prefix
1985 case 10:
1986 emit_byte(0x66); // size prefix
1987 case 9:
1988 emit_byte(0x66); // size prefix
1989 case 8:
1990 addr_nop_8();
1991 break;
1992 case 7:
1993 addr_nop_7();
1994 break;
1995 case 6:
1996 emit_byte(0x66); // size prefix
1997 case 5:
1998 addr_nop_5();
1999 break;
2000 case 4:
2001 addr_nop_4();
2002 break;
2003 case 3:
2004 // Don't use "0x0F 0x1F 0x00" - need patching safe padding
2005 emit_byte(0x66); // size prefix
2006 case 2:
2007 emit_byte(0x66); // size prefix
2008 case 1:
2009 emit_byte(0x90); // nop
2010 break;
2011 default:
2012 assert(i == 0, " ");
2013 }
2014 return;
2015 }
2016 if (UseAddressNop && VM_Version::is_amd()) {
2017 //
2018     // Using multi-byte nops "0x0F 0x1F [address]" for AMD.
2019 // 1: 0x90
2020 // 2: 0x66 0x90
2021 // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
2022 // 4: 0x0F 0x1F 0x40 0x00
2023 // 5: 0x0F 0x1F 0x44 0x00 0x00
2024 // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00
2025 // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
2026 // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
2027 // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
2028 // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
2029 // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
2031     // The rest of the encoding is AMD-specific - use consecutive address nops
2033 // 12: 0x66 0x0F 0x1F 0x44 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
2034 // 13: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
2035 // 14: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
2036 // 15: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
2037 // 16: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
2038 // Size prefixes (0x66) are added for larger sizes
2040 while(i >= 22) {
2041 i -= 11;
2042 emit_byte(0x66); // size prefix
2043 emit_byte(0x66); // size prefix
2044 emit_byte(0x66); // size prefix
2045 addr_nop_8();
2046 }
2047     // Generate the first nop for sizes 12-21
2048 switch (i) {
2049 case 21:
2050 i -= 1;
2051 emit_byte(0x66); // size prefix
2052 case 20:
2053 case 19:
2054 i -= 1;
2055 emit_byte(0x66); // size prefix
2056 case 18:
2057 case 17:
2058 i -= 1;
2059 emit_byte(0x66); // size prefix
2060 case 16:
2061 case 15:
2062 i -= 8;
2063 addr_nop_8();
2064 break;
2065 case 14:
2066 case 13:
2067 i -= 7;
2068 addr_nop_7();
2069 break;
2070 case 12:
2071 i -= 6;
2072 emit_byte(0x66); // size prefix
2073 addr_nop_5();
2074 break;
2075 default:
2076 assert(i < 12, " ");
2077 }
2079     // Generate the second nop for sizes 1-11
2080 switch (i) {
2081 case 11:
2082 emit_byte(0x66); // size prefix
2083 case 10:
2084 emit_byte(0x66); // size prefix
2085 case 9:
2086 emit_byte(0x66); // size prefix
2087 case 8:
2088 addr_nop_8();
2089 break;
2090 case 7:
2091 addr_nop_7();
2092 break;
2093 case 6:
2094 emit_byte(0x66); // size prefix
2095 case 5:
2096 addr_nop_5();
2097 break;
2098 case 4:
2099 addr_nop_4();
2100 break;
2101 case 3:
2102 // Don't use "0x0F 0x1F 0x00" - need patching safe padding
2103 emit_byte(0x66); // size prefix
2104 case 2:
2105 emit_byte(0x66); // size prefix
2106 case 1:
2107 emit_byte(0x90); // nop
2108 break;
2109 default:
2110 assert(i == 0, " ");
2111 }
2112 return;
2113 }
2115 // Using nops with size prefixes "0x66 0x90".
2116 // From AMD Optimization Guide:
2117 // 1: 0x90
2118 // 2: 0x66 0x90
2119 // 3: 0x66 0x66 0x90
2120 // 4: 0x66 0x66 0x66 0x90
2121 // 5: 0x66 0x66 0x90 0x66 0x90
2122 // 6: 0x66 0x66 0x90 0x66 0x66 0x90
2123 // 7: 0x66 0x66 0x66 0x90 0x66 0x66 0x90
2124 // 8: 0x66 0x66 0x66 0x90 0x66 0x66 0x66 0x90
2125 // 9: 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
2126 // 10: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
2127 //
2128 while(i > 12) {
2129 i -= 4;
2130 emit_byte(0x66); // size prefix
2131 emit_byte(0x66);
2132 emit_byte(0x66);
2133 emit_byte(0x90); // nop
2134 }
2135 // 1 - 12 nops
2136 if(i > 8) {
2137 if(i > 9) {
2138 i -= 1;
2139 emit_byte(0x66);
2140 }
2141 i -= 3;
2142 emit_byte(0x66);
2143 emit_byte(0x66);
2144 emit_byte(0x90);
2145 }
2146 // 1 - 8 nops
2147 if(i > 4) {
2148 if(i > 6) {
2149 i -= 1;
2150 emit_byte(0x66);
2151 }
2152 i -= 3;
2153 emit_byte(0x66);
2154 emit_byte(0x66);
2155 emit_byte(0x90);
2156 }
2157 switch (i) {
2158 case 4:
2159 emit_byte(0x66);
2160 case 3:
2161 emit_byte(0x66);
2162 case 2:
2163 emit_byte(0x66);
2164 case 1:
2165 emit_byte(0x90);
2166 break;
2167 default:
2168 assert(i == 0, " ");
2169 }
2170 }
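// For example, on Intel with UseAddressNop, nop(17) is emitted as one
// 15-byte group from the loop above
// (0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90)
// followed by "case 2" padding the remaining two bytes with 0x66 0x90.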
2172 void Assembler::notl(Register dst) {
2173 int encode = prefix_and_encode(dst->encoding());
2174 emit_byte(0xF7);
2175 emit_byte(0xD0 | encode );
2176 }
2178 void Assembler::orl(Address dst, int32_t imm32) {
2179 InstructionMark im(this);
2180 prefix(dst);
2181 emit_arith_operand(0x81, rcx, dst, imm32);
2182 }
2184 void Assembler::orl(Register dst, int32_t imm32) {
2185 prefix(dst);
2186 emit_arith(0x81, 0xC8, dst, imm32);
2187 }
2189 void Assembler::orl(Register dst, Address src) {
2190 InstructionMark im(this);
2191 prefix(src, dst);
2192 emit_byte(0x0B);
2193 emit_operand(dst, src);
2194 }
2196 void Assembler::orl(Register dst, Register src) {
2197 (void) prefix_and_encode(dst->encoding(), src->encoding());
2198 emit_arith(0x0B, 0xC0, dst, src);
2199 }
2201 void Assembler::packuswb(XMMRegister dst, Address src) {
2202 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2203 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
2204 emit_simd_arith(0x67, dst, src, VEX_SIMD_66);
2205 }
2207 void Assembler::packuswb(XMMRegister dst, XMMRegister src) {
2208 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2209 emit_simd_arith(0x67, dst, src, VEX_SIMD_66);
2210 }
2212 void Assembler::pcmpestri(XMMRegister dst, Address src, int imm8) {
2213 assert(VM_Version::supports_sse4_2(), "");
2214 InstructionMark im(this);
2215 simd_prefix(dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A);
2216 emit_byte(0x61);
2217 emit_operand(dst, src);
2218 emit_byte(imm8);
2219 }
2221 void Assembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) {
2222 assert(VM_Version::supports_sse4_2(), "");
2223 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A);
2224 emit_byte(0x61);
2225 emit_byte(0xC0 | encode);
2226 emit_byte(imm8);
2227 }
2229 void Assembler::pmovzxbw(XMMRegister dst, Address src) {
2230 assert(VM_Version::supports_sse4_1(), "");
2231 InstructionMark im(this);
2232 simd_prefix(dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
2233 emit_byte(0x30);
2234 emit_operand(dst, src);
2235 }
2237 void Assembler::pmovzxbw(XMMRegister dst, XMMRegister src) {
2238 assert(VM_Version::supports_sse4_1(), "");
2239 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
2240 emit_byte(0x30);
2241 emit_byte(0xC0 | encode);
2242 }
2244 // generic
2245 void Assembler::pop(Register dst) {
2246 int encode = prefix_and_encode(dst->encoding());
2247 emit_byte(0x58 | encode);
2248 }
2250 void Assembler::popcntl(Register dst, Address src) {
2251 assert(VM_Version::supports_popcnt(), "must support");
2252 InstructionMark im(this);
2253 emit_byte(0xF3);
2254 prefix(src, dst);
2255 emit_byte(0x0F);
2256 emit_byte(0xB8);
2257 emit_operand(dst, src);
2258 }
2260 void Assembler::popcntl(Register dst, Register src) {
2261 assert(VM_Version::supports_popcnt(), "must support");
2262 emit_byte(0xF3);
2263 int encode = prefix_and_encode(dst->encoding(), src->encoding());
2264 emit_byte(0x0F);
2265 emit_byte(0xB8);
2266 emit_byte(0xC0 | encode);
2267 }
2269 void Assembler::popf() {
2270 emit_byte(0x9D);
2271 }
2273 #ifndef _LP64 // no 32bit push/pop on amd64
2274 void Assembler::popl(Address dst) {
2275   // NOTE: this will adjust the stack by 8 bytes on 64-bit
2276 InstructionMark im(this);
2277 prefix(dst);
2278 emit_byte(0x8F);
2279 emit_operand(rax, dst);
2280 }
2281 #endif
2283 void Assembler::prefetch_prefix(Address src) {
2284 prefix(src);
2285 emit_byte(0x0F);
2286 }
2288 void Assembler::prefetchnta(Address src) {
2289 NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
2290 InstructionMark im(this);
2291 prefetch_prefix(src);
2292 emit_byte(0x18);
2293 emit_operand(rax, src); // 0, src
2294 }
2296 void Assembler::prefetchr(Address src) {
2297 assert(VM_Version::supports_3dnow_prefetch(), "must support");
2298 InstructionMark im(this);
2299 prefetch_prefix(src);
2300 emit_byte(0x0D);
2301 emit_operand(rax, src); // 0, src
2302 }
2304 void Assembler::prefetcht0(Address src) {
2305 NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
2306 InstructionMark im(this);
2307 prefetch_prefix(src);
2308 emit_byte(0x18);
2309 emit_operand(rcx, src); // 1, src
2310 }
2312 void Assembler::prefetcht1(Address src) {
2313 NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
2314 InstructionMark im(this);
2315 prefetch_prefix(src);
2316 emit_byte(0x18);
2317 emit_operand(rdx, src); // 2, src
2318 }
2320 void Assembler::prefetcht2(Address src) {
2321 NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
2322 InstructionMark im(this);
2323 prefetch_prefix(src);
2324 emit_byte(0x18);
2325 emit_operand(rbx, src); // 3, src
2326 }
2328 void Assembler::prefetchw(Address src) {
2329 assert(VM_Version::supports_3dnow_prefetch(), "must support");
2330 InstructionMark im(this);
2331 prefetch_prefix(src);
2332 emit_byte(0x0D);
2333 emit_operand(rcx, src); // 1, src
2334 }
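// The prefetch hint is selected by the ModRM reg field, hence the
// rax/rcx/rdx/rbx pseudo-register arguments to emit_operand() above:
//   0F 18 /0 prefetchnta    0F 18 /1 prefetcht0
//   0F 18 /2 prefetcht1     0F 18 /3 prefetcht2
//   0F 0D /0 prefetch (3DNow!, prefetchr)   0F 0D /1 prefetchw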
2336 void Assembler::prefix(Prefix p) {
2337 a_byte(p);
2338 }
2340 void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) {
2341 assert(isByte(mode), "invalid value");
2342 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2343 emit_simd_arith_nonds(0x70, dst, src, VEX_SIMD_66);
2344 emit_byte(mode & 0xFF);
2346 }
2348 void Assembler::pshufd(XMMRegister dst, Address src, int mode) {
2349 assert(isByte(mode), "invalid value");
2350 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2351 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
2352 InstructionMark im(this);
2353 simd_prefix(dst, src, VEX_SIMD_66);
2354 emit_byte(0x70);
2355 emit_operand(dst, src);
2356 emit_byte(mode & 0xFF);
2357 }
2359 void Assembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
2360 assert(isByte(mode), "invalid value");
2361 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2362 emit_simd_arith_nonds(0x70, dst, src, VEX_SIMD_F2);
2363 emit_byte(mode & 0xFF);
2364 }
2366 void Assembler::pshuflw(XMMRegister dst, Address src, int mode) {
2367 assert(isByte(mode), "invalid value");
2368 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2369 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
2370 InstructionMark im(this);
2371 simd_prefix(dst, src, VEX_SIMD_F2);
2372 emit_byte(0x70);
2373 emit_operand(dst, src);
2374 emit_byte(mode & 0xFF);
2375 }
2377 void Assembler::psrldq(XMMRegister dst, int shift) {
2378   // Shifts the 128-bit value in the xmm register right by 'shift' bytes.
2379 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2380 int encode = simd_prefix_and_encode(xmm3, dst, dst, VEX_SIMD_66);
2381 emit_byte(0x73);
2382 emit_byte(0xC0 | encode);
2383 emit_byte(shift);
2384 }
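// Unlike psrlq (later in this file), which shifts each 64-bit lane right
// by a bit count, psrldq (66 0F 73 /3 ib) shifts the whole 128-bit
// register right by 'shift' bytes; xmm3 supplies the /3 reg field.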
2386 void Assembler::ptest(XMMRegister dst, Address src) {
2387 assert(VM_Version::supports_sse4_1(), "");
2388 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
2389 InstructionMark im(this);
2390 simd_prefix(dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
2391 emit_byte(0x17);
2392 emit_operand(dst, src);
2393 }
2395 void Assembler::ptest(XMMRegister dst, XMMRegister src) {
2396 assert(VM_Version::supports_sse4_1(), "");
2397 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
2398 emit_byte(0x17);
2399 emit_byte(0xC0 | encode);
2400 }
2402 void Assembler::punpcklbw(XMMRegister dst, Address src) {
2403 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2404 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
2405 emit_simd_arith(0x60, dst, src, VEX_SIMD_66);
2406 }
2408 void Assembler::punpcklbw(XMMRegister dst, XMMRegister src) {
2409 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2410 emit_simd_arith(0x60, dst, src, VEX_SIMD_66);
2411 }
2413 void Assembler::punpckldq(XMMRegister dst, Address src) {
2414 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2415 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
2416 emit_simd_arith(0x62, dst, src, VEX_SIMD_66);
2417 }
2419 void Assembler::punpckldq(XMMRegister dst, XMMRegister src) {
2420 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2421 emit_simd_arith(0x62, dst, src, VEX_SIMD_66);
2422 }
2424 void Assembler::punpcklqdq(XMMRegister dst, XMMRegister src) {
2425 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2426 emit_simd_arith(0x6C, dst, src, VEX_SIMD_66);
2427 }
2429 void Assembler::push(int32_t imm32) {
2430   // in 64-bit mode we push 64 bits onto the stack but only
2431   // take a 32-bit immediate
2432 emit_byte(0x68);
2433 emit_long(imm32);
2434 }
2436 void Assembler::push(Register src) {
2437 int encode = prefix_and_encode(src->encoding());
2439 emit_byte(0x50 | encode);
2440 }
2442 void Assembler::pushf() {
2443 emit_byte(0x9C);
2444 }
2446 #ifndef _LP64 // no 32bit push/pop on amd64
2447 void Assembler::pushl(Address src) {
2448   // Note: this will push 64 bits on 64-bit
2449 InstructionMark im(this);
2450 prefix(src);
2451 emit_byte(0xFF);
2452 emit_operand(rsi, src);
2453 }
2454 #endif
2456 void Assembler::rcll(Register dst, int imm8) {
2457 assert(isShiftCount(imm8), "illegal shift count");
2458 int encode = prefix_and_encode(dst->encoding());
2459 if (imm8 == 1) {
2460 emit_byte(0xD1);
2461 emit_byte(0xD0 | encode);
2462 } else {
2463 emit_byte(0xC1);
2464 emit_byte(0xD0 | encode);
2465 emit_byte(imm8);
2466 }
2467 }
2469 // copies rcx pointer-sized words from [esi] to [edi]
2470 // generic
2471 void Assembler::rep_mov() {
2472 emit_byte(0xF3);
2473 // MOVSQ
2474 LP64_ONLY(prefix(REX_W));
2475 emit_byte(0xA5);
2476 }
2478 // stores the value in rax into rcx pointer-sized words at [edi]
2479 // generic
2480 void Assembler::rep_set() { // rep_set
2481 emit_byte(0xF3);
2482 // STOSQ
2483 LP64_ONLY(prefix(REX_W));
2484 emit_byte(0xAB);
2485 }
2487 // scans rcx pointer-sized words at [edi] for an occurrence of rax
2488 // generic
2489 void Assembler::repne_scan() { // repne_scan
2490 emit_byte(0xF2);
2491 // SCASQ
2492 LP64_ONLY(prefix(REX_W));
2493 emit_byte(0xAF);
2494 }
2496 #ifdef _LP64
2497 // scans rcx 4-byte words at [edi] for an occurrence of rax
2498 // generic
2499 void Assembler::repne_scanl() { // repne_scanl
2500 emit_byte(0xF2);
2501 // SCASL
2502 emit_byte(0xAF);
2503 }
2504 #endif
2506 void Assembler::ret(int imm16) {
2507 if (imm16 == 0) {
2508 emit_byte(0xC3);
2509 } else {
2510 emit_byte(0xC2);
2511 emit_word(imm16);
2512 }
2513 }
2515 void Assembler::sahf() {
2516 #ifdef _LP64
2517 // Not supported in 64bit mode
2518 ShouldNotReachHere();
2519 #endif
2520 emit_byte(0x9E);
2521 }
2523 void Assembler::sarl(Register dst, int imm8) {
2524 int encode = prefix_and_encode(dst->encoding());
2525 assert(isShiftCount(imm8), "illegal shift count");
2526 if (imm8 == 1) {
2527 emit_byte(0xD1);
2528 emit_byte(0xF8 | encode);
2529 } else {
2530 emit_byte(0xC1);
2531 emit_byte(0xF8 | encode);
2532 emit_byte(imm8);
2533 }
2534 }
2536 void Assembler::sarl(Register dst) {
2537 int encode = prefix_and_encode(dst->encoding());
2538 emit_byte(0xD3);
2539 emit_byte(0xF8 | encode);
2540 }
2542 void Assembler::sbbl(Address dst, int32_t imm32) {
2543 InstructionMark im(this);
2544 prefix(dst);
2545 emit_arith_operand(0x81, rbx, dst, imm32);
2546 }
2548 void Assembler::sbbl(Register dst, int32_t imm32) {
2549 prefix(dst);
2550 emit_arith(0x81, 0xD8, dst, imm32);
2551 }
2554 void Assembler::sbbl(Register dst, Address src) {
2555 InstructionMark im(this);
2556 prefix(src, dst);
2557 emit_byte(0x1B);
2558 emit_operand(dst, src);
2559 }
2561 void Assembler::sbbl(Register dst, Register src) {
2562 (void) prefix_and_encode(dst->encoding(), src->encoding());
2563 emit_arith(0x1B, 0xC0, dst, src);
2564 }
2566 void Assembler::setb(Condition cc, Register dst) {
2567 assert(0 <= cc && cc < 16, "illegal cc");
2568 int encode = prefix_and_encode(dst->encoding(), true);
2569 emit_byte(0x0F);
2570 emit_byte(0x90 | cc);
2571 emit_byte(0xC0 | encode);
2572 }
2574 void Assembler::shll(Register dst, int imm8) {
2575 assert(isShiftCount(imm8), "illegal shift count");
2576 int encode = prefix_and_encode(dst->encoding());
2577 if (imm8 == 1 ) {
2578 emit_byte(0xD1);
2579 emit_byte(0xE0 | encode);
2580 } else {
2581 emit_byte(0xC1);
2582 emit_byte(0xE0 | encode);
2583 emit_byte(imm8);
2584 }
2585 }
2587 void Assembler::shll(Register dst) {
2588 int encode = prefix_and_encode(dst->encoding());
2589 emit_byte(0xD3);
2590 emit_byte(0xE0 | encode);
2591 }
2593 void Assembler::shrl(Register dst, int imm8) {
2594 assert(isShiftCount(imm8), "illegal shift count");
2595 int encode = prefix_and_encode(dst->encoding());
2596 emit_byte(0xC1);
2597 emit_byte(0xE8 | encode);
2598 emit_byte(imm8);
2599 }
2601 void Assembler::shrl(Register dst) {
2602 int encode = prefix_and_encode(dst->encoding());
2603 emit_byte(0xD3);
2604 emit_byte(0xE8 | encode);
2605 }
2607 // copies a single word from [esi] to [edi]
2608 void Assembler::smovl() {
2609 emit_byte(0xA5);
2610 }
2612 void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
2613 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2614 emit_simd_arith(0x51, dst, src, VEX_SIMD_F2);
2615 }
2617 void Assembler::sqrtsd(XMMRegister dst, Address src) {
2618 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2619 emit_simd_arith(0x51, dst, src, VEX_SIMD_F2);
2620 }
2622 void Assembler::sqrtss(XMMRegister dst, XMMRegister src) {
2623 NOT_LP64(assert(VM_Version::supports_sse(), ""));
2624 emit_simd_arith(0x51, dst, src, VEX_SIMD_F3);
2625 }
2627 void Assembler::sqrtss(XMMRegister dst, Address src) {
2628 NOT_LP64(assert(VM_Version::supports_sse(), ""));
2629 emit_simd_arith(0x51, dst, src, VEX_SIMD_F3);
2630 }
2632 void Assembler::stmxcsr( Address dst) {
2633 NOT_LP64(assert(VM_Version::supports_sse(), ""));
2634 InstructionMark im(this);
2635 prefix(dst);
2636 emit_byte(0x0F);
2637 emit_byte(0xAE);
2638 emit_operand(as_Register(3), dst);
2639 }
2641 void Assembler::subl(Address dst, int32_t imm32) {
2642 InstructionMark im(this);
2643 prefix(dst);
2644 emit_arith_operand(0x81, rbp, dst, imm32);
2645 }
2647 void Assembler::subl(Address dst, Register src) {
2648 InstructionMark im(this);
2649 prefix(dst, src);
2650 emit_byte(0x29);
2651 emit_operand(src, dst);
2652 }
2654 void Assembler::subl(Register dst, int32_t imm32) {
2655 prefix(dst);
2656 emit_arith(0x81, 0xE8, dst, imm32);
2657 }
2659 // Force generation of a 4-byte immediate value even if it fits into 8 bits
2660 void Assembler::subl_imm32(Register dst, int32_t imm32) {
2661 prefix(dst);
2662 emit_arith_imm32(0x81, 0xE8, dst, imm32);
2663 }
2665 void Assembler::subl(Register dst, Address src) {
2666 InstructionMark im(this);
2667 prefix(src, dst);
2668 emit_byte(0x2B);
2669 emit_operand(dst, src);
2670 }
2672 void Assembler::subl(Register dst, Register src) {
2673 (void) prefix_and_encode(dst->encoding(), src->encoding());
2674 emit_arith(0x2B, 0xC0, dst, src);
2675 }
2677 void Assembler::subsd(XMMRegister dst, XMMRegister src) {
2678 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2679 emit_simd_arith(0x5C, dst, src, VEX_SIMD_F2);
2680 }
2682 void Assembler::subsd(XMMRegister dst, Address src) {
2683 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2684 emit_simd_arith(0x5C, dst, src, VEX_SIMD_F2);
2685 }
2687 void Assembler::subss(XMMRegister dst, XMMRegister src) {
2688 NOT_LP64(assert(VM_Version::supports_sse(), ""));
2689 emit_simd_arith(0x5C, dst, src, VEX_SIMD_F3);
2690 }
2692 void Assembler::subss(XMMRegister dst, Address src) {
2693 NOT_LP64(assert(VM_Version::supports_sse(), ""));
2694 emit_simd_arith(0x5C, dst, src, VEX_SIMD_F3);
2695 }
2697 void Assembler::testb(Register dst, int imm8) {
2698 NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
2699 (void) prefix_and_encode(dst->encoding(), true);
2700 emit_arith_b(0xF6, 0xC0, dst, imm8);
2701 }
2703 void Assembler::testl(Register dst, int32_t imm32) {
2704   // not using emit_arith because test
2705   // doesn't support sign-extension of
2706   // 8-bit operands
2707 int encode = dst->encoding();
2708 if (encode == 0) {
2709 emit_byte(0xA9);
2710 } else {
2711 encode = prefix_and_encode(encode);
2712 emit_byte(0xF7);
2713 emit_byte(0xC0 | encode);
2714 }
2715 emit_long(imm32);
2716 }
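// eax (encoding 0) gets the one-byte-shorter A9 id form (test eax, imm32);
// all other registers use the generic F7 /0 encoding.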
2718 void Assembler::testl(Register dst, Register src) {
2719 (void) prefix_and_encode(dst->encoding(), src->encoding());
2720 emit_arith(0x85, 0xC0, dst, src);
2721 }
2723 void Assembler::testl(Register dst, Address src) {
2724 InstructionMark im(this);
2725 prefix(src, dst);
2726 emit_byte(0x85);
2727 emit_operand(dst, src);
2728 }
2730 void Assembler::ucomisd(XMMRegister dst, Address src) {
2731 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2732 emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_66);
2733 }
2735 void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
2736 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2737 emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_66);
2738 }
2740 void Assembler::ucomiss(XMMRegister dst, Address src) {
2741 NOT_LP64(assert(VM_Version::supports_sse(), ""));
2742 emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_NONE);
2743 }
2745 void Assembler::ucomiss(XMMRegister dst, XMMRegister src) {
2746 NOT_LP64(assert(VM_Version::supports_sse(), ""));
2747 emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_NONE);
2748 }
2751 void Assembler::xaddl(Address dst, Register src) {
2752 InstructionMark im(this);
2753 prefix(dst, src);
2754 emit_byte(0x0F);
2755 emit_byte(0xC1);
2756 emit_operand(src, dst);
2757 }
2759 void Assembler::xchgl(Register dst, Address src) { // xchg
2760 InstructionMark im(this);
2761 prefix(src, dst);
2762 emit_byte(0x87);
2763 emit_operand(dst, src);
2764 }
2766 void Assembler::xchgl(Register dst, Register src) {
2767 int encode = prefix_and_encode(dst->encoding(), src->encoding());
2768 emit_byte(0x87);
2769 emit_byte(0xc0 | encode);
2770 }
2772 void Assembler::xorl(Register dst, int32_t imm32) {
2773 prefix(dst);
2774 emit_arith(0x81, 0xF0, dst, imm32);
2775 }
2777 void Assembler::xorl(Register dst, Address src) {
2778 InstructionMark im(this);
2779 prefix(src, dst);
2780 emit_byte(0x33);
2781 emit_operand(dst, src);
2782 }
2784 void Assembler::xorl(Register dst, Register src) {
2785 (void) prefix_and_encode(dst->encoding(), src->encoding());
2786 emit_arith(0x33, 0xC0, dst, src);
2787 }
2790 // AVX 3-operand scalar floating-point arithmetic instructions
2792 void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, Address src) {
2793 assert(VM_Version::supports_avx(), "");
2794 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
2795 }
2797 void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
2798 assert(VM_Version::supports_avx(), "");
2799 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
2800 }
2802 void Assembler::vaddss(XMMRegister dst, XMMRegister nds, Address src) {
2803 assert(VM_Version::supports_avx(), "");
2804 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
2805 }
2807 void Assembler::vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
2808 assert(VM_Version::supports_avx(), "");
2809 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
2810 }
2812 void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, Address src) {
2813 assert(VM_Version::supports_avx(), "");
2814 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
2815 }
2817 void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
2818 assert(VM_Version::supports_avx(), "");
2819 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
2820 }
2822 void Assembler::vdivss(XMMRegister dst, XMMRegister nds, Address src) {
2823 assert(VM_Version::supports_avx(), "");
2824 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
2825 }
2827 void Assembler::vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
2828 assert(VM_Version::supports_avx(), "");
2829 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
2830 }
2832 void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, Address src) {
2833 assert(VM_Version::supports_avx(), "");
2834 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
2835 }
2837 void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
2838 assert(VM_Version::supports_avx(), "");
2839 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
2840 }
2842 void Assembler::vmulss(XMMRegister dst, XMMRegister nds, Address src) {
2843 assert(VM_Version::supports_avx(), "");
2844 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
2845 }
2847 void Assembler::vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
2848 assert(VM_Version::supports_avx(), "");
2849 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
2850 }
2852 void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, Address src) {
2853 assert(VM_Version::supports_avx(), "");
2854 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
2855 }
2857 void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
2858 assert(VM_Version::supports_avx(), "");
2859 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
2860 }
2862 void Assembler::vsubss(XMMRegister dst, XMMRegister nds, Address src) {
2863 assert(VM_Version::supports_avx(), "");
2864 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
2865 }
2867 void Assembler::vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
2868 assert(VM_Version::supports_avx(), "");
2869 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
2870 }
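// Note that these VEX-encoded forms are non-destructive three-operand
// instructions: dst receives nds OP src and nds is left intact, whereas
// the corresponding legacy SSE forms overwrite dst in place.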
2872 //====================VECTOR ARITHMETIC=====================================
2874 // Floating-point vector arithmetic
2876 void Assembler::addpd(XMMRegister dst, XMMRegister src) {
2877 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2878 emit_simd_arith(0x58, dst, src, VEX_SIMD_66);
2879 }
2881 void Assembler::addps(XMMRegister dst, XMMRegister src) {
2882 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2883 emit_simd_arith(0x58, dst, src, VEX_SIMD_NONE);
2884 }
2886 void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
2887 assert(VM_Version::supports_avx(), "");
2888 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_66, vector256);
2889 }
2891 void Assembler::vaddps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
2892 assert(VM_Version::supports_avx(), "");
2893 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_NONE, vector256);
2894 }
2896 void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
2897 assert(VM_Version::supports_avx(), "");
2898 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_66, vector256);
2899 }
2901 void Assembler::vaddps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
2902 assert(VM_Version::supports_avx(), "");
2903 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_NONE, vector256);
2904 }
2906 void Assembler::subpd(XMMRegister dst, XMMRegister src) {
2907 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2908 emit_simd_arith(0x5C, dst, src, VEX_SIMD_66);
2909 }
2911 void Assembler::subps(XMMRegister dst, XMMRegister src) {
2912 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2913 emit_simd_arith(0x5C, dst, src, VEX_SIMD_NONE);
2914 }
2916 void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
2917 assert(VM_Version::supports_avx(), "");
2918 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_66, vector256);
2919 }
2921 void Assembler::vsubps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
2922 assert(VM_Version::supports_avx(), "");
2923 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_NONE, vector256);
2924 }
2926 void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
2927 assert(VM_Version::supports_avx(), "");
2928 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_66, vector256);
2929 }
2931 void Assembler::vsubps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
2932 assert(VM_Version::supports_avx(), "");
2933 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_NONE, vector256);
2934 }
2936 void Assembler::mulpd(XMMRegister dst, XMMRegister src) {
2937 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2938 emit_simd_arith(0x59, dst, src, VEX_SIMD_66);
2939 }
2941 void Assembler::mulps(XMMRegister dst, XMMRegister src) {
2942 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2943 emit_simd_arith(0x59, dst, src, VEX_SIMD_NONE);
2944 }
2946 void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
2947 assert(VM_Version::supports_avx(), "");
2948 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_66, vector256);
2949 }
2951 void Assembler::vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
2952 assert(VM_Version::supports_avx(), "");
2953 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_NONE, vector256);
2954 }
2956 void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
2957 assert(VM_Version::supports_avx(), "");
2958 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_66, vector256);
2959 }
2961 void Assembler::vmulps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
2962 assert(VM_Version::supports_avx(), "");
2963 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_NONE, vector256);
2964 }
2966 void Assembler::divpd(XMMRegister dst, XMMRegister src) {
2967 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2968 emit_simd_arith(0x5E, dst, src, VEX_SIMD_66);
2969 }
2971 void Assembler::divps(XMMRegister dst, XMMRegister src) {
2972 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2973 emit_simd_arith(0x5E, dst, src, VEX_SIMD_NONE);
2974 }
2976 void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
2977 assert(VM_Version::supports_avx(), "");
2978 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_66, vector256);
2979 }
2981 void Assembler::vdivps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
2982 assert(VM_Version::supports_avx(), "");
2983 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_NONE, vector256);
2984 }
2986 void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
2987 assert(VM_Version::supports_avx(), "");
2988 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_66, vector256);
2989 }
2991 void Assembler::vdivps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
2992 assert(VM_Version::supports_avx(), "");
2993 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_NONE, vector256);
2994 }
2996 void Assembler::andpd(XMMRegister dst, XMMRegister src) {
2997 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2998 emit_simd_arith(0x54, dst, src, VEX_SIMD_66);
2999 }
3001 void Assembler::andps(XMMRegister dst, XMMRegister src) {
3002 NOT_LP64(assert(VM_Version::supports_sse(), ""));
3003 emit_simd_arith(0x54, dst, src, VEX_SIMD_NONE);
3004 }
3006 void Assembler::andps(XMMRegister dst, Address src) {
3007 NOT_LP64(assert(VM_Version::supports_sse(), ""));
3008 emit_simd_arith(0x54, dst, src, VEX_SIMD_NONE);
3009 }
3011 void Assembler::andpd(XMMRegister dst, Address src) {
3012 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3013 emit_simd_arith(0x54, dst, src, VEX_SIMD_66);
3014 }
3016 void Assembler::vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
3017 assert(VM_Version::supports_avx(), "");
3018 emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_66, vector256);
3019 }
3021 void Assembler::vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
3022 assert(VM_Version::supports_avx(), "");
3023 emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_NONE, vector256);
3024 }
3026 void Assembler::vandpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
3027 assert(VM_Version::supports_avx(), "");
3028 emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_66, vector256);
3029 }
3031 void Assembler::vandps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
3032 assert(VM_Version::supports_avx(), "");
3033 emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_NONE, vector256);
3034 }
3036 void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
3037 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3038 emit_simd_arith(0x57, dst, src, VEX_SIMD_66);
3039 }
3041 void Assembler::xorps(XMMRegister dst, XMMRegister src) {
3042 NOT_LP64(assert(VM_Version::supports_sse(), ""));
3043 emit_simd_arith(0x57, dst, src, VEX_SIMD_NONE);
3044 }
3046 void Assembler::xorpd(XMMRegister dst, Address src) {
3047 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3048 emit_simd_arith(0x57, dst, src, VEX_SIMD_66);
3049 }
3051 void Assembler::xorps(XMMRegister dst, Address src) {
3052 NOT_LP64(assert(VM_Version::supports_sse(), ""));
3053 emit_simd_arith(0x57, dst, src, VEX_SIMD_NONE);
3054 }
3056 void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
3057 assert(VM_Version::supports_avx(), "");
3058 emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_66, vector256);
3059 }
3061 void Assembler::vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
3062 assert(VM_Version::supports_avx(), "");
3063 emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_NONE, vector256);
3064 }
3066 void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
3067 assert(VM_Version::supports_avx(), "");
3068 emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_66, vector256);
3069 }
3071 void Assembler::vxorps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
3072 assert(VM_Version::supports_avx(), "");
3073 emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_NONE, vector256);
3074 }
3077 // Integer vector arithmetic
3078 void Assembler::paddb(XMMRegister dst, XMMRegister src) {
3079 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3080 emit_simd_arith(0xFC, dst, src, VEX_SIMD_66);
3081 }
3083 void Assembler::paddw(XMMRegister dst, XMMRegister src) {
3084 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3085 emit_simd_arith(0xFD, dst, src, VEX_SIMD_66);
3086 }
3088 void Assembler::paddd(XMMRegister dst, XMMRegister src) {
3089 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3090 emit_simd_arith(0xFE, dst, src, VEX_SIMD_66);
3091 }
3093 void Assembler::paddq(XMMRegister dst, XMMRegister src) {
3094 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3095 emit_simd_arith(0xD4, dst, src, VEX_SIMD_66);
3096 }
3098 void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
3099 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3100 emit_vex_arith(0xFC, dst, nds, src, VEX_SIMD_66, vector256);
3101 }
3103 void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
3104 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3105 emit_vex_arith(0xFD, dst, nds, src, VEX_SIMD_66, vector256);
3106 }
3108 void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
3109 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3110 emit_vex_arith(0xFE, dst, nds, src, VEX_SIMD_66, vector256);
3111 }
3113 void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
3114 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3115 emit_vex_arith(0xD4, dst, nds, src, VEX_SIMD_66, vector256);
3116 }
3118 void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
3119 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3120 emit_vex_arith(0xFC, dst, nds, src, VEX_SIMD_66, vector256);
3121 }
3123 void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
3124 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3125 emit_vex_arith(0xFD, dst, nds, src, VEX_SIMD_66, vector256);
3126 }
3128 void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
3129 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3130 emit_vex_arith(0xFE, dst, nds, src, VEX_SIMD_66, vector256);
3131 }
3133 void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
3134 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3135 emit_vex_arith(0xD4, dst, nds, src, VEX_SIMD_66, vector256);
3136 }
3138 void Assembler::psubb(XMMRegister dst, XMMRegister src) {
3139 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3140 emit_simd_arith(0xF8, dst, src, VEX_SIMD_66);
3141 }
3143 void Assembler::psubw(XMMRegister dst, XMMRegister src) {
3144 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3145 emit_simd_arith(0xF9, dst, src, VEX_SIMD_66);
3146 }
3148 void Assembler::psubd(XMMRegister dst, XMMRegister src) {
3149 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3150 emit_simd_arith(0xFA, dst, src, VEX_SIMD_66);
3151 }
3153 void Assembler::psubq(XMMRegister dst, XMMRegister src) {
3154 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3155 emit_simd_arith(0xFB, dst, src, VEX_SIMD_66);
3156 }
3158 void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
3159 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3160 emit_vex_arith(0xF8, dst, nds, src, VEX_SIMD_66, vector256);
3161 }
3163 void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
3164 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3165 emit_vex_arith(0xF9, dst, nds, src, VEX_SIMD_66, vector256);
3166 }
3168 void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
3169 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3170 emit_vex_arith(0xFA, dst, nds, src, VEX_SIMD_66, vector256);
3171 }
3173 void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
3174 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3175 emit_vex_arith(0xFB, dst, nds, src, VEX_SIMD_66, vector256);
3176 }
3178 void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
3179 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3180 emit_vex_arith(0xF8, dst, nds, src, VEX_SIMD_66, vector256);
3181 }
3183 void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
3184 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3185 emit_vex_arith(0xF9, dst, nds, src, VEX_SIMD_66, vector256);
3186 }
3188 void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
3189 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3190 emit_vex_arith(0xFA, dst, nds, src, VEX_SIMD_66, vector256);
3191 }
3193 void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
3194 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3195 emit_vex_arith(0xFB, dst, nds, src, VEX_SIMD_66, vector256);
3196 }
3198 void Assembler::pmullw(XMMRegister dst, XMMRegister src) {
3199 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3200 emit_simd_arith(0xD5, dst, src, VEX_SIMD_66);
3201 }
3203 void Assembler::pmulld(XMMRegister dst, XMMRegister src) {
3204 assert(VM_Version::supports_sse4_1(), "");
3205 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
3206 emit_byte(0x40);
3207 emit_byte(0xC0 | encode);
3208 }
3210 void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
3211 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3212 emit_vex_arith(0xD5, dst, nds, src, VEX_SIMD_66, vector256);
3213 }
3215 void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
3216 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3217 int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_38);
3218 emit_byte(0x40);
3219 emit_byte(0xC0 | encode);
3220 }
3222 void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
3223 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3224 emit_vex_arith(0xD5, dst, nds, src, VEX_SIMD_66, vector256);
3225 }
3227 void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
3228 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3229 InstructionMark im(this);
3230 int dst_enc = dst->encoding();
3231 int nds_enc = nds->is_valid() ? nds->encoding() : 0;
3232 vex_prefix(src, nds_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector256);
3233 emit_byte(0x40);
3234 emit_operand(dst, src);
3235 }
3237 // Shift packed integers left by the specified number of bits.
3238 void Assembler::psllw(XMMRegister dst, int shift) {
3239 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3240 // XMM6 is for /6 encoding: 66 0F 71 /6 ib
3241 int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66);
3242 emit_byte(0x71);
3243 emit_byte(0xC0 | encode);
3244 emit_byte(shift & 0xFF);
3245 }
3247 void Assembler::pslld(XMMRegister dst, int shift) {
3248 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3249 // XMM6 is for /6 encoding: 66 0F 72 /6 ib
3250 int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66);
3251 emit_byte(0x72);
3252 emit_byte(0xC0 | encode);
3253 emit_byte(shift & 0xFF);
3254 }
3256 void Assembler::psllq(XMMRegister dst, int shift) {
3257 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3258 // XMM6 is for /6 encoding: 66 0F 73 /6 ib
3259 int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66);
3260 emit_byte(0x73);
3261 emit_byte(0xC0 | encode);
3262 emit_byte(shift & 0xFF);
3263 }
3265 void Assembler::psllw(XMMRegister dst, XMMRegister shift) {
3266 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3267 emit_simd_arith(0xF1, dst, shift, VEX_SIMD_66);
3268 }
3270 void Assembler::pslld(XMMRegister dst, XMMRegister shift) {
3271 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3272 emit_simd_arith(0xF2, dst, shift, VEX_SIMD_66);
3273 }
3275 void Assembler::psllq(XMMRegister dst, XMMRegister shift) {
3276 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3277 emit_simd_arith(0xF3, dst, shift, VEX_SIMD_66);
3278 }
3280 void Assembler::vpsllw(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
3281 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3282 // XMM6 is for /6 encoding: 66 0F 71 /6 ib
3283 emit_vex_arith(0x71, xmm6, dst, src, VEX_SIMD_66, vector256);
3284 emit_byte(shift & 0xFF);
3285 }
3287 void Assembler::vpslld(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
3288 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3289 // XMM6 is for /6 encoding: 66 0F 72 /6 ib
3290 emit_vex_arith(0x72, xmm6, dst, src, VEX_SIMD_66, vector256);
3291 emit_byte(shift & 0xFF);
3292 }
3294 void Assembler::vpsllq(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
3295 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3296 // XMM6 is for /6 encoding: 66 0F 73 /6 ib
3297 emit_vex_arith(0x73, xmm6, dst, src, VEX_SIMD_66, vector256);
3298 emit_byte(shift & 0xFF);
3299 }
3301 void Assembler::vpsllw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
3302 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3303 emit_vex_arith(0xF1, dst, src, shift, VEX_SIMD_66, vector256);
3304 }
3306 void Assembler::vpslld(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
3307 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3308 emit_vex_arith(0xF2, dst, src, shift, VEX_SIMD_66, vector256);
3309 }
3311 void Assembler::vpsllq(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
3312 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3313 emit_vex_arith(0xF3, dst, src, shift, VEX_SIMD_66, vector256);
3314 }
3316 // Shift packed integers logically right by the specified number of bits.
3317 void Assembler::psrlw(XMMRegister dst, int shift) {
3318 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3319 // XMM2 is for /2 encoding: 66 0F 71 /2 ib
3320 int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66);
3321 emit_byte(0x71);
3322 emit_byte(0xC0 | encode);
3323 emit_byte(shift & 0xFF);
3324 }
3326 void Assembler::psrld(XMMRegister dst, int shift) {
3327 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3328 // XMM2 is for /2 encoding: 66 0F 72 /2 ib
3329 int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66);
3330 emit_byte(0x72);
3331 emit_byte(0xC0 | encode);
3332 emit_byte(shift & 0xFF);
3333 }
3335 void Assembler::psrlq(XMMRegister dst, int shift) {
3336   // Do not confuse it with the psrldq SSE2 instruction, which
3337   // shifts the 128-bit value in an xmm register by a number of bytes.
3338 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3339 // XMM2 is for /2 encoding: 66 0F 73 /2 ib
3340 int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66);
3341 emit_byte(0x73);
3342 emit_byte(0xC0 | encode);
3343 emit_byte(shift & 0xFF);
3344 }
3346 void Assembler::psrlw(XMMRegister dst, XMMRegister shift) {
3347 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3348 emit_simd_arith(0xD1, dst, shift, VEX_SIMD_66);
3349 }
3351 void Assembler::psrld(XMMRegister dst, XMMRegister shift) {
3352 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3353 emit_simd_arith(0xD2, dst, shift, VEX_SIMD_66);
3354 }
3356 void Assembler::psrlq(XMMRegister dst, XMMRegister shift) {
3357 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3358 emit_simd_arith(0xD3, dst, shift, VEX_SIMD_66);
3359 }
3361 void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
3362 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3363   // XMM2 is for /2 encoding: 66 0F 71 /2 ib
3364 emit_vex_arith(0x71, xmm2, dst, src, VEX_SIMD_66, vector256);
3365 emit_byte(shift & 0xFF);
3366 }
3368 void Assembler::vpsrld(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
3369 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3370   // XMM2 is for /2 encoding: 66 0F 72 /2 ib
3371 emit_vex_arith(0x72, xmm2, dst, src, VEX_SIMD_66, vector256);
3372 emit_byte(shift & 0xFF);
3373 }
3375 void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
3376 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3377 // XMM2 is for /2 encoding: 66 0F 73 /2 ib
3378 emit_vex_arith(0x73, xmm2, dst, src, VEX_SIMD_66, vector256);
3379 emit_byte(shift & 0xFF);
3380 }
3382 void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
3383 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3384 emit_vex_arith(0xD1, dst, src, shift, VEX_SIMD_66, vector256);
3385 }
3387 void Assembler::vpsrld(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
3388 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3389 emit_vex_arith(0xD2, dst, src, shift, VEX_SIMD_66, vector256);
3390 }
3392 void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
3393 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3394 emit_vex_arith(0xD3, dst, src, shift, VEX_SIMD_66, vector256);
3395 }
3397 // Shift packed integers arithmetically right by the specified number of bits.
3398 void Assembler::psraw(XMMRegister dst, int shift) {
3399 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3400 // XMM4 is for /4 encoding: 66 0F 71 /4 ib
3401 int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66);
3402 emit_byte(0x71);
3403 emit_byte(0xC0 | encode);
3404 emit_byte(shift & 0xFF);
3405 }
3407 void Assembler::psrad(XMMRegister dst, int shift) {
3408 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3409 // XMM4 is for /4 encoding: 66 0F 72 /4 ib
3410 int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66);
3411 emit_byte(0x72);
3412 emit_byte(0xC0 | encode);
3413 emit_byte(shift & 0xFF);
3414 }
3416 void Assembler::psraw(XMMRegister dst, XMMRegister shift) {
3417 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3418 emit_simd_arith(0xE1, dst, shift, VEX_SIMD_66);
3419 }
3421 void Assembler::psrad(XMMRegister dst, XMMRegister shift) {
3422 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3423 emit_simd_arith(0xE2, dst, shift, VEX_SIMD_66);
3424 }
3426 void Assembler::vpsraw(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
3427 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3428 // XMM4 is for /4 encoding: 66 0F 71 /4 ib
3429 emit_vex_arith(0x71, xmm4, dst, src, VEX_SIMD_66, vector256);
3430 emit_byte(shift & 0xFF);
3431 }
3433 void Assembler::vpsrad(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
3434 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3435   // XMM4 is for /4 encoding: 66 0F 72 /4 ib
3436 emit_vex_arith(0x72, xmm4, dst, src, VEX_SIMD_66, vector256);
3437 emit_byte(shift & 0xFF);
3438 }
3440 void Assembler::vpsraw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
3441 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3442 emit_vex_arith(0xE1, dst, src, shift, VEX_SIMD_66, vector256);
3443 }
3445 void Assembler::vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
3446 assert((VM_Version::supports_avx() && !vector256) || VM_Version::supports_avx2(), "256-bit integer vectors require AVX2");
3447 emit_vex_arith(0xE2, dst, src, shift, VEX_SIMD_66, vector256);
3448 }
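// Note: there is no packed 64-bit arithmetic right shift (no psraq) in
// SSE/AVX2, which is why only the word and dword forms appear above.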
3451 // AND packed integers
3452 void Assembler::pand(XMMRegister dst, XMMRegister src) {
3453 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3454 emit_simd_arith(0xDB, dst, src, VEX_SIMD_66);
3455 }
3457 void Assembler::vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
3458 assert((VM_Version::supports_avx() && !vector256) || VM_Version::supports_avx2(), "256-bit integer vectors require AVX2");
3459 emit_vex_arith(0xDB, dst, nds, src, VEX_SIMD_66, vector256);
3460 }
3462 void Assembler::vpand(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
3463 assert((VM_Version::supports_avx() && !vector256) || VM_Version::supports_avx2(), "256-bit integer vectors require AVX2");
3464 emit_vex_arith(0xDB, dst, nds, src, VEX_SIMD_66, vector256);
3465 }
3467 void Assembler::por(XMMRegister dst, XMMRegister src) {
3468 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3469 emit_simd_arith(0xEB, dst, src, VEX_SIMD_66);
3470 }
3472 void Assembler::vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
3473 assert((VM_Version::supports_avx() && !vector256) || VM_Version::supports_avx2(), "256-bit integer vectors require AVX2");
3474 emit_vex_arith(0xEB, dst, nds, src, VEX_SIMD_66, vector256);
3475 }
3477 void Assembler::vpor(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
3478 assert((VM_Version::supports_avx() && !vector256) || VM_Version::supports_avx2(), "256-bit integer vectors require AVX2");
3479 emit_vex_arith(0xEB, dst, nds, src, VEX_SIMD_66, vector256);
3480 }
3482 void Assembler::pxor(XMMRegister dst, XMMRegister src) {
3483 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3484 emit_simd_arith(0xEF, dst, src, VEX_SIMD_66);
3485 }
3487 void Assembler::vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
3488 assert((VM_Version::supports_avx() && !vector256) || VM_Version::supports_avx2(), "256-bit integer vectors require AVX2");
3489 emit_vex_arith(0xEF, dst, nds, src, VEX_SIMD_66, vector256);
3490 }
3492 void Assembler::vpxor(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
3493 assert((VM_Version::supports_avx() && !vector256) || VM_Version::supports_avx2(), "256-bit integer vectors require AVX2");
3494 emit_vex_arith(0xEF, dst, nds, src, VEX_SIMD_66, vector256);
3495 }
3498 void Assembler::vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
3499 assert(VM_Version::supports_avx(), "");
3500 bool vector256 = true;
3501 int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_3A);
3502 emit_byte(0x18);
3503 emit_byte(0xC0 | encode);
3504 // 0x00 - insert into lower 128 bits
3505 // 0x01 - insert into upper 128 bits
3506 emit_byte(0x01);
3507 }
3509 void Assembler::vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
3510 assert(VM_Version::supports_avx2(), "");
3511 bool vector256 = true;
3512 int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_3A);
3513 emit_byte(0x38);
3514 emit_byte(0xC0 | encode);
3515 // 0x00 - insert into lower 128 bits
3516 // 0x01 - insert into upper 128 bits
3517 emit_byte(0x01);
3518 }
3520 void Assembler::vzeroupper() {
3521 assert(VM_Version::supports_avx(), "");
3522 (void)vex_prefix_and_encode(xmm0, xmm0, xmm0, VEX_SIMD_NONE);
3523 emit_byte(0x77);
3524 }
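// vzeroupper clears the upper 128 bits of all ymm registers. It is emitted
// before transitions from AVX code to legacy SSE code to avoid the
// AVX/SSE transition penalty.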
3527 #ifndef _LP64
3528 // 32bit only pieces of the assembler
3530 void Assembler::cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec) {
3531 // NO PREFIX AS NEVER 64BIT
3532 InstructionMark im(this);
3533 emit_byte(0x81);
3534 emit_byte(0xF8 | src1->encoding());
3535 emit_data(imm32, rspec, 0);
3536 }
3538 void Assembler::cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec) {
3539 // NO PREFIX AS NEVER 64BIT (not even 32bit versions of 64bit regs)
3540 InstructionMark im(this);
3541 emit_byte(0x81);
3542 emit_operand(rdi, src1);
3543 emit_data(imm32, rspec, 0);
3544 }
3546 // The 64-bit cmpxchg (on a 32-bit platform) compares the value at adr with the contents of rdx:rax,
3547 // and stores rcx:rbx into adr if the values are equal; otherwise, the value at adr is loaded
3548 // into rdx:rax. The ZF is set if the compared values were equal, and cleared otherwise.
3549 void Assembler::cmpxchg8(Address adr) {
3550 InstructionMark im(this);
3551 emit_byte(0x0F);
3552 emit_byte(0xC7);
3553 emit_operand(rcx, adr);
3554 }
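// Note: typically emitted after a lock() prefix to get the atomic
// LOCK CMPXCHG8B used for 64-bit compare-and-swap on 32-bit x86.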
3556 void Assembler::decl(Register dst) {
3557 // Don't use it directly. Use MacroAssembler::decrementl() instead.
3558 emit_byte(0x48 | dst->encoding());
3559 }
3561 #endif // _LP64
3563 // 64bit typically doesn't use the x87 but needs it for the trig funcs
3565 void Assembler::fabs() {
3566 emit_byte(0xD9);
3567 emit_byte(0xE1);
3568 }
3570 void Assembler::fadd(int i) {
3571 emit_farith(0xD8, 0xC0, i);
3572 }
3574 void Assembler::fadd_d(Address src) {
3575 InstructionMark im(this);
3576 emit_byte(0xDC);
3577 emit_operand32(rax, src);
3578 }
3580 void Assembler::fadd_s(Address src) {
3581 InstructionMark im(this);
3582 emit_byte(0xD8);
3583 emit_operand32(rax, src);
3584 }
3586 void Assembler::fadda(int i) {
3587 emit_farith(0xDC, 0xC0, i);
3588 }
3590 void Assembler::faddp(int i) {
3591 emit_farith(0xDE, 0xC0, i);
3592 }
3594 void Assembler::fchs() {
3595 emit_byte(0xD9);
3596 emit_byte(0xE0);
3597 }
3599 void Assembler::fcom(int i) {
3600 emit_farith(0xD8, 0xD0, i);
3601 }
3603 void Assembler::fcomp(int i) {
3604 emit_farith(0xD8, 0xD8, i);
3605 }
3607 void Assembler::fcomp_d(Address src) {
3608 InstructionMark im(this);
3609 emit_byte(0xDC);
3610 emit_operand32(rbx, src);
3611 }
3613 void Assembler::fcomp_s(Address src) {
3614 InstructionMark im(this);
3615 emit_byte(0xD8);
3616 emit_operand32(rbx, src);
3617 }
3619 void Assembler::fcompp() {
3620 emit_byte(0xDE);
3621 emit_byte(0xD9);
3622 }
3624 void Assembler::fcos() {
3625 emit_byte(0xD9);
3626 emit_byte(0xFF);
3627 }
3629 void Assembler::fdecstp() {
3630 emit_byte(0xD9);
3631 emit_byte(0xF6);
3632 }
3634 void Assembler::fdiv(int i) {
3635 emit_farith(0xD8, 0xF0, i);
3636 }
3638 void Assembler::fdiv_d(Address src) {
3639 InstructionMark im(this);
3640 emit_byte(0xDC);
3641 emit_operand32(rsi, src);
3642 }
3644 void Assembler::fdiv_s(Address src) {
3645 InstructionMark im(this);
3646 emit_byte(0xD8);
3647 emit_operand32(rsi, src);
3648 }
3650 void Assembler::fdiva(int i) {
3651 emit_farith(0xDC, 0xF8, i);
3652 }
3654 // Note: The Intel manual (Pentium Processor User's Manual, Vol.3, 1994)
3655 // is erroneous for some of the floating-point instructions below.
3657 void Assembler::fdivp(int i) {
3658 emit_farith(0xDE, 0xF8, i); // ST(0) <- ST(0) / ST(1) and pop (Intel manual wrong)
3659 }
3661 void Assembler::fdivr(int i) {
3662 emit_farith(0xD8, 0xF8, i);
3663 }
3665 void Assembler::fdivr_d(Address src) {
3666 InstructionMark im(this);
3667 emit_byte(0xDC);
3668 emit_operand32(rdi, src);
3669 }
3671 void Assembler::fdivr_s(Address src) {
3672 InstructionMark im(this);
3673 emit_byte(0xD8);
3674 emit_operand32(rdi, src);
3675 }
3677 void Assembler::fdivra(int i) {
3678 emit_farith(0xDC, 0xF0, i);
3679 }
3681 void Assembler::fdivrp(int i) {
3682 emit_farith(0xDE, 0xF0, i); // ST(0) <- ST(1) / ST(0) and pop (Intel manual wrong)
3683 }
3685 void Assembler::ffree(int i) {
3686 emit_farith(0xDD, 0xC0, i);
3687 }
3689 void Assembler::fild_d(Address adr) {
3690 InstructionMark im(this);
3691 emit_byte(0xDF);
3692 emit_operand32(rbp, adr);
3693 }
3695 void Assembler::fild_s(Address adr) {
3696 InstructionMark im(this);
3697 emit_byte(0xDB);
3698 emit_operand32(rax, adr);
3699 }
3701 void Assembler::fincstp() {
3702 emit_byte(0xD9);
3703 emit_byte(0xF7);
3704 }
3706 void Assembler::finit() {
3707 emit_byte(0x9B);
3708 emit_byte(0xDB);
3709 emit_byte(0xE3);
3710 }
3712 void Assembler::fist_s(Address adr) {
3713 InstructionMark im(this);
3714 emit_byte(0xDB);
3715 emit_operand32(rdx, adr);
3716 }
3718 void Assembler::fistp_d(Address adr) {
3719 InstructionMark im(this);
3720 emit_byte(0xDF);
3721 emit_operand32(rdi, adr);
3722 }
3724 void Assembler::fistp_s(Address adr) {
3725 InstructionMark im(this);
3726 emit_byte(0xDB);
3727 emit_operand32(rbx, adr);
3728 }
3730 void Assembler::fld1() {
3731 emit_byte(0xD9);
3732 emit_byte(0xE8);
3733 }
3735 void Assembler::fld_d(Address adr) {
3736 InstructionMark im(this);
3737 emit_byte(0xDD);
3738 emit_operand32(rax, adr);
3739 }
3741 void Assembler::fld_s(Address adr) {
3742 InstructionMark im(this);
3743 emit_byte(0xD9);
3744 emit_operand32(rax, adr);
3745 }
3748 void Assembler::fld_s(int index) {
3749 emit_farith(0xD9, 0xC0, index);
3750 }
3752 void Assembler::fld_x(Address adr) {
3753 InstructionMark im(this);
3754 emit_byte(0xDB);
3755 emit_operand32(rbp, adr);
3756 }
3758 void Assembler::fldcw(Address src) {
3759 InstructionMark im(this);
3760 emit_byte(0xD9);
3761 emit_operand32(rbp, src);
3762 }
3764 void Assembler::fldenv(Address src) {
3765 InstructionMark im(this);
3766 emit_byte(0xD9);
3767 emit_operand32(rsp, src);
3768 }
3770 void Assembler::fldlg2() {
3771 emit_byte(0xD9);
3772 emit_byte(0xEC);
3773 }
3775 void Assembler::fldln2() {
3776 emit_byte(0xD9);
3777 emit_byte(0xED);
3778 }
3780 void Assembler::fldz() {
3781 emit_byte(0xD9);
3782 emit_byte(0xEE);
3783 }
3785 void Assembler::flog() {
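  // ln(x) = ln(2) * log2(x): load ln(2), swap it below x, then let fyl2x
  // compute ST(1) * log2(ST(0)) and pop.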
3786 fldln2();
3787 fxch();
3788 fyl2x();
3789 }
3791 void Assembler::flog10() {
3792 fldlg2();
3793 fxch();
3794 fyl2x();
3795 }
3797 void Assembler::fmul(int i) {
3798 emit_farith(0xD8, 0xC8, i);
3799 }
3801 void Assembler::fmul_d(Address src) {
3802 InstructionMark im(this);
3803 emit_byte(0xDC);
3804 emit_operand32(rcx, src);
3805 }
3807 void Assembler::fmul_s(Address src) {
3808 InstructionMark im(this);
3809 emit_byte(0xD8);
3810 emit_operand32(rcx, src);
3811 }
3813 void Assembler::fmula(int i) {
3814 emit_farith(0xDC, 0xC8, i);
3815 }
3817 void Assembler::fmulp(int i) {
3818 emit_farith(0xDE, 0xC8, i);
3819 }
3821 void Assembler::fnsave(Address dst) {
3822 InstructionMark im(this);
3823 emit_byte(0xDD);
3824 emit_operand32(rsi, dst);
3825 }
3827 void Assembler::fnstcw(Address src) {
3828 InstructionMark im(this);
3829 emit_byte(0x9B);
3830 emit_byte(0xD9);
3831 emit_operand32(rdi, src);
3832 }
3834 void Assembler::fnstsw_ax() {
3835 emit_byte(0xDF);
3836 emit_byte(0xE0);
3837 }
3839 void Assembler::fprem() {
3840 emit_byte(0xD9);
3841 emit_byte(0xF8);
3842 }
3844 void Assembler::fprem1() {
3845 emit_byte(0xD9);
3846 emit_byte(0xF5);
3847 }
3849 void Assembler::frstor(Address src) {
3850 InstructionMark im(this);
3851 emit_byte(0xDD);
3852 emit_operand32(rsp, src);
3853 }
3855 void Assembler::fsin() {
3856 emit_byte(0xD9);
3857 emit_byte(0xFE);
3858 }
3860 void Assembler::fsqrt() {
3861 emit_byte(0xD9);
3862 emit_byte(0xFA);
3863 }
3865 void Assembler::fst_d(Address adr) {
3866 InstructionMark im(this);
3867 emit_byte(0xDD);
3868 emit_operand32(rdx, adr);
3869 }
3871 void Assembler::fst_s(Address adr) {
3872 InstructionMark im(this);
3873 emit_byte(0xD9);
3874 emit_operand32(rdx, adr);
3875 }
3877 void Assembler::fstp_d(Address adr) {
3878 InstructionMark im(this);
3879 emit_byte(0xDD);
3880 emit_operand32(rbx, adr);
3881 }
3883 void Assembler::fstp_d(int index) {
3884 emit_farith(0xDD, 0xD8, index);
3885 }
3887 void Assembler::fstp_s(Address adr) {
3888 InstructionMark im(this);
3889 emit_byte(0xD9);
3890 emit_operand32(rbx, adr);
3891 }
3893 void Assembler::fstp_x(Address adr) {
3894 InstructionMark im(this);
3895 emit_byte(0xDB);
3896 emit_operand32(rdi, adr);
3897 }
3899 void Assembler::fsub(int i) {
3900 emit_farith(0xD8, 0xE0, i);
3901 }
3903 void Assembler::fsub_d(Address src) {
3904 InstructionMark im(this);
3905 emit_byte(0xDC);
3906 emit_operand32(rsp, src);
3907 }
3909 void Assembler::fsub_s(Address src) {
3910 InstructionMark im(this);
3911 emit_byte(0xD8);
3912 emit_operand32(rsp, src);
3913 }
3915 void Assembler::fsuba(int i) {
3916 emit_farith(0xDC, 0xE8, i);
3917 }
3919 void Assembler::fsubp(int i) {
3920 emit_farith(0xDE, 0xE8, i); // ST(0) <- ST(0) - ST(1) and pop (Intel manual wrong)
3921 }
3923 void Assembler::fsubr(int i) {
3924 emit_farith(0xD8, 0xE8, i);
3925 }
3927 void Assembler::fsubr_d(Address src) {
3928 InstructionMark im(this);
3929 emit_byte(0xDC);
3930 emit_operand32(rbp, src);
3931 }
3933 void Assembler::fsubr_s(Address src) {
3934 InstructionMark im(this);
3935 emit_byte(0xD8);
3936 emit_operand32(rbp, src);
3937 }
3939 void Assembler::fsubra(int i) {
3940 emit_farith(0xDC, 0xE0, i);
3941 }
3943 void Assembler::fsubrp(int i) {
3944 emit_farith(0xDE, 0xE0, i); // ST(0) <- ST(1) - ST(0) and pop (Intel manual wrong)
3945 }
3947 void Assembler::ftan() {
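  // D9 F2 is FPTAN, which pushes a 1.0 on top of the result;
  // DD D8 (FSTP ST(0)) pops that 1.0 again.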
3948 emit_byte(0xD9);
3949 emit_byte(0xF2);
3950 emit_byte(0xDD);
3951 emit_byte(0xD8);
3952 }
3954 void Assembler::ftst() {
3955 emit_byte(0xD9);
3956 emit_byte(0xE4);
3957 }
3959 void Assembler::fucomi(int i) {
3960 // make sure the instruction is supported (introduced for P6, together with cmov)
3961 guarantee(VM_Version::supports_cmov(), "illegal instruction");
3962 emit_farith(0xDB, 0xE8, i);
3963 }
3965 void Assembler::fucomip(int i) {
3966 // make sure the instruction is supported (introduced for P6, together with cmov)
3967 guarantee(VM_Version::supports_cmov(), "illegal instruction");
3968 emit_farith(0xDF, 0xE8, i);
3969 }
3971 void Assembler::fwait() {
3972 emit_byte(0x9B);
3973 }
3975 void Assembler::fxch(int i) {
3976 emit_farith(0xD9, 0xC8, i);
3977 }
3979 void Assembler::fyl2x() {
3980 emit_byte(0xD9);
3981 emit_byte(0xF1);
3982 }
3984 void Assembler::frndint() {
3985 emit_byte(0xD9);
3986 emit_byte(0xFC);
3987 }
3989 void Assembler::f2xm1() {
3990 emit_byte(0xD9);
3991 emit_byte(0xF0);
3992 }
3994 void Assembler::fldl2e() {
3995 emit_byte(0xD9);
3996 emit_byte(0xEA);
3997 }
3999 // SSE SIMD prefix byte values corresponding to VexSimdPrefix encoding.
4000 static int simd_pre[4] = { 0, 0x66, 0xF3, 0xF2 };
4001 // SSE opcode second byte values (first is 0x0F) corresponding to VexOpcode encoding.
4002 static int simd_opc[4] = { 0, 0, 0x38, 0x3A };
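// For example, pre = VEX_SIMD_66 together with opc = VEX_OPCODE_0F_38
// corresponds to the legacy SSE escape byte sequence 66 0F 38 <opcode>.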
4004 // Generate SSE legacy REX prefix and SIMD opcode based on VEX encoding.
4005 void Assembler::rex_prefix(Address adr, XMMRegister xreg, VexSimdPrefix pre, VexOpcode opc, bool rex_w) {
4006 if (pre > 0) {
4007 emit_byte(simd_pre[pre]);
4008 }
4009 if (rex_w) {
4010 prefixq(adr, xreg);
4011 } else {
4012 prefix(adr, xreg);
4013 }
4014 if (opc > 0) {
4015 emit_byte(0x0F);
4016 int opc2 = simd_opc[opc];
4017 if (opc2 > 0) {
4018 emit_byte(opc2);
4019 }
4020 }
4021 }
4023 int Assembler::rex_prefix_and_encode(int dst_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, bool rex_w) {
4024 if (pre > 0) {
4025 emit_byte(simd_pre[pre]);
4026 }
4027 int encode = (rex_w) ? prefixq_and_encode(dst_enc, src_enc) :
4028 prefix_and_encode(dst_enc, src_enc);
4029 if (opc > 0) {
4030 emit_byte(0x0F);
4031 int opc2 = simd_opc[opc];
4032 if (opc2 > 0) {
4033 emit_byte(opc2);
4034 }
4035 }
4036 return encode;
4037 }
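// VEX prefix layout (see the Intel SDM, Vol. 2):
//   3-byte form: C4 [~R ~X ~B m-mmmm] [W vvvv L pp]
//   2-byte form: C5 [~R vvvv L pp]
// where vvvv holds the inverted encoding of the NDS register, L selects
// 256-bit vectors, and pp/m-mmmm stand for the implied SIMD prefix and
// opcode escape bytes. The inversions explain the ~ masking below.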
4040 void Assembler::vex_prefix(bool vex_r, bool vex_b, bool vex_x, bool vex_w, int nds_enc, VexSimdPrefix pre, VexOpcode opc, bool vector256) {
4041 if (vex_b || vex_x || vex_w || (opc == VEX_OPCODE_0F_38) || (opc == VEX_OPCODE_0F_3A)) {
4042 prefix(VEX_3bytes);
4044 int byte1 = (vex_r ? VEX_R : 0) | (vex_x ? VEX_X : 0) | (vex_b ? VEX_B : 0);
4045 byte1 = (~byte1) & 0xE0;
4046 byte1 |= opc;
4047 emit_byte(byte1);
4049 int byte2 = ((~nds_enc) & 0xf) << 3;
4050 byte2 |= (vex_w ? VEX_W : 0) | (vector256 ? 4 : 0) | pre;
4051 emit_byte(byte2);
4052 } else {
4053 prefix(VEX_2bytes);
4055 int byte1 = vex_r ? VEX_R : 0;
4056 byte1 = (~byte1) & 0x80;
4057 byte1 |= ((~nds_enc) & 0xf) << 3;
4058 byte1 |= (vector256 ? 4 : 0) | pre;
4059 emit_byte(byte1);
4060 }
4061 }
4063 void Assembler::vex_prefix(Address adr, int nds_enc, int xreg_enc, VexSimdPrefix pre, VexOpcode opc, bool vex_w, bool vector256){
4064 bool vex_r = (xreg_enc >= 8);
4065 bool vex_b = adr.base_needs_rex();
4066 bool vex_x = adr.index_needs_rex();
4067 vex_prefix(vex_r, vex_b, vex_x, vex_w, nds_enc, pre, opc, vector256);
4068 }
4070 int Assembler::vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, bool vex_w, bool vector256) {
4071 bool vex_r = (dst_enc >= 8);
4072 bool vex_b = (src_enc >= 8);
4073 bool vex_x = false;
4074 vex_prefix(vex_r, vex_b, vex_x, vex_w, nds_enc, pre, opc, vector256);
4075 return (((dst_enc & 7) << 3) | (src_enc & 7));
4076 }
4079 void Assembler::simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr, VexSimdPrefix pre, VexOpcode opc, bool rex_w, bool vector256) {
4080 if (UseAVX > 0) {
4081 int xreg_enc = xreg->encoding();
4082 int nds_enc = nds->is_valid() ? nds->encoding() : 0;
4083 vex_prefix(adr, nds_enc, xreg_enc, pre, opc, rex_w, vector256);
4084 } else {
4085 assert((nds == xreg) || (nds == xnoreg), "wrong sse encoding");
4086 rex_prefix(adr, xreg, pre, opc, rex_w);
4087 }
4088 }
4090 int Assembler::simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src, VexSimdPrefix pre, VexOpcode opc, bool rex_w, bool vector256) {
4091 int dst_enc = dst->encoding();
4092 int src_enc = src->encoding();
4093 if (UseAVX > 0) {
4094 int nds_enc = nds->is_valid() ? nds->encoding() : 0;
4095 return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, rex_w, vector256);
4096 } else {
4097 assert((nds == dst) || (nds == src) || (nds == xnoreg), "wrong sse encoding");
4098 return rex_prefix_and_encode(dst_enc, src_enc, pre, opc, rex_w);
4099 }
4100 }
4102 void Assembler::emit_simd_arith(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre) {
4103 InstructionMark im(this);
4104 simd_prefix(dst, dst, src, pre);
4105 emit_byte(opcode);
4106 emit_operand(dst, src);
4107 }
4109 void Assembler::emit_simd_arith(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre) {
4110 int encode = simd_prefix_and_encode(dst, dst, src, pre);
4111 emit_byte(opcode);
4112 emit_byte(0xC0 | encode);
4113 }
4115 // Versions with no second source register (non-destructive source).
4116 void Assembler::emit_simd_arith_nonds(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre) {
4117 InstructionMark im(this);
4118 simd_prefix(dst, xnoreg, src, pre);
4119 emit_byte(opcode);
4120 emit_operand(dst, src);
4121 }
4123 void Assembler::emit_simd_arith_nonds(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre) {
4124 int encode = simd_prefix_and_encode(dst, xnoreg, src, pre);
4125 emit_byte(opcode);
4126 emit_byte(0xC0 | encode);
4127 }
4129 // 3-operand AVX instructions
4130 void Assembler::emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds,
4131 Address src, VexSimdPrefix pre, bool vector256) {
4132 InstructionMark im(this);
4133 vex_prefix(dst, nds, src, pre, vector256);
4134 emit_byte(opcode);
4135 emit_operand(dst, src);
4136 }
4138 void Assembler::emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds,
4139 XMMRegister src, VexSimdPrefix pre, bool vector256) {
4140 int encode = vex_prefix_and_encode(dst, nds, src, pre, vector256);
4141 emit_byte(opcode);
4142 emit_byte(0xC0 | encode);
4143 }
4145 #ifndef _LP64
4147 void Assembler::incl(Register dst) {
4148 // Don't use it directly. Use MacroAssembler::incrementl() instead.
4149 emit_byte(0x40 | dst->encoding());
4150 }
4152 void Assembler::lea(Register dst, Address src) {
4153 leal(dst, src);
4154 }
4156 void Assembler::mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec) {
4157 InstructionMark im(this);
4158 emit_byte(0xC7);
4159 emit_operand(rax, dst);
4160 emit_data((int)imm32, rspec, 0);
4161 }
4163 void Assembler::mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec) {
4164 InstructionMark im(this);
4165 int encode = prefix_and_encode(dst->encoding());
4166 emit_byte(0xB8 | encode);
4167 emit_data((int)imm32, rspec, 0);
4168 }
4170 void Assembler::popa() { // 32bit
4171 emit_byte(0x61);
4172 }
4174 void Assembler::push_literal32(int32_t imm32, RelocationHolder const& rspec) {
4175 InstructionMark im(this);
4176 emit_byte(0x68);
4177 emit_data(imm32, rspec, 0);
4178 }
4180 void Assembler::pusha() { // 32bit
4181 emit_byte(0x60);
4182 }
4184 void Assembler::set_byte_if_not_zero(Register dst) {
4185 emit_byte(0x0F);
4186 emit_byte(0x95);
4187 emit_byte(0xE0 | dst->encoding());
4188 }
4190 void Assembler::shldl(Register dst, Register src) {
4191 emit_byte(0x0F);
4192 emit_byte(0xA5);
4193 emit_byte(0xC0 | src->encoding() << 3 | dst->encoding());
4194 }
4196 void Assembler::shrdl(Register dst, Register src) {
4197 emit_byte(0x0F);
4198 emit_byte(0xAD);
4199 emit_byte(0xC0 | src->encoding() << 3 | dst->encoding());
4200 }
4202 #else // LP64
4204 void Assembler::set_byte_if_not_zero(Register dst) {
4205 int enc = prefix_and_encode(dst->encoding(), true);
4206 emit_byte(0x0F);
4207 emit_byte(0x95);
4208 emit_byte(0xE0 | enc);
4209 }
4211 // 64bit only pieces of the assembler
4212 // This should only be used by 64bit instructions that can use rip-relative
4213 // addressing; it cannot be used by instructions that want an immediate value.
4215 bool Assembler::reachable(AddressLiteral adr) {
4216 int64_t disp;
4217 // None will force a 64bit literal to the code stream. Likely a placeholder
4218 // for something that will be patched later and we need to be certain it will
4219 // always be reachable.
4220 if (adr.reloc() == relocInfo::none) {
4221 return false;
4222 }
4223 if (adr.reloc() == relocInfo::internal_word_type) {
4224 // This should be rip relative and easily reachable.
4225 return true;
4226 }
4227 if (adr.reloc() == relocInfo::virtual_call_type ||
4228 adr.reloc() == relocInfo::opt_virtual_call_type ||
4229 adr.reloc() == relocInfo::static_call_type ||
4230 adr.reloc() == relocInfo::static_stub_type ) {
4231 // This should be rip relative within the code cache and easily
4232 // reachable until we get huge code caches. (At which point
4233 // ic code is going to have issues).
4234 return true;
4235 }
4236 if (adr.reloc() != relocInfo::external_word_type &&
4237 adr.reloc() != relocInfo::poll_return_type && // these are really external_word but need special
4238 adr.reloc() != relocInfo::poll_type && // relocs to identify them
4239 adr.reloc() != relocInfo::runtime_call_type ) {
4240 return false;
4241 }
4243 // Stress the correction code
4244 if (ForceUnreachable) {
4245 // Must be a runtime_call reloc; see if it is in the codecache.
4246 // Flipping stuff in the codecache to be unreachable causes issues
4247 // with things like inline caches where the additional instructions
4248 // are not handled.
4249 if (CodeCache::find_blob(adr._target) == NULL) {
4250 return false;
4251 }
4252 }
4253 // For external_word_type/runtime_call_type, if the target is reachable both
4254 // from where we are now (possibly a temp buffer) and from anywhere we might
4255 // end up in the codeCache, then it is always reachable.
4256 // This would have to change to be more pessimistic if we ever
4257 // save/restore shared code.
4258 disp = (int64_t)adr._target - ((int64_t)CodeCache::low_bound() + sizeof(int));
4259 if (!is_simm32(disp)) return false;
4260 disp = (int64_t)adr._target - ((int64_t)CodeCache::high_bound() + sizeof(int));
4261 if (!is_simm32(disp)) return false;
4263 disp = (int64_t)adr._target - ((int64_t)_code_pos + sizeof(int));
4265 // Because rip relative is a disp + address_of_next_instruction and we
4266 // don't know the value of address_of_next_instruction we apply a fudge factor
4267 // to make sure we will be ok no matter the size of the instruction we get placed into.
4268 // We don't have to fudge the checks above here because they are already worst case.
4270 // 12 == override/rex byte, opcode byte, rm byte, sib byte, a 4-byte disp, 4-byte literal
4271 // + 4 because better safe than sorry.
4272 const int fudge = 12 + 4;
4273 if (disp < 0) {
4274 disp -= fudge;
4275 } else {
4276 disp += fudge;
4277 }
4278 return is_simm32(disp);
4279 }
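// When reachable() returns false (e.g. for relocInfo::none), callers such as
// MacroAssembler::lea typically fall back to materializing the full 64-bit
// literal (mov64) instead of using a rip-relative form.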
4281 // Returns true if the polling page is unreachable from the code cache using
4282 // rip-relative addressing.
4283 bool Assembler::is_polling_page_far() {
4284 intptr_t addr = (intptr_t)os::get_polling_page();
4285 return ForceUnreachable ||
4286 !is_simm32(addr - (intptr_t)CodeCache::low_bound()) ||
4287 !is_simm32(addr - (intptr_t)CodeCache::high_bound());
4288 }
4290 void Assembler::emit_data64(jlong data,
4291 relocInfo::relocType rtype,
4292 int format) {
4293 if (rtype == relocInfo::none) {
4294 emit_long64(data);
4295 } else {
4296 emit_data64(data, Relocation::spec_simple(rtype), format);
4297 }
4298 }
4300 void Assembler::emit_data64(jlong data,
4301 RelocationHolder const& rspec,
4302 int format) {
4303 assert(imm_operand == 0, "default format must be immediate in this file");
4304 assert(imm_operand == format, "must be immediate");
4305 assert(inst_mark() != NULL, "must be inside InstructionMark");
4306 // Do not use AbstractAssembler::relocate, which is not intended for
4307 // embedded words. Instead, relocate to the enclosing instruction.
4308 code_section()->relocate(inst_mark(), rspec, format);
4309 #ifdef ASSERT
4310 check_relocation(rspec, format);
4311 #endif
4312 emit_long64(data);
4313 }
4315 int Assembler::prefix_and_encode(int reg_enc, bool byteinst) {
4316 if (reg_enc >= 8) {
4317 prefix(REX_B);
4318 reg_enc -= 8;
4319 } else if (byteinst && reg_enc >= 4) {
4320 prefix(REX);
4321 }
4322 return reg_enc;
4323 }
4325 int Assembler::prefixq_and_encode(int reg_enc) {
4326 if (reg_enc < 8) {
4327 prefix(REX_W);
4328 } else {
4329 prefix(REX_WB);
4330 reg_enc -= 8;
4331 }
4332 return reg_enc;
4333 }
4335 int Assembler::prefix_and_encode(int dst_enc, int src_enc, bool byteinst) {
4336 if (dst_enc < 8) {
4337 if (src_enc >= 8) {
4338 prefix(REX_B);
4339 src_enc -= 8;
4340 } else if (byteinst && src_enc >= 4) {
4341 prefix(REX);
4342 }
4343 } else {
4344 if (src_enc < 8) {
4345 prefix(REX_R);
4346 } else {
4347 prefix(REX_RB);
4348 src_enc -= 8;
4349 }
4350 dst_enc -= 8;
4351 }
4352 return dst_enc << 3 | src_enc;
4353 }
4355 int Assembler::prefixq_and_encode(int dst_enc, int src_enc) {
4356 if (dst_enc < 8) {
4357 if (src_enc < 8) {
4358 prefix(REX_W);
4359 } else {
4360 prefix(REX_WB);
4361 src_enc -= 8;
4362 }
4363 } else {
4364 if (src_enc < 8) {
4365 prefix(REX_WR);
4366 } else {
4367 prefix(REX_WRB);
4368 src_enc -= 8;
4369 }
4370 dst_enc -= 8;
4371 }
4372 return dst_enc << 3 | src_enc;
4373 }
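// Worked example: movq(r8, rax) calls prefixq_and_encode(8, 0), which emits
// REX_WR (0x4C) and returns 0, so the final encoding is 4C 8B C0.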
4375 void Assembler::prefix(Register reg) {
4376 if (reg->encoding() >= 8) {
4377 prefix(REX_B);
4378 }
4379 }
4381 void Assembler::prefix(Address adr) {
4382 if (adr.base_needs_rex()) {
4383 if (adr.index_needs_rex()) {
4384 prefix(REX_XB);
4385 } else {
4386 prefix(REX_B);
4387 }
4388 } else {
4389 if (adr.index_needs_rex()) {
4390 prefix(REX_X);
4391 }
4392 }
4393 }
4395 void Assembler::prefixq(Address adr) {
4396 if (adr.base_needs_rex()) {
4397 if (adr.index_needs_rex()) {
4398 prefix(REX_WXB);
4399 } else {
4400 prefix(REX_WB);
4401 }
4402 } else {
4403 if (adr.index_needs_rex()) {
4404 prefix(REX_WX);
4405 } else {
4406 prefix(REX_W);
4407 }
4408 }
4409 }
4412 void Assembler::prefix(Address adr, Register reg, bool byteinst) {
4413 if (reg->encoding() < 8) {
4414 if (adr.base_needs_rex()) {
4415 if (adr.index_needs_rex()) {
4416 prefix(REX_XB);
4417 } else {
4418 prefix(REX_B);
4419 }
4420 } else {
4421 if (adr.index_needs_rex()) {
4422 prefix(REX_X);
4423 } else if (byteinst && reg->encoding() >= 4 ) {
4424 prefix(REX);
4425 }
4426 }
4427 } else {
4428 if (adr.base_needs_rex()) {
4429 if (adr.index_needs_rex()) {
4430 prefix(REX_RXB);
4431 } else {
4432 prefix(REX_RB);
4433 }
4434 } else {
4435 if (adr.index_needs_rex()) {
4436 prefix(REX_RX);
4437 } else {
4438 prefix(REX_R);
4439 }
4440 }
4441 }
4442 }
4444 void Assembler::prefixq(Address adr, Register src) {
4445 if (src->encoding() < 8) {
4446 if (adr.base_needs_rex()) {
4447 if (adr.index_needs_rex()) {
4448 prefix(REX_WXB);
4449 } else {
4450 prefix(REX_WB);
4451 }
4452 } else {
4453 if (adr.index_needs_rex()) {
4454 prefix(REX_WX);
4455 } else {
4456 prefix(REX_W);
4457 }
4458 }
4459 } else {
4460 if (adr.base_needs_rex()) {
4461 if (adr.index_needs_rex()) {
4462 prefix(REX_WRXB);
4463 } else {
4464 prefix(REX_WRB);
4465 }
4466 } else {
4467 if (adr.index_needs_rex()) {
4468 prefix(REX_WRX);
4469 } else {
4470 prefix(REX_WR);
4471 }
4472 }
4473 }
4474 }
4476 void Assembler::prefix(Address adr, XMMRegister reg) {
4477 if (reg->encoding() < 8) {
4478 if (adr.base_needs_rex()) {
4479 if (adr.index_needs_rex()) {
4480 prefix(REX_XB);
4481 } else {
4482 prefix(REX_B);
4483 }
4484 } else {
4485 if (adr.index_needs_rex()) {
4486 prefix(REX_X);
4487 }
4488 }
4489 } else {
4490 if (adr.base_needs_rex()) {
4491 if (adr.index_needs_rex()) {
4492 prefix(REX_RXB);
4493 } else {
4494 prefix(REX_RB);
4495 }
4496 } else {
4497 if (adr.index_needs_rex()) {
4498 prefix(REX_RX);
4499 } else {
4500 prefix(REX_R);
4501 }
4502 }
4503 }
4504 }
4506 void Assembler::prefixq(Address adr, XMMRegister src) {
4507 if (src->encoding() < 8) {
4508 if (adr.base_needs_rex()) {
4509 if (adr.index_needs_rex()) {
4510 prefix(REX_WXB);
4511 } else {
4512 prefix(REX_WB);
4513 }
4514 } else {
4515 if (adr.index_needs_rex()) {
4516 prefix(REX_WX);
4517 } else {
4518 prefix(REX_W);
4519 }
4520 }
4521 } else {
4522 if (adr.base_needs_rex()) {
4523 if (adr.index_needs_rex()) {
4524 prefix(REX_WRXB);
4525 } else {
4526 prefix(REX_WRB);
4527 }
4528 } else {
4529 if (adr.index_needs_rex()) {
4530 prefix(REX_WRX);
4531 } else {
4532 prefix(REX_WR);
4533 }
4534 }
4535 }
4536 }
4538 void Assembler::adcq(Register dst, int32_t imm32) {
4539 (void) prefixq_and_encode(dst->encoding());
4540 emit_arith(0x81, 0xD0, dst, imm32);
4541 }
4543 void Assembler::adcq(Register dst, Address src) {
4544 InstructionMark im(this);
4545 prefixq(src, dst);
4546 emit_byte(0x13);
4547 emit_operand(dst, src);
4548 }
4550 void Assembler::adcq(Register dst, Register src) {
4551 (void) prefixq_and_encode(dst->encoding(), src->encoding());
4552 emit_arith(0x13, 0xC0, dst, src);
4553 }
4555 void Assembler::addq(Address dst, int32_t imm32) {
4556 InstructionMark im(this);
4557 prefixq(dst);
4558 emit_arith_operand(0x81, rax, dst,imm32);
4559 }
4561 void Assembler::addq(Address dst, Register src) {
4562 InstructionMark im(this);
4563 prefixq(dst, src);
4564 emit_byte(0x01);
4565 emit_operand(src, dst);
4566 }
4568 void Assembler::addq(Register dst, int32_t imm32) {
4569 (void) prefixq_and_encode(dst->encoding());
4570 emit_arith(0x81, 0xC0, dst, imm32);
4571 }
4573 void Assembler::addq(Register dst, Address src) {
4574 InstructionMark im(this);
4575 prefixq(src, dst);
4576 emit_byte(0x03);
4577 emit_operand(dst, src);
4578 }
4580 void Assembler::addq(Register dst, Register src) {
4581 (void) prefixq_and_encode(dst->encoding(), src->encoding());
4582 emit_arith(0x03, 0xC0, dst, src);
4583 }
4585 void Assembler::andq(Address dst, int32_t imm32) {
4586 InstructionMark im(this);
4587 prefixq(dst);
4588 emit_byte(0x81);
4589 emit_operand(rsp, dst, 4);
4590 emit_long(imm32);
4591 }
4593 void Assembler::andq(Register dst, int32_t imm32) {
4594 (void) prefixq_and_encode(dst->encoding());
4595 emit_arith(0x81, 0xE0, dst, imm32);
4596 }
4598 void Assembler::andq(Register dst, Address src) {
4599 InstructionMark im(this);
4600 prefixq(src, dst);
4601 emit_byte(0x23);
4602 emit_operand(dst, src);
4603 }
4605 void Assembler::andq(Register dst, Register src) {
4606 (void) prefixq_and_encode(dst->encoding(), src->encoding());
4607 emit_arith(0x23, 0xC0, dst, src);
4608 }
4610 void Assembler::bsfq(Register dst, Register src) {
4611 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
4612 emit_byte(0x0F);
4613 emit_byte(0xBC);
4614 emit_byte(0xC0 | encode);
4615 }
4617 void Assembler::bsrq(Register dst, Register src) {
4618 assert(!VM_Version::supports_lzcnt(), "encoding is treated as LZCNT");
4619 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
4620 emit_byte(0x0F);
4621 emit_byte(0xBD);
4622 emit_byte(0xC0 | encode);
4623 }
4625 void Assembler::bswapq(Register reg) {
4626 int encode = prefixq_and_encode(reg->encoding());
4627 emit_byte(0x0F);
4628 emit_byte(0xC8 | encode);
4629 }
4631 void Assembler::cdqq() {
4632 prefix(REX_W);
4633 emit_byte(0x99);
4634 }
4636 void Assembler::clflush(Address adr) {
4637 prefix(adr);
4638 emit_byte(0x0F);
4639 emit_byte(0xAE);
4640 emit_operand(rdi, adr);
4641 }
4643 void Assembler::cmovq(Condition cc, Register dst, Register src) {
4644 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
4645 emit_byte(0x0F);
4646 emit_byte(0x40 | cc);
4647 emit_byte(0xC0 | encode);
4648 }
4650 void Assembler::cmovq(Condition cc, Register dst, Address src) {
4651 InstructionMark im(this);
4652 prefixq(src, dst);
4653 emit_byte(0x0F);
4654 emit_byte(0x40 | cc);
4655 emit_operand(dst, src);
4656 }
4658 void Assembler::cmpq(Address dst, int32_t imm32) {
4659 InstructionMark im(this);
4660 prefixq(dst);
4661 emit_byte(0x81);
4662 emit_operand(rdi, dst, 4);
4663 emit_long(imm32);
4664 }
4666 void Assembler::cmpq(Register dst, int32_t imm32) {
4667 (void) prefixq_and_encode(dst->encoding());
4668 emit_arith(0x81, 0xF8, dst, imm32);
4669 }
4671 void Assembler::cmpq(Address dst, Register src) {
4672 InstructionMark im(this);
4673 prefixq(dst, src);
4674 emit_byte(0x39); // CMP r/m64, r64 (the 0x3B form would compare with operands swapped)
4675 emit_operand(src, dst);
4676 }
4678 void Assembler::cmpq(Register dst, Register src) {
4679 (void) prefixq_and_encode(dst->encoding(), src->encoding());
4680 emit_arith(0x3B, 0xC0, dst, src);
4681 }
4683 void Assembler::cmpq(Register dst, Address src) {
4684 InstructionMark im(this);
4685 prefixq(src, dst);
4686 emit_byte(0x3B);
4687 emit_operand(dst, src);
4688 }
4690 void Assembler::cmpxchgq(Register reg, Address adr) {
4691 InstructionMark im(this);
4692 prefixq(adr, reg);
4693 emit_byte(0x0F);
4694 emit_byte(0xB1);
4695 emit_operand(reg, adr);
4696 }
4698 void Assembler::cvtsi2sdq(XMMRegister dst, Register src) {
4699 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4700 int encode = simd_prefix_and_encode_q(dst, dst, src, VEX_SIMD_F2);
4701 emit_byte(0x2A);
4702 emit_byte(0xC0 | encode);
4703 }
4705 void Assembler::cvtsi2sdq(XMMRegister dst, Address src) {
4706 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4707 InstructionMark im(this);
4708 simd_prefix_q(dst, dst, src, VEX_SIMD_F2);
4709 emit_byte(0x2A);
4710 emit_operand(dst, src);
4711 }
4713 void Assembler::cvtsi2ssq(XMMRegister dst, Register src) {
4714 NOT_LP64(assert(VM_Version::supports_sse(), ""));
4715 int encode = simd_prefix_and_encode_q(dst, dst, src, VEX_SIMD_F3);
4716 emit_byte(0x2A);
4717 emit_byte(0xC0 | encode);
4718 }
4720 void Assembler::cvtsi2ssq(XMMRegister dst, Address src) {
4721 NOT_LP64(assert(VM_Version::supports_sse(), ""));
4722 InstructionMark im(this);
4723 simd_prefix_q(dst, dst, src, VEX_SIMD_F3);
4724 emit_byte(0x2A);
4725 emit_operand(dst, src);
4726 }
4728 void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
4729 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4730 int encode = simd_prefix_and_encode_q(dst, src, VEX_SIMD_F2);
4731 emit_byte(0x2C);
4732 emit_byte(0xC0 | encode);
4733 }
4735 void Assembler::cvttss2siq(Register dst, XMMRegister src) {
4736 NOT_LP64(assert(VM_Version::supports_sse(), ""));
4737 int encode = simd_prefix_and_encode_q(dst, src, VEX_SIMD_F3);
4738 emit_byte(0x2C);
4739 emit_byte(0xC0 | encode);
4740 }
4742 void Assembler::decl(Register dst) {
4743 // Don't use it directly. Use MacroAssembler::decrementl() instead.
4744 // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
4745 int encode = prefix_and_encode(dst->encoding());
4746 emit_byte(0xFF);
4747 emit_byte(0xC8 | encode);
4748 }
4750 void Assembler::decq(Register dst) {
4751 // Don't use it directly. Use MacroAssembler::decrementq() instead.
4752 // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
4753 int encode = prefixq_and_encode(dst->encoding());
4754 emit_byte(0xFF);
4755 emit_byte(0xC8 | encode);
4756 }
4758 void Assembler::decq(Address dst) {
4759 // Don't use it directly. Use MacroAssembler::decrementq() instead.
4760 InstructionMark im(this);
4761 prefixq(dst);
4762 emit_byte(0xFF);
4763 emit_operand(rcx, dst);
4764 }
4766 void Assembler::fxrstor(Address src) {
4767 prefixq(src);
4768 emit_byte(0x0F);
4769 emit_byte(0xAE);
4770 emit_operand(as_Register(1), src);
4771 }
4773 void Assembler::fxsave(Address dst) {
4774 prefixq(dst);
4775 emit_byte(0x0F);
4776 emit_byte(0xAE);
4777 emit_operand(as_Register(0), dst);
4778 }
4780 void Assembler::idivq(Register src) {
4781 int encode = prefixq_and_encode(src->encoding());
4782 emit_byte(0xF7);
4783 emit_byte(0xF8 | encode);
4784 }
4786 void Assembler::imulq(Register dst, Register src) {
4787 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
4788 emit_byte(0x0F);
4789 emit_byte(0xAF);
4790 emit_byte(0xC0 | encode);
4791 }
4793 void Assembler::imulq(Register dst, Register src, int value) {
4794 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
4795 if (is8bit(value)) {
4796 emit_byte(0x6B);
4797 emit_byte(0xC0 | encode);
4798 emit_byte(value & 0xFF);
4799 } else {
4800 emit_byte(0x69);
4801 emit_byte(0xC0 | encode);
4802 emit_long(value);
4803 }
4804 }
4806 void Assembler::incl(Register dst) {
4807 // Don't use it directly. Use MacroAssembler::incrementl() instead.
4808 // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
4809 int encode = prefix_and_encode(dst->encoding());
4810 emit_byte(0xFF);
4811 emit_byte(0xC0 | encode);
4812 }
4814 void Assembler::incq(Register dst) {
4815 // Don't use it directly. Use MacroAssembler::incrementq() instead.
4816 // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
4817 int encode = prefixq_and_encode(dst->encoding());
4818 emit_byte(0xFF);
4819 emit_byte(0xC0 | encode);
4820 }
4822 void Assembler::incq(Address dst) {
4823 // Don't use it directly. Use MacroAssembler::incrementq() instead.
4824 InstructionMark im(this);
4825 prefixq(dst);
4826 emit_byte(0xFF);
4827 emit_operand(rax, dst);
4828 }
4830 void Assembler::lea(Register dst, Address src) {
4831 leaq(dst, src);
4832 }
4834 void Assembler::leaq(Register dst, Address src) {
4835 InstructionMark im(this);
4836 prefixq(src, dst);
4837 emit_byte(0x8D);
4838 emit_operand(dst, src);
4839 }
4841 void Assembler::mov64(Register dst, int64_t imm64) {
4842 InstructionMark im(this);
4843 int encode = prefixq_and_encode(dst->encoding());
4844 emit_byte(0xB8 | encode);
4845 emit_long64(imm64);
4846 }
4848 void Assembler::mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec) {
4849 InstructionMark im(this);
4850 int encode = prefixq_and_encode(dst->encoding());
4851 emit_byte(0xB8 | encode);
4852 emit_data64(imm64, rspec);
4853 }
4855 void Assembler::mov_narrow_oop(Register dst, int32_t imm32, RelocationHolder const& rspec) {
4856 InstructionMark im(this);
4857 int encode = prefix_and_encode(dst->encoding());
4858 emit_byte(0xB8 | encode);
4859 emit_data((int)imm32, rspec, narrow_oop_operand);
4860 }
4862 void Assembler::mov_narrow_oop(Address dst, int32_t imm32, RelocationHolder const& rspec) {
4863 InstructionMark im(this);
4864 prefix(dst);
4865 emit_byte(0xC7);
4866 emit_operand(rax, dst, 4);
4867 emit_data((int)imm32, rspec, narrow_oop_operand);
4868 }
4870 void Assembler::cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec) {
4871 InstructionMark im(this);
4872 int encode = prefix_and_encode(src1->encoding());
4873 emit_byte(0x81);
4874 emit_byte(0xF8 | encode);
4875 emit_data((int)imm32, rspec, narrow_oop_operand);
4876 }
4878 void Assembler::cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec) {
4879 InstructionMark im(this);
4880 prefix(src1);
4881 emit_byte(0x81);
4882 emit_operand(rax, src1, 4);
4883 emit_data((int)imm32, rspec, narrow_oop_operand);
4884 }
4886 void Assembler::lzcntq(Register dst, Register src) {
4887 assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
4888 emit_byte(0xF3);
4889 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
4890 emit_byte(0x0F);
4891 emit_byte(0xBD);
4892 emit_byte(0xC0 | encode);
4893 }
4895 void Assembler::movdq(XMMRegister dst, Register src) {
4896 // table D-1 says MMX/SSE2
4897 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4898 int encode = simd_prefix_and_encode_q(dst, src, VEX_SIMD_66);
4899 emit_byte(0x6E);
4900 emit_byte(0xC0 | encode);
4901 }
4903 void Assembler::movdq(Register dst, XMMRegister src) {
4904 // table D-1 says MMX/SSE2
4905 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4906 // swap src/dst to get correct prefix
4907 int encode = simd_prefix_and_encode_q(src, dst, VEX_SIMD_66);
4908 emit_byte(0x7E);
4909 emit_byte(0xC0 | encode);
4910 }
4912 void Assembler::movq(Register dst, Register src) {
4913 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
4914 emit_byte(0x8B);
4915 emit_byte(0xC0 | encode);
4916 }
4918 void Assembler::movq(Register dst, Address src) {
4919 InstructionMark im(this);
4920 prefixq(src, dst);
4921 emit_byte(0x8B);
4922 emit_operand(dst, src);
4923 }
4925 void Assembler::movq(Address dst, Register src) {
4926 InstructionMark im(this);
4927 prefixq(dst, src);
4928 emit_byte(0x89);
4929 emit_operand(src, dst);
4930 }
4932 void Assembler::movsbq(Register dst, Address src) {
4933 InstructionMark im(this);
4934 prefixq(src, dst);
4935 emit_byte(0x0F);
4936 emit_byte(0xBE);
4937 emit_operand(dst, src);
4938 }
4940 void Assembler::movsbq(Register dst, Register src) {
4941 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
4942 emit_byte(0x0F);
4943 emit_byte(0xBE);
4944 emit_byte(0xC0 | encode);
4945 }
4947 void Assembler::movslq(Register dst, int32_t imm32) {
4948 // dbx shows movslq(rcx, 3) as movq $0x0000000049000000,(%rbx)
4949 // and movslq(r8, 3); as movl $0x0000000048000000,(%rbx)
4950 // as a result we shouldn't use it until tested at runtime...
4951 ShouldNotReachHere();
4952 InstructionMark im(this);
4953 int encode = prefixq_and_encode(dst->encoding());
4954 emit_byte(0xC7 | encode);
4955 emit_long(imm32);
4956 }
4958 void Assembler::movslq(Address dst, int32_t imm32) {
4959 assert(is_simm32(imm32), "lost bits");
4960 InstructionMark im(this);
4961 prefixq(dst);
4962 emit_byte(0xC7);
4963 emit_operand(rax, dst, 4);
4964 emit_long(imm32);
4965 }
4967 void Assembler::movslq(Register dst, Address src) {
4968 InstructionMark im(this);
4969 prefixq(src, dst);
4970 emit_byte(0x63);
4971 emit_operand(dst, src);
4972 }
4974 void Assembler::movslq(Register dst, Register src) {
4975 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
4976 emit_byte(0x63);
4977 emit_byte(0xC0 | encode);
4978 }
4980 void Assembler::movswq(Register dst, Address src) {
4981 InstructionMark im(this);
4982 prefixq(src, dst);
4983 emit_byte(0x0F);
4984 emit_byte(0xBF);
4985 emit_operand(dst, src);
4986 }
4988 void Assembler::movswq(Register dst, Register src) {
4989 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
4990 emit_byte(0x0F);
4991 emit_byte(0xBF);
4992 emit_byte(0xC0 | encode);
4993 }
4995 void Assembler::movzbq(Register dst, Address src) {
4996 InstructionMark im(this);
4997 prefixq(src, dst);
4998 emit_byte(0x0F);
4999 emit_byte(0xB6);
5000 emit_operand(dst, src);
5001 }
5003 void Assembler::movzbq(Register dst, Register src) {
5004 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
5005 emit_byte(0x0F);
5006 emit_byte(0xB6);
5007 emit_byte(0xC0 | encode);
5008 }
5010 void Assembler::movzwq(Register dst, Address src) {
5011 InstructionMark im(this);
5012 prefixq(src, dst);
5013 emit_byte(0x0F);
5014 emit_byte(0xB7);
5015 emit_operand(dst, src);
5016 }
5018 void Assembler::movzwq(Register dst, Register src) {
5019 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
5020 emit_byte(0x0F);
5021 emit_byte(0xB7);
5022 emit_byte(0xC0 | encode);
5023 }
5025 void Assembler::negq(Register dst) {
5026 int encode = prefixq_and_encode(dst->encoding());
5027 emit_byte(0xF7);
5028 emit_byte(0xD8 | encode);
5029 }
5031 void Assembler::notq(Register dst) {
5032 int encode = prefixq_and_encode(dst->encoding());
5033 emit_byte(0xF7);
5034 emit_byte(0xD0 | encode);
5035 }
5037 void Assembler::orq(Address dst, int32_t imm32) {
5038 InstructionMark im(this);
5039 prefixq(dst);
5040 emit_byte(0x81);
5041 emit_operand(rcx, dst, 4);
5042 emit_long(imm32);
5043 }
5045 void Assembler::orq(Register dst, int32_t imm32) {
5046 (void) prefixq_and_encode(dst->encoding());
5047 emit_arith(0x81, 0xC8, dst, imm32);
5048 }
5050 void Assembler::orq(Register dst, Address src) {
5051 InstructionMark im(this);
5052 prefixq(src, dst);
5053 emit_byte(0x0B);
5054 emit_operand(dst, src);
5055 }
5057 void Assembler::orq(Register dst, Register src) {
5058 (void) prefixq_and_encode(dst->encoding(), src->encoding());
5059 emit_arith(0x0B, 0xC0, dst, src);
5060 }
5062 void Assembler::popa() { // 64bit
5063 movq(r15, Address(rsp, 0));
5064 movq(r14, Address(rsp, wordSize));
5065 movq(r13, Address(rsp, 2 * wordSize));
5066 movq(r12, Address(rsp, 3 * wordSize));
5067 movq(r11, Address(rsp, 4 * wordSize));
5068 movq(r10, Address(rsp, 5 * wordSize));
5069 movq(r9, Address(rsp, 6 * wordSize));
5070 movq(r8, Address(rsp, 7 * wordSize));
5071 movq(rdi, Address(rsp, 8 * wordSize));
5072 movq(rsi, Address(rsp, 9 * wordSize));
5073 movq(rbp, Address(rsp, 10 * wordSize));
5074 // skip rsp
5075 movq(rbx, Address(rsp, 12 * wordSize));
5076 movq(rdx, Address(rsp, 13 * wordSize));
5077 movq(rcx, Address(rsp, 14 * wordSize));
5078 movq(rax, Address(rsp, 15 * wordSize));
5080 addq(rsp, 16 * wordSize);
5081 }
5083 void Assembler::popcntq(Register dst, Address src) {
5084 assert(VM_Version::supports_popcnt(), "must support");
5085 InstructionMark im(this);
5086 emit_byte(0xF3);
5087 prefixq(src, dst);
5088 emit_byte(0x0F);
5089 emit_byte(0xB8);
5090 emit_operand(dst, src);
5091 }
5093 void Assembler::popcntq(Register dst, Register src) {
5094 assert(VM_Version::supports_popcnt(), "must support");
5095 emit_byte(0xF3);
5096 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
5097 emit_byte(0x0F);
5098 emit_byte(0xB8);
5099 emit_byte(0xC0 | encode);
5100 }
5102 void Assembler::popq(Address dst) {
5103 InstructionMark im(this);
5104 prefixq(dst);
5105 emit_byte(0x8F);
5106 emit_operand(rax, dst);
5107 }
5109 void Assembler::pusha() { // 64bit
5110 // We have to store the original rsp. The ABI says that the 128 bytes
5111 // below rsp (the red zone) are local scratch.
5112 movq(Address(rsp, -5 * wordSize), rsp);
5114 subq(rsp, 16 * wordSize);
5116 movq(Address(rsp, 15 * wordSize), rax);
5117 movq(Address(rsp, 14 * wordSize), rcx);
5118 movq(Address(rsp, 13 * wordSize), rdx);
5119 movq(Address(rsp, 12 * wordSize), rbx);
5120 // skip rsp
5121 movq(Address(rsp, 10 * wordSize), rbp);
5122 movq(Address(rsp, 9 * wordSize), rsi);
5123 movq(Address(rsp, 8 * wordSize), rdi);
5124 movq(Address(rsp, 7 * wordSize), r8);
5125 movq(Address(rsp, 6 * wordSize), r9);
5126 movq(Address(rsp, 5 * wordSize), r10);
5127 movq(Address(rsp, 4 * wordSize), r11);
5128 movq(Address(rsp, 3 * wordSize), r12);
5129 movq(Address(rsp, 2 * wordSize), r13);
5130 movq(Address(rsp, wordSize), r14);
5131 movq(Address(rsp, 0), r15);
5132 }
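// Note: the store of rsp at -5 * wordSize above lands in slot 11 once rsp
// has been dropped by 16 words, matching the slot that popa() skips.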
5134 void Assembler::pushq(Address src) {
5135 InstructionMark im(this);
5136 prefixq(src);
5137 emit_byte(0xFF);
5138 emit_operand(rsi, src);
5139 }
5141 void Assembler::rclq(Register dst, int imm8) {
5142 assert(isShiftCount(imm8 >> 1), "illegal shift count");
5143 int encode = prefixq_and_encode(dst->encoding());
5144 if (imm8 == 1) {
5145 emit_byte(0xD1);
5146 emit_byte(0xD0 | encode);
5147 } else {
5148 emit_byte(0xC1);
5149 emit_byte(0xD0 | encode);
5150 emit_byte(imm8);
5151 }
5152 }
5153 void Assembler::sarq(Register dst, int imm8) {
5154 assert(isShiftCount(imm8 >> 1), "illegal shift count");
5155 int encode = prefixq_and_encode(dst->encoding());
5156 if (imm8 == 1) {
5157 emit_byte(0xD1);
5158 emit_byte(0xF8 | encode);
5159 } else {
5160 emit_byte(0xC1);
5161 emit_byte(0xF8 | encode);
5162 emit_byte(imm8);
5163 }
5164 }
5166 void Assembler::sarq(Register dst) {
5167 int encode = prefixq_and_encode(dst->encoding());
5168 emit_byte(0xD3);
5169 emit_byte(0xF8 | encode);
5170 }
5172 void Assembler::sbbq(Address dst, int32_t imm32) {
5173 InstructionMark im(this);
5174 prefixq(dst);
5175 emit_arith_operand(0x81, rbx, dst, imm32);
5176 }
5178 void Assembler::sbbq(Register dst, int32_t imm32) {
5179 (void) prefixq_and_encode(dst->encoding());
5180 emit_arith(0x81, 0xD8, dst, imm32);
5181 }
5183 void Assembler::sbbq(Register dst, Address src) {
5184 InstructionMark im(this);
5185 prefixq(src, dst);
5186 emit_byte(0x1B);
5187 emit_operand(dst, src);
5188 }
5190 void Assembler::sbbq(Register dst, Register src) {
5191 (void) prefixq_and_encode(dst->encoding(), src->encoding());
5192 emit_arith(0x1B, 0xC0, dst, src);
5193 }
5195 void Assembler::shlq(Register dst, int imm8) {
5196 assert(isShiftCount(imm8 >> 1), "illegal shift count");
5197 int encode = prefixq_and_encode(dst->encoding());
5198 if (imm8 == 1) {
5199 emit_byte(0xD1);
5200 emit_byte(0xE0 | encode);
5201 } else {
5202 emit_byte(0xC1);
5203 emit_byte(0xE0 | encode);
5204 emit_byte(imm8);
5205 }
5206 }
5208 void Assembler::shlq(Register dst) {
5209 int encode = prefixq_and_encode(dst->encoding());
5210 emit_byte(0xD3);
5211 emit_byte(0xE0 | encode);
5212 }
5214 void Assembler::shrq(Register dst, int imm8) {
5215 assert(isShiftCount(imm8 >> 1), "illegal shift count");
5216 int encode = prefixq_and_encode(dst->encoding());
5217 emit_byte(0xC1);
5218 emit_byte(0xE8 | encode);
5219 emit_byte(imm8);
5220 }
5222 void Assembler::shrq(Register dst) {
5223 int encode = prefixq_and_encode(dst->encoding());
5224 emit_byte(0xD3);
5225 emit_byte(0xE8 | encode);
5226 }
5228 void Assembler::subq(Address dst, int32_t imm32) {
5229 InstructionMark im(this);
5230 prefixq(dst);
5231 emit_arith_operand(0x81, rbp, dst, imm32);
5232 }
5234 void Assembler::subq(Address dst, Register src) {
5235 InstructionMark im(this);
5236 prefixq(dst, src);
5237 emit_byte(0x29);
5238 emit_operand(src, dst);
5239 }
5241 void Assembler::subq(Register dst, int32_t imm32) {
5242 (void) prefixq_and_encode(dst->encoding());
5243 emit_arith(0x81, 0xE8, dst, imm32);
5244 }
5246 // Force generation of a 4 byte immediate value even if it fits into 8 bits
5247 void Assembler::subq_imm32(Register dst, int32_t imm32) {
5248 (void) prefixq_and_encode(dst->encoding());
5249 emit_arith_imm32(0x81, 0xE8, dst, imm32);
5250 }
5252 void Assembler::subq(Register dst, Address src) {
5253 InstructionMark im(this);
5254 prefixq(src, dst);
5255 emit_byte(0x2B);
5256 emit_operand(dst, src);
5257 }
5259 void Assembler::subq(Register dst, Register src) {
5260 (void) prefixq_and_encode(dst->encoding(), src->encoding());
5261 emit_arith(0x2B, 0xC0, dst, src);
5262 }
5264 void Assembler::testq(Register dst, int32_t imm32) {
5265 // not using emit_arith because test
5266 // doesn't support sign-extension of
5267 // 8bit operands
5268 int encode = dst->encoding();
5269 if (encode == 0) {
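    // rax has a compact encoding: (REX.W) A9 imm32 tests rax directly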
5270 prefix(REX_W);
5271 emit_byte(0xA9);
5272 } else {
5273 encode = prefixq_and_encode(encode);
5274 emit_byte(0xF7);
5275 emit_byte(0xC0 | encode);
5276 }
5277 emit_long(imm32);
5278 }
5280 void Assembler::testq(Register dst, Register src) {
5281 (void) prefixq_and_encode(dst->encoding(), src->encoding());
5282 emit_arith(0x85, 0xC0, dst, src);
5283 }
5285 void Assembler::xaddq(Address dst, Register src) {
5286 InstructionMark im(this);
5287 prefixq(dst, src);
5288 emit_byte(0x0F);
5289 emit_byte(0xC1);
5290 emit_operand(src, dst);
5291 }
5293 void Assembler::xchgq(Register dst, Address src) {
5294 InstructionMark im(this);
5295 prefixq(src, dst);
5296 emit_byte(0x87);
5297 emit_operand(dst, src);
5298 }
5300 void Assembler::xchgq(Register dst, Register src) {
5301 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
5302 emit_byte(0x87);
5303 emit_byte(0xC0 | encode);
5304 }
5306 void Assembler::xorq(Register dst, Register src) {
5307 (void) prefixq_and_encode(dst->encoding(), src->encoding());
5308 emit_arith(0x33, 0xC0, dst, src);
5309 }
5311 void Assembler::xorq(Register dst, Address src) {
5312 InstructionMark im(this);
5313 prefixq(src, dst);
5314 emit_byte(0x33);
5315 emit_operand(dst, src);
5316 }
5318 #endif // !LP64
5320 static Assembler::Condition reverse[] = {
5321 Assembler::noOverflow /* overflow = 0x0 */ ,
5322 Assembler::overflow /* noOverflow = 0x1 */ ,
5323 Assembler::aboveEqual /* carrySet = 0x2, below = 0x2 */ ,
5324 Assembler::below /* aboveEqual = 0x3, carryClear = 0x3 */ ,
5325 Assembler::notZero /* zero = 0x4, equal = 0x4 */ ,
5326 Assembler::zero /* notZero = 0x5, notEqual = 0x5 */ ,
5327 Assembler::above /* belowEqual = 0x6 */ ,
5328 Assembler::belowEqual /* above = 0x7 */ ,
5329 Assembler::positive /* negative = 0x8 */ ,
5330 Assembler::negative /* positive = 0x9 */ ,
5331 Assembler::noParity /* parity = 0xa */ ,
5332 Assembler::parity /* noParity = 0xb */ ,
5333 Assembler::greaterEqual /* less = 0xc */ ,
5334 Assembler::less /* greaterEqual = 0xd */ ,
5335 Assembler::greater /* lessEqual = 0xe */ ,
5336 Assembler::lessEqual /* greater = 0xf */
5338 };
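// reverse[cc] yields the logical negation of condition cc, e.g.
// reverse[Assembler::zero] == Assembler::notZero.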
5341 // Implementation of MacroAssembler
5343 // First, all the versions that have distinct implementations depending on 32/64 bit,
5344 // unless the difference is trivial (1 line or so).
5346 #ifndef _LP64
5348 // 32bit versions
5350 Address MacroAssembler::as_Address(AddressLiteral adr) {
5351 return Address(adr.target(), adr.rspec());
5352 }
5354 Address MacroAssembler::as_Address(ArrayAddress adr) {
5355 return Address::make_array(adr);
5356 }
5358 int MacroAssembler::biased_locking_enter(Register lock_reg,
5359 Register obj_reg,
5360 Register swap_reg,
5361 Register tmp_reg,
5362 bool swap_reg_contains_mark,
5363 Label& done,
5364 Label* slow_case,
5365 BiasedLockingCounters* counters) {
5366 assert(UseBiasedLocking, "why call this otherwise?");
5367 assert(swap_reg == rax, "swap_reg must be rax for cmpxchg");
5368 assert_different_registers(lock_reg, obj_reg, swap_reg);
5370 if (PrintBiasedLockingStatistics && counters == NULL)
5371 counters = BiasedLocking::counters();
5373 bool need_tmp_reg = false;
5374 if (tmp_reg == noreg) {
5375 need_tmp_reg = true;
5376 tmp_reg = lock_reg;
5377 } else {
5378 assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
5379 }
5380 assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
5381 Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes());
5382 Address klass_addr (obj_reg, oopDesc::klass_offset_in_bytes());
5383 Address saved_mark_addr(lock_reg, 0);
5385 // Biased locking
5386 // See whether the lock is currently biased toward our thread and
5387 // whether the epoch is still valid
5388 // Note that the runtime guarantees sufficient alignment of JavaThread
5389 // pointers to allow age to be placed into low bits
5390 // First check to see whether biasing is even enabled for this object
5391 Label cas_label;
5392 int null_check_offset = -1;
5393 if (!swap_reg_contains_mark) {
5394 null_check_offset = offset();
5395 movl(swap_reg, mark_addr);
5396 }
5397 if (need_tmp_reg) {
5398 push(tmp_reg);
5399 }
5400 movl(tmp_reg, swap_reg);
5401 andl(tmp_reg, markOopDesc::biased_lock_mask_in_place);
5402 cmpl(tmp_reg, markOopDesc::biased_lock_pattern);
5403 if (need_tmp_reg) {
5404 pop(tmp_reg);
5405 }
5406 jcc(Assembler::notEqual, cas_label);
5407 // The bias pattern is present in the object's header. Need to check
5408 // whether the bias owner and the epoch are both still current.
5409 // Note that because there is no current thread register on x86 we
5410 // need to store off the mark word we read out of the object to
5411 // avoid reloading it and needing to recheck invariants below. This
5412 // store is unfortunate but it makes the overall code shorter and
5413 // simpler.
5414 movl(saved_mark_addr, swap_reg);
5415 if (need_tmp_reg) {
5416 push(tmp_reg);
5417 }
5418 get_thread(tmp_reg);
5419 xorl(swap_reg, tmp_reg);
5420 if (swap_reg_contains_mark) {
5421 null_check_offset = offset();
5422 }
5423 movl(tmp_reg, klass_addr);
5424 xorl(swap_reg, Address(tmp_reg, Klass::prototype_header_offset()));
5425 andl(swap_reg, ~((int) markOopDesc::age_mask_in_place));
5426 if (need_tmp_reg) {
5427 pop(tmp_reg);
5428 }
5429 if (counters != NULL) {
5430 cond_inc32(Assembler::zero,
5431 ExternalAddress((address)counters->biased_lock_entry_count_addr()));
5432 }
5433 jcc(Assembler::equal, done);
5435 Label try_revoke_bias;
5436 Label try_rebias;
5438 // At this point we know that the header has the bias pattern and
5439 // that we are not the bias owner in the current epoch. We need to
5440 // figure out more details about the state of the header in order to
5441 // know what operations can be legally performed on the object's
5442 // header.
5444 // If the low three bits in the xor result aren't clear, that means
5445 // the prototype header is no longer biased and we have to revoke
5446 // the bias on this object.
5447 testl(swap_reg, markOopDesc::biased_lock_mask_in_place);
5448 jcc(Assembler::notZero, try_revoke_bias);
5450 // Biasing is still enabled for this data type. See whether the
5451 // epoch of the current bias is still valid, meaning that the epoch
5452 // bits of the mark word are equal to the epoch bits of the
5453 // prototype header. (Note that the prototype header's epoch bits
5454 // only change at a safepoint.) If not, attempt to rebias the object
5455 // toward the current thread. Note that we must be absolutely sure
5456 // that the current epoch is invalid in order to do this because
5457 // otherwise the manipulations it performs on the mark word are
5458 // illegal.
5459 testl(swap_reg, markOopDesc::epoch_mask_in_place);
5460 jcc(Assembler::notZero, try_rebias);
5462 // The epoch of the current bias is still valid but we know nothing
5463 // about the owner; it might be set or it might be clear. Try to
5464 // acquire the bias of the object using an atomic operation. If this
5465 // fails we will go in to the runtime to revoke the object's bias.
5466 // Note that we first construct the presumed unbiased header so we
5467 // don't accidentally blow away another thread's valid bias.
5468 movl(swap_reg, saved_mark_addr);
5469 andl(swap_reg,
5470 markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
5471 if (need_tmp_reg) {
5472 push(tmp_reg);
5473 }
5474 get_thread(tmp_reg);
5475 orl(tmp_reg, swap_reg);
5476 if (os::is_MP()) {
5477 lock();
5478 }
5479 cmpxchgptr(tmp_reg, Address(obj_reg, 0));
5480 if (need_tmp_reg) {
5481 pop(tmp_reg);
5482 }
5483 // If the biasing toward our thread failed, this means that
5484 // another thread succeeded in biasing it toward itself and we
5485 // need to revoke that bias. The revocation will occur in the
5486 // interpreter runtime in the slow case.
5487 if (counters != NULL) {
5488 cond_inc32(Assembler::zero,
5489 ExternalAddress((address)counters->anonymously_biased_lock_entry_count_addr()));
5490 }
5491 if (slow_case != NULL) {
5492 jcc(Assembler::notZero, *slow_case);
5493 }
5494 jmp(done);
5496 bind(try_rebias);
5497 // At this point we know the epoch has expired, meaning that the
5498 // current "bias owner", if any, is actually invalid. Under these
5499 // circumstances _only_, we are allowed to use the current header's
5500 // value as the comparison value when doing the cas to acquire the
5501 // bias in the current epoch. In other words, we allow transfer of
5502 // the bias from one thread to another directly in this situation.
5503 //
5504 // FIXME: due to a lack of registers we currently blow away the age
5505 // bits in this situation. Should attempt to preserve them.
5506 if (need_tmp_reg) {
5507 push(tmp_reg);
5508 }
5509 get_thread(tmp_reg);
5510 movl(swap_reg, klass_addr);
5511 orl(tmp_reg, Address(swap_reg, Klass::prototype_header_offset()));
5512 movl(swap_reg, saved_mark_addr);
5513 if (os::is_MP()) {
5514 lock();
5515 }
5516 cmpxchgptr(tmp_reg, Address(obj_reg, 0));
5517 if (need_tmp_reg) {
5518 pop(tmp_reg);
5519 }
5520 // If the biasing toward our thread failed, then another thread
5521 // succeeded in biasing it toward itself and we need to revoke that
5522 // bias. The revocation will occur in the runtime in the slow case.
5523 if (counters != NULL) {
5524 cond_inc32(Assembler::zero,
5525 ExternalAddress((address)counters->rebiased_lock_entry_count_addr()));
5526 }
5527 if (slow_case != NULL) {
5528 jcc(Assembler::notZero, *slow_case);
5529 }
5530 jmp(done);
5532 bind(try_revoke_bias);
5533 // The prototype mark in the klass doesn't have the bias bit set any
5534 // more, indicating that objects of this data type are not supposed
5535 // to be biased any more. We are going to try to reset the mark of
5536 // this object to the prototype value and fall through to the
5537 // CAS-based locking scheme. Note that if our CAS fails, it means
5538 // that another thread raced us for the privilege of revoking the
5539 // bias of this particular object, so it's okay to continue in the
5540 // normal locking code.
5541 //
5542 // FIXME: due to a lack of registers we currently blow away the age
5543 // bits in this situation. Should attempt to preserve them.
5544 movl(swap_reg, saved_mark_addr);
5545 if (need_tmp_reg) {
5546 push(tmp_reg);
5547 }
5548 movl(tmp_reg, klass_addr);
5549 movl(tmp_reg, Address(tmp_reg, Klass::prototype_header_offset()));
5550 if (os::is_MP()) {
5551 lock();
5552 }
5553 cmpxchgptr(tmp_reg, Address(obj_reg, 0));
5554 if (need_tmp_reg) {
5555 pop(tmp_reg);
5556 }
5557 // Fall through to the normal CAS-based lock, because no matter what
5558 // the result of the above CAS, some thread must have succeeded in
5559 // removing the bias bit from the object's header.
5560 if (counters != NULL) {
5561 cond_inc32(Assembler::zero,
5562 ExternalAddress((address)counters->revoked_lock_entry_count_addr()));
5563 }
5565 bind(cas_label);
5567 return null_check_offset;
5568 }
5569 void MacroAssembler::call_VM_leaf_base(address entry_point,
5570 int number_of_arguments) {
5571 call(RuntimeAddress(entry_point));
5572 increment(rsp, number_of_arguments * wordSize);
5573 }
5575 void MacroAssembler::cmpoop(Address src1, jobject obj) {
5576 cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
5577 }
5579 void MacroAssembler::cmpoop(Register src1, jobject obj) {
5580 cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
5581 }
5583 void MacroAssembler::extend_sign(Register hi, Register lo) {
5584 // According to Intel Doc. AP-526, "Integer Divide", p.18.
5585 if (VM_Version::is_P6() && hi == rdx && lo == rax) {
5586 cdql();
5587 } else {
5588 movl(hi, lo);
5589 sarl(hi, 31);
5590 }
5591 }
5593 void MacroAssembler::jC2(Register tmp, Label& L) {
5594 // set parity bit if FPU flag C2 is set (via rax)
5595 save_rax(tmp);
5596 fwait(); fnstsw_ax();
5597 sahf();
5598 restore_rax(tmp);
5599 // branch
5600 jcc(Assembler::parity, L);
5601 }
5603 void MacroAssembler::jnC2(Register tmp, Label& L) {
5604 // set parity bit if FPU flag C2 is set (via rax)
5605 save_rax(tmp);
5606 fwait(); fnstsw_ax();
5607 sahf();
5608 restore_rax(tmp);
5609 // branch
5610 jcc(Assembler::noParity, L);
5611 }
5613 // 32bit can do a case table jump in one instruction but we no longer allow the base
5614 // to be installed in the Address class
5615 void MacroAssembler::jump(ArrayAddress entry) {
5616 jmp(as_Address(entry));
5617 }
5619 // Note: y_lo will be destroyed
5620 void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
5621 // Long compare for Java (semantics as described in JVM spec.)
5622 Label high, low, done;
5624 cmpl(x_hi, y_hi);
5625 jcc(Assembler::less, low);
5626 jcc(Assembler::greater, high);
5627 // x_hi is the return register
5628 xorl(x_hi, x_hi);
5629 cmpl(x_lo, y_lo);
5630 jcc(Assembler::below, low);
5631 jcc(Assembler::equal, done);
5633 bind(high);
5634 xorl(x_hi, x_hi);
5635 increment(x_hi);
5636 jmp(done);
5638 bind(low);
5639 xorl(x_hi, x_hi);
5640 decrementl(x_hi);
5642 bind(done);
5643 }
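// Equivalent C sketch of the semantics implemented above (assuming 32-bit
// halves of a Java long):
//   int lcmp(jlong x, jlong y) { return x < y ? -1 : (x == y ? 0 : 1); }
// The high words compare signed ('less'/'greater') while the low words
// compare unsigned ('below'), which is why the two jcc conditions differ.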
5645 void MacroAssembler::lea(Register dst, AddressLiteral src) {
5646 mov_literal32(dst, (int32_t)src.target(), src.rspec());
5647 }
5649 void MacroAssembler::lea(Address dst, AddressLiteral adr) {
5650 // leal(dst, as_Address(adr));
5651 // see note in movl as to why we must use a move
5652 mov_literal32(dst, (int32_t) adr.target(), adr.rspec());
5653 }
5655 void MacroAssembler::leave() {
5656 mov(rsp, rbp);
5657 pop(rbp);
5658 }
5660 void MacroAssembler::lmul(int x_rsp_offset, int y_rsp_offset) {
5661 // Multiplication of two Java long values stored on the stack
5662 // as illustrated below. Result is in rdx:rax.
5663 //
5664 // rsp ---> [ ?? ] \ \
5665 // .... | y_rsp_offset |
5666 // [ y_lo ] / (in bytes) | x_rsp_offset
5667 // [ y_hi ] | (in bytes)
5668 // .... |
5669 // [ x_lo ] /
5670 // [ x_hi ]
5671 // ....
5672 //
5673 // Basic idea: lo(result) = lo(x_lo * y_lo)
5674 // hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) + lo(x_lo * y_hi)
5675 Address x_hi(rsp, x_rsp_offset + wordSize); Address x_lo(rsp, x_rsp_offset);
5676 Address y_hi(rsp, y_rsp_offset + wordSize); Address y_lo(rsp, y_rsp_offset);
5677 Label quick;
5678 // load x_hi, y_hi and check if quick
5679 // multiplication is possible
5680 movl(rbx, x_hi);
5681 movl(rcx, y_hi);
5682 movl(rax, rbx);
5683 orl(rbx, rcx); // rbx = 0 <=> x_hi = 0 and y_hi = 0
5684 jcc(Assembler::zero, quick); // if rbx = 0 do quick multiply
5685 // do full multiplication
5686 // 1st step
5687 mull(y_lo); // x_hi * y_lo
5688 movl(rbx, rax); // save lo(x_hi * y_lo) in rbx
5689 // 2nd step
5690 movl(rax, x_lo);
5691 mull(rcx); // x_lo * y_hi
5692 addl(rbx, rax); // add lo(x_lo * y_hi) to rbx
5693 // 3rd step
5694 bind(quick); // note: rbx = 0 if quick multiply!
5695 movl(rax, x_lo);
5696 mull(y_lo); // x_lo * y_lo
5697 addl(rdx, rbx); // correct hi(x_lo * y_lo)
5698 }
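// Illustrative identity used above (all words 32 bits, result mod 2^64):
//   x * y = x_lo*y_lo + (x_hi*y_lo + x_lo*y_hi) * 2^32 + x_hi*y_hi * 2^64
// The x_hi*y_hi term only affects bits >= 64 and is dropped, as are the
// high halves of the two cross products.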
5700 void MacroAssembler::lneg(Register hi, Register lo) {
5701 negl(lo);
5702 adcl(hi, 0);
5703 negl(hi);
5704 }
5706 void MacroAssembler::lshl(Register hi, Register lo) {
5707 // Java shift left long support (semantics as described in JVM spec., p.305)
5708 // (basic idea for shift counts s >= n: x << s == (x << n) << (s - n))
5709 // shift value is in rcx !
5710 assert(hi != rcx, "must not use rcx");
5711 assert(lo != rcx, "must not use rcx");
5712 const Register s = rcx; // shift count
5713 const int n = BitsPerWord;
5714 Label L;
5715 andl(s, 0x3f); // s := s & 0x3f (s < 0x40)
5716 cmpl(s, n); // if (s < n)
5717 jcc(Assembler::less, L); // else (s >= n)
5718 movl(hi, lo); // x := x << n
5719 xorl(lo, lo);
5720 // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
5721 bind(L); // s (mod n) < n
5722 shldl(hi, lo); // x := x << s
5723 shll(lo);
5724 }
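// Worked example: for s == 40 the code computes x << 40 as (x << 32) << 8,
// i.e. hi = lo, lo = 0, then shld/shl by 8, relying on the hardware
// masking the shift count to s mod 32.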
5727 void MacroAssembler::lshr(Register hi, Register lo, bool sign_extension) {
5728 // Java shift right long support (semantics as described in JVM spec., p.306 & p.310)
5729 // (basic idea for shift counts s >= n: x >> s == (x >> n) >> (s - n))
5730 assert(hi != rcx, "must not use rcx");
5731 assert(lo != rcx, "must not use rcx");
5732 const Register s = rcx; // shift count
5733 const int n = BitsPerWord;
5734 Label L;
5735 andl(s, 0x3f); // s := s & 0x3f (s < 0x40)
5736 cmpl(s, n); // if (s < n)
5737 jcc(Assembler::less, L); // else (s >= n)
5738 movl(lo, hi); // x := x >> n
5739 if (sign_extension) sarl(hi, 31);
5740 else xorl(hi, hi);
5741 // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
5742 bind(L); // s (mod n) < n
5743 shrdl(lo, hi); // x := x >> s
5744 if (sign_extension) sarl(hi);
5745 else shrl(hi);
5746 }
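// Note: sign_extension selects between Java's >> (arithmetic, sar) and
// >>> (logical, shr) semantics for the upper word.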
5748 void MacroAssembler::movoop(Register dst, jobject obj) {
5749 mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
5750 }
5752 void MacroAssembler::movoop(Address dst, jobject obj) {
5753 mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
5754 }
5756 void MacroAssembler::movptr(Register dst, AddressLiteral src) {
5757 if (src.is_lval()) {
5758 mov_literal32(dst, (intptr_t)src.target(), src.rspec());
5759 } else {
5760 movl(dst, as_Address(src));
5761 }
5762 }
5764 void MacroAssembler::movptr(ArrayAddress dst, Register src) {
5765 movl(as_Address(dst), src);
5766 }
5768 void MacroAssembler::movptr(Register dst, ArrayAddress src) {
5769 movl(dst, as_Address(src));
5770 }
5772 // src should NEVER be a real pointer. Use AddressLiteral for true pointers
5773 void MacroAssembler::movptr(Address dst, intptr_t src) {
5774 movl(dst, src);
5775 }
5778 void MacroAssembler::pop_callee_saved_registers() {
5779 pop(rcx);
5780 pop(rdx);
5781 pop(rdi);
5782 pop(rsi);
5783 }
5785 void MacroAssembler::pop_fTOS() {
5786 fld_d(Address(rsp, 0));
5787 addl(rsp, 2 * wordSize);
5788 }
5790 void MacroAssembler::push_callee_saved_registers() {
5791 push(rsi);
5792 push(rdi);
5793 push(rdx);
5794 push(rcx);
5795 }
5797 void MacroAssembler::push_fTOS() {
5798 subl(rsp, 2 * wordSize);
5799 fstp_d(Address(rsp, 0));
5800 }
5803 void MacroAssembler::pushoop(jobject obj) {
5804 push_literal32((int32_t)obj, oop_Relocation::spec_for_immediate());
5805 }
5808 void MacroAssembler::pushptr(AddressLiteral src) {
5809 if (src.is_lval()) {
5810 push_literal32((int32_t)src.target(), src.rspec());
5811 } else {
5812 pushl(as_Address(src));
5813 }
5814 }
5816 void MacroAssembler::set_word_if_not_zero(Register dst) {
5817 xorl(dst, dst);
5818 set_byte_if_not_zero(dst);
5819 }
5821 static void pass_arg0(MacroAssembler* masm, Register arg) {
5822 masm->push(arg);
5823 }
5825 static void pass_arg1(MacroAssembler* masm, Register arg) {
5826 masm->push(arg);
5827 }
5829 static void pass_arg2(MacroAssembler* masm, Register arg) {
5830 masm->push(arg);
5831 }
5833 static void pass_arg3(MacroAssembler* masm, Register arg) {
5834 masm->push(arg);
5835 }
5837 #ifndef PRODUCT
5838 extern "C" void findpc(intptr_t x);
5839 #endif
5841 void MacroAssembler::debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg) {
5842 // In order to get locks to work, we need to fake an in_VM state
5843 JavaThread* thread = JavaThread::current();
5844 JavaThreadState saved_state = thread->thread_state();
5845 thread->set_thread_state(_thread_in_vm);
5846 if (ShowMessageBoxOnError) {
5850 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
5851 ttyLocker ttyl;
5852 BytecodeCounter::print();
5853 }
5854 // To see where a verify_oop failed, get $ebx+40/X for this frame.
5855 // This is the value of eip which points to where verify_oop will return.
5856 if (os::message_box(msg, "Execution stopped, print registers?")) {
5857 print_state32(rdi, rsi, rbp, rsp, rbx, rdx, rcx, rax, eip);
5858 BREAKPOINT;
5859 assert(false, "start up GDB");
5860 }
5861 } else {
5862 ttyLocker ttyl;
5863 ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
5864 assert(false, err_msg("DEBUG MESSAGE: %s", msg));
5865 }
5866 ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
5867 }
5869 void MacroAssembler::print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip) {
5870 ttyLocker ttyl;
5871 FlagSetting fs(Debugging, true);
5872 tty->print_cr("eip = 0x%08x", eip);
5873 #ifndef PRODUCT
5874 if ((WizardMode || Verbose) && PrintMiscellaneous) {
5875 tty->cr();
5876 findpc(eip);
5877 tty->cr();
5878 }
5879 #endif
5880 #define PRINT_REG(rax) \
5881 { tty->print("%s = ", #rax); os::print_location(tty, rax); }
5882 PRINT_REG(rax);
5883 PRINT_REG(rbx);
5884 PRINT_REG(rcx);
5885 PRINT_REG(rdx);
5886 PRINT_REG(rdi);
5887 PRINT_REG(rsi);
5888 PRINT_REG(rbp);
5889 PRINT_REG(rsp);
5890 #undef PRINT_REG
5891 // Print some words near the top of the stack.
5892 int* dump_sp = (int*) rsp;
5893 for (int col1 = 0; col1 < 8; col1++) {
5894 tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
5895 os::print_location(tty, *dump_sp++);
5896 }
5897 for (int row = 0; row < 16; row++) {
5898 tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
5899 for (int col = 0; col < 8; col++) {
5900 tty->print(" 0x%08x", *dump_sp++);
5901 }
5902 tty->cr();
5903 }
5904 // Print some instructions around pc:
5905 Disassembler::decode((address)eip-64, (address)eip);
5906 tty->print_cr("--------");
5907 Disassembler::decode((address)eip, (address)eip+32);
5908 }
5910 void MacroAssembler::stop(const char* msg) {
5911 ExternalAddress message((address)msg);
5912 // push address of message
5913 pushptr(message.addr());
5914 { Label L; call(L, relocInfo::none); bind(L); } // push eip
5915 pusha(); // push registers
5916 call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32)));
5917 hlt();
5918 }
5920 void MacroAssembler::warn(const char* msg) {
5921 push_CPU_state();
5923 ExternalAddress message((address) msg);
5924 // push address of message
5925 pushptr(message.addr());
5927 call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));
5928 addl(rsp, wordSize); // discard argument
5929 pop_CPU_state();
5930 }
5932 void MacroAssembler::print_state() {
5933 { Label L; call(L, relocInfo::none); bind(L); } // push eip
5934 pusha(); // push registers
5936 push_CPU_state();
5937 call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::print_state32)));
5938 pop_CPU_state();
5940 popa();
5941 addl(rsp, wordSize);
5942 }
5944 #else // _LP64
5946 // 64 bit versions
5948 Address MacroAssembler::as_Address(AddressLiteral adr) {
5949 // amd64 always does this as a pc-relative address;
5950 // whether we are absolute or displacement-based depends on the instruction type:
5951 // jmp/call use displacements, others are absolute
5952 assert(!adr.is_lval(), "must be rval");
5953 assert(reachable(adr), "must be");
5954 return Address((int32_t)(intptr_t)(adr.target() - pc()), adr.target(), adr.reloc());
5956 }
5958 Address MacroAssembler::as_Address(ArrayAddress adr) {
5959 AddressLiteral base = adr.base();
5960 lea(rscratch1, base);
5961 Address index = adr.index();
5962 assert(index._disp == 0, "must not have disp"); // maybe it can?
5963 Address array(rscratch1, index._index, index._scale, index._disp);
5964 return array;
5965 }
5967 int MacroAssembler::biased_locking_enter(Register lock_reg,
5968 Register obj_reg,
5969 Register swap_reg,
5970 Register tmp_reg,
5971 bool swap_reg_contains_mark,
5972 Label& done,
5973 Label* slow_case,
5974 BiasedLockingCounters* counters) {
5975 assert(UseBiasedLocking, "why call this otherwise?");
5976 assert(swap_reg == rax, "swap_reg must be rax for cmpxchgq");
5977 assert(tmp_reg != noreg, "tmp_reg must be supplied");
5978 assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
5979 assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
5980 Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes());
5981 Address saved_mark_addr(lock_reg, 0);
5983 if (PrintBiasedLockingStatistics && counters == NULL)
5984 counters = BiasedLocking::counters();
5986 // Biased locking
5987 // See whether the lock is currently biased toward our thread and
5988 // whether the epoch is still valid
5989 // Note that the runtime guarantees sufficient alignment of JavaThread
5990 // pointers to allow age to be placed into low bits
5991 // First check to see whether biasing is even enabled for this object
5992 Label cas_label;
5993 int null_check_offset = -1;
5994 if (!swap_reg_contains_mark) {
5995 null_check_offset = offset();
5996 movq(swap_reg, mark_addr);
5997 }
5998 movq(tmp_reg, swap_reg);
5999 andq(tmp_reg, markOopDesc::biased_lock_mask_in_place);
6000 cmpq(tmp_reg, markOopDesc::biased_lock_pattern);
6001 jcc(Assembler::notEqual, cas_label);
6002 // The bias pattern is present in the object's header. Need to check
6003 // whether the bias owner and the epoch are both still current.
6004 load_prototype_header(tmp_reg, obj_reg);
6005 orq(tmp_reg, r15_thread);
6006 xorq(tmp_reg, swap_reg);
6007 andq(tmp_reg, ~((int) markOopDesc::age_mask_in_place));
6008 if (counters != NULL) {
6009 cond_inc32(Assembler::zero,
6010 ExternalAddress((address) counters->biased_lock_entry_count_addr()));
6011 }
6012 jcc(Assembler::equal, done);
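// Illustrative note: unlike the 32-bit path, tmp_reg here ends up as
// (prototype_header | thread) ^ mark, which is zero under exactly the
// same three conditions: bias pattern present, epoch current, and bias
// owner == current thread.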
6014 Label try_revoke_bias;
6015 Label try_rebias;
6017 // At this point we know that the header has the bias pattern and
6018 // that we are not the bias owner in the current epoch. We need to
6019 // figure out more details about the state of the header in order to
6020 // know what operations can be legally performed on the object's
6021 // header.
6023 // If the low three bits in the xor result aren't clear, that means
6024 // the prototype header is no longer biased and we have to revoke
6025 // the bias on this object.
6026 testq(tmp_reg, markOopDesc::biased_lock_mask_in_place);
6027 jcc(Assembler::notZero, try_revoke_bias);
6029 // Biasing is still enabled for this data type. See whether the
6030 // epoch of the current bias is still valid, meaning that the epoch
6031 // bits of the mark word are equal to the epoch bits of the
6032 // prototype header. (Note that the prototype header's epoch bits
6033 // only change at a safepoint.) If not, attempt to rebias the object
6034 // toward the current thread. Note that we must be absolutely sure
6035 // that the current epoch is invalid in order to do this because
6036 // otherwise the manipulations it performs on the mark word are
6037 // illegal.
6038 testq(tmp_reg, markOopDesc::epoch_mask_in_place);
6039 jcc(Assembler::notZero, try_rebias);
6041 // The epoch of the current bias is still valid but we know nothing
6042 // about the owner; it might be set or it might be clear. Try to
6043 // acquire the bias of the object using an atomic operation. If this
6044 // fails we will go in to the runtime to revoke the object's bias.
6045 // Note that we first construct the presumed unbiased header so we
6046 // don't accidentally blow away another thread's valid bias.
6047 andq(swap_reg,
6048 markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
6049 movq(tmp_reg, swap_reg);
6050 orq(tmp_reg, r15_thread);
6051 if (os::is_MP()) {
6052 lock();
6053 }
6054 cmpxchgq(tmp_reg, Address(obj_reg, 0));
6055 // If the biasing toward our thread failed, this means that
6056 // another thread succeeded in biasing it toward itself and we
6057 // need to revoke that bias. The revocation will occur in the
6058 // interpreter runtime in the slow case.
6059 if (counters != NULL) {
6060 cond_inc32(Assembler::zero,
6061 ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr()));
6062 }
6063 if (slow_case != NULL) {
6064 jcc(Assembler::notZero, *slow_case);
6065 }
6066 jmp(done);
6068 bind(try_rebias);
6069 // At this point we know the epoch has expired, meaning that the
6070 // current "bias owner", if any, is actually invalid. Under these
6071 // circumstances _only_, we are allowed to use the current header's
6072 // value as the comparison value when doing the cas to acquire the
6073 // bias in the current epoch. In other words, we allow transfer of
6074 // the bias from one thread to another directly in this situation.
6075 //
6076 // FIXME: due to a lack of registers we currently blow away the age
6077 // bits in this situation. Should attempt to preserve them.
6078 load_prototype_header(tmp_reg, obj_reg);
6079 orq(tmp_reg, r15_thread);
6080 if (os::is_MP()) {
6081 lock();
6082 }
6083 cmpxchgq(tmp_reg, Address(obj_reg, 0));
6084 // If the biasing toward our thread failed, then another thread
6085 // succeeded in biasing it toward itself and we need to revoke that
6086 // bias. The revocation will occur in the runtime in the slow case.
6087 if (counters != NULL) {
6088 cond_inc32(Assembler::zero,
6089 ExternalAddress((address) counters->rebiased_lock_entry_count_addr()));
6090 }
6091 if (slow_case != NULL) {
6092 jcc(Assembler::notZero, *slow_case);
6093 }
6094 jmp(done);
6096 bind(try_revoke_bias);
6097 // The prototype mark in the klass doesn't have the bias bit set any
6098 // more, indicating that objects of this data type are not supposed
6099 // to be biased any more. We are going to try to reset the mark of
6100 // this object to the prototype value and fall through to the
6101 // CAS-based locking scheme. Note that if our CAS fails, it means
6102 // that another thread raced us for the privilege of revoking the
6103 // bias of this particular object, so it's okay to continue in the
6104 // normal locking code.
6105 //
6106 // FIXME: due to a lack of registers we currently blow away the age
6107 // bits in this situation. Should attempt to preserve them.
6108 load_prototype_header(tmp_reg, obj_reg);
6109 if (os::is_MP()) {
6110 lock();
6111 }
6112 cmpxchgq(tmp_reg, Address(obj_reg, 0));
6113 // Fall through to the normal CAS-based lock, because no matter what
6114 // the result of the above CAS, some thread must have succeeded in
6115 // removing the bias bit from the object's header.
6116 if (counters != NULL) {
6117 cond_inc32(Assembler::zero,
6118 ExternalAddress((address) counters->revoked_lock_entry_count_addr()));
6119 }
6121 bind(cas_label);
6123 return null_check_offset;
6124 }
6126 void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) {
6127 Label L, E;
6129 #ifdef _WIN64
6130 // Windows always allocates space for its register args
6131 assert(num_args <= 4, "only register arguments supported");
6132 subq(rsp, frame::arg_reg_save_area_bytes);
6133 #endif
6135 // Align stack if necessary
6136 testl(rsp, 15);
6137 jcc(Assembler::zero, L);
6139 subq(rsp, 8);
6140 {
6141 call(RuntimeAddress(entry_point));
6142 }
6143 addq(rsp, 8);
6144 jmp(E);
6146 bind(L);
6147 {
6148 call(RuntimeAddress(entry_point));
6149 }
6151 bind(E);
6153 #ifdef _WIN64
6154 // restore stack pointer
6155 addq(rsp, frame::arg_reg_save_area_bytes);
6156 #endif
6158 }
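// Note (illustrative): both the SysV and Win64 ABIs require rsp to be
// 16-byte aligned at the call instruction. rsp is always 8-byte aligned
// here, so a single 8-byte pad is enough to restore 16-byte alignment
// whenever the testl above finds it off.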
6160 void MacroAssembler::cmp64(Register src1, AddressLiteral src2) {
6161 assert(!src2.is_lval(), "should use cmpptr");
6163 if (reachable(src2)) {
6164 cmpq(src1, as_Address(src2));
6165 } else {
6166 lea(rscratch1, src2);
6167 Assembler::cmpq(src1, Address(rscratch1, 0));
6168 }
6169 }
6171 int MacroAssembler::corrected_idivq(Register reg) {
6172 // Full implementation of Java ldiv and lrem; checks for special
6173 // case as described in JVM spec., p.243 & p.271. The function
6174 // returns the (pc) offset of the idivl instruction - may be needed
6175 // for implicit exceptions.
6176 //
6177 // normal case special case
6178 //
6179 // input : rax: dividend min_long
6180 // reg: divisor (may not be rax/rdx) -1
6181 //
6182 // output: rax: quotient (= rax idiv reg) min_long
6183 // rdx: remainder (= rax irem reg) 0
6184 assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
6185 static const int64_t min_long = 0x8000000000000000;
6186 Label normal_case, special_case;
6188 // check for special case
6189 cmp64(rax, ExternalAddress((address) &min_long));
6190 jcc(Assembler::notEqual, normal_case);
6191 xorl(rdx, rdx); // prepare rdx for possible special case (where
6192 // remainder = 0)
6193 cmpq(reg, -1);
6194 jcc(Assembler::equal, special_case);
6196 // handle normal case
6197 bind(normal_case);
6198 cdqq();
6199 int idivq_offset = offset();
6200 idivq(reg);
6202 // normal and special case exit
6203 bind(special_case);
6205 return idivq_offset;
6206 }
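// Why the special case: for Long.MIN_VALUE / -1 the hardware idiv
// overflows and raises #DE, while the JVM spec defines the quotient as
// Long.MIN_VALUE with remainder 0, which is exactly what rax (the
// dividend) and rdx (zeroed above) already hold on the branch to
// special_case.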
6208 void MacroAssembler::decrementq(Register reg, int value) {
6209 if (value == min_jint) { subq(reg, value); return; }
6210 if (value < 0) { incrementq(reg, -value); return; }
6211 if (value == 0) { ; return; }
6212 if (value == 1 && UseIncDec) { decq(reg) ; return; }
6213 /* else */ { subq(reg, value) ; return; }
6214 }
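// Note: the min_jint check avoids negating the most negative value
// (-min_jint overflows back to min_jint), which would otherwise send
// decrementq/incrementq into infinite mutual recursion.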
6216 void MacroAssembler::decrementq(Address dst, int value) {
6217 if (value == min_jint) { subq(dst, value); return; }
6218 if (value < 0) { incrementq(dst, -value); return; }
6219 if (value == 0) { ; return; }
6220 if (value == 1 && UseIncDec) { decq(dst) ; return; }
6221 /* else */ { subq(dst, value) ; return; }
6222 }
6224 void MacroAssembler::incrementq(Register reg, int value) {
6225 if (value == min_jint) { addq(reg, value); return; }
6226 if (value < 0) { decrementq(reg, -value); return; }
6227 if (value == 0) { ; return; }
6228 if (value == 1 && UseIncDec) { incq(reg) ; return; }
6229 /* else */ { addq(reg, value) ; return; }
6230 }
6232 void MacroAssembler::incrementq(Address dst, int value) {
6233 if (value == min_jint) { addq(dst, value); return; }
6234 if (value < 0) { decrementq(dst, -value); return; }
6235 if (value == 0) { ; return; }
6236 if (value == 1 && UseIncDec) { incq(dst) ; return; }
6237 /* else */ { addq(dst, value) ; return; }
6238 }
6240 // 32bit can do a case table jump in one instruction but we no longer allow the base
6241 // to be installed in the Address class
6242 void MacroAssembler::jump(ArrayAddress entry) {
6243 lea(rscratch1, entry.base());
6244 Address dispatch = entry.index();
6245 assert(dispatch._base == noreg, "must be");
6246 dispatch._base = rscratch1;
6247 jmp(dispatch);
6248 }
6250 void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
6251 ShouldNotReachHere(); // 64bit doesn't use two regs
6252 cmpq(x_lo, y_lo);
6253 }
6255 void MacroAssembler::lea(Register dst, AddressLiteral src) {
6256 mov_literal64(dst, (intptr_t)src.target(), src.rspec());
6257 }
6259 void MacroAssembler::lea(Address dst, AddressLiteral adr) {
6260 mov_literal64(rscratch1, (intptr_t)adr.target(), adr.rspec());
6261 movptr(dst, rscratch1);
6262 }
6264 void MacroAssembler::leave() {
6265 // %%% is this really better? Why not on 32bit too?
6266 emit_byte(0xC9); // LEAVE
6267 }
6269 void MacroAssembler::lneg(Register hi, Register lo) {
6270 ShouldNotReachHere(); // 64bit doesn't use two regs
6271 negq(lo);
6272 }
6274 void MacroAssembler::movoop(Register dst, jobject obj) {
6275 mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate());
6276 }
6278 void MacroAssembler::movoop(Address dst, jobject obj) {
6279 mov_literal64(rscratch1, (intptr_t)obj, oop_Relocation::spec_for_immediate());
6280 movq(dst, rscratch1);
6281 }
6283 void MacroAssembler::movptr(Register dst, AddressLiteral src) {
6284 if (src.is_lval()) {
6285 mov_literal64(dst, (intptr_t)src.target(), src.rspec());
6286 } else {
6287 if (reachable(src)) {
6288 movq(dst, as_Address(src));
6289 } else {
6290 lea(rscratch1, src);
6291 movq(dst, Address(rscratch1,0));
6292 }
6293 }
6294 }
6296 void MacroAssembler::movptr(ArrayAddress dst, Register src) {
6297 movq(as_Address(dst), src);
6298 }
6300 void MacroAssembler::movptr(Register dst, ArrayAddress src) {
6301 movq(dst, as_Address(src));
6302 }
6304 // src should NEVER be a real pointer. Use AddressLiteral for true pointers
6305 void MacroAssembler::movptr(Address dst, intptr_t src) {
6306 mov64(rscratch1, src);
6307 movq(dst, rscratch1);
6308 }
6310 // These are mostly for initializing NULL
6311 void MacroAssembler::movptr(Address dst, int32_t src) {
6312 movslq(dst, src);
6313 }
6315 void MacroAssembler::movptr(Register dst, int32_t src) {
6316 mov64(dst, (intptr_t)src);
6317 }
6319 void MacroAssembler::pushoop(jobject obj) {
6320 movoop(rscratch1, obj);
6321 push(rscratch1);
6322 }
6324 void MacroAssembler::pushptr(AddressLiteral src) {
6325 lea(rscratch1, src);
6326 if (src.is_lval()) {
6327 push(rscratch1);
6328 } else {
6329 pushq(Address(rscratch1, 0));
6330 }
6331 }
6333 void MacroAssembler::reset_last_Java_frame(bool clear_fp,
6334 bool clear_pc) {
6335 // we must set sp to zero to clear frame
6336 movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
6337 // must clear fp, so that compiled frames are not confused; it is
6338 // possible that we need it only for debugging
6339 if (clear_fp) {
6340 movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
6341 }
6343 if (clear_pc) {
6344 movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
6345 }
6346 }
6348 void MacroAssembler::set_last_Java_frame(Register last_java_sp,
6349 Register last_java_fp,
6350 address last_java_pc) {
6351 // determine last_java_sp register
6352 if (!last_java_sp->is_valid()) {
6353 last_java_sp = rsp;
6354 }
6356 // last_java_fp is optional
6357 if (last_java_fp->is_valid()) {
6358 movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()),
6359 last_java_fp);
6360 }
6362 // last_java_pc is optional
6363 if (last_java_pc != NULL) {
6364 Address java_pc(r15_thread,
6365 JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
6366 lea(rscratch1, InternalAddress(last_java_pc));
6367 movptr(java_pc, rscratch1);
6368 }
6370 movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
6371 }
6373 static void pass_arg0(MacroAssembler* masm, Register arg) {
6374 if (c_rarg0 != arg ) {
6375 masm->mov(c_rarg0, arg);
6376 }
6377 }
6379 static void pass_arg1(MacroAssembler* masm, Register arg) {
6380 if (c_rarg1 != arg ) {
6381 masm->mov(c_rarg1, arg);
6382 }
6383 }
6385 static void pass_arg2(MacroAssembler* masm, Register arg) {
6386 if (c_rarg2 != arg ) {
6387 masm->mov(c_rarg2, arg);
6388 }
6389 }
6391 static void pass_arg3(MacroAssembler* masm, Register arg) {
6392 if (c_rarg3 != arg ) {
6393 masm->mov(c_rarg3, arg);
6394 }
6395 }
6397 void MacroAssembler::stop(const char* msg) {
6398 address rip = pc();
6399 pusha(); // get regs on stack
6400 lea(c_rarg0, ExternalAddress((address) msg));
6401 lea(c_rarg1, InternalAddress(rip));
6402 movq(c_rarg2, rsp); // pass pointer to regs array
6403 andq(rsp, -16); // align stack as required by ABI
6404 call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
6405 hlt();
6406 }
6408 void MacroAssembler::warn(const char* msg) {
6409 push(rbp);
6410 movq(rbp, rsp);
6411 andq(rsp, -16); // align stack as required by push_CPU_state and call
6412 push_CPU_state(); // keeps alignment at 16 bytes
6413 lea(c_rarg0, ExternalAddress((address) msg));
6414 call_VM_leaf(CAST_FROM_FN_PTR(address, warning), c_rarg0);
6415 pop_CPU_state();
6416 mov(rsp, rbp);
6417 pop(rbp);
6418 }
6420 void MacroAssembler::print_state() {
6421 address rip = pc();
6422 pusha(); // get regs on stack
6423 push(rbp);
6424 movq(rbp, rsp);
6425 andq(rsp, -16); // align stack as required by push_CPU_state and call
6426 push_CPU_state(); // keeps alignment at 16 bytes
6428 lea(c_rarg0, InternalAddress(rip));
6429 lea(c_rarg1, Address(rbp, wordSize)); // pass pointer to regs array
6430 call_VM_leaf(CAST_FROM_FN_PTR(address, MacroAssembler::print_state64), c_rarg0, c_rarg1);
6432 pop_CPU_state();
6433 mov(rsp, rbp);
6434 pop(rbp);
6435 popa();
6436 }
6438 #ifndef PRODUCT
6439 extern "C" void findpc(intptr_t x);
6440 #endif
6442 void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
6443 // In order to get locks to work, we need to fake an in_VM state
6444 if (ShowMessageBoxOnError) {
6445 JavaThread* thread = JavaThread::current();
6446 JavaThreadState saved_state = thread->thread_state();
6447 thread->set_thread_state(_thread_in_vm);
6448 #ifndef PRODUCT
6449 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
6450 ttyLocker ttyl;
6451 BytecodeCounter::print();
6452 }
6453 #endif
6454 // To see where a verify_oop failed, get $ebx+40/X for this frame.
6455 // XXX correct this offset for amd64
6456 // This is the value of eip which points to where verify_oop will return.
6457 if (os::message_box(msg, "Execution stopped, print registers?")) {
6458 print_state64(pc, regs);
6459 BREAKPOINT;
6460 assert(false, "start up GDB");
6461 }
6462 ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
6463 } else {
6464 ttyLocker ttyl;
6465 ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n",
6466 msg);
6467 assert(false, err_msg("DEBUG MESSAGE: %s", msg));
6468 }
6469 }
6471 void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) {
6472 ttyLocker ttyl;
6473 FlagSetting fs(Debugging, true);
6474 tty->print_cr("rip = 0x%016lx", pc);
6475 #ifndef PRODUCT
6476 tty->cr();
6477 findpc(pc);
6478 tty->cr();
6479 #endif
6480 #define PRINT_REG(rax, value) \
6481 { tty->print("%s = ", #rax); os::print_location(tty, value); }
6482 PRINT_REG(rax, regs[15]);
6483 PRINT_REG(rbx, regs[12]);
6484 PRINT_REG(rcx, regs[14]);
6485 PRINT_REG(rdx, regs[13]);
6486 PRINT_REG(rdi, regs[8]);
6487 PRINT_REG(rsi, regs[9]);
6488 PRINT_REG(rbp, regs[10]);
6489 PRINT_REG(rsp, regs[11]);
6490 PRINT_REG(r8 , regs[7]);
6491 PRINT_REG(r9 , regs[6]);
6492 PRINT_REG(r10, regs[5]);
6493 PRINT_REG(r11, regs[4]);
6494 PRINT_REG(r12, regs[3]);
6495 PRINT_REG(r13, regs[2]);
6496 PRINT_REG(r14, regs[1]);
6497 PRINT_REG(r15, regs[0]);
6498 #undef PRINT_REG
6499 // Print some words near the top of the stack.
6500 int64_t* rsp = (int64_t*) regs[11];
6501 int64_t* dump_sp = rsp;
6502 for (int col1 = 0; col1 < 8; col1++) {
6503 tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (int64_t)dump_sp);
6504 os::print_location(tty, *dump_sp++);
6505 }
6506 for (int row = 0; row < 25; row++) {
6507 tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (int64_t)dump_sp);
6508 for (int col = 0; col < 4; col++) {
6509 tty->print(" 0x%016lx", *dump_sp++);
6510 }
6511 tty->cr();
6512 }
6513 // Print some instructions around pc:
6514 Disassembler::decode((address)pc-64, (address)pc);
6515 tty->print_cr("--------");
6516 Disassembler::decode((address)pc, (address)pc+32);
6517 }
6519 #endif // _LP64
6521 // Now versions that are common to 32/64 bit
6523 void MacroAssembler::addptr(Register dst, int32_t imm32) {
6524 LP64_ONLY(addq(dst, imm32)) NOT_LP64(addl(dst, imm32));
6525 }
6527 void MacroAssembler::addptr(Register dst, Register src) {
6528 LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
6529 }
6531 void MacroAssembler::addptr(Address dst, Register src) {
6532 LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
6533 }
6535 void MacroAssembler::addsd(XMMRegister dst, AddressLiteral src) {
6536 if (reachable(src)) {
6537 Assembler::addsd(dst, as_Address(src));
6538 } else {
6539 lea(rscratch1, src);
6540 Assembler::addsd(dst, Address(rscratch1, 0));
6541 }
6542 }
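// Pattern used by most AddressLiteral wrappers below: if the target is
// within pc-relative range (reachable, always true on 32-bit), address
// it directly; otherwise materialize the address in rscratch1 and use a
// register-indirect operand (64-bit only).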
6544 void MacroAssembler::addss(XMMRegister dst, AddressLiteral src) {
6545 if (reachable(src)) {
6546 addss(dst, as_Address(src));
6547 } else {
6548 lea(rscratch1, src);
6549 addss(dst, Address(rscratch1, 0));
6550 }
6551 }
6553 void MacroAssembler::align(int modulus) {
6554 if (offset() % modulus != 0) {
6555 nop(modulus - (offset() % modulus));
6556 }
6557 }
6559 void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src) {
6560 // Used in sign-masking with aligned address.
6561 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
6562 if (reachable(src)) {
6563 Assembler::andpd(dst, as_Address(src));
6564 } else {
6565 lea(rscratch1, src);
6566 Assembler::andpd(dst, Address(rscratch1, 0));
6567 }
6568 }
6570 void MacroAssembler::andps(XMMRegister dst, AddressLiteral src) {
6571 // Used in sign-masking with aligned address.
6572 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
6573 if (reachable(src)) {
6574 Assembler::andps(dst, as_Address(src));
6575 } else {
6576 lea(rscratch1, src);
6577 Assembler::andps(dst, Address(rscratch1, 0));
6578 }
6579 }
6581 void MacroAssembler::andptr(Register dst, int32_t imm32) {
6582 LP64_ONLY(andq(dst, imm32)) NOT_LP64(andl(dst, imm32));
6583 }
6585 void MacroAssembler::atomic_incl(AddressLiteral counter_addr) {
6586 pushf();
6587 if (os::is_MP())
6588 lock();
6589 incrementl(counter_addr);
6590 popf();
6591 }
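// pushf/popf preserve EFLAGS around the locked increment so the counter
// bump does not clobber the caller's condition codes.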
6593 // Writes to successive stack pages until the given offset is reached,
6594 // to check for stack overflow + shadow pages. This clobbers tmp.
6595 void MacroAssembler::bang_stack_size(Register size, Register tmp) {
6596 movptr(tmp, rsp);
6597 // Bang stack for total size given plus shadow page size.
6598 // Bang one page at a time because large size can bang beyond yellow and
6599 // red zones.
6600 Label loop;
6601 bind(loop);
6602 movl(Address(tmp, (-os::vm_page_size())), size );
6603 subptr(tmp, os::vm_page_size());
6604 subl(size, os::vm_page_size());
6605 jcc(Assembler::greater, loop);
6607 // Bang down shadow pages too.
6608 // The -1 because we already subtracted 1 page.
6609 for (int i = 0; i< StackShadowPages-1; i++) {
6610 // This could be any sized move, but since it can serve as a debugging
6611 // crumb, the bigger the better.
6612 movptr(Address(tmp, (-i*os::vm_page_size())), size );
6613 }
6614 }
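// Illustrative trace (4K pages, size = 12K, StackShadowPages = 3): the
// first loop stores to rsp-4K, rsp-8K and rsp-12K; the shadow loop then
// issues StackShadowPages-1 more stores walking down from rsp-12K,
// forcing any guard-page trap to happen here rather than at an
// unpredictable point later.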
6616 void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) {
6617 assert(UseBiasedLocking, "why call this otherwise?");
6619 // Check for biased locking unlock case, which is a no-op
6620 // Note: we do not have to check the thread ID for two reasons.
6621 // First, the interpreter checks for IllegalMonitorStateException at
6622 // a higher level. Second, if the bias was revoked while we held the
6623 // lock, the object could not be rebiased toward another thread, so
6624 // the bias bit would be clear.
6625 movptr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
6626 andptr(temp_reg, markOopDesc::biased_lock_mask_in_place);
6627 cmpptr(temp_reg, markOopDesc::biased_lock_pattern);
6628 jcc(Assembler::equal, done);
6629 }
6631 void MacroAssembler::c2bool(Register x) {
6632 // implements x == 0 ? 0 : 1
6633 // note: must only look at least-significant byte of x
6634 // since C-style booleans are stored in one byte
6635 // only! (was bug)
6636 andl(x, 0xFF);
6637 setb(Assembler::notZero, x);
6638 }
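// Equivalent C sketch: x = ((x & 0xFF) != 0) ? 1 : 0;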
6640 // Wouldn't need if AddressLiteral version had new name
6641 void MacroAssembler::call(Label& L, relocInfo::relocType rtype) {
6642 Assembler::call(L, rtype);
6643 }
6645 void MacroAssembler::call(Register entry) {
6646 Assembler::call(entry);
6647 }
6649 void MacroAssembler::call(AddressLiteral entry) {
6650 if (reachable(entry)) {
6651 Assembler::call_literal(entry.target(), entry.rspec());
6652 } else {
6653 lea(rscratch1, entry);
6654 Assembler::call(rscratch1);
6655 }
6656 }
6658 // Implementation of call_VM versions
6660 void MacroAssembler::call_VM(Register oop_result,
6661 address entry_point,
6662 bool check_exceptions) {
6663 Label C, E;
6664 call(C, relocInfo::none);
6665 jmp(E);
6667 bind(C);
6668 call_VM_helper(oop_result, entry_point, 0, check_exceptions);
6669 ret(0);
6671 bind(E);
6672 }
6674 void MacroAssembler::call_VM(Register oop_result,
6675 address entry_point,
6676 Register arg_1,
6677 bool check_exceptions) {
6678 Label C, E;
6679 call(C, relocInfo::none);
6680 jmp(E);
6682 bind(C);
6683 pass_arg1(this, arg_1);
6684 call_VM_helper(oop_result, entry_point, 1, check_exceptions);
6685 ret(0);
6687 bind(E);
6688 }
6690 void MacroAssembler::call_VM(Register oop_result,
6691 address entry_point,
6692 Register arg_1,
6693 Register arg_2,
6694 bool check_exceptions) {
6695 Label C, E;
6696 call(C, relocInfo::none);
6697 jmp(E);
6699 bind(C);
6701 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
6703 pass_arg2(this, arg_2);
6704 pass_arg1(this, arg_1);
6705 call_VM_helper(oop_result, entry_point, 2, check_exceptions);
6706 ret(0);
6708 bind(E);
6709 }
6711 void MacroAssembler::call_VM(Register oop_result,
6712 address entry_point,
6713 Register arg_1,
6714 Register arg_2,
6715 Register arg_3,
6716 bool check_exceptions) {
6717 Label C, E;
6718 call(C, relocInfo::none);
6719 jmp(E);
6721 bind(C);
6723 LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
6724 LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
6725 pass_arg3(this, arg_3);
6727 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
6728 pass_arg2(this, arg_2);
6730 pass_arg1(this, arg_1);
6731 call_VM_helper(oop_result, entry_point, 3, check_exceptions);
6732 ret(0);
6734 bind(E);
6735 }
6737 void MacroAssembler::call_VM(Register oop_result,
6738 Register last_java_sp,
6739 address entry_point,
6740 int number_of_arguments,
6741 bool check_exceptions) {
6742 Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
6743 call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
6744 }
6746 void MacroAssembler::call_VM(Register oop_result,
6747 Register last_java_sp,
6748 address entry_point,
6749 Register arg_1,
6750 bool check_exceptions) {
6751 pass_arg1(this, arg_1);
6752 call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
6753 }
6755 void MacroAssembler::call_VM(Register oop_result,
6756 Register last_java_sp,
6757 address entry_point,
6758 Register arg_1,
6759 Register arg_2,
6760 bool check_exceptions) {
6762 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
6763 pass_arg2(this, arg_2);
6764 pass_arg1(this, arg_1);
6765 call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
6766 }
6768 void MacroAssembler::call_VM(Register oop_result,
6769 Register last_java_sp,
6770 address entry_point,
6771 Register arg_1,
6772 Register arg_2,
6773 Register arg_3,
6774 bool check_exceptions) {
6775 LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
6776 LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
6777 pass_arg3(this, arg_3);
6778 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
6779 pass_arg2(this, arg_2);
6780 pass_arg1(this, arg_1);
6781 call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
6782 }
6784 void MacroAssembler::super_call_VM(Register oop_result,
6785 Register last_java_sp,
6786 address entry_point,
6787 int number_of_arguments,
6788 bool check_exceptions) {
6789 Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
6790 MacroAssembler::call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
6791 }
6793 void MacroAssembler::super_call_VM(Register oop_result,
6794 Register last_java_sp,
6795 address entry_point,
6796 Register arg_1,
6797 bool check_exceptions) {
6798 pass_arg1(this, arg_1);
6799 super_call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
6800 }
6802 void MacroAssembler::super_call_VM(Register oop_result,
6803 Register last_java_sp,
6804 address entry_point,
6805 Register arg_1,
6806 Register arg_2,
6807 bool check_exceptions) {
6809 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
6810 pass_arg2(this, arg_2);
6811 pass_arg1(this, arg_1);
6812 super_call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
6813 }
6815 void MacroAssembler::super_call_VM(Register oop_result,
6816 Register last_java_sp,
6817 address entry_point,
6818 Register arg_1,
6819 Register arg_2,
6820 Register arg_3,
6821 bool check_exceptions) {
6822 LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
6823 LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
6824 pass_arg3(this, arg_3);
6825 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
6826 pass_arg2(this, arg_2);
6827 pass_arg1(this, arg_1);
6828 super_call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
6829 }
6831 void MacroAssembler::call_VM_base(Register oop_result,
6832 Register java_thread,
6833 Register last_java_sp,
6834 address entry_point,
6835 int number_of_arguments,
6836 bool check_exceptions) {
6837 // determine java_thread register
6838 if (!java_thread->is_valid()) {
6839 #ifdef _LP64
6840 java_thread = r15_thread;
6841 #else
6842 java_thread = rdi;
6843 get_thread(java_thread);
6844 #endif // LP64
6845 }
6846 // determine last_java_sp register
6847 if (!last_java_sp->is_valid()) {
6848 last_java_sp = rsp;
6849 }
6850 // debugging support
6851 assert(number_of_arguments >= 0 , "cannot have negative number of arguments");
6852 LP64_ONLY(assert(java_thread == r15_thread, "unexpected register"));
6853 #ifdef ASSERT
6854 // TraceBytecodes does not use r12 but saves it over the call, so don't verify
6855 // r12 is the heapbase.
6856 LP64_ONLY(if (UseCompressedOops && !TraceBytecodes) verify_heapbase("call_VM_base");)
6857 #endif // ASSERT
6859 assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result");
6860 assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");
6862 // push java thread (becomes first argument of C function)
6864 NOT_LP64(push(java_thread); number_of_arguments++);
6865 LP64_ONLY(mov(c_rarg0, r15_thread));
6867 // set last Java frame before call
6868 assert(last_java_sp != rbp, "can't use ebp/rbp");
6870 // Only interpreter should have to set fp
6871 set_last_Java_frame(java_thread, last_java_sp, rbp, NULL);
6873 // do the call, remove parameters
6874 MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
6876 // restore the thread (cannot use the pushed argument since arguments
6877 // may be overwritten by C code generated by an optimizing compiler);
6878 // however we can use the register value directly if it is callee saved.
6879 if (LP64_ONLY(true ||) java_thread == rdi || java_thread == rsi) {
6880 // rdi & rsi (also r15) are callee saved -> nothing to do
6881 #ifdef ASSERT
6882 guarantee(java_thread != rax, "change this code");
6883 push(rax);
6884 { Label L;
6885 get_thread(rax);
6886 cmpptr(java_thread, rax);
6887 jcc(Assembler::equal, L);
6888 STOP("MacroAssembler::call_VM_base: rdi not callee saved?");
6889 bind(L);
6890 }
6891 pop(rax);
6892 #endif
6893 } else {
6894 get_thread(java_thread);
6895 }
6896 // reset last Java frame
6897 // Only interpreter should have to clear fp
6898 reset_last_Java_frame(java_thread, true, false);
6900 #ifndef CC_INTERP
6901 // C++ interp handles this in the interpreter
6902 check_and_handle_popframe(java_thread);
6903 check_and_handle_earlyret(java_thread);
6904 #endif /* CC_INTERP */
6906 if (check_exceptions) {
6907 // check for pending exceptions (java_thread is set upon return)
6908 cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
6909 #ifndef _LP64
6910 jump_cc(Assembler::notEqual,
6911 RuntimeAddress(StubRoutines::forward_exception_entry()));
6912 #else
6913 // This used to conditionally jump to forward_exception, but if the code
6914 // is relocated the branch might not reach, so we must jump around it to
6915 // guarantee the target is always reachable.
6917 Label ok;
6918 jcc(Assembler::equal, ok);
6919 jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
6920 bind(ok);
6921 #endif // LP64
6922 }
6924 // get oop result if there is one and reset the value in the thread
6925 if (oop_result->is_valid()) {
6926 movptr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
6927 movptr(Address(java_thread, JavaThread::vm_result_offset()), NULL_WORD);
6928 verify_oop(oop_result, "broken oop in call_VM_base");
6929 }
6930 }
6932 void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
6934 // Calculate the value for last_Java_sp
6935 // somewhat subtle. call_VM does an intermediate call
6936 // which places a return address on the stack just under the
6937 // stack pointer, as if the user had finished with it. This allows
6938 // us to retrieve last_Java_pc from last_Java_sp[-1].
6939 // On 32bit we then have to push additional args on the stack to accomplish
6940 // the actual requested call. On 64bit call_VM can only use register args,
6941 // so the only extra space is the return address that call_VM created.
6942 // This hopefully explains the calculations here.
6944 #ifdef _LP64
6945 // We've pushed one address, correct last_Java_sp
6946 lea(rax, Address(rsp, wordSize));
6947 #else
6948 lea(rax, Address(rsp, (1 + number_of_arguments) * wordSize));
6949 #endif // LP64
6951 call_VM_base(oop_result, noreg, rax, entry_point, number_of_arguments, check_exceptions);
6953 }
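// Stack sketch at the call_VM_base call above (32-bit, n args already
// pushed by the pass_argN helpers):
//   rsp                  -> [ args (n words)  ]
//   rsp + n*wordSize     -> [ return address  ]  == last_Java_sp[-1]
//   rsp + (1+n)*wordSize == last_Java_sp (in rax), the sp the caller saw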
6955 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
6956 call_VM_leaf_base(entry_point, number_of_arguments);
6957 }
6959 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
6960 pass_arg0(this, arg_0);
6961 call_VM_leaf(entry_point, 1);
6962 }
6964 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
6966 LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
6967 pass_arg1(this, arg_1);
6968 pass_arg0(this, arg_0);
6969 call_VM_leaf(entry_point, 2);
6970 }
6972 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
6973 LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
6974 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
6975 pass_arg2(this, arg_2);
6976 LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
6977 pass_arg1(this, arg_1);
6978 pass_arg0(this, arg_0);
6979 call_VM_leaf(entry_point, 3);
6980 }
6982 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
6983 pass_arg0(this, arg_0);
6984 MacroAssembler::call_VM_leaf_base(entry_point, 1);
6985 }
6987 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
6989 LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
6990 pass_arg1(this, arg_1);
6991 pass_arg0(this, arg_0);
6992 MacroAssembler::call_VM_leaf_base(entry_point, 2);
6993 }
6995 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
6996 LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
6997 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
6998 pass_arg2(this, arg_2);
6999 LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
7000 pass_arg1(this, arg_1);
7001 pass_arg0(this, arg_0);
7002 MacroAssembler::call_VM_leaf_base(entry_point, 3);
7003 }
7005 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
7006 LP64_ONLY(assert(arg_0 != c_rarg3, "smashed arg"));
7007 LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
7008 LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
7009 pass_arg3(this, arg_3);
7010 LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
7011 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
7012 pass_arg2(this, arg_2);
7013 LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
7014 pass_arg1(this, arg_1);
7015 pass_arg0(this, arg_0);
7016 MacroAssembler::call_VM_leaf_base(entry_point, 4);
7017 }
7019 void MacroAssembler::check_and_handle_earlyret(Register java_thread) {
7020 }
7022 void MacroAssembler::check_and_handle_popframe(Register java_thread) {
7023 }
7025 void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm) {
7026 if (reachable(src1)) {
7027 cmpl(as_Address(src1), imm);
7028 } else {
7029 lea(rscratch1, src1);
7030 cmpl(Address(rscratch1, 0), imm);
7031 }
7032 }
7034 void MacroAssembler::cmp32(Register src1, AddressLiteral src2) {
7035 assert(!src2.is_lval(), "use cmpptr");
7036 if (reachable(src2)) {
7037 cmpl(src1, as_Address(src2));
7038 } else {
7039 lea(rscratch1, src2);
7040 cmpl(src1, Address(rscratch1, 0));
7041 }
7042 }
7044 void MacroAssembler::cmp32(Register src1, int32_t imm) {
7045 Assembler::cmpl(src1, imm);
7046 }
7048 void MacroAssembler::cmp32(Register src1, Address src2) {
7049 Assembler::cmpl(src1, src2);
7050 }
7052 void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
7053 ucomisd(opr1, opr2);
7055 Label L;
7056 if (unordered_is_less) {
7057 movl(dst, -1);
7058 jcc(Assembler::parity, L);
7059 jcc(Assembler::below , L);
7060 movl(dst, 0);
7061 jcc(Assembler::equal , L);
7062 increment(dst);
7063 } else { // unordered is greater
7064 movl(dst, 1);
7065 jcc(Assembler::parity, L);
7066 jcc(Assembler::above , L);
7067 movl(dst, 0);
7068 jcc(Assembler::equal , L);
7069 decrementl(dst);
7070 }
7071 bind(L);
7072 }
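// Implements Java's dcmpl/dcmpg semantics: ucomisd sets the parity flag
// on an unordered compare (NaN operand), so the preloaded -1 or +1
// becomes the NaN result; unordered_is_less corresponds to dcmpl, the
// other case to dcmpg.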
7074 void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
7075 ucomiss(opr1, opr2);
7077 Label L;
7078 if (unordered_is_less) {
7079 movl(dst, -1);
7080 jcc(Assembler::parity, L);
7081 jcc(Assembler::below , L);
7082 movl(dst, 0);
7083 jcc(Assembler::equal , L);
7084 increment(dst);
7085 } else { // unordered is greater
7086 movl(dst, 1);
7087 jcc(Assembler::parity, L);
7088 jcc(Assembler::above , L);
7089 movl(dst, 0);
7090 jcc(Assembler::equal , L);
7091 decrementl(dst);
7092 }
7093 bind(L);
7094 }
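// Same NaN handling as cmpsd2int above, for floats (fcmpl/fcmpg).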
7097 void MacroAssembler::cmp8(AddressLiteral src1, int imm) {
7098 if (reachable(src1)) {
7099 cmpb(as_Address(src1), imm);
7100 } else {
7101 lea(rscratch1, src1);
7102 cmpb(Address(rscratch1, 0), imm);
7103 }
7104 }
7106 void MacroAssembler::cmpptr(Register src1, AddressLiteral src2) {
7107 #ifdef _LP64
7108 if (src2.is_lval()) {
7109 movptr(rscratch1, src2);
7110 Assembler::cmpq(src1, rscratch1);
7111 } else if (reachable(src2)) {
7112 cmpq(src1, as_Address(src2));
7113 } else {
7114 lea(rscratch1, src2);
7115 Assembler::cmpq(src1, Address(rscratch1, 0));
7116 }
7117 #else
7118 if (src2.is_lval()) {
7119 cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
7120 } else {
7121 cmpl(src1, as_Address(src2));
7122 }
7123 #endif // _LP64
7124 }
7126 void MacroAssembler::cmpptr(Address src1, AddressLiteral src2) {
7127 assert(src2.is_lval(), "not a mem-mem compare");
7128 #ifdef _LP64
7129 // moves src2's literal address
7130 movptr(rscratch1, src2);
7131 Assembler::cmpq(src1, rscratch1);
7132 #else
7133 cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
7134 #endif // _LP64
7135 }
7137 void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr) {
7138 if (reachable(adr)) {
7139 if (os::is_MP())
7140 lock();
7141 cmpxchgptr(reg, as_Address(adr));
7142 } else {
7143 lea(rscratch1, adr);
7144 if (os::is_MP())
7145 lock();
7146 cmpxchgptr(reg, Address(rscratch1, 0));
7147 }
7148 }
7150 void MacroAssembler::cmpxchgptr(Register reg, Address adr) {
7151 LP64_ONLY(cmpxchgq(reg, adr)) NOT_LP64(cmpxchgl(reg, adr));
7152 }
7154 void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src) {
7155 if (reachable(src)) {
7156 Assembler::comisd(dst, as_Address(src));
7157 } else {
7158 lea(rscratch1, src);
7159 Assembler::comisd(dst, Address(rscratch1, 0));
7160 }
7161 }
7163 void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src) {
7164 if (reachable(src)) {
7165 Assembler::comiss(dst, as_Address(src));
7166 } else {
7167 lea(rscratch1, src);
7168 Assembler::comiss(dst, Address(rscratch1, 0));
7169 }
7170 }
7173 void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr) {
7174 Condition negated_cond = negate_condition(cond);
7175 Label L;
7176 jcc(negated_cond, L);
7177 atomic_incl(counter_addr);
7178 bind(L);
7179 }
7181 int MacroAssembler::corrected_idivl(Register reg) {
7182 // Full implementation of Java idiv and irem; checks for
7183 // special case as described in JVM spec., p.243 & p.271.
7184 // The function returns the (pc) offset of the idivl
7185 // instruction - may be needed for implicit exceptions.
7186 //
7187 // normal case special case
7188 //
7189 // input : rax: dividend min_int
7190 // reg: divisor (may not be rax/rdx) -1
7191 //
7192 // output: rax: quotient (= rax idiv reg) min_int
7193 // rdx: remainder (= rax irem reg) 0
7194 assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
7195 const int min_int = 0x80000000;
7196 Label normal_case, special_case;
7198 // check for special case
7199 cmpl(rax, min_int);
7200 jcc(Assembler::notEqual, normal_case);
7201 xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0)
7202 cmpl(reg, -1);
7203 jcc(Assembler::equal, special_case);
7205 // handle normal case
7206 bind(normal_case);
7207 cdql();
7208 int idivl_offset = offset();
7209 idivl(reg);
7211 // normal and special case exit
7212 bind(special_case);
7214 return idivl_offset;
7215 }
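// Note: the special case exists because the hardware idivl raises #DE for
// min_int / -1 (the quotient 2^31 is not representable in 32 bits), while
// Java requires:
//   min_int / -1 == min_int   (quotient wraps, rax already holds it)
//   min_int % -1 == 0         (remainder, prepared in rdx by the xorl)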
7219 void MacroAssembler::decrementl(Register reg, int value) {
7220 if (value == min_jint) {subl(reg, value) ; return; }
7221 if (value < 0) { incrementl(reg, -value); return; }
7222 if (value == 0) { ; return; }
7223 if (value == 1 && UseIncDec) { decl(reg) ; return; }
7224 /* else */ { subl(reg, value) ; return; }
7225 }
7227 void MacroAssembler::decrementl(Address dst, int value) {
7228 if (value == min_jint) {subl(dst, value) ; return; }
7229 if (value < 0) { incrementl(dst, -value); return; }
7230 if (value == 0) { ; return; }
7231 if (value == 1 && UseIncDec) { decl(dst) ; return; }
7232 /* else */ { subl(dst, value) ; return; }
7233 }
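// Note: min_jint is special-cased first because -min_jint overflows back
// to min_jint in two's complement; without the guard the negate-and-forward
// path would bounce between incrementl and decrementl forever.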
7235 void MacroAssembler::division_with_shift (Register reg, int shift_value) {
7236 assert (shift_value > 0, "illegal shift value");
7237 Label _is_positive;
7238 testl (reg, reg);
7239 jcc (Assembler::positive, _is_positive);
7240 int offset = (1 << shift_value) - 1 ;
7242 if (offset == 1) {
7243 incrementl(reg);
7244 } else {
7245 addl(reg, offset);
7246 }
7248 bind (_is_positive);
7249 sarl(reg, shift_value);
7250 }
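// Note (illustrative only): a plain arithmetic shift rounds toward negative
// infinity, but Java division rounds toward zero, hence the bias added to
// negative inputs. A C sketch of the same computation:
//
//   int div_pow2(int x, int shift) {
//     if (x < 0) x += (1 << shift) - 1;  // bias so sar rounds toward zero
//     return x >> shift;                 // e.g. -7/4: (-7+3)>>2 == -1
//   }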
7252 void MacroAssembler::divsd(XMMRegister dst, AddressLiteral src) {
7253 if (reachable(src)) {
7254 Assembler::divsd(dst, as_Address(src));
7255 } else {
7256 lea(rscratch1, src);
7257 Assembler::divsd(dst, Address(rscratch1, 0));
7258 }
7259 }
7261 void MacroAssembler::divss(XMMRegister dst, AddressLiteral src) {
7262 if (reachable(src)) {
7263 Assembler::divss(dst, as_Address(src));
7264 } else {
7265 lea(rscratch1, src);
7266 Assembler::divss(dst, Address(rscratch1, 0));
7267 }
7268 }
7270 // !defined(COMPILER2) is because of stupid core builds
7271 #if !defined(_LP64) || defined(COMPILER1) || !defined(COMPILER2)
7272 void MacroAssembler::empty_FPU_stack() {
7273 if (VM_Version::supports_mmx()) {
7274 emms();
7275 } else {
7276 for (int i = 8; i-- > 0; ) ffree(i);
7277 }
7278 }
7279 #endif // !LP64 || C1 || !C2
7282 // Defines obj, preserves var_size_in_bytes
7283 void MacroAssembler::eden_allocate(Register obj,
7284 Register var_size_in_bytes,
7285 int con_size_in_bytes,
7286 Register t1,
7287 Label& slow_case) {
7288 assert(obj == rax, "obj must be in rax, for cmpxchg");
7289 assert_different_registers(obj, var_size_in_bytes, t1);
7290 if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
7291 jmp(slow_case);
7292 } else {
7293 Register end = t1;
7294 Label retry;
7295 bind(retry);
7296 ExternalAddress heap_top((address) Universe::heap()->top_addr());
7297 movptr(obj, heap_top);
7298 if (var_size_in_bytes == noreg) {
7299 lea(end, Address(obj, con_size_in_bytes));
7300 } else {
7301 lea(end, Address(obj, var_size_in_bytes, Address::times_1));
7302 }
7303 // if end < obj then we wrapped around => object too long => slow case
7304 cmpptr(end, obj);
7305 jcc(Assembler::below, slow_case);
7306 cmpptr(end, ExternalAddress((address) Universe::heap()->end_addr()));
7307 jcc(Assembler::above, slow_case);
7308 // Compare obj with the top addr; if they are still equal, store end (the
7309 // new top addr) at the top addr pointer. Sets ZF if it was equal, and clears
7310 // it otherwise. Use lock prefix for atomicity on MPs.
7311 locked_cmpxchgptr(end, heap_top);
7312 jcc(Assembler::notEqual, retry);
7313 }
7314 }
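// Note (illustrative only): the retry loop above is a classic bump-pointer
// allocation with CAS, roughly:
//
//   do {
//     obj = *heap_top;
//     end = obj + size;
//     if (end < obj || end > heap_end) goto slow_case;  // wrap or full
//   } while (!CAS(heap_top, obj /* expected */, end /* new top */));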
7316 void MacroAssembler::enter() {
7317 push(rbp);
7318 mov(rbp, rsp);
7319 }
7321 // A 5 byte nop that is safe for patching (see patch_verified_entry)
7322 void MacroAssembler::fat_nop() {
7323 if (UseAddressNop) {
7324 addr_nop_5();
7325 } else {
7326 emit_byte(0x26); // es:
7327 emit_byte(0x2e); // cs:
7328 emit_byte(0x64); // fs:
7329 emit_byte(0x65); // gs:
7330 emit_byte(0x90);
7331 }
7332 }
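// Note: the fallback emits four segment override prefixes followed by a
// one-byte nop, i.e. a single 5-byte instruction, so patch_verified_entry
// can later overwrite it with a 5-byte jump without splitting instructions.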
7334 void MacroAssembler::fcmp(Register tmp) {
7335 fcmp(tmp, 1, true, true);
7336 }
7338 void MacroAssembler::fcmp(Register tmp, int index, bool pop_left, bool pop_right) {
7339 assert(!pop_right || pop_left, "usage error");
7340 if (VM_Version::supports_cmov()) {
7341 assert(tmp == noreg, "unneeded temp");
7342 if (pop_left) {
7343 fucomip(index);
7344 } else {
7345 fucomi(index);
7346 }
7347 if (pop_right) {
7348 fpop();
7349 }
7350 } else {
7351 assert(tmp != noreg, "need temp");
7352 if (pop_left) {
7353 if (pop_right) {
7354 fcompp();
7355 } else {
7356 fcomp(index);
7357 }
7358 } else {
7359 fcom(index);
7360 }
7361 // convert FPU condition into eflags condition via rax
7362 save_rax(tmp);
7363 fwait(); fnstsw_ax();
7364 sahf();
7365 restore_rax(tmp);
7366 }
7367 // condition codes set as follows:
7368 //
7369 // CF (corresponds to C0) if x < y
7370 // PF (corresponds to C2) if unordered
7371 // ZF (corresponds to C3) if x = y
7372 }
7374 void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less) {
7375 fcmp2int(dst, unordered_is_less, 1, true, true);
7376 }
7378 void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right) {
7379 fcmp(VM_Version::supports_cmov() ? noreg : dst, index, pop_left, pop_right);
7380 Label L;
7381 if (unordered_is_less) {
7382 movl(dst, -1);
7383 jcc(Assembler::parity, L);
7384 jcc(Assembler::below , L);
7385 movl(dst, 0);
7386 jcc(Assembler::equal , L);
7387 increment(dst);
7388 } else { // unordered is greater
7389 movl(dst, 1);
7390 jcc(Assembler::parity, L);
7391 jcc(Assembler::above , L);
7392 movl(dst, 0);
7393 jcc(Assembler::equal , L);
7394 decrementl(dst);
7395 }
7396 bind(L);
7397 }
7399 void MacroAssembler::fld_d(AddressLiteral src) {
7400 fld_d(as_Address(src));
7401 }
7403 void MacroAssembler::fld_s(AddressLiteral src) {
7404 fld_s(as_Address(src));
7405 }
7407 void MacroAssembler::fld_x(AddressLiteral src) {
7408 Assembler::fld_x(as_Address(src));
7409 }
7411 void MacroAssembler::fldcw(AddressLiteral src) {
7412 Assembler::fldcw(as_Address(src));
7413 }
7415 void MacroAssembler::pow_exp_core_encoding() {
7416 // kills rax, rcx, rdx
7417 subptr(rsp,sizeof(jdouble));
7418 // computes 2^X. Stack: X ...
7419 // f2xm1 computes 2^X-1 but only operates on -1<=X<=1. Get int(X) and
7420 // keep it on the thread's stack to compute 2^int(X) later
7421 // then compute 2^(X-int(X)) as ((2^(X-int(X))-1)+1)
7422 // final result is obtained with: 2^X = 2^int(X) * 2^(X-int(X))
7423 fld_s(0); // Stack: X X ...
7424 frndint(); // Stack: int(X) X ...
7425 fsuba(1); // Stack: int(X) X-int(X) ...
7426 fistp_s(Address(rsp,0)); // move int(X) as integer to thread's stack. Stack: X-int(X) ...
7427 f2xm1(); // Stack: 2^(X-int(X))-1 ...
7428 fld1(); // Stack: 1 2^(X-int(X))-1 ...
7429 faddp(1); // Stack: 2^(X-int(X))
7430 // computes 2^(int(X)): add exponent bias (1023) to int(X), then
7431 // shift int(X)+1023 to exponent position.
7432 // Exponent is limited to 11 bits: if int(X)+1023 does not fit in 11
7433 // bits, set result to NaN. 0x000 and 0x7FF are reserved exponent
7434 // values, so detect them and set result to NaN.
7435 movl(rax,Address(rsp,0));
7436 movl(rcx, -2048); // 11 bit mask and valid NaN binary encoding
7437 addl(rax, 1023);
7438 movl(rdx,rax);
7439 shll(rax,20);
7440 // Check that 0 < int(X)+1023 < 2047. Otherwise set rax to NaN.
7441 addl(rdx,1);
7442 // Check that 1 < int(X)+1023+1 < 2048
7443 // in 3 steps:
7444 // 1- (int(X)+1023+1)&-2048 == 0 => 0 <= int(X)+1023+1 < 2048
7445 // 2- (int(X)+1023+1) != 0
7446 // 3- (int(X)+1023+1) != 1
7447 // Do 2- first because addl just updated the flags.
7448 cmov32(Assembler::equal,rax,rcx);
7449 cmpl(rdx,1);
7450 cmov32(Assembler::equal,rax,rcx);
7451 testl(rdx,rcx);
7452 cmov32(Assembler::notEqual,rax,rcx);
7453 movl(Address(rsp,4),rax);
7454 movl(Address(rsp,0),0);
7455 fmul_d(Address(rsp,0)); // Stack: 2^X ...
7456 addptr(rsp,sizeof(jdouble));
7457 }
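// Note (illustrative only): 2^int(X) is materialized by writing IEEE-754
// double bits directly; a double with exponent field e and zero mantissa
// encodes 2^(e-1023). A C sketch, assuming -1022 <= n <= 1023:
//
//   uint64_t two_to_n_bits(int n) {
//     return (uint64_t)(n + 1023) << 52;  // bit pattern of 2^n
//   }
//
// The shll(rax, 20) above places the exponent in the high dword (bits
// 52..62 of the double); the low dword is stored as zero before fmul_d.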
7459 void MacroAssembler::increase_precision() {
7460 subptr(rsp, BytesPerWord);
7461 fnstcw(Address(rsp, 0));
7462 movl(rax, Address(rsp, 0));
7463 orl(rax, 0x300);
7464 push(rax);
7465 fldcw(Address(rsp, 0));
7466 pop(rax);
7467 }
7469 void MacroAssembler::restore_precision() {
7470 fldcw(Address(rsp, 0));
7471 addptr(rsp, BytesPerWord);
7472 }
7474 void MacroAssembler::fast_pow() {
7475 // computes X^Y = 2^(Y * log2(X))
7476 // if fast computation is not possible, result is NaN. Requires
7477 // fallback from user of this macro.
7478 // increase precision for intermediate steps of the computation
7479 increase_precision();
7480 fyl2x(); // Stack: (Y*log2(X)) ...
7481 pow_exp_core_encoding(); // Stack: exp(X) ...
7482 restore_precision();
7483 }
7485 void MacroAssembler::fast_exp() {
7486 // computes exp(X) = 2^(X * log2(e))
7487 // if fast computation is not possible, result is NaN. Requires
7488 // fallback from user of this macro.
7489 // increase precision for intermediate steps of the computation
7490 increase_precision();
7491 fldl2e(); // Stack: log2(e) X ...
7492 fmulp(1); // Stack: (X*log2(e)) ...
7493 pow_exp_core_encoding(); // Stack: exp(X) ...
7494 restore_precision();
7495 }
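// Note: both helpers reduce to a single identity, with
// pow_exp_core_encoding supplying the final 2^t step:
//   x^y    = 2^(y * log2(x))   (fyl2x computes y * log2(x))
//   exp(x) = 2^(x * log2(e))   (fldl2e pushes the constant log2(e))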
7497 void MacroAssembler::pow_or_exp(bool is_exp, int num_fpu_regs_in_use) {
7498 // kills rax, rcx, rdx
7499 // pow and exp need 2 extra registers on the fpu stack.
7500 Label slow_case, done;
7501 Register tmp = noreg;
7502 if (!VM_Version::supports_cmov()) {
7503 // fcmp needs a temporary so preserve rdx
7504 tmp = rdx;
7505 }
7506 Register tmp2 = rax;
7507 Register tmp3 = rcx;
7509 if (is_exp) {
7510 // Stack: X
7511 fld_s(0); // duplicate argument for runtime call. Stack: X X
7512 fast_exp(); // Stack: exp(X) X
7513 fcmp(tmp, 0, false, false); // Stack: exp(X) X
7514 // exp(X) not equal to itself: exp(X) is NaN, go to slow case.
7515 jcc(Assembler::parity, slow_case);
7516 // get rid of duplicate argument. Stack: exp(X)
7517 if (num_fpu_regs_in_use > 0) {
7518 fxch();
7519 fpop();
7520 } else {
7521 ffree(1);
7522 }
7523 jmp(done);
7524 } else {
7525 // Stack: X Y
7526 Label x_negative, y_odd;
7528 fldz(); // Stack: 0 X Y
7529 fcmp(tmp, 1, true, false); // Stack: X Y
7530 jcc(Assembler::above, x_negative);
7532 // X >= 0
7534 fld_s(1); // duplicate arguments for runtime call. Stack: Y X Y
7535 fld_s(1); // Stack: X Y X Y
7536 fast_pow(); // Stack: X^Y X Y
7537 fcmp(tmp, 0, false, false); // Stack: X^Y X Y
7538 // X^Y not equal to itself: X^Y is NaN, go to slow case.
7539 jcc(Assembler::parity, slow_case);
7540 // get rid of duplicate arguments. Stack: X^Y
7541 if (num_fpu_regs_in_use > 0) {
7542 fxch(); fpop();
7543 fxch(); fpop();
7544 } else {
7545 ffree(2);
7546 ffree(1);
7547 }
7548 jmp(done);
7550 // X <= 0
7551 bind(x_negative);
7553 fld_s(1); // Stack: Y X Y
7554 frndint(); // Stack: int(Y) X Y
7555 fcmp(tmp, 2, false, false); // Stack: int(Y) X Y
7556 jcc(Assembler::notEqual, slow_case);
7558 subptr(rsp, 8);
7560 // For X^Y, when X < 0, Y has to be an integer and the final
7561 // result depends on whether it's odd or even. We just checked
7562 // that int(Y) == Y. We move int(Y) to gp registers as a 64 bit
7563 // integer to test its parity. If int(Y) is huge and doesn't fit
7564 // in the 64 bit integer range, the integer indefinite value will
7565 // end up in the gp registers. Huge numbers are all even, and the
7566 // integer indefinite value is even, so it's fine.
7568 #ifdef ASSERT
7569 // Let's check we don't end up with an integer indefinite number
7570 // when not expected. First test for huge numbers: check whether
7571 // int(Y)+1 == int(Y) which is true for very large numbers and
7572 // those are all even. A 64 bit integer is guaranteed to not
7573 // overflow for numbers where y+1 != y (when precision is set to
7574 // double precision).
7575 Label y_not_huge;
7577 fld1(); // Stack: 1 int(Y) X Y
7578 fadd(1); // Stack: 1+int(Y) int(Y) X Y
7580 #ifdef _LP64
7581 // trip to memory to force the precision down from double extended
7582 // precision
7583 fstp_d(Address(rsp, 0));
7584 fld_d(Address(rsp, 0));
7585 #endif
7587 fcmp(tmp, 1, true, false); // Stack: int(Y) X Y
7588 #endif
7590 // move int(Y) as 64 bit integer to thread's stack
7591 fistp_d(Address(rsp,0)); // Stack: X Y
7593 #ifdef ASSERT
7594 jcc(Assembler::notEqual, y_not_huge);
7596 // Y is huge so we know it's even. It may not fit in a 64 bit
7597 // integer and we don't want the debug code below to see the
7598 // integer indefinite value so overwrite int(Y) on the thread's
7599 // stack with 0.
7600 movl(Address(rsp, 0), 0);
7601 movl(Address(rsp, 4), 0);
7603 bind(y_not_huge);
7604 #endif
7606 fld_s(1); // duplicate arguments for runtime call. Stack: Y X Y
7607 fld_s(1); // Stack: X Y X Y
7608 fabs(); // Stack: abs(X) Y X Y
7609 fast_pow(); // Stack: abs(X)^Y X Y
7610 fcmp(tmp, 0, false, false); // Stack: abs(X)^Y X Y
7611 // abs(X)^Y not equal to itself: abs(X)^Y is NaN, go to slow case.
7613 pop(tmp2);
7614 NOT_LP64(pop(tmp3));
7615 jcc(Assembler::parity, slow_case);
7617 #ifdef ASSERT
7618 // Check that int(Y) is not integer indefinite value (int
7619 // overflow). Shouldn't happen because for values that would
7620 // overflow, 1+int(Y)==Y which was tested earlier.
7621 #ifndef _LP64
7622 {
7623 Label integer;
7624 testl(tmp2, tmp2);
7625 jcc(Assembler::notZero, integer);
7626 cmpl(tmp3, 0x80000000);
7627 jcc(Assembler::notZero, integer);
7628 STOP("integer indefinite value shouldn't be seen here");
7629 bind(integer);
7630 }
7631 #else
7632 {
7633 Label integer;
7634 mov(tmp3, tmp2); // preserve tmp2 for parity check below
7635 shlq(tmp3, 1);
7636 jcc(Assembler::carryClear, integer);
7637 jcc(Assembler::notZero, integer);
7638 STOP("integer indefinite value shouldn't be seen here");
7639 bind(integer);
7640 }
7641 #endif
7642 #endif
7644 // get rid of duplicate arguments. Stack: X^Y
7645 if (num_fpu_regs_in_use > 0) {
7646 fxch(); fpop();
7647 fxch(); fpop();
7648 } else {
7649 ffree(2);
7650 ffree(1);
7651 }
7653 testl(tmp2, 1);
7654 jcc(Assembler::zero, done); // X <= 0, Y even: X^Y = abs(X)^Y
7655 // X <= 0, Y odd: X^Y = -abs(X)^Y
7657 fchs(); // Stack: -abs(X)^Y Y
7658 jmp(done);
7659 }
7661 // slow case: runtime call
7662 bind(slow_case);
7664 fpop(); // pop incorrect result or int(Y)
7666 fp_runtime_fallback(is_exp ? CAST_FROM_FN_PTR(address, SharedRuntime::dexp) : CAST_FROM_FN_PTR(address, SharedRuntime::dpow),
7667 is_exp ? 1 : 2, num_fpu_regs_in_use);
7669 // Come here with result in F-TOS
7670 bind(done);
7671 }
7673 void MacroAssembler::fpop() {
7674 ffree();
7675 fincstp();
7676 }
7678 void MacroAssembler::fremr(Register tmp) {
7679 save_rax(tmp);
7680 { Label L;
7681 bind(L);
7682 fprem();
7683 fwait(); fnstsw_ax();
7684 #ifdef _LP64
7685 testl(rax, 0x400);
7686 jcc(Assembler::notEqual, L);
7687 #else
7688 sahf();
7689 jcc(Assembler::parity, L);
7690 #endif // _LP64
7691 }
7692 restore_rax(tmp);
7693 // Result is in ST0.
7694 // Note: fxch & fpop to get rid of ST1
7695 // (otherwise FPU stack could overflow eventually)
7696 fxch(1);
7697 fpop();
7698 }
7701 void MacroAssembler::incrementl(AddressLiteral dst) {
7702 if (reachable(dst)) {
7703 incrementl(as_Address(dst));
7704 } else {
7705 lea(rscratch1, dst);
7706 incrementl(Address(rscratch1, 0));
7707 }
7708 }
7710 void MacroAssembler::incrementl(ArrayAddress dst) {
7711 incrementl(as_Address(dst));
7712 }
7714 void MacroAssembler::incrementl(Register reg, int value) {
7715 if (value == min_jint) {addl(reg, value) ; return; }
7716 if (value < 0) { decrementl(reg, -value); return; }
7717 if (value == 0) { ; return; }
7718 if (value == 1 && UseIncDec) { incl(reg) ; return; }
7719 /* else */ { addl(reg, value) ; return; }
7720 }
7722 void MacroAssembler::incrementl(Address dst, int value) {
7723 if (value == min_jint) {addl(dst, value) ; return; }
7724 if (value < 0) { decrementl(dst, -value); return; }
7725 if (value == 0) { ; return; }
7726 if (value == 1 && UseIncDec) { incl(dst) ; return; }
7727 /* else */ { addl(dst, value) ; return; }
7728 }
7730 void MacroAssembler::jump(AddressLiteral dst) {
7731 if (reachable(dst)) {
7732 jmp_literal(dst.target(), dst.rspec());
7733 } else {
7734 lea(rscratch1, dst);
7735 jmp(rscratch1);
7736 }
7737 }
7739 void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst) {
7740 if (reachable(dst)) {
7741 InstructionMark im(this);
7742 relocate(dst.reloc());
7743 const int short_size = 2;
7744 const int long_size = 6;
7745 int offs = (intptr_t)dst.target() - ((intptr_t)_code_pos);
7746 if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) {
7747 // 0111 tttn #8-bit disp
7748 emit_byte(0x70 | cc);
7749 emit_byte((offs - short_size) & 0xFF);
7750 } else {
7751 // 0000 1111 1000 tttn #32-bit disp
7752 emit_byte(0x0F);
7753 emit_byte(0x80 | cc);
7754 emit_long(offs - long_size);
7755 }
7756 } else {
7757 #ifdef ASSERT
7758 warning("reversing conditional branch");
7759 #endif /* ASSERT */
7760 Label skip;
7761 jccb(reverse[cc], skip);
7762 lea(rscratch1, dst);
7763 Assembler::jmp(rscratch1);
7764 bind(skip);
7765 }
7766 }
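// Note (illustrative only): the displacement of a Jcc is relative to the
// end of the instruction, hence the short_size/long_size corrections:
//
//   offs   = target - code_pos;  // measured from the start of the Jcc
//   disp8  = offs - 2;           // short form:  0x7t, disp8
//   disp32 = offs - 6;           // long form:   0x0F 0x8t, disp32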
7768 void MacroAssembler::ldmxcsr(AddressLiteral src) {
7769 if (reachable(src)) {
7770 Assembler::ldmxcsr(as_Address(src));
7771 } else {
7772 lea(rscratch1, src);
7773 Assembler::ldmxcsr(Address(rscratch1, 0));
7774 }
7775 }
7777 int MacroAssembler::load_signed_byte(Register dst, Address src) {
7778 int off;
7779 if (LP64_ONLY(true ||) VM_Version::is_P6()) {
7780 off = offset();
7781 movsbl(dst, src); // movsxb
7782 } else {
7783 off = load_unsigned_byte(dst, src);
7784 shll(dst, 24);
7785 sarl(dst, 24);
7786 }
7787 return off;
7788 }
7790 // Note: load_signed_short used to be called load_signed_word.
7791 // Although the 'w' in x86 opcodes refers to the term "word" in the assembler
7792 // manual, which means 16 bits, that usage is found nowhere in HotSpot code.
7793 // The term "word" in HotSpot means a 32- or 64-bit machine word.
7794 int MacroAssembler::load_signed_short(Register dst, Address src) {
7795 int off;
7796 if (LP64_ONLY(true ||) VM_Version::is_P6()) {
7797 // This is dubious to me since it seems safe to do a signed 16 => 64 bit
7798 // version, but this is what 64bit has always done. This seems to imply
7799 // that users are only using 32bits worth.
7800 off = offset();
7801 movswl(dst, src); // movsxw
7802 } else {
7803 off = load_unsigned_short(dst, src);
7804 shll(dst, 16);
7805 sarl(dst, 16);
7806 }
7807 return off;
7808 }
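// Note (illustrative only): on pre-P6 hardware without movsx, sign
// extension is synthesized with a shift pair, relying on arithmetic
// right shift exactly as the shll/sarl above do. In C, for 16 bits:
//
//   int sign_extend_16(int x) { return (x << 16) >> 16; }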
7810 int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
7811 // According to Intel Doc. AP-526, "Zero-Extension of Short", p. 16,
7812 // and "3.9 Partial Register Penalties", p. 22.
7813 int off;
7814 if (LP64_ONLY(true || ) VM_Version::is_P6() || src.uses(dst)) {
7815 off = offset();
7816 movzbl(dst, src); // movzxb
7817 } else {
7818 xorl(dst, dst);
7819 off = offset();
7820 movb(dst, src);
7821 }
7822 return off;
7823 }
7825 // Note: load_unsigned_short used to be called load_unsigned_word.
7826 int MacroAssembler::load_unsigned_short(Register dst, Address src) {
7827 // According to Intel Doc. AP-526, "Zero-Extension of Short", p. 16,
7828 // and "3.9 Partial Register Penalties", p. 22.
7829 int off;
7830 if (LP64_ONLY(true ||) VM_Version::is_P6() || src.uses(dst)) {
7831 off = offset();
7832 movzwl(dst, src); // movzxw
7833 } else {
7834 xorl(dst, dst);
7835 off = offset();
7836 movw(dst, src);
7837 }
7838 return off;
7839 }
7841 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) {
7842 switch (size_in_bytes) {
7843 #ifndef _LP64
7844 case 8:
7845 assert(dst2 != noreg, "second dest register required");
7846 movl(dst, src);
7847 movl(dst2, src.plus_disp(BytesPerInt));
7848 break;
7849 #else
7850 case 8: movq(dst, src); break;
7851 #endif
7852 case 4: movl(dst, src); break;
7853 case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
7854 case 1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
7855 default: ShouldNotReachHere();
7856 }
7857 }
7859 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) {
7860 switch (size_in_bytes) {
7861 #ifndef _LP64
7862 case 8:
7863 assert(src2 != noreg, "second source register required");
7864 movl(dst, src);
7865 movl(dst.plus_disp(BytesPerInt), src2);
7866 break;
7867 #else
7868 case 8: movq(dst, src); break;
7869 #endif
7870 case 4: movl(dst, src); break;
7871 case 2: movw(dst, src); break;
7872 case 1: movb(dst, src); break;
7873 default: ShouldNotReachHere();
7874 }
7875 }
7877 void MacroAssembler::mov32(AddressLiteral dst, Register src) {
7878 if (reachable(dst)) {
7879 movl(as_Address(dst), src);
7880 } else {
7881 lea(rscratch1, dst);
7882 movl(Address(rscratch1, 0), src);
7883 }
7884 }
7886 void MacroAssembler::mov32(Register dst, AddressLiteral src) {
7887 if (reachable(src)) {
7888 movl(dst, as_Address(src));
7889 } else {
7890 lea(rscratch1, src);
7891 movl(dst, Address(rscratch1, 0));
7892 }
7893 }
7895 // C++ bool manipulation
7897 void MacroAssembler::movbool(Register dst, Address src) {
7898 if(sizeof(bool) == 1)
7899 movb(dst, src);
7900 else if(sizeof(bool) == 2)
7901 movw(dst, src);
7902 else if(sizeof(bool) == 4)
7903 movl(dst, src);
7904 else
7905 // unsupported
7906 ShouldNotReachHere();
7907 }
7909 void MacroAssembler::movbool(Address dst, bool boolconst) {
7910 if(sizeof(bool) == 1)
7911 movb(dst, (int) boolconst);
7912 else if(sizeof(bool) == 2)
7913 movw(dst, (int) boolconst);
7914 else if(sizeof(bool) == 4)
7915 movl(dst, (int) boolconst);
7916 else
7917 // unsupported
7918 ShouldNotReachHere();
7919 }
7921 void MacroAssembler::movbool(Address dst, Register src) {
7922 if(sizeof(bool) == 1)
7923 movb(dst, src);
7924 else if(sizeof(bool) == 2)
7925 movw(dst, src);
7926 else if(sizeof(bool) == 4)
7927 movl(dst, src);
7928 else
7929 // unsupported
7930 ShouldNotReachHere();
7931 }
7933 void MacroAssembler::movbyte(ArrayAddress dst, int src) {
7934 movb(as_Address(dst), src);
7935 }
7937 void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src) {
7938 if (reachable(src)) {
7939 movdl(dst, as_Address(src));
7940 } else {
7941 lea(rscratch1, src);
7942 movdl(dst, Address(rscratch1, 0));
7943 }
7944 }
7946 void MacroAssembler::movq(XMMRegister dst, AddressLiteral src) {
7947 if (reachable(src)) {
7948 movq(dst, as_Address(src));
7949 } else {
7950 lea(rscratch1, src);
7951 movq(dst, Address(rscratch1, 0));
7952 }
7953 }
7955 void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src) {
7956 if (reachable(src)) {
7957 if (UseXmmLoadAndClearUpper) {
7958 movsd (dst, as_Address(src));
7959 } else {
7960 movlpd(dst, as_Address(src));
7961 }
7962 } else {
7963 lea(rscratch1, src);
7964 if (UseXmmLoadAndClearUpper) {
7965 movsd (dst, Address(rscratch1, 0));
7966 } else {
7967 movlpd(dst, Address(rscratch1, 0));
7968 }
7969 }
7970 }
7972 void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src) {
7973 if (reachable(src)) {
7974 movss(dst, as_Address(src));
7975 } else {
7976 lea(rscratch1, src);
7977 movss(dst, Address(rscratch1, 0));
7978 }
7979 }
7981 void MacroAssembler::movptr(Register dst, Register src) {
7982 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
7983 }
7985 void MacroAssembler::movptr(Register dst, Address src) {
7986 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
7987 }
7989 // src should NEVER be a real pointer. Use AddressLiteral for true pointers
7990 void MacroAssembler::movptr(Register dst, intptr_t src) {
7991 LP64_ONLY(mov64(dst, src)) NOT_LP64(movl(dst, src));
7992 }
7994 void MacroAssembler::movptr(Address dst, Register src) {
7995 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
7996 }
7998 void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src) {
7999 if (reachable(src)) {
8000 Assembler::movsd(dst, as_Address(src));
8001 } else {
8002 lea(rscratch1, src);
8003 Assembler::movsd(dst, Address(rscratch1, 0));
8004 }
8005 }
8007 void MacroAssembler::movss(XMMRegister dst, AddressLiteral src) {
8008 if (reachable(src)) {
8009 Assembler::movss(dst, as_Address(src));
8010 } else {
8011 lea(rscratch1, src);
8012 Assembler::movss(dst, Address(rscratch1, 0));
8013 }
8014 }
8016 void MacroAssembler::mulsd(XMMRegister dst, AddressLiteral src) {
8017 if (reachable(src)) {
8018 Assembler::mulsd(dst, as_Address(src));
8019 } else {
8020 lea(rscratch1, src);
8021 Assembler::mulsd(dst, Address(rscratch1, 0));
8022 }
8023 }
8025 void MacroAssembler::mulss(XMMRegister dst, AddressLiteral src) {
8026 if (reachable(src)) {
8027 Assembler::mulss(dst, as_Address(src));
8028 } else {
8029 lea(rscratch1, src);
8030 Assembler::mulss(dst, Address(rscratch1, 0));
8031 }
8032 }
8034 void MacroAssembler::null_check(Register reg, int offset) {
8035 if (needs_explicit_null_check(offset)) {
8036 // provoke OS NULL exception if reg = NULL by
8037 // accessing M[reg] w/o changing any (non-CC) registers
8038 // NOTE: cmpl is plenty here to provoke a segv
8039 cmpptr(rax, Address(reg, 0));
8040 // Note: should probably use testl(rax, Address(reg, 0));
8041 // may be shorter code (however, this version of
8042 // testl needs to be implemented first)
8043 } else {
8044 // nothing to do, (later) access of M[reg + offset]
8045 // will provoke OS NULL exception if reg = NULL
8046 }
8047 }
8049 void MacroAssembler::os_breakpoint() {
8050 // instead of directly emitting a breakpoint, call os:breakpoint for better debugability
8051 // (e.g., MSVC can't call ps() otherwise)
8052 call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
8053 }
8055 void MacroAssembler::pop_CPU_state() {
8056 pop_FPU_state();
8057 pop_IU_state();
8058 }
8060 void MacroAssembler::pop_FPU_state() {
8061 NOT_LP64(frstor(Address(rsp, 0));)
8062 LP64_ONLY(fxrstor(Address(rsp, 0));)
8063 addptr(rsp, FPUStateSizeInWords * wordSize);
8064 }
8066 void MacroAssembler::pop_IU_state() {
8067 popa();
8068 LP64_ONLY(addq(rsp, 8));
8069 popf();
8070 }
8072 // Save Integer and Float state
8073 // Warning: Stack must be 16 byte aligned (64bit)
8074 void MacroAssembler::push_CPU_state() {
8075 push_IU_state();
8076 push_FPU_state();
8077 }
8079 void MacroAssembler::push_FPU_state() {
8080 subptr(rsp, FPUStateSizeInWords * wordSize);
8081 #ifndef _LP64
8082 fnsave(Address(rsp, 0));
8083 fwait();
8084 #else
8085 fxsave(Address(rsp, 0));
8086 #endif // LP64
8087 }
8089 void MacroAssembler::push_IU_state() {
8090 // Push flags first because pusha kills them
8091 pushf();
8092 // Make sure rsp stays 16-byte aligned
8093 LP64_ONLY(subq(rsp, 8));
8094 pusha();
8095 }
8097 void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp, bool clear_pc) {
8098 // determine java_thread register
8099 if (!java_thread->is_valid()) {
8100 java_thread = rdi;
8101 get_thread(java_thread);
8102 }
8103 // we must set sp to zero to clear frame
8104 movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
8105 if (clear_fp) {
8106 movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
8107 }
8109 if (clear_pc)
8110 movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
8112 }
8114 void MacroAssembler::restore_rax(Register tmp) {
8115 if (tmp == noreg) pop(rax);
8116 else if (tmp != rax) mov(rax, tmp);
8117 }
8119 void MacroAssembler::round_to(Register reg, int modulus) {
8120 addptr(reg, modulus - 1);
8121 andptr(reg, -modulus);
8122 }
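// Note (illustrative only): rounds reg up to a multiple of modulus, which
// must be a power of two for the mask to work:
//
//   rounded = (x + modulus - 1) & -modulus;  // e.g. round_to(13, 8) == 16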
8124 void MacroAssembler::save_rax(Register tmp) {
8125 if (tmp == noreg) push(rax);
8126 else if (tmp != rax) mov(tmp, rax);
8127 }
8129 // Write serialization page so VM thread can do a pseudo remote membar.
8130 // We use the current thread pointer to calculate a thread specific
8131 // offset to write to within the page. This minimizes bus traffic
8132 // due to cache line collision.
8133 void MacroAssembler::serialize_memory(Register thread, Register tmp) {
8134 movl(tmp, thread);
8135 shrl(tmp, os::get_serialize_page_shift_count());
8136 andl(tmp, (os::vm_page_size() - sizeof(int)));
8138 Address index(noreg, tmp, Address::times_1);
8139 ExternalAddress page(os::get_memory_serialize_page());
8141 // Size of store must match masking code above
8142 movl(as_Address(ArrayAddress(page, index)), tmp);
8143 }
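// Note (illustrative only): each thread hashes to its own int-sized slot
// within the single serialization page, roughly:
//
//   offset = (thread >> serialize_page_shift) & (page_size - sizeof(int));
//   *(int*)(serialize_page + offset) = tmp;  // the store is the membar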
8145 // Calls to C land
8146 //
8147 // When entering C land, the rbp and rsp of the last Java frame have to be recorded
8148 // in the (thread-local) JavaThread object. When leaving C land, the last Java fp
8149 // has to be reset to 0. This is required to allow proper stack traversal.
8150 void MacroAssembler::set_last_Java_frame(Register java_thread,
8151 Register last_java_sp,
8152 Register last_java_fp,
8153 address last_java_pc) {
8154 // determine java_thread register
8155 if (!java_thread->is_valid()) {
8156 java_thread = rdi;
8157 get_thread(java_thread);
8158 }
8159 // determine last_java_sp register
8160 if (!last_java_sp->is_valid()) {
8161 last_java_sp = rsp;
8162 }
8164 // last_java_fp is optional
8166 if (last_java_fp->is_valid()) {
8167 movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), last_java_fp);
8168 }
8170 // last_java_pc is optional
8172 if (last_java_pc != NULL) {
8173 lea(Address(java_thread,
8174 JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()),
8175 InternalAddress(last_java_pc));
8177 }
8178 movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
8179 }
8181 void MacroAssembler::shlptr(Register dst, int imm8) {
8182 LP64_ONLY(shlq(dst, imm8)) NOT_LP64(shll(dst, imm8));
8183 }
8185 void MacroAssembler::shrptr(Register dst, int imm8) {
8186 LP64_ONLY(shrq(dst, imm8)) NOT_LP64(shrl(dst, imm8));
8187 }
8189 void MacroAssembler::sign_extend_byte(Register reg) {
8190 if (LP64_ONLY(true ||) (VM_Version::is_P6() && reg->has_byte_register())) {
8191 movsbl(reg, reg); // movsxb
8192 } else {
8193 shll(reg, 24);
8194 sarl(reg, 24);
8195 }
8196 }
8198 void MacroAssembler::sign_extend_short(Register reg) {
8199 if (LP64_ONLY(true ||) VM_Version::is_P6()) {
8200 movswl(reg, reg); // movsxw
8201 } else {
8202 shll(reg, 16);
8203 sarl(reg, 16);
8204 }
8205 }
8207 void MacroAssembler::testl(Register dst, AddressLiteral src) {
8208 assert(reachable(src), "Address should be reachable");
8209 testl(dst, as_Address(src));
8210 }
8212 void MacroAssembler::sqrtsd(XMMRegister dst, AddressLiteral src) {
8213 if (reachable(src)) {
8214 Assembler::sqrtsd(dst, as_Address(src));
8215 } else {
8216 lea(rscratch1, src);
8217 Assembler::sqrtsd(dst, Address(rscratch1, 0));
8218 }
8219 }
8221 void MacroAssembler::sqrtss(XMMRegister dst, AddressLiteral src) {
8222 if (reachable(src)) {
8223 Assembler::sqrtss(dst, as_Address(src));
8224 } else {
8225 lea(rscratch1, src);
8226 Assembler::sqrtss(dst, Address(rscratch1, 0));
8227 }
8228 }
8230 void MacroAssembler::subsd(XMMRegister dst, AddressLiteral src) {
8231 if (reachable(src)) {
8232 Assembler::subsd(dst, as_Address(src));
8233 } else {
8234 lea(rscratch1, src);
8235 Assembler::subsd(dst, Address(rscratch1, 0));
8236 }
8237 }
8239 void MacroAssembler::subss(XMMRegister dst, AddressLiteral src) {
8240 if (reachable(src)) {
8241 Assembler::subss(dst, as_Address(src));
8242 } else {
8243 lea(rscratch1, src);
8244 Assembler::subss(dst, Address(rscratch1, 0));
8245 }
8246 }
8248 void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src) {
8249 if (reachable(src)) {
8250 Assembler::ucomisd(dst, as_Address(src));
8251 } else {
8252 lea(rscratch1, src);
8253 Assembler::ucomisd(dst, Address(rscratch1, 0));
8254 }
8255 }
8257 void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src) {
8258 if (reachable(src)) {
8259 Assembler::ucomiss(dst, as_Address(src));
8260 } else {
8261 lea(rscratch1, src);
8262 Assembler::ucomiss(dst, Address(rscratch1, 0));
8263 }
8264 }
8266 void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src) {
8267 // Used in sign-bit flipping with aligned address.
8268 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
8269 if (reachable(src)) {
8270 Assembler::xorpd(dst, as_Address(src));
8271 } else {
8272 lea(rscratch1, src);
8273 Assembler::xorpd(dst, Address(rscratch1, 0));
8274 }
8275 }
8277 void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src) {
8278 // Used in sign-bit flipping with aligned address.
8279 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
8280 if (reachable(src)) {
8281 Assembler::xorps(dst, as_Address(src));
8282 } else {
8283 lea(rscratch1, src);
8284 Assembler::xorps(dst, Address(rscratch1, 0));
8285 }
8286 }
8288 // AVX 3-operands instructions
8290 void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
8291 if (reachable(src)) {
8292 vaddsd(dst, nds, as_Address(src));
8293 } else {
8294 lea(rscratch1, src);
8295 vaddsd(dst, nds, Address(rscratch1, 0));
8296 }
8297 }
8299 void MacroAssembler::vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
8300 if (reachable(src)) {
8301 vaddss(dst, nds, as_Address(src));
8302 } else {
8303 lea(rscratch1, src);
8304 vaddss(dst, nds, Address(rscratch1, 0));
8305 }
8306 }
8308 void MacroAssembler::vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256) {
8309 if (reachable(src)) {
8310 vandpd(dst, nds, as_Address(src), vector256);
8311 } else {
8312 lea(rscratch1, src);
8313 vandpd(dst, nds, Address(rscratch1, 0), vector256);
8314 }
8315 }
8317 void MacroAssembler::vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256) {
8318 if (reachable(src)) {
8319 vandps(dst, nds, as_Address(src), vector256);
8320 } else {
8321 lea(rscratch1, src);
8322 vandps(dst, nds, Address(rscratch1, 0), vector256);
8323 }
8324 }
8326 void MacroAssembler::vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
8327 if (reachable(src)) {
8328 vdivsd(dst, nds, as_Address(src));
8329 } else {
8330 lea(rscratch1, src);
8331 vdivsd(dst, nds, Address(rscratch1, 0));
8332 }
8333 }
8335 void MacroAssembler::vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
8336 if (reachable(src)) {
8337 vdivss(dst, nds, as_Address(src));
8338 } else {
8339 lea(rscratch1, src);
8340 vdivss(dst, nds, Address(rscratch1, 0));
8341 }
8342 }
8344 void MacroAssembler::vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
8345 if (reachable(src)) {
8346 vmulsd(dst, nds, as_Address(src));
8347 } else {
8348 lea(rscratch1, src);
8349 vmulsd(dst, nds, Address(rscratch1, 0));
8350 }
8351 }
8353 void MacroAssembler::vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
8354 if (reachable(src)) {
8355 vmulss(dst, nds, as_Address(src));
8356 } else {
8357 lea(rscratch1, src);
8358 vmulss(dst, nds, Address(rscratch1, 0));
8359 }
8360 }
8362 void MacroAssembler::vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
8363 if (reachable(src)) {
8364 vsubsd(dst, nds, as_Address(src));
8365 } else {
8366 lea(rscratch1, src);
8367 vsubsd(dst, nds, Address(rscratch1, 0));
8368 }
8369 }
8371 void MacroAssembler::vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
8372 if (reachable(src)) {
8373 vsubss(dst, nds, as_Address(src));
8374 } else {
8375 lea(rscratch1, src);
8376 vsubss(dst, nds, Address(rscratch1, 0));
8377 }
8378 }
8380 void MacroAssembler::vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256) {
8381 if (reachable(src)) {
8382 vxorpd(dst, nds, as_Address(src), vector256);
8383 } else {
8384 lea(rscratch1, src);
8385 vxorpd(dst, nds, Address(rscratch1, 0), vector256);
8386 }
8387 }
8389 void MacroAssembler::vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256) {
8390 if (reachable(src)) {
8391 vxorps(dst, nds, as_Address(src), vector256);
8392 } else {
8393 lea(rscratch1, src);
8394 vxorps(dst, nds, Address(rscratch1, 0), vector256);
8395 }
8396 }
8399 //////////////////////////////////////////////////////////////////////////////////
8400 #ifndef SERIALGC
8402 void MacroAssembler::g1_write_barrier_pre(Register obj,
8403 Register pre_val,
8404 Register thread,
8405 Register tmp,
8406 bool tosca_live,
8407 bool expand_call) {
8409 // If expand_call is true then we expand the call_VM_leaf macro
8410 // directly to skip generating the check by
8411 // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
8413 #ifdef _LP64
8414 assert(thread == r15_thread, "must be");
8415 #endif // _LP64
8417 Label done;
8418 Label runtime;
8420 assert(pre_val != noreg, "check this code");
8422 if (obj != noreg) {
8423 assert_different_registers(obj, pre_val, tmp);
8424 assert(pre_val != rax, "check this code");
8425 }
8427 Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
8428 PtrQueue::byte_offset_of_active()));
8429 Address index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
8430 PtrQueue::byte_offset_of_index()));
8431 Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
8432 PtrQueue::byte_offset_of_buf()));
8435 // Is marking active?
8436 if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
8437 cmpl(in_progress, 0);
8438 } else {
8439 assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
8440 cmpb(in_progress, 0);
8441 }
8442 jcc(Assembler::equal, done);
8444 // Do we need to load the previous value?
8445 if (obj != noreg) {
8446 load_heap_oop(pre_val, Address(obj, 0));
8447 }
8449 // Is the previous value null?
8450 cmpptr(pre_val, (int32_t) NULL_WORD);
8451 jcc(Assembler::equal, done);
8453 // Can we store original value in the thread's buffer?
8454 // Is index == 0?
8455 // (The index field is typed as size_t.)
8457 movptr(tmp, index); // tmp := *index_adr
8458 cmpptr(tmp, 0); // tmp == 0?
8459 jcc(Assembler::equal, runtime); // If yes, goto runtime
8461 subptr(tmp, wordSize); // tmp := tmp - wordSize
8462 movptr(index, tmp); // *index_adr := tmp
8463 addptr(tmp, buffer); // tmp := tmp + *buffer_adr
8465 // Record the previous value
8466 movptr(Address(tmp, 0), pre_val);
8467 jmp(done);
8469 bind(runtime);
8470 // save the live input values
8471 if(tosca_live) push(rax);
8473 if (obj != noreg && obj != rax)
8474 push(obj);
8476 if (pre_val != rax)
8477 push(pre_val);
8479 // Calling the runtime using the regular call_VM_leaf mechanism generates
8480 // code (generated by InterpreterMacroAssember::call_VM_leaf_base)
8481 // that checks that the *(ebp+frame::interpreter_frame_last_sp) == NULL.
8482 //
8483 // If we are generating the pre-barrier without a frame (e.g. in the
8484 // intrinsified Reference.get() routine) then ebp might be pointing to
8485 // the caller frame and so this check will most likely fail at runtime.
8486 //
8487 // Expanding the call directly bypasses the generation of the check.
8488 // So when we do not have a full interpreter frame on the stack
8489 // expand_call should be passed true.
8491 NOT_LP64( push(thread); )
8493 if (expand_call) {
8494 LP64_ONLY( assert(pre_val != c_rarg1, "smashed arg"); )
8495 pass_arg1(this, thread);
8496 pass_arg0(this, pre_val);
8497 MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), 2);
8498 } else {
8499 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
8500 }
8502 NOT_LP64( pop(thread); )
8504 // save the live input values
8505 if (pre_val != rax)
8506 pop(pre_val);
8508 if (obj != noreg && obj != rax)
8509 pop(obj);
8511 if(tosca_live) pop(rax);
8513 bind(done);
8514 }
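// Note (illustrative only): a C-like sketch of the SATB pre-barrier above:
//
//   if (marking_active) {
//     if (obj != NULL) pre_val = *obj;       // previous value of the field
//     if (pre_val != NULL) {
//       if (queue.index == 0) {
//         g1_wb_pre(pre_val, thread);        // slow path: buffer is full
//       } else {
//         queue.index -= wordSize;           // bump down, then record
//         *(oop*)(queue.buf + queue.index) = pre_val;
//       }
//     }
//   }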
8516 void MacroAssembler::g1_write_barrier_post(Register store_addr,
8517 Register new_val,
8518 Register thread,
8519 Register tmp,
8520 Register tmp2) {
8521 #ifdef _LP64
8522 assert(thread == r15_thread, "must be");
8523 #endif // _LP64
8525 Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
8526 PtrQueue::byte_offset_of_index()));
8527 Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
8528 PtrQueue::byte_offset_of_buf()));
8530 BarrierSet* bs = Universe::heap()->barrier_set();
8531 CardTableModRefBS* ct = (CardTableModRefBS*)bs;
8532 Label done;
8533 Label runtime;
8535 // Does store cross heap regions?
8537 movptr(tmp, store_addr);
8538 xorptr(tmp, new_val);
8539 shrptr(tmp, HeapRegion::LogOfHRGrainBytes);
8540 jcc(Assembler::equal, done);
8542 // crosses regions, storing NULL?
8544 cmpptr(new_val, (int32_t) NULL_WORD);
8545 jcc(Assembler::equal, done);
8547 // storing region crossing non-NULL, is card already dirty?
8549 ExternalAddress cardtable((address) ct->byte_map_base);
8550 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
8551 #ifdef _LP64
8552 const Register card_addr = tmp;
8554 movq(card_addr, store_addr);
8555 shrq(card_addr, CardTableModRefBS::card_shift);
8557 lea(tmp2, cardtable);
8559 // get the address of the card
8560 addq(card_addr, tmp2);
8561 #else
8562 const Register card_index = tmp;
8564 movl(card_index, store_addr);
8565 shrl(card_index, CardTableModRefBS::card_shift);
8567 Address index(noreg, card_index, Address::times_1);
8568 const Register card_addr = tmp;
8569 lea(card_addr, as_Address(ArrayAddress(cardtable, index)));
8570 #endif
8571 cmpb(Address(card_addr, 0), 0);
8572 jcc(Assembler::equal, done);
8574 // storing a region crossing, non-NULL oop, card is clean.
8575 // dirty card and log.
8577 movb(Address(card_addr, 0), 0);
8579 cmpl(queue_index, 0);
8580 jcc(Assembler::equal, runtime);
8581 subl(queue_index, wordSize);
8582 movptr(tmp2, buffer);
8583 #ifdef _LP64
8584 movslq(rscratch1, queue_index);
8585 addq(tmp2, rscratch1);
8586 movq(Address(tmp2, 0), card_addr);
8587 #else
8588 addl(tmp2, queue_index);
8589 movl(Address(tmp2, 0), card_index);
8590 #endif
8591 jmp(done);
8593 bind(runtime);
8594 // save the live input values
8595 push(store_addr);
8596 push(new_val);
8597 #ifdef _LP64
8598 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, r15_thread);
8599 #else
8600 push(thread);
8601 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
8602 pop(thread);
8603 #endif
8604 pop(new_val);
8605 pop(store_addr);
8607 bind(done);
8608 }
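// Note (illustrative only): the post-barrier filters most stores before
// touching the card table; the xor implements the region-crossing test:
//
//   if (((store_addr ^ new_val) >> LogOfHRGrainBytes) == 0) return;
//   if (new_val == NULL) return;
//   card = byte_map_base + (store_addr >> card_shift);
//   if (*card == 0) return;   // already dirty
//   *card = 0;                // dirty it
//   enqueue(card);            // log for refinement; slow path if queue full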
8610 #endif // SERIALGC
8611 //////////////////////////////////////////////////////////////////////////////////
8614 void MacroAssembler::store_check(Register obj) {
8615 // Does a store check for the oop in register obj. The content of
8616 // register obj is destroyed afterwards.
8617 store_check_part_1(obj);
8618 store_check_part_2(obj);
8619 }
8621 void MacroAssembler::store_check(Register obj, Address dst) {
8622 store_check(obj);
8623 }
8626 // split the store check operation so that other instructions can be scheduled in between
8627 void MacroAssembler::store_check_part_1(Register obj) {
8628 BarrierSet* bs = Universe::heap()->barrier_set();
8629 assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
8630 shrptr(obj, CardTableModRefBS::card_shift);
8631 }
8633 void MacroAssembler::store_check_part_2(Register obj) {
8634 BarrierSet* bs = Universe::heap()->barrier_set();
8635 assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
8636 CardTableModRefBS* ct = (CardTableModRefBS*)bs;
8637 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
8639 // The calculation for byte_map_base is as follows:
8640 // byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
8641 // So this essentially converts an address to a displacement and
8642 // it will never need to be relocated. On 64bit however the value may be too
8643 // large for a 32bit displacement
8645 intptr_t disp = (intptr_t) ct->byte_map_base;
8646 if (is_simm32(disp)) {
8647 Address cardtable(noreg, obj, Address::times_1, disp);
8648 movb(cardtable, 0);
8649 } else {
8650 // By doing it as an ExternalAddress disp could be converted to a rip-relative
8651 // displacement and done in a single instruction given favorable mapping and
8652 // a smarter version of as_Address. Worst case it is two instructions, which
8653 // is no worse than loading disp into a register and doing a simple
8654 // Address() as above.
8655 // We can't do as ExternalAddress as the only style since if disp == 0 we'll
8656 // assert since NULL isn't acceptable in a relocInfo (see 6644928). In any case
8657 // in some cases we'll get a single instruction version.
8659 ExternalAddress cardtable((address)disp);
8660 Address index(noreg, obj, Address::times_1);
8661 movb(as_Address(ArrayAddress(cardtable, index)), 0);
8662 }
8663 }
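// Note (illustrative only): parts 1 and 2 together implement the classic
// card-marking barrier:
//
//   byte_map_base[obj >> card_shift] = 0;   // mark the card dirty
//
// byte_map_base is pre-biased by the heap base, so the shifted address can
// serve directly as a displacement whenever it fits in 32 bits.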
8665 void MacroAssembler::subptr(Register dst, int32_t imm32) {
8666 LP64_ONLY(subq(dst, imm32)) NOT_LP64(subl(dst, imm32));
8667 }
8669 // Force generation of a 4 byte immediate value even if it fits into 8 bits
8670 void MacroAssembler::subptr_imm32(Register dst, int32_t imm32) {
8671 LP64_ONLY(subq_imm32(dst, imm32)) NOT_LP64(subl_imm32(dst, imm32));
8672 }
8674 void MacroAssembler::subptr(Register dst, Register src) {
8675 LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src));
8676 }
8678 // C++ bool manipulation
8679 void MacroAssembler::testbool(Register dst) {
8680 if(sizeof(bool) == 1)
8681 testb(dst, 0xff);
8682 else if(sizeof(bool) == 2) {
8683 // testw implementation needed for two byte bools
8684 ShouldNotReachHere();
8685 } else if(sizeof(bool) == 4)
8686 testl(dst, dst);
8687 else
8688 // unsupported
8689 ShouldNotReachHere();
8690 }
8692 void MacroAssembler::testptr(Register dst, Register src) {
8693 LP64_ONLY(testq(dst, src)) NOT_LP64(testl(dst, src));
8694 }
8696 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
8697 void MacroAssembler::tlab_allocate(Register obj,
8698 Register var_size_in_bytes,
8699 int con_size_in_bytes,
8700 Register t1,
8701 Register t2,
8702 Label& slow_case) {
8703 assert_different_registers(obj, t1, t2);
8704 assert_different_registers(obj, var_size_in_bytes, t1);
8705 Register end = t2;
8706 Register thread = NOT_LP64(t1) LP64_ONLY(r15_thread);
8708 verify_tlab();
8710 NOT_LP64(get_thread(thread));
8712 movptr(obj, Address(thread, JavaThread::tlab_top_offset()));
8713 if (var_size_in_bytes == noreg) {
8714 lea(end, Address(obj, con_size_in_bytes));
8715 } else {
8716 lea(end, Address(obj, var_size_in_bytes, Address::times_1));
8717 }
8718 cmpptr(end, Address(thread, JavaThread::tlab_end_offset()));
8719 jcc(Assembler::above, slow_case);
8721 // update the tlab top pointer
8722 movptr(Address(thread, JavaThread::tlab_top_offset()), end);
8724 // recover var_size_in_bytes if necessary
8725 if (var_size_in_bytes == end) {
8726 subptr(var_size_in_bytes, obj);
8727 }
8728 verify_tlab();
8729 }
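// Note (illustrative only): the TLAB fast path needs no atomics because
// the buffer is thread-local:
//
//   obj = thread->tlab_top;
//   end = obj + size;
//   if (end > thread->tlab_end) goto slow_case;
//   thread->tlab_top = end;   // bump the pointer, allocation done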
8731 // Preserves rbx, and rdx.
8732 Register MacroAssembler::tlab_refill(Label& retry,
8733 Label& try_eden,
8734 Label& slow_case) {
8735 Register top = rax;
8736 Register t1 = rcx;
8737 Register t2 = rsi;
8738 Register thread_reg = NOT_LP64(rdi) LP64_ONLY(r15_thread);
8739 assert_different_registers(top, thread_reg, t1, t2, /* preserve: */ rbx, rdx);
8740 Label do_refill, discard_tlab;
8742 if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
8743 // No allocation in the shared eden.
8744 jmp(slow_case);
8745 }
8747 NOT_LP64(get_thread(thread_reg));
8749 movptr(top, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
8750 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));
8752 // calculate amount of free space
8753 subptr(t1, top);
8754 shrptr(t1, LogHeapWordSize);
8756 // Retain tlab and allocate object in shared space if
8757 // the amount free in the tlab is too large to discard.
8758 cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())));
8759 jcc(Assembler::lessEqual, discard_tlab);
8761 // Retain
8762 // %%% yuck as movptr...
8763 movptr(t2, (int32_t) ThreadLocalAllocBuffer::refill_waste_limit_increment());
8764 addptr(Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())), t2);
8765 if (TLABStats) {
8766 // increment number of slow_allocations
8767 addl(Address(thread_reg, in_bytes(JavaThread::tlab_slow_allocations_offset())), 1);
8768 }
8769 jmp(try_eden);
8771 bind(discard_tlab);
8772 if (TLABStats) {
8773 // increment number of refills
8774 addl(Address(thread_reg, in_bytes(JavaThread::tlab_number_of_refills_offset())), 1);
8775 // accumulate wastage -- t1 is amount free in tlab
8776 addl(Address(thread_reg, in_bytes(JavaThread::tlab_fast_refill_waste_offset())), t1);
8777 }
8779 // if tlab is currently allocated (top or end != null) then
8780 // fill [top, end + alignment_reserve) with array object
8781 testptr(top, top);
8782 jcc(Assembler::zero, do_refill);
8784 // set up the mark word
8785 movptr(Address(top, oopDesc::mark_offset_in_bytes()), (intptr_t)markOopDesc::prototype()->copy_set_hash(0x2));
8786 // set the length to the remaining space
8787 subptr(t1, typeArrayOopDesc::header_size(T_INT));
8788 addptr(t1, (int32_t)ThreadLocalAllocBuffer::alignment_reserve());
8789 shlptr(t1, log2_intptr(HeapWordSize/sizeof(jint)));
8790 movl(Address(top, arrayOopDesc::length_offset_in_bytes()), t1);
8791 // set klass to intArrayKlass
8792 // dubious reloc why not an oop reloc?
8793 movptr(t1, ExternalAddress((address)Universe::intArrayKlassObj_addr()));
8794 // store klass last; concurrent gcs assume the length is valid if the
8795 // klass field is not null.
8796 store_klass(top, t1);
8798 movptr(t1, top);
8799 subptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())));
8800 incr_allocated_bytes(thread_reg, t1, 0);
8802 // refill the tlab with an eden allocation
8803 bind(do_refill);
8804 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
8805 shlptr(t1, LogHeapWordSize);
8806 // allocate new tlab, address returned in top
8807 eden_allocate(top, t1, 0, t2, slow_case);
8809 // Check that t1 was preserved in eden_allocate.
8810 #ifdef ASSERT
8811 if (UseTLAB) {
8812 Label ok;
8813 Register tsize = rsi;
8814 assert_different_registers(tsize, thread_reg, t1);
8815 push(tsize);
8816 movptr(tsize, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
8817 shlptr(tsize, LogHeapWordSize);
8818 cmpptr(t1, tsize);
8819 jcc(Assembler::equal, ok);
8820 STOP("assert(t1 != tlab size)");
8821 should_not_reach_here();
8823 bind(ok);
8824 pop(tsize);
8825 }
8826 #endif
8827 movptr(Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())), top);
8828 movptr(Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())), top);
8829 addptr(top, t1);
8830 subptr(top, (int32_t)ThreadLocalAllocBuffer::alignment_reserve_in_bytes());
8831 movptr(Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())), top);
8832 verify_tlab();
8833 jmp(retry);
8835 return thread_reg; // for use by caller
8836 }
8838 void MacroAssembler::incr_allocated_bytes(Register thread,
8839 Register var_size_in_bytes,
8840 int con_size_in_bytes,
8841 Register t1) {
8842 if (!thread->is_valid()) {
8843 #ifdef _LP64
8844 thread = r15_thread;
8845 #else
8846 assert(t1->is_valid(), "need temp reg");
8847 thread = t1;
8848 get_thread(thread);
8849 #endif
8850 }
8852 #ifdef _LP64
8853 if (var_size_in_bytes->is_valid()) {
8854 addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
8855 } else {
8856 addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
8857 }
8858 #else
8859 if (var_size_in_bytes->is_valid()) {
8860 addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
8861 } else {
8862 addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
8863 }
8864 adcl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())+4), 0);
8865 #endif
8866 }
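// Note: on 32-bit the 64-bit allocated_bytes counter is updated as an
// add/adc pair:
//
//   lo += size;   // addl sets the carry flag
//   hi += carry;  // adcl(..., 0) folds the carry into the high word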
8868 void MacroAssembler::fp_runtime_fallback(address runtime_entry, int nb_args, int num_fpu_regs_in_use) {
8869 pusha();
8871 // if we are coming from c1, xmm registers may be live
8872 if (UseSSE >= 1) {
8873 subptr(rsp, sizeof(jdouble)* LP64_ONLY(16) NOT_LP64(8));
8874 }
8875 int off = 0;
8876 if (UseSSE == 1) {
8877 movflt(Address(rsp,off++*sizeof(jdouble)),xmm0);
8878 movflt(Address(rsp,off++*sizeof(jdouble)),xmm1);
8879 movflt(Address(rsp,off++*sizeof(jdouble)),xmm2);
8880 movflt(Address(rsp,off++*sizeof(jdouble)),xmm3);
8881 movflt(Address(rsp,off++*sizeof(jdouble)),xmm4);
8882 movflt(Address(rsp,off++*sizeof(jdouble)),xmm5);
8883 movflt(Address(rsp,off++*sizeof(jdouble)),xmm6);
8884 movflt(Address(rsp,off++*sizeof(jdouble)),xmm7);
8885 } else if (UseSSE >= 2) {
8886 movdbl(Address(rsp,off++*sizeof(jdouble)),xmm0);
8887 movdbl(Address(rsp,off++*sizeof(jdouble)),xmm1);
8888 movdbl(Address(rsp,off++*sizeof(jdouble)),xmm2);
8889 movdbl(Address(rsp,off++*sizeof(jdouble)),xmm3);
8890 movdbl(Address(rsp,off++*sizeof(jdouble)),xmm4);
8891 movdbl(Address(rsp,off++*sizeof(jdouble)),xmm5);
8892 movdbl(Address(rsp,off++*sizeof(jdouble)),xmm6);
8893 movdbl(Address(rsp,off++*sizeof(jdouble)),xmm7);
8894 #ifdef _LP64
8895 movdbl(Address(rsp,off++*sizeof(jdouble)),xmm8);
8896 movdbl(Address(rsp,off++*sizeof(jdouble)),xmm9);
8897 movdbl(Address(rsp,off++*sizeof(jdouble)),xmm10);
8898 movdbl(Address(rsp,off++*sizeof(jdouble)),xmm11);
8899 movdbl(Address(rsp,off++*sizeof(jdouble)),xmm12);
8900 movdbl(Address(rsp,off++*sizeof(jdouble)),xmm13);
8901 movdbl(Address(rsp,off++*sizeof(jdouble)),xmm14);
8902 movdbl(Address(rsp,off++*sizeof(jdouble)),xmm15);
8903 #endif
8904 }
8906 // Preserve registers across runtime call
8907 int incoming_argument_and_return_value_offset = -1;
8908 if (num_fpu_regs_in_use > 1) {
8909 // Must preserve all other FPU regs (could alternatively convert
8910 // SharedRuntime::dsin, dcos etc. into assembly routines known not to trash
8911 // FPU state, but cannot trust the C compiler)
8912 NEEDS_CLEANUP;
8913 // NOTE that in this case we also push the incoming argument(s) to
8914 // the stack and restore it later; we also use this stack slot to
8915 // hold the return value from dsin, dcos etc.
8916 for (int i = 0; i < num_fpu_regs_in_use; i++) {
8917 subptr(rsp, sizeof(jdouble));
8918 fstp_d(Address(rsp, 0));
8919 }
8920 incoming_argument_and_return_value_offset = sizeof(jdouble)*(num_fpu_regs_in_use-1);
8921 for (int i = nb_args-1; i >= 0; i--) {
8922 fld_d(Address(rsp, incoming_argument_and_return_value_offset-i*sizeof(jdouble)));
8923 }
8924 }
8926 subptr(rsp, nb_args*sizeof(jdouble));
8927 for (int i = 0; i < nb_args; i++) {
8928 fstp_d(Address(rsp, i*sizeof(jdouble)));
8929 }
8931 #ifdef _LP64
8932 if (nb_args > 0) {
8933 movdbl(xmm0, Address(rsp, 0));
8934 }
8935 if (nb_args > 1) {
8936 movdbl(xmm1, Address(rsp, sizeof(jdouble)));
8937 }
8938 assert(nb_args <= 2, "unsupported number of args");
8939 #endif // _LP64
8941 // NOTE: we must not use call_VM_leaf here because that requires a
8942 // complete interpreter frame in debug mode -- same bug as 4387334
8943 // MacroAssembler::call_VM_leaf_base is perfectly safe and will
8944 // do proper 64bit abi
8946 NEEDS_CLEANUP;
8947 // Need to add stack banging before this runtime call if it needs to
8948 // be taken; however, there is no generic stack banging routine at
8949 // the MacroAssembler level
8951 MacroAssembler::call_VM_leaf_base(runtime_entry, 0);
8953 #ifdef _LP64
8954 movsd(Address(rsp, 0), xmm0);
8955 fld_d(Address(rsp, 0));
8956 #endif // _LP64
8957 addptr(rsp, sizeof(jdouble) * nb_args);
8958 if (num_fpu_regs_in_use > 1) {
8959 // Must save return value to stack and then restore entire FPU
8960 // stack except incoming arguments
8961 fstp_d(Address(rsp, incoming_argument_and_return_value_offset));
8962 for (int i = 0; i < num_fpu_regs_in_use - nb_args; i++) {
8963 fld_d(Address(rsp, 0));
8964 addptr(rsp, sizeof(jdouble));
8965 }
8966 fld_d(Address(rsp, (nb_args-1)*sizeof(jdouble)));
8967 addptr(rsp, sizeof(jdouble) * nb_args);
8968 }
8970 off = 0;
8971 if (UseSSE == 1) {
8972 movflt(xmm0, Address(rsp,off++*sizeof(jdouble)));
8973 movflt(xmm1, Address(rsp,off++*sizeof(jdouble)));
8974 movflt(xmm2, Address(rsp,off++*sizeof(jdouble)));
8975 movflt(xmm3, Address(rsp,off++*sizeof(jdouble)));
8976 movflt(xmm4, Address(rsp,off++*sizeof(jdouble)));
8977 movflt(xmm5, Address(rsp,off++*sizeof(jdouble)));
8978 movflt(xmm6, Address(rsp,off++*sizeof(jdouble)));
8979 movflt(xmm7, Address(rsp,off++*sizeof(jdouble)));
8980 } else if (UseSSE >= 2) {
8981 movdbl(xmm0, Address(rsp,off++*sizeof(jdouble)));
8982 movdbl(xmm1, Address(rsp,off++*sizeof(jdouble)));
8983 movdbl(xmm2, Address(rsp,off++*sizeof(jdouble)));
8984 movdbl(xmm3, Address(rsp,off++*sizeof(jdouble)));
8985 movdbl(xmm4, Address(rsp,off++*sizeof(jdouble)));
8986 movdbl(xmm5, Address(rsp,off++*sizeof(jdouble)));
8987 movdbl(xmm6, Address(rsp,off++*sizeof(jdouble)));
8988 movdbl(xmm7, Address(rsp,off++*sizeof(jdouble)));
8989 #ifdef _LP64
8990 movdbl(xmm8, Address(rsp,off++*sizeof(jdouble)));
8991 movdbl(xmm9, Address(rsp,off++*sizeof(jdouble)));
8992 movdbl(xmm10, Address(rsp,off++*sizeof(jdouble)));
8993 movdbl(xmm11, Address(rsp,off++*sizeof(jdouble)));
8994 movdbl(xmm12, Address(rsp,off++*sizeof(jdouble)));
8995 movdbl(xmm13, Address(rsp,off++*sizeof(jdouble)));
8996 movdbl(xmm14, Address(rsp,off++*sizeof(jdouble)));
8997 movdbl(xmm15, Address(rsp,off++*sizeof(jdouble)));
8998 #endif
8999 }
9000 if (UseSSE >= 1) {
9001 addptr(rsp, sizeof(jdouble)* LP64_ONLY(16) NOT_LP64(8));
9002 }
9003 popa();
9004 }
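// Assumed picture, for illustration: fp_runtime_fallback leaves the stack
// looking roughly like this before the call (addresses grow down):
//
//   pusha GP register save area
//   XMM spill area: 8 (32-bit) or 16 (64-bit) jdouble slots, if UseSSE >= 1
//   saved x87 registers (num_fpu_regs_in_use slots, if more than one in use)
//   nb_args argument slots            <- rsp, read by the C runtime entry
//
// and unwinds it in the reverse order after the call, with the result
// re-loaded onto the x87 stack (via xmm0 on 64-bit).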
9006 static const double pi_4 = 0.7853981633974483;
9008 void MacroAssembler::trigfunc(char trig, int num_fpu_regs_in_use) {
9009 // A hand-coded argument reduction for values in fabs(pi/4, pi/2)
9010 // was attempted in this code; unfortunately it appears that the
9011 // switch to 80-bit precision and back causes this to be
9012 // unprofitable compared with simply performing a runtime call if
9013 // the argument is out of the (-pi/4, pi/4) range.
9015 Register tmp = noreg;
9016 if (!VM_Version::supports_cmov()) {
9017     // fcmp needs a temporary, so preserve rbx
9018 tmp = rbx;
9019 push(tmp);
9020 }
9022 Label slow_case, done;
9024 ExternalAddress pi4_adr = (address)&pi_4;
9025 if (reachable(pi4_adr)) {
9026 // x ?<= pi/4
9027 fld_d(pi4_adr);
9028 fld_s(1); // Stack: X PI/4 X
9029 fabs(); // Stack: |X| PI/4 X
9030 fcmp(tmp);
9031 jcc(Assembler::above, slow_case);
9033 // fastest case: -pi/4 <= x <= pi/4
9034 switch(trig) {
9035 case 's':
9036 fsin();
9037 break;
9038 case 'c':
9039 fcos();
9040 break;
9041 case 't':
9042 ftan();
9043 break;
9044 default:
9045 assert(false, "bad intrinsic");
9046 break;
9047 }
9048 jmp(done);
9049 }
9051 // slow case: runtime call
9052 bind(slow_case);
9054 switch(trig) {
9055 case 's':
9056 {
9057 fp_runtime_fallback(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), 1, num_fpu_regs_in_use);
9058 }
9059 break;
9060 case 'c':
9061 {
9062 fp_runtime_fallback(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), 1, num_fpu_regs_in_use);
9063 }
9064 break;
9065 case 't':
9066 {
9067 fp_runtime_fallback(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), 1, num_fpu_regs_in_use);
9068 }
9069 break;
9070 default:
9071 assert(false, "bad intrinsic");
9072 break;
9073 }
9075 // Come here with result in F-TOS
9076 bind(done);
9078 if (tmp != noreg) {
9079 pop(tmp);
9080 }
9081 }
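// Illustrative C++ sketch of the dispatch above; fsin_hw/fcos_hw/ftan_hw are
// hypothetical stand-ins for the x87 fsin/fcos/ftan instructions, which this
// code only trusts for |x| <= pi/4:
//
//   double trig(char op, double x) {
//     if (fabs(x) <= pi_4) {                        // fast path
//       return op == 's' ? fsin_hw(x) : op == 'c' ? fcos_hw(x) : ftan_hw(x);
//     }
//     return op == 's' ? SharedRuntime::dsin(x)     // slow path: runtime call
//          : op == 'c' ? SharedRuntime::dcos(x)
//          :             SharedRuntime::dtan(x);
//   }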
9084 // Look up the method for a megamorphic invokeinterface call.
9085 // The target method is determined by <intf_klass, itable_index>.
9086 // The receiver klass is in recv_klass.
9087 // On success, the result will be in method_result, and execution falls through.
9088 // On failure, execution transfers to the given label.
9089 void MacroAssembler::lookup_interface_method(Register recv_klass,
9090 Register intf_klass,
9091 RegisterOrConstant itable_index,
9092 Register method_result,
9093 Register scan_temp,
9094 Label& L_no_such_interface) {
9095 assert_different_registers(recv_klass, intf_klass, method_result, scan_temp);
9096 assert(itable_index.is_constant() || itable_index.as_register() == method_result,
9097 "caller must use same register for non-constant itable index as for method");
9099 // Compute start of first itableOffsetEntry (which is at the end of the vtable)
9100 int vtable_base = instanceKlass::vtable_start_offset() * wordSize;
9101 int itentry_off = itableMethodEntry::method_offset_in_bytes();
9102 int scan_step = itableOffsetEntry::size() * wordSize;
9103 int vte_size = vtableEntry::size() * wordSize;
9104 Address::ScaleFactor times_vte_scale = Address::times_ptr;
9105 assert(vte_size == wordSize, "else adjust times_vte_scale");
9107 movl(scan_temp, Address(recv_klass, instanceKlass::vtable_length_offset() * wordSize));
9109 // %%% Could store the aligned, prescaled offset in the klassoop.
9110 lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
9111 if (HeapWordsPerLong > 1) {
9112 // Round up to align_object_offset boundary
9113 // see code for instanceKlass::start_of_itable!
9114 round_to(scan_temp, BytesPerLong);
9115 }
9117 // Adjust recv_klass by scaled itable_index, so we can free itable_index.
9118 assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
9119 lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
9121 // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
9122 // if (scan->interface() == intf) {
9123 // result = (klass + scan->offset() + itable_index);
9124 // }
9125 // }
9126 Label search, found_method;
9128 for (int peel = 1; peel >= 0; peel--) {
9129 movptr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset_in_bytes()));
9130 cmpptr(intf_klass, method_result);
9132 if (peel) {
9133 jccb(Assembler::equal, found_method);
9134 } else {
9135 jccb(Assembler::notEqual, search);
9136 // (invert the test to fall through to found_method...)
9137 }
9139 if (!peel) break;
9141 bind(search);
9143 // Check that the previous entry is non-null. A null entry means that
9144 // the receiver class doesn't implement the interface, and wasn't the
9145 // same as when the caller was compiled.
9146 testptr(method_result, method_result);
9147 jcc(Assembler::zero, L_no_such_interface);
9148 addptr(scan_temp, scan_step);
9149 }
9151 bind(found_method);
9153 // Got a hit.
9154 movl(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset_in_bytes()));
9155 movptr(method_result, Address(recv_klass, scan_temp, Address::times_1));
9156 }
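// Illustrative only (accessor names are stand-ins for the offsets used
// above): spelled out in C++, the peeled scan is
//
//   address scan = itable_start(recv_klass);            // the lea above
//   for (;; scan += scan_step) {
//     klassOop intf = ((itableOffsetEntry*)scan)->interface();
//     if (intf == NULL)        goto L_no_such_interface; // end of itable
//     if (intf == intf_klass)  break;
//   }
//   int off = ((itableOffsetEntry*)scan)->offset();
//   method_result = *(methodOop*)((address)recv_klass + off
//                                 + itable_index * wordSize + itentry_off);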
9159 // virtual method calling
9160 void MacroAssembler::lookup_virtual_method(Register recv_klass,
9161 RegisterOrConstant vtable_index,
9162 Register method_result) {
9163 const int base = instanceKlass::vtable_start_offset() * wordSize;
9164 assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below");
9165 Address vtable_entry_addr(recv_klass,
9166 vtable_index, Address::times_ptr,
9167 base + vtableEntry::method_offset_in_bytes());
9168 movptr(method_result, vtable_entry_addr);
9169 }
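// Illustrative only: the single movptr above is, in C++ terms,
//
//   method_result = *(methodOop*)((address)recv_klass
//                     + instanceKlass::vtable_start_offset() * wordSize
//                     + vtable_index * wordSize
//                     + vtableEntry::method_offset_in_bytes());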
9172 void MacroAssembler::check_klass_subtype(Register sub_klass,
9173 Register super_klass,
9174 Register temp_reg,
9175 Label& L_success) {
9176 Label L_failure;
9177 check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, NULL);
9178 check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, NULL);
9179 bind(L_failure);
9180 }
9183 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
9184 Register super_klass,
9185 Register temp_reg,
9186 Label* L_success,
9187 Label* L_failure,
9188 Label* L_slow_path,
9189 RegisterOrConstant super_check_offset) {
9190 assert_different_registers(sub_klass, super_klass, temp_reg);
9191 bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
9192 if (super_check_offset.is_register()) {
9193 assert_different_registers(sub_klass, super_klass,
9194 super_check_offset.as_register());
9195 } else if (must_load_sco) {
9196 assert(temp_reg != noreg, "supply either a temp or a register offset");
9197 }
9199 Label L_fallthrough;
9200 int label_nulls = 0;
9201 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
9202 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
9203 if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
9204 assert(label_nulls <= 1, "at most one NULL in the batch");
9206 int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
9207 int sco_offset = in_bytes(Klass::super_check_offset_offset());
9208 Address super_check_offset_addr(super_klass, sco_offset);
9210 // Hacked jcc, which "knows" that L_fallthrough, at least, is in
9211 // range of a jccb. If this routine grows larger, reconsider at
9212 // least some of these.
9213 #define local_jcc(assembler_cond, label) \
9214 if (&(label) == &L_fallthrough) jccb(assembler_cond, label); \
9215 else jcc( assembler_cond, label) /*omit semi*/
9217 // Hacked jmp, which may only be used just before L_fallthrough.
9218 #define final_jmp(label) \
9219 if (&(label) == &L_fallthrough) { /*do nothing*/ } \
9220 else jmp(label) /*omit semi*/
9222 // If the pointers are equal, we are done (e.g., String[] elements).
9223 // This self-check enables sharing of secondary supertype arrays among
9224 // non-primary types such as array-of-interface. Otherwise, each such
9225 // type would need its own customized SSA.
9226 // We move this check to the front of the fast path because many
9227 // type checks are in fact trivially successful in this manner,
9228 // so we get a nicely predicted branch right at the start of the check.
9229 cmpptr(sub_klass, super_klass);
9230 local_jcc(Assembler::equal, *L_success);
9232 // Check the supertype display:
9233 if (must_load_sco) {
9234 // Positive movl does right thing on LP64.
9235 movl(temp_reg, super_check_offset_addr);
9236 super_check_offset = RegisterOrConstant(temp_reg);
9237 }
9238 Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0);
9239 cmpptr(super_klass, super_check_addr); // load displayed supertype
9241 // This check has worked decisively for primary supers.
9242 // Secondary supers are sought in the super_cache ('super_cache_addr').
9243 // (Secondary supers are interfaces and very deeply nested subtypes.)
9244 // This works in the same check above because of a tricky aliasing
9245 // between the super_cache and the primary super display elements.
9246 // (The 'super_check_addr' can address either, as the case requires.)
9247 // Note that the cache is updated below if it does not help us find
9248 // what we need immediately.
9249 // So if it was a primary super, we can just fail immediately.
9250 // Otherwise, it's the slow path for us (no success at this point).
9252 if (super_check_offset.is_register()) {
9253 local_jcc(Assembler::equal, *L_success);
9254 cmpl(super_check_offset.as_register(), sc_offset);
9255 if (L_failure == &L_fallthrough) {
9256 local_jcc(Assembler::equal, *L_slow_path);
9257 } else {
9258 local_jcc(Assembler::notEqual, *L_failure);
9259 final_jmp(*L_slow_path);
9260 }
9261 } else if (super_check_offset.as_constant() == sc_offset) {
9262 // Need a slow path; fast failure is impossible.
9263 if (L_slow_path == &L_fallthrough) {
9264 local_jcc(Assembler::equal, *L_success);
9265 } else {
9266 local_jcc(Assembler::notEqual, *L_slow_path);
9267 final_jmp(*L_success);
9268 }
9269 } else {
9270 // No slow path; it's a fast decision.
9271 if (L_failure == &L_fallthrough) {
9272 local_jcc(Assembler::equal, *L_success);
9273 } else {
9274 local_jcc(Assembler::notEqual, *L_failure);
9275 final_jmp(*L_success);
9276 }
9277 }
9279 bind(L_fallthrough);
9281 #undef local_jcc
9282 #undef final_jmp
9283 }
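// Illustrative C++ sketch of the decision tree above, with sco standing for
// the super_check_offset loaded (or passed in) for the superclass:
//
//   if (sub_klass == super_klass)                              goto success;
//   if (*(klassOop*)((address)sub_klass + sco) == super_klass) goto success;
//   // a miss at the secondary_super_cache offset is inconclusive:
//   if (sco == sc_offset)                                      goto slow_path;
//   goto failure;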
9286 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
9287 Register super_klass,
9288 Register temp_reg,
9289 Register temp2_reg,
9290 Label* L_success,
9291 Label* L_failure,
9292 bool set_cond_codes) {
9293 assert_different_registers(sub_klass, super_klass, temp_reg);
9294 if (temp2_reg != noreg)
9295 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg);
9296 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)
9298 Label L_fallthrough;
9299 int label_nulls = 0;
9300 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
9301 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
9302 assert(label_nulls <= 1, "at most one NULL in the batch");
9304 // a couple of useful fields in sub_klass:
9305 int ss_offset = in_bytes(Klass::secondary_supers_offset());
9306 int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
9307 Address secondary_supers_addr(sub_klass, ss_offset);
9308 Address super_cache_addr( sub_klass, sc_offset);
9310 // Do a linear scan of the secondary super-klass chain.
9311 // This code is rarely used, so simplicity is a virtue here.
9312 // The repne_scan instruction uses fixed registers, which we must spill.
9313 // Don't worry too much about pre-existing connections with the input regs.
9315 assert(sub_klass != rax, "killed reg"); // killed by mov(rax, super)
9316 assert(sub_klass != rcx, "killed reg"); // killed by lea(rcx, &pst_counter)
9318 // Get super_klass value into rax (even if it was in rdi or rcx).
9319 bool pushed_rax = false, pushed_rcx = false, pushed_rdi = false;
9320 if (super_klass != rax || UseCompressedOops) {
9321 if (!IS_A_TEMP(rax)) { push(rax); pushed_rax = true; }
9322 mov(rax, super_klass);
9323 }
9324 if (!IS_A_TEMP(rcx)) { push(rcx); pushed_rcx = true; }
9325 if (!IS_A_TEMP(rdi)) { push(rdi); pushed_rdi = true; }
9327 #ifndef PRODUCT
9328 int* pst_counter = &SharedRuntime::_partial_subtype_ctr;
9329 ExternalAddress pst_counter_addr((address) pst_counter);
9330 NOT_LP64( incrementl(pst_counter_addr) );
9331 LP64_ONLY( lea(rcx, pst_counter_addr) );
9332 LP64_ONLY( incrementl(Address(rcx, 0)) );
9333 #endif //PRODUCT
9335 // We will consult the secondary-super array.
9336 movptr(rdi, secondary_supers_addr);
9337 // Load the array length. (Positive movl does right thing on LP64.)
9338 movl(rcx, Address(rdi, arrayOopDesc::length_offset_in_bytes()));
9339 // Skip to start of data.
9340 addptr(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
9342 // Scan RCX words at [RDI] for an occurrence of RAX.
9343 // Set NZ/Z based on last compare.
9344   // The Z flag will not be set by 'repne' itself if RCX == 0, since 'repne'
9345   // does not change flags; only the repeated scas instruction sets them.
9346 // Set Z = 0 (not equal) before 'repne' to indicate that class was not found.
9347 #ifdef _LP64
9348     // This part is tricky, as values in the supers array could be 32 or 64 bit
9349     // wide, and objArray elements are always stored encoded, thus we need to
9350     // encode the value of rax before the repne. Note that rax is dead after the repne.
9351 if (UseCompressedOops) {
9352 encode_heap_oop_not_null(rax); // Changes flags.
9353 // The superclass is never null; it would be a basic system error if a null
9354 // pointer were to sneak in here. Note that we have already loaded the
9355 // Klass::super_check_offset from the super_klass in the fast path,
9356 // so if there is a null in that register, we are already in the afterlife.
9357 testl(rax,rax); // Set Z = 0
9358 repne_scanl();
9359 } else
9360 #endif // _LP64
9361 {
9362 testptr(rax,rax); // Set Z = 0
9363 repne_scan();
9364 }
9365 // Unspill the temp. registers:
9366 if (pushed_rdi) pop(rdi);
9367 if (pushed_rcx) pop(rcx);
9368 if (pushed_rax) pop(rax);
9370 if (set_cond_codes) {
9371 // Special hack for the AD files: rdi is guaranteed non-zero.
9372 assert(!pushed_rdi, "rdi must be left non-NULL");
9373 // Also, the condition codes are properly set Z/NZ on succeed/failure.
9374 }
9376 if (L_failure == &L_fallthrough)
9377 jccb(Assembler::notEqual, *L_failure);
9378 else jcc(Assembler::notEqual, *L_failure);
9380 // Success. Cache the super we found and proceed in triumph.
9381 movptr(super_cache_addr, super_klass);
9383 if (L_success != &L_fallthrough) {
9384 jmp(*L_success);
9385 }
9387 #undef IS_A_TEMP
9389 bind(L_fallthrough);
9390 }
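// Illustrative only (accessor names assumed): ignoring the register spills
// and the compressed-oops encoding, the repne_scan above is this loop:
//
//   objArrayOop ss = sub_klass->secondary_supers();
//   for (int i = 0; i < ss->length(); i++) {
//     if (ss->obj_at(i) == super_klass) {
//       sub_klass->set_secondary_super_cache(super_klass); // remember the hit
//       goto success;                                      // Z set
//     }
//   }
//   goto failure;                                          // Z clear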
9393 void MacroAssembler::cmov32(Condition cc, Register dst, Address src) {
9394 if (VM_Version::supports_cmov()) {
9395 cmovl(cc, dst, src);
9396 } else {
9397 Label L;
9398 jccb(negate_condition(cc), L);
9399 movl(dst, src);
9400 bind(L);
9401 }
9402 }
9404 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) {
9405 if (VM_Version::supports_cmov()) {
9406 cmovl(cc, dst, src);
9407 } else {
9408 Label L;
9409 jccb(negate_condition(cc), L);
9410 movl(dst, src);
9411 bind(L);
9412 }
9413 }
9415 void MacroAssembler::verify_oop(Register reg, const char* s) {
9416 if (!VerifyOops) return;
9418 // Pass register number to verify_oop_subroutine
9419 char* b = new char[strlen(s) + 50];
9420 sprintf(b, "verify_oop: %s: %s", reg->name(), s);
9421 BLOCK_COMMENT("verify_oop {");
9422 #ifdef _LP64
9423 push(rscratch1); // save r10, trashed by movptr()
9424 #endif
9425   push(rax);                          // save rax
9426 push(reg); // pass register argument
9427 ExternalAddress buffer((address) b);
9428 // avoid using pushptr, as it modifies scratch registers
9429 // and our contract is not to modify anything
9430 movptr(rax, buffer.addr());
9431 push(rax);
9432 // call indirectly to solve generation ordering problem
9433 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
9434 call(rax);
9435 // Caller pops the arguments (oop, message) and restores rax, r10
9436 BLOCK_COMMENT("} verify_oop");
9437 }
9440 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
9441 Register tmp,
9442 int offset) {
9443 intptr_t value = *delayed_value_addr;
9444 if (value != 0)
9445 return RegisterOrConstant(value + offset);
9447 // load indirectly to solve generation ordering problem
9448 movptr(tmp, ExternalAddress((address) delayed_value_addr));
9450 #ifdef ASSERT
9451 { Label L;
9452 testptr(tmp, tmp);
9453 if (WizardMode) {
9454 jcc(Assembler::notZero, L);
9455 char* buf = new char[40];
9456 sprintf(buf, "DelayedValue="INTPTR_FORMAT, delayed_value_addr[1]);
9457 STOP(buf);
9458 } else {
9459 jccb(Assembler::notZero, L);
9460 hlt();
9461 }
9462 bind(L);
9463 }
9464 #endif
9466 if (offset != 0)
9467 addptr(tmp, offset);
9469 return RegisterOrConstant(tmp);
9470 }
9473 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
9474 int extra_slot_offset) {
9475 // cf. TemplateTable::prepare_invoke(), if (load_receiver).
9476 int stackElementSize = Interpreter::stackElementSize;
9477 int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
9478 #ifdef ASSERT
9479 int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
9480 assert(offset1 - offset == stackElementSize, "correct arithmetic");
9481 #endif
9482 Register scale_reg = noreg;
9483 Address::ScaleFactor scale_factor = Address::no_scale;
9484 if (arg_slot.is_constant()) {
9485 offset += arg_slot.as_constant() * stackElementSize;
9486 } else {
9487 scale_reg = arg_slot.as_register();
9488 scale_factor = Address::times(stackElementSize);
9489 }
9490 offset += wordSize; // return PC is on stack
9491 return Address(rsp, scale_reg, scale_factor, offset);
9492 }
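// Illustrative only: for a constant arg_slot the returned address reduces to
//
//   rsp + Interpreter::expr_offset_in_bytes(extra_slot_offset)
//       + arg_slot * Interpreter::stackElementSize
//       + wordSize                        // skip the return PC on the stack
//
// and for a register arg_slot to the same sum with the slot supplied through
// a scaled index register.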
9495 void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
9496 if (!VerifyOops) return;
9498 // Address adjust(addr.base(), addr.index(), addr.scale(), addr.disp() + BytesPerWord);
9499 // Pass register number to verify_oop_subroutine
9500 char* b = new char[strlen(s) + 50];
9501 sprintf(b, "verify_oop_addr: %s", s);
9503 #ifdef _LP64
9504 push(rscratch1); // save r10, trashed by movptr()
9505 #endif
9506   push(rax);                          // save rax
9507 // addr may contain rsp so we will have to adjust it based on the push
9508 // we just did (and on 64 bit we do two pushes)
9509 // NOTE: 64bit seemed to have had a bug in that it did movq(addr, rax); which
9510 // stores rax into addr which is backwards of what was intended.
9511 if (addr.uses(rsp)) {
9512 lea(rax, addr);
9513 pushptr(Address(rax, LP64_ONLY(2 *) BytesPerWord));
9514 } else {
9515 pushptr(addr);
9516 }
9518 ExternalAddress buffer((address) b);
9519 // pass msg argument
9520 // avoid using pushptr, as it modifies scratch registers
9521 // and our contract is not to modify anything
9522 movptr(rax, buffer.addr());
9523 push(rax);
9525 // call indirectly to solve generation ordering problem
9526 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
9527 call(rax);
9528 // Caller pops the arguments (addr, message) and restores rax, r10.
9529 }
9531 void MacroAssembler::verify_tlab() {
9532 #ifdef ASSERT
9533 if (UseTLAB && VerifyOops) {
9534 Label next, ok;
9535 Register t1 = rsi;
9536 Register thread_reg = NOT_LP64(rbx) LP64_ONLY(r15_thread);
9538 push(t1);
9539 NOT_LP64(push(thread_reg));
9540 NOT_LP64(get_thread(thread_reg));
9542 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
9543 cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())));
9544 jcc(Assembler::aboveEqual, next);
9545 STOP("assert(top >= start)");
9546 should_not_reach_here();
9548 bind(next);
9549 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));
9550 cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
9551 jcc(Assembler::aboveEqual, ok);
9552 STOP("assert(top <= end)");
9553 should_not_reach_here();
9555 bind(ok);
9556 NOT_LP64(pop(thread_reg));
9557 pop(t1);
9558 }
9559 #endif
9560 }
9562 class ControlWord {
9563 public:
9564 int32_t _value;
9566 int rounding_control() const { return (_value >> 10) & 3 ; }
9567 int precision_control() const { return (_value >> 8) & 3 ; }
9568 bool precision() const { return ((_value >> 5) & 1) != 0; }
9569 bool underflow() const { return ((_value >> 4) & 1) != 0; }
9570 bool overflow() const { return ((_value >> 3) & 1) != 0; }
9571 bool zero_divide() const { return ((_value >> 2) & 1) != 0; }
9572 bool denormalized() const { return ((_value >> 1) & 1) != 0; }
9573 bool invalid() const { return ((_value >> 0) & 1) != 0; }
9575 void print() const {
9576 // rounding control
9577 const char* rc;
9578 switch (rounding_control()) {
9579 case 0: rc = "round near"; break;
9580 case 1: rc = "round down"; break;
9581 case 2: rc = "round up "; break;
9582 case 3: rc = "chop "; break;
9583 };
9584 // precision control
9585 const char* pc;
9586 switch (precision_control()) {
9587 case 0: pc = "24 bits "; break;
9588 case 1: pc = "reserved"; break;
9589 case 2: pc = "53 bits "; break;
9590 case 3: pc = "64 bits "; break;
9591 };
9592 // flags
9593 char f[9];
9594 f[0] = ' ';
9595 f[1] = ' ';
9596 f[2] = (precision ()) ? 'P' : 'p';
9597 f[3] = (underflow ()) ? 'U' : 'u';
9598 f[4] = (overflow ()) ? 'O' : 'o';
9599 f[5] = (zero_divide ()) ? 'Z' : 'z';
9600 f[6] = (denormalized()) ? 'D' : 'd';
9601 f[7] = (invalid ()) ? 'I' : 'i';
9602 f[8] = '\x0';
9603 // output
9604 printf("%04x masks = %s, %s, %s", _value & 0xFFFF, f, rc, pc);
9605 }
9607 };
9609 class StatusWord {
9610 public:
9611 int32_t _value;
9613 bool busy() const { return ((_value >> 15) & 1) != 0; }
9614 bool C3() const { return ((_value >> 14) & 1) != 0; }
9615 bool C2() const { return ((_value >> 10) & 1) != 0; }
9616 bool C1() const { return ((_value >> 9) & 1) != 0; }
9617 bool C0() const { return ((_value >> 8) & 1) != 0; }
9618 int top() const { return (_value >> 11) & 7 ; }
9619 bool error_status() const { return ((_value >> 7) & 1) != 0; }
9620 bool stack_fault() const { return ((_value >> 6) & 1) != 0; }
9621 bool precision() const { return ((_value >> 5) & 1) != 0; }
9622 bool underflow() const { return ((_value >> 4) & 1) != 0; }
9623 bool overflow() const { return ((_value >> 3) & 1) != 0; }
9624 bool zero_divide() const { return ((_value >> 2) & 1) != 0; }
9625 bool denormalized() const { return ((_value >> 1) & 1) != 0; }
9626 bool invalid() const { return ((_value >> 0) & 1) != 0; }
9628 void print() const {
9629 // condition codes
9630 char c[5];
9631 c[0] = (C3()) ? '3' : '-';
9632 c[1] = (C2()) ? '2' : '-';
9633 c[2] = (C1()) ? '1' : '-';
9634 c[3] = (C0()) ? '0' : '-';
9635 c[4] = '\x0';
9636 // flags
9637 char f[9];
9638 f[0] = (error_status()) ? 'E' : '-';
9639 f[1] = (stack_fault ()) ? 'S' : '-';
9640 f[2] = (precision ()) ? 'P' : '-';
9641 f[3] = (underflow ()) ? 'U' : '-';
9642 f[4] = (overflow ()) ? 'O' : '-';
9643 f[5] = (zero_divide ()) ? 'Z' : '-';
9644 f[6] = (denormalized()) ? 'D' : '-';
9645 f[7] = (invalid ()) ? 'I' : '-';
9646 f[8] = '\x0';
9647 // output
9648 printf("%04x flags = %s, cc = %s, top = %d", _value & 0xFFFF, f, c, top());
9649 }
9651 };
9653 class TagWord {
9654 public:
9655 int32_t _value;
9657 int tag_at(int i) const { return (_value >> (i*2)) & 3; }
9659 void print() const {
9660 printf("%04x", _value & 0xFFFF);
9661 }
9663 };
9665 class FPU_Register {
9666 public:
9667 int32_t _m0;
9668 int32_t _m1;
9669 int16_t _ex;
9671 bool is_indefinite() const {
9672 return _ex == -1 && _m1 == (int32_t)0xC0000000 && _m0 == 0;
9673 }
9675 void print() const {
9676 char sign = (_ex < 0) ? '-' : '+';
9677 const char* kind = (_ex == 0x7FFF || _ex == (int16_t)-1) ? "NaN" : " ";
9678 printf("%c%04hx.%08x%08x %s", sign, _ex, _m1, _m0, kind);
9679 };
9681 };
9683 class FPU_State {
9684 public:
9685 enum {
9686 register_size = 10,
9687 number_of_registers = 8,
9688 register_mask = 7
9689 };
9691 ControlWord _control_word;
9692 StatusWord _status_word;
9693 TagWord _tag_word;
9694 int32_t _error_offset;
9695 int32_t _error_selector;
9696 int32_t _data_offset;
9697 int32_t _data_selector;
9698 int8_t _register[register_size * number_of_registers];
9700 int tag_for_st(int i) const { return _tag_word.tag_at((_status_word.top() + i) & register_mask); }
9701 FPU_Register* st(int i) const { return (FPU_Register*)&_register[register_size * i]; }
9703 const char* tag_as_string(int tag) const {
9704 switch (tag) {
9705 case 0: return "valid";
9706 case 1: return "zero";
9707 case 2: return "special";
9708 case 3: return "empty";
9709 }
9710 ShouldNotReachHere();
9711 return NULL;
9712 }
9714 void print() const {
9715 // print computation registers
9716 { int t = _status_word.top();
9717 for (int i = 0; i < number_of_registers; i++) {
9718 int j = (i - t) & register_mask;
9719 printf("%c r%d = ST%d = ", (j == 0 ? '*' : ' '), i, j);
9720 st(j)->print();
9721 printf(" %s\n", tag_as_string(_tag_word.tag_at(i)));
9722 }
9723 }
9724 printf("\n");
9725 // print control registers
9726 printf("ctrl = "); _control_word.print(); printf("\n");
9727 printf("stat = "); _status_word .print(); printf("\n");
9728 printf("tags = "); _tag_word .print(); printf("\n");
9729 }
9731 };
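// Illustrative only: ST(i) names are relative to the rotating top-of-stack
// pointer, so tag_for_st() maps a logical slot to its physical register
// before indexing the tag word:
//
//   int physical = (_status_word.top() + i) & register_mask;  // mod 8
//   int tag      = _tag_word.tag_at(physical);                // 2 bits each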
9733 class Flag_Register {
9734 public:
9735 int32_t _value;
9737 bool overflow() const { return ((_value >> 11) & 1) != 0; }
9738 bool direction() const { return ((_value >> 10) & 1) != 0; }
9739 bool sign() const { return ((_value >> 7) & 1) != 0; }
9740 bool zero() const { return ((_value >> 6) & 1) != 0; }
9741 bool auxiliary_carry() const { return ((_value >> 4) & 1) != 0; }
9742 bool parity() const { return ((_value >> 2) & 1) != 0; }
9743 bool carry() const { return ((_value >> 0) & 1) != 0; }
9745 void print() const {
9746 // flags
9747 char f[8];
9748 f[0] = (overflow ()) ? 'O' : '-';
9749 f[1] = (direction ()) ? 'D' : '-';
9750 f[2] = (sign ()) ? 'S' : '-';
9751 f[3] = (zero ()) ? 'Z' : '-';
9752 f[4] = (auxiliary_carry()) ? 'A' : '-';
9753 f[5] = (parity ()) ? 'P' : '-';
9754 f[6] = (carry ()) ? 'C' : '-';
9755 f[7] = '\x0';
9756 // output
9757 printf("%08x flags = %s", _value, f);
9758 }
9760 };
9762 class IU_Register {
9763 public:
9764 int32_t _value;
9766 void print() const {
9767 printf("%08x %11d", _value, _value);
9768 }
9770 };
9772 class IU_State {
9773 public:
9774 Flag_Register _eflags;
9775 IU_Register _rdi;
9776 IU_Register _rsi;
9777 IU_Register _rbp;
9778 IU_Register _rsp;
9779 IU_Register _rbx;
9780 IU_Register _rdx;
9781 IU_Register _rcx;
9782 IU_Register _rax;
9784 void print() const {
9785 // computation registers
9786 printf("rax, = "); _rax.print(); printf("\n");
9787 printf("rbx, = "); _rbx.print(); printf("\n");
9788 printf("rcx = "); _rcx.print(); printf("\n");
9789 printf("rdx = "); _rdx.print(); printf("\n");
9790 printf("rdi = "); _rdi.print(); printf("\n");
9791 printf("rsi = "); _rsi.print(); printf("\n");
9792 printf("rbp, = "); _rbp.print(); printf("\n");
9793 printf("rsp = "); _rsp.print(); printf("\n");
9794 printf("\n");
9795 // control registers
9796 printf("flgs = "); _eflags.print(); printf("\n");
9797 }
9798 };
9801 class CPU_State {
9802 public:
9803 FPU_State _fpu_state;
9804 IU_State _iu_state;
9806 void print() const {
9807 printf("--------------------------------------------------\n");
9808 _iu_state .print();
9809 printf("\n");
9810 _fpu_state.print();
9811 printf("--------------------------------------------------\n");
9812 }
9814 };
9817 static void _print_CPU_state(CPU_State* state) {
9818 state->print();
9819 };
9822 void MacroAssembler::print_CPU_state() {
9823 push_CPU_state();
9824 push(rsp); // pass CPU state
9825 call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state)));
9826 addptr(rsp, wordSize); // discard argument
9827 pop_CPU_state();
9828 }
9831 static bool _verify_FPU(int stack_depth, char* s, CPU_State* state) {
9832 static int counter = 0;
9833 FPU_State* fs = &state->_fpu_state;
9834 counter++;
9835 // For leaf calls, only verify that the top few elements remain empty.
9836 // We only need 1 empty at the top for C2 code.
9837 if( stack_depth < 0 ) {
9838 if( fs->tag_for_st(7) != 3 ) {
9839 printf("FPR7 not empty\n");
9840 state->print();
9841 assert(false, "error");
9842 return false;
9843 }
9844 return true; // All other stack states do not matter
9845 }
9847 assert((fs->_control_word._value & 0xffff) == StubRoutines::_fpu_cntrl_wrd_std,
9848 "bad FPU control word");
9850 // compute stack depth
9851 int i = 0;
9852 while (i < FPU_State::number_of_registers && fs->tag_for_st(i) < 3) i++;
9853 int d = i;
9854 while (i < FPU_State::number_of_registers && fs->tag_for_st(i) == 3) i++;
9855 // verify findings
9856 if (i != FPU_State::number_of_registers) {
9857 // stack not contiguous
9858 printf("%s: stack not contiguous at ST%d\n", s, i);
9859 state->print();
9860 assert(false, "error");
9861 return false;
9862 }
9863 // check if computed stack depth corresponds to expected stack depth
9864 if (stack_depth < 0) {
9865 // expected stack depth is -stack_depth or less
9866 if (d > -stack_depth) {
9867 // too many elements on the stack
9868 printf("%s: <= %d stack elements expected but found %d\n", s, -stack_depth, d);
9869 state->print();
9870 assert(false, "error");
9871 return false;
9872 }
9873 } else {
9874 // expected stack depth is stack_depth
9875 if (d != stack_depth) {
9876 // wrong stack depth
9877 printf("%s: %d stack elements expected but found %d\n", s, stack_depth, d);
9878 state->print();
9879 assert(false, "error");
9880 return false;
9881 }
9882 }
9883 // everything is cool
9884 return true;
9885 }
9888 void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
9889 if (!VerifyFPU) return;
9890 push_CPU_state();
9891 push(rsp); // pass CPU state
9892 ExternalAddress msg((address) s);
9893 // pass message string s
9894 pushptr(msg.addr());
9895 push(stack_depth); // pass stack depth
9896 call(RuntimeAddress(CAST_FROM_FN_PTR(address, _verify_FPU)));
9897 addptr(rsp, 3 * wordSize); // discard arguments
9898 // check for error
9899 { Label L;
9900 testl(rax, rax);
9901 jcc(Assembler::notZero, L);
9902 int3(); // break if error condition
9903 bind(L);
9904 }
9905 pop_CPU_state();
9906 }
9908 void MacroAssembler::load_klass(Register dst, Register src) {
9909 #ifdef _LP64
9910 if (UseCompressedOops) {
9911 movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
9912 decode_heap_oop_not_null(dst);
9913 } else
9914 #endif
9915 movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
9916 }
9918 void MacroAssembler::load_prototype_header(Register dst, Register src) {
9919 #ifdef _LP64
9920 if (UseCompressedOops) {
9921 assert (Universe::heap() != NULL, "java heap should be initialized");
9922 movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
9923 if (Universe::narrow_oop_shift() != 0) {
9924 assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
9925 if (LogMinObjAlignmentInBytes == Address::times_8) {
9926 movq(dst, Address(r12_heapbase, dst, Address::times_8, Klass::prototype_header_offset()));
9927 } else {
9928 // OK to use shift since we don't need to preserve flags.
9929 shlq(dst, LogMinObjAlignmentInBytes);
9930 movq(dst, Address(r12_heapbase, dst, Address::times_1, Klass::prototype_header_offset()));
9931 }
9932 } else {
9933 movq(dst, Address(dst, Klass::prototype_header_offset()));
9934 }
9935 } else
9936 #endif
9937 {
9938 movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
9939 movptr(dst, Address(dst, Klass::prototype_header_offset()));
9940 }
9941 }
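// Illustrative only: when the oop shift is 3, the decode of the narrow klass
// and the dependent load above fold into a single scaled addressing mode,
// in effect
//
//   dst = *(uintptr_t*)((address)r12_heapbase + ((uintptr_t)narrow << 3)
//                       + Klass::prototype_header_offset());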
9943 void MacroAssembler::store_klass(Register dst, Register src) {
9944 #ifdef _LP64
9945 if (UseCompressedOops) {
9946 encode_heap_oop_not_null(src);
9947 movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
9948 } else
9949 #endif
9950 movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
9951 }
9953 void MacroAssembler::load_heap_oop(Register dst, Address src) {
9954 #ifdef _LP64
9955 if (UseCompressedOops) {
9956 movl(dst, src);
9957 decode_heap_oop(dst);
9958 } else
9959 #endif
9960 movptr(dst, src);
9961 }
9963 // Doesn't do verification, generates fixed-size code
9964 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src) {
9965 #ifdef _LP64
9966 if (UseCompressedOops) {
9967 movl(dst, src);
9968 decode_heap_oop_not_null(dst);
9969 } else
9970 #endif
9971 movptr(dst, src);
9972 }
9974 void MacroAssembler::store_heap_oop(Address dst, Register src) {
9975 #ifdef _LP64
9976 if (UseCompressedOops) {
9977 assert(!dst.uses(src), "not enough registers");
9978 encode_heap_oop(src);
9979 movl(dst, src);
9980 } else
9981 #endif
9982 movptr(dst, src);
9983 }
9985 void MacroAssembler::cmp_heap_oop(Register src1, Address src2, Register tmp) {
9986 assert_different_registers(src1, tmp);
9987 #ifdef _LP64
9988 if (UseCompressedOops) {
9989 bool did_push = false;
9990 if (tmp == noreg) {
9991 tmp = rax;
9992 push(tmp);
9993 did_push = true;
9994 assert(!src2.uses(rsp), "can't push");
9995 }
9996 load_heap_oop(tmp, src2);
9997 cmpptr(src1, tmp);
9998 if (did_push) pop(tmp);
9999 } else
10000 #endif
10001 cmpptr(src1, src2);
10002 }
10004 // Used for storing NULLs.
10005 void MacroAssembler::store_heap_oop_null(Address dst) {
10006 #ifdef _LP64
10007 if (UseCompressedOops) {
10008 movl(dst, (int32_t)NULL_WORD);
10009 } else {
10010 movslq(dst, (int32_t)NULL_WORD);
10011 }
10012 #else
10013 movl(dst, (int32_t)NULL_WORD);
10014 #endif
10015 }
10017 #ifdef _LP64
10018 void MacroAssembler::store_klass_gap(Register dst, Register src) {
10019 if (UseCompressedOops) {
10020 // Store to klass gap in destination
10021 movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
10022 }
10023 }
10025 #ifdef ASSERT
10026 void MacroAssembler::verify_heapbase(const char* msg) {
10027 assert (UseCompressedOops, "should be compressed");
10028 assert (Universe::heap() != NULL, "java heap should be initialized");
10029 if (CheckCompressedOops) {
10030 Label ok;
10031 push(rscratch1); // cmpptr trashes rscratch1
10032 cmpptr(r12_heapbase, ExternalAddress((address)Universe::narrow_oop_base_addr()));
10033 jcc(Assembler::equal, ok);
10034 STOP(msg);
10035 bind(ok);
10036 pop(rscratch1);
10037 }
10038 }
10039 #endif
10041 // Algorithm must match oop.inline.hpp encode_heap_oop.
10042 void MacroAssembler::encode_heap_oop(Register r) {
10043 #ifdef ASSERT
10044 verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
10045 #endif
10046 verify_oop(r, "broken oop in encode_heap_oop");
10047 if (Universe::narrow_oop_base() == NULL) {
10048 if (Universe::narrow_oop_shift() != 0) {
10049 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
10050 shrq(r, LogMinObjAlignmentInBytes);
10051 }
10052 return;
10053 }
10054   testq(r, r);
10055   cmovq(Assembler::equal, r, r12_heapbase); // NULL oop becomes the heap base ...
10056   subq(r, r12_heapbase);                    // ... so a NULL oop encodes to 0
10057   shrq(r, LogMinObjAlignmentInBytes);
10058 }
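// Illustrative worked example (base and addresses assumed): with
// narrow_oop_base = 0x7f0000000000 and shift = 3, the sequence above gives
//
//   encode(0x7f0000001238) = (0x7f0000001238 - base) >> 3 = 0x247
//   encode(NULL)           = (base - base) >> 3           = 0
//
// where the cmovq is what substitutes the base for a NULL oop so that NULL
// encodes to 0.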
10060 void MacroAssembler::encode_heap_oop_not_null(Register r) {
10061 #ifdef ASSERT
10062 verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?");
10063 if (CheckCompressedOops) {
10064 Label ok;
10065 testq(r, r);
10066 jcc(Assembler::notEqual, ok);
10067 STOP("null oop passed to encode_heap_oop_not_null");
10068 bind(ok);
10069 }
10070 #endif
10071 verify_oop(r, "broken oop in encode_heap_oop_not_null");
10072 if (Universe::narrow_oop_base() != NULL) {
10073 subq(r, r12_heapbase);
10074 }
10075 if (Universe::narrow_oop_shift() != 0) {
10076 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
10077 shrq(r, LogMinObjAlignmentInBytes);
10078 }
10079 }
10081 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
10082 #ifdef ASSERT
10083 verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?");
10084 if (CheckCompressedOops) {
10085 Label ok;
10086 testq(src, src);
10087 jcc(Assembler::notEqual, ok);
10088 STOP("null oop passed to encode_heap_oop_not_null2");
10089 bind(ok);
10090 }
10091 #endif
10092 verify_oop(src, "broken oop in encode_heap_oop_not_null2");
10093 if (dst != src) {
10094 movq(dst, src);
10095 }
10096 if (Universe::narrow_oop_base() != NULL) {
10097 subq(dst, r12_heapbase);
10098 }
10099 if (Universe::narrow_oop_shift() != 0) {
10100 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
10101 shrq(dst, LogMinObjAlignmentInBytes);
10102 }
10103 }
10105 void MacroAssembler::decode_heap_oop(Register r) {
10106 #ifdef ASSERT
10107 verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
10108 #endif
10109 if (Universe::narrow_oop_base() == NULL) {
10110 if (Universe::narrow_oop_shift() != 0) {
10111 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
10112 shlq(r, LogMinObjAlignmentInBytes);
10113 }
10114 } else {
10115 Label done;
10116     shlq(r, LogMinObjAlignmentInBytes); // sets ZF if the narrow oop is NULL
10117     jccb(Assembler::equal, done);       // skip the base add so NULL stays NULL
10118     addq(r, r12_heapbase);
10119 bind(done);
10120 }
10121 verify_oop(r, "broken oop in decode_heap_oop");
10122 }
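// Illustrative only: decode is the exact inverse; continuing the example
// above,
//
//   decode(0x247) = (0x247 << 3) + base = 0x7f0000001238
//   decode(0)     = 0     // the jccb(equal) skips the base add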
10124 void MacroAssembler::decode_heap_oop_not_null(Register r) {
10125 // Note: it will change flags
10126 assert (UseCompressedOops, "should only be used for compressed headers");
10127 assert (Universe::heap() != NULL, "java heap should be initialized");
10128 // Cannot assert, unverified entry point counts instructions (see .ad file)
10129 // vtableStubs also counts instructions in pd_code_size_limit.
10130 // Also do not verify_oop as this is called by verify_oop.
10131 if (Universe::narrow_oop_shift() != 0) {
10132 assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
10133 shlq(r, LogMinObjAlignmentInBytes);
10134 if (Universe::narrow_oop_base() != NULL) {
10135 addq(r, r12_heapbase);
10136 }
10137 } else {
10138 assert (Universe::narrow_oop_base() == NULL, "sanity");
10139 }
10140 }
10142 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
10143 // Note: it will change flags
10144 assert (UseCompressedOops, "should only be used for compressed headers");
10145 assert (Universe::heap() != NULL, "java heap should be initialized");
10146 // Cannot assert, unverified entry point counts instructions (see .ad file)
10147 // vtableStubs also counts instructions in pd_code_size_limit.
10148 // Also do not verify_oop as this is called by verify_oop.
10149 if (Universe::narrow_oop_shift() != 0) {
10150 assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
10151 if (LogMinObjAlignmentInBytes == Address::times_8) {
10152 leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
10153 } else {
10154 if (dst != src) {
10155 movq(dst, src);
10156 }
10157 shlq(dst, LogMinObjAlignmentInBytes);
10158 if (Universe::narrow_oop_base() != NULL) {
10159 addq(dst, r12_heapbase);
10160 }
10161 }
10162 } else {
10163 assert (Universe::narrow_oop_base() == NULL, "sanity");
10164 if (dst != src) {
10165 movq(dst, src);
10166 }
10167 }
10168 }
10170 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
10171 assert (UseCompressedOops, "should only be used for compressed headers");
10172 assert (Universe::heap() != NULL, "java heap should be initialized");
10173 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
10174 int oop_index = oop_recorder()->find_index(obj);
10175 RelocationHolder rspec = oop_Relocation::spec(oop_index);
10176 mov_narrow_oop(dst, oop_index, rspec);
10177 }
10179 void MacroAssembler::set_narrow_oop(Address dst, jobject obj) {
10180 assert (UseCompressedOops, "should only be used for compressed headers");
10181 assert (Universe::heap() != NULL, "java heap should be initialized");
10182 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
10183 int oop_index = oop_recorder()->find_index(obj);
10184 RelocationHolder rspec = oop_Relocation::spec(oop_index);
10185 mov_narrow_oop(dst, oop_index, rspec);
10186 }
10188 void MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) {
10189 assert (UseCompressedOops, "should only be used for compressed headers");
10190 assert (Universe::heap() != NULL, "java heap should be initialized");
10191 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
10192 int oop_index = oop_recorder()->find_index(obj);
10193 RelocationHolder rspec = oop_Relocation::spec(oop_index);
10194 Assembler::cmp_narrow_oop(dst, oop_index, rspec);
10195 }
10197 void MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) {
10198 assert (UseCompressedOops, "should only be used for compressed headers");
10199 assert (Universe::heap() != NULL, "java heap should be initialized");
10200 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
10201 int oop_index = oop_recorder()->find_index(obj);
10202 RelocationHolder rspec = oop_Relocation::spec(oop_index);
10203 Assembler::cmp_narrow_oop(dst, oop_index, rspec);
10204 }
10206 void MacroAssembler::reinit_heapbase() {
10207 if (UseCompressedOops) {
10208 movptr(r12_heapbase, ExternalAddress((address)Universe::narrow_oop_base_addr()));
10209 }
10210 }
10211 #endif // _LP64
10214 // C2 compiled method's prolog code.
10215 void MacroAssembler::verified_entry(int framesize, bool stack_bang, bool fp_mode_24b) {
10217 // WARNING: Initial instruction MUST be 5 bytes or longer so that
10218 // NativeJump::patch_verified_entry will be able to patch out the entry
10219 // code safely. The push to verify stack depth is ok at 5 bytes,
10220 // the frame allocation can be either 3 or 6 bytes. So if we don't do
10221 // stack bang then we must use the 6 byte frame allocation even if
10222 // we have no frame. :-(
10224 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
10225 // Remove word for return addr
10226 framesize -= wordSize;
10228 // Calls to C2R adapters often do not accept exceptional returns.
10229 // We require that their callers must bang for them. But be careful, because
10230 // some VM calls (such as call site linkage) can use several kilobytes of
10231 // stack. But the stack safety zone should account for that.
10232 // See bugs 4446381, 4468289, 4497237.
10233 if (stack_bang) {
10234 generate_stack_overflow_check(framesize);
10236     // We always push rbp so that on return to the interpreter rbp will be
10237     // restored correctly and we can correct the stack.
10238 push(rbp);
10239 // Remove word for ebp
10240 framesize -= wordSize;
10242 // Create frame
10243 if (framesize) {
10244 subptr(rsp, framesize);
10245 }
10246 } else {
10247 // Create frame (force generation of a 4 byte immediate value)
10248 subptr_imm32(rsp, framesize);
10250 // Save RBP register now.
10251 framesize -= wordSize;
10252 movptr(Address(rsp, framesize), rbp);
10253 }
10255 if (VerifyStackAtCalls) { // Majik cookie to verify stack depth
10256 framesize -= wordSize;
10257 movptr(Address(rsp, framesize), (int32_t)0xbadb100d);
10258 }
10260 #ifndef _LP64
10261 // If method sets FPU control word do it now
10262 if (fp_mode_24b) {
10263 fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24()));
10264 }
10265 if (UseSSE >= 2 && VerifyFPU) {
10266 verify_FPU(0, "FPU stack must be clean on entry");
10267 }
10268 #endif
10270 #ifdef ASSERT
10271 if (VerifyStackAtCalls) {
10272 Label L;
10273 push(rax);
10274 mov(rax, rsp);
10275 andptr(rax, StackAlignmentInBytes-1);
10276 cmpptr(rax, StackAlignmentInBytes-wordSize);
10277 pop(rax);
10278 jcc(Assembler::equal, L);
10279 STOP("Stack is not properly aligned!");
10280 bind(L);
10281 }
10282 #endif
10284 }
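// Illustrative only (assumed picture): after the prolog above, the frame is
//
//   ...caller's frame...
//   return address                 (pushed by the call)
//   saved rbp                      (push or movptr, depending on stack_bang)
//   0xbadb100d verify cookie       (only if VerifyStackAtCalls)
//   ...rest of framesize: spills and locals...   <- rsp, alignment preserved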
10287 // IndexOf for constant substrings with size >= 8 chars
10288 // which don't need to be loaded through stack.
10289 void MacroAssembler::string_indexofC8(Register str1, Register str2,
10290 Register cnt1, Register cnt2,
10291 int int_cnt2, Register result,
10292 XMMRegister vec, Register tmp) {
10293 ShortBranchVerifier sbv(this);
10294 assert(UseSSE42Intrinsics, "SSE4.2 is required");
10296   // This method uses the pcmpestri instruction with bound registers
10297 // inputs:
10298 // xmm - substring
10299 // rax - substring length (elements count)
10300 // mem - scanned string
10301 // rdx - string length (elements count)
10302 // 0xd - mode: 1100 (substring search) + 01 (unsigned shorts)
10303 // outputs:
10304 // rcx - matched index in string
10305 assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri");
10307 Label RELOAD_SUBSTR, SCAN_TO_SUBSTR, SCAN_SUBSTR,
10308 RET_FOUND, RET_NOT_FOUND, EXIT, FOUND_SUBSTR,
10309 MATCH_SUBSTR_HEAD, RELOAD_STR, FOUND_CANDIDATE;
10311 // Note, inline_string_indexOf() generates checks:
10312 // if (substr.count > string.count) return -1;
10313 // if (substr.count == 0) return 0;
10314   assert(int_cnt2 >= 8, "this code is used only for cnt2 >= 8 chars");
10316 // Load substring.
10317 movdqu(vec, Address(str2, 0));
10318 movl(cnt2, int_cnt2);
10319 movptr(result, str1); // string addr
10321 if (int_cnt2 > 8) {
10322 jmpb(SCAN_TO_SUBSTR);
10324     // Reload substr for rescan; this code
10325     // is executed only for large substrings (> 8 chars)
10326 bind(RELOAD_SUBSTR);
10327 movdqu(vec, Address(str2, 0));
10328 negptr(cnt2); // Jumped here with negative cnt2, convert to positive
10330 bind(RELOAD_STR);
10331 // We came here after the beginning of the substring was
10332     // matched but the rest of it was not, so we need to search
10333     // again. Start from the next element after the previous match.
10335     // cnt2 is the number of remaining substring elements and
10336     // cnt1 is the number of remaining string elements when the cmp failed.
10337 // Restored cnt1 = cnt1 - cnt2 + int_cnt2
10338 subl(cnt1, cnt2);
10339 addl(cnt1, int_cnt2);
10340 movl(cnt2, int_cnt2); // Now restore cnt2
10342 decrementl(cnt1); // Shift to next element
10343 cmpl(cnt1, cnt2);
10344     jccb(Assembler::negative, RET_NOT_FOUND);  // Fewer chars left than substring
10346 addptr(result, 2);
10348 } // (int_cnt2 > 8)
10350 // Scan string for start of substr in 16-byte vectors
10351 bind(SCAN_TO_SUBSTR);
10352 pcmpestri(vec, Address(result, 0), 0x0d);
10353 jccb(Assembler::below, FOUND_CANDIDATE); // CF == 1
10354 subl(cnt1, 8);
10355 jccb(Assembler::lessEqual, RET_NOT_FOUND); // Scanned full string
10356 cmpl(cnt1, cnt2);
10357   jccb(Assembler::negative, RET_NOT_FOUND);  // Fewer chars left than substring
10358 addptr(result, 16);
10359 jmpb(SCAN_TO_SUBSTR);
10361 // Found a potential substr
10362 bind(FOUND_CANDIDATE);
10363 // Matched whole vector if first element matched (tmp(rcx) == 0).
10364 if (int_cnt2 == 8) {
10365 jccb(Assembler::overflow, RET_FOUND); // OF == 1
10366 } else { // int_cnt2 > 8
10367 jccb(Assembler::overflow, FOUND_SUBSTR);
10368 }
10369 // After pcmpestri tmp(rcx) contains matched element index
10370 // Compute start addr of substr
10371 lea(result, Address(result, tmp, Address::times_2));
10373 // Make sure string is still long enough
10374 subl(cnt1, tmp);
10375 cmpl(cnt1, cnt2);
10376 if (int_cnt2 == 8) {
10377 jccb(Assembler::greaterEqual, SCAN_TO_SUBSTR);
10378 } else { // int_cnt2 > 8
10379 jccb(Assembler::greaterEqual, MATCH_SUBSTR_HEAD);
10380 }
10381   // Fewer chars left than the substring.
10383 bind(RET_NOT_FOUND);
10384 movl(result, -1);
10385 jmpb(EXIT);
10387 if (int_cnt2 > 8) {
10388 // This code is optimized for the case when whole substring
10389 // is matched if its head is matched.
10390 bind(MATCH_SUBSTR_HEAD);
10391 pcmpestri(vec, Address(result, 0), 0x0d);
10392     // Reload only the string if it does not match
10393 jccb(Assembler::noOverflow, RELOAD_STR); // OF == 0
10395 Label CONT_SCAN_SUBSTR;
10396 // Compare the rest of substring (> 8 chars).
10397 bind(FOUND_SUBSTR);
10398 // First 8 chars are already matched.
10399 negptr(cnt2);
10400 addptr(cnt2, 8);
10402 bind(SCAN_SUBSTR);
10403 subl(cnt1, 8);
10404 cmpl(cnt2, -8); // Do not read beyond substring
10405 jccb(Assembler::lessEqual, CONT_SCAN_SUBSTR);
10406 // Back-up strings to avoid reading beyond substring:
10407 // cnt1 = cnt1 - cnt2 + 8
10408 addl(cnt1, cnt2); // cnt2 is negative
10409 addl(cnt1, 8);
10410 movl(cnt2, 8); negptr(cnt2);
10411 bind(CONT_SCAN_SUBSTR);
10412 if (int_cnt2 < (int)G) {
10413 movdqu(vec, Address(str2, cnt2, Address::times_2, int_cnt2*2));
10414 pcmpestri(vec, Address(result, cnt2, Address::times_2, int_cnt2*2), 0x0d);
10415 } else {
10416 // calculate index in register to avoid integer overflow (int_cnt2*2)
10417 movl(tmp, int_cnt2);
10418 addptr(tmp, cnt2);
10419 movdqu(vec, Address(str2, tmp, Address::times_2, 0));
10420 pcmpestri(vec, Address(result, tmp, Address::times_2, 0), 0x0d);
10421 }
10422     // Need to reload string pointers if the whole vector did not match
10423 jcc(Assembler::noOverflow, RELOAD_SUBSTR); // OF == 0
10424 addptr(cnt2, 8);
10425 jcc(Assembler::negative, SCAN_SUBSTR);
10426 // Fall through if found full substring
10428 } // (int_cnt2 > 8)
10430 bind(RET_FOUND);
10431 // Found result if we matched full small substring.
10432 // Compute substr offset
10433 subptr(result, str1);
10434 shrl(result, 1); // index
10435 bind(EXIT);
10437 } // string_indexofC8
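// Illustrative only: the 0x0d immediate passed to pcmpestri above decodes as
//
//   bits 1:0 = 01 -> source elements are unsigned words (16-bit chars)
//   bits 3:2 = 11 -> "equal ordered" aggregation, i.e. substring search
//   bits 5:4 = 00 -> positive polarity; bit 6 = 0 -> least significant index
//
// so after the instruction rcx holds the element index of the first
// candidate match, CF is set if any candidate was found, and OF is set if
// element 0 matched, which is exactly how the CF/OF branches above are used.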
10439 // Small strings are loaded through the stack if they cross a page boundary.
10440 void MacroAssembler::string_indexof(Register str1, Register str2,
10441 Register cnt1, Register cnt2,
10442 int int_cnt2, Register result,
10443 XMMRegister vec, Register tmp) {
10444 ShortBranchVerifier sbv(this);
10445 assert(UseSSE42Intrinsics, "SSE4.2 is required");
10446 //
10447   // int_cnt2 is the length of a small (< 8 chars) constant substring,
10448   // or -1 for a non-constant substring, in which case its length
10449   // is in the cnt2 register.
10450 //
10451 // Note, inline_string_indexOf() generates checks:
10452 // if (substr.count > string.count) return -1;
10453 // if (substr.count == 0) return 0;
10454 //
10455 assert(int_cnt2 == -1 || (0 < int_cnt2 && int_cnt2 < 8), "should be != 0");
10457   // This method uses the pcmpestri instruction with bound registers
10458 // inputs:
10459 // xmm - substring
10460 // rax - substring length (elements count)
10461 // mem - scanned string
10462 // rdx - string length (elements count)
10463 // 0xd - mode: 1100 (substring search) + 01 (unsigned shorts)
10464 // outputs:
10465 // rcx - matched index in string
10466 assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri");
10468 Label RELOAD_SUBSTR, SCAN_TO_SUBSTR, SCAN_SUBSTR, ADJUST_STR,
10469 RET_FOUND, RET_NOT_FOUND, CLEANUP, FOUND_SUBSTR,
10470 FOUND_CANDIDATE;
10472 { //========================================================
10473 // We don't know where these strings are located
10474 // and we can't read beyond them. Load them through stack.
10475 Label BIG_STRINGS, CHECK_STR, COPY_SUBSTR, COPY_STR;
10477 movptr(tmp, rsp); // save old SP
10479 if (int_cnt2 > 0) { // small (< 8 chars) constant substring
10480 if (int_cnt2 == 1) { // One char
10481 load_unsigned_short(result, Address(str2, 0));
10482 movdl(vec, result); // move 32 bits
10483 } else if (int_cnt2 == 2) { // Two chars
10484 movdl(vec, Address(str2, 0)); // move 32 bits
10485 } else if (int_cnt2 == 4) { // Four chars
10486 movq(vec, Address(str2, 0)); // move 64 bits
10487 } else { // cnt2 = { 3, 5, 6, 7 }
10488 // Array header size is 12 bytes in 32-bit VM
10489 // + 6 bytes for 3 chars == 18 bytes,
10490 // enough space to load vec and shift.
10491 assert(HeapWordSize*typeArrayKlass::header_size() >= 12,"sanity");
10492 movdqu(vec, Address(str2, (int_cnt2*2)-16));
10493 psrldq(vec, 16-(int_cnt2*2));
10494 }
10495 } else { // not constant substring
10496 cmpl(cnt2, 8);
10497 jccb(Assembler::aboveEqual, BIG_STRINGS); // Both strings are big enough
10499       // We can read beyond the string if str+16 does not cross a page boundary
10500       // since heaps are aligned and mapped by pages.
10500 // since heaps are aligned and mapped by pages.
10501 assert(os::vm_page_size() < (int)G, "default page should be small");
10502 movl(result, str2); // We need only low 32 bits
10503 andl(result, (os::vm_page_size()-1));
10504 cmpl(result, (os::vm_page_size()-16));
10505 jccb(Assembler::belowEqual, CHECK_STR);
10507       // Move small strings to the stack to allow loading 16 bytes into vec.
10508 subptr(rsp, 16);
10509 int stk_offset = wordSize-2;
10510 push(cnt2);
10512 bind(COPY_SUBSTR);
10513 load_unsigned_short(result, Address(str2, cnt2, Address::times_2, -2));
10514 movw(Address(rsp, cnt2, Address::times_2, stk_offset), result);
10515 decrement(cnt2);
10516 jccb(Assembler::notZero, COPY_SUBSTR);
10518 pop(cnt2);
10519 movptr(str2, rsp); // New substring address
10520 } // non constant
10522 bind(CHECK_STR);
10523 cmpl(cnt1, 8);
10524 jccb(Assembler::aboveEqual, BIG_STRINGS);
10526 // Check cross page boundary.
10527 movl(result, str1); // We need only low 32 bits
10528 andl(result, (os::vm_page_size()-1));
10529 cmpl(result, (os::vm_page_size()-16));
10530 jccb(Assembler::belowEqual, BIG_STRINGS);
10532 subptr(rsp, 16);
10533 int stk_offset = -2;
10534 if (int_cnt2 < 0) { // not constant
10535 push(cnt2);
10536 stk_offset += wordSize;
10537 }
10538 movl(cnt2, cnt1);
10540 bind(COPY_STR);
10541 load_unsigned_short(result, Address(str1, cnt2, Address::times_2, -2));
10542 movw(Address(rsp, cnt2, Address::times_2, stk_offset), result);
10543 decrement(cnt2);
10544 jccb(Assembler::notZero, COPY_STR);
10546 if (int_cnt2 < 0) { // not constant
10547 pop(cnt2);
10548 }
10549 movptr(str1, rsp); // New string address
10551 bind(BIG_STRINGS);
10552 // Load substring.
10553 if (int_cnt2 < 0) { // -1
10554 movdqu(vec, Address(str2, 0));
10555 push(cnt2); // substr count
10556 push(str2); // substr addr
10557 push(str1); // string addr
10558 } else {
10559 // Small (< 8 chars) constant substrings are loaded already.
10560 movl(cnt2, int_cnt2);
10561 }
10562 push(tmp); // original SP
10564 } // Finished loading
10566 //========================================================
10567 // Start search
10568 //
10570 movptr(result, str1); // string addr
10572 if (int_cnt2 < 0) { // Only for non constant substring
10573 jmpb(SCAN_TO_SUBSTR);
10575 // SP saved at sp+0
10576 // String saved at sp+1*wordSize
10577 // Substr saved at sp+2*wordSize
10578 // Substr count saved at sp+3*wordSize
10580     // Reload substr for rescan; this code
10581     // is executed only for large substrings (> 8 chars)
10582 bind(RELOAD_SUBSTR);
10583 movptr(str2, Address(rsp, 2*wordSize));
10584 movl(cnt2, Address(rsp, 3*wordSize));
10585 movdqu(vec, Address(str2, 0));
10586 // We came here after the beginning of the substring was
10587     // matched but the rest of it was not, so we need to search
10588 // again. Start from the next element after the previous match.
10589 subptr(str1, result); // Restore counter
10590 shrl(str1, 1);
10591 addl(cnt1, str1);
10592 decrementl(cnt1); // Shift to next element
10593 cmpl(cnt1, cnt2);
10594 jccb(Assembler::negative, RET_NOT_FOUND); // Left less than substring
10596 addptr(result, 2);
10597 } // non constant
10599 // Scan string for start of substr in 16-byte vectors
10600 bind(SCAN_TO_SUBSTR);
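// pcmpestri below runs in mode 0x0d: unsigned 16-bit elements
// (bits 1:0 = 01) with "equal ordered" aggregation (bits 3:2 = 11),
// i.e. substring search. CF is set when a candidate match is found
// and rcx receives the element index of the first candidate, which
// the jccb after it tests.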
10601 assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri");
10602 pcmpestri(vec, Address(result, 0), 0x0d);
10603 jccb(Assembler::below, FOUND_CANDIDATE); // CF == 1
10604 subl(cnt1, 8);
10605 jccb(Assembler::lessEqual, RET_NOT_FOUND); // Scanned full string
10606 cmpl(cnt1, cnt2);
10607 jccb(Assembler::negative, RET_NOT_FOUND); // Left less than substring
10608 addptr(result, 16);
10610 bind(ADJUST_STR);
10611 cmpl(cnt1, 8); // Do not read beyond string
10612 jccb(Assembler::greaterEqual, SCAN_TO_SUBSTR);
10613 // Back up the string pointer to avoid reading beyond the string.
10614 lea(result, Address(result, cnt1, Address::times_2, -16));
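// After this lea, result + 16 points exactly at the end of the
// string (result += cnt1*2 - 16), so rescanning the last 8 chars
// cannot read beyond the array.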
10615 movl(cnt1, 8);
10616 jmpb(SCAN_TO_SUBSTR);
10618 // Found a potential substr
10619 bind(FOUND_CANDIDATE);
10620 // After pcmpestri tmp(rcx) contains matched element index
10622 // Make sure string is still long enough
10623 subl(cnt1, tmp);
10624 cmpl(cnt1, cnt2);
10625 jccb(Assembler::greaterEqual, FOUND_SUBSTR);
10626 // Left less than substring.
10628 bind(RET_NOT_FOUND);
10629 movl(result, -1);
10630 jmpb(CLEANUP);
10632 bind(FOUND_SUBSTR);
10633 // Compute start addr of substr
10634 lea(result, Address(result, tmp, Address::times_2));
10636 if (int_cnt2 > 0) { // Constant substring
10637 // Repeat search for small substring (< 8 chars)
10638 // from new point without reloading substring.
10639 // Have to check that we don't read beyond string.
10640 cmpl(tmp, 8-int_cnt2);
10641 jccb(Assembler::greater, ADJUST_STR);
10642 // Fall through if matched whole substring.
10643 } else { // non constant
10644 assert(int_cnt2 == -1, "should be -1 (non-constant substring)");
10646 addl(tmp, cnt2);
10647 // Found result if we matched whole substring.
10648 cmpl(tmp, 8);
10649 jccb(Assembler::lessEqual, RET_FOUND);
10651 // Repeat search for small substring (<= 8 chars)
10652 // from new point 'str1' without reloading substring.
10653 cmpl(cnt2, 8);
10654 // Have to check that we don't read beyond string.
10655 jccb(Assembler::lessEqual, ADJUST_STR);
10657 Label CHECK_NEXT, CONT_SCAN_SUBSTR, RET_FOUND_LONG;
10658 // Compare the rest of substring (> 8 chars).
10659 movptr(str1, result);
10661 cmpl(tmp, cnt2);
10662 // First 8 chars are already matched.
10663 jccb(Assembler::equal, CHECK_NEXT);
10665 bind(SCAN_SUBSTR);
10666 pcmpestri(vec, Address(str1, 0), 0x0d);
10667 // Need to reload string pointers if we did not match the whole vector
10668 jcc(Assembler::noOverflow, RELOAD_SUBSTR); // OF == 0
10670 bind(CHECK_NEXT);
10671 subl(cnt2, 8);
10672 jccb(Assembler::lessEqual, RET_FOUND_LONG); // Found full substring
10673 addptr(str1, 16);
10674 addptr(str2, 16);
10675 subl(cnt1, 8);
10676 cmpl(cnt2, 8); // Do not read beyond substring
10677 jccb(Assembler::greaterEqual, CONT_SCAN_SUBSTR);
10678 // Back-up strings to avoid reading beyond substring.
10679 lea(str2, Address(str2, cnt2, Address::times_2, -16));
10680 lea(str1, Address(str1, cnt2, Address::times_2, -16));
10681 subl(cnt1, cnt2);
10682 movl(cnt2, 8);
10683 addl(cnt1, 8);
10684 bind(CONT_SCAN_SUBSTR);
10685 movdqu(vec, Address(str2, 0));
10686 jmpb(SCAN_SUBSTR);
10688 bind(RET_FOUND_LONG);
10689 movptr(str1, Address(rsp, wordSize));
10690 } // non constant
10692 bind(RET_FOUND);
10693 // Compute substr offset
10694 subptr(result, str1);
10695 shrl(result, 1); // index
10697 bind(CLEANUP);
10698 pop(rsp); // restore SP
10700 } // string_indexof
10702 // Compare strings.
10703 void MacroAssembler::string_compare(Register str1, Register str2,
10704 Register cnt1, Register cnt2, Register result,
10705 XMMRegister vec1) {
10706 ShortBranchVerifier sbv(this);
10707 Label LENGTH_DIFF_LABEL, POP_LABEL, DONE_LABEL, WHILE_HEAD_LABEL;
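// Strategy: compare the first characters; then, with SSE4.2,
// compare 8-char chunks using pcmpestri, falling back to a scalar
// loop for the tail (and for pre-SSE4.2 CPUs). The result is the
// difference of the first mismatched characters, or the length
// difference if one string is a prefix of the other.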
10709 // Compute the minimum of the string lengths, and save the
10710 // difference of the string lengths on the stack.
10711 // Select the minimum length with a conditional move.
10712 movl(result, cnt1);
10713 subl(cnt1, cnt2);
10714 push(cnt1);
10715 cmov32(Assembler::lessEqual, cnt2, result);
10717 // Is the minimum length zero?
10718 testl(cnt2, cnt2);
10719 jcc(Assembler::zero, LENGTH_DIFF_LABEL);
10721 // Load first characters
10722 load_unsigned_short(result, Address(str1, 0));
10723 load_unsigned_short(cnt1, Address(str2, 0));
10725 // Compare first characters
10726 subl(result, cnt1);
10727 jcc(Assembler::notZero, POP_LABEL);
10728 decrementl(cnt2);
10729 jcc(Assembler::zero, LENGTH_DIFF_LABEL);
10731 {
10732 // Check after comparing first character to see if strings are equivalent
10733 Label LSkip2;
10734 // Check if the strings start at same location
10735 cmpptr(str1, str2);
10736 jccb(Assembler::notEqual, LSkip2);
10738 // Check if the length difference is zero (from stack)
10739 cmpl(Address(rsp, 0), 0x0);
10740 jcc(Assembler::equal, LENGTH_DIFF_LABEL);
10742 // Strings might not be equivalent
10743 bind(LSkip2);
10744 }
10746 Address::ScaleFactor scale = Address::times_2;
10747 int stride = 8;
10749 // Advance past the first element (16/stride == 2 bytes), compared above
10750 addptr(str1, 16/stride);
10751 addptr(str2, 16/stride);
10753 if (UseSSE42Intrinsics) {
10754 Label COMPARE_WIDE_VECTORS, VECTOR_NOT_EQUAL, COMPARE_TAIL;
10755 int pcmpmask = 0x19;
10756 // Setup to compare 16-byte vectors
10757 movl(result, cnt2);
10758 andl(cnt2, ~(stride - 1)); // cnt2 holds the vector count
10759 jccb(Assembler::zero, COMPARE_TAIL);
10761 lea(str1, Address(str1, result, scale));
10762 lea(str2, Address(str2, result, scale));
10763 negptr(result);
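// Classic negative-index loop idiom: both string pointers are
// advanced to the end of the compared region and result counts up
// from -(length) toward zero, so the loop body needs only one
// index register and one add/branch pair per iteration.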
10765 // pcmpestri
10766 // inputs:
10767 // vec1 - substring
10768 // rax - negative string length (elements count)
10769 // mem - scanned string
10770 // rdx - string length (elements count)
10771 // pcmpmask - cmp mode: 11000 (string compare with negated result)
10772 // + 00 (unsigned bytes) or + 01 (unsigned shorts)
10773 // outputs:
10774 // rcx - first mismatched element index
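// Note: with the negated result (polarity bits 5:4 = 01), CF == 1
// after pcmpestri means some element pair differs and rcx holds
// the index of the first mismatch; CF == 0 means the whole 8-char
// chunk matched.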
10775 assert(result == rax && cnt2 == rdx && cnt1 == rcx, "pcmpestri");
10777 bind(COMPARE_WIDE_VECTORS);
10778 movdqu(vec1, Address(str1, result, scale));
10779 pcmpestri(vec1, Address(str2, result, scale), pcmpmask);
10780 // After pcmpestri cnt1(rcx) contains mismatched element index
10782 jccb(Assembler::below, VECTOR_NOT_EQUAL); // CF==1
10783 addptr(result, stride);
10784 subptr(cnt2, stride);
10785 jccb(Assembler::notZero, COMPARE_WIDE_VECTORS);
10787 // compare wide vectors tail
10788 testl(result, result);
10789 jccb(Assembler::zero, LENGTH_DIFF_LABEL);
10791 movl(cnt2, stride);
10792 movl(result, stride);
10793 negptr(result);
10794 movdqu(vec1, Address(str1, result, scale));
10795 pcmpestri(vec1, Address(str2, result, scale), pcmpmask);
10796 jccb(Assembler::aboveEqual, LENGTH_DIFF_LABEL);
10798 // Mismatched characters in the vectors
10799 bind(VECTOR_NOT_EQUAL);
10800 addptr(result, cnt1);
10801 movptr(cnt2, result);
10802 load_unsigned_short(result, Address(str1, cnt2, scale));
10803 load_unsigned_short(cnt1, Address(str2, cnt2, scale));
10804 subl(result, cnt1);
10805 jmpb(POP_LABEL);
10807 bind(COMPARE_TAIL); // vector count (cnt2) is zero
10808 movl(cnt2, result);
10809 // Fallthru to tail compare
10810 }
10812 // Shift str2 and str1 to the end of the arrays, negate min
10813 lea(str1, Address(str1, cnt2, scale, 0));
10814 lea(str2, Address(str2, cnt2, scale, 0));
10815 negptr(cnt2);
10817 // Compare the rest of the elements
10818 bind(WHILE_HEAD_LABEL);
10819 load_unsigned_short(result, Address(str1, cnt2, scale, 0));
10820 load_unsigned_short(cnt1, Address(str2, cnt2, scale, 0));
10821 subl(result, cnt1);
10822 jccb(Assembler::notZero, POP_LABEL);
10823 increment(cnt2);
10824 jccb(Assembler::notZero, WHILE_HEAD_LABEL);
10826 // Strings are equal up to min length. Return the length difference.
10827 bind(LENGTH_DIFF_LABEL);
10828 pop(result);
10829 jmpb(DONE_LABEL);
10831 // Discard the stored length difference
10832 bind(POP_LABEL);
10833 pop(cnt1);
10835 // That's it
10836 bind(DONE_LABEL);
10837 }
10839 // Compare char[] arrays aligned to 4 bytes or substrings.
10840 void MacroAssembler::char_arrays_equals(bool is_array_equ, Register ary1, Register ary2,
10841 Register limit, Register result, Register chr,
10842 XMMRegister vec1, XMMRegister vec2) {
10843 ShortBranchVerifier sbv(this);
10844 Label TRUE_LABEL, FALSE_LABEL, DONE, COMPARE_VECTORS, COMPARE_CHAR;
10846 int length_offset = arrayOopDesc::length_offset_in_bytes();
10847 int base_offset = arrayOopDesc::base_offset_in_bytes(T_CHAR);
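// Strategy: after the identity, null and length checks, compare
// 16-byte chunks with pxor/ptest (under UseSSE42Intrinsics), then
// 4-byte chunks, then an optional trailing char; result is set to
// 1 (equal) or 0 (not equal).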
10849 // Check the input args
10850 cmpptr(ary1, ary2);
10851 jcc(Assembler::equal, TRUE_LABEL);
10853 if (is_array_equ) {
10854 // Need additional checks for arrays_equals.
10855 testptr(ary1, ary1);
10856 jcc(Assembler::zero, FALSE_LABEL);
10857 testptr(ary2, ary2);
10858 jcc(Assembler::zero, FALSE_LABEL);
10860 // Check the lengths
10861 movl(limit, Address(ary1, length_offset));
10862 cmpl(limit, Address(ary2, length_offset));
10863 jcc(Assembler::notEqual, FALSE_LABEL);
10864 }
10866 // count == 0: zero-length arrays are equal
10867 testl(limit, limit);
10868 jcc(Assembler::zero, TRUE_LABEL);
10870 if (is_array_equ) {
10871 // Load array address
10872 lea(ary1, Address(ary1, base_offset));
10873 lea(ary2, Address(ary2, base_offset));
10874 }
10876 shll(limit, 1); // limit = byte count (known non-zero)
10877 movl(result, limit); // copy
10879 if (UseSSE42Intrinsics) {
10880 // With SSE4.2, use double quad vector compare
10881 Label COMPARE_WIDE_VECTORS, COMPARE_TAIL;
10883 // Compare 16-byte vectors
10884 andl(result, 0x0000000e); // tail count (in bytes)
10885 andl(limit, 0xfffffff0); // vector count (in bytes)
10886 jccb(Assembler::zero, COMPARE_TAIL);
10888 lea(ary1, Address(ary1, limit, Address::times_1));
10889 lea(ary2, Address(ary2, limit, Address::times_1));
10890 negptr(limit);
10892 bind(COMPARE_WIDE_VECTORS);
10893 movdqu(vec1, Address(ary1, limit, Address::times_1));
10894 movdqu(vec2, Address(ary2, limit, Address::times_1));
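// The pxor/ptest pair below is a 16-byte equality test. Roughly,
// in intrinsics form (a sketch, not the generated code):
//   __m128i x = _mm_xor_si128(a, b);   // all-zero iff a == b
//   if (!_mm_testz_si128(x, x))        // ptest: ZF = ((x & x) == 0)
//     goto FALSE_LABEL;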
10895 pxor(vec1, vec2);
10897 ptest(vec1, vec1);
10898 jccb(Assembler::notZero, FALSE_LABEL);
10899 addptr(limit, 16);
10900 jcc(Assembler::notZero, COMPARE_WIDE_VECTORS);
10902 testl(result, result);
10903 jccb(Assembler::zero, TRUE_LABEL);
10905 movdqu(vec1, Address(ary1, result, Address::times_1, -16));
10906 movdqu(vec2, Address(ary2, result, Address::times_1, -16));
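// Overlapping-tail trick: ary1/ary2 now point at the end of the
// vector region, so the loads above cover the final 16 bytes of
// each array, re-comparing a few already-matched bytes instead of
// running a scalar tail loop.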
10907 pxor(vec1, vec2);
10909 ptest(vec1, vec1);
10910 jccb(Assembler::notZero, FALSE_LABEL);
10911 jmpb(TRUE_LABEL);
10913 bind(COMPARE_TAIL); // limit is zero
10914 movl(limit, result);
10915 // Fallthru to tail compare
10916 }
10918 // Compare 4-byte vectors
10919 andl(limit, 0xfffffffc); // vector count (in bytes)
10920 jccb(Assembler::zero, COMPARE_CHAR);
10922 lea(ary1, Address(ary1, limit, Address::times_1));
10923 lea(ary2, Address(ary2, limit, Address::times_1));
10924 negptr(limit);
10926 bind(COMPARE_VECTORS);
10927 movl(chr, Address(ary1, limit, Address::times_1));
10928 cmpl(chr, Address(ary2, limit, Address::times_1));
10929 jccb(Assembler::notEqual, FALSE_LABEL);
10930 addptr(limit, 4);
10931 jcc(Assembler::notZero, COMPARE_VECTORS);
10933 // Compare trailing char (final 2 bytes), if any
10934 bind(COMPARE_CHAR);
10935 testl(result, 0x2); // tail char
10936 jccb(Assembler::zero, TRUE_LABEL);
10937 load_unsigned_short(chr, Address(ary1, 0));
10938 load_unsigned_short(limit, Address(ary2, 0));
10939 cmpl(chr, limit);
10940 jccb(Assembler::notEqual, FALSE_LABEL);
10942 bind(TRUE_LABEL);
10943 movl(result, 1); // return true
10944 jmpb(DONE);
10946 bind(FALSE_LABEL);
10947 xorl(result, result); // return false
10949 // That's it
10950 bind(DONE);
10951 }
10953 void MacroAssembler::generate_fill(BasicType t, bool aligned,
10954 Register to, Register value, Register count,
10955 Register rtmp, XMMRegister xtmp) {
10956 ShortBranchVerifier sbv(this);
10957 assert_different_registers(to, value, count, rtmp);
10958 Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
10959 Label L_fill_2_bytes, L_fill_4_bytes;
10961 int shift = -1;
10962 switch (t) {
10963 case T_BYTE:
10964 shift = 2;
10965 break;
10966 case T_SHORT:
10967 shift = 1;
10968 break;
10969 case T_INT:
10970 shift = 0;
10971 break;
10972 default: ShouldNotReachHere();
10973 }
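// shift converts between element counts and 4-byte units:
// (1 << shift) elements occupy 4 bytes (T_BYTE -> 2, T_SHORT -> 1,
// T_INT -> 0), so e.g. (8 << shift) elements is exactly 32 bytes
// for every element type.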
10975 if (t == T_BYTE) {
10976 andl(value, 0xff);
10977 movl(rtmp, value);
10978 shll(rtmp, 8);
10979 orl(value, rtmp);
10980 }
10981 if (t == T_SHORT) {
10982 andl(value, 0xffff);
10983 }
10984 if (t == T_BYTE || t == T_SHORT) {
10985 movl(rtmp, value);
10986 shll(rtmp, 16);
10987 orl(value, rtmp);
10988 }
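// value now holds the fill pattern replicated across 32 bits; for
// example, a T_BYTE fill with 0xAB becomes 0xABAB and then
// 0xABABABAB through the two shift/or steps above.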
10990 cmpl(count, 2<<shift); // Short arrays (< 8 bytes) are filled element by element
10991 jcc(Assembler::below, L_fill_4_bytes); // use unsigned cmp
10992 if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) {
10993 // align the destination address at a 4-byte boundary
10994 if (t == T_BYTE) {
10995 // One byte misalignment happens only for byte arrays
10996 testptr(to, 1);
10997 jccb(Assembler::zero, L_skip_align1);
10998 movb(Address(to, 0), value);
10999 increment(to);
11000 decrement(count);
11001 BIND(L_skip_align1);
11002 }
11003 // Two-byte misalignment happens only for byte and short (char) arrays
11004 testptr(to, 2);
11005 jccb(Assembler::zero, L_skip_align2);
11006 movw(Address(to, 0), value);
11007 addptr(to, 2);
11008 subl(count, 1<<(shift-1));
11009 BIND(L_skip_align2);
11010 }
11011 if (UseSSE < 2) {
11012 Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
11013 // Fill 32-byte chunks
11014 subl(count, 8 << shift);
11015 jcc(Assembler::less, L_check_fill_8_bytes);
11016 align(16);
11018 BIND(L_fill_32_bytes_loop);
11020 for (int i = 0; i < 32; i += 4) {
11021 movl(Address(to, i), value);
11022 }
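// (The C++ for-loop above unrolls into eight 4-byte stores, i.e.
// one 32-byte chunk per runtime loop iteration.)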
11024 addptr(to, 32);
11025 subl(count, 8 << shift);
11026 jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
11027 BIND(L_check_fill_8_bytes);
11028 addl(count, 8 << shift);
11029 jccb(Assembler::zero, L_exit);
11030 jmpb(L_fill_8_bytes);
11032 //
11033 // length is too short, just fill qwords
11034 //
11035 BIND(L_fill_8_bytes_loop);
11036 movl(Address(to, 0), value);
11037 movl(Address(to, 4), value);
11038 addptr(to, 8);
11039 BIND(L_fill_8_bytes);
11040 subl(count, 1 << (shift + 1));
11041 jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
11042 // fall through to fill 4 bytes
11043 } else {
11044 Label L_fill_32_bytes;
11045 if (!UseUnalignedLoadStores) {
11046 // align to 8 bytes; we know we are 4-byte aligned to start
11047 testptr(to, 4);
11048 jccb(Assembler::zero, L_fill_32_bytes);
11049 movl(Address(to, 0), value);
11050 addptr(to, 4);
11051 subl(count, 1<<shift);
11052 }
11053 BIND(L_fill_32_bytes);
11054 {
11055 assert(UseSSE >= 2, "supported cpu only");
11056 Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
11057 // Fill 32-byte chunks
11058 movdl(xtmp, value);
11059 pshufd(xtmp, xtmp, 0);
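// movdl places the 32-bit pattern in the low dword of xtmp;
// pshufd with imm8 == 0 then broadcasts that dword to all four
// lanes, yielding 16 bytes of fill pattern.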
11061 subl(count, 8 << shift);
11062 jcc(Assembler::less, L_check_fill_8_bytes);
11063 align(16);
11065 BIND(L_fill_32_bytes_loop);
11067 if (UseUnalignedLoadStores) {
11068 movdqu(Address(to, 0), xtmp);
11069 movdqu(Address(to, 16), xtmp);
11070 } else {
11071 movq(Address(to, 0), xtmp);
11072 movq(Address(to, 8), xtmp);
11073 movq(Address(to, 16), xtmp);
11074 movq(Address(to, 24), xtmp);
11075 }
11077 addptr(to, 32);
11078 subl(count, 8 << shift);
11079 jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
11080 BIND(L_check_fill_8_bytes);
11081 addl(count, 8 << shift);
11082 jccb(Assembler::zero, L_exit);
11083 jmpb(L_fill_8_bytes);
11085 //
11086 // length is too short, just fill qwords
11087 //
11088 BIND(L_fill_8_bytes_loop);
11089 movq(Address(to, 0), xtmp);
11090 addptr(to, 8);
11091 BIND(L_fill_8_bytes);
11092 subl(count, 1 << (shift + 1));
11093 jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
11094 }
11095 }
11096 // fill trailing 4 bytes
11097 BIND(L_fill_4_bytes);
11098 testl(count, 1<<shift);
11099 jccb(Assembler::zero, L_fill_2_bytes);
11100 movl(Address(to, 0), value);
11101 if (t == T_BYTE || t == T_SHORT) {
11102 addptr(to, 4);
11103 BIND(L_fill_2_bytes);
11104 // fill trailing 2 bytes
11105 testl(count, 1<<(shift-1));
11106 jccb(Assembler::zero, L_fill_byte);
11107 movw(Address(to, 0), value);
11108 if (t == T_BYTE) {
11109 addptr(to, 2);
11110 BIND(L_fill_byte);
11111 // fill trailing byte
11112 testl(count, 1);
11113 jccb(Assembler::zero, L_exit);
11114 movb(Address(to, 0), value);
11115 } else {
11116 BIND(L_fill_byte);
11117 }
11118 } else {
11119 BIND(L_fill_2_bytes);
11120 }
11121 BIND(L_exit);
11122 }
11123 #undef BIND
11124 #undef BLOCK_COMMENT
11127 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
11128 switch (cond) {
11129 // Note some conditions are synonyms for others
11130 case Assembler::zero: return Assembler::notZero;
11131 case Assembler::notZero: return Assembler::zero;
11132 case Assembler::less: return Assembler::greaterEqual;
11133 case Assembler::lessEqual: return Assembler::greater;
11134 case Assembler::greater: return Assembler::lessEqual;
11135 case Assembler::greaterEqual: return Assembler::less;
11136 case Assembler::below: return Assembler::aboveEqual;
11137 case Assembler::belowEqual: return Assembler::above;
11138 case Assembler::above: return Assembler::belowEqual;
11139 case Assembler::aboveEqual: return Assembler::below;
11140 case Assembler::overflow: return Assembler::noOverflow;
11141 case Assembler::noOverflow: return Assembler::overflow;
11142 case Assembler::negative: return Assembler::positive;
11143 case Assembler::positive: return Assembler::negative;
11144 case Assembler::parity: return Assembler::noParity;
11145 case Assembler::noParity: return Assembler::parity;
11146 }
11147 ShouldNotReachHere(); return Assembler::overflow;
11148 }
11150 SkipIfEqual::SkipIfEqual(
11151 MacroAssembler* masm, const bool* flag_addr, bool value) {
11152 _masm = masm;
11153 _masm->cmp8(ExternalAddress((address)flag_addr), value);
11154 _masm->jcc(Assembler::equal, _label);
11155 }
11157 SkipIfEqual::~SkipIfEqual() {
11158 _masm->bind(_label);
11159 }
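// Typical RAII use (a sketch; the flag name is illustrative only):
//   { SkipIfEqual skip(masm, &SomeBoolFlag, false);
//     // code emitted here is executed only when SomeBoolFlag is true
//   } // ~SkipIfEqual binds the label, ending the skipped region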