Thu, 04 Oct 2012 08:43:14 -0400
6884973: java -XX:Atomics=2 crashes
Summary: Remove buggy experimental option
Reviewed-by: acorn, coleenp
Contributed-by: harold.seigel@oracle.com
1 /*
2 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 #include "precompiled.hpp"
26 #include "assembler_x86.inline.hpp"
27 #include "gc_interface/collectedHeap.inline.hpp"
28 #include "interpreter/interpreter.hpp"
29 #include "memory/cardTableModRefBS.hpp"
30 #include "memory/resourceArea.hpp"
31 #include "prims/methodHandles.hpp"
32 #include "runtime/biasedLocking.hpp"
33 #include "runtime/interfaceSupport.hpp"
34 #include "runtime/objectMonitor.hpp"
35 #include "runtime/os.hpp"
36 #include "runtime/sharedRuntime.hpp"
37 #include "runtime/stubRoutines.hpp"
38 #ifndef SERIALGC
39 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
40 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
41 #include "gc_implementation/g1/heapRegion.hpp"
42 #endif
44 #ifdef PRODUCT
45 #define BLOCK_COMMENT(str) /* nothing */
46 #define STOP(error) stop(error)
47 #else
48 #define BLOCK_COMMENT(str) block_comment(str)
49 #define STOP(error) block_comment(error); stop(error)
50 #endif
52 #define BIND(label) bind(label); BLOCK_COMMENT(#label ":")
53 // Implementation of AddressLiteral
55 AddressLiteral::AddressLiteral(address target, relocInfo::relocType rtype) {
56 _is_lval = false;
57 _target = target;
58 switch (rtype) {
59 case relocInfo::oop_type:
60 case relocInfo::metadata_type:
61 // Oops are a special case. Normally they would get their own section,
62 // but in cases like icBuffer they are literals in the code stream for
63 // which we don't have a section. We use relocInfo::none so that we get
64 // a literal address, which is always patchable.
65 break;
66 case relocInfo::external_word_type:
67 _rspec = external_word_Relocation::spec(target);
68 break;
69 case relocInfo::internal_word_type:
70 _rspec = internal_word_Relocation::spec(target);
71 break;
72 case relocInfo::opt_virtual_call_type:
73 _rspec = opt_virtual_call_Relocation::spec();
74 break;
75 case relocInfo::static_call_type:
76 _rspec = static_call_Relocation::spec();
77 break;
78 case relocInfo::runtime_call_type:
79 _rspec = runtime_call_Relocation::spec();
80 break;
81 case relocInfo::poll_type:
82 case relocInfo::poll_return_type:
83 _rspec = Relocation::spec_simple(rtype);
84 break;
85 case relocInfo::none:
86 break;
87 default:
88 ShouldNotReachHere();
89 break;
90 }
91 }
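// Illustrative usage (a sketch, not from this file): call sites typically
// construct an AddressLiteral from a raw address plus the relocation type
// that describes it, e.g.
//
//   AddressLiteral polling_page((address)os::get_polling_page(),
//                               relocInfo::poll_type);
//
// so the embedded word carries a relocation spec and stays patchable after
// the code buffer is copied.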
93 // Implementation of Address
95 #ifdef _LP64
97 Address Address::make_array(ArrayAddress adr) {
98 // Not implementable on 64bit machines
99 // Should have been handled higher up the call chain.
100 ShouldNotReachHere();
101 return Address();
102 }
104 // exceedingly dangerous constructor
105 Address::Address(int disp, address loc, relocInfo::relocType rtype) {
106 _base = noreg;
107 _index = noreg;
108 _scale = no_scale;
109 _disp = disp;
110 switch (rtype) {
111 case relocInfo::external_word_type:
112 _rspec = external_word_Relocation::spec(loc);
113 break;
114 case relocInfo::internal_word_type:
115 _rspec = internal_word_Relocation::spec(loc);
116 break;
117 case relocInfo::runtime_call_type:
118 // HMM
119 _rspec = runtime_call_Relocation::spec();
120 break;
121 case relocInfo::poll_type:
122 case relocInfo::poll_return_type:
123 _rspec = Relocation::spec_simple(rtype);
124 break;
125 case relocInfo::none:
126 break;
127 default:
128 ShouldNotReachHere();
129 }
130 }
131 #else // LP64
133 Address Address::make_array(ArrayAddress adr) {
134 AddressLiteral base = adr.base();
135 Address index = adr.index();
136 assert(index._disp == 0, "must not have disp"); // maybe it can?
137 Address array(index._base, index._index, index._scale, (intptr_t) base.target());
138 array._rspec = base._rspec;
139 return array;
140 }
142 // exceedingly dangerous constructor
143 Address::Address(address loc, RelocationHolder spec) {
144 _base = noreg;
145 _index = noreg;
146 _scale = no_scale;
147 _disp = (intptr_t) loc;
148 _rspec = spec;
149 }
151 #endif // _LP64
155 // Convert the raw encoding form into the form expected by the constructor for
156 // Address. An index of 4 (rsp) corresponds to having no index, so convert
157 // that to noreg for the Address constructor.
158 Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) {
159 RelocationHolder rspec;
160 if (disp_reloc != relocInfo::none) {
161 rspec = Relocation::spec_simple(disp_reloc);
162 }
163 bool valid_index = index != rsp->encoding();
164 if (valid_index) {
165 Address madr(as_Register(base), as_Register(index), (Address::ScaleFactor)scale, in_ByteSize(disp));
166 madr._rspec = rspec;
167 return madr;
168 } else {
169 Address madr(as_Register(base), noreg, Address::no_scale, in_ByteSize(disp));
170 madr._rspec = rspec;
171 return madr;
172 }
173 }
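// Worked example (values assumed): make_raw(0 /*rax*/, 4 /*rsp's encoding*/,
// 0, 16, relocInfo::none) sees the index encoding 4, treats it as "no
// index", and returns the equivalent of
// Address(rax, noreg, Address::no_scale, 16), i.e. a plain [rax + 16]
// operand.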
175 // Implementation of Assembler
177 int AbstractAssembler::code_fill_byte() {
178 return (u_char)'\xF4'; // hlt
179 }
181 // make this go away someday
182 void Assembler::emit_data(jint data, relocInfo::relocType rtype, int format) {
183 if (rtype == relocInfo::none)
184 emit_long(data);
185 else emit_data(data, Relocation::spec_simple(rtype), format);
186 }
188 void Assembler::emit_data(jint data, RelocationHolder const& rspec, int format) {
189 assert(imm_operand == 0, "default format must be immediate in this file");
190 assert(inst_mark() != NULL, "must be inside InstructionMark");
191 if (rspec.type() != relocInfo::none) {
192 #ifdef ASSERT
193 check_relocation(rspec, format);
194 #endif
195 // Do not use AbstractAssembler::relocate, which is not intended for
196 // embedded words. Instead, relocate to the enclosing instruction.
198 // Hack: call32 is too wide for the mask, so use disp32 instead.
199 if (format == call32_operand)
200 code_section()->relocate(inst_mark(), rspec, disp32_operand);
201 else
202 code_section()->relocate(inst_mark(), rspec, format);
203 }
204 emit_long(data);
205 }
207 static int encode(Register r) {
208 int enc = r->encoding();
209 if (enc >= 8) {
210 enc -= 8;
211 }
212 return enc;
213 }
215 static int encode(XMMRegister r) {
216 int enc = r->encoding();
217 if (enc >= 8) {
218 enc -= 8;
219 }
220 return enc;
221 }
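// Example (register encodings as in register_x86.hpp): r9 encodes as 9, so
// encode(r9) yields 1; the stripped high bit is not lost, it travels in the
// REX.B/REX.R prefix bits emitted by the prefix*() routines.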
223 void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
224 assert(dst->has_byte_register(), "must have byte register");
225 assert(isByte(op1) && isByte(op2), "wrong opcode");
226 assert(isByte(imm8), "not a byte");
227 assert((op1 & 0x01) == 0, "should be 8bit operation");
228 emit_byte(op1);
229 emit_byte(op2 | encode(dst));
230 emit_byte(imm8);
231 }
234 void Assembler::emit_arith(int op1, int op2, Register dst, int32_t imm32) {
235 assert(isByte(op1) && isByte(op2), "wrong opcode");
236 assert((op1 & 0x01) == 1, "should be 32bit operation");
237 assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
238 if (is8bit(imm32)) {
239 emit_byte(op1 | 0x02); // set sign-extension bit
240 emit_byte(op2 | encode(dst));
241 emit_byte(imm32 & 0xFF);
242 } else {
243 emit_byte(op1);
244 emit_byte(op2 | encode(dst));
245 emit_long(imm32);
246 }
247 }
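// Worked example (operands assumed): addl(rcx, 5) reaches this routine as
// emit_arith(0x81, 0xC0, rcx, 5). Since 5 fits in 8 bits, op1 | 0x02
// selects the sign-extended form and three bytes are emitted:
//   0x83 0xC1 0x05        // add ecx, 5  (83 /0 ib)
// addl(rcx, 0x12345) would instead take the six-byte 81 /0 id encoding.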
249 // Force generation of a 4-byte immediate value even if it fits into 8 bits
250 void Assembler::emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32) {
251 assert(isByte(op1) && isByte(op2), "wrong opcode");
252 assert((op1 & 0x01) == 1, "should be 32bit operation");
253 assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
254 emit_byte(op1);
255 emit_byte(op2 | encode(dst));
256 emit_long(imm32);
257 }
259 // immediate-to-memory forms
260 void Assembler::emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32) {
261 assert((op1 & 0x01) == 1, "should be 32bit operation");
262 assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
263 if (is8bit(imm32)) {
264 emit_byte(op1 | 0x02); // set sign-extension bit
265 emit_operand(rm, adr, 1);
266 emit_byte(imm32 & 0xFF);
267 } else {
268 emit_byte(op1);
269 emit_operand(rm, adr, 4);
270 emit_long(imm32);
271 }
272 }
275 void Assembler::emit_arith(int op1, int op2, Register dst, Register src) {
276 assert(isByte(op1) && isByte(op2), "wrong opcode");
277 emit_byte(op1);
278 emit_byte(op2 | encode(dst) << 3 | encode(src));
279 }
282 void Assembler::emit_operand(Register reg, Register base, Register index,
283 Address::ScaleFactor scale, int disp,
284 RelocationHolder const& rspec,
285 int rip_relative_correction) {
286 relocInfo::relocType rtype = (relocInfo::relocType) rspec.type();
288 // Encode the registers as needed in the fields they are used in
290 int regenc = encode(reg) << 3;
291 int indexenc = index->is_valid() ? encode(index) << 3 : 0;
292 int baseenc = base->is_valid() ? encode(base) : 0;
294 if (base->is_valid()) {
295 if (index->is_valid()) {
296 assert(scale != Address::no_scale, "inconsistent address");
297 // [base + index*scale + disp]
298 if (disp == 0 && rtype == relocInfo::none &&
299 base != rbp LP64_ONLY(&& base != r13)) {
300 // [base + index*scale]
301 // [00 reg 100][ss index base]
302 assert(index != rsp, "illegal addressing mode");
303 emit_byte(0x04 | regenc);
304 emit_byte(scale << 6 | indexenc | baseenc);
305 } else if (is8bit(disp) && rtype == relocInfo::none) {
306 // [base + index*scale + imm8]
307 // [01 reg 100][ss index base] imm8
308 assert(index != rsp, "illegal addressing mode");
309 emit_byte(0x44 | regenc);
310 emit_byte(scale << 6 | indexenc | baseenc);
311 emit_byte(disp & 0xFF);
312 } else {
313 // [base + index*scale + disp32]
314 // [10 reg 100][ss index base] disp32
315 assert(index != rsp, "illegal addressing mode");
316 emit_byte(0x84 | regenc);
317 emit_byte(scale << 6 | indexenc | baseenc);
318 emit_data(disp, rspec, disp32_operand);
319 }
320 } else if (base == rsp LP64_ONLY(|| base == r12)) {
321 // [rsp + disp]
322 if (disp == 0 && rtype == relocInfo::none) {
323 // [rsp]
324 // [00 reg 100][00 100 100]
325 emit_byte(0x04 | regenc);
326 emit_byte(0x24);
327 } else if (is8bit(disp) && rtype == relocInfo::none) {
328 // [rsp + imm8]
329 // [01 reg 100][00 100 100] disp8
330 emit_byte(0x44 | regenc);
331 emit_byte(0x24);
332 emit_byte(disp & 0xFF);
333 } else {
334 // [rsp + imm32]
335 // [10 reg 100][00 100 100] disp32
336 emit_byte(0x84 | regenc);
337 emit_byte(0x24);
338 emit_data(disp, rspec, disp32_operand);
339 }
340 } else {
341 // [base + disp]
342 assert(base != rsp LP64_ONLY(&& base != r12), "illegal addressing mode");
343 if (disp == 0 && rtype == relocInfo::none &&
344 base != rbp LP64_ONLY(&& base != r13)) {
345 // [base]
346 // [00 reg base]
347 emit_byte(0x00 | regenc | baseenc);
348 } else if (is8bit(disp) && rtype == relocInfo::none) {
349 // [base + disp8]
350 // [01 reg base] disp8
351 emit_byte(0x40 | regenc | baseenc);
352 emit_byte(disp & 0xFF);
353 } else {
354 // [base + disp32]
355 // [10 reg base] disp32
356 emit_byte(0x80 | regenc | baseenc);
357 emit_data(disp, rspec, disp32_operand);
358 }
359 }
360 } else {
361 if (index->is_valid()) {
362 assert(scale != Address::no_scale, "inconsistent address");
363 // [index*scale + disp]
364 // [00 reg 100][ss index 101] disp32
365 assert(index != rsp, "illegal addressing mode");
366 emit_byte(0x04 | regenc);
367 emit_byte(scale << 6 | indexenc | 0x05);
368 emit_data(disp, rspec, disp32_operand);
369 } else if (rtype != relocInfo::none ) {
370 // [disp] (64bit) RIP-RELATIVE (32bit) abs
371 // [00 000 101] disp32
373 emit_byte(0x05 | regenc);
374 // Note that the RIP-rel. correction applies to the generated
375 // disp field, but _not_ to the target address in the rspec.
377 // disp was computed as the target address minus the pc at the start
378 // of the instruction; that needs further correction here.
379 // intptr_t disp = target - next_ip;
380 assert(inst_mark() != NULL, "must be inside InstructionMark");
381 address next_ip = pc() + sizeof(int32_t) + rip_relative_correction;
382 int64_t adjusted = disp;
383 // Do rip-rel adjustment for 64bit
384 LP64_ONLY(adjusted -= (next_ip - inst_mark()));
385 assert(is_simm32(adjusted),
386 "must be 32bit offset (RIP relative address)");
387 emit_data((int32_t) adjusted, rspec, disp32_operand);
389 } else {
390 // 32-bit never did this; it handled everything via the rip-rel/disp code above.
391 // [disp] ABSOLUTE
392 // [00 reg 100][00 100 101] disp32
393 emit_byte(0x04 | regenc);
394 emit_byte(0x25);
395 emit_data(disp, rspec, disp32_operand);
396 }
397 }
398 }
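// Worked example (operands assumed): movl(rax, Address(rbx, rcx,
// Address::times_4, 8)) lands in the [base + index*scale + imm8] arm above
// and emits
//   0x8B 0x44 0x8B 0x08   // mov eax, [rbx+rcx*4+8]
// ModRM 0x44 = 01 000 100 selects a SIB byte, SIB 0x8B = 10 001 011 packs
// scale=4, index=rcx, base=rbx, and the final byte is the 8-bit disp.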
400 void Assembler::emit_operand(XMMRegister reg, Register base, Register index,
401 Address::ScaleFactor scale, int disp,
402 RelocationHolder const& rspec) {
403 emit_operand((Register)reg, base, index, scale, disp, rspec);
404 }
406 // Secret local extension to Assembler::WhichOperand:
407 #define end_pc_operand (_WhichOperand_limit)
409 address Assembler::locate_operand(address inst, WhichOperand which) {
410 // Decode the given instruction, and return the address of
411 // an embedded 32-bit operand word.
413 // If "which" is disp32_operand, selects the displacement portion
414 // of an effective address specifier.
415 // If "which" is imm64_operand, selects the trailing immediate constant.
416 // If "which" is call32_operand, selects the displacement of a call or jump.
417 // Caller is responsible for ensuring that there is such an operand,
418 // and that it is 32/64 bits wide.
420 // If "which" is end_pc_operand, find the end of the instruction.
422 address ip = inst;
423 bool is_64bit = false;
425 debug_only(bool has_disp32 = false);
426 int tail_size = 0; // other random bytes (#32, #16, etc.) at end of insn
428 again_after_prefix:
429 switch (0xFF & *ip++) {
431 // These convenience macros generate groups of "case" labels for the switch.
432 #define REP4(x) (x)+0: case (x)+1: case (x)+2: case (x)+3
433 #define REP8(x) (x)+0: case (x)+1: case (x)+2: case (x)+3: \
434 case (x)+4: case (x)+5: case (x)+6: case (x)+7
435 #define REP16(x) REP8((x)+0): \
436 case REP8((x)+8)
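// For example, "case REP4(0x00):" below expands to
//   case 0x00: case 0x01: case 0x02: case 0x03:
// covering the four add variants (addb a,r; addl a,r; addb r,a; addl r,a)
// with a single arm.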
438 case CS_segment:
439 case SS_segment:
440 case DS_segment:
441 case ES_segment:
442 case FS_segment:
443 case GS_segment:
444 // Seems dubious
445 LP64_ONLY(assert(false, "shouldn't have that prefix"));
446 assert(ip == inst+1, "only one prefix allowed");
447 goto again_after_prefix;
449 case 0x67:
450 case REX:
451 case REX_B:
452 case REX_X:
453 case REX_XB:
454 case REX_R:
455 case REX_RB:
456 case REX_RX:
457 case REX_RXB:
458 NOT_LP64(assert(false, "64bit prefixes"));
459 goto again_after_prefix;
461 case REX_W:
462 case REX_WB:
463 case REX_WX:
464 case REX_WXB:
465 case REX_WR:
466 case REX_WRB:
467 case REX_WRX:
468 case REX_WRXB:
469 NOT_LP64(assert(false, "64bit prefixes"));
470 is_64bit = true;
471 goto again_after_prefix;
473 case 0xFF: // pushq a; decl a; incl a; call a; jmp a
474 case 0x88: // movb a, r
475 case 0x89: // movl a, r
476 case 0x8A: // movb r, a
477 case 0x8B: // movl r, a
478 case 0x8F: // popl a
479 debug_only(has_disp32 = true);
480 break;
482 case 0x68: // pushq #32
483 if (which == end_pc_operand) {
484 return ip + 4;
485 }
486 assert(which == imm_operand && !is_64bit, "pushl has no disp32 or 64bit immediate");
487 return ip; // not produced by emit_operand
489 case 0x66: // movw ... (size prefix)
490 again_after_size_prefix2:
491 switch (0xFF & *ip++) {
492 case REX:
493 case REX_B:
494 case REX_X:
495 case REX_XB:
496 case REX_R:
497 case REX_RB:
498 case REX_RX:
499 case REX_RXB:
500 case REX_W:
501 case REX_WB:
502 case REX_WX:
503 case REX_WXB:
504 case REX_WR:
505 case REX_WRB:
506 case REX_WRX:
507 case REX_WRXB:
508 NOT_LP64(assert(false, "64bit prefix found"));
509 goto again_after_size_prefix2;
510 case 0x8B: // movw r, a
511 case 0x89: // movw a, r
512 debug_only(has_disp32 = true);
513 break;
514 case 0xC7: // movw a, #16
515 debug_only(has_disp32 = true);
516 tail_size = 2; // the imm16
517 break;
518 case 0x0F: // several SSE/SSE2 variants
519 ip--; // reparse the 0x0F
520 goto again_after_prefix;
521 default:
522 ShouldNotReachHere();
523 }
524 break;
526 case REP8(0xB8): // movl/q r, #32/#64(oop?)
527 if (which == end_pc_operand) return ip + (is_64bit ? 8 : 4);
528 // these asserts are somewhat nonsensical
529 #ifndef _LP64
530 assert(which == imm_operand || which == disp32_operand,
531 err_msg("which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, ip));
532 #else
533 assert((which == call32_operand || which == imm_operand) && is_64bit ||
534 which == narrow_oop_operand && !is_64bit,
535 err_msg("which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, ip));
536 #endif // _LP64
537 return ip;
539 case 0x69: // imul r, a, #32
540 case 0xC7: // movl a, #32(oop?)
541 tail_size = 4;
542 debug_only(has_disp32 = true); // has both kinds of operands!
543 break;
545 case 0x0F: // movx..., etc.
546 switch (0xFF & *ip++) {
547 case 0x3A: // pcmpestri
548 tail_size = 1; // the imm8; fall through: 0x3A insns also have a third opcode byte
549 case 0x38: // ptest, pmovzxbw
550 ip++; // skip opcode
551 debug_only(has_disp32 = true); // has both kinds of operands!
552 break;
554 case 0x70: // pshufd r, r/a, #8
555 debug_only(has_disp32 = true); // has both kinds of operands! fall through
556 case 0x73: // psrldq r, #8
557 tail_size = 1;
558 break;
560 case 0x12: // movlps
561 case 0x28: // movaps
562 case 0x2E: // ucomiss
563 case 0x2F: // comiss
564 case 0x54: // andps
565 case 0x55: // andnps
566 case 0x56: // orps
567 case 0x57: // xorps
568 case 0x6E: // movd
569 case 0x7E: // movd
570 case 0xAE: // ldmxcsr, stmxcsr, fxrstor, fxsave, clflush
571 debug_only(has_disp32 = true);
572 break;
574 case 0xAD: // shrd r, a, %cl
575 case 0xAF: // imul r, a
576 case 0xBE: // movsbl r, a (movsxb)
577 case 0xBF: // movswl r, a (movsxw)
578 case 0xB6: // movzbl r, a (movzxb)
579 case 0xB7: // movzwl r, a (movzxw)
580 case REP16(0x40): // cmovl cc, r, a
581 case 0xB0: // cmpxchgb
582 case 0xB1: // cmpxchg
583 case 0xC1: // xaddl
584 case 0xC7: // cmpxchg8
585 case REP16(0x90): // setcc a
586 debug_only(has_disp32 = true);
587 // fall out of the switch to decode the address
588 break;
590 case 0xC4: // pinsrw r, a, #8
591 debug_only(has_disp32 = true); // fall through
592 case 0xC5: // pextrw r, r, #8
593 tail_size = 1; // the imm8
594 break;
596 case 0xAC: // shrd r, a, #8
597 debug_only(has_disp32 = true);
598 tail_size = 1; // the imm8
599 break;
601 case REP16(0x80): // jcc rdisp32
602 if (which == end_pc_operand) return ip + 4;
603 assert(which == call32_operand, "jcc has no disp32 or imm");
604 return ip;
605 default:
606 ShouldNotReachHere();
607 }
608 break;
610 case 0x81: // addl a, #32; addl r, #32
611 // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
612 // on 32bit in the case of cmpl, the imm might be an oop
613 tail_size = 4;
614 debug_only(has_disp32 = true); // has both kinds of operands!
615 break;
617 case 0x83: // addl a, #8; addl r, #8
618 // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
619 debug_only(has_disp32 = true); // has both kinds of operands!
620 tail_size = 1;
621 break;
623 case 0x9B:
624 switch (0xFF & *ip++) {
625 case 0xD9: // fnstcw a
626 debug_only(has_disp32 = true);
627 break;
628 default:
629 ShouldNotReachHere();
630 }
631 break;
633 case REP4(0x00): // addb a, r; addl a, r; addb r, a; addl r, a
634 case REP4(0x10): // adc...
635 case REP4(0x20): // and...
636 case REP4(0x30): // xor...
637 case REP4(0x08): // or...
638 case REP4(0x18): // sbb...
639 case REP4(0x28): // sub...
640 case 0xF7: // mull a
641 case 0x8D: // lea r, a
642 case 0x87: // xchg r, a
643 case REP4(0x38): // cmp...
644 case 0x85: // test r, a
645 debug_only(has_disp32 = true); // has both kinds of operands!
646 break;
648 case 0xC1: // sal a, #8; sar a, #8; shl a, #8; shr a, #8
649 case 0xC6: // movb a, #8
650 case 0x80: // cmpb a, #8
651 case 0x6B: // imul r, a, #8
652 debug_only(has_disp32 = true); // has both kinds of operands!
653 tail_size = 1; // the imm8
654 break;
656 case 0xC4: // VEX_3bytes
657 case 0xC5: // VEX_2bytes
658 assert((UseAVX > 0), "shouldn't have VEX prefix");
659 assert(ip == inst+1, "no prefixes allowed");
660 // C4 and C5 are also used as opcodes for the PINSRW and PEXTRW instructions,
661 // but those carry the 0x0F prefix and are handled when 0x0F is processed above.
662 //
663 // In 32-bit mode the VEX first bytes C4 and C5 alias onto the LDS and LES
664 // instructions (which are not supported in 64-bit mode). To distinguish
665 // them, bits [7:6] are set in the VEX second byte, since a ModRM byte
666 // cannot be of the form 11xxxxxx in 32-bit mode. To set those VEX bits,
667 // the REX and vvvv bits are inverted.
668 //
669 // Fortunately C2 doesn't generate these instructions, so we don't need
670 // to check for them in the product version.
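// Example (bytes assumed for illustration): in 32-bit mode C5 F8 28 C1 is
// VEX.128 vmovaps xmm0, xmm1, because the second byte 0xF8 has bits [7:6]
// set; a second byte like 0x46 (bits [7:6] == 01) would instead make the
// C5 a genuine LES with a memory ModRM.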
672 // Check second byte
673 NOT_LP64(assert((0xC0 & *ip) == 0xC0, "shouldn't have LDS and LES instructions"));
675 // First byte
676 if ((0xFF & *inst) == VEX_3bytes) {
677 ip++; // third byte
678 is_64bit = ((VEX_W & *ip) == VEX_W);
679 }
680 ip++; // opcode
681 // To find the end of instruction (which == end_pc_operand).
682 switch (0xFF & *ip) {
683 case 0x61: // pcmpestri r, r/a, #8
684 case 0x70: // pshufd r, r/a, #8
685 case 0x73: // psrldq r, #8
686 tail_size = 1; // the imm8
687 break;
688 default:
689 break;
690 }
691 ip++; // skip opcode
692 debug_only(has_disp32 = true); // has both kinds of operands!
693 break;
695 case 0xD1: // sal a, 1; sar a, 1; shl a, 1; shr a, 1
696 case 0xD3: // sal a, %cl; sar a, %cl; shl a, %cl; shr a, %cl
697 case 0xD9: // fld_s a; fst_s a; fstp_s a; fldcw a
698 case 0xDD: // fld_d a; fst_d a; fstp_d a
699 case 0xDB: // fild_s a; fistp_s a; fld_x a; fstp_x a
700 case 0xDF: // fild_d a; fistp_d a
701 case 0xD8: // fadd_s a; fsubr_s a; fmul_s a; fdivr_s a; fcomp_s a
702 case 0xDC: // fadd_d a; fsubr_d a; fmul_d a; fdivr_d a; fcomp_d a
703 case 0xDE: // faddp_d a; fsubrp_d a; fmulp_d a; fdivrp_d a; fcompp_d a
704 debug_only(has_disp32 = true);
705 break;
707 case 0xE8: // call rdisp32
708 case 0xE9: // jmp rdisp32
709 if (which == end_pc_operand) return ip + 4;
710 assert(which == call32_operand, "call has no disp32 or imm");
711 return ip;
713 case 0xF0: // Lock
714 assert(os::is_MP(), "only on MP");
715 goto again_after_prefix;
717 case 0xF3: // For SSE
718 case 0xF2: // For SSE2
719 switch (0xFF & *ip++) {
720 case REX:
721 case REX_B:
722 case REX_X:
723 case REX_XB:
724 case REX_R:
725 case REX_RB:
726 case REX_RX:
727 case REX_RXB:
728 case REX_W:
729 case REX_WB:
730 case REX_WX:
731 case REX_WXB:
732 case REX_WR:
733 case REX_WRB:
734 case REX_WRX:
735 case REX_WRXB:
736 NOT_LP64(assert(false, "found 64bit prefix"));
737 ip++;
738 default:
739 ip++;
740 }
741 debug_only(has_disp32 = true); // has both kinds of operands!
742 break;
744 default:
745 ShouldNotReachHere();
747 #undef REP8
748 #undef REP16
749 }
751 assert(which != call32_operand, "instruction is not a call, jmp, or jcc");
752 #ifdef _LP64
753 assert(which != imm_operand, "instruction is not a movq reg, imm64");
754 #else
755 // assert(which != imm_operand || has_imm32, "instruction has no imm32 field");
756 assert(which != imm_operand || has_disp32, "instruction has no imm32 field");
757 #endif // LP64
758 assert(which != disp32_operand || has_disp32, "instruction has no disp32 field");
760 // parse the output of emit_operand
761 int op2 = 0xFF & *ip++;
762 int base = op2 & 0x07;
763 int op3 = -1;
764 const int b100 = 4;
765 const int b101 = 5;
766 if (base == b100 && (op2 >> 6) != 3) {
767 op3 = 0xFF & *ip++;
768 base = op3 & 0x07; // refetch the base
769 }
770 // now ip points at the disp (if any)
772 switch (op2 >> 6) {
773 case 0:
774 // [00 reg 100][ss index base]
775 // [00 reg 100][00 100 esp]
776 // [00 reg base]
777 // [00 reg 100][ss index 101][disp32]
778 // [00 reg 101] [disp32]
780 if (base == b101) {
781 if (which == disp32_operand)
782 return ip; // caller wants the disp32
783 ip += 4; // skip the disp32
784 }
785 break;
787 case 1:
788 // [01 reg 100][ss index base][disp8]
789 // [01 reg 100][00 100 esp][disp8]
790 // [01 reg base] [disp8]
791 ip += 1; // skip the disp8
792 break;
794 case 2:
795 // [10 reg 100][ss index base][disp32]
796 // [10 reg 100][00 100 esp][disp32]
797 // [10 reg base] [disp32]
798 if (which == disp32_operand)
799 return ip; // caller wants the disp32
800 ip += 4; // skip the disp32
801 break;
803 case 3:
804 // [11 reg base] (not a memory addressing mode)
805 break;
806 }
808 if (which == end_pc_operand) {
809 return ip + tail_size;
810 }
812 #ifdef _LP64
813 assert(which == narrow_oop_operand && !is_64bit, "instruction is not a movl adr, imm32");
814 #else
815 assert(which == imm_operand, "instruction has only an imm field");
816 #endif // LP64
817 return ip;
818 }
820 address Assembler::locate_next_instruction(address inst) {
821 // Secretly share code with locate_operand:
822 return locate_operand(inst, end_pc_operand);
823 }
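// Usage sketch (assumed caller, not part of this file): the decoder above
// supports both instruction walking and operand patching, e.g.
//
//   address ip = begin;
//   while (ip < end) {
//     ip = locate_next_instruction(ip);                   // step one insn
//   }
//   address disp = locate_operand(inst, disp32_operand);  // word to patch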
826 #ifdef ASSERT
827 void Assembler::check_relocation(RelocationHolder const& rspec, int format) {
828 address inst = inst_mark();
829 assert(inst != NULL && inst < pc(), "must point to beginning of instruction");
830 address opnd;
832 Relocation* r = rspec.reloc();
833 if (r->type() == relocInfo::none) {
834 return;
835 } else if (r->is_call() || format == call32_operand) {
836 // assert(format == imm32_operand, "cannot specify a nonzero format");
837 opnd = locate_operand(inst, call32_operand);
838 } else if (r->is_data()) {
839 assert(format == imm_operand || format == disp32_operand
840 LP64_ONLY(|| format == narrow_oop_operand), "format ok");
841 opnd = locate_operand(inst, (WhichOperand)format);
842 } else {
843 assert(format == imm_operand, "cannot specify a format");
844 return;
845 }
846 assert(opnd == pc(), "must put operand where relocs can find it");
847 }
848 #endif // ASSERT
850 void Assembler::emit_operand32(Register reg, Address adr) {
851 assert(reg->encoding() < 8, "no extended registers");
852 assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
853 emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
854 adr._rspec);
855 }
857 void Assembler::emit_operand(Register reg, Address adr,
858 int rip_relative_correction) {
859 emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
860 adr._rspec,
861 rip_relative_correction);
862 }
864 void Assembler::emit_operand(XMMRegister reg, Address adr) {
865 emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
866 adr._rspec);
867 }
869 // MMX operations
870 void Assembler::emit_operand(MMXRegister reg, Address adr) {
871 assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
872 emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
873 }
875 // work around gcc (3.2.1-7a) bug
876 void Assembler::emit_operand(Address adr, MMXRegister reg) {
877 assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
878 emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
879 }
882 void Assembler::emit_farith(int b1, int b2, int i) {
883 assert(isByte(b1) && isByte(b2), "wrong opcode");
884 assert(0 <= i && i < 8, "illegal stack offset");
885 emit_byte(b1);
886 emit_byte(b2 + i);
887 }
890 // Now the Assembler instructions (identical for 32/64 bits)
892 void Assembler::adcl(Address dst, int32_t imm32) {
893 InstructionMark im(this);
894 prefix(dst);
895 emit_arith_operand(0x81, rdx, dst, imm32);
896 }
898 void Assembler::adcl(Address dst, Register src) {
899 InstructionMark im(this);
900 prefix(dst, src);
901 emit_byte(0x11);
902 emit_operand(src, dst);
903 }
905 void Assembler::adcl(Register dst, int32_t imm32) {
906 prefix(dst);
907 emit_arith(0x81, 0xD0, dst, imm32);
908 }
910 void Assembler::adcl(Register dst, Address src) {
911 InstructionMark im(this);
912 prefix(src, dst);
913 emit_byte(0x13);
914 emit_operand(dst, src);
915 }
917 void Assembler::adcl(Register dst, Register src) {
918 (void) prefix_and_encode(dst->encoding(), src->encoding());
919 emit_arith(0x13, 0xC0, dst, src);
920 }
922 void Assembler::addl(Address dst, int32_t imm32) {
923 InstructionMark im(this);
924 prefix(dst);
925 emit_arith_operand(0x81, rax, dst, imm32);
926 }
928 void Assembler::addl(Address dst, Register src) {
929 InstructionMark im(this);
930 prefix(dst, src);
931 emit_byte(0x01);
932 emit_operand(src, dst);
933 }
935 void Assembler::addl(Register dst, int32_t imm32) {
936 prefix(dst);
937 emit_arith(0x81, 0xC0, dst, imm32);
938 }
940 void Assembler::addl(Register dst, Address src) {
941 InstructionMark im(this);
942 prefix(src, dst);
943 emit_byte(0x03);
944 emit_operand(dst, src);
945 }
947 void Assembler::addl(Register dst, Register src) {
948 (void) prefix_and_encode(dst->encoding(), src->encoding());
949 emit_arith(0x03, 0xC0, dst, src);
950 }
952 void Assembler::addr_nop_4() {
953 assert(UseAddressNop, "no CPU support");
954 // 4 bytes: NOP DWORD PTR [EAX+0]
955 emit_byte(0x0F);
956 emit_byte(0x1F);
957 emit_byte(0x40); // emit_rm(cbuf, 0x1, EAX_enc, EAX_enc);
958 emit_byte(0); // 8-bit offset (1 byte)
959 }
961 void Assembler::addr_nop_5() {
962 assert(UseAddressNop, "no CPU support");
963 // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bit offset
964 emit_byte(0x0F);
965 emit_byte(0x1F);
966 emit_byte(0x44); // emit_rm(cbuf, 0x1, EAX_enc, 0x4);
967 emit_byte(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
968 emit_byte(0); // 8-bit offset (1 byte)
969 }
971 void Assembler::addr_nop_7() {
972 assert(UseAddressNop, "no CPU support");
973 // 7 bytes: NOP DWORD PTR [EAX+0] 32-bit offset
974 emit_byte(0x0F);
975 emit_byte(0x1F);
976 emit_byte(0x80); // emit_rm(cbuf, 0x2, EAX_enc, EAX_enc);
977 emit_long(0); // 32-bit offset (4 bytes)
978 }
980 void Assembler::addr_nop_8() {
981 assert(UseAddressNop, "no CPU support");
982 // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bit offset
983 emit_byte(0x0F);
984 emit_byte(0x1F);
985 emit_byte(0x84); // emit_rm(cbuf, 0x2, EAX_enc, 0x4);
986 emit_byte(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
987 emit_long(0); // 32-bit offset (4 bytes)
988 }
990 void Assembler::addsd(XMMRegister dst, XMMRegister src) {
991 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
992 emit_simd_arith(0x58, dst, src, VEX_SIMD_F2);
993 }
995 void Assembler::addsd(XMMRegister dst, Address src) {
996 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
997 emit_simd_arith(0x58, dst, src, VEX_SIMD_F2);
998 }
1000 void Assembler::addss(XMMRegister dst, XMMRegister src) {
1001 NOT_LP64(assert(VM_Version::supports_sse(), ""));
1002 emit_simd_arith(0x58, dst, src, VEX_SIMD_F3);
1003 }
1005 void Assembler::addss(XMMRegister dst, Address src) {
1006 NOT_LP64(assert(VM_Version::supports_sse(), ""));
1007 emit_simd_arith(0x58, dst, src, VEX_SIMD_F3);
1008 }
1010 void Assembler::andl(Address dst, int32_t imm32) {
1011 InstructionMark im(this);
1012 prefix(dst);
1013 emit_byte(0x81);
1014 emit_operand(rsp, dst, 4);
1015 emit_long(imm32);
1016 }
1018 void Assembler::andl(Register dst, int32_t imm32) {
1019 prefix(dst);
1020 emit_arith(0x81, 0xE0, dst, imm32);
1021 }
1023 void Assembler::andl(Register dst, Address src) {
1024 InstructionMark im(this);
1025 prefix(src, dst);
1026 emit_byte(0x23);
1027 emit_operand(dst, src);
1028 }
1030 void Assembler::andl(Register dst, Register src) {
1031 (void) prefix_and_encode(dst->encoding(), src->encoding());
1032 emit_arith(0x23, 0xC0, dst, src);
1033 }
1035 void Assembler::bsfl(Register dst, Register src) {
1036 int encode = prefix_and_encode(dst->encoding(), src->encoding());
1037 emit_byte(0x0F);
1038 emit_byte(0xBC);
1039 emit_byte(0xC0 | encode);
1040 }
1042 void Assembler::bsrl(Register dst, Register src) {
1043 assert(!VM_Version::supports_lzcnt(), "encoding is treated as LZCNT");
1044 int encode = prefix_and_encode(dst->encoding(), src->encoding());
1045 emit_byte(0x0F);
1046 emit_byte(0xBD);
1047 emit_byte(0xC0 | encode);
1048 }
1050 void Assembler::bswapl(Register reg) { // bswap
1051 int encode = prefix_and_encode(reg->encoding());
1052 emit_byte(0x0F);
1053 emit_byte(0xC8 | encode);
1054 }
1056 void Assembler::call(Label& L, relocInfo::relocType rtype) {
1057 // suspect disp32 is always good
1058 int operand = LP64_ONLY(disp32_operand) NOT_LP64(imm_operand);
1060 if (L.is_bound()) {
1061 const int long_size = 5;
1062 int offs = (int)( target(L) - pc() );
1063 assert(offs <= 0, "assembler error");
1064 InstructionMark im(this);
1065 // 1110 1000 #32-bit disp
1066 emit_byte(0xE8);
1067 emit_data(offs - long_size, rtype, operand);
1068 } else {
1069 InstructionMark im(this);
1070 // 1110 1000 #32-bit disp
1071 L.add_patch_at(code(), locator());
1073 emit_byte(0xE8);
1074 emit_data(int(0), rtype, operand);
1075 }
1076 }
1078 void Assembler::call(Register dst) {
1079 int encode = prefix_and_encode(dst->encoding());
1080 emit_byte(0xFF);
1081 emit_byte(0xD0 | encode);
1082 }
1085 void Assembler::call(Address adr) {
1086 InstructionMark im(this);
1087 prefix(adr);
1088 emit_byte(0xFF);
1089 emit_operand(rdx, adr);
1090 }
1092 void Assembler::call_literal(address entry, RelocationHolder const& rspec) {
1093 assert(entry != NULL, "call most probably wrong");
1094 InstructionMark im(this);
1095 emit_byte(0xE8);
1096 intptr_t disp = entry - (_code_pos + sizeof(int32_t));
1097 assert(is_simm32(disp), "must be 32bit offset (call2)");
1098 // Technically, should use call32_operand, but this format is
1099 // implied by the fact that we're emitting a call instruction.
1101 int operand = LP64_ONLY(disp32_operand) NOT_LP64(call32_operand);
1102 emit_data((int) disp, rspec, operand);
1103 }
1105 void Assembler::cdql() {
1106 emit_byte(0x99);
1107 }
1109 void Assembler::cmovl(Condition cc, Register dst, Register src) {
1110 NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
1111 int encode = prefix_and_encode(dst->encoding(), src->encoding());
1112 emit_byte(0x0F);
1113 emit_byte(0x40 | cc);
1114 emit_byte(0xC0 | encode);
1115 }
1118 void Assembler::cmovl(Condition cc, Register dst, Address src) {
1119 NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
1120 prefix(src, dst);
1121 emit_byte(0x0F);
1122 emit_byte(0x40 | cc);
1123 emit_operand(dst, src);
1124 }
1126 void Assembler::cmpb(Address dst, int imm8) {
1127 InstructionMark im(this);
1128 prefix(dst);
1129 emit_byte(0x80);
1130 emit_operand(rdi, dst, 1);
1131 emit_byte(imm8);
1132 }
1134 void Assembler::cmpl(Address dst, int32_t imm32) {
1135 InstructionMark im(this);
1136 prefix(dst);
1137 emit_byte(0x81);
1138 emit_operand(rdi, dst, 4);
1139 emit_long(imm32);
1140 }
1142 void Assembler::cmpl(Register dst, int32_t imm32) {
1143 prefix(dst);
1144 emit_arith(0x81, 0xF8, dst, imm32);
1145 }
1147 void Assembler::cmpl(Register dst, Register src) {
1148 (void) prefix_and_encode(dst->encoding(), src->encoding());
1149 emit_arith(0x3B, 0xC0, dst, src);
1150 }
1153 void Assembler::cmpl(Register dst, Address src) {
1154 InstructionMark im(this);
1155 prefix(src, dst);
1156 emit_byte(0x3B);
1157 emit_operand(dst, src);
1158 }
1160 void Assembler::cmpw(Address dst, int imm16) {
1161 InstructionMark im(this);
1162 assert(!dst.base_needs_rex() && !dst.index_needs_rex(), "no extended registers");
1163 emit_byte(0x66);
1164 emit_byte(0x81);
1165 emit_operand(rdi, dst, 2);
1166 emit_word(imm16);
1167 }
1169 // The 32-bit cmpxchg compares the value at adr with the contents of rax,
1170 // and stores reg into adr if they are equal; otherwise the value at adr is loaded into rax.
1171 // The ZF is set if the compared values were equal, and cleared otherwise.
1172 void Assembler::cmpxchgl(Register reg, Address adr) { // cmpxchg
1173 InstructionMark im(this);
1174 prefix(adr, reg);
1175 emit_byte(0x0F);
1176 emit_byte(0xB1);
1177 emit_operand(reg, adr);
1178 }
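// Hedged usage sketch (counter is an assumed Address; code as it might
// appear in a MacroAssembler method): the classic CAS increment loop.
//
//   Label retry;
//   bind(retry);
//   movl(rax, counter);           // expected value into rax
//   leal(rbx, Address(rax, 1));   // desired = expected + 1
//   lock();                       // make the exchange atomic on MP
//   cmpxchgl(rbx, counter);       // ZF set on success, rax reloaded on failure
//   jcc(Assembler::notEqual, retry);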
1180 void Assembler::comisd(XMMRegister dst, Address src) {
1181 // NOTE: dbx seems to decode this as comiss even though the
1182 // 0x66 is there. Strangely, ucomisd comes out correct.
1183 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1184 emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_66);
1185 }
1187 void Assembler::comisd(XMMRegister dst, XMMRegister src) {
1188 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1189 emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_66);
1190 }
1192 void Assembler::comiss(XMMRegister dst, Address src) {
1193 NOT_LP64(assert(VM_Version::supports_sse(), ""));
1194 emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_NONE);
1195 }
1197 void Assembler::comiss(XMMRegister dst, XMMRegister src) {
1198 NOT_LP64(assert(VM_Version::supports_sse(), ""));
1199 emit_simd_arith_nonds(0x2F, dst, src, VEX_SIMD_NONE);
1200 }
1202 void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) {
1203 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1204 emit_simd_arith_nonds(0xE6, dst, src, VEX_SIMD_F3);
1205 }
1207 void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) {
1208 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1209 emit_simd_arith_nonds(0x5B, dst, src, VEX_SIMD_NONE);
1210 }
1212 void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
1213 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1214 emit_simd_arith(0x5A, dst, src, VEX_SIMD_F2);
1215 }
1217 void Assembler::cvtsd2ss(XMMRegister dst, Address src) {
1218 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1219 emit_simd_arith(0x5A, dst, src, VEX_SIMD_F2);
1220 }
1222 void Assembler::cvtsi2sdl(XMMRegister dst, Register src) {
1223 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1224 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F2);
1225 emit_byte(0x2A);
1226 emit_byte(0xC0 | encode);
1227 }
1229 void Assembler::cvtsi2sdl(XMMRegister dst, Address src) {
1230 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1231 emit_simd_arith(0x2A, dst, src, VEX_SIMD_F2);
1232 }
1234 void Assembler::cvtsi2ssl(XMMRegister dst, Register src) {
1235 NOT_LP64(assert(VM_Version::supports_sse(), ""));
1236 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_F3);
1237 emit_byte(0x2A);
1238 emit_byte(0xC0 | encode);
1239 }
1241 void Assembler::cvtsi2ssl(XMMRegister dst, Address src) {
1242 NOT_LP64(assert(VM_Version::supports_sse(), ""));
1243 emit_simd_arith(0x2A, dst, src, VEX_SIMD_F3);
1244 }
1246 void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
1247 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1248 emit_simd_arith(0x5A, dst, src, VEX_SIMD_F3);
1249 }
1251 void Assembler::cvtss2sd(XMMRegister dst, Address src) {
1252 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1253 emit_simd_arith(0x5A, dst, src, VEX_SIMD_F3);
1254 }
1257 void Assembler::cvttsd2sil(Register dst, XMMRegister src) {
1258 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1259 int encode = simd_prefix_and_encode(dst, src, VEX_SIMD_F2);
1260 emit_byte(0x2C);
1261 emit_byte(0xC0 | encode);
1262 }
1264 void Assembler::cvttss2sil(Register dst, XMMRegister src) {
1265 NOT_LP64(assert(VM_Version::supports_sse(), ""));
1266 int encode = simd_prefix_and_encode(dst, src, VEX_SIMD_F3);
1267 emit_byte(0x2C);
1268 emit_byte(0xC0 | encode);
1269 }
1271 void Assembler::decl(Address dst) {
1272 // Don't use it directly. Use MacroAssembler::decrement() instead.
1273 InstructionMark im(this);
1274 prefix(dst);
1275 emit_byte(0xFF);
1276 emit_operand(rcx, dst);
1277 }
1279 void Assembler::divsd(XMMRegister dst, Address src) {
1280 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1281 emit_simd_arith(0x5E, dst, src, VEX_SIMD_F2);
1282 }
1284 void Assembler::divsd(XMMRegister dst, XMMRegister src) {
1285 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1286 emit_simd_arith(0x5E, dst, src, VEX_SIMD_F2);
1287 }
1289 void Assembler::divss(XMMRegister dst, Address src) {
1290 NOT_LP64(assert(VM_Version::supports_sse(), ""));
1291 emit_simd_arith(0x5E, dst, src, VEX_SIMD_F3);
1292 }
1294 void Assembler::divss(XMMRegister dst, XMMRegister src) {
1295 NOT_LP64(assert(VM_Version::supports_sse(), ""));
1296 emit_simd_arith(0x5E, dst, src, VEX_SIMD_F3);
1297 }
1299 void Assembler::emms() {
1300 NOT_LP64(assert(VM_Version::supports_mmx(), ""));
1301 emit_byte(0x0F);
1302 emit_byte(0x77);
1303 }
1305 void Assembler::hlt() {
1306 emit_byte(0xF4);
1307 }
1309 void Assembler::idivl(Register src) {
1310 int encode = prefix_and_encode(src->encoding());
1311 emit_byte(0xF7);
1312 emit_byte(0xF8 | encode);
1313 }
1315 void Assembler::divl(Register src) { // Unsigned
1316 int encode = prefix_and_encode(src->encoding());
1317 emit_byte(0xF7);
1318 emit_byte(0xF0 | encode);
1319 }
1321 void Assembler::imull(Register dst, Register src) {
1322 int encode = prefix_and_encode(dst->encoding(), src->encoding());
1323 emit_byte(0x0F);
1324 emit_byte(0xAF);
1325 emit_byte(0xC0 | encode);
1326 }
1329 void Assembler::imull(Register dst, Register src, int value) {
1330 int encode = prefix_and_encode(dst->encoding(), src->encoding());
1331 if (is8bit(value)) {
1332 emit_byte(0x6B);
1333 emit_byte(0xC0 | encode);
1334 emit_byte(value & 0xFF);
1335 } else {
1336 emit_byte(0x69);
1337 emit_byte(0xC0 | encode);
1338 emit_long(value);
1339 }
1340 }
1342 void Assembler::incl(Address dst) {
1343 // Don't use it directly. Use MacroAssembler::increment() instead.
1344 InstructionMark im(this);
1345 prefix(dst);
1346 emit_byte(0xFF);
1347 emit_operand(rax, dst);
1348 }
1350 void Assembler::jcc(Condition cc, Label& L, bool maybe_short) {
1351 InstructionMark im(this);
1352 assert((0 <= cc) && (cc < 16), "illegal cc");
1353 if (L.is_bound()) {
1354 address dst = target(L);
1355 assert(dst != NULL, "jcc most probably wrong");
1357 const int short_size = 2;
1358 const int long_size = 6;
1359 intptr_t offs = (intptr_t)dst - (intptr_t)_code_pos;
1360 if (maybe_short && is8bit(offs - short_size)) {
1361 // 0111 tttn #8-bit disp
1362 emit_byte(0x70 | cc);
1363 emit_byte((offs - short_size) & 0xFF);
1364 } else {
1365 // 0000 1111 1000 tttn #32-bit disp
1366 assert(is_simm32(offs - long_size),
1367 "must be 32bit offset (call4)");
1368 emit_byte(0x0F);
1369 emit_byte(0x80 | cc);
1370 emit_long(offs - long_size);
1371 }
1372 } else {
1373 // Note: we could eliminate conditional jumps to this jump if the
1374 // condition is the same; however, that seems to be a rather unlikely case.
1375 // Note: use jccb() if label to be bound is very close to get
1376 // an 8-bit displacement
1377 L.add_patch_at(code(), locator());
1378 emit_byte(0x0F);
1379 emit_byte(0x80 | cc);
1380 emit_long(0);
1381 }
1382 }
1384 void Assembler::jccb(Condition cc, Label& L) {
1385 if (L.is_bound()) {
1386 const int short_size = 2;
1387 address entry = target(L);
1388 #ifdef ASSERT
1389 intptr_t dist = (intptr_t)entry - ((intptr_t)_code_pos + short_size);
1390 intptr_t delta = short_branch_delta();
1391 if (delta != 0) {
1392 dist += (dist < 0 ? (-delta) :delta);
1393 }
1394 assert(is8bit(dist), "Displacement too large for a short jmp");
1395 #endif
1396 intptr_t offs = (intptr_t)entry - (intptr_t)_code_pos;
1397 // 0111 tttn #8-bit disp
1398 emit_byte(0x70 | cc);
1399 emit_byte((offs - short_size) & 0xFF);
1400 } else {
1401 InstructionMark im(this);
1402 L.add_patch_at(code(), locator());
1403 emit_byte(0x70 | cc);
1404 emit_byte(0);
1405 }
1406 }
1408 void Assembler::jmp(Address adr) {
1409 InstructionMark im(this);
1410 prefix(adr);
1411 emit_byte(0xFF);
1412 emit_operand(rsp, adr);
1413 }
1415 void Assembler::jmp(Label& L, bool maybe_short) {
1416 if (L.is_bound()) {
1417 address entry = target(L);
1418 assert(entry != NULL, "jmp most probably wrong");
1419 InstructionMark im(this);
1420 const int short_size = 2;
1421 const int long_size = 5;
1422 intptr_t offs = entry - _code_pos;
1423 if (maybe_short && is8bit(offs - short_size)) {
1424 emit_byte(0xEB);
1425 emit_byte((offs - short_size) & 0xFF);
1426 } else {
1427 emit_byte(0xE9);
1428 emit_long(offs - long_size);
1429 }
1430 } else {
1431 // By default, forward jumps use 32-bit displacements, since we
1432 // can't yet know where the label will be bound. If you're sure the
1433 // forward jump will not run beyond 127 bytes (the reach of a signed
1434 // 8-bit displacement), use jmpb to force the short form.
1435 InstructionMark im(this);
1436 L.add_patch_at(code(), locator());
1437 emit_byte(0xE9);
1438 emit_long(0);
1439 }
1440 }
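// Usage sketch (labels assumed): a forward jmp reserves a disp32 that is
// patched when the label is bound; use jmpb only when the target is known
// to stay within the 8-bit range.
//
//   Label done;
//   jmp(done);     // emits 0xE9 00 00 00 00, patched at bind()
//   ...            // intervening code
//   bind(done);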
1442 void Assembler::jmp(Register entry) {
1443 int encode = prefix_and_encode(entry->encoding());
1444 emit_byte(0xFF);
1445 emit_byte(0xE0 | encode);
1446 }
1448 void Assembler::jmp_literal(address dest, RelocationHolder const& rspec) {
1449 InstructionMark im(this);
1450 emit_byte(0xE9);
1451 assert(dest != NULL, "must have a target");
1452 intptr_t disp = dest - (_code_pos + sizeof(int32_t));
1453 assert(is_simm32(disp), "must be 32bit offset (jmp)");
1454 emit_data(disp, rspec.reloc(), call32_operand);
1455 }
1457 void Assembler::jmpb(Label& L) {
1458 if (L.is_bound()) {
1459 const int short_size = 2;
1460 address entry = target(L);
1461 assert(entry != NULL, "jmp most probably wrong");
1462 #ifdef ASSERT
1463 intptr_t dist = (intptr_t)entry - ((intptr_t)_code_pos + short_size);
1464 intptr_t delta = short_branch_delta();
1465 if (delta != 0) {
1466 dist += (dist < 0 ? (-delta) :delta);
1467 }
1468 assert(is8bit(dist), "Displacement too large for a short jmp");
1469 #endif
1470 intptr_t offs = entry - _code_pos;
1471 emit_byte(0xEB);
1472 emit_byte((offs - short_size) & 0xFF);
1473 } else {
1474 InstructionMark im(this);
1475 L.add_patch_at(code(), locator());
1476 emit_byte(0xEB);
1477 emit_byte(0);
1478 }
1479 }
1481 void Assembler::ldmxcsr( Address src) {
1482 NOT_LP64(assert(VM_Version::supports_sse(), ""));
1483 InstructionMark im(this);
1484 prefix(src);
1485 emit_byte(0x0F);
1486 emit_byte(0xAE);
1487 emit_operand(as_Register(2), src);
1488 }
1490 void Assembler::leal(Register dst, Address src) {
1491 InstructionMark im(this);
1492 #ifdef _LP64
1493 emit_byte(0x67); // addr32
1494 prefix(src, dst);
1495 #endif // LP64
1496 emit_byte(0x8D);
1497 emit_operand(dst, src);
1498 }
1500 void Assembler::lock() {
1501 emit_byte(0xF0);
1502 }
1504 void Assembler::lzcntl(Register dst, Register src) {
1505 assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
1506 emit_byte(0xF3);
1507 int encode = prefix_and_encode(dst->encoding(), src->encoding());
1508 emit_byte(0x0F);
1509 emit_byte(0xBD);
1510 emit_byte(0xC0 | encode);
1511 }
1513 // Emit mfence instruction
1514 void Assembler::mfence() {
1515 NOT_LP64(assert(VM_Version::supports_sse2(), "unsupported");)
1516 emit_byte( 0x0F );
1517 emit_byte( 0xAE );
1518 emit_byte( 0xF0 );
1519 }
1521 void Assembler::mov(Register dst, Register src) {
1522 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
1523 }
1525 void Assembler::movapd(XMMRegister dst, XMMRegister src) {
1526 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1527 emit_simd_arith_nonds(0x28, dst, src, VEX_SIMD_66);
1528 }
1530 void Assembler::movaps(XMMRegister dst, XMMRegister src) {
1531 NOT_LP64(assert(VM_Version::supports_sse(), ""));
1532 emit_simd_arith_nonds(0x28, dst, src, VEX_SIMD_NONE);
1533 }
1535 void Assembler::movlhps(XMMRegister dst, XMMRegister src) {
1536 NOT_LP64(assert(VM_Version::supports_sse(), ""));
1537 int encode = simd_prefix_and_encode(dst, src, src, VEX_SIMD_NONE);
1538 emit_byte(0x16);
1539 emit_byte(0xC0 | encode);
1540 }
1542 void Assembler::movb(Register dst, Address src) {
1543 NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
1544 InstructionMark im(this);
1545 prefix(src, dst, true);
1546 emit_byte(0x8A);
1547 emit_operand(dst, src);
1548 }
1551 void Assembler::movb(Address dst, int imm8) {
1552 InstructionMark im(this);
1553 prefix(dst);
1554 emit_byte(0xC6);
1555 emit_operand(rax, dst, 1);
1556 emit_byte(imm8);
1557 }
1560 void Assembler::movb(Address dst, Register src) {
1561 assert(src->has_byte_register(), "must have byte register");
1562 InstructionMark im(this);
1563 prefix(dst, src, true);
1564 emit_byte(0x88);
1565 emit_operand(src, dst);
1566 }
1568 void Assembler::movdl(XMMRegister dst, Register src) {
1569 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1570 int encode = simd_prefix_and_encode(dst, src, VEX_SIMD_66);
1571 emit_byte(0x6E);
1572 emit_byte(0xC0 | encode);
1573 }
1575 void Assembler::movdl(Register dst, XMMRegister src) {
1576 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1577 // swap src/dst to get correct prefix
1578 int encode = simd_prefix_and_encode(src, dst, VEX_SIMD_66);
1579 emit_byte(0x7E);
1580 emit_byte(0xC0 | encode);
1581 }
1583 void Assembler::movdl(XMMRegister dst, Address src) {
1584 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1585 InstructionMark im(this);
1586 simd_prefix(dst, src, VEX_SIMD_66);
1587 emit_byte(0x6E);
1588 emit_operand(dst, src);
1589 }
1591 void Assembler::movdl(Address dst, XMMRegister src) {
1592 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1593 InstructionMark im(this);
1594 simd_prefix(dst, src, VEX_SIMD_66);
1595 emit_byte(0x7E);
1596 emit_operand(src, dst);
1597 }
1599 void Assembler::movdqa(XMMRegister dst, XMMRegister src) {
1600 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1601 emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_66);
1602 }
1604 void Assembler::movdqu(XMMRegister dst, Address src) {
1605 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1606 emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_F3);
1607 }
1609 void Assembler::movdqu(XMMRegister dst, XMMRegister src) {
1610 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1611 emit_simd_arith_nonds(0x6F, dst, src, VEX_SIMD_F3);
1612 }
1614 void Assembler::movdqu(Address dst, XMMRegister src) {
1615 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1616 InstructionMark im(this);
1617 simd_prefix(dst, src, VEX_SIMD_F3);
1618 emit_byte(0x7F);
1619 emit_operand(src, dst);
1620 }
1622 // Move Unaligned 256bit Vector
1623 void Assembler::vmovdqu(XMMRegister dst, XMMRegister src) {
1624 assert(UseAVX, "");
1625 bool vector256 = true;
1626 int encode = vex_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_F3, vector256);
1627 emit_byte(0x6F);
1628 emit_byte(0xC0 | encode);
1629 }
1631 void Assembler::vmovdqu(XMMRegister dst, Address src) {
1632 assert(UseAVX, "");
1633 InstructionMark im(this);
1634 bool vector256 = true;
1635 vex_prefix(dst, xnoreg, src, VEX_SIMD_F3, vector256);
1636 emit_byte(0x6F);
1637 emit_operand(dst, src);
1638 }
1640 void Assembler::vmovdqu(Address dst, XMMRegister src) {
1641 assert(UseAVX, "");
1642 InstructionMark im(this);
1643 bool vector256 = true;
1644 // swap src<->dst for encoding
1645 assert(src != xnoreg, "sanity");
1646 vex_prefix(src, xnoreg, dst, VEX_SIMD_F3, vector256);
1647 emit_byte(0x7F);
1648 emit_operand(src, dst);
1649 }
1651 // Uses zero extension on 64bit
1653 void Assembler::movl(Register dst, int32_t imm32) {
1654 int encode = prefix_and_encode(dst->encoding());
1655 emit_byte(0xB8 | encode);
1656 emit_long(imm32);
1657 }
1659 void Assembler::movl(Register dst, Register src) {
1660 int encode = prefix_and_encode(dst->encoding(), src->encoding());
1661 emit_byte(0x8B);
1662 emit_byte(0xC0 | encode);
1663 }
1665 void Assembler::movl(Register dst, Address src) {
1666 InstructionMark im(this);
1667 prefix(src, dst);
1668 emit_byte(0x8B);
1669 emit_operand(dst, src);
1670 }
1672 void Assembler::movl(Address dst, int32_t imm32) {
1673 InstructionMark im(this);
1674 prefix(dst);
1675 emit_byte(0xC7);
1676 emit_operand(rax, dst, 4);
1677 emit_long(imm32);
1678 }
1680 void Assembler::movl(Address dst, Register src) {
1681 InstructionMark im(this);
1682 prefix(dst, src);
1683 emit_byte(0x89);
1684 emit_operand(src, dst);
1685 }
1687 // Newer CPUs require movsd and movss to avoid a partial register stall
1688 // when loading from memory; but for old Opterons use movlpd instead of movsd.
1689 // The selection is made in MacroAssembler::movdbl() and movflt().
1690 void Assembler::movlpd(XMMRegister dst, Address src) {
1691 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1692 emit_simd_arith(0x12, dst, src, VEX_SIMD_66);
1693 }
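// A sketch of the selection mentioned above (simplified from
// MacroAssembler::movdbl; UseXmmLoadAndClearUpper is the assumed flag):
//
//   if (UseXmmLoadAndClearUpper) {
//     movsd(dst, src);    // newer CPUs: full-width load, no partial stall
//   } else {
//     movlpd(dst, src);   // old Opteron: merge into the low half instead
//   }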
1695 void Assembler::movq( MMXRegister dst, Address src ) {
1696 assert( VM_Version::supports_mmx(), "" );
1697 emit_byte(0x0F);
1698 emit_byte(0x6F);
1699 emit_operand(dst, src);
1700 }
1702 void Assembler::movq( Address dst, MMXRegister src ) {
1703 assert( VM_Version::supports_mmx(), "" );
1704 emit_byte(0x0F);
1705 emit_byte(0x7F);
1706 // Workaround for a gcc (3.2.1-7a) bug:
1707 // In that version of gcc, with only an emit_operand(MMX, Address)
1708 // available, gcc will tail-jump and try to reverse the parameters,
1709 // completely obliterating dst in the process. Having a version
1710 // available that doesn't need to swap the args at the tail jump
1711 // avoids the bug.
1712 emit_operand(dst, src);
1713 }
1715 void Assembler::movq(XMMRegister dst, Address src) {
1716 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1717 InstructionMark im(this);
1718 simd_prefix(dst, src, VEX_SIMD_F3);
1719 emit_byte(0x7E);
1720 emit_operand(dst, src);
1721 }
1723 void Assembler::movq(Address dst, XMMRegister src) {
1724 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1725 InstructionMark im(this);
1726 simd_prefix(dst, src, VEX_SIMD_66);
1727 emit_byte(0xD6);
1728 emit_operand(src, dst);
1729 }
1731 void Assembler::movsbl(Register dst, Address src) { // movsxb
1732 InstructionMark im(this);
1733 prefix(src, dst);
1734 emit_byte(0x0F);
1735 emit_byte(0xBE);
1736 emit_operand(dst, src);
1737 }
1739 void Assembler::movsbl(Register dst, Register src) { // movsxb
1740 NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
1741 int encode = prefix_and_encode(dst->encoding(), src->encoding(), true);
1742 emit_byte(0x0F);
1743 emit_byte(0xBE);
1744 emit_byte(0xC0 | encode);
1745 }
1747 void Assembler::movsd(XMMRegister dst, XMMRegister src) {
1748 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1749 emit_simd_arith(0x10, dst, src, VEX_SIMD_F2);
1750 }
1752 void Assembler::movsd(XMMRegister dst, Address src) {
1753 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1754 emit_simd_arith_nonds(0x10, dst, src, VEX_SIMD_F2);
1755 }
1757 void Assembler::movsd(Address dst, XMMRegister src) {
1758 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1759 InstructionMark im(this);
1760 simd_prefix(dst, src, VEX_SIMD_F2);
1761 emit_byte(0x11);
1762 emit_operand(src, dst);
1763 }
1765 void Assembler::movss(XMMRegister dst, XMMRegister src) {
1766 NOT_LP64(assert(VM_Version::supports_sse(), ""));
1767 emit_simd_arith(0x10, dst, src, VEX_SIMD_F3);
1768 }
1770 void Assembler::movss(XMMRegister dst, Address src) {
1771 NOT_LP64(assert(VM_Version::supports_sse(), ""));
1772 emit_simd_arith_nonds(0x10, dst, src, VEX_SIMD_F3);
1773 }
1775 void Assembler::movss(Address dst, XMMRegister src) {
1776 NOT_LP64(assert(VM_Version::supports_sse(), ""));
1777 InstructionMark im(this);
1778 simd_prefix(dst, src, VEX_SIMD_F3);
1779 emit_byte(0x11);
1780 emit_operand(src, dst);
1781 }
1783 void Assembler::movswl(Register dst, Address src) { // movsxw
1784 InstructionMark im(this);
1785 prefix(src, dst);
1786 emit_byte(0x0F);
1787 emit_byte(0xBF);
1788 emit_operand(dst, src);
1789 }
1791 void Assembler::movswl(Register dst, Register src) { // movsxw
1792 int encode = prefix_and_encode(dst->encoding(), src->encoding());
1793 emit_byte(0x0F);
1794 emit_byte(0xBF);
1795 emit_byte(0xC0 | encode);
1796 }
1798 void Assembler::movw(Address dst, int imm16) {
1799 InstructionMark im(this);
1801 emit_byte(0x66); // operand-size prefix: 16-bit operand
1802 prefix(dst);
1803 emit_byte(0xC7);
1804 emit_operand(rax, dst, 2);
1805 emit_word(imm16);
1806 }
1808 void Assembler::movw(Register dst, Address src) {
1809 InstructionMark im(this);
1810 emit_byte(0x66);
1811 prefix(src, dst);
1812 emit_byte(0x8B);
1813 emit_operand(dst, src);
1814 }
1816 void Assembler::movw(Address dst, Register src) {
1817 InstructionMark im(this);
1818 emit_byte(0x66);
1819 prefix(dst, src);
1820 emit_byte(0x89);
1821 emit_operand(src, dst);
1822 }
1824 void Assembler::movzbl(Register dst, Address src) { // movzxb
1825 InstructionMark im(this);
1826 prefix(src, dst);
1827 emit_byte(0x0F);
1828 emit_byte(0xB6);
1829 emit_operand(dst, src);
1830 }
1832 void Assembler::movzbl(Register dst, Register src) { // movzxb
1833 NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
1834 int encode = prefix_and_encode(dst->encoding(), src->encoding(), true);
1835 emit_byte(0x0F);
1836 emit_byte(0xB6);
1837 emit_byte(0xC0 | encode);
1838 }
1840 void Assembler::movzwl(Register dst, Address src) { // movzxw
1841 InstructionMark im(this);
1842 prefix(src, dst);
1843 emit_byte(0x0F);
1844 emit_byte(0xB7);
1845 emit_operand(dst, src);
1846 }
1848 void Assembler::movzwl(Register dst, Register src) { // movzxw
1849 int encode = prefix_and_encode(dst->encoding(), src->encoding());
1850 emit_byte(0x0F);
1851 emit_byte(0xB7);
1852 emit_byte(0xC0 | encode);
1853 }
1855 void Assembler::mull(Address src) {
1856 InstructionMark im(this);
1857 prefix(src);
1858 emit_byte(0xF7);
1859 emit_operand(rsp, src);
1860 }
1862 void Assembler::mull(Register src) {
1863 int encode = prefix_and_encode(src->encoding());
1864 emit_byte(0xF7);
1865 emit_byte(0xE0 | encode);
1866 }
1868 void Assembler::mulsd(XMMRegister dst, Address src) {
1869 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1870 emit_simd_arith(0x59, dst, src, VEX_SIMD_F2);
1871 }
1873 void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
1874 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1875 emit_simd_arith(0x59, dst, src, VEX_SIMD_F2);
1876 }
1878 void Assembler::mulss(XMMRegister dst, Address src) {
1879 NOT_LP64(assert(VM_Version::supports_sse(), ""));
1880 emit_simd_arith(0x59, dst, src, VEX_SIMD_F3);
1881 }
1883 void Assembler::mulss(XMMRegister dst, XMMRegister src) {
1884 NOT_LP64(assert(VM_Version::supports_sse(), ""));
1885 emit_simd_arith(0x59, dst, src, VEX_SIMD_F3);
1886 }
1888 void Assembler::negl(Register dst) {
1889 int encode = prefix_and_encode(dst->encoding());
1890 emit_byte(0xF7);
1891 emit_byte(0xD8 | encode);
1892 }
1894 void Assembler::nop(int i) {
1895 #ifdef ASSERT
1896 assert(i > 0, " ");
1897 // The fancy nops aren't currently recognized by debuggers, making it a
1898 // pain to disassemble code while debugging. If asserts are on, speed is
1899 // clearly not an issue, so simply use the traditional single-byte nop
1900 // for alignment.
1902 for (; i > 0 ; i--) emit_byte(0x90);
1903 return;
1905 #endif // ASSERT
1907 if (UseAddressNop && VM_Version::is_intel()) {
1908 //
1909 // Using multi-byte nops "0x0F 0x1F [address]" for Intel
1910 // 1: 0x90
1911 // 2: 0x66 0x90
1912 // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
1913 // 4: 0x0F 0x1F 0x40 0x00
1914 // 5: 0x0F 0x1F 0x44 0x00 0x00
1915 // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00
1916 // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
1917 // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
1918 // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
1919 // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
1920 // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
1922 // The rest of the encoding is Intel-specific - don't use consecutive address nops
1924 // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
1925 // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
1926 // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
1927 // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
1929 while(i >= 15) {
1930 // For Intel don't generate consecutive address nops (mix with regular nops)
1931 i -= 15;
1932 emit_byte(0x66); // size prefix
1933 emit_byte(0x66); // size prefix
1934 emit_byte(0x66); // size prefix
1935 addr_nop_8();
1936 emit_byte(0x66); // size prefix
1937 emit_byte(0x66); // size prefix
1938 emit_byte(0x66); // size prefix
1939 emit_byte(0x90); // nop
1940 }
1941 switch (i) {
1942 case 14:
1943 emit_byte(0x66); // size prefix
1944 case 13:
1945 emit_byte(0x66); // size prefix
1946 case 12:
1947 addr_nop_8();
1948 emit_byte(0x66); // size prefix
1949 emit_byte(0x66); // size prefix
1950 emit_byte(0x66); // size prefix
1951 emit_byte(0x90); // nop
1952 break;
1953 case 11:
1954 emit_byte(0x66); // size prefix
1955 case 10:
1956 emit_byte(0x66); // size prefix
1957 case 9:
1958 emit_byte(0x66); // size prefix
1959 case 8:
1960 addr_nop_8();
1961 break;
1962 case 7:
1963 addr_nop_7();
1964 break;
1965 case 6:
1966 emit_byte(0x66); // size prefix
1967 case 5:
1968 addr_nop_5();
1969 break;
1970 case 4:
1971 addr_nop_4();
1972 break;
1973 case 3:
1974 // Don't use "0x0F 0x1F 0x00" - need patching safe padding
1975 emit_byte(0x66); // size prefix
1976 case 2:
1977 emit_byte(0x66); // size prefix
1978 case 1:
1979 emit_byte(0x90); // nop
1980 break;
1981 default:
1982 assert(i == 0, " ");
1983 }
1984 return;
1985 }
1986 if (UseAddressNop && VM_Version::is_amd()) {
1987 //
1988 // Using multi-byte nops "0x0F 0x1F [address]" for AMD.
1989 // 1: 0x90
1990 // 2: 0x66 0x90
1991 // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
1992 // 4: 0x0F 0x1F 0x40 0x00
1993 // 5: 0x0F 0x1F 0x44 0x00 0x00
1994 // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00
1995 // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
1996 // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
1997 // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
1998 // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
1999 // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
2001 // The rest of the encoding is AMD-specific - use consecutive address nops
2003 // 12: 0x66 0x0F 0x1F 0x44 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
2004 // 13: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
2005 // 14: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
2006 // 15: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
2007 // 16: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
2008 // Size prefixes (0x66) are added for larger sizes
2010 while(i >= 22) {
2011 i -= 11;
2012 emit_byte(0x66); // size prefix
2013 emit_byte(0x66); // size prefix
2014 emit_byte(0x66); // size prefix
2015 addr_nop_8();
2016 }
2017 // Generate the first nop for sizes between 12 and 21
2018 switch (i) {
2019 case 21:
2020 i -= 1;
2021 emit_byte(0x66); // size prefix
2022 case 20:
2023 case 19:
2024 i -= 1;
2025 emit_byte(0x66); // size prefix
2026 case 18:
2027 case 17:
2028 i -= 1;
2029 emit_byte(0x66); // size prefix
2030 case 16:
2031 case 15:
2032 i -= 8;
2033 addr_nop_8();
2034 break;
2035 case 14:
2036 case 13:
2037 i -= 7;
2038 addr_nop_7();
2039 break;
2040 case 12:
2041 i -= 6;
2042 emit_byte(0x66); // size prefix
2043 addr_nop_5();
2044 break;
2045 default:
2046 assert(i < 12, " ");
2047 }
2049 // Generate the second nop for sizes between 1 and 11
2050 switch (i) {
2051 case 11:
2052 emit_byte(0x66); // size prefix
2053 case 10:
2054 emit_byte(0x66); // size prefix
2055 case 9:
2056 emit_byte(0x66); // size prefix
2057 case 8:
2058 addr_nop_8();
2059 break;
2060 case 7:
2061 addr_nop_7();
2062 break;
2063 case 6:
2064 emit_byte(0x66); // size prefix
2065 case 5:
2066 addr_nop_5();
2067 break;
2068 case 4:
2069 addr_nop_4();
2070 break;
2071 case 3:
2072 // Don't use "0x0F 0x1F 0x00" - need patching safe padding
2073 emit_byte(0x66); // size prefix
2074 case 2:
2075 emit_byte(0x66); // size prefix
2076 case 1:
2077 emit_byte(0x90); // nop
2078 break;
2079 default:
2080 assert(i == 0, " ");
2081 }
2082 return;
2083 }
2085 // Using nops with size prefixes "0x66 0x90".
2086 // From AMD Optimization Guide:
2087 // 1: 0x90
2088 // 2: 0x66 0x90
2089 // 3: 0x66 0x66 0x90
2090 // 4: 0x66 0x66 0x66 0x90
2091 // 5: 0x66 0x66 0x90 0x66 0x90
2092 // 6: 0x66 0x66 0x90 0x66 0x66 0x90
2093 // 7: 0x66 0x66 0x66 0x90 0x66 0x66 0x90
2094 // 8: 0x66 0x66 0x66 0x90 0x66 0x66 0x66 0x90
2095 // 9: 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
2096 // 10: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
2097 //
2098 while(i > 12) {
2099 i -= 4;
2100 emit_byte(0x66); // size prefix
2101 emit_byte(0x66);
2102 emit_byte(0x66);
2103 emit_byte(0x90); // nop
2104 }
2105 // 1 - 12 nops
2106 if(i > 8) {
2107 if(i > 9) {
2108 i -= 1;
2109 emit_byte(0x66);
2110 }
2111 i -= 3;
2112 emit_byte(0x66);
2113 emit_byte(0x66);
2114 emit_byte(0x90);
2115 }
2116 // 1 - 8 nops
2117 if(i > 4) {
2118 if(i > 6) {
2119 i -= 1;
2120 emit_byte(0x66);
2121 }
2122 i -= 3;
2123 emit_byte(0x66);
2124 emit_byte(0x66);
2125 emit_byte(0x90);
2126 }
2127 switch (i) {
2128 case 4:
2129 emit_byte(0x66);
2130 case 3:
2131 emit_byte(0x66);
2132 case 2:
2133 emit_byte(0x66);
2134 case 1:
2135 emit_byte(0x90);
2136 break;
2137 default:
2138 assert(i == 0, " ");
2139 }
2140 }
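// Example (illustrative sketch, assuming the usual caller pattern): code
// buffers are padded to an alignment boundary with a single nop() call,
// letting the logic above pick the cheapest encoding; offset() is assumed
// to be the current position in the code buffer.
//
//   int pad = (16 - (offset() & 15)) & 15;  // bytes to a 16-byte boundary
//   if (pad > 0) nop(pad);                  // e.g. pad == 7 emits
//                                           // 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
//                                           // on the UseAddressNop paths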
2142 void Assembler::notl(Register dst) {
2143 int encode = prefix_and_encode(dst->encoding());
2144 emit_byte(0xF7);
2145 emit_byte(0xD0 | encode );
2146 }
2148 void Assembler::orl(Address dst, int32_t imm32) {
2149 InstructionMark im(this);
2150 prefix(dst);
2151 emit_arith_operand(0x81, rcx, dst, imm32);
2152 }
2154 void Assembler::orl(Register dst, int32_t imm32) {
2155 prefix(dst);
2156 emit_arith(0x81, 0xC8, dst, imm32);
2157 }
2159 void Assembler::orl(Register dst, Address src) {
2160 InstructionMark im(this);
2161 prefix(src, dst);
2162 emit_byte(0x0B);
2163 emit_operand(dst, src);
2164 }
2166 void Assembler::orl(Register dst, Register src) {
2167 (void) prefix_and_encode(dst->encoding(), src->encoding());
2168 emit_arith(0x0B, 0xC0, dst, src);
2169 }
2171 void Assembler::packuswb(XMMRegister dst, Address src) {
2172 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2173 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
2174 emit_simd_arith(0x67, dst, src, VEX_SIMD_66);
2175 }
2177 void Assembler::packuswb(XMMRegister dst, XMMRegister src) {
2178 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2179 emit_simd_arith(0x67, dst, src, VEX_SIMD_66);
2180 }
2182 void Assembler::pcmpestri(XMMRegister dst, Address src, int imm8) {
2183 assert(VM_Version::supports_sse4_2(), "");
2184 InstructionMark im(this);
2185 simd_prefix(dst, src, VEX_SIMD_66, VEX_OPCODE_0F_3A);
2186 emit_byte(0x61);
2187 emit_operand(dst, src);
2188 emit_byte(imm8);
2189 }
2191 void Assembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) {
2192 assert(VM_Version::supports_sse4_2(), "");
2193 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_3A);
2194 emit_byte(0x61);
2195 emit_byte(0xC0 | encode);
2196 emit_byte(imm8);
2197 }
2199 void Assembler::pmovzxbw(XMMRegister dst, Address src) {
2200 assert(VM_Version::supports_sse4_1(), "");
2201 InstructionMark im(this);
2202 simd_prefix(dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
2203 emit_byte(0x30);
2204 emit_operand(dst, src);
2205 }
2207 void Assembler::pmovzxbw(XMMRegister dst, XMMRegister src) {
2208 assert(VM_Version::supports_sse4_1(), "");
2209 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
2210 emit_byte(0x30);
2211 emit_byte(0xC0 | encode);
2212 }
2214 // generic
2215 void Assembler::pop(Register dst) {
2216 int encode = prefix_and_encode(dst->encoding());
2217 emit_byte(0x58 | encode);
2218 }
2220 void Assembler::popcntl(Register dst, Address src) {
2221 assert(VM_Version::supports_popcnt(), "must support");
2222 InstructionMark im(this);
2223 emit_byte(0xF3);
2224 prefix(src, dst);
2225 emit_byte(0x0F);
2226 emit_byte(0xB8);
2227 emit_operand(dst, src);
2228 }
2230 void Assembler::popcntl(Register dst, Register src) {
2231 assert(VM_Version::supports_popcnt(), "must support");
2232 emit_byte(0xF3);
2233 int encode = prefix_and_encode(dst->encoding(), src->encoding());
2234 emit_byte(0x0F);
2235 emit_byte(0xB8);
2236 emit_byte(0xC0 | encode);
2237 }
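// Note: the mandatory 0xF3 prefix must precede any REX prefix, which is why
// it is emitted before prefix()/prefix_and_encode() above. Illustratively,
// popcntl(rax, rcx) encodes as F3 0F B8 C1 (popcnt eax, ecx).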
2239 void Assembler::popf() {
2240 emit_byte(0x9D);
2241 }
2243 #ifndef _LP64 // no 32bit push/pop on amd64
2244 void Assembler::popl(Address dst) {
2245 // NOTE: this would adjust the stack by 8 bytes on 64-bit
2246 InstructionMark im(this);
2247 prefix(dst);
2248 emit_byte(0x8F);
2249 emit_operand(rax, dst);
2250 }
2251 #endif
2253 void Assembler::prefetch_prefix(Address src) {
2254 prefix(src);
2255 emit_byte(0x0F);
2256 }
2258 void Assembler::prefetchnta(Address src) {
2259 NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
2260 InstructionMark im(this);
2261 prefetch_prefix(src);
2262 emit_byte(0x18);
2263 emit_operand(rax, src); // 0, src
2264 }
2266 void Assembler::prefetchr(Address src) {
2267 assert(VM_Version::supports_3dnow_prefetch(), "must support");
2268 InstructionMark im(this);
2269 prefetch_prefix(src);
2270 emit_byte(0x0D);
2271 emit_operand(rax, src); // 0, src
2272 }
2274 void Assembler::prefetcht0(Address src) {
2275 NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
2276 InstructionMark im(this);
2277 prefetch_prefix(src);
2278 emit_byte(0x18);
2279 emit_operand(rcx, src); // 1, src
2280 }
2282 void Assembler::prefetcht1(Address src) {
2283 NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
2284 InstructionMark im(this);
2285 prefetch_prefix(src);
2286 emit_byte(0x18);
2287 emit_operand(rdx, src); // 2, src
2288 }
2290 void Assembler::prefetcht2(Address src) {
2291 NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
2292 InstructionMark im(this);
2293 prefetch_prefix(src);
2294 emit_byte(0x18);
2295 emit_operand(rbx, src); // 3, src
2296 }
2298 void Assembler::prefetchw(Address src) {
2299 assert(VM_Version::supports_3dnow_prefetch(), "must support");
2300 InstructionMark im(this);
2301 prefetch_prefix(src);
2302 emit_byte(0x0D);
2303 emit_operand(rcx, src); // 1, src
2304 }
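// Summary of the prefetch hint encodings used above (the /digit is carried
// by the dummy register passed to emit_operand):
//
//   prefetchnta  0F 18 /0 (rax)      prefetcht0  0F 18 /1 (rcx)
//   prefetcht1   0F 18 /2 (rdx)      prefetcht2  0F 18 /3 (rbx)
//   prefetchr    0F 0D /0 (rax)      prefetchw   0F 0D /1 (rcx)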
2306 void Assembler::prefix(Prefix p) {
2307 a_byte(p);
2308 }
2310 void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) {
2311 assert(isByte(mode), "invalid value");
2312 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2313 emit_simd_arith_nonds(0x70, dst, src, VEX_SIMD_66);
2314 emit_byte(mode & 0xFF);
2316 }
2318 void Assembler::pshufd(XMMRegister dst, Address src, int mode) {
2319 assert(isByte(mode), "invalid value");
2320 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2321 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
2322 InstructionMark im(this);
2323 simd_prefix(dst, src, VEX_SIMD_66);
2324 emit_byte(0x70);
2325 emit_operand(dst, src);
2326 emit_byte(mode & 0xFF);
2327 }
2329 void Assembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
2330 assert(isByte(mode), "invalid value");
2331 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2332 emit_simd_arith_nonds(0x70, dst, src, VEX_SIMD_F2);
2333 emit_byte(mode & 0xFF);
2334 }
2336 void Assembler::pshuflw(XMMRegister dst, Address src, int mode) {
2337 assert(isByte(mode), "invalid value");
2338 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2339 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
2340 InstructionMark im(this);
2341 simd_prefix(dst, src, VEX_SIMD_F2);
2342 emit_byte(0x70);
2343 emit_operand(dst, src);
2344 emit_byte(mode & 0xFF);
2345 }
2347 void Assembler::psrldq(XMMRegister dst, int shift) {
2348 // Shifts the 128-bit value in an xmm register right by the given number of bytes.
2349 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2350 int encode = simd_prefix_and_encode(xmm3, dst, dst, VEX_SIMD_66);
2351 emit_byte(0x73);
2352 emit_byte(0xC0 | encode);
2353 emit_byte(shift);
2354 }
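// Example (illustrative): xmm3 above only supplies the /3 opcode extension
// (66 0F 73 /3 ib), so psrldq(xmm0, 8) emits 66 0F 73 D8 08, shifting xmm0
// right by 8 bytes.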
2356 void Assembler::ptest(XMMRegister dst, Address src) {
2357 assert(VM_Version::supports_sse4_1(), "");
2358 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
2359 InstructionMark im(this);
2360 simd_prefix(dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
2361 emit_byte(0x17);
2362 emit_operand(dst, src);
2363 }
2365 void Assembler::ptest(XMMRegister dst, XMMRegister src) {
2366 assert(VM_Version::supports_sse4_1(), "");
2367 int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
2368 emit_byte(0x17);
2369 emit_byte(0xC0 | encode);
2370 }
2372 void Assembler::punpcklbw(XMMRegister dst, Address src) {
2373 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2374 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
2375 emit_simd_arith(0x60, dst, src, VEX_SIMD_66);
2376 }
2378 void Assembler::punpcklbw(XMMRegister dst, XMMRegister src) {
2379 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2380 emit_simd_arith(0x60, dst, src, VEX_SIMD_66);
2381 }
2383 void Assembler::punpckldq(XMMRegister dst, Address src) {
2384 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2385 assert((UseAVX > 0), "SSE mode requires address alignment 16 bytes");
2386 emit_simd_arith(0x62, dst, src, VEX_SIMD_66);
2387 }
2389 void Assembler::punpckldq(XMMRegister dst, XMMRegister src) {
2390 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2391 emit_simd_arith(0x62, dst, src, VEX_SIMD_66);
2392 }
2394 void Assembler::punpcklqdq(XMMRegister dst, XMMRegister src) {
2395 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2396 emit_simd_arith(0x6C, dst, src, VEX_SIMD_66);
2397 }
2399 void Assembler::push(int32_t imm32) {
2400 // in 64-bit mode we push 64 bits onto the stack but only
2401 // take a 32-bit immediate
2402 emit_byte(0x68);
2403 emit_long(imm32);
2404 }
2406 void Assembler::push(Register src) {
2407 int encode = prefix_and_encode(src->encoding());
2409 emit_byte(0x50 | encode);
2410 }
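// Example (illustrative): push/pop of a register use the short one-byte
// forms 0x50+rd / 0x58+rd, with prefix_and_encode() adding REX.B for
// r8-r15 on 64-bit:
//
//   push(rbx);   // 53
//   push(r15);   // 41 57
//   pop(rbx);    // 5B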
2412 void Assembler::pushf() {
2413 emit_byte(0x9C);
2414 }
2416 #ifndef _LP64 // no 32bit push/pop on amd64
2417 void Assembler::pushl(Address src) {
2418 // NOTE: this would push 64 bits on 64-bit
2419 InstructionMark im(this);
2420 prefix(src);
2421 emit_byte(0xFF);
2422 emit_operand(rsi, src);
2423 }
2424 #endif
2426 void Assembler::rcll(Register dst, int imm8) {
2427 assert(isShiftCount(imm8), "illegal shift count");
2428 int encode = prefix_and_encode(dst->encoding());
2429 if (imm8 == 1) {
2430 emit_byte(0xD1);
2431 emit_byte(0xD0 | encode);
2432 } else {
2433 emit_byte(0xC1);
2434 emit_byte(0xD0 | encode);
2435 emit_byte(imm8);
2436 }
2437 }
2439 // copies rcx pointer-sized words from [esi] to [edi]
2440 // generic
2441 void Assembler::rep_mov() {
2442 emit_byte(0xF3);
2443 // MOVSQ
2444 LP64_ONLY(prefix(REX_W));
2445 emit_byte(0xA5);
2446 }
2448 // stores the value in rax into rcx pointer-sized words at [edi]
2449 // generic
2450 void Assembler::rep_set() { // rep_set
2451 emit_byte(0xF3);
2452 // STOSQ
2453 LP64_ONLY(prefix(REX_W));
2454 emit_byte(0xAB);
2455 }
2457 // scans rcx pointer-sized words at [edi] for an occurrence of rax
2458 // generic
2459 void Assembler::repne_scan() { // repne_scan
2460 emit_byte(0xF2);
2461 // SCASQ
2462 LP64_ONLY(prefix(REX_W));
2463 emit_byte(0xAF);
2464 }
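// Example (illustrative): on 64-bit these emit the REX.W string forms:
//
//   rep_mov();      // F3 48 A5  (rep movsq)
//   rep_set();      // F3 48 AB  (rep stosq)
//   repne_scan();   // F2 48 AF  (repne scasq)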
2466 #ifdef _LP64
2467 // scans rcx 4-byte words at [edi] for an occurrence of rax
2468 // generic
2469 void Assembler::repne_scanl() { // repne_scan
2470 emit_byte(0xF2);
2471 // SCASL
2472 emit_byte(0xAF);
2473 }
2474 #endif
2476 void Assembler::ret(int imm16) {
2477 if (imm16 == 0) {
2478 emit_byte(0xC3);
2479 } else {
2480 emit_byte(0xC2);
2481 emit_word(imm16);
2482 }
2483 }
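// Example (illustrative): ret() emits the one-byte near return, while
// ret(n) also pops n bytes of arguments:
//
//   ret();    // C3
//   ret(8);   // C2 08 00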
2485 void Assembler::sahf() {
2486 #ifdef _LP64
2487 // Not supported in 64bit mode
2488 ShouldNotReachHere();
2489 #endif
2490 emit_byte(0x9E);
2491 }
2493 void Assembler::sarl(Register dst, int imm8) {
2494 int encode = prefix_and_encode(dst->encoding());
2495 assert(isShiftCount(imm8), "illegal shift count");
2496 if (imm8 == 1) {
2497 emit_byte(0xD1);
2498 emit_byte(0xF8 | encode);
2499 } else {
2500 emit_byte(0xC1);
2501 emit_byte(0xF8 | encode);
2502 emit_byte(imm8);
2503 }
2504 }
2506 void Assembler::sarl(Register dst) {
2507 int encode = prefix_and_encode(dst->encoding());
2508 emit_byte(0xD3);
2509 emit_byte(0xF8 | encode);
2510 }
2512 void Assembler::sbbl(Address dst, int32_t imm32) {
2513 InstructionMark im(this);
2514 prefix(dst);
2515 emit_arith_operand(0x81, rbx, dst, imm32);
2516 }
2518 void Assembler::sbbl(Register dst, int32_t imm32) {
2519 prefix(dst);
2520 emit_arith(0x81, 0xD8, dst, imm32);
2521 }
2524 void Assembler::sbbl(Register dst, Address src) {
2525 InstructionMark im(this);
2526 prefix(src, dst);
2527 emit_byte(0x1B);
2528 emit_operand(dst, src);
2529 }
2531 void Assembler::sbbl(Register dst, Register src) {
2532 (void) prefix_and_encode(dst->encoding(), src->encoding());
2533 emit_arith(0x1B, 0xC0, dst, src);
2534 }
2536 void Assembler::setb(Condition cc, Register dst) {
2537 assert(0 <= cc && cc < 16, "illegal cc");
2538 int encode = prefix_and_encode(dst->encoding(), true);
2539 emit_byte(0x0F);
2540 emit_byte(0x90 | cc);
2541 emit_byte(0xC0 | encode);
2542 }
2544 void Assembler::shll(Register dst, int imm8) {
2545 assert(isShiftCount(imm8), "illegal shift count");
2546 int encode = prefix_and_encode(dst->encoding());
2547 if (imm8 == 1 ) {
2548 emit_byte(0xD1);
2549 emit_byte(0xE0 | encode);
2550 } else {
2551 emit_byte(0xC1);
2552 emit_byte(0xE0 | encode);
2553 emit_byte(imm8);
2554 }
2555 }
2557 void Assembler::shll(Register dst) {
2558 int encode = prefix_and_encode(dst->encoding());
2559 emit_byte(0xD3);
2560 emit_byte(0xE0 | encode);
2561 }
2563 void Assembler::shrl(Register dst, int imm8) {
2564 assert(isShiftCount(imm8), "illegal shift count");
2565 int encode = prefix_and_encode(dst->encoding());
2566 emit_byte(0xC1);
2567 emit_byte(0xE8 | encode);
2568 emit_byte(imm8);
2569 }
2571 void Assembler::shrl(Register dst) {
2572 int encode = prefix_and_encode(dst->encoding());
2573 emit_byte(0xD3);
2574 emit_byte(0xE8 | encode);
2575 }
2577 // copies a single word from [esi] to [edi]
2578 void Assembler::smovl() {
2579 emit_byte(0xA5);
2580 }
2582 void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
2583 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2584 emit_simd_arith(0x51, dst, src, VEX_SIMD_F2);
2585 }
2587 void Assembler::sqrtsd(XMMRegister dst, Address src) {
2588 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2589 emit_simd_arith(0x51, dst, src, VEX_SIMD_F2);
2590 }
2592 void Assembler::sqrtss(XMMRegister dst, XMMRegister src) {
2593 NOT_LP64(assert(VM_Version::supports_sse(), ""));
2594 emit_simd_arith(0x51, dst, src, VEX_SIMD_F3);
2595 }
2597 void Assembler::sqrtss(XMMRegister dst, Address src) {
2598 NOT_LP64(assert(VM_Version::supports_sse(), ""));
2599 emit_simd_arith(0x51, dst, src, VEX_SIMD_F3);
2600 }
2602 void Assembler::stmxcsr( Address dst) {
2603 NOT_LP64(assert(VM_Version::supports_sse(), ""));
2604 InstructionMark im(this);
2605 prefix(dst);
2606 emit_byte(0x0F);
2607 emit_byte(0xAE);
2608 emit_operand(as_Register(3), dst);
2609 }
2611 void Assembler::subl(Address dst, int32_t imm32) {
2612 InstructionMark im(this);
2613 prefix(dst);
2614 emit_arith_operand(0x81, rbp, dst, imm32);
2615 }
2617 void Assembler::subl(Address dst, Register src) {
2618 InstructionMark im(this);
2619 prefix(dst, src);
2620 emit_byte(0x29);
2621 emit_operand(src, dst);
2622 }
2624 void Assembler::subl(Register dst, int32_t imm32) {
2625 prefix(dst);
2626 emit_arith(0x81, 0xE8, dst, imm32);
2627 }
2629 // Force generation of a 4-byte immediate value even if it fits into 8 bits
2630 void Assembler::subl_imm32(Register dst, int32_t imm32) {
2631 prefix(dst);
2632 emit_arith_imm32(0x81, 0xE8, dst, imm32);
2633 }
2635 void Assembler::subl(Register dst, Address src) {
2636 InstructionMark im(this);
2637 prefix(src, dst);
2638 emit_byte(0x2B);
2639 emit_operand(dst, src);
2640 }
2642 void Assembler::subl(Register dst, Register src) {
2643 (void) prefix_and_encode(dst->encoding(), src->encoding());
2644 emit_arith(0x2B, 0xC0, dst, src);
2645 }
2647 void Assembler::subsd(XMMRegister dst, XMMRegister src) {
2648 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2649 emit_simd_arith(0x5C, dst, src, VEX_SIMD_F2);
2650 }
2652 void Assembler::subsd(XMMRegister dst, Address src) {
2653 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2654 emit_simd_arith(0x5C, dst, src, VEX_SIMD_F2);
2655 }
2657 void Assembler::subss(XMMRegister dst, XMMRegister src) {
2658 NOT_LP64(assert(VM_Version::supports_sse(), ""));
2659 emit_simd_arith(0x5C, dst, src, VEX_SIMD_F3);
2660 }
2662 void Assembler::subss(XMMRegister dst, Address src) {
2663 NOT_LP64(assert(VM_Version::supports_sse(), ""));
2664 emit_simd_arith(0x5C, dst, src, VEX_SIMD_F3);
2665 }
2667 void Assembler::testb(Register dst, int imm8) {
2668 NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
2669 (void) prefix_and_encode(dst->encoding(), true);
2670 emit_arith_b(0xF6, 0xC0, dst, imm8);
2671 }
2673 void Assembler::testl(Register dst, int32_t imm32) {
2674 // not using emit_arith because test
2675 // doesn't support sign-extension of
2676 // 8-bit operands
2677 int encode = dst->encoding();
2678 if (encode == 0) {
2679 emit_byte(0xA9);
2680 } else {
2681 encode = prefix_and_encode(encode);
2682 emit_byte(0xF7);
2683 emit_byte(0xC0 | encode);
2684 }
2685 emit_long(imm32);
2686 }
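// Example (illustrative): eax gets the short accumulator form, other
// registers the generic F7 /0 form; neither supports a sign-extended
// 8-bit immediate:
//
//   testl(rax, 0x10);   // A9 10 00 00 00
//   testl(rcx, 0x10);   // F7 C1 10 00 00 00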
2688 void Assembler::testl(Register dst, Register src) {
2689 (void) prefix_and_encode(dst->encoding(), src->encoding());
2690 emit_arith(0x85, 0xC0, dst, src);
2691 }
2693 void Assembler::testl(Register dst, Address src) {
2694 InstructionMark im(this);
2695 prefix(src, dst);
2696 emit_byte(0x85);
2697 emit_operand(dst, src);
2698 }
2700 void Assembler::ucomisd(XMMRegister dst, Address src) {
2701 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2702 emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_66);
2703 }
2705 void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
2706 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2707 emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_66);
2708 }
2710 void Assembler::ucomiss(XMMRegister dst, Address src) {
2711 NOT_LP64(assert(VM_Version::supports_sse(), ""));
2712 emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_NONE);
2713 }
2715 void Assembler::ucomiss(XMMRegister dst, XMMRegister src) {
2716 NOT_LP64(assert(VM_Version::supports_sse(), ""));
2717 emit_simd_arith_nonds(0x2E, dst, src, VEX_SIMD_NONE);
2718 }
2721 void Assembler::xaddl(Address dst, Register src) {
2722 InstructionMark im(this);
2723 prefix(dst, src);
2724 emit_byte(0x0F);
2725 emit_byte(0xC1);
2726 emit_operand(src, dst);
2727 }
2729 void Assembler::xchgl(Register dst, Address src) { // xchg
2730 InstructionMark im(this);
2731 prefix(src, dst);
2732 emit_byte(0x87);
2733 emit_operand(dst, src);
2734 }
2736 void Assembler::xchgl(Register dst, Register src) {
2737 int encode = prefix_and_encode(dst->encoding(), src->encoding());
2738 emit_byte(0x87);
2739 emit_byte(0xc0 | encode);
2740 }
2742 void Assembler::xorl(Register dst, int32_t imm32) {
2743 prefix(dst);
2744 emit_arith(0x81, 0xF0, dst, imm32);
2745 }
2747 void Assembler::xorl(Register dst, Address src) {
2748 InstructionMark im(this);
2749 prefix(src, dst);
2750 emit_byte(0x33);
2751 emit_operand(dst, src);
2752 }
2754 void Assembler::xorl(Register dst, Register src) {
2755 (void) prefix_and_encode(dst->encoding(), src->encoding());
2756 emit_arith(0x33, 0xC0, dst, src);
2757 }
2760 // AVX 3-operand scalar floating-point arithmetic instructions
2762 void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, Address src) {
2763 assert(VM_Version::supports_avx(), "");
2764 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
2765 }
2767 void Assembler::vaddsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
2768 assert(VM_Version::supports_avx(), "");
2769 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
2770 }
2772 void Assembler::vaddss(XMMRegister dst, XMMRegister nds, Address src) {
2773 assert(VM_Version::supports_avx(), "");
2774 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
2775 }
2777 void Assembler::vaddss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
2778 assert(VM_Version::supports_avx(), "");
2779 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
2780 }
2782 void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, Address src) {
2783 assert(VM_Version::supports_avx(), "");
2784 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
2785 }
2787 void Assembler::vdivsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
2788 assert(VM_Version::supports_avx(), "");
2789 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
2790 }
2792 void Assembler::vdivss(XMMRegister dst, XMMRegister nds, Address src) {
2793 assert(VM_Version::supports_avx(), "");
2794 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
2795 }
2797 void Assembler::vdivss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
2798 assert(VM_Version::supports_avx(), "");
2799 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
2800 }
2802 void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, Address src) {
2803 assert(VM_Version::supports_avx(), "");
2804 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
2805 }
2807 void Assembler::vmulsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
2808 assert(VM_Version::supports_avx(), "");
2809 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
2810 }
2812 void Assembler::vmulss(XMMRegister dst, XMMRegister nds, Address src) {
2813 assert(VM_Version::supports_avx(), "");
2814 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
2815 }
2817 void Assembler::vmulss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
2818 assert(VM_Version::supports_avx(), "");
2819 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
2820 }
2822 void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, Address src) {
2823 assert(VM_Version::supports_avx(), "");
2824 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
2825 }
2827 void Assembler::vsubsd(XMMRegister dst, XMMRegister nds, XMMRegister src) {
2828 assert(VM_Version::supports_avx(), "");
2829 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F2, /* vector256 */ false);
2830 }
2832 void Assembler::vsubss(XMMRegister dst, XMMRegister nds, Address src) {
2833 assert(VM_Version::supports_avx(), "");
2834 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
2835 }
2837 void Assembler::vsubss(XMMRegister dst, XMMRegister nds, XMMRegister src) {
2838 assert(VM_Version::supports_avx(), "");
2839 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_F3, /* vector256 */ false);
2840 }
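// Example (illustrative): these VEX forms are non-destructive; dst receives
// the result and nds supplies the first source, so
//
//   vaddsd(xmm0, xmm1, xmm2);   // xmm0 = xmm1 + xmm2, xmm1 unchanged
//
// avoids the extra move the 2-operand SSE addsd would need when the first
// source must be preserved.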
2842 //====================VECTOR ARITHMETIC=====================================
2844 // Floating-point vector arithmetic
2846 void Assembler::addpd(XMMRegister dst, XMMRegister src) {
2847 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2848 emit_simd_arith(0x58, dst, src, VEX_SIMD_66);
2849 }
2851 void Assembler::addps(XMMRegister dst, XMMRegister src) {
2852 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2853 emit_simd_arith(0x58, dst, src, VEX_SIMD_NONE);
2854 }
2856 void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
2857 assert(VM_Version::supports_avx(), "");
2858 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_66, vector256);
2859 }
2861 void Assembler::vaddps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
2862 assert(VM_Version::supports_avx(), "");
2863 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_NONE, vector256);
2864 }
2866 void Assembler::vaddpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
2867 assert(VM_Version::supports_avx(), "");
2868 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_66, vector256);
2869 }
2871 void Assembler::vaddps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
2872 assert(VM_Version::supports_avx(), "");
2873 emit_vex_arith(0x58, dst, nds, src, VEX_SIMD_NONE, vector256);
2874 }
2876 void Assembler::subpd(XMMRegister dst, XMMRegister src) {
2877 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2878 emit_simd_arith(0x5C, dst, src, VEX_SIMD_66);
2879 }
2881 void Assembler::subps(XMMRegister dst, XMMRegister src) {
2882 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2883 emit_simd_arith(0x5C, dst, src, VEX_SIMD_NONE);
2884 }
2886 void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
2887 assert(VM_Version::supports_avx(), "");
2888 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_66, vector256);
2889 }
2891 void Assembler::vsubps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
2892 assert(VM_Version::supports_avx(), "");
2893 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_NONE, vector256);
2894 }
2896 void Assembler::vsubpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
2897 assert(VM_Version::supports_avx(), "");
2898 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_66, vector256);
2899 }
2901 void Assembler::vsubps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
2902 assert(VM_Version::supports_avx(), "");
2903 emit_vex_arith(0x5C, dst, nds, src, VEX_SIMD_NONE, vector256);
2904 }
2906 void Assembler::mulpd(XMMRegister dst, XMMRegister src) {
2907 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2908 emit_simd_arith(0x59, dst, src, VEX_SIMD_66);
2909 }
2911 void Assembler::mulps(XMMRegister dst, XMMRegister src) {
2912 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2913 emit_simd_arith(0x59, dst, src, VEX_SIMD_NONE);
2914 }
2916 void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
2917 assert(VM_Version::supports_avx(), "");
2918 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_66, vector256);
2919 }
2921 void Assembler::vmulps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
2922 assert(VM_Version::supports_avx(), "");
2923 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_NONE, vector256);
2924 }
2926 void Assembler::vmulpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
2927 assert(VM_Version::supports_avx(), "");
2928 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_66, vector256);
2929 }
2931 void Assembler::vmulps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
2932 assert(VM_Version::supports_avx(), "");
2933 emit_vex_arith(0x59, dst, nds, src, VEX_SIMD_NONE, vector256);
2934 }
2936 void Assembler::divpd(XMMRegister dst, XMMRegister src) {
2937 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2938 emit_simd_arith(0x5E, dst, src, VEX_SIMD_66);
2939 }
2941 void Assembler::divps(XMMRegister dst, XMMRegister src) {
2942 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2943 emit_simd_arith(0x5E, dst, src, VEX_SIMD_NONE);
2944 }
2946 void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
2947 assert(VM_Version::supports_avx(), "");
2948 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_66, vector256);
2949 }
2951 void Assembler::vdivps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
2952 assert(VM_Version::supports_avx(), "");
2953 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_NONE, vector256);
2954 }
2956 void Assembler::vdivpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
2957 assert(VM_Version::supports_avx(), "");
2958 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_66, vector256);
2959 }
2961 void Assembler::vdivps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
2962 assert(VM_Version::supports_avx(), "");
2963 emit_vex_arith(0x5E, dst, nds, src, VEX_SIMD_NONE, vector256);
2964 }
2966 void Assembler::andpd(XMMRegister dst, XMMRegister src) {
2967 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2968 emit_simd_arith(0x54, dst, src, VEX_SIMD_66);
2969 }
2971 void Assembler::andps(XMMRegister dst, XMMRegister src) {
2972 NOT_LP64(assert(VM_Version::supports_sse(), ""));
2973 emit_simd_arith(0x54, dst, src, VEX_SIMD_NONE);
2974 }
2976 void Assembler::andps(XMMRegister dst, Address src) {
2977 NOT_LP64(assert(VM_Version::supports_sse(), ""));
2978 emit_simd_arith(0x54, dst, src, VEX_SIMD_NONE);
2979 }
2981 void Assembler::andpd(XMMRegister dst, Address src) {
2982 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2983 emit_simd_arith(0x54, dst, src, VEX_SIMD_66);
2984 }
2986 void Assembler::vandpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
2987 assert(VM_Version::supports_avx(), "");
2988 emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_66, vector256);
2989 }
2991 void Assembler::vandps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
2992 assert(VM_Version::supports_avx(), "");
2993 emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_NONE, vector256);
2994 }
2996 void Assembler::vandpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
2997 assert(VM_Version::supports_avx(), "");
2998 emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_66, vector256);
2999 }
3001 void Assembler::vandps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
3002 assert(VM_Version::supports_avx(), "");
3003 emit_vex_arith(0x54, dst, nds, src, VEX_SIMD_NONE, vector256);
3004 }
3006 void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
3007 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3008 emit_simd_arith(0x57, dst, src, VEX_SIMD_66);
3009 }
3011 void Assembler::xorps(XMMRegister dst, XMMRegister src) {
3012 NOT_LP64(assert(VM_Version::supports_sse(), ""));
3013 emit_simd_arith(0x57, dst, src, VEX_SIMD_NONE);
3014 }
3016 void Assembler::xorpd(XMMRegister dst, Address src) {
3017 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3018 emit_simd_arith(0x57, dst, src, VEX_SIMD_66);
3019 }
3021 void Assembler::xorps(XMMRegister dst, Address src) {
3022 NOT_LP64(assert(VM_Version::supports_sse(), ""));
3023 emit_simd_arith(0x57, dst, src, VEX_SIMD_NONE);
3024 }
3026 void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
3027 assert(VM_Version::supports_avx(), "");
3028 emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_66, vector256);
3029 }
3031 void Assembler::vxorps(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
3032 assert(VM_Version::supports_avx(), "");
3033 emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_NONE, vector256);
3034 }
3036 void Assembler::vxorpd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
3037 assert(VM_Version::supports_avx(), "");
3038 emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_66, vector256);
3039 }
3041 void Assembler::vxorps(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
3042 assert(VM_Version::supports_avx(), "");
3043 emit_vex_arith(0x57, dst, nds, src, VEX_SIMD_NONE, vector256);
3044 }
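// Example (illustrative): a common use of these is the self-xor zeroing
// idiom, which also breaks any dependency on the register's old value:
//
//   xorps(xmm0, xmm0);   // xmm0 = 0.0f in all four lanes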
3047 // Integer vector arithmetic
3048 void Assembler::paddb(XMMRegister dst, XMMRegister src) {
3049 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3050 emit_simd_arith(0xFC, dst, src, VEX_SIMD_66);
3051 }
3053 void Assembler::paddw(XMMRegister dst, XMMRegister src) {
3054 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3055 emit_simd_arith(0xFD, dst, src, VEX_SIMD_66);
3056 }
3058 void Assembler::paddd(XMMRegister dst, XMMRegister src) {
3059 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3060 emit_simd_arith(0xFE, dst, src, VEX_SIMD_66);
3061 }
3063 void Assembler::paddq(XMMRegister dst, XMMRegister src) {
3064 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3065 emit_simd_arith(0xD4, dst, src, VEX_SIMD_66);
3066 }
3068 void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
3069 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3070 emit_vex_arith(0xFC, dst, nds, src, VEX_SIMD_66, vector256);
3071 }
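// Note: the assert above (repeated for the vp* forms below) parses as
// (supports_avx() && !vector256) || supports_avx2(), i.e. 128-bit integer
// forms need AVX while 256-bit integer forms need AVX2.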
3073 void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
3074 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3075 emit_vex_arith(0xFD, dst, nds, src, VEX_SIMD_66, vector256);
3076 }
3078 void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
3079 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3080 emit_vex_arith(0xFE, dst, nds, src, VEX_SIMD_66, vector256);
3081 }
3083 void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
3084 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3085 emit_vex_arith(0xD4, dst, nds, src, VEX_SIMD_66, vector256);
3086 }
3088 void Assembler::vpaddb(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
3089 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3090 emit_vex_arith(0xFC, dst, nds, src, VEX_SIMD_66, vector256);
3091 }
3093 void Assembler::vpaddw(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
3094 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3095 emit_vex_arith(0xFD, dst, nds, src, VEX_SIMD_66, vector256);
3096 }
3098 void Assembler::vpaddd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
3099 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3100 emit_vex_arith(0xFE, dst, nds, src, VEX_SIMD_66, vector256);
3101 }
3103 void Assembler::vpaddq(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
3104 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3105 emit_vex_arith(0xD4, dst, nds, src, VEX_SIMD_66, vector256);
3106 }
3108 void Assembler::psubb(XMMRegister dst, XMMRegister src) {
3109 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3110 emit_simd_arith(0xF8, dst, src, VEX_SIMD_66);
3111 }
3113 void Assembler::psubw(XMMRegister dst, XMMRegister src) {
3114 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3115 emit_simd_arith(0xF9, dst, src, VEX_SIMD_66);
3116 }
3118 void Assembler::psubd(XMMRegister dst, XMMRegister src) {
3119 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3120 emit_simd_arith(0xFA, dst, src, VEX_SIMD_66);
3121 }
3123 void Assembler::psubq(XMMRegister dst, XMMRegister src) {
3124 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3125 emit_simd_arith(0xFB, dst, src, VEX_SIMD_66);
3126 }
3128 void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
3129 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3130 emit_vex_arith(0xF8, dst, nds, src, VEX_SIMD_66, vector256);
3131 }
3133 void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
3134 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3135 emit_vex_arith(0xF9, dst, nds, src, VEX_SIMD_66, vector256);
3136 }
3138 void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
3139 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3140 emit_vex_arith(0xFA, dst, nds, src, VEX_SIMD_66, vector256);
3141 }
3143 void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
3144 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3145 emit_vex_arith(0xFB, dst, nds, src, VEX_SIMD_66, vector256);
3146 }
3148 void Assembler::vpsubb(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
3149 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3150 emit_vex_arith(0xF8, dst, nds, src, VEX_SIMD_66, vector256);
3151 }
3153 void Assembler::vpsubw(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
3154 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3155 emit_vex_arith(0xF9, dst, nds, src, VEX_SIMD_66, vector256);
3156 }
3158 void Assembler::vpsubd(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
3159 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3160 emit_vex_arith(0xFA, dst, nds, src, VEX_SIMD_66, vector256);
3161 }
3163 void Assembler::vpsubq(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
3164 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3165 emit_vex_arith(0xFB, dst, nds, src, VEX_SIMD_66, vector256);
3166 }
3168 void Assembler::pmullw(XMMRegister dst, XMMRegister src) {
3169 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3170 emit_simd_arith(0xD5, dst, src, VEX_SIMD_66);
3171 }
3173 void Assembler::pmulld(XMMRegister dst, XMMRegister src) {
3174 assert(VM_Version::supports_sse4_1(), "");
3175 int encode = simd_prefix_and_encode(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F_38);
3176 emit_byte(0x40);
3177 emit_byte(0xC0 | encode);
3178 }
3180 void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
3181 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3182 emit_vex_arith(0xD5, dst, nds, src, VEX_SIMD_66, vector256);
3183 }
3185 void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
3186 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3187 int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_38);
3188 emit_byte(0x40);
3189 emit_byte(0xC0 | encode);
3190 }
3192 void Assembler::vpmullw(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
3193 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3194 emit_vex_arith(0xD5, dst, nds, src, VEX_SIMD_66, vector256);
3195 }
3197 void Assembler::vpmulld(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
3198 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3199 InstructionMark im(this);
3200 int dst_enc = dst->encoding();
3201 int nds_enc = nds->is_valid() ? nds->encoding() : 0;
3202 vex_prefix(src, nds_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_38, false, vector256);
3203 emit_byte(0x40);
3204 emit_operand(dst, src);
3205 }
3207 // Shift packed integers left by the specified number of bits.
3208 void Assembler::psllw(XMMRegister dst, int shift) {
3209 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3210 // XMM6 is for /6 encoding: 66 0F 71 /6 ib
3211 int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66);
3212 emit_byte(0x71);
3213 emit_byte(0xC0 | encode);
3214 emit_byte(shift & 0xFF);
3215 }
3217 void Assembler::pslld(XMMRegister dst, int shift) {
3218 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3219 // XMM6 is for /6 encoding: 66 0F 72 /6 ib
3220 int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66);
3221 emit_byte(0x72);
3222 emit_byte(0xC0 | encode);
3223 emit_byte(shift & 0xFF);
3224 }
3226 void Assembler::psllq(XMMRegister dst, int shift) {
3227 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3228 // XMM6 is for /6 encoding: 66 0F 73 /6 ib
3229 int encode = simd_prefix_and_encode(xmm6, dst, dst, VEX_SIMD_66);
3230 emit_byte(0x73);
3231 emit_byte(0xC0 | encode);
3232 emit_byte(shift & 0xFF);
3233 }
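// Example (illustrative): in the immediate forms above, xmm6 only supplies
// the /6 opcode extension; the register being shifted sits in the r/m
// field, so psllw(xmm1, 5) emits 66 0F 71 F1 05.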
3235 void Assembler::psllw(XMMRegister dst, XMMRegister shift) {
3236 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3237 emit_simd_arith(0xF1, dst, shift, VEX_SIMD_66);
3238 }
3240 void Assembler::pslld(XMMRegister dst, XMMRegister shift) {
3241 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3242 emit_simd_arith(0xF2, dst, shift, VEX_SIMD_66);
3243 }
3245 void Assembler::psllq(XMMRegister dst, XMMRegister shift) {
3246 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3247 emit_simd_arith(0xF3, dst, shift, VEX_SIMD_66);
3248 }
3250 void Assembler::vpsllw(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
3251 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3252 // XMM6 is for /6 encoding: 66 0F 71 /6 ib
3253 emit_vex_arith(0x71, xmm6, dst, src, VEX_SIMD_66, vector256);
3254 emit_byte(shift & 0xFF);
3255 }
3257 void Assembler::vpslld(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
3258 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3259 // XMM6 is for /6 encoding: 66 0F 72 /6 ib
3260 emit_vex_arith(0x72, xmm6, dst, src, VEX_SIMD_66, vector256);
3261 emit_byte(shift & 0xFF);
3262 }
3264 void Assembler::vpsllq(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
3265 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3266 // XMM6 is for /6 encoding: 66 0F 73 /6 ib
3267 emit_vex_arith(0x73, xmm6, dst, src, VEX_SIMD_66, vector256);
3268 emit_byte(shift & 0xFF);
3269 }
3271 void Assembler::vpsllw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
3272 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3273 emit_vex_arith(0xF1, dst, src, shift, VEX_SIMD_66, vector256);
3274 }
3276 void Assembler::vpslld(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
3277 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3278 emit_vex_arith(0xF2, dst, src, shift, VEX_SIMD_66, vector256);
3279 }
3281 void Assembler::vpsllq(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
3282 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3283 emit_vex_arith(0xF3, dst, src, shift, VEX_SIMD_66, vector256);
3284 }
3286 // Shift packed integers logically right by the specified number of bits.
3287 void Assembler::psrlw(XMMRegister dst, int shift) {
3288 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3289 // XMM2 is for /2 encoding: 66 0F 71 /2 ib
3290 int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66);
3291 emit_byte(0x71);
3292 emit_byte(0xC0 | encode);
3293 emit_byte(shift & 0xFF);
3294 }
3296 void Assembler::psrld(XMMRegister dst, int shift) {
3297 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3298 // XMM2 is for /2 encoding: 66 0F 72 /2 ib
3299 int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66);
3300 emit_byte(0x72);
3301 emit_byte(0xC0 | encode);
3302 emit_byte(shift & 0xFF);
3303 }
3305 void Assembler::psrlq(XMMRegister dst, int shift) {
3306 // Do not confuse it with the psrldq SSE2 instruction, which
3307 // shifts the 128-bit value in an xmm register by a number of bytes.
3308 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3309 // XMM2 is for /2 encoding: 66 0F 73 /2 ib
3310 int encode = simd_prefix_and_encode(xmm2, dst, dst, VEX_SIMD_66);
3311 emit_byte(0x73);
3312 emit_byte(0xC0 | encode);
3313 emit_byte(shift & 0xFF);
3314 }
3316 void Assembler::psrlw(XMMRegister dst, XMMRegister shift) {
3317 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3318 emit_simd_arith(0xD1, dst, shift, VEX_SIMD_66);
3319 }
3321 void Assembler::psrld(XMMRegister dst, XMMRegister shift) {
3322 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3323 emit_simd_arith(0xD2, dst, shift, VEX_SIMD_66);
3324 }
3326 void Assembler::psrlq(XMMRegister dst, XMMRegister shift) {
3327 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3328 emit_simd_arith(0xD3, dst, shift, VEX_SIMD_66);
3329 }
3331 void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
3332 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3333 // XMM2 is for /2 encoding: 66 0F 71 /2 ib
3334 emit_vex_arith(0x71, xmm2, dst, src, VEX_SIMD_66, vector256);
3335 emit_byte(shift & 0xFF);
3336 }
3338 void Assembler::vpsrld(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
3339 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3340 // XMM2 is for /2 encoding: 66 0F 72 /2 ib
3341 emit_vex_arith(0x72, xmm2, dst, src, VEX_SIMD_66, vector256);
3342 emit_byte(shift & 0xFF);
3343 }
3345 void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
3346 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3347 // XMM2 is for /2 encoding: 66 0F 73 /2 ib
3348 emit_vex_arith(0x73, xmm2, dst, src, VEX_SIMD_66, vector256);
3349 emit_byte(shift & 0xFF);
3350 }
3352 void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
3353 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3354 emit_vex_arith(0xD1, dst, src, shift, VEX_SIMD_66, vector256);
3355 }
3357 void Assembler::vpsrld(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
3358 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3359 emit_vex_arith(0xD2, dst, src, shift, VEX_SIMD_66, vector256);
3360 }
3362 void Assembler::vpsrlq(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
3363 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3364 emit_vex_arith(0xD3, dst, src, shift, VEX_SIMD_66, vector256);
3365 }
3367 // Shift packed integers arithmetically right by the specified number of bits.
3368 void Assembler::psraw(XMMRegister dst, int shift) {
3369 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3370 // XMM4 is for /4 encoding: 66 0F 71 /4 ib
3371 int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66);
3372 emit_byte(0x71);
3373 emit_byte(0xC0 | encode);
3374 emit_byte(shift & 0xFF);
3375 }
3377 void Assembler::psrad(XMMRegister dst, int shift) {
3378 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3379 // XMM4 is for /4 encoding: 66 0F 72 /4 ib
3380 int encode = simd_prefix_and_encode(xmm4, dst, dst, VEX_SIMD_66);
3381 emit_byte(0x72);
3382 emit_byte(0xC0 | encode);
3383 emit_byte(shift & 0xFF);
3384 }
3386 void Assembler::psraw(XMMRegister dst, XMMRegister shift) {
3387 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3388 emit_simd_arith(0xE1, dst, shift, VEX_SIMD_66);
3389 }
3391 void Assembler::psrad(XMMRegister dst, XMMRegister shift) {
3392 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3393 emit_simd_arith(0xE2, dst, shift, VEX_SIMD_66);
3394 }
3396 void Assembler::vpsraw(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
3397 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3398 // XMM4 is for /4 encoding: 66 0F 71 /4 ib
3399 emit_vex_arith(0x71, xmm4, dst, src, VEX_SIMD_66, vector256);
3400 emit_byte(shift & 0xFF);
3401 }
3403 void Assembler::vpsrad(XMMRegister dst, XMMRegister src, int shift, bool vector256) {
3404 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3405 // XMM4 is for /4 encoding: 66 0F 72 /4 ib
3406 emit_vex_arith(0x72, xmm4, dst, src, VEX_SIMD_66, vector256);
3407 emit_byte(shift & 0xFF);
3408 }
3410 void Assembler::vpsraw(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
3411 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3412 emit_vex_arith(0xE1, dst, src, shift, VEX_SIMD_66, vector256);
3413 }
3415 void Assembler::vpsrad(XMMRegister dst, XMMRegister src, XMMRegister shift, bool vector256) {
3416 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3417 emit_vex_arith(0xE2, dst, src, shift, VEX_SIMD_66, vector256);
3418 }
3421 // AND packed integers
3422 void Assembler::pand(XMMRegister dst, XMMRegister src) {
3423 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3424 emit_simd_arith(0xDB, dst, src, VEX_SIMD_66);
3425 }
3427 void Assembler::vpand(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
3428 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3429 emit_vex_arith(0xDB, dst, nds, src, VEX_SIMD_66, vector256);
3430 }
3432 void Assembler::vpand(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
3433 assert(VM_Version::supports_avx() && !vector256 || VM_Version::supports_avx2(), "256 bit integer vectors requires AVX2");
3434 emit_vex_arith(0xDB, dst, nds, src, VEX_SIMD_66, vector256);
3435 }
3437 void Assembler::por(XMMRegister dst, XMMRegister src) {
3438 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3439 emit_simd_arith(0xEB, dst, src, VEX_SIMD_66);
3440 }
3442 void Assembler::vpor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
3443 assert((VM_Version::supports_avx() && !vector256) || VM_Version::supports_avx2(), "256-bit integer vectors require AVX2");
3444 emit_vex_arith(0xEB, dst, nds, src, VEX_SIMD_66, vector256);
3445 }
3447 void Assembler::vpor(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
3448 assert((VM_Version::supports_avx() && !vector256) || VM_Version::supports_avx2(), "256-bit integer vectors require AVX2");
3449 emit_vex_arith(0xEB, dst, nds, src, VEX_SIMD_66, vector256);
3450 }
3452 void Assembler::pxor(XMMRegister dst, XMMRegister src) {
3453 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
3454 emit_simd_arith(0xEF, dst, src, VEX_SIMD_66);
3455 }
3457 void Assembler::vpxor(XMMRegister dst, XMMRegister nds, XMMRegister src, bool vector256) {
3458 assert((VM_Version::supports_avx() && !vector256) || VM_Version::supports_avx2(), "256-bit integer vectors require AVX2");
3459 emit_vex_arith(0xEF, dst, nds, src, VEX_SIMD_66, vector256);
3460 }
3462 void Assembler::vpxor(XMMRegister dst, XMMRegister nds, Address src, bool vector256) {
3463 assert((VM_Version::supports_avx() && !vector256) || VM_Version::supports_avx2(), "256-bit integer vectors require AVX2");
3464 emit_vex_arith(0xEF, dst, nds, src, VEX_SIMD_66, vector256);
3465 }
3468 void Assembler::vinsertf128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
3469 assert(VM_Version::supports_avx(), "");
3470 bool vector256 = true;
3471 int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_3A);
3472 emit_byte(0x18);
3473 emit_byte(0xC0 | encode);
3474 // 0x00 - insert into lower 128 bits
3475 // 0x01 - insert into upper 128 bits
3476 emit_byte(0x01);
3477 }
3479 void Assembler::vinsertf128h(XMMRegister dst, Address src) {
3480 assert(VM_Version::supports_avx(), "");
3481 InstructionMark im(this);
3482 bool vector256 = true;
3483 assert(dst != xnoreg, "sanity");
3484 int dst_enc = dst->encoding();
3485 // swap src<->dst for encoding
3486 vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector256);
3487 emit_byte(0x18);
3488 emit_operand(dst, src);
3489 // 0x01 - insert into upper 128 bits
3490 emit_byte(0x01);
3491 }
3493 void Assembler::vextractf128h(Address dst, XMMRegister src) {
3494 assert(VM_Version::supports_avx(), "");
3495 InstructionMark im(this);
3496 bool vector256 = true;
3497 assert(src != xnoreg, "sanity");
3498 int src_enc = src->encoding();
3499 vex_prefix(dst, 0, src_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector256);
3500 emit_byte(0x19);
3501 emit_operand(src, dst);
3502 // 0x01 - extract from upper 128 bits
3503 emit_byte(0x01);
3504 }
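// Note on the trailing imm8 in the routines above: for vinsertf128 and
// vextractf128 it selects the 128-bit half of the ymm register (0x00 = lower,
// 0x01 = upper), so these helpers always address the upper half.
// Illustrative asm form: vinsertf128 ymm1, ymm1, xmm2, 1.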
3506 void Assembler::vinserti128h(XMMRegister dst, XMMRegister nds, XMMRegister src) {
3507 assert(VM_Version::supports_avx2(), "");
3508 bool vector256 = true;
3509 int encode = vex_prefix_and_encode(dst, nds, src, VEX_SIMD_66, vector256, VEX_OPCODE_0F_3A);
3510 emit_byte(0x38);
3511 emit_byte(0xC0 | encode);
3512 // 0x00 - insert into lower 128 bits
3513 // 0x01 - insert into upper 128 bits
3514 emit_byte(0x01);
3515 }
3517 void Assembler::vinserti128h(XMMRegister dst, Address src) {
3518 assert(VM_Version::supports_avx2(), "");
3519 InstructionMark im(this);
3520 bool vector256 = true;
3521 assert(dst != xnoreg, "sanity");
3522 int dst_enc = dst->encoding();
3523 // swap src<->dst for encoding
3524 vex_prefix(src, dst_enc, dst_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector256);
3525 emit_byte(0x38);
3526 emit_operand(dst, src);
3527 // 0x01 - insert into upper 128 bits
3528 emit_byte(0x01);
3529 }
3531 void Assembler::vextracti128h(Address dst, XMMRegister src) {
3532 assert(VM_Version::supports_avx2(), "");
3533 InstructionMark im(this);
3534 bool vector256 = true;
3535 assert(src != xnoreg, "sanity");
3536 int src_enc = src->encoding();
3537 vex_prefix(dst, 0, src_enc, VEX_SIMD_66, VEX_OPCODE_0F_3A, false, vector256);
3538 emit_byte(0x39);
3539 emit_operand(src, dst);
3540 // 0x01 - extract from upper 128 bits
3541 emit_byte(0x01);
3542 }
3544 void Assembler::vzeroupper() {
3545 assert(VM_Version::supports_avx(), "");
3546 (void)vex_prefix_and_encode(xmm0, xmm0, xmm0, VEX_SIMD_NONE);
3547 emit_byte(0x77);
3548 }
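// vzeroupper encodes as C5 F8 77 (2-byte VEX, no SIMD prefix). It zeroes the
// upper 128 bits of all ymm registers and is typically emitted before
// transferring to legacy SSE code, to avoid AVX-to-SSE transition penalties.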
3551 #ifndef _LP64
3552 // 32bit only pieces of the assembler
3554 void Assembler::cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec) {
3555 // NO PREFIX AS NEVER 64BIT
3556 InstructionMark im(this);
3557 emit_byte(0x81);
3558 emit_byte(0xF8 | src1->encoding());
3559 emit_data(imm32, rspec, 0);
3560 }
3562 void Assembler::cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec) {
3563 // NO PREFIX AS NEVER 64BIT (not even 32bit versions of 64bit regs)
3564 InstructionMark im(this);
3565 emit_byte(0x81);
3566 emit_operand(rdi, src1);
3567 emit_data(imm32, rspec, 0);
3568 }
3570 // The 64-bit (32bit platform) cmpxchg compares the value at adr with the contents of rdx:rax
3571 // and, if equal, stores rcx:rbx into adr; otherwise the value at adr is loaded
3572 // into rdx:rax. The ZF is set if the compared values were equal, and cleared otherwise.
3573 void Assembler::cmpxchg8(Address adr) {
3574 InstructionMark im(this);
3575 emit_byte(0x0F);
3576 emit_byte(0xC7);
3577 emit_operand(rcx, adr);
3578 }
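// A minimal usage sketch (illustrative only; field_addr is a hypothetical
// Address): 64-bit atomics on 32-bit VMs pair this with a lock prefix --
//   if (os::is_MP()) lock();
//   cmpxchg8(field_addr);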
3580 void Assembler::decl(Register dst) {
3581 // Don't use it directly. Use MacroAssembler::decrementl() instead.
3582 emit_byte(0x48 | dst->encoding());
3583 }
3585 #endif // _LP64
3587 // 64bit typically doesn't use the x87, but needs it for the trig funcs
3589 void Assembler::fabs() {
3590 emit_byte(0xD9);
3591 emit_byte(0xE1);
3592 }
3594 void Assembler::fadd(int i) {
3595 emit_farith(0xD8, 0xC0, i);
3596 }
3598 void Assembler::fadd_d(Address src) {
3599 InstructionMark im(this);
3600 emit_byte(0xDC);
3601 emit_operand32(rax, src);
3602 }
3604 void Assembler::fadd_s(Address src) {
3605 InstructionMark im(this);
3606 emit_byte(0xD8);
3607 emit_operand32(rax, src);
3608 }
3610 void Assembler::fadda(int i) {
3611 emit_farith(0xDC, 0xC0, i);
3612 }
3614 void Assembler::faddp(int i) {
3615 emit_farith(0xDE, 0xC0, i);
3616 }
3618 void Assembler::fchs() {
3619 emit_byte(0xD9);
3620 emit_byte(0xE0);
3621 }
3623 void Assembler::fcom(int i) {
3624 emit_farith(0xD8, 0xD0, i);
3625 }
3627 void Assembler::fcomp(int i) {
3628 emit_farith(0xD8, 0xD8, i);
3629 }
3631 void Assembler::fcomp_d(Address src) {
3632 InstructionMark im(this);
3633 emit_byte(0xDC);
3634 emit_operand32(rbx, src);
3635 }
3637 void Assembler::fcomp_s(Address src) {
3638 InstructionMark im(this);
3639 emit_byte(0xD8);
3640 emit_operand32(rbx, src);
3641 }
3643 void Assembler::fcompp() {
3644 emit_byte(0xDE);
3645 emit_byte(0xD9);
3646 }
3648 void Assembler::fcos() {
3649 emit_byte(0xD9);
3650 emit_byte(0xFF);
3651 }
3653 void Assembler::fdecstp() {
3654 emit_byte(0xD9);
3655 emit_byte(0xF6);
3656 }
3658 void Assembler::fdiv(int i) {
3659 emit_farith(0xD8, 0xF0, i);
3660 }
3662 void Assembler::fdiv_d(Address src) {
3663 InstructionMark im(this);
3664 emit_byte(0xDC);
3665 emit_operand32(rsi, src);
3666 }
3668 void Assembler::fdiv_s(Address src) {
3669 InstructionMark im(this);
3670 emit_byte(0xD8);
3671 emit_operand32(rsi, src);
3672 }
3674 void Assembler::fdiva(int i) {
3675 emit_farith(0xDC, 0xF8, i);
3676 }
3678 // Note: The Intel manual (Pentium Processor User's Manual, Vol.3, 1994)
3679 // is erroneous for some of the floating-point instructions below.
3681 void Assembler::fdivp(int i) {
3682 emit_farith(0xDE, 0xF8, i); // ST(0) <- ST(0) / ST(1) and pop (Intel manual wrong)
3683 }
3685 void Assembler::fdivr(int i) {
3686 emit_farith(0xD8, 0xF8, i);
3687 }
3689 void Assembler::fdivr_d(Address src) {
3690 InstructionMark im(this);
3691 emit_byte(0xDC);
3692 emit_operand32(rdi, src);
3693 }
3695 void Assembler::fdivr_s(Address src) {
3696 InstructionMark im(this);
3697 emit_byte(0xD8);
3698 emit_operand32(rdi, src);
3699 }
3701 void Assembler::fdivra(int i) {
3702 emit_farith(0xDC, 0xF0, i);
3703 }
3705 void Assembler::fdivrp(int i) {
3706 emit_farith(0xDE, 0xF0, i); // ST(0) <- ST(1) / ST(0) and pop (Intel manual wrong)
3707 }
3709 void Assembler::ffree(int i) {
3710 emit_farith(0xDD, 0xC0, i);
3711 }
3713 void Assembler::fild_d(Address adr) {
3714 InstructionMark im(this);
3715 emit_byte(0xDF);
3716 emit_operand32(rbp, adr);
3717 }
3719 void Assembler::fild_s(Address adr) {
3720 InstructionMark im(this);
3721 emit_byte(0xDB);
3722 emit_operand32(rax, adr);
3723 }
3725 void Assembler::fincstp() {
3726 emit_byte(0xD9);
3727 emit_byte(0xF7);
3728 }
3730 void Assembler::finit() {
3731 emit_byte(0x9B);
3732 emit_byte(0xDB);
3733 emit_byte(0xE3);
3734 }
3736 void Assembler::fist_s(Address adr) {
3737 InstructionMark im(this);
3738 emit_byte(0xDB);
3739 emit_operand32(rdx, adr);
3740 }
3742 void Assembler::fistp_d(Address adr) {
3743 InstructionMark im(this);
3744 emit_byte(0xDF);
3745 emit_operand32(rdi, adr);
3746 }
3748 void Assembler::fistp_s(Address adr) {
3749 InstructionMark im(this);
3750 emit_byte(0xDB);
3751 emit_operand32(rbx, adr);
3752 }
3754 void Assembler::fld1() {
3755 emit_byte(0xD9);
3756 emit_byte(0xE8);
3757 }
3759 void Assembler::fld_d(Address adr) {
3760 InstructionMark im(this);
3761 emit_byte(0xDD);
3762 emit_operand32(rax, adr);
3763 }
3765 void Assembler::fld_s(Address adr) {
3766 InstructionMark im(this);
3767 emit_byte(0xD9);
3768 emit_operand32(rax, adr);
3769 }
3772 void Assembler::fld_s(int index) {
3773 emit_farith(0xD9, 0xC0, index);
3774 }
3776 void Assembler::fld_x(Address adr) {
3777 InstructionMark im(this);
3778 emit_byte(0xDB);
3779 emit_operand32(rbp, adr);
3780 }
3782 void Assembler::fldcw(Address src) {
3783 InstructionMark im(this);
3784 emit_byte(0xD9);
3785 emit_operand32(rbp, src);
3786 }
3788 void Assembler::fldenv(Address src) {
3789 InstructionMark im(this);
3790 emit_byte(0xD9);
3791 emit_operand32(rsp, src);
3792 }
3794 void Assembler::fldlg2() {
3795 emit_byte(0xD9);
3796 emit_byte(0xEC);
3797 }
3799 void Assembler::fldln2() {
3800 emit_byte(0xD9);
3801 emit_byte(0xED);
3802 }
3804 void Assembler::fldz() {
3805 emit_byte(0xD9);
3806 emit_byte(0xEE);
3807 }
3809 void Assembler::flog() {
3810 fldln2();
3811 fxch();
3812 fyl2x();
3813 }
3815 void Assembler::flog10() {
3816 fldlg2();
3817 fxch();
3818 fyl2x();
3819 }
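// The two routines above rely on the identity log_b(x) = log_b(2) * log2(x):
// fldln2/fldlg2 push ln(2) or log10(2), fxch brings the caller's x back to
// ST(0), and fyl2x computes ST(1) * log2(ST(0)), popping both operands and
// leaving the natural or base-10 logarithm in ST(0).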
3821 void Assembler::fmul(int i) {
3822 emit_farith(0xD8, 0xC8, i);
3823 }
3825 void Assembler::fmul_d(Address src) {
3826 InstructionMark im(this);
3827 emit_byte(0xDC);
3828 emit_operand32(rcx, src);
3829 }
3831 void Assembler::fmul_s(Address src) {
3832 InstructionMark im(this);
3833 emit_byte(0xD8);
3834 emit_operand32(rcx, src);
3835 }
3837 void Assembler::fmula(int i) {
3838 emit_farith(0xDC, 0xC8, i);
3839 }
3841 void Assembler::fmulp(int i) {
3842 emit_farith(0xDE, 0xC8, i);
3843 }
3845 void Assembler::fnsave(Address dst) {
3846 InstructionMark im(this);
3847 emit_byte(0xDD);
3848 emit_operand32(rsi, dst);
3849 }
3851 void Assembler::fnstcw(Address src) {
3852 InstructionMark im(this);
3853 emit_byte(0x9B);
3854 emit_byte(0xD9);
3855 emit_operand32(rdi, src);
3856 }
3858 void Assembler::fnstsw_ax() {
3859 emit_byte(0xDF);
3860 emit_byte(0xE0);
3861 }
3863 void Assembler::fprem() {
3864 emit_byte(0xD9);
3865 emit_byte(0xF8);
3866 }
3868 void Assembler::fprem1() {
3869 emit_byte(0xD9);
3870 emit_byte(0xF5);
3871 }
3873 void Assembler::frstor(Address src) {
3874 InstructionMark im(this);
3875 emit_byte(0xDD);
3876 emit_operand32(rsp, src);
3877 }
3879 void Assembler::fsin() {
3880 emit_byte(0xD9);
3881 emit_byte(0xFE);
3882 }
3884 void Assembler::fsqrt() {
3885 emit_byte(0xD9);
3886 emit_byte(0xFA);
3887 }
3889 void Assembler::fst_d(Address adr) {
3890 InstructionMark im(this);
3891 emit_byte(0xDD);
3892 emit_operand32(rdx, adr);
3893 }
3895 void Assembler::fst_s(Address adr) {
3896 InstructionMark im(this);
3897 emit_byte(0xD9);
3898 emit_operand32(rdx, adr);
3899 }
3901 void Assembler::fstp_d(Address adr) {
3902 InstructionMark im(this);
3903 emit_byte(0xDD);
3904 emit_operand32(rbx, adr);
3905 }
3907 void Assembler::fstp_d(int index) {
3908 emit_farith(0xDD, 0xD8, index);
3909 }
3911 void Assembler::fstp_s(Address adr) {
3912 InstructionMark im(this);
3913 emit_byte(0xD9);
3914 emit_operand32(rbx, adr);
3915 }
3917 void Assembler::fstp_x(Address adr) {
3918 InstructionMark im(this);
3919 emit_byte(0xDB);
3920 emit_operand32(rdi, adr);
3921 }
3923 void Assembler::fsub(int i) {
3924 emit_farith(0xD8, 0xE0, i);
3925 }
3927 void Assembler::fsub_d(Address src) {
3928 InstructionMark im(this);
3929 emit_byte(0xDC);
3930 emit_operand32(rsp, src);
3931 }
3933 void Assembler::fsub_s(Address src) {
3934 InstructionMark im(this);
3935 emit_byte(0xD8);
3936 emit_operand32(rsp, src);
3937 }
3939 void Assembler::fsuba(int i) {
3940 emit_farith(0xDC, 0xE8, i);
3941 }
3943 void Assembler::fsubp(int i) {
3944 emit_farith(0xDE, 0xE8, i); // ST(0) <- ST(0) - ST(1) and pop (Intel manual wrong)
3945 }
3947 void Assembler::fsubr(int i) {
3948 emit_farith(0xD8, 0xE8, i);
3949 }
3951 void Assembler::fsubr_d(Address src) {
3952 InstructionMark im(this);
3953 emit_byte(0xDC);
3954 emit_operand32(rbp, src);
3955 }
3957 void Assembler::fsubr_s(Address src) {
3958 InstructionMark im(this);
3959 emit_byte(0xD8);
3960 emit_operand32(rbp, src);
3961 }
3963 void Assembler::fsubra(int i) {
3964 emit_farith(0xDC, 0xE0, i);
3965 }
3967 void Assembler::fsubrp(int i) {
3968 emit_farith(0xDE, 0xE0, i); // ST(0) <- ST(1) - ST(0) and pop (Intel manual wrong)
3969 }
3971 void Assembler::ftan() {
3972 emit_byte(0xD9);
3973 emit_byte(0xF2);
3974 emit_byte(0xDD);
3975 emit_byte(0xD8);
3976 }
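// The trailing DD D8 above is FSTP ST(0): FPTAN (D9 F2) replaces ST(0) with
// tan(ST(0)) and then pushes 1.0, so the extra pop discards the 1.0 and
// leaves only the tangent on the stack.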
3978 void Assembler::ftst() {
3979 emit_byte(0xD9);
3980 emit_byte(0xE4);
3981 }
3983 void Assembler::fucomi(int i) {
3984 // make sure the instruction is supported (introduced for P6, together with cmov)
3985 guarantee(VM_Version::supports_cmov(), "illegal instruction");
3986 emit_farith(0xDB, 0xE8, i);
3987 }
3989 void Assembler::fucomip(int i) {
3990 // make sure the instruction is supported (introduced for P6, together with cmov)
3991 guarantee(VM_Version::supports_cmov(), "illegal instruction");
3992 emit_farith(0xDF, 0xE8, i);
3993 }
3995 void Assembler::fwait() {
3996 emit_byte(0x9B);
3997 }
3999 void Assembler::fxch(int i) {
4000 emit_farith(0xD9, 0xC8, i);
4001 }
4003 void Assembler::fyl2x() {
4004 emit_byte(0xD9);
4005 emit_byte(0xF1);
4006 }
4008 void Assembler::frndint() {
4009 emit_byte(0xD9);
4010 emit_byte(0xFC);
4011 }
4013 void Assembler::f2xm1() {
4014 emit_byte(0xD9);
4015 emit_byte(0xF0);
4016 }
4018 void Assembler::fldl2e() {
4019 emit_byte(0xD9);
4020 emit_byte(0xEA);
4021 }
4023 // SSE SIMD prefix byte values corresponding to VexSimdPrefix encoding.
4024 static int simd_pre[4] = { 0, 0x66, 0xF3, 0xF2 };
4025 // SSE opcode second byte values (first is 0x0F) corresponding to VexOpcode encoding.
4026 static int simd_opc[4] = { 0, 0, 0x38, 0x3A };
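// Example reading of the tables (this follows directly from the encoders
// below): VEX_SIMD_F2 maps to the 0xF2 legacy prefix, and VEX_OPCODE_0F_38
// maps to the two-byte escape 0F 38 (VEX_OPCODE_0F needs only the 0x0F byte).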
4028 // Generate SSE legacy REX prefix and SIMD opcode based on VEX encoding.
4029 void Assembler::rex_prefix(Address adr, XMMRegister xreg, VexSimdPrefix pre, VexOpcode opc, bool rex_w) {
4030 if (pre > 0) {
4031 emit_byte(simd_pre[pre]);
4032 }
4033 if (rex_w) {
4034 prefixq(adr, xreg);
4035 } else {
4036 prefix(adr, xreg);
4037 }
4038 if (opc > 0) {
4039 emit_byte(0x0F);
4040 int opc2 = simd_opc[opc];
4041 if (opc2 > 0) {
4042 emit_byte(opc2);
4043 }
4044 }
4045 }
4047 int Assembler::rex_prefix_and_encode(int dst_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, bool rex_w) {
4048 if (pre > 0) {
4049 emit_byte(simd_pre[pre]);
4050 }
4051 int encode = (rex_w) ? prefixq_and_encode(dst_enc, src_enc) :
4052 prefix_and_encode(dst_enc, src_enc);
4053 if (opc > 0) {
4054 emit_byte(0x0F);
4055 int opc2 = simd_opc[opc];
4056 if (opc2 > 0) {
4057 emit_byte(opc2);
4058 }
4059 }
4060 return encode;
4061 }
4064 void Assembler::vex_prefix(bool vex_r, bool vex_b, bool vex_x, bool vex_w, int nds_enc, VexSimdPrefix pre, VexOpcode opc, bool vector256) {
4065 if (vex_b || vex_x || vex_w || (opc == VEX_OPCODE_0F_38) || (opc == VEX_OPCODE_0F_3A)) {
4066 prefix(VEX_3bytes);
4068 int byte1 = (vex_r ? VEX_R : 0) | (vex_x ? VEX_X : 0) | (vex_b ? VEX_B : 0);
4069 byte1 = (~byte1) & 0xE0;
4070 byte1 |= opc;
4071 a_byte(byte1);
4073 int byte2 = ((~nds_enc) & 0xf) << 3;
4074 byte2 |= (vex_w ? VEX_W : 0) | (vector256 ? 4 : 0) | pre;
4075 emit_byte(byte2);
4076 } else {
4077 prefix(VEX_2bytes);
4079 int byte1 = vex_r ? VEX_R : 0;
4080 byte1 = (~byte1) & 0x80;
4081 byte1 |= ((~nds_enc) & 0xf) << 3;
4082 byte1 |= (vector256 ? 4 : 0) | pre;
4083 emit_byte(byte1);
4084 }
4085 }
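// Worked example of the 2-byte form (illustrative): for vaddsd
// xmm0, xmm1, xmm2 (nds_enc = 1, pre = VEX_SIMD_F2, vector256 = false) the
// byte is 0x80 | ((~1 & 0xF) << 3) | 3 = 0xF3, giving the prefix C5 F3; the
// caller then supplies the opcode 58 and ModRM C2.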
4087 void Assembler::vex_prefix(Address adr, int nds_enc, int xreg_enc, VexSimdPrefix pre, VexOpcode opc, bool vex_w, bool vector256){
4088 bool vex_r = (xreg_enc >= 8);
4089 bool vex_b = adr.base_needs_rex();
4090 bool vex_x = adr.index_needs_rex();
4091 vex_prefix(vex_r, vex_b, vex_x, vex_w, nds_enc, pre, opc, vector256);
4092 }
4094 int Assembler::vex_prefix_and_encode(int dst_enc, int nds_enc, int src_enc, VexSimdPrefix pre, VexOpcode opc, bool vex_w, bool vector256) {
4095 bool vex_r = (dst_enc >= 8);
4096 bool vex_b = (src_enc >= 8);
4097 bool vex_x = false;
4098 vex_prefix(vex_r, vex_b, vex_x, vex_w, nds_enc, pre, opc, vector256);
4099 return (((dst_enc & 7) << 3) | (src_enc & 7));
4100 }
4103 void Assembler::simd_prefix(XMMRegister xreg, XMMRegister nds, Address adr, VexSimdPrefix pre, VexOpcode opc, bool rex_w, bool vector256) {
4104 if (UseAVX > 0) {
4105 int xreg_enc = xreg->encoding();
4106 int nds_enc = nds->is_valid() ? nds->encoding() : 0;
4107 vex_prefix(adr, nds_enc, xreg_enc, pre, opc, rex_w, vector256);
4108 } else {
4109 assert((nds == xreg) || (nds == xnoreg), "wrong sse encoding");
4110 rex_prefix(adr, xreg, pre, opc, rex_w);
4111 }
4112 }
4114 int Assembler::simd_prefix_and_encode(XMMRegister dst, XMMRegister nds, XMMRegister src, VexSimdPrefix pre, VexOpcode opc, bool rex_w, bool vector256) {
4115 int dst_enc = dst->encoding();
4116 int src_enc = src->encoding();
4117 if (UseAVX > 0) {
4118 int nds_enc = nds->is_valid() ? nds->encoding() : 0;
4119 return vex_prefix_and_encode(dst_enc, nds_enc, src_enc, pre, opc, rex_w, vector256);
4120 } else {
4121 assert((nds == dst) || (nds == src) || (nds == xnoreg), "wrong sse encoding");
4122 return rex_prefix_and_encode(dst_enc, src_enc, pre, opc, rex_w);
4123 }
4124 }
4126 void Assembler::emit_simd_arith(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre) {
4127 InstructionMark im(this);
4128 simd_prefix(dst, dst, src, pre);
4129 emit_byte(opcode);
4130 emit_operand(dst, src);
4131 }
4133 void Assembler::emit_simd_arith(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre) {
4134 int encode = simd_prefix_and_encode(dst, dst, src, pre);
4135 emit_byte(opcode);
4136 emit_byte(0xC0 | encode);
4137 }
4139 // Versions with no second source register (non-destructive source).
4140 void Assembler::emit_simd_arith_nonds(int opcode, XMMRegister dst, Address src, VexSimdPrefix pre) {
4141 InstructionMark im(this);
4142 simd_prefix(dst, xnoreg, src, pre);
4143 emit_byte(opcode);
4144 emit_operand(dst, src);
4145 }
4147 void Assembler::emit_simd_arith_nonds(int opcode, XMMRegister dst, XMMRegister src, VexSimdPrefix pre) {
4148 int encode = simd_prefix_and_encode(dst, xnoreg, src, pre);
4149 emit_byte(opcode);
4150 emit_byte(0xC0 | encode);
4151 }
4153 // 3-operands AVX instructions
4154 void Assembler::emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds,
4155 Address src, VexSimdPrefix pre, bool vector256) {
4156 InstructionMark im(this);
4157 vex_prefix(dst, nds, src, pre, vector256);
4158 emit_byte(opcode);
4159 emit_operand(dst, src);
4160 }
4162 void Assembler::emit_vex_arith(int opcode, XMMRegister dst, XMMRegister nds,
4163 XMMRegister src, VexSimdPrefix pre, bool vector256) {
4164 int encode = vex_prefix_and_encode(dst, nds, src, pre, vector256);
4165 emit_byte(opcode);
4166 emit_byte(0xC0 | encode);
4167 }
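// Flow example (illustrative): addsd, defined earlier in this file, calls
// emit_simd_arith(0x58, dst, src, VEX_SIMD_F2). With UseAVX == 0 that emits
// the legacy F2 0F 58 /r form; with AVX enabled the same call re-encodes it
// as VEX.F2.0F 58 /r, using dst as both destination and first source.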
4169 #ifndef _LP64
4171 void Assembler::incl(Register dst) {
4172 // Don't use it directly. Use MacroAssembler::incrementl() instead.
4173 emit_byte(0x40 | dst->encoding());
4174 }
4176 void Assembler::lea(Register dst, Address src) {
4177 leal(dst, src);
4178 }
4180 void Assembler::mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec) {
4181 InstructionMark im(this);
4182 emit_byte(0xC7);
4183 emit_operand(rax, dst);
4184 emit_data((int)imm32, rspec, 0);
4185 }
4187 void Assembler::mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec) {
4188 InstructionMark im(this);
4189 int encode = prefix_and_encode(dst->encoding());
4190 emit_byte(0xB8 | encode);
4191 emit_data((int)imm32, rspec, 0);
4192 }
4194 void Assembler::popa() { // 32bit
4195 emit_byte(0x61);
4196 }
4198 void Assembler::push_literal32(int32_t imm32, RelocationHolder const& rspec) {
4199 InstructionMark im(this);
4200 emit_byte(0x68);
4201 emit_data(imm32, rspec, 0);
4202 }
4204 void Assembler::pusha() { // 32bit
4205 emit_byte(0x60);
4206 }
4208 void Assembler::set_byte_if_not_zero(Register dst) {
4209 emit_byte(0x0F);
4210 emit_byte(0x95);
4211 emit_byte(0xE0 | dst->encoding());
4212 }
4214 void Assembler::shldl(Register dst, Register src) {
4215 emit_byte(0x0F);
4216 emit_byte(0xA5);
4217 emit_byte(0xC0 | src->encoding() << 3 | dst->encoding());
4218 }
4220 void Assembler::shrdl(Register dst, Register src) {
4221 emit_byte(0x0F);
4222 emit_byte(0xAD);
4223 emit_byte(0xC0 | src->encoding() << 3 | dst->encoding());
4224 }
4226 #else // LP64
4228 void Assembler::set_byte_if_not_zero(Register dst) {
4229 int enc = prefix_and_encode(dst->encoding(), true);
4230 emit_byte(0x0F);
4231 emit_byte(0x95);
4232 emit_byte(0xE0 | enc);
4233 }
4235 // 64bit only pieces of the assembler
4236 // This should only be used by 64bit instructions that can use rip-relative
4237 // addressing; it cannot be used by instructions that want an immediate value.
4239 bool Assembler::reachable(AddressLiteral adr) {
4240 int64_t disp;
4241 // None will force a 64bit literal to the code stream. Likely a placeholder
4242 // for something that will be patched later and we need to be certain it will
4243 // always be reachable.
4244 if (adr.reloc() == relocInfo::none) {
4245 return false;
4246 }
4247 if (adr.reloc() == relocInfo::internal_word_type) {
4248 // This should be rip relative and easily reachable.
4249 return true;
4250 }
4251 if (adr.reloc() == relocInfo::virtual_call_type ||
4252 adr.reloc() == relocInfo::opt_virtual_call_type ||
4253 adr.reloc() == relocInfo::static_call_type ||
4254 adr.reloc() == relocInfo::static_stub_type ) {
4255 // This should be rip relative within the code cache and easily
4256 // reachable until we get huge code caches. (At which point
4257 // ic code is going to have issues).
4258 return true;
4259 }
4260 if (adr.reloc() != relocInfo::external_word_type &&
4261 adr.reloc() != relocInfo::poll_return_type && // these are really external_word but need special
4262 adr.reloc() != relocInfo::poll_type && // relocs to identify them
4263 adr.reloc() != relocInfo::runtime_call_type ) {
4264 return false;
4265 }
4267 // Stress the correction code
4268 if (ForceUnreachable) {
4269 // Must be a runtime_call reloc; see if it is in the code cache.
4270 // Flipping stuff in the codecache to be unreachable causes issues
4271 // with things like inline caches where the additional instructions
4272 // are not handled.
4273 if (CodeCache::find_blob(adr._target) == NULL) {
4274 return false;
4275 }
4276 }
4277 // For external_word_type/runtime_call_type, if the target is reachable from
4278 // where we are now (possibly a temp buffer) and from anywhere we might end up
4279 // in the codeCache, then we are always reachable.
4280 // This would have to change to be more pessimistic if we ever save/restore
4281 // shared code.
4282 disp = (int64_t)adr._target - ((int64_t)CodeCache::low_bound() + sizeof(int));
4283 if (!is_simm32(disp)) return false;
4284 disp = (int64_t)adr._target - ((int64_t)CodeCache::high_bound() + sizeof(int));
4285 if (!is_simm32(disp)) return false;
4287 disp = (int64_t)adr._target - ((int64_t)_code_pos + sizeof(int));
4289 // Because rip relative is a disp + address_of_next_instruction and we
4290 // don't know the value of address_of_next_instruction we apply a fudge factor
4291 // to make sure we will be ok no matter the size of the instruction we get placed into.
4292 // We don't have to fudge the checks above here because they are already worst case.
4294 // 12 == override/rex byte, opcode byte, rm byte, sib byte, a 4-byte disp, 4-byte literal
4295 // + 4 because better safe than sorry.
4296 const int fudge = 12 + 4;
4297 if (disp < 0) {
4298 disp -= fudge;
4299 } else {
4300 disp += fudge;
4301 }
4302 return is_simm32(disp);
4303 }
4305 // Check if the polling page is not reachable from the code cache using rip-relative
4306 // addressing.
4307 bool Assembler::is_polling_page_far() {
4308 intptr_t addr = (intptr_t)os::get_polling_page();
4309 return ForceUnreachable ||
4310 !is_simm32(addr - (intptr_t)CodeCache::low_bound()) ||
4311 !is_simm32(addr - (intptr_t)CodeCache::high_bound());
4312 }
4314 void Assembler::emit_data64(jlong data,
4315 relocInfo::relocType rtype,
4316 int format) {
4317 if (rtype == relocInfo::none) {
4318 emit_long64(data);
4319 } else {
4320 emit_data64(data, Relocation::spec_simple(rtype), format);
4321 }
4322 }
4324 void Assembler::emit_data64(jlong data,
4325 RelocationHolder const& rspec,
4326 int format) {
4327 assert(imm_operand == 0, "default format must be immediate in this file");
4328 assert(imm_operand == format, "must be immediate");
4329 assert(inst_mark() != NULL, "must be inside InstructionMark");
4330 // Do not use AbstractAssembler::relocate, which is not intended for
4331 // embedded words. Instead, relocate to the enclosing instruction.
4332 code_section()->relocate(inst_mark(), rspec, format);
4333 #ifdef ASSERT
4334 check_relocation(rspec, format);
4335 #endif
4336 emit_long64(data);
4337 }
4339 int Assembler::prefix_and_encode(int reg_enc, bool byteinst) {
4340 if (reg_enc >= 8) {
4341 prefix(REX_B);
4342 reg_enc -= 8;
4343 } else if (byteinst && reg_enc >= 4) {
4344 prefix(REX);
4345 }
4346 return reg_enc;
4347 }
4349 int Assembler::prefixq_and_encode(int reg_enc) {
4350 if (reg_enc < 8) {
4351 prefix(REX_W);
4352 } else {
4353 prefix(REX_WB);
4354 reg_enc -= 8;
4355 }
4356 return reg_enc;
4357 }
4359 int Assembler::prefix_and_encode(int dst_enc, int src_enc, bool byteinst) {
4360 if (dst_enc < 8) {
4361 if (src_enc >= 8) {
4362 prefix(REX_B);
4363 src_enc -= 8;
4364 } else if (byteinst && src_enc >= 4) {
4365 prefix(REX);
4366 }
4367 } else {
4368 if (src_enc < 8) {
4369 prefix(REX_R);
4370 } else {
4371 prefix(REX_RB);
4372 src_enc -= 8;
4373 }
4374 dst_enc -= 8;
4375 }
4376 return dst_enc << 3 | src_enc;
4377 }
4379 int Assembler::prefixq_and_encode(int dst_enc, int src_enc) {
4380 if (dst_enc < 8) {
4381 if (src_enc < 8) {
4382 prefix(REX_W);
4383 } else {
4384 prefix(REX_WB);
4385 src_enc -= 8;
4386 }
4387 } else {
4388 if (src_enc < 8) {
4389 prefix(REX_WR);
4390 } else {
4391 prefix(REX_WRB);
4392 src_enc -= 8;
4393 }
4394 dst_enc -= 8;
4395 }
4396 return dst_enc << 3 | src_enc;
4397 }
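// Worked example (illustrative): addq(rax, r8) reaches the routine above with
// dst_enc = 0 and src_enc = 8, so it emits REX_WB (0x49) and returns 0; the
// caller's emit_arith then produces 03 C0, i.e. the full sequence 49 03 C0.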
4399 void Assembler::prefix(Register reg) {
4400 if (reg->encoding() >= 8) {
4401 prefix(REX_B);
4402 }
4403 }
4405 void Assembler::prefix(Address adr) {
4406 if (adr.base_needs_rex()) {
4407 if (adr.index_needs_rex()) {
4408 prefix(REX_XB);
4409 } else {
4410 prefix(REX_B);
4411 }
4412 } else {
4413 if (adr.index_needs_rex()) {
4414 prefix(REX_X);
4415 }
4416 }
4417 }
4419 void Assembler::prefixq(Address adr) {
4420 if (adr.base_needs_rex()) {
4421 if (adr.index_needs_rex()) {
4422 prefix(REX_WXB);
4423 } else {
4424 prefix(REX_WB);
4425 }
4426 } else {
4427 if (adr.index_needs_rex()) {
4428 prefix(REX_WX);
4429 } else {
4430 prefix(REX_W);
4431 }
4432 }
4433 }
4436 void Assembler::prefix(Address adr, Register reg, bool byteinst) {
4437 if (reg->encoding() < 8) {
4438 if (adr.base_needs_rex()) {
4439 if (adr.index_needs_rex()) {
4440 prefix(REX_XB);
4441 } else {
4442 prefix(REX_B);
4443 }
4444 } else {
4445 if (adr.index_needs_rex()) {
4446 prefix(REX_X);
4447 } else if (byteinst && reg->encoding() >= 4 ) {
4448 prefix(REX);
4449 }
4450 }
4451 } else {
4452 if (adr.base_needs_rex()) {
4453 if (adr.index_needs_rex()) {
4454 prefix(REX_RXB);
4455 } else {
4456 prefix(REX_RB);
4457 }
4458 } else {
4459 if (adr.index_needs_rex()) {
4460 prefix(REX_RX);
4461 } else {
4462 prefix(REX_R);
4463 }
4464 }
4465 }
4466 }
4468 void Assembler::prefixq(Address adr, Register src) {
4469 if (src->encoding() < 8) {
4470 if (adr.base_needs_rex()) {
4471 if (adr.index_needs_rex()) {
4472 prefix(REX_WXB);
4473 } else {
4474 prefix(REX_WB);
4475 }
4476 } else {
4477 if (adr.index_needs_rex()) {
4478 prefix(REX_WX);
4479 } else {
4480 prefix(REX_W);
4481 }
4482 }
4483 } else {
4484 if (adr.base_needs_rex()) {
4485 if (adr.index_needs_rex()) {
4486 prefix(REX_WRXB);
4487 } else {
4488 prefix(REX_WRB);
4489 }
4490 } else {
4491 if (adr.index_needs_rex()) {
4492 prefix(REX_WRX);
4493 } else {
4494 prefix(REX_WR);
4495 }
4496 }
4497 }
4498 }
4500 void Assembler::prefix(Address adr, XMMRegister reg) {
4501 if (reg->encoding() < 8) {
4502 if (adr.base_needs_rex()) {
4503 if (adr.index_needs_rex()) {
4504 prefix(REX_XB);
4505 } else {
4506 prefix(REX_B);
4507 }
4508 } else {
4509 if (adr.index_needs_rex()) {
4510 prefix(REX_X);
4511 }
4512 }
4513 } else {
4514 if (adr.base_needs_rex()) {
4515 if (adr.index_needs_rex()) {
4516 prefix(REX_RXB);
4517 } else {
4518 prefix(REX_RB);
4519 }
4520 } else {
4521 if (adr.index_needs_rex()) {
4522 prefix(REX_RX);
4523 } else {
4524 prefix(REX_R);
4525 }
4526 }
4527 }
4528 }
4530 void Assembler::prefixq(Address adr, XMMRegister src) {
4531 if (src->encoding() < 8) {
4532 if (adr.base_needs_rex()) {
4533 if (adr.index_needs_rex()) {
4534 prefix(REX_WXB);
4535 } else {
4536 prefix(REX_WB);
4537 }
4538 } else {
4539 if (adr.index_needs_rex()) {
4540 prefix(REX_WX);
4541 } else {
4542 prefix(REX_W);
4543 }
4544 }
4545 } else {
4546 if (adr.base_needs_rex()) {
4547 if (adr.index_needs_rex()) {
4548 prefix(REX_WRXB);
4549 } else {
4550 prefix(REX_WRB);
4551 }
4552 } else {
4553 if (adr.index_needs_rex()) {
4554 prefix(REX_WRX);
4555 } else {
4556 prefix(REX_WR);
4557 }
4558 }
4559 }
4560 }
4562 void Assembler::adcq(Register dst, int32_t imm32) {
4563 (void) prefixq_and_encode(dst->encoding());
4564 emit_arith(0x81, 0xD0, dst, imm32);
4565 }
4567 void Assembler::adcq(Register dst, Address src) {
4568 InstructionMark im(this);
4569 prefixq(src, dst);
4570 emit_byte(0x13);
4571 emit_operand(dst, src);
4572 }
4574 void Assembler::adcq(Register dst, Register src) {
4575 (void) prefixq_and_encode(dst->encoding(), src->encoding());
4576 emit_arith(0x13, 0xC0, dst, src);
4577 }
4579 void Assembler::addq(Address dst, int32_t imm32) {
4580 InstructionMark im(this);
4581 prefixq(dst);
4582 emit_arith_operand(0x81, rax, dst,imm32);
4583 }
4585 void Assembler::addq(Address dst, Register src) {
4586 InstructionMark im(this);
4587 prefixq(dst, src);
4588 emit_byte(0x01);
4589 emit_operand(src, dst);
4590 }
4592 void Assembler::addq(Register dst, int32_t imm32) {
4593 (void) prefixq_and_encode(dst->encoding());
4594 emit_arith(0x81, 0xC0, dst, imm32);
4595 }
4597 void Assembler::addq(Register dst, Address src) {
4598 InstructionMark im(this);
4599 prefixq(src, dst);
4600 emit_byte(0x03);
4601 emit_operand(dst, src);
4602 }
4604 void Assembler::addq(Register dst, Register src) {
4605 (void) prefixq_and_encode(dst->encoding(), src->encoding());
4606 emit_arith(0x03, 0xC0, dst, src);
4607 }
4609 void Assembler::andq(Address dst, int32_t imm32) {
4610 InstructionMark im(this);
4611 prefixq(dst);
4612 emit_byte(0x81);
4613 emit_operand(rsp, dst, 4);
4614 emit_long(imm32);
4615 }
4617 void Assembler::andq(Register dst, int32_t imm32) {
4618 (void) prefixq_and_encode(dst->encoding());
4619 emit_arith(0x81, 0xE0, dst, imm32);
4620 }
4622 void Assembler::andq(Register dst, Address src) {
4623 InstructionMark im(this);
4624 prefixq(src, dst);
4625 emit_byte(0x23);
4626 emit_operand(dst, src);
4627 }
4629 void Assembler::andq(Register dst, Register src) {
4630 (void) prefixq_and_encode(dst->encoding(), src->encoding());
4631 emit_arith(0x23, 0xC0, dst, src);
4632 }
4634 void Assembler::bsfq(Register dst, Register src) {
4635 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
4636 emit_byte(0x0F);
4637 emit_byte(0xBC);
4638 emit_byte(0xC0 | encode);
4639 }
4641 void Assembler::bsrq(Register dst, Register src) {
4642 assert(!VM_Version::supports_lzcnt(), "encoding is treated as LZCNT");
4643 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
4644 emit_byte(0x0F);
4645 emit_byte(0xBD);
4646 emit_byte(0xC0 | encode);
4647 }
4649 void Assembler::bswapq(Register reg) {
4650 int encode = prefixq_and_encode(reg->encoding());
4651 emit_byte(0x0F);
4652 emit_byte(0xC8 | encode);
4653 }
4655 void Assembler::cdqq() {
4656 prefix(REX_W);
4657 emit_byte(0x99);
4658 }
4660 void Assembler::clflush(Address adr) {
4661 prefix(adr);
4662 emit_byte(0x0F);
4663 emit_byte(0xAE);
4664 emit_operand(rdi, adr);
4665 }
4667 void Assembler::cmovq(Condition cc, Register dst, Register src) {
4668 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
4669 emit_byte(0x0F);
4670 emit_byte(0x40 | cc);
4671 emit_byte(0xC0 | encode);
4672 }
4674 void Assembler::cmovq(Condition cc, Register dst, Address src) {
4675 InstructionMark im(this);
4676 prefixq(src, dst);
4677 emit_byte(0x0F);
4678 emit_byte(0x40 | cc);
4679 emit_operand(dst, src);
4680 }
4682 void Assembler::cmpq(Address dst, int32_t imm32) {
4683 InstructionMark im(this);
4684 prefixq(dst);
4685 emit_byte(0x81);
4686 emit_operand(rdi, dst, 4);
4687 emit_long(imm32);
4688 }
4690 void Assembler::cmpq(Register dst, int32_t imm32) {
4691 (void) prefixq_and_encode(dst->encoding());
4692 emit_arith(0x81, 0xF8, dst, imm32);
4693 }
4695 void Assembler::cmpq(Address dst, Register src) {
4696 InstructionMark im(this);
4697 prefixq(dst, src);
4698 emit_byte(0x3B);
4699 emit_operand(src, dst);
4700 }
4702 void Assembler::cmpq(Register dst, Register src) {
4703 (void) prefixq_and_encode(dst->encoding(), src->encoding());
4704 emit_arith(0x3B, 0xC0, dst, src);
4705 }
4707 void Assembler::cmpq(Register dst, Address src) {
4708 InstructionMark im(this);
4709 prefixq(src, dst);
4710 emit_byte(0x3B);
4711 emit_operand(dst, src);
4712 }
4714 void Assembler::cmpxchgq(Register reg, Address adr) {
4715 InstructionMark im(this);
4716 prefixq(adr, reg);
4717 emit_byte(0x0F);
4718 emit_byte(0xB1);
4719 emit_operand(reg, adr);
4720 }
4722 void Assembler::cvtsi2sdq(XMMRegister dst, Register src) {
4723 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4724 int encode = simd_prefix_and_encode_q(dst, dst, src, VEX_SIMD_F2);
4725 emit_byte(0x2A);
4726 emit_byte(0xC0 | encode);
4727 }
4729 void Assembler::cvtsi2sdq(XMMRegister dst, Address src) {
4730 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4731 InstructionMark im(this);
4732 simd_prefix_q(dst, dst, src, VEX_SIMD_F2);
4733 emit_byte(0x2A);
4734 emit_operand(dst, src);
4735 }
4737 void Assembler::cvtsi2ssq(XMMRegister dst, Register src) {
4738 NOT_LP64(assert(VM_Version::supports_sse(), ""));
4739 int encode = simd_prefix_and_encode_q(dst, dst, src, VEX_SIMD_F3);
4740 emit_byte(0x2A);
4741 emit_byte(0xC0 | encode);
4742 }
4744 void Assembler::cvtsi2ssq(XMMRegister dst, Address src) {
4745 NOT_LP64(assert(VM_Version::supports_sse(), ""));
4746 InstructionMark im(this);
4747 simd_prefix_q(dst, dst, src, VEX_SIMD_F3);
4748 emit_byte(0x2A);
4749 emit_operand(dst, src);
4750 }
4752 void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
4753 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4754 int encode = simd_prefix_and_encode_q(dst, src, VEX_SIMD_F2);
4755 emit_byte(0x2C);
4756 emit_byte(0xC0 | encode);
4757 }
4759 void Assembler::cvttss2siq(Register dst, XMMRegister src) {
4760 NOT_LP64(assert(VM_Version::supports_sse(), ""));
4761 int encode = simd_prefix_and_encode_q(dst, src, VEX_SIMD_F3);
4762 emit_byte(0x2C);
4763 emit_byte(0xC0 | encode);
4764 }
4766 void Assembler::decl(Register dst) {
4767 // Don't use it directly. Use MacroAssembler::decrementl() instead.
4768 // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
4769 int encode = prefix_and_encode(dst->encoding());
4770 emit_byte(0xFF);
4771 emit_byte(0xC8 | encode);
4772 }
4774 void Assembler::decq(Register dst) {
4775 // Don't use it directly. Use MacroAssembler::decrementq() instead.
4776 // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
4777 int encode = prefixq_and_encode(dst->encoding());
4778 emit_byte(0xFF);
4779 emit_byte(0xC8 | encode);
4780 }
4782 void Assembler::decq(Address dst) {
4783 // Don't use it directly. Use MacroAssembler::decrementq() instead.
4784 InstructionMark im(this);
4785 prefixq(dst);
4786 emit_byte(0xFF);
4787 emit_operand(rcx, dst);
4788 }
4790 void Assembler::fxrstor(Address src) {
4791 prefixq(src);
4792 emit_byte(0x0F);
4793 emit_byte(0xAE);
4794 emit_operand(as_Register(1), src);
4795 }
4797 void Assembler::fxsave(Address dst) {
4798 prefixq(dst);
4799 emit_byte(0x0F);
4800 emit_byte(0xAE);
4801 emit_operand(as_Register(0), dst);
4802 }
4804 void Assembler::idivq(Register src) {
4805 int encode = prefixq_and_encode(src->encoding());
4806 emit_byte(0xF7);
4807 emit_byte(0xF8 | encode);
4808 }
4810 void Assembler::imulq(Register dst, Register src) {
4811 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
4812 emit_byte(0x0F);
4813 emit_byte(0xAF);
4814 emit_byte(0xC0 | encode);
4815 }
4817 void Assembler::imulq(Register dst, Register src, int value) {
4818 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
4819 if (is8bit(value)) {
4820 emit_byte(0x6B);
4821 emit_byte(0xC0 | encode);
4822 emit_byte(value & 0xFF);
4823 } else {
4824 emit_byte(0x69);
4825 emit_byte(0xC0 | encode);
4826 emit_long(value);
4827 }
4828 }
4830 void Assembler::incl(Register dst) {
4831 // Don't use it directly. Use MacroAssembler::incrementl() instead.
4832 // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
4833 int encode = prefix_and_encode(dst->encoding());
4834 emit_byte(0xFF);
4835 emit_byte(0xC0 | encode);
4836 }
4838 void Assembler::incq(Register dst) {
4839 // Don't use it directly. Use MacroAssembler::incrementq() instead.
4840 // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
4841 int encode = prefixq_and_encode(dst->encoding());
4842 emit_byte(0xFF);
4843 emit_byte(0xC0 | encode);
4844 }
4846 void Assembler::incq(Address dst) {
4847 // Don't use it directly. Use MacroAssembler::incrementq() instead.
4848 InstructionMark im(this);
4849 prefixq(dst);
4850 emit_byte(0xFF);
4851 emit_operand(rax, dst);
4852 }
4854 void Assembler::lea(Register dst, Address src) {
4855 leaq(dst, src);
4856 }
4858 void Assembler::leaq(Register dst, Address src) {
4859 InstructionMark im(this);
4860 prefixq(src, dst);
4861 emit_byte(0x8D);
4862 emit_operand(dst, src);
4863 }
4865 void Assembler::mov64(Register dst, int64_t imm64) {
4866 InstructionMark im(this);
4867 int encode = prefixq_and_encode(dst->encoding());
4868 emit_byte(0xB8 | encode);
4869 emit_long64(imm64);
4870 }
4872 void Assembler::mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec) {
4873 InstructionMark im(this);
4874 int encode = prefixq_and_encode(dst->encoding());
4875 emit_byte(0xB8 | encode);
4876 emit_data64(imm64, rspec);
4877 }
4879 void Assembler::mov_narrow_oop(Register dst, int32_t imm32, RelocationHolder const& rspec) {
4880 InstructionMark im(this);
4881 int encode = prefix_and_encode(dst->encoding());
4882 emit_byte(0xB8 | encode);
4883 emit_data((int)imm32, rspec, narrow_oop_operand);
4884 }
4886 void Assembler::mov_narrow_oop(Address dst, int32_t imm32, RelocationHolder const& rspec) {
4887 InstructionMark im(this);
4888 prefix(dst);
4889 emit_byte(0xC7);
4890 emit_operand(rax, dst, 4);
4891 emit_data((int)imm32, rspec, narrow_oop_operand);
4892 }
4894 void Assembler::cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec) {
4895 InstructionMark im(this);
4896 int encode = prefix_and_encode(src1->encoding());
4897 emit_byte(0x81);
4898 emit_byte(0xF8 | encode);
4899 emit_data((int)imm32, rspec, narrow_oop_operand);
4900 }
4902 void Assembler::cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec) {
4903 InstructionMark im(this);
4904 prefix(src1);
4905 emit_byte(0x81);
4906 emit_operand(rax, src1, 4);
4907 emit_data((int)imm32, rspec, narrow_oop_operand);
4908 }
4910 void Assembler::lzcntq(Register dst, Register src) {
4911 assert(VM_Version::supports_lzcnt(), "encoding is treated as BSR");
4912 emit_byte(0xF3);
4913 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
4914 emit_byte(0x0F);
4915 emit_byte(0xBD);
4916 emit_byte(0xC0 | encode);
4917 }
4919 void Assembler::movdq(XMMRegister dst, Register src) {
4920 // table D-1 says MMX/SSE2
4921 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4922 int encode = simd_prefix_and_encode_q(dst, src, VEX_SIMD_66);
4923 emit_byte(0x6E);
4924 emit_byte(0xC0 | encode);
4925 }
4927 void Assembler::movdq(Register dst, XMMRegister src) {
4928 // table D-1 says MMX/SSE2
4929 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
4930 // swap src/dst to get correct prefix
4931 int encode = simd_prefix_and_encode_q(src, dst, VEX_SIMD_66);
4932 emit_byte(0x7E);
4933 emit_byte(0xC0 | encode);
4934 }
4936 void Assembler::movq(Register dst, Register src) {
4937 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
4938 emit_byte(0x8B);
4939 emit_byte(0xC0 | encode);
4940 }
4942 void Assembler::movq(Register dst, Address src) {
4943 InstructionMark im(this);
4944 prefixq(src, dst);
4945 emit_byte(0x8B);
4946 emit_operand(dst, src);
4947 }
4949 void Assembler::movq(Address dst, Register src) {
4950 InstructionMark im(this);
4951 prefixq(dst, src);
4952 emit_byte(0x89);
4953 emit_operand(src, dst);
4954 }
4956 void Assembler::movsbq(Register dst, Address src) {
4957 InstructionMark im(this);
4958 prefixq(src, dst);
4959 emit_byte(0x0F);
4960 emit_byte(0xBE);
4961 emit_operand(dst, src);
4962 }
4964 void Assembler::movsbq(Register dst, Register src) {
4965 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
4966 emit_byte(0x0F);
4967 emit_byte(0xBE);
4968 emit_byte(0xC0 | encode);
4969 }
4971 void Assembler::movslq(Register dst, int32_t imm32) {
4972 // dbx shows movslq(rcx, 3) as movq $0x0000000049000000,(%rbx)
4973 // and movslq(r8, 3) as movl $0x0000000048000000,(%rbx)
4974 // as a result we shouldn't use it until tested at runtime...
4975 ShouldNotReachHere();
4976 InstructionMark im(this);
4977 int encode = prefixq_and_encode(dst->encoding());
4978 emit_byte(0xC7 | encode);
4979 emit_long(imm32);
4980 }
4982 void Assembler::movslq(Address dst, int32_t imm32) {
4983 assert(is_simm32(imm32), "lost bits");
4984 InstructionMark im(this);
4985 prefixq(dst);
4986 emit_byte(0xC7);
4987 emit_operand(rax, dst, 4);
4988 emit_long(imm32);
4989 }
4991 void Assembler::movslq(Register dst, Address src) {
4992 InstructionMark im(this);
4993 prefixq(src, dst);
4994 emit_byte(0x63);
4995 emit_operand(dst, src);
4996 }
4998 void Assembler::movslq(Register dst, Register src) {
4999 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
5000 emit_byte(0x63);
5001 emit_byte(0xC0 | encode);
5002 }
5004 void Assembler::movswq(Register dst, Address src) {
5005 InstructionMark im(this);
5006 prefixq(src, dst);
5007 emit_byte(0x0F);
5008 emit_byte(0xBF);
5009 emit_operand(dst, src);
5010 }
5012 void Assembler::movswq(Register dst, Register src) {
5013 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
5014 emit_byte(0x0F);
5015 emit_byte(0xBF);
5016 emit_byte(0xC0 | encode);
5017 }
5019 void Assembler::movzbq(Register dst, Address src) {
5020 InstructionMark im(this);
5021 prefixq(src, dst);
5022 emit_byte(0x0F);
5023 emit_byte(0xB6);
5024 emit_operand(dst, src);
5025 }
5027 void Assembler::movzbq(Register dst, Register src) {
5028 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
5029 emit_byte(0x0F);
5030 emit_byte(0xB6);
5031 emit_byte(0xC0 | encode);
5032 }
5034 void Assembler::movzwq(Register dst, Address src) {
5035 InstructionMark im(this);
5036 prefixq(src, dst);
5037 emit_byte(0x0F);
5038 emit_byte(0xB7);
5039 emit_operand(dst, src);
5040 }
5042 void Assembler::movzwq(Register dst, Register src) {
5043 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
5044 emit_byte(0x0F);
5045 emit_byte(0xB7);
5046 emit_byte(0xC0 | encode);
5047 }
5049 void Assembler::negq(Register dst) {
5050 int encode = prefixq_and_encode(dst->encoding());
5051 emit_byte(0xF7);
5052 emit_byte(0xD8 | encode);
5053 }
5055 void Assembler::notq(Register dst) {
5056 int encode = prefixq_and_encode(dst->encoding());
5057 emit_byte(0xF7);
5058 emit_byte(0xD0 | encode);
5059 }
5061 void Assembler::orq(Address dst, int32_t imm32) {
5062 InstructionMark im(this);
5063 prefixq(dst);
5064 emit_byte(0x81);
5065 emit_operand(rcx, dst, 4);
5066 emit_long(imm32);
5067 }
5069 void Assembler::orq(Register dst, int32_t imm32) {
5070 (void) prefixq_and_encode(dst->encoding());
5071 emit_arith(0x81, 0xC8, dst, imm32);
5072 }
5074 void Assembler::orq(Register dst, Address src) {
5075 InstructionMark im(this);
5076 prefixq(src, dst);
5077 emit_byte(0x0B);
5078 emit_operand(dst, src);
5079 }
5081 void Assembler::orq(Register dst, Register src) {
5082 (void) prefixq_and_encode(dst->encoding(), src->encoding());
5083 emit_arith(0x0B, 0xC0, dst, src);
5084 }
5086 void Assembler::popa() { // 64bit
5087 movq(r15, Address(rsp, 0));
5088 movq(r14, Address(rsp, wordSize));
5089 movq(r13, Address(rsp, 2 * wordSize));
5090 movq(r12, Address(rsp, 3 * wordSize));
5091 movq(r11, Address(rsp, 4 * wordSize));
5092 movq(r10, Address(rsp, 5 * wordSize));
5093 movq(r9, Address(rsp, 6 * wordSize));
5094 movq(r8, Address(rsp, 7 * wordSize));
5095 movq(rdi, Address(rsp, 8 * wordSize));
5096 movq(rsi, Address(rsp, 9 * wordSize));
5097 movq(rbp, Address(rsp, 10 * wordSize));
5098 // skip rsp
5099 movq(rbx, Address(rsp, 12 * wordSize));
5100 movq(rdx, Address(rsp, 13 * wordSize));
5101 movq(rcx, Address(rsp, 14 * wordSize));
5102 movq(rax, Address(rsp, 15 * wordSize));
5104 addq(rsp, 16 * wordSize);
5105 }
5107 void Assembler::popcntq(Register dst, Address src) {
5108 assert(VM_Version::supports_popcnt(), "must support");
5109 InstructionMark im(this);
5110 emit_byte(0xF3);
5111 prefixq(src, dst);
5112 emit_byte(0x0F);
5113 emit_byte(0xB8);
5114 emit_operand(dst, src);
5115 }
5117 void Assembler::popcntq(Register dst, Register src) {
5118 assert(VM_Version::supports_popcnt(), "must support");
5119 emit_byte(0xF3);
5120 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
5121 emit_byte(0x0F);
5122 emit_byte(0xB8);
5123 emit_byte(0xC0 | encode);
5124 }
5126 void Assembler::popq(Address dst) {
5127 InstructionMark im(this);
5128 prefixq(dst);
5129 emit_byte(0x8F);
5130 emit_operand(rax, dst);
5131 }
5133 void Assembler::pusha() { // 64bit
5134 // we have to store the original rsp. ABI says that 128 bytes
5135 // below rsp are local scratch.
5136 movq(Address(rsp, -5 * wordSize), rsp);
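// (After the subq below, this slot is rsp + 11 * wordSize -- exactly the
// slot that popa skips when restoring.)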
5138 subq(rsp, 16 * wordSize);
5140 movq(Address(rsp, 15 * wordSize), rax);
5141 movq(Address(rsp, 14 * wordSize), rcx);
5142 movq(Address(rsp, 13 * wordSize), rdx);
5143 movq(Address(rsp, 12 * wordSize), rbx);
5144 // skip rsp
5145 movq(Address(rsp, 10 * wordSize), rbp);
5146 movq(Address(rsp, 9 * wordSize), rsi);
5147 movq(Address(rsp, 8 * wordSize), rdi);
5148 movq(Address(rsp, 7 * wordSize), r8);
5149 movq(Address(rsp, 6 * wordSize), r9);
5150 movq(Address(rsp, 5 * wordSize), r10);
5151 movq(Address(rsp, 4 * wordSize), r11);
5152 movq(Address(rsp, 3 * wordSize), r12);
5153 movq(Address(rsp, 2 * wordSize), r13);
5154 movq(Address(rsp, wordSize), r14);
5155 movq(Address(rsp, 0), r15);
5156 }
5158 void Assembler::pushq(Address src) {
5159 InstructionMark im(this);
5160 prefixq(src);
5161 emit_byte(0xFF);
5162 emit_operand(rsi, src);
5163 }
5165 void Assembler::rclq(Register dst, int imm8) {
5166 assert(isShiftCount(imm8 >> 1), "illegal shift count");
5167 int encode = prefixq_and_encode(dst->encoding());
5168 if (imm8 == 1) {
5169 emit_byte(0xD1);
5170 emit_byte(0xD0 | encode);
5171 } else {
5172 emit_byte(0xC1);
5173 emit_byte(0xD0 | encode);
5174 emit_byte(imm8);
5175 }
5176 }
5177 void Assembler::sarq(Register dst, int imm8) {
5178 assert(isShiftCount(imm8 >> 1), "illegal shift count");
5179 int encode = prefixq_and_encode(dst->encoding());
5180 if (imm8 == 1) {
5181 emit_byte(0xD1);
5182 emit_byte(0xF8 | encode);
5183 } else {
5184 emit_byte(0xC1);
5185 emit_byte(0xF8 | encode);
5186 emit_byte(imm8);
5187 }
5188 }
5190 void Assembler::sarq(Register dst) {
5191 int encode = prefixq_and_encode(dst->encoding());
5192 emit_byte(0xD3);
5193 emit_byte(0xF8 | encode);
5194 }
5196 void Assembler::sbbq(Address dst, int32_t imm32) {
5197 InstructionMark im(this);
5198 prefixq(dst);
5199 emit_arith_operand(0x81, rbx, dst, imm32);
5200 }
5202 void Assembler::sbbq(Register dst, int32_t imm32) {
5203 (void) prefixq_and_encode(dst->encoding());
5204 emit_arith(0x81, 0xD8, dst, imm32);
5205 }
5207 void Assembler::sbbq(Register dst, Address src) {
5208 InstructionMark im(this);
5209 prefixq(src, dst);
5210 emit_byte(0x1B);
5211 emit_operand(dst, src);
5212 }
5214 void Assembler::sbbq(Register dst, Register src) {
5215 (void) prefixq_and_encode(dst->encoding(), src->encoding());
5216 emit_arith(0x1B, 0xC0, dst, src);
5217 }
5219 void Assembler::shlq(Register dst, int imm8) {
5220 assert(isShiftCount(imm8 >> 1), "illegal shift count");
5221 int encode = prefixq_and_encode(dst->encoding());
5222 if (imm8 == 1) {
5223 emit_byte(0xD1);
5224 emit_byte(0xE0 | encode);
5225 } else {
5226 emit_byte(0xC1);
5227 emit_byte(0xE0 | encode);
5228 emit_byte(imm8);
5229 }
5230 }
5232 void Assembler::shlq(Register dst) {
5233 int encode = prefixq_and_encode(dst->encoding());
5234 emit_byte(0xD3);
5235 emit_byte(0xE0 | encode);
5236 }
5238 void Assembler::shrq(Register dst, int imm8) {
5239 assert(isShiftCount(imm8 >> 1), "illegal shift count");
5240 int encode = prefixq_and_encode(dst->encoding());
5241 emit_byte(0xC1);
5242 emit_byte(0xE8 | encode);
5243 emit_byte(imm8);
5244 }
5246 void Assembler::shrq(Register dst) {
5247 int encode = prefixq_and_encode(dst->encoding());
5248 emit_byte(0xD3);
5249 emit_byte(0xE8 | encode);
5250 }
5252 void Assembler::subq(Address dst, int32_t imm32) {
5253 InstructionMark im(this);
5254 prefixq(dst);
5255 emit_arith_operand(0x81, rbp, dst, imm32);
5256 }
5258 void Assembler::subq(Address dst, Register src) {
5259 InstructionMark im(this);
5260 prefixq(dst, src);
5261 emit_byte(0x29);
5262 emit_operand(src, dst);
5263 }
5265 void Assembler::subq(Register dst, int32_t imm32) {
5266 (void) prefixq_and_encode(dst->encoding());
5267 emit_arith(0x81, 0xE8, dst, imm32);
5268 }
5270 // Force generation of a 4 byte immediate value even if it fits into 8bit
5271 void Assembler::subq_imm32(Register dst, int32_t imm32) {
5272 (void) prefixq_and_encode(dst->encoding());
5273 emit_arith_imm32(0x81, 0xE8, dst, imm32);
5274 }
5276 void Assembler::subq(Register dst, Address src) {
5277 InstructionMark im(this);
5278 prefixq(src, dst);
5279 emit_byte(0x2B);
5280 emit_operand(dst, src);
5281 }
5283 void Assembler::subq(Register dst, Register src) {
5284 (void) prefixq_and_encode(dst->encoding(), src->encoding());
5285 emit_arith(0x2B, 0xC0, dst, src);
5286 }
5288 void Assembler::testq(Register dst, int32_t imm32) {
5289 // not using emit_arith because test
5290 // doesn't support sign-extension of
5291 // 8bit operands
5292 int encode = dst->encoding();
5293 if (encode == 0) {
5294 prefix(REX_W);
5295 emit_byte(0xA9);
5296 } else {
5297 encode = prefixq_and_encode(encode);
5298 emit_byte(0xF7);
5299 emit_byte(0xC0 | encode);
5300 }
5301 emit_long(imm32);
5302 }
5304 void Assembler::testq(Register dst, Register src) {
5305 (void) prefixq_and_encode(dst->encoding(), src->encoding());
5306 emit_arith(0x85, 0xC0, dst, src);
5307 }
5309 void Assembler::xaddq(Address dst, Register src) {
5310 InstructionMark im(this);
5311 prefixq(dst, src);
5312 emit_byte(0x0F);
5313 emit_byte(0xC1);
5314 emit_operand(src, dst);
5315 }
5317 void Assembler::xchgq(Register dst, Address src) {
5318 InstructionMark im(this);
5319 prefixq(src, dst);
5320 emit_byte(0x87);
5321 emit_operand(dst, src);
5322 }
5324 void Assembler::xchgq(Register dst, Register src) {
5325 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
5326 emit_byte(0x87);
5327 emit_byte(0xC0 | encode);
5328 }
5330 void Assembler::xorq(Register dst, Register src) {
5331 (void) prefixq_and_encode(dst->encoding(), src->encoding());
5332 emit_arith(0x33, 0xC0, dst, src);
5333 }
5335 void Assembler::xorq(Register dst, Address src) {
5336 InstructionMark im(this);
5337 prefixq(src, dst);
5338 emit_byte(0x33);
5339 emit_operand(dst, src);
5340 }
5342 #endif // !LP64
5344 static Assembler::Condition reverse[] = {
5345 Assembler::noOverflow /* overflow = 0x0 */ ,
5346 Assembler::overflow /* noOverflow = 0x1 */ ,
5347 Assembler::aboveEqual /* carrySet = 0x2, below = 0x2 */ ,
5348 Assembler::below /* aboveEqual = 0x3, carryClear = 0x3 */ ,
5349 Assembler::notZero /* zero = 0x4, equal = 0x4 */ ,
5350 Assembler::zero /* notZero = 0x5, notEqual = 0x5 */ ,
5351 Assembler::above /* belowEqual = 0x6 */ ,
5352 Assembler::belowEqual /* above = 0x7 */ ,
5353 Assembler::positive /* negative = 0x8 */ ,
5354 Assembler::negative /* positive = 0x9 */ ,
5355 Assembler::noParity /* parity = 0xa */ ,
5356 Assembler::parity /* noParity = 0xb */ ,
5357 Assembler::greaterEqual /* less = 0xc */ ,
5358 Assembler::less /* greaterEqual = 0xd */ ,
5359 Assembler::greater /* lessEqual = 0xe */ ,
5360 Assembler::lessEqual /* greater = 0xf, */
5362 };
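// Example: reverse[Assembler::zero /* 0x4 */] is Assembler::notZero; the
// table gives a one-step lookup for negating a condition code
// (zero <-> notZero, less <-> greaterEqual, and so on).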
5365 // Implementation of MacroAssembler
5367 // First all the versions that have distinct versions depending on 32/64 bit
5368 // Unless the difference is trivial (1 line or so).
5370 #ifndef _LP64
5372 // 32bit versions
5374 Address MacroAssembler::as_Address(AddressLiteral adr) {
5375 return Address(adr.target(), adr.rspec());
5376 }
5378 Address MacroAssembler::as_Address(ArrayAddress adr) {
5379 return Address::make_array(adr);
5380 }
5382 int MacroAssembler::biased_locking_enter(Register lock_reg,
5383 Register obj_reg,
5384 Register swap_reg,
5385 Register tmp_reg,
5386 bool swap_reg_contains_mark,
5387 Label& done,
5388 Label* slow_case,
5389 BiasedLockingCounters* counters) {
5390 assert(UseBiasedLocking, "why call this otherwise?");
5391 assert(swap_reg == rax, "swap_reg must be rax, for cmpxchg");
5392 assert_different_registers(lock_reg, obj_reg, swap_reg);
5394 if (PrintBiasedLockingStatistics && counters == NULL)
5395 counters = BiasedLocking::counters();
5397 bool need_tmp_reg = false;
5398 if (tmp_reg == noreg) {
5399 need_tmp_reg = true;
5400 tmp_reg = lock_reg;
5401 } else {
5402 assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
5403 }
5404 assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
5405 Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes());
5406 Address klass_addr (obj_reg, oopDesc::klass_offset_in_bytes());
5407 Address saved_mark_addr(lock_reg, 0);
5409 // Biased locking
5410 // See whether the lock is currently biased toward our thread and
5411 // whether the epoch is still valid
5412 // Note that the runtime guarantees sufficient alignment of JavaThread
5413 // pointers to allow age to be placed into low bits
5414 // First check to see whether biasing is even enabled for this object
5415 Label cas_label;
5416 int null_check_offset = -1;
5417 if (!swap_reg_contains_mark) {
5418 null_check_offset = offset();
5419 movl(swap_reg, mark_addr);
5420 }
5421 if (need_tmp_reg) {
5422 push(tmp_reg);
5423 }
5424 movl(tmp_reg, swap_reg);
5425 andl(tmp_reg, markOopDesc::biased_lock_mask_in_place);
5426 cmpl(tmp_reg, markOopDesc::biased_lock_pattern);
5427 if (need_tmp_reg) {
5428 pop(tmp_reg);
5429 }
5430 jcc(Assembler::notEqual, cas_label);
5431 // The bias pattern is present in the object's header. Need to check
5432 // whether the bias owner and the epoch are both still current.
5433 // Note that because there is no current thread register on x86 we
5434 // need to store off the mark word we read out of the object to
5435 // avoid reloading it and needing to recheck invariants below. This
5436 // store is unfortunate but it makes the overall code shorter and
5437 // simpler.
5438 movl(saved_mark_addr, swap_reg);
5439 if (need_tmp_reg) {
5440 push(tmp_reg);
5441 }
5442 get_thread(tmp_reg);
5443 xorl(swap_reg, tmp_reg);
5444 if (swap_reg_contains_mark) {
5445 null_check_offset = offset();
5446 }
5447 movl(tmp_reg, klass_addr);
5448 xorl(swap_reg, Address(tmp_reg, Klass::prototype_header_offset()));
5449 andl(swap_reg, ~((int) markOopDesc::age_mask_in_place));
5450 if (need_tmp_reg) {
5451 pop(tmp_reg);
5452 }
5453 if (counters != NULL) {
5454 cond_inc32(Assembler::zero,
5455 ExternalAddress((address)counters->biased_lock_entry_count_addr()));
5456 }
5457 jcc(Assembler::equal, done);
5459 Label try_revoke_bias;
5460 Label try_rebias;
5462 // At this point we know that the header has the bias pattern and
5463 // that we are not the bias owner in the current epoch. We need to
5464 // figure out more details about the state of the header in order to
5465 // know what operations can be legally performed on the object's
5466 // header.
5468 // If the low three bits in the xor result aren't clear, that means
5469 // the prototype header is no longer biased and we have to revoke
5470 // the bias on this object.
5471 testl(swap_reg, markOopDesc::biased_lock_mask_in_place);
5472 jcc(Assembler::notZero, try_revoke_bias);
5474 // Biasing is still enabled for this data type. See whether the
5475 // epoch of the current bias is still valid, meaning that the epoch
5476 // bits of the mark word are equal to the epoch bits of the
5477 // prototype header. (Note that the prototype header's epoch bits
5478 // only change at a safepoint.) If not, attempt to rebias the object
5479 // toward the current thread. Note that we must be absolutely sure
5480 // that the current epoch is invalid in order to do this because
5481 // otherwise the manipulations it performs on the mark word are
5482 // illegal.
5483 testl(swap_reg, markOopDesc::epoch_mask_in_place);
5484 jcc(Assembler::notZero, try_rebias);
5486 // The epoch of the current bias is still valid but we know nothing
5487 // about the owner; it might be set or it might be clear. Try to
5488 // acquire the bias of the object using an atomic operation. If this
5489 // fails we will go in to the runtime to revoke the object's bias.
5490 // Note that we first construct the presumed unbiased header so we
5491 // don't accidentally blow away another thread's valid bias.
5492 movl(swap_reg, saved_mark_addr);
5493 andl(swap_reg,
5494 markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
5495 if (need_tmp_reg) {
5496 push(tmp_reg);
5497 }
5498 get_thread(tmp_reg);
5499 orl(tmp_reg, swap_reg);
5500 if (os::is_MP()) {
5501 lock();
5502 }
5503 cmpxchgptr(tmp_reg, Address(obj_reg, 0));
5504 if (need_tmp_reg) {
5505 pop(tmp_reg);
5506 }
5507 // If the biasing toward our thread failed, this means that
5508 // another thread succeeded in biasing it toward itself and we
5509 // need to revoke that bias. The revocation will occur in the
5510 // interpreter runtime in the slow case.
5511 if (counters != NULL) {
5512 cond_inc32(Assembler::zero,
5513 ExternalAddress((address)counters->anonymously_biased_lock_entry_count_addr()));
5514 }
5515 if (slow_case != NULL) {
5516 jcc(Assembler::notZero, *slow_case);
5517 }
5518 jmp(done);
5520 bind(try_rebias);
5521 // At this point we know the epoch has expired, meaning that the
5522 // current "bias owner", if any, is actually invalid. Under these
5523 // circumstances _only_, we are allowed to use the current header's
5524 // value as the comparison value when doing the cas to acquire the
5525 // bias in the current epoch. In other words, we allow transfer of
5526 // the bias from one thread to another directly in this situation.
5527 //
5528 // FIXME: due to a lack of registers we currently blow away the age
5529 // bits in this situation. Should attempt to preserve them.
5530 if (need_tmp_reg) {
5531 push(tmp_reg);
5532 }
5533 get_thread(tmp_reg);
5534 movl(swap_reg, klass_addr);
5535 orl(tmp_reg, Address(swap_reg, Klass::prototype_header_offset()));
5536 movl(swap_reg, saved_mark_addr);
5537 if (os::is_MP()) {
5538 lock();
5539 }
5540 cmpxchgptr(tmp_reg, Address(obj_reg, 0));
5541 if (need_tmp_reg) {
5542 pop(tmp_reg);
5543 }
5544 // If the biasing toward our thread failed, then another thread
5545 // succeeded in biasing it toward itself and we need to revoke that
5546 // bias. The revocation will occur in the runtime in the slow case.
5547 if (counters != NULL) {
5548 cond_inc32(Assembler::zero,
5549 ExternalAddress((address)counters->rebiased_lock_entry_count_addr()));
5550 }
5551 if (slow_case != NULL) {
5552 jcc(Assembler::notZero, *slow_case);
5553 }
5554 jmp(done);
5556 bind(try_revoke_bias);
5557 // The prototype mark in the klass doesn't have the bias bit set any
5558 // more, indicating that objects of this data type are not supposed
5559 // to be biased any more. We are going to try to reset the mark of
5560 // this object to the prototype value and fall through to the
5561 // CAS-based locking scheme. Note that if our CAS fails, it means
5562 // that another thread raced us for the privilege of revoking the
5563 // bias of this particular object, so it's okay to continue in the
5564 // normal locking code.
5565 //
5566 // FIXME: due to a lack of registers we currently blow away the age
5567 // bits in this situation. Should attempt to preserve them.
5568 movl(swap_reg, saved_mark_addr);
5569 if (need_tmp_reg) {
5570 push(tmp_reg);
5571 }
5572 movl(tmp_reg, klass_addr);
5573 movl(tmp_reg, Address(tmp_reg, Klass::prototype_header_offset()));
5574 if (os::is_MP()) {
5575 lock();
5576 }
5577 cmpxchgptr(tmp_reg, Address(obj_reg, 0));
5578 if (need_tmp_reg) {
5579 pop(tmp_reg);
5580 }
5581 // Fall through to the normal CAS-based lock, because no matter what
5582 // the result of the above CAS, some thread must have succeeded in
5583 // removing the bias bit from the object's header.
5584 if (counters != NULL) {
5585 cond_inc32(Assembler::zero,
5586 ExternalAddress((address)counters->revoked_lock_entry_count_addr()));
5587 }
5589 bind(cas_label);
5591 return null_check_offset;
5592 }
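// Editor's sketch: the fast-path test emitted above, restated in plain C++
// over a hypothetical mark word. The mask, pattern and age constants here
// are illustrative assumptions; the authoritative layout is markOopDesc's.
static inline bool biased_toward_us_sketch(intptr_t mark, intptr_t thread,
                                           intptr_t prototype_header) {
  const intptr_t biased_lock_mask    = 0x7;  // assumed: lock bits + bias bit
  const intptr_t biased_lock_pattern = 0x5;  // assumed bias pattern
  const intptr_t age_mask            = 0x78; // assumed age field position
  if ((mark & biased_lock_mask) != biased_lock_pattern)
    return false;                  // header not biased: take the CAS path
  // xor in the current thread and the klass prototype header; once the age
  // bits are masked off, zero means bias owner and epoch are both current.
  return ((mark ^ thread ^ prototype_header) & ~age_mask) == 0;
}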
5593 void MacroAssembler::call_VM_leaf_base(address entry_point,
5594 int number_of_arguments) {
5595 call(RuntimeAddress(entry_point));
5596 increment(rsp, number_of_arguments * wordSize);
5597 }
5599 void MacroAssembler::cmpklass(Address src1, Metadata* obj) {
5600 cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
5601 }
5603 void MacroAssembler::cmpklass(Register src1, Metadata* obj) {
5604 cmp_literal32(src1, (int32_t)obj, metadata_Relocation::spec_for_immediate());
5605 }
5607 void MacroAssembler::cmpoop(Address src1, jobject obj) {
5608 cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
5609 }
5611 void MacroAssembler::cmpoop(Register src1, jobject obj) {
5612 cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
5613 }
5615 void MacroAssembler::extend_sign(Register hi, Register lo) {
5616 // According to Intel Doc. AP-526, "Integer Divide", p.18.
5617 if (VM_Version::is_P6() && hi == rdx && lo == rax) {
5618 cdql();
5619 } else {
5620 movl(hi, lo);
5621 sarl(hi, 31);
5622 }
5623 }
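// Editor's sketch of the fallback path above: sign-extending lo into a
// hi:lo pair is an arithmetic right shift that replicates the sign bit
// (assuming, as on x86, that >> of a negative int shifts arithmetically).
static inline void extend_sign_sketch(int32_t lo, int32_t* hi) {
  *hi = lo >> 31;  // all ones if lo < 0, all zeros otherwise (movl + sarl)
}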
5625 void MacroAssembler::jC2(Register tmp, Label& L) {
5626 // set parity bit if FPU flag C2 is set (via rax)
5627 save_rax(tmp);
5628 fwait(); fnstsw_ax();
5629 sahf();
5630 restore_rax(tmp);
5631 // branch
5632 jcc(Assembler::parity, L);
5633 }
5635 void MacroAssembler::jnC2(Register tmp, Label& L) {
5636 // set parity bit if FPU flag C2 is set (via rax)
5637 save_rax(tmp);
5638 fwait(); fnstsw_ax();
5639 sahf();
5640 restore_rax(tmp);
5641 // branch
5642 jcc(Assembler::noParity, L);
5643 }
5645 // 32bit can do a case table jump in one instruction but we no longer allow the base
5646 // to be installed in the Address class
5647 void MacroAssembler::jump(ArrayAddress entry) {
5648 jmp(as_Address(entry));
5649 }
5651 // Note: y_lo will be destroyed
5652 void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
5653 // Long compare for Java (semantics as described in JVM spec.)
5654 Label high, low, done;
5656 cmpl(x_hi, y_hi);
5657 jcc(Assembler::less, low);
5658 jcc(Assembler::greater, high);
5659 // x_hi is the return register
5660 xorl(x_hi, x_hi);
5661 cmpl(x_lo, y_lo);
5662 jcc(Assembler::below, low);
5663 jcc(Assembler::equal, done);
5665 bind(high);
5666 xorl(x_hi, x_hi);
5667 increment(x_hi);
5668 jmp(done);
5670 bind(low);
5671 xorl(x_hi, x_hi);
5672 decrementl(x_hi);
5674 bind(done);
5675 }
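// Editor's sketch of the Java long comparison implemented above: a signed
// compare of the high words, then an unsigned compare of the low words to
// break ties, yielding -1, 0 or 1 (the value left in x_hi).
static inline int lcmp2int_sketch(int32_t x_hi, uint32_t x_lo,
                                  int32_t y_hi, uint32_t y_lo) {
  if (x_hi != y_hi) return x_hi < y_hi ? -1 : 1;  // signed high-word compare
  if (x_lo != y_lo) return x_lo < y_lo ? -1 : 1;  // unsigned low-word compare
  return 0;
}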
5677 void MacroAssembler::lea(Register dst, AddressLiteral src) {
5678 mov_literal32(dst, (int32_t)src.target(), src.rspec());
5679 }
5681 void MacroAssembler::lea(Address dst, AddressLiteral adr) {
5682 // leal(dst, as_Address(adr));
5683 // see note in movl as to why we must use a move
5684 mov_literal32(dst, (int32_t) adr.target(), adr.rspec());
5685 }
5687 void MacroAssembler::leave() {
5688 mov(rsp, rbp);
5689 pop(rbp);
5690 }
5692 void MacroAssembler::lmul(int x_rsp_offset, int y_rsp_offset) {
5693 // Multiplication of two Java long values stored on the stack
5694 // as illustrated below. Result is in rdx:rax.
5695 //
5696 // rsp ---> [ ?? ] \ \
5697 // .... | y_rsp_offset |
5698 // [ y_lo ] / (in bytes) | x_rsp_offset
5699 // [ y_hi ] | (in bytes)
5700 // .... |
5701 // [ x_lo ] /
5702 // [ x_hi ]
5703 // ....
5704 //
5705 // Basic idea: lo(result) = lo(x_lo * y_lo)
5706 // hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) + lo(x_lo * y_hi)
5707 Address x_hi(rsp, x_rsp_offset + wordSize); Address x_lo(rsp, x_rsp_offset);
5708 Address y_hi(rsp, y_rsp_offset + wordSize); Address y_lo(rsp, y_rsp_offset);
5709 Label quick;
5710 // load x_hi, y_hi and check if quick
5711 // multiplication is possible
5712 movl(rbx, x_hi);
5713 movl(rcx, y_hi);
5714 movl(rax, rbx);
5715 orl(rbx, rcx); // rbx = 0 <=> x_hi = 0 and y_hi = 0
5716 jcc(Assembler::zero, quick); // if rbx = 0 do quick multiply
5717 // do full multiplication
5718 // 1st step
5719 mull(y_lo); // x_hi * y_lo
5720 movl(rbx, rax); // save lo(x_hi * y_lo) in rbx
5721 // 2nd step
5722 movl(rax, x_lo);
5723 mull(rcx); // x_lo * y_hi
5724 addl(rbx, rax); // add lo(x_lo * y_hi) to rbx
5725 // 3rd step
5726 bind(quick); // note: rbx = 0 if quick multiply!
5727 movl(rax, x_lo);
5728 mull(y_lo); // x_lo * y_lo
5729 addl(rdx, rbx); // correct hi(x_lo * y_lo)
5730 }
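// Editor's sketch of the decomposition described in the comment above: only
// the low 64 bits of the product are kept, so x_hi * y_hi and the carries
// out of the cross products can be dropped entirely.
static inline uint64_t lmul_sketch(uint32_t x_hi, uint32_t x_lo,
                                   uint32_t y_hi, uint32_t y_lo) {
  uint64_t lo_prod = (uint64_t)x_lo * y_lo;      // mull(y_lo)
  uint32_t hi_corr = x_hi * y_lo + x_lo * y_hi;  // low halves of cross terms
  return lo_prod + ((uint64_t)hi_corr << 32);    // addl(rdx, rbx)
}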
5732 void MacroAssembler::lneg(Register hi, Register lo) {
5733 negl(lo);
5734 adcl(hi, 0);
5735 negl(hi);
5736 }
5738 void MacroAssembler::lshl(Register hi, Register lo) {
5739 // Java shift left long support (semantics as described in JVM spec., p.305)
5740 // (basic idea for shift counts s >= n: x << s == (x << n) << (s - n))
5741 // shift value is in rcx !
5742 assert(hi != rcx, "must not use rcx");
5743 assert(lo != rcx, "must not use rcx");
5744 const Register s = rcx; // shift count
5745 const int n = BitsPerWord;
5746 Label L;
5747 andl(s, 0x3f); // s := s & 0x3f (s < 0x40)
5748 cmpl(s, n); // if (s < n)
5749 jcc(Assembler::less, L); // else (s >= n)
5750 movl(hi, lo); // x := x << n
5751 xorl(lo, lo);
5752 // Note: subl(s, n) is not needed since the Intel shift instructions use the count in rcx mod n!
5753 bind(L); // s (mod n) < n
5754 shldl(hi, lo); // x := x << s
5755 shll(lo);
5756 }
5759 void MacroAssembler::lshr(Register hi, Register lo, bool sign_extension) {
5760 // Java shift right long support (semantics as described in JVM spec., p.306 & p.310)
5761 // (basic idea for shift counts s >= n: x >> s == (x >> n) >> (s - n))
5762 assert(hi != rcx, "must not use rcx");
5763 assert(lo != rcx, "must not use rcx");
5764 const Register s = rcx; // shift count
5765 const int n = BitsPerWord;
5766 Label L;
5767 andl(s, 0x3f); // s := s & 0x3f (s < 0x40)
5768 cmpl(s, n); // if (s < n)
5769 jcc(Assembler::less, L); // else (s >= n)
5770 movl(lo, hi); // x := x >> n
5771 if (sign_extension) sarl(hi, 31);
5772 else xorl(hi, hi);
5773 // Note: subl(s, n) is not needed since the Intel shift instructions use the count in rcx mod n!
5774 bind(L); // s (mod n) < n
5775 shrdl(lo, hi); // x := x >> s
5776 if (sign_extension) sarl(hi);
5777 else shrl(hi);
5778 }
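// Editor's sketch of the shift decomposition used by lshl/lshr above: for a
// count s in [32, 64) first move one half across (x << 32), then let the
// hardware's mod-32 masking of rcx supply the remaining (s - 32) shift.
static inline void lshl_sketch(uint32_t* hi, uint32_t* lo, int s) {
  s &= 0x3f;                                      // andl(s, 0x3f)
  if (s >= 32) { *hi = *lo; *lo = 0; }            // x := x << 32
  s &= 0x1f;                                      // shift insns mask rcx mod 32
  *hi = (*hi << s) | (s ? (*lo >> (32 - s)) : 0); // shldl(hi, lo)
  *lo <<= s;                                      // shll(lo)
}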
5780 void MacroAssembler::movoop(Register dst, jobject obj) {
5781 mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
5782 }
5784 void MacroAssembler::movoop(Address dst, jobject obj) {
5785 mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
5786 }
5788 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
5789 mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
5790 }
5792 void MacroAssembler::mov_metadata(Address dst, Metadata* obj) {
5793 mov_literal32(dst, (int32_t)obj, metadata_Relocation::spec_for_immediate());
5794 }
5796 void MacroAssembler::movptr(Register dst, AddressLiteral src) {
5797 if (src.is_lval()) {
5798 mov_literal32(dst, (intptr_t)src.target(), src.rspec());
5799 } else {
5800 movl(dst, as_Address(src));
5801 }
5802 }
5804 void MacroAssembler::movptr(ArrayAddress dst, Register src) {
5805 movl(as_Address(dst), src);
5806 }
5808 void MacroAssembler::movptr(Register dst, ArrayAddress src) {
5809 movl(dst, as_Address(src));
5810 }
5812 // src should NEVER be a real pointer. Use AddressLiteral for true pointers
5813 void MacroAssembler::movptr(Address dst, intptr_t src) {
5814 movl(dst, src);
5815 }
5818 void MacroAssembler::pop_callee_saved_registers() {
5819 pop(rcx);
5820 pop(rdx);
5821 pop(rdi);
5822 pop(rsi);
5823 }
5825 void MacroAssembler::pop_fTOS() {
5826 fld_d(Address(rsp, 0));
5827 addl(rsp, 2 * wordSize);
5828 }
5830 void MacroAssembler::push_callee_saved_registers() {
5831 push(rsi);
5832 push(rdi);
5833 push(rdx);
5834 push(rcx);
5835 }
5837 void MacroAssembler::push_fTOS() {
5838 subl(rsp, 2 * wordSize);
5839 fstp_d(Address(rsp, 0));
5840 }
5843 void MacroAssembler::pushoop(jobject obj) {
5844 push_literal32((int32_t)obj, oop_Relocation::spec_for_immediate());
5845 }
5847 void MacroAssembler::pushklass(Metadata* obj) {
5848 push_literal32((int32_t)obj, metadata_Relocation::spec_for_immediate());
5849 }
5851 void MacroAssembler::pushptr(AddressLiteral src) {
5852 if (src.is_lval()) {
5853 push_literal32((int32_t)src.target(), src.rspec());
5854 } else {
5855 pushl(as_Address(src));
5856 }
5857 }
5859 void MacroAssembler::set_word_if_not_zero(Register dst) {
5860 xorl(dst, dst);
5861 set_byte_if_not_zero(dst);
5862 }
5864 static void pass_arg0(MacroAssembler* masm, Register arg) {
5865 masm->push(arg);
5866 }
5868 static void pass_arg1(MacroAssembler* masm, Register arg) {
5869 masm->push(arg);
5870 }
5872 static void pass_arg2(MacroAssembler* masm, Register arg) {
5873 masm->push(arg);
5874 }
5876 static void pass_arg3(MacroAssembler* masm, Register arg) {
5877 masm->push(arg);
5878 }
5880 #ifndef PRODUCT
5881 extern "C" void findpc(intptr_t x);
5882 #endif
5884 void MacroAssembler::debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg) {
5885 // In order to get locks to work, we need to fake an in_VM state
5886 JavaThread* thread = JavaThread::current();
5887 JavaThreadState saved_state = thread->thread_state();
5888 thread->set_thread_state(_thread_in_vm);
5889 if (ShowMessageBoxOnError) {
5893 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
5894 ttyLocker ttyl;
5895 BytecodeCounter::print();
5896 }
5897 // To see where a verify_oop failed, get $ebx+40/X for this frame.
5898 // This is the value of eip which points to where verify_oop will return.
5899 if (os::message_box(msg, "Execution stopped, print registers?")) {
5900 print_state32(rdi, rsi, rbp, rsp, rbx, rdx, rcx, rax, eip);
5901 BREAKPOINT;
5902 }
5903 } else {
5904 ttyLocker ttyl;
5905 ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
5906 }
5907 // Don't assert holding the ttyLock
5908 assert(false, err_msg("DEBUG MESSAGE: %s", msg));
5909 ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
5910 }
5912 void MacroAssembler::print_state32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip) {
5913 ttyLocker ttyl;
5914 FlagSetting fs(Debugging, true);
5915 tty->print_cr("eip = 0x%08x", eip);
5916 #ifndef PRODUCT
5917 if ((WizardMode || Verbose) && PrintMiscellaneous) {
5918 tty->cr();
5919 findpc(eip);
5920 tty->cr();
5921 }
5922 #endif
5923 #define PRINT_REG(rax) \
5924 { tty->print("%s = ", #rax); os::print_location(tty, rax); }
5925 PRINT_REG(rax);
5926 PRINT_REG(rbx);
5927 PRINT_REG(rcx);
5928 PRINT_REG(rdx);
5929 PRINT_REG(rdi);
5930 PRINT_REG(rsi);
5931 PRINT_REG(rbp);
5932 PRINT_REG(rsp);
5933 #undef PRINT_REG
5934 // Print some words near the top of the stack.
5935 int* dump_sp = (int*) rsp;
5936 for (int col1 = 0; col1 < 8; col1++) {
5937 tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
5938 os::print_location(tty, *dump_sp++);
5939 }
5940 for (int row = 0; row < 16; row++) {
5941 tty->print("(rsp+0x%03x) 0x%08x: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (intptr_t)dump_sp);
5942 for (int col = 0; col < 8; col++) {
5943 tty->print(" 0x%08x", *dump_sp++);
5944 }
5945 tty->cr();
5946 }
5947 // Print some instructions around pc:
5948 Disassembler::decode((address)eip-64, (address)eip);
5949 tty->print_cr("--------");
5950 Disassembler::decode((address)eip, (address)eip+32);
5951 }
5953 void MacroAssembler::stop(const char* msg) {
5954 ExternalAddress message((address)msg);
5955 // push address of message
5956 pushptr(message.addr());
5957 { Label L; call(L, relocInfo::none); bind(L); } // push eip
5958 pusha(); // push registers
5959 call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32)));
5960 hlt();
5961 }
5963 void MacroAssembler::warn(const char* msg) {
5964 push_CPU_state();
5966 ExternalAddress message((address) msg);
5967 // push address of message
5968 pushptr(message.addr());
5970 call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));
5971 addl(rsp, wordSize); // discard argument
5972 pop_CPU_state();
5973 }
5975 void MacroAssembler::print_state() {
5976 { Label L; call(L, relocInfo::none); bind(L); } // push eip
5977 pusha(); // push registers
5979 push_CPU_state();
5980 call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::print_state32)));
5981 pop_CPU_state();
5983 popa();
5984 addl(rsp, wordSize);
5985 }
5987 #else // _LP64
5989 // 64 bit versions
5991 Address MacroAssembler::as_Address(AddressLiteral adr) {
5992 // amd64 always does this as a pc-rel
5993 // we can be absolute or disp based on the instruction type
5994 // jmp/call are displacements; others are absolute
5995 assert(!adr.is_lval(), "must be rval");
5996 assert(reachable(adr), "must be");
5997 return Address((int32_t)(intptr_t)(adr.target() - pc()), adr.target(), adr.reloc());
5999 }
6001 Address MacroAssembler::as_Address(ArrayAddress adr) {
6002 AddressLiteral base = adr.base();
6003 lea(rscratch1, base);
6004 Address index = adr.index();
6005 assert(index._disp == 0, "must not have disp"); // maybe it can?
6006 Address array(rscratch1, index._index, index._scale, index._disp);
6007 return array;
6008 }
6010 int MacroAssembler::biased_locking_enter(Register lock_reg,
6011 Register obj_reg,
6012 Register swap_reg,
6013 Register tmp_reg,
6014 bool swap_reg_contains_mark,
6015 Label& done,
6016 Label* slow_case,
6017 BiasedLockingCounters* counters) {
6018 assert(UseBiasedLocking, "why call this otherwise?");
6019 assert(swap_reg == rax, "swap_reg must be rax for cmpxchgq");
6020 assert(tmp_reg != noreg, "tmp_reg must be supplied");
6021 assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
6022 assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
6023 Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes());
6024 Address saved_mark_addr(lock_reg, 0);
6026 if (PrintBiasedLockingStatistics && counters == NULL)
6027 counters = BiasedLocking::counters();
6029 // Biased locking
6030 // See whether the lock is currently biased toward our thread and
6031 // whether the epoch is still valid
6032 // Note that the runtime guarantees sufficient alignment of JavaThread
6033 // pointers to allow age to be placed into low bits
6034 // First check to see whether biasing is even enabled for this object
6035 Label cas_label;
6036 int null_check_offset = -1;
6037 if (!swap_reg_contains_mark) {
6038 null_check_offset = offset();
6039 movq(swap_reg, mark_addr);
6040 }
6041 movq(tmp_reg, swap_reg);
6042 andq(tmp_reg, markOopDesc::biased_lock_mask_in_place);
6043 cmpq(tmp_reg, markOopDesc::biased_lock_pattern);
6044 jcc(Assembler::notEqual, cas_label);
6045 // The bias pattern is present in the object's header. Need to check
6046 // whether the bias owner and the epoch are both still current.
6047 load_prototype_header(tmp_reg, obj_reg);
6048 orq(tmp_reg, r15_thread);
6049 xorq(tmp_reg, swap_reg);
6050 andq(tmp_reg, ~((int) markOopDesc::age_mask_in_place));
6051 if (counters != NULL) {
6052 cond_inc32(Assembler::zero,
6053 ExternalAddress((address) counters->biased_lock_entry_count_addr()));
6054 }
6055 jcc(Assembler::equal, done);
6057 Label try_revoke_bias;
6058 Label try_rebias;
6060 // At this point we know that the header has the bias pattern and
6061 // that we are not the bias owner in the current epoch. We need to
6062 // figure out more details about the state of the header in order to
6063 // know what operations can be legally performed on the object's
6064 // header.
6066 // If the low three bits in the xor result aren't clear, that means
6067 // the prototype header is no longer biased and we have to revoke
6068 // the bias on this object.
6069 testq(tmp_reg, markOopDesc::biased_lock_mask_in_place);
6070 jcc(Assembler::notZero, try_revoke_bias);
6072 // Biasing is still enabled for this data type. See whether the
6073 // epoch of the current bias is still valid, meaning that the epoch
6074 // bits of the mark word are equal to the epoch bits of the
6075 // prototype header. (Note that the prototype header's epoch bits
6076 // only change at a safepoint.) If not, attempt to rebias the object
6077 // toward the current thread. Note that we must be absolutely sure
6078 // that the current epoch is invalid in order to do this because
6079 // otherwise the manipulations it performs on the mark word are
6080 // illegal.
6081 testq(tmp_reg, markOopDesc::epoch_mask_in_place);
6082 jcc(Assembler::notZero, try_rebias);
6084 // The epoch of the current bias is still valid but we know nothing
6085 // about the owner; it might be set or it might be clear. Try to
6086 // acquire the bias of the object using an atomic operation. If this
6087 // fails we will go in to the runtime to revoke the object's bias.
6088 // Note that we first construct the presumed unbiased header so we
6089 // don't accidentally blow away another thread's valid bias.
6090 andq(swap_reg,
6091 markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
6092 movq(tmp_reg, swap_reg);
6093 orq(tmp_reg, r15_thread);
6094 if (os::is_MP()) {
6095 lock();
6096 }
6097 cmpxchgq(tmp_reg, Address(obj_reg, 0));
6098 // If the biasing toward our thread failed, this means that
6099 // another thread succeeded in biasing it toward itself and we
6100 // need to revoke that bias. The revocation will occur in the
6101 // interpreter runtime in the slow case.
6102 if (counters != NULL) {
6103 cond_inc32(Assembler::zero,
6104 ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr()));
6105 }
6106 if (slow_case != NULL) {
6107 jcc(Assembler::notZero, *slow_case);
6108 }
6109 jmp(done);
6111 bind(try_rebias);
6112 // At this point we know the epoch has expired, meaning that the
6113 // current "bias owner", if any, is actually invalid. Under these
6114 // circumstances _only_, we are allowed to use the current header's
6115 // value as the comparison value when doing the cas to acquire the
6116 // bias in the current epoch. In other words, we allow transfer of
6117 // the bias from one thread to another directly in this situation.
6118 //
6119 // FIXME: due to a lack of registers we currently blow away the age
6120 // bits in this situation. Should attempt to preserve them.
6121 load_prototype_header(tmp_reg, obj_reg);
6122 orq(tmp_reg, r15_thread);
6123 if (os::is_MP()) {
6124 lock();
6125 }
6126 cmpxchgq(tmp_reg, Address(obj_reg, 0));
6127 // If the biasing toward our thread failed, then another thread
6128 // succeeded in biasing it toward itself and we need to revoke that
6129 // bias. The revocation will occur in the runtime in the slow case.
6130 if (counters != NULL) {
6131 cond_inc32(Assembler::zero,
6132 ExternalAddress((address) counters->rebiased_lock_entry_count_addr()));
6133 }
6134 if (slow_case != NULL) {
6135 jcc(Assembler::notZero, *slow_case);
6136 }
6137 jmp(done);
6139 bind(try_revoke_bias);
6140 // The prototype mark in the klass doesn't have the bias bit set any
6141 // more, indicating that objects of this data type are not supposed
6142 // to be biased any more. We are going to try to reset the mark of
6143 // this object to the prototype value and fall through to the
6144 // CAS-based locking scheme. Note that if our CAS fails, it means
6145 // that another thread raced us for the privilege of revoking the
6146 // bias of this particular object, so it's okay to continue in the
6147 // normal locking code.
6148 //
6149 // FIXME: due to a lack of registers we currently blow away the age
6150 // bits in this situation. Should attempt to preserve them.
6151 load_prototype_header(tmp_reg, obj_reg);
6152 if (os::is_MP()) {
6153 lock();
6154 }
6155 cmpxchgq(tmp_reg, Address(obj_reg, 0));
6156 // Fall through to the normal CAS-based lock, because no matter what
6157 // the result of the above CAS, some thread must have succeeded in
6158 // removing the bias bit from the object's header.
6159 if (counters != NULL) {
6160 cond_inc32(Assembler::zero,
6161 ExternalAddress((address) counters->revoked_lock_entry_count_addr()));
6162 }
6164 bind(cas_label);
6166 return null_check_offset;
6167 }
6169 void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) {
6170 Label L, E;
6172 #ifdef _WIN64
6173 // Windows always allocates space for its register args
6174 assert(num_args <= 4, "only register arguments supported");
6175 subq(rsp, frame::arg_reg_save_area_bytes);
6176 #endif
6178 // Align stack if necessary
6179 testl(rsp, 15);
6180 jcc(Assembler::zero, L);
6182 subq(rsp, 8);
6183 {
6184 call(RuntimeAddress(entry_point));
6185 }
6186 addq(rsp, 8);
6187 jmp(E);
6189 bind(L);
6190 {
6191 call(RuntimeAddress(entry_point));
6192 }
6194 bind(E);
6196 #ifdef _WIN64
6197 // restore stack pointer
6198 addq(rsp, frame::arg_reg_save_area_bytes);
6199 #endif
6201 }
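// Editor's note on the alignment dance above: the x86-64 ABI requires
// rsp % 16 == 0 at the call instruction, and the stack is always at least
// 8-byte aligned, so one extra 8-byte slot around the call is the only
// correction ever needed. A sketch of that padding computation:
static inline long call_alignment_padding_sketch(unsigned long rsp) {
  return (rsp & 15) ? 8 : 0;  // testl(rsp, 15); subq(rsp, 8) when nonzero
}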
6203 void MacroAssembler::cmp64(Register src1, AddressLiteral src2) {
6204 assert(!src2.is_lval(), "should use cmpptr");
6206 if (reachable(src2)) {
6207 cmpq(src1, as_Address(src2));
6208 } else {
6209 lea(rscratch1, src2);
6210 Assembler::cmpq(src1, Address(rscratch1, 0));
6211 }
6212 }
6214 int MacroAssembler::corrected_idivq(Register reg) {
6215 // Full implementation of Java ldiv and lrem; checks for special
6216 // case as described in JVM spec., p.243 & p.271. The function
6217 // returns the (pc) offset of the idivl instruction - may be needed
6218 // for implicit exceptions.
6219 //
6220 // normal case special case
6221 //
6222 // input : rax: dividend min_long
6223 // reg: divisor (may not be eax/edx) -1
6224 //
6225 // output: rax: quotient (= rax idiv reg) min_long
6226 // rdx: remainder (= rax irem reg) 0
6227 assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
6228 static const int64_t min_long = 0x8000000000000000;
6229 Label normal_case, special_case;
6231 // check for special case
6232 cmp64(rax, ExternalAddress((address) &min_long));
6233 jcc(Assembler::notEqual, normal_case);
6234 xorl(rdx, rdx); // prepare rdx for possible special case (where
6235 // remainder = 0)
6236 cmpq(reg, -1);
6237 jcc(Assembler::equal, special_case);
6239 // handle normal case
6240 bind(normal_case);
6241 cdqq();
6242 int idivq_offset = offset();
6243 idivq(reg);
6245 // normal and special case exit
6246 bind(special_case);
6248 return idivq_offset;
6249 }
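// Editor's sketch of the special case above: min_long / -1 would overflow
// (the true quotient 2^63 is unrepresentable) and idivq would raise #DE,
// but the JVM spec defines the result as min_long with remainder 0, which
// is exactly what skipping the divide leaves behind. Divisor is assumed
// nonzero here; zero is checked elsewhere.
static inline void ldiv_sketch(int64_t dividend, int64_t divisor,
                               int64_t* q, int64_t* r) {
  const int64_t min_long = (int64_t)0x8000000000000000ULL;
  if (dividend == min_long && divisor == -1) {
    *q = min_long; *r = 0;     // special case exit, idivq skipped
  } else {
    *q = dividend / divisor;   // cdqq; idivq(reg)
    *r = dividend % divisor;
  }
}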
6251 void MacroAssembler::decrementq(Register reg, int value) {
6252 if (value == min_jint) { subq(reg, value); return; }
6253 if (value < 0) { incrementq(reg, -value); return; }
6254 if (value == 0) { ; return; }
6255 if (value == 1 && UseIncDec) { decq(reg) ; return; }
6256 /* else */ { subq(reg, value) ; return; }
6257 }
6259 void MacroAssembler::decrementq(Address dst, int value) {
6260 if (value == min_jint) { subq(dst, value); return; }
6261 if (value < 0) { incrementq(dst, -value); return; }
6262 if (value == 0) { ; return; }
6263 if (value == 1 && UseIncDec) { decq(dst) ; return; }
6264 /* else */ { subq(dst, value) ; return; }
6265 }
6267 void MacroAssembler::incrementq(Register reg, int value) {
6268 if (value == min_jint) { addq(reg, value); return; }
6269 if (value < 0) { decrementq(reg, -value); return; }
6270 if (value == 0) { ; return; }
6271 if (value == 1 && UseIncDec) { incq(reg) ; return; }
6272 /* else */ { addq(reg, value) ; return; }
6273 }
6275 void MacroAssembler::incrementq(Address dst, int value) {
6276 if (value == min_jint) { addq(dst, value); return; }
6277 if (value < 0) { decrementq(dst, -value); return; }
6278 if (value == 0) { ; return; }
6279 if (value == 1 && UseIncDec) { incq(dst) ; return; }
6280 /* else */ { addq(dst, value) ; return; }
6281 }
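// Editor's note on the min_jint guards above: decrementq rewrites a
// negative count as incrementq(reg, -value) and vice versa, but negating
// the most negative 32-bit int overflows, so that one value is peeled off
// first and handled with a plain addq/subq. A sketch of the hazard:
static inline bool negation_overflows_sketch(int32_t v) {
  return v == (int32_t)0x80000000;  // -min_jint is not representable
}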
6283 // 32bit can do a case table jump in one instruction but we no longer allow the base
6284 // to be installed in the Address class
6285 void MacroAssembler::jump(ArrayAddress entry) {
6286 lea(rscratch1, entry.base());
6287 Address dispatch = entry.index();
6288 assert(dispatch._base == noreg, "must be");
6289 dispatch._base = rscratch1;
6290 jmp(dispatch);
6291 }
6293 void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
6294 ShouldNotReachHere(); // 64bit doesn't use two regs
6295 cmpq(x_lo, y_lo);
6296 }
6298 void MacroAssembler::lea(Register dst, AddressLiteral src) {
6299 mov_literal64(dst, (intptr_t)src.target(), src.rspec());
6300 }
6302 void MacroAssembler::lea(Address dst, AddressLiteral adr) {
6303 mov_literal64(rscratch1, (intptr_t)adr.target(), adr.rspec());
6304 movptr(dst, rscratch1);
6305 }
6307 void MacroAssembler::leave() {
6308 // %%% is this really better? Why not on 32bit too?
6309 emit_byte(0xC9); // LEAVE
6310 }
6312 void MacroAssembler::lneg(Register hi, Register lo) {
6313 ShouldNotReachHere(); // 64bit doesn't use two regs
6314 negq(lo);
6315 }
6317 void MacroAssembler::movoop(Register dst, jobject obj) {
6318 mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate());
6319 }
6321 void MacroAssembler::movoop(Address dst, jobject obj) {
6322 mov_literal64(rscratch1, (intptr_t)obj, oop_Relocation::spec_for_immediate());
6323 movq(dst, rscratch1);
6324 }
6326 void MacroAssembler::mov_metadata(Register dst, Metadata* obj) {
6327 mov_literal64(dst, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
6328 }
6330 void MacroAssembler::mov_metadata(Address dst, Metadata* obj) {
6331 mov_literal64(rscratch1, (intptr_t)obj, metadata_Relocation::spec_for_immediate());
6332 movq(dst, rscratch1);
6333 }
6335 void MacroAssembler::movptr(Register dst, AddressLiteral src) {
6336 if (src.is_lval()) {
6337 mov_literal64(dst, (intptr_t)src.target(), src.rspec());
6338 } else {
6339 if (reachable(src)) {
6340 movq(dst, as_Address(src));
6341 } else {
6342 lea(rscratch1, src);
6343 movq(dst, Address(rscratch1,0));
6344 }
6345 }
6346 }
6348 void MacroAssembler::movptr(ArrayAddress dst, Register src) {
6349 movq(as_Address(dst), src);
6350 }
6352 void MacroAssembler::movptr(Register dst, ArrayAddress src) {
6353 movq(dst, as_Address(src));
6354 }
6356 // src should NEVER be a real pointer. Use AddressLiteral for true pointers
6357 void MacroAssembler::movptr(Address dst, intptr_t src) {
6358 mov64(rscratch1, src);
6359 movq(dst, rscratch1);
6360 }
6362 // These are mostly for initializing NULL
6363 void MacroAssembler::movptr(Address dst, int32_t src) {
6364 movslq(dst, src);
6365 }
6367 void MacroAssembler::movptr(Register dst, int32_t src) {
6368 mov64(dst, (intptr_t)src);
6369 }
6371 void MacroAssembler::pushoop(jobject obj) {
6372 movoop(rscratch1, obj);
6373 push(rscratch1);
6374 }
6376 void MacroAssembler::pushklass(Metadata* obj) {
6377 mov_metadata(rscratch1, obj);
6378 push(rscratch1);
6379 }
6381 void MacroAssembler::pushptr(AddressLiteral src) {
6382 lea(rscratch1, src);
6383 if (src.is_lval()) {
6384 push(rscratch1);
6385 } else {
6386 pushq(Address(rscratch1, 0));
6387 }
6388 }
6390 void MacroAssembler::reset_last_Java_frame(bool clear_fp,
6391 bool clear_pc) {
6392 // we must set sp to zero to clear frame
6393 movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
6394 // must clear fp, so that compiled frames are not confused; it is
6395 // possible that we need it only for debugging
6396 if (clear_fp) {
6397 movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
6398 }
6400 if (clear_pc) {
6401 movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
6402 }
6403 }
6405 void MacroAssembler::set_last_Java_frame(Register last_java_sp,
6406 Register last_java_fp,
6407 address last_java_pc) {
6408 // determine last_java_sp register
6409 if (!last_java_sp->is_valid()) {
6410 last_java_sp = rsp;
6411 }
6413 // last_java_fp is optional
6414 if (last_java_fp->is_valid()) {
6415 movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()),
6416 last_java_fp);
6417 }
6419 // last_java_pc is optional
6420 if (last_java_pc != NULL) {
6421 Address java_pc(r15_thread,
6422 JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
6423 lea(rscratch1, InternalAddress(last_java_pc));
6424 movptr(java_pc, rscratch1);
6425 }
6427 movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
6428 }
6430 static void pass_arg0(MacroAssembler* masm, Register arg) {
6431 if (c_rarg0 != arg ) {
6432 masm->mov(c_rarg0, arg);
6433 }
6434 }
6436 static void pass_arg1(MacroAssembler* masm, Register arg) {
6437 if (c_rarg1 != arg ) {
6438 masm->mov(c_rarg1, arg);
6439 }
6440 }
6442 static void pass_arg2(MacroAssembler* masm, Register arg) {
6443 if (c_rarg2 != arg ) {
6444 masm->mov(c_rarg2, arg);
6445 }
6446 }
6448 static void pass_arg3(MacroAssembler* masm, Register arg) {
6449 if (c_rarg3 != arg ) {
6450 masm->mov(c_rarg3, arg);
6451 }
6452 }
6454 void MacroAssembler::stop(const char* msg) {
6455 address rip = pc();
6456 pusha(); // get regs on stack
6457 lea(c_rarg0, ExternalAddress((address) msg));
6458 lea(c_rarg1, InternalAddress(rip));
6459 movq(c_rarg2, rsp); // pass pointer to regs array
6460 andq(rsp, -16); // align stack as required by ABI
6461 call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
6462 hlt();
6463 }
6465 void MacroAssembler::warn(const char* msg) {
6466 push(rbp);
6467 movq(rbp, rsp);
6468 andq(rsp, -16); // align stack as required by push_CPU_state and call
6469 push_CPU_state(); // keeps alignment at 16 bytes
6470 lea(c_rarg0, ExternalAddress((address) msg));
6471 call_VM_leaf(CAST_FROM_FN_PTR(address, warning), c_rarg0);
6472 pop_CPU_state();
6473 mov(rsp, rbp);
6474 pop(rbp);
6475 }
6477 void MacroAssembler::print_state() {
6478 address rip = pc();
6479 pusha(); // get regs on stack
6480 push(rbp);
6481 movq(rbp, rsp);
6482 andq(rsp, -16); // align stack as required by push_CPU_state and call
6483 push_CPU_state(); // keeps alignment at 16 bytes
6485 lea(c_rarg0, InternalAddress(rip));
6486 lea(c_rarg1, Address(rbp, wordSize)); // pass pointer to regs array
6487 call_VM_leaf(CAST_FROM_FN_PTR(address, MacroAssembler::print_state64), c_rarg0, c_rarg1);
6489 pop_CPU_state();
6490 mov(rsp, rbp);
6491 pop(rbp);
6492 popa();
6493 }
6495 #ifndef PRODUCT
6496 extern "C" void findpc(intptr_t x);
6497 #endif
6499 void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
6500 // In order to get locks to work, we need to fake an in_VM state
6501 if (ShowMessageBoxOnError) {
6502 JavaThread* thread = JavaThread::current();
6503 JavaThreadState saved_state = thread->thread_state();
6504 thread->set_thread_state(_thread_in_vm);
6505 #ifndef PRODUCT
6506 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
6507 ttyLocker ttyl;
6508 BytecodeCounter::print();
6509 }
6510 #endif
6511 // To see where a verify_oop failed, get $ebx+40/X for this frame.
6512 // XXX correct this offset for amd64
6513 // This is the value of eip which points to where verify_oop will return.
6514 if (os::message_box(msg, "Execution stopped, print registers?")) {
6515 print_state64(pc, regs);
6516 BREAKPOINT;
6517 assert(false, "start up GDB");
6518 }
6519 ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
6520 } else {
6521 ttyLocker ttyl;
6522 ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n",
6523 msg);
6524 assert(false, err_msg("DEBUG MESSAGE: %s", msg));
6525 }
6526 }
6528 void MacroAssembler::print_state64(int64_t pc, int64_t regs[]) {
6529 ttyLocker ttyl;
6530 FlagSetting fs(Debugging, true);
6531 tty->print_cr("rip = 0x%016lx", pc);
6532 #ifndef PRODUCT
6533 tty->cr();
6534 findpc(pc);
6535 tty->cr();
6536 #endif
6537 #define PRINT_REG(rax, value) \
6538 { tty->print("%s = ", #rax); os::print_location(tty, value); }
6539 PRINT_REG(rax, regs[15]);
6540 PRINT_REG(rbx, regs[12]);
6541 PRINT_REG(rcx, regs[14]);
6542 PRINT_REG(rdx, regs[13]);
6543 PRINT_REG(rdi, regs[8]);
6544 PRINT_REG(rsi, regs[9]);
6545 PRINT_REG(rbp, regs[10]);
6546 PRINT_REG(rsp, regs[11]);
6547 PRINT_REG(r8 , regs[7]);
6548 PRINT_REG(r9 , regs[6]);
6549 PRINT_REG(r10, regs[5]);
6550 PRINT_REG(r11, regs[4]);
6551 PRINT_REG(r12, regs[3]);
6552 PRINT_REG(r13, regs[2]);
6553 PRINT_REG(r14, regs[1]);
6554 PRINT_REG(r15, regs[0]);
6555 #undef PRINT_REG
6556 // Print some words near the top of the stack.
6557 int64_t* rsp = (int64_t*) regs[11];
6558 int64_t* dump_sp = rsp;
6559 for (int col1 = 0; col1 < 8; col1++) {
6560 tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (int64_t)dump_sp);
6561 os::print_location(tty, *dump_sp++);
6562 }
6563 for (int row = 0; row < 25; row++) {
6564 tty->print("(rsp+0x%03x) 0x%016lx: ", (int)((intptr_t)dump_sp - (intptr_t)rsp), (int64_t)dump_sp);
6565 for (int col = 0; col < 4; col++) {
6566 tty->print(" 0x%016lx", *dump_sp++);
6567 }
6568 tty->cr();
6569 }
6570 // Print some instructions around pc:
6571 Disassembler::decode((address)pc-64, (address)pc);
6572 tty->print_cr("--------");
6573 Disassembler::decode((address)pc, (address)pc+32);
6574 }
6576 #endif // _LP64
6578 // Now versions that are common to 32/64 bit
6580 void MacroAssembler::addptr(Register dst, int32_t imm32) {
6581 LP64_ONLY(addq(dst, imm32)) NOT_LP64(addl(dst, imm32));
6582 }
6584 void MacroAssembler::addptr(Register dst, Register src) {
6585 LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
6586 }
6588 void MacroAssembler::addptr(Address dst, Register src) {
6589 LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
6590 }
6592 void MacroAssembler::addsd(XMMRegister dst, AddressLiteral src) {
6593 if (reachable(src)) {
6594 Assembler::addsd(dst, as_Address(src));
6595 } else {
6596 lea(rscratch1, src);
6597 Assembler::addsd(dst, Address(rscratch1, 0));
6598 }
6599 }
6601 void MacroAssembler::addss(XMMRegister dst, AddressLiteral src) {
6602 if (reachable(src)) {
6603 addss(dst, as_Address(src));
6604 } else {
6605 lea(rscratch1, src);
6606 addss(dst, Address(rscratch1, 0));
6607 }
6608 }
6610 void MacroAssembler::align(int modulus) {
6611 if (offset() % modulus != 0) {
6612 nop(modulus - (offset() % modulus));
6613 }
6614 }
6616 void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src) {
6617 // Used in sign-masking with aligned address.
6618 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
6619 if (reachable(src)) {
6620 Assembler::andpd(dst, as_Address(src));
6621 } else {
6622 lea(rscratch1, src);
6623 Assembler::andpd(dst, Address(rscratch1, 0));
6624 }
6625 }
6627 void MacroAssembler::andps(XMMRegister dst, AddressLiteral src) {
6628 // Used in sign-masking with aligned address.
6629 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
6630 if (reachable(src)) {
6631 Assembler::andps(dst, as_Address(src));
6632 } else {
6633 lea(rscratch1, src);
6634 Assembler::andps(dst, Address(rscratch1, 0));
6635 }
6636 }
6638 void MacroAssembler::andptr(Register dst, int32_t imm32) {
6639 LP64_ONLY(andq(dst, imm32)) NOT_LP64(andl(dst, imm32));
6640 }
6642 void MacroAssembler::atomic_incl(AddressLiteral counter_addr) {
6643 pushf();
6644 if (os::is_MP())
6645 lock();
6646 incrementl(counter_addr);
6647 popf();
6648 }
6650 // Writes to successive stack pages until the requested offset is reached,
6651 // to check for stack overflow + shadow pages. This clobbers tmp.
6652 void MacroAssembler::bang_stack_size(Register size, Register tmp) {
6653 movptr(tmp, rsp);
6654 // Bang stack for total size given plus shadow page size.
6655 // Bang one page at a time because large size can bang beyond yellow and
6656 // red zones.
6657 Label loop;
6658 bind(loop);
6659 movl(Address(tmp, (-os::vm_page_size())), size );
6660 subptr(tmp, os::vm_page_size());
6661 subl(size, os::vm_page_size());
6662 jcc(Assembler::greater, loop);
6664 // Bang down shadow pages too.
6665 // The -1 because we already subtracted 1 page.
6666 for (int i = 0; i < StackShadowPages-1; i++) {
6667 // this could be any sized move, but since it can serve as a debugging
6668 // crumb the bigger the better.
6669 movptr(Address(tmp, (-i*os::vm_page_size())), size );
6670 }
6671 }
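// Editor's sketch of the first banging loop above: touch one word in every
// page from the stack pointer down through the requested size, so any
// guard-page fault happens here rather than at some arbitrary later store
// (the shadow pages are then banged by the second loop). Page size is an
// assumption here; the real code asks os::vm_page_size().
static inline void bang_stack_sketch(volatile char* sp, long size) {
  const long page = 4096;  // assumed page size
  for (long off = page; off <= size; off += page) {
    sp[-off] = 0;          // movl(Address(tmp, -page), size), one per page
  }
}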
6673 void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) {
6674 assert(UseBiasedLocking, "why call this otherwise?");
6676 // Check for biased locking unlock case, which is a no-op
6677 // Note: we do not have to check the thread ID for two reasons.
6678 // First, the interpreter checks for IllegalMonitorStateException at
6679 // a higher level. Second, if the bias was revoked while we held the
6680 // lock, the object could not be rebiased toward another thread, so
6681 // the bias bit would be clear.
6682 movptr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
6683 andptr(temp_reg, markOopDesc::biased_lock_mask_in_place);
6684 cmpptr(temp_reg, markOopDesc::biased_lock_pattern);
6685 jcc(Assembler::equal, done);
6686 }
6688 void MacroAssembler::c2bool(Register x) {
6689 // implements x == 0 ? 0 : 1
6690 // note: must only look at least-significant byte of x
6691 // since C-style booleans are stored in one byte
6692 // only! (was bug)
6693 andl(x, 0xFF);
6694 setb(Assembler::notZero, x);
6695 }
6697 // Wouldn't be needed if the AddressLiteral version had a different name
6698 void MacroAssembler::call(Label& L, relocInfo::relocType rtype) {
6699 Assembler::call(L, rtype);
6700 }
6702 void MacroAssembler::call(Register entry) {
6703 Assembler::call(entry);
6704 }
6706 void MacroAssembler::call(AddressLiteral entry) {
6707 if (reachable(entry)) {
6708 Assembler::call_literal(entry.target(), entry.rspec());
6709 } else {
6710 lea(rscratch1, entry);
6711 Assembler::call(rscratch1);
6712 }
6713 }
6715 void MacroAssembler::ic_call(address entry) {
6716 RelocationHolder rh = virtual_call_Relocation::spec(pc());
6717 movptr(rax, (intptr_t)Universe::non_oop_word());
6718 call(AddressLiteral(entry, rh));
6719 }
6721 // Implementation of call_VM versions
6723 void MacroAssembler::call_VM(Register oop_result,
6724 address entry_point,
6725 bool check_exceptions) {
6726 Label C, E;
6727 call(C, relocInfo::none);
6728 jmp(E);
6730 bind(C);
6731 call_VM_helper(oop_result, entry_point, 0, check_exceptions);
6732 ret(0);
6734 bind(E);
6735 }
6737 void MacroAssembler::call_VM(Register oop_result,
6738 address entry_point,
6739 Register arg_1,
6740 bool check_exceptions) {
6741 Label C, E;
6742 call(C, relocInfo::none);
6743 jmp(E);
6745 bind(C);
6746 pass_arg1(this, arg_1);
6747 call_VM_helper(oop_result, entry_point, 1, check_exceptions);
6748 ret(0);
6750 bind(E);
6751 }
6753 void MacroAssembler::call_VM(Register oop_result,
6754 address entry_point,
6755 Register arg_1,
6756 Register arg_2,
6757 bool check_exceptions) {
6758 Label C, E;
6759 call(C, relocInfo::none);
6760 jmp(E);
6762 bind(C);
6764 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
6766 pass_arg2(this, arg_2);
6767 pass_arg1(this, arg_1);
6768 call_VM_helper(oop_result, entry_point, 2, check_exceptions);
6769 ret(0);
6771 bind(E);
6772 }
6774 void MacroAssembler::call_VM(Register oop_result,
6775 address entry_point,
6776 Register arg_1,
6777 Register arg_2,
6778 Register arg_3,
6779 bool check_exceptions) {
6780 Label C, E;
6781 call(C, relocInfo::none);
6782 jmp(E);
6784 bind(C);
6786 LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
6787 LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
6788 pass_arg3(this, arg_3);
6790 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
6791 pass_arg2(this, arg_2);
6793 pass_arg1(this, arg_1);
6794 call_VM_helper(oop_result, entry_point, 3, check_exceptions);
6795 ret(0);
6797 bind(E);
6798 }
6800 void MacroAssembler::call_VM(Register oop_result,
6801 Register last_java_sp,
6802 address entry_point,
6803 int number_of_arguments,
6804 bool check_exceptions) {
6805 Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
6806 call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
6807 }
6809 void MacroAssembler::call_VM(Register oop_result,
6810 Register last_java_sp,
6811 address entry_point,
6812 Register arg_1,
6813 bool check_exceptions) {
6814 pass_arg1(this, arg_1);
6815 call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
6816 }
6818 void MacroAssembler::call_VM(Register oop_result,
6819 Register last_java_sp,
6820 address entry_point,
6821 Register arg_1,
6822 Register arg_2,
6823 bool check_exceptions) {
6825 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
6826 pass_arg2(this, arg_2);
6827 pass_arg1(this, arg_1);
6828 call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
6829 }
6831 void MacroAssembler::call_VM(Register oop_result,
6832 Register last_java_sp,
6833 address entry_point,
6834 Register arg_1,
6835 Register arg_2,
6836 Register arg_3,
6837 bool check_exceptions) {
6838 LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
6839 LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
6840 pass_arg3(this, arg_3);
6841 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
6842 pass_arg2(this, arg_2);
6843 pass_arg1(this, arg_1);
6844 call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
6845 }
6847 void MacroAssembler::super_call_VM(Register oop_result,
6848 Register last_java_sp,
6849 address entry_point,
6850 int number_of_arguments,
6851 bool check_exceptions) {
6852 Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
6853 MacroAssembler::call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
6854 }
6856 void MacroAssembler::super_call_VM(Register oop_result,
6857 Register last_java_sp,
6858 address entry_point,
6859 Register arg_1,
6860 bool check_exceptions) {
6861 pass_arg1(this, arg_1);
6862 super_call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
6863 }
6865 void MacroAssembler::super_call_VM(Register oop_result,
6866 Register last_java_sp,
6867 address entry_point,
6868 Register arg_1,
6869 Register arg_2,
6870 bool check_exceptions) {
6872 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
6873 pass_arg2(this, arg_2);
6874 pass_arg1(this, arg_1);
6875 super_call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
6876 }
6878 void MacroAssembler::super_call_VM(Register oop_result,
6879 Register last_java_sp,
6880 address entry_point,
6881 Register arg_1,
6882 Register arg_2,
6883 Register arg_3,
6884 bool check_exceptions) {
6885 LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
6886 LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
6887 pass_arg3(this, arg_3);
6888 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
6889 pass_arg2(this, arg_2);
6890 pass_arg1(this, arg_1);
6891 super_call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
6892 }
6894 void MacroAssembler::call_VM_base(Register oop_result,
6895 Register java_thread,
6896 Register last_java_sp,
6897 address entry_point,
6898 int number_of_arguments,
6899 bool check_exceptions) {
6900 // determine java_thread register
6901 if (!java_thread->is_valid()) {
6902 #ifdef _LP64
6903 java_thread = r15_thread;
6904 #else
6905 java_thread = rdi;
6906 get_thread(java_thread);
6907 #endif // LP64
6908 }
6909 // determine last_java_sp register
6910 if (!last_java_sp->is_valid()) {
6911 last_java_sp = rsp;
6912 }
6913 // debugging support
6914 assert(number_of_arguments >= 0 , "cannot have negative number of arguments");
6915 LP64_ONLY(assert(java_thread == r15_thread, "unexpected register"));
6916 #ifdef ASSERT
6917 // TraceBytecodes does not use r12 but saves it over the call, so don't verify
6918 // r12 is the heapbase.
6919 LP64_ONLY(if (UseCompressedOops && !TraceBytecodes) verify_heapbase("call_VM_base");)
6920 #endif // ASSERT
6922 assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result");
6923 assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");
6925 // push java thread (becomes first argument of C function)
6927 NOT_LP64(push(java_thread); number_of_arguments++);
6928 LP64_ONLY(mov(c_rarg0, r15_thread));
6930 // set last Java frame before call
6931 assert(last_java_sp != rbp, "can't use ebp/rbp");
6933 // Only interpreter should have to set fp
6934 set_last_Java_frame(java_thread, last_java_sp, rbp, NULL);
6936 // do the call, remove parameters
6937 MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
6939 // restore the thread (cannot use the pushed argument since arguments
6940 // may be overwritten by C code generated by an optimizing compiler);
6941 // however we can use the register value directly if it is callee saved.
6942 if (LP64_ONLY(true ||) java_thread == rdi || java_thread == rsi) {
6943 // rdi & rsi (also r15) are callee saved -> nothing to do
6944 #ifdef ASSERT
6945 guarantee(java_thread != rax, "change this code");
6946 push(rax);
6947 { Label L;
6948 get_thread(rax);
6949 cmpptr(java_thread, rax);
6950 jcc(Assembler::equal, L);
6951 STOP("MacroAssembler::call_VM_base: rdi not callee saved?");
6952 bind(L);
6953 }
6954 pop(rax);
6955 #endif
6956 } else {
6957 get_thread(java_thread);
6958 }
6959 // reset last Java frame
6960 // Only interpreter should have to clear fp
6961 reset_last_Java_frame(java_thread, true, false);
6963 #ifndef CC_INTERP
6964 // C++ interp handles this in the interpreter
6965 check_and_handle_popframe(java_thread);
6966 check_and_handle_earlyret(java_thread);
6967 #endif /* CC_INTERP */
6969 if (check_exceptions) {
6970 // check for pending exceptions (java_thread is set upon return)
6971 cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
6972 #ifndef _LP64
6973 jump_cc(Assembler::notEqual,
6974 RuntimeAddress(StubRoutines::forward_exception_entry()));
6975 #else
6976 // This used to conditionally jump to forward_exception; however, it is
6977 // possible, if we relocate, that the branch will not reach. So we must
6978 // jump around it so we can always reach the target.
6980 Label ok;
6981 jcc(Assembler::equal, ok);
6982 jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
6983 bind(ok);
6984 #endif // LP64
6985 }
6987 // get oop result if there is one and reset the value in the thread
6988 if (oop_result->is_valid()) {
6989 get_vm_result(oop_result, java_thread);
6990 }
6991 }
6993 void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
6995 // Calculate the value for last_Java_sp
6996 // somewhat subtle. call_VM does an intermediate call
6997 // which places a return address on the stack just under the
6998 // stack pointer as the caller finished with it. This allows
6999 // us to retrieve last_Java_pc from last_Java_sp[-1].
7000 // On 32bit we then have to push additional args on the stack to accomplish
7001 // the actual requested call. On 64bit call_VM can only use register args
7002 // so the only extra space is the return address that call_VM created.
7003 // This hopefully explains the calculations here.
7005 #ifdef _LP64
7006 // We've pushed one address, correct last_Java_sp
7007 lea(rax, Address(rsp, wordSize));
7008 #else
7009 lea(rax, Address(rsp, (1 + number_of_arguments) * wordSize));
7010 #endif // LP64
7012 call_VM_base(oop_result, noreg, rax, entry_point, number_of_arguments, check_exceptions);
7014 }
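// Editor's sketch of the last_Java_sp calculation explained above: the
// intermediate call pushed one return-address slot, and on 32-bit the
// arguments were pushed as well, so last_Java_sp must point back above
// all of them. Word sizes here are the usual assumptions.
static inline char* last_java_sp_sketch(char* rsp, int nargs, bool lp64) {
  int word  = lp64 ? 8 : 4;
  int slots = lp64 ? 1 : (1 + nargs);  // 64-bit passes args in registers
  return rsp + slots * word;           // lea(rax, Address(rsp, ...))
}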
7016 void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
7017 call_VM_leaf_base(entry_point, number_of_arguments);
7018 }
7020 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
7021 pass_arg0(this, arg_0);
7022 call_VM_leaf(entry_point, 1);
7023 }
7025 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
7027 LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
7028 pass_arg1(this, arg_1);
7029 pass_arg0(this, arg_0);
7030 call_VM_leaf(entry_point, 2);
7031 }
7033 void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
7034 LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
7035 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
7036 pass_arg2(this, arg_2);
7037 LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
7038 pass_arg1(this, arg_1);
7039 pass_arg0(this, arg_0);
7040 call_VM_leaf(entry_point, 3);
7041 }
7043 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0) {
7044 pass_arg0(this, arg_0);
7045 MacroAssembler::call_VM_leaf_base(entry_point, 1);
7046 }
7048 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
7050 LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
7051 pass_arg1(this, arg_1);
7052 pass_arg0(this, arg_0);
7053 MacroAssembler::call_VM_leaf_base(entry_point, 2);
7054 }
7056 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
7057 LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
7058 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
7059 pass_arg2(this, arg_2);
7060 LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
7061 pass_arg1(this, arg_1);
7062 pass_arg0(this, arg_0);
7063 MacroAssembler::call_VM_leaf_base(entry_point, 3);
7064 }
7066 void MacroAssembler::super_call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2, Register arg_3) {
7067 LP64_ONLY(assert(arg_0 != c_rarg3, "smashed arg"));
7068 LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
7069 LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
7070 pass_arg3(this, arg_3);
7071 LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
7072 LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
7073 pass_arg2(this, arg_2);
7074 LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
7075 pass_arg1(this, arg_1);
7076 pass_arg0(this, arg_0);
7077 MacroAssembler::call_VM_leaf_base(entry_point, 4);
7078 }
7080 void MacroAssembler::get_vm_result(Register oop_result, Register java_thread) {
7081 movptr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
7082 movptr(Address(java_thread, JavaThread::vm_result_offset()), NULL_WORD);
7083 verify_oop(oop_result, "broken oop in call_VM_base");
7084 }
7086 void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thread) {
7087 movptr(metadata_result, Address(java_thread, JavaThread::vm_result_2_offset()));
7088 movptr(Address(java_thread, JavaThread::vm_result_2_offset()), NULL_WORD);
7089 }
7091 void MacroAssembler::check_and_handle_earlyret(Register java_thread) {
7092 }
7094 void MacroAssembler::check_and_handle_popframe(Register java_thread) {
7095 }
7097 void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm) {
7098 if (reachable(src1)) {
7099 cmpl(as_Address(src1), imm);
7100 } else {
7101 lea(rscratch1, src1);
7102 cmpl(Address(rscratch1, 0), imm);
7103 }
7104 }
7106 void MacroAssembler::cmp32(Register src1, AddressLiteral src2) {
7107 assert(!src2.is_lval(), "use cmpptr");
7108 if (reachable(src2)) {
7109 cmpl(src1, as_Address(src2));
7110 } else {
7111 lea(rscratch1, src2);
7112 cmpl(src1, Address(rscratch1, 0));
7113 }
7114 }
7116 void MacroAssembler::cmp32(Register src1, int32_t imm) {
7117 Assembler::cmpl(src1, imm);
7118 }
7120 void MacroAssembler::cmp32(Register src1, Address src2) {
7121 Assembler::cmpl(src1, src2);
7122 }
7124 void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
7125 ucomisd(opr1, opr2);
7127 Label L;
7128 if (unordered_is_less) {
7129 movl(dst, -1);
7130 jcc(Assembler::parity, L);
7131 jcc(Assembler::below , L);
7132 movl(dst, 0);
7133 jcc(Assembler::equal , L);
7134 increment(dst);
7135 } else { // unordered is greater
7136 movl(dst, 1);
7137 jcc(Assembler::parity, L);
7138 jcc(Assembler::above , L);
7139 movl(dst, 0);
7140 jcc(Assembler::equal , L);
7141 decrementl(dst);
7142 }
7143 bind(L);
7144 }
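// Flag-to-result mapping for the sequence above (ucomisd/ucomiss set only
// ZF/PF/CF). For the unordered_is_less case:
//   opr1 <  opr2 : ZF=0 PF=0 CF=1 -> dst = -1 (taken at 'below')
//   opr1 == opr2 : ZF=1 PF=0 CF=0 -> dst =  0 (taken at 'equal')
//   opr1 >  opr2 : ZF=0 PF=0 CF=0 -> dst = +1 (falls through to increment)
//   unordered    : ZF=1 PF=1 CF=1 -> dst = -1 (taken at 'parity')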
7146 void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
7147 ucomiss(opr1, opr2);
7149 Label L;
7150 if (unordered_is_less) {
7151 movl(dst, -1);
7152 jcc(Assembler::parity, L);
7153 jcc(Assembler::below , L);
7154 movl(dst, 0);
7155 jcc(Assembler::equal , L);
7156 increment(dst);
7157 } else { // unordered is greater
7158 movl(dst, 1);
7159 jcc(Assembler::parity, L);
7160 jcc(Assembler::above , L);
7161 movl(dst, 0);
7162 jcc(Assembler::equal , L);
7163 decrementl(dst);
7164 }
7165 bind(L);
7166 }
7169 void MacroAssembler::cmp8(AddressLiteral src1, int imm) {
7170 if (reachable(src1)) {
7171 cmpb(as_Address(src1), imm);
7172 } else {
7173 lea(rscratch1, src1);
7174 cmpb(Address(rscratch1, 0), imm);
7175 }
7176 }
7178 void MacroAssembler::cmpptr(Register src1, AddressLiteral src2) {
7179 #ifdef _LP64
7180 if (src2.is_lval()) {
7181 movptr(rscratch1, src2);
7182 Assembler::cmpq(src1, rscratch1);
7183 } else if (reachable(src2)) {
7184 cmpq(src1, as_Address(src2));
7185 } else {
7186 lea(rscratch1, src2);
7187 Assembler::cmpq(src1, Address(rscratch1, 0));
7188 }
7189 #else
7190 if (src2.is_lval()) {
7191 cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
7192 } else {
7193 cmpl(src1, as_Address(src2));
7194 }
7195 #endif // _LP64
7196 }
7198 void MacroAssembler::cmpptr(Address src1, AddressLiteral src2) {
7199 assert(src2.is_lval(), "not a mem-mem compare");
7200 #ifdef _LP64
7201 // moves src2's literal address
7202 movptr(rscratch1, src2);
7203 Assembler::cmpq(src1, rscratch1);
7204 #else
7205 cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
7206 #endif // _LP64
7207 }
7209 void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr) {
7210 if (reachable(adr)) {
7211 if (os::is_MP())
7212 lock();
7213 cmpxchgptr(reg, as_Address(adr));
7214 } else {
7215 lea(rscratch1, adr);
7216 if (os::is_MP())
7217 lock();
7218 cmpxchgptr(reg, Address(rscratch1, 0));
7219 }
7220 }
7222 void MacroAssembler::cmpxchgptr(Register reg, Address adr) {
7223 LP64_ONLY(cmpxchgq(reg, adr)) NOT_LP64(cmpxchgl(reg, adr));
7224 }
7226 void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src) {
7227 if (reachable(src)) {
7228 Assembler::comisd(dst, as_Address(src));
7229 } else {
7230 lea(rscratch1, src);
7231 Assembler::comisd(dst, Address(rscratch1, 0));
7232 }
7233 }
7235 void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src) {
7236 if (reachable(src)) {
7237 Assembler::comiss(dst, as_Address(src));
7238 } else {
7239 lea(rscratch1, src);
7240 Assembler::comiss(dst, Address(rscratch1, 0));
7241 }
7242 }
7245 void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr) {
7246 Condition negated_cond = negate_condition(cond);
7247 Label L;
7248 jcc(negated_cond, L);
7249 atomic_incl(counter_addr);
7250 bind(L);
7251 }
7253 int MacroAssembler::corrected_idivl(Register reg) {
7254 // Full implementation of Java idiv and irem; checks for
7255 // special case as described in JVM spec., p.243 & p.271.
7256 // The function returns the (pc) offset of the idivl
7257 // instruction - may be needed for implicit exceptions.
7258 //
7259 // normal case special case
7260 //
7261 // input : rax: dividend min_int
7262 // reg: divisor (may not be rax or rdx) -1
7263 //
7264 // output: rax: quotient (= rax idiv reg) min_int
7265 // rdx: remainder (= rax irem reg) 0
7266 assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
7267 const int min_int = 0x80000000;
7268 Label normal_case, special_case;
7270 // check for special case
7271 cmpl(rax, min_int);
7272 jcc(Assembler::notEqual, normal_case);
7273 xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0)
7274 cmpl(reg, -1);
7275 jcc(Assembler::equal, special_case);
7277 // handle normal case
7278 bind(normal_case);
7279 cdql();
7280 int idivl_offset = offset();
7281 idivl(reg);
7283 // normal and special case exit
7284 bind(special_case);
7286 return idivl_offset;
7287 }
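// Illustration of the special case handled above (a sketch, not emitted
// code): under Java semantics,
//   Integer.MIN_VALUE / -1 == Integer.MIN_VALUE   (quotient wraps)
//   Integer.MIN_VALUE % -1 == 0
// whereas a raw idivl would raise #DE because +2^31 is not representable,
// so the code branches around idivl with rdx (the remainder) pre-zeroed
// and rax still holding min_int.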
7291 void MacroAssembler::decrementl(Register reg, int value) {
7292 if (value == min_jint) {subl(reg, value) ; return; }
7293 if (value < 0) { incrementl(reg, -value); return; }
7294 if (value == 0) { ; return; }
7295 if (value == 1 && UseIncDec) { decl(reg) ; return; }
7296 /* else */ { subl(reg, value) ; return; }
7297 }
7299 void MacroAssembler::decrementl(Address dst, int value) {
7300 if (value == min_jint) {subl(dst, value) ; return; }
7301 if (value < 0) { incrementl(dst, -value); return; }
7302 if (value == 0) { ; return; }
7303 if (value == 1 && UseIncDec) { decl(dst) ; return; }
7304 /* else */ { subl(dst, value) ; return; }
7305 }
7307 void MacroAssembler::division_with_shift (Register reg, int shift_value) {
7308 assert (shift_value > 0, "illegal shift value");
7309 Label _is_positive;
7310 testl (reg, reg);
7311 jcc (Assembler::positive, _is_positive);
7312 int offset = (1 << shift_value) - 1 ;
7314 if (offset == 1) {
7315 incrementl(reg);
7316 } else {
7317 addl(reg, offset);
7318 }
7320 bind (_is_positive);
7321 sarl(reg, shift_value);
7322 }
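// Worked example for the adjustment above, assuming shift_value == 2
// (division by 4): an arithmetic shift alone rounds toward -infinity,
//   -7 >> 2 == -2,
// but Java division truncates toward zero, so for negative inputs we
// first add (1 << 2) - 1 == 3:
//   (-7 + 3) >> 2 == -1 == -7 / 4.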
7324 void MacroAssembler::divsd(XMMRegister dst, AddressLiteral src) {
7325 if (reachable(src)) {
7326 Assembler::divsd(dst, as_Address(src));
7327 } else {
7328 lea(rscratch1, src);
7329 Assembler::divsd(dst, Address(rscratch1, 0));
7330 }
7331 }
7333 void MacroAssembler::divss(XMMRegister dst, AddressLiteral src) {
7334 if (reachable(src)) {
7335 Assembler::divss(dst, as_Address(src));
7336 } else {
7337 lea(rscratch1, src);
7338 Assembler::divss(dst, Address(rscratch1, 0));
7339 }
7340 }
7342 // !defined(COMPILER2) is needed for core builds, which define neither COMPILER1 nor COMPILER2
7343 #if !defined(_LP64) || defined(COMPILER1) || !defined(COMPILER2)
7344 void MacroAssembler::empty_FPU_stack() {
7345 if (VM_Version::supports_mmx()) {
7346 emms();
7347 } else {
7348 for (int i = 8; i-- > 0; ) ffree(i);
7349 }
7350 }
7351 #endif // !LP64 || C1 || !C2
7354 // Defines obj, preserves var_size_in_bytes
7355 void MacroAssembler::eden_allocate(Register obj,
7356 Register var_size_in_bytes,
7357 int con_size_in_bytes,
7358 Register t1,
7359 Label& slow_case) {
7360 assert(obj == rax, "obj must be in rax for cmpxchg");
7361 assert_different_registers(obj, var_size_in_bytes, t1);
7362 if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
7363 jmp(slow_case);
7364 } else {
7365 Register end = t1;
7366 Label retry;
7367 bind(retry);
7368 ExternalAddress heap_top((address) Universe::heap()->top_addr());
7369 movptr(obj, heap_top);
7370 if (var_size_in_bytes == noreg) {
7371 lea(end, Address(obj, con_size_in_bytes));
7372 } else {
7373 lea(end, Address(obj, var_size_in_bytes, Address::times_1));
7374 }
7375 // if end < obj then we wrapped around => object too long => slow case
7376 cmpptr(end, obj);
7377 jcc(Assembler::below, slow_case);
7378 cmpptr(end, ExternalAddress((address) Universe::heap()->end_addr()));
7379 jcc(Assembler::above, slow_case);
7380 // Compare obj with the top addr, and if still equal, store the new top addr in
7381 // end at the address of the top addr pointer. Sets ZF if it was equal, and clears
7382 // it otherwise. Use lock prefix for atomicity on MPs.
7383 locked_cmpxchgptr(end, heap_top);
7384 jcc(Assembler::notEqual, retry);
7385 }
7386 }
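// The retry loop above is the standard CAS bump-pointer allocation; as a
// pseudocode sketch of the generated logic:
//   do {
//     obj = *heap_top;
//     end = obj + size;
//     if (end < obj || end > heap_end) goto slow_case;  // wrap or heap full
//   } while (cmpxchg(heap_top, expected: obj, new: end) != obj);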
7388 void MacroAssembler::enter() {
7389 push(rbp);
7390 mov(rbp, rsp);
7391 }
7393 // A 5 byte nop that is safe for patching (see patch_verified_entry)
7394 void MacroAssembler::fat_nop() {
7395 if (UseAddressNop) {
7396 addr_nop_5();
7397 } else {
7398 emit_byte(0x26); // es:
7399 emit_byte(0x2e); // cs:
7400 emit_byte(0x64); // fs:
7401 emit_byte(0x65); // gs:
7402 emit_byte(0x90);
7403 }
7404 }
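// Encoding note: the fallback above emits four segment-override prefixes
// (es: cs: fs: gs:) in front of a one-byte nop (0x90), forming a single
// 5-byte instruction, the same length as addr_nop_5, so patch_verified_entry
// can replace it with a 5-byte jump.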
7406 void MacroAssembler::fcmp(Register tmp) {
7407 fcmp(tmp, 1, true, true);
7408 }
7410 void MacroAssembler::fcmp(Register tmp, int index, bool pop_left, bool pop_right) {
7411 assert(!pop_right || pop_left, "usage error");
7412 if (VM_Version::supports_cmov()) {
7413 assert(tmp == noreg, "unneeded temp");
7414 if (pop_left) {
7415 fucomip(index);
7416 } else {
7417 fucomi(index);
7418 }
7419 if (pop_right) {
7420 fpop();
7421 }
7422 } else {
7423 assert(tmp != noreg, "need temp");
7424 if (pop_left) {
7425 if (pop_right) {
7426 fcompp();
7427 } else {
7428 fcomp(index);
7429 }
7430 } else {
7431 fcom(index);
7432 }
7433 // convert FPU condition into eflags condition via rax
7434 save_rax(tmp);
7435 fwait(); fnstsw_ax();
7436 sahf();
7437 restore_rax(tmp);
7438 }
7439 // condition codes set as follows:
7440 //
7441 // CF (corresponds to C0) if x < y
7442 // PF (corresponds to C2) if unordered
7443 // ZF (corresponds to C3) if x = y
7444 }
7446 void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less) {
7447 fcmp2int(dst, unordered_is_less, 1, true, true);
7448 }
7450 void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right) {
7451 fcmp(VM_Version::supports_cmov() ? noreg : dst, index, pop_left, pop_right);
7452 Label L;
7453 if (unordered_is_less) {
7454 movl(dst, -1);
7455 jcc(Assembler::parity, L);
7456 jcc(Assembler::below , L);
7457 movl(dst, 0);
7458 jcc(Assembler::equal , L);
7459 increment(dst);
7460 } else { // unordered is greater
7461 movl(dst, 1);
7462 jcc(Assembler::parity, L);
7463 jcc(Assembler::above , L);
7464 movl(dst, 0);
7465 jcc(Assembler::equal , L);
7466 decrementl(dst);
7467 }
7468 bind(L);
7469 }
7471 void MacroAssembler::fld_d(AddressLiteral src) {
7472 fld_d(as_Address(src));
7473 }
7475 void MacroAssembler::fld_s(AddressLiteral src) {
7476 fld_s(as_Address(src));
7477 }
7479 void MacroAssembler::fld_x(AddressLiteral src) {
7480 Assembler::fld_x(as_Address(src));
7481 }
7483 void MacroAssembler::fldcw(AddressLiteral src) {
7484 Assembler::fldcw(as_Address(src));
7485 }
7487 void MacroAssembler::pow_exp_core_encoding() {
7488 // kills rax, rcx, rdx
7489 subptr(rsp,sizeof(jdouble));
7490 // computes 2^X. Stack: X ...
7491 // f2xm1 computes 2^X-1 but only operates on -1<=X<=1. Get int(X) and
7492 // keep it on the thread's stack to compute 2^int(X) later
7494 // then compute 2^(X-int(X)) as (2^(X-int(X))-1)+1
7494 // final result is obtained with: 2^X = 2^int(X) * 2^(X-int(X))
7495 fld_s(0); // Stack: X X ...
7496 frndint(); // Stack: int(X) X ...
7497 fsuba(1); // Stack: int(X) X-int(X) ...
7498 fistp_s(Address(rsp,0)); // move int(X) as integer to thread's stack. Stack: X-int(X) ...
7499 f2xm1(); // Stack: 2^(X-int(X))-1 ...
7500 fld1(); // Stack: 1 2^(X-int(X))-1 ...
7501 faddp(1); // Stack: 2^(X-int(X))
7502 // computes 2^(int(X)): add exponent bias (1023) to int(X), then
7503 // shift int(X)+1023 to exponent position.
7504 // The exponent field is limited to 11 bits: if int(X)+1023 does not fit in 11
7505 // bits, set the result to NaN. 0x000 and 0x7FF are reserved exponent
7506 // values, so detect them and set the result to NaN as well.
7507 movl(rax,Address(rsp,0));
7508 movl(rcx, -2048); // 11 bit mask and valid NaN binary encoding
7509 addl(rax, 1023);
7510 movl(rdx,rax);
7511 shll(rax,20);
7512 // Check that 0 < int(X)+1023 < 2047. Otherwise set rax to NaN.
7513 addl(rdx,1);
7514 // Check that 1 < int(X)+1023+1 < 2048
7515 // in 3 steps:
7516 // 1- (int(X)+1023+1)&-2048 == 0 => 0 <= int(X)+1023+1 < 2048
7517 // 2- (int(X)+1023+1)&-2048 != 0
7518 // 3- (int(X)+1023+1)&-2048 != 1
7519 // Do 2- first because addl just updated the flags.
7520 cmov32(Assembler::equal,rax,rcx);
7521 cmpl(rdx,1);
7522 cmov32(Assembler::equal,rax,rcx);
7523 testl(rdx,rcx);
7524 cmov32(Assembler::notEqual,rax,rcx);
7525 movl(Address(rsp,4),rax);
7526 movl(Address(rsp,0),0);
7527 fmul_d(Address(rsp,0)); // Stack: 2^X ...
7528 addptr(rsp,sizeof(jdouble));
7529 }
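// Worked example of the exponent encoding above (a sketch): for int(X) = 3,
//   3 + 1023 = 1026 = 0x402;  0x402 << 20 = 0x40200000
// stored as the high dword (with a zero low dword) gives the IEEE-754
// double 0x4020000000000000 == 8.0 == 2^3, which then multiplies the
// 2^(X-int(X)) value left on the x87 stack.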
7531 void MacroAssembler::increase_precision() {
7532 subptr(rsp, BytesPerWord);
7533 fnstcw(Address(rsp, 0));
7534 movl(rax, Address(rsp, 0));
7535 orl(rax, 0x300);
7536 push(rax);
7537 fldcw(Address(rsp, 0));
7538 pop(rax);
7539 }
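// Note: or-ing 0x300 sets the precision-control field (bits 8..9) of the
// x87 control word to 11b, i.e. 64-bit (double-extended) precision, for
// the intermediate fyl2x/f2xm1 computations.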
7541 void MacroAssembler::restore_precision() {
7542 fldcw(Address(rsp, 0));
7543 addptr(rsp, BytesPerWord);
7544 }
7546 void MacroAssembler::fast_pow() {
7547 // computes X^Y = 2^(Y * log2(X))
7548 // if fast computation is not possible, result is NaN. Requires
7549 // fallback from user of this macro.
7550 // increase precision for intermediate steps of the computation
7551 increase_precision();
7552 fyl2x(); // Stack: (Y*log2(X)) ...
7553 pow_exp_core_encoding(); // Stack: exp(X) ...
7554 restore_precision();
7555 }
7557 void MacroAssembler::fast_exp() {
7558 // computes exp(X) = 2^(X * log2(e))
7559 // if fast computation is not possible, result is NaN. Requires
7560 // fallback from user of this macro.
7561 // increase precision for intermediate steps of the computation
7562 increase_precision();
7563 fldl2e(); // Stack: log2(e) X ...
7564 fmulp(1); // Stack: (X*log2(e)) ...
7565 pow_exp_core_encoding(); // Stack: exp(X) ...
7566 restore_precision();
7567 }
7569 void MacroAssembler::pow_or_exp(bool is_exp, int num_fpu_regs_in_use) {
7570 // kills rax, rcx, rdx
7571 // pow and exp need 2 extra registers on the FPU stack.
7572 Label slow_case, done;
7573 Register tmp = noreg;
7574 if (!VM_Version::supports_cmov()) {
7575 // fcmp needs a temporary, so preserve rdx
7576 tmp = rdx;
7577 }
7578 Register tmp2 = rax;
7579 Register tmp3 = rcx;
7581 if (is_exp) {
7582 // Stack: X
7583 fld_s(0); // duplicate argument for runtime call. Stack: X X
7584 fast_exp(); // Stack: exp(X) X
7585 fcmp(tmp, 0, false, false); // Stack: exp(X) X
7586 // exp(X) not equal to itself: exp(X) is NaN, go to slow case.
7587 jcc(Assembler::parity, slow_case);
7588 // get rid of duplicate argument. Stack: exp(X)
7589 if (num_fpu_regs_in_use > 0) {
7590 fxch();
7591 fpop();
7592 } else {
7593 ffree(1);
7594 }
7595 jmp(done);
7596 } else {
7597 // Stack: X Y
7598 Label x_negative, y_odd;
7600 fldz(); // Stack: 0 X Y
7601 fcmp(tmp, 1, true, false); // Stack: X Y
7602 jcc(Assembler::above, x_negative);
7604 // X >= 0
7606 fld_s(1); // duplicate arguments for runtime call. Stack: Y X Y
7607 fld_s(1); // Stack: X Y X Y
7608 fast_pow(); // Stack: X^Y X Y
7609 fcmp(tmp, 0, false, false); // Stack: X^Y X Y
7610 // X^Y not equal to itself: X^Y is NaN, go to slow case.
7611 jcc(Assembler::parity, slow_case);
7612 // get rid of duplicate arguments. Stack: X^Y
7613 if (num_fpu_regs_in_use > 0) {
7614 fxch(); fpop();
7615 fxch(); fpop();
7616 } else {
7617 ffree(2);
7618 ffree(1);
7619 }
7620 jmp(done);
7622 // X < 0
7623 bind(x_negative);
7625 fld_s(1); // Stack: Y X Y
7626 frndint(); // Stack: int(Y) X Y
7627 fcmp(tmp, 2, false, false); // Stack: int(Y) X Y
7628 jcc(Assembler::notEqual, slow_case);
7630 subptr(rsp, 8);
7632 // For X^Y, when X < 0, Y has to be an integer and the final
7633 // result depends on whether it's odd or even. We just checked
7634 // that int(Y) == Y. We move int(Y) to gp registers as a 64 bit
7635 // integer to test its parity. If int(Y) is huge and doesn't fit
7636 // in the 64 bit integer range, the integer indefinite value will
7637 // end up in the gp registers. Huge numbers are all even, and the
7638 // integer indefinite value is even, so this is fine.
7640 #ifdef ASSERT
7641 // Let's check we don't end up with an integer indefinite number
7642 // when not expected. First test for huge numbers: check whether
7643 // int(Y)+1 == int(Y) which is true for very large numbers and
7644 // those are all even. A 64 bit integer is guaranteed to not
7645 // overflow for numbers where y+1 != y (when precision is set to
7646 // double precision).
7647 Label y_not_huge;
7649 fld1(); // Stack: 1 int(Y) X Y
7650 fadd(1); // Stack: 1+int(Y) int(Y) X Y
7652 #ifdef _LP64
7653 // trip to memory to force the precision down from double extended
7654 // precision
7655 fstp_d(Address(rsp, 0));
7656 fld_d(Address(rsp, 0));
7657 #endif
7659 fcmp(tmp, 1, true, false); // Stack: int(Y) X Y
7660 #endif
7662 // move int(Y) as 64 bit integer to thread's stack
7663 fistp_d(Address(rsp,0)); // Stack: X Y
7665 #ifdef ASSERT
7666 jcc(Assembler::notEqual, y_not_huge);
7668 // Y is huge so we know it's even. It may not fit in a 64 bit
7669 // integer and we don't want the debug code below to see the
7670 // integer indefinite value so overwrite int(Y) on the thread's
7671 // stack with 0.
7672 movl(Address(rsp, 0), 0);
7673 movl(Address(rsp, 4), 0);
7675 bind(y_not_huge);
7676 #endif
7678 fld_s(1); // duplicate arguments for runtime call. Stack: Y X Y
7679 fld_s(1); // Stack: X Y X Y
7680 fabs(); // Stack: abs(X) Y X Y
7681 fast_pow(); // Stack: abs(X)^Y X Y
7682 fcmp(tmp, 0, false, false); // Stack: abs(X)^Y X Y
7683 // abs(X)^Y not equal to itself: abs(X)^Y is NaN, go to slow case.
7685 pop(tmp2);
7686 NOT_LP64(pop(tmp3));
7687 jcc(Assembler::parity, slow_case);
7689 #ifdef ASSERT
7690 // Check that int(Y) is not integer indefinite value (int
7691 // overflow). Shouldn't happen because for values that would
7692 // overflow, 1+int(Y)==Y which was tested earlier.
7693 #ifndef _LP64
7694 {
7695 Label integer;
7696 testl(tmp2, tmp2);
7697 jcc(Assembler::notZero, integer);
7698 cmpl(tmp3, 0x80000000);
7699 jcc(Assembler::notZero, integer);
7700 STOP("integer indefinite value shouldn't be seen here");
7701 bind(integer);
7702 }
7703 #else
7704 {
7705 Label integer;
7706 mov(tmp3, tmp2); // preserve tmp2 for parity check below
7707 shlq(tmp3, 1);
7708 jcc(Assembler::carryClear, integer);
7709 jcc(Assembler::notZero, integer);
7710 STOP("integer indefinite value shouldn't be seen here");
7711 bind(integer);
7712 }
7713 #endif
7714 #endif
7716 // get rid of duplicate arguments. Stack: X^Y
7717 if (num_fpu_regs_in_use > 0) {
7718 fxch(); fpop();
7719 fxch(); fpop();
7720 } else {
7721 ffree(2);
7722 ffree(1);
7723 }
7725 testl(tmp2, 1);
7726 jcc(Assembler::zero, done); // X <= 0, Y even: X^Y = abs(X)^Y
7727 // X <= 0, Y odd: X^Y = -abs(X)^Y
7729 fchs(); // Stack: -abs(X)^Y Y
7730 jmp(done);
7731 }
7733 // slow case: runtime call
7734 bind(slow_case);
7736 fpop(); // pop incorrect result or int(Y)
7738 fp_runtime_fallback(is_exp ? CAST_FROM_FN_PTR(address, SharedRuntime::dexp) : CAST_FROM_FN_PTR(address, SharedRuntime::dpow),
7739 is_exp ? 1 : 2, num_fpu_regs_in_use);
7741 // Come here with result in F-TOS
7742 bind(done);
7743 }
7745 void MacroAssembler::fpop() {
7746 ffree();
7747 fincstp();
7748 }
7750 void MacroAssembler::fremr(Register tmp) {
7751 save_rax(tmp);
7752 { Label L;
7753 bind(L);
7754 fprem();
7755 fwait(); fnstsw_ax();
7756 #ifdef _LP64
7757 testl(rax, 0x400);
7758 jcc(Assembler::notEqual, L);
7759 #else
7760 sahf();
7761 jcc(Assembler::parity, L);
7762 #endif // _LP64
7763 }
7764 restore_rax(tmp);
7765 // Result is in ST0.
7766 // Note: fxch & fpop to get rid of ST1
7767 // (otherwise FPU stack could overflow eventually)
7768 fxch(1);
7769 fpop();
7770 }
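// Note: fprem performs only a partial reduction per iteration, so the loop
// above repeats while the C2 flag of the FPU status word (bit 10, hence the
// 0x400 mask on LP64; C2 lands in PF after sahf on 32-bit) reports an
// incomplete remainder.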
7773 void MacroAssembler::incrementl(AddressLiteral dst) {
7774 if (reachable(dst)) {
7775 incrementl(as_Address(dst));
7776 } else {
7777 lea(rscratch1, dst);
7778 incrementl(Address(rscratch1, 0));
7779 }
7780 }
7782 void MacroAssembler::incrementl(ArrayAddress dst) {
7783 incrementl(as_Address(dst));
7784 }
7786 void MacroAssembler::incrementl(Register reg, int value) {
7787 if (value == min_jint) {addl(reg, value) ; return; }
7788 if (value < 0) { decrementl(reg, -value); return; }
7789 if (value == 0) { ; return; }
7790 if (value == 1 && UseIncDec) { incl(reg) ; return; }
7791 /* else */ { addl(reg, value) ; return; }
7792 }
7794 void MacroAssembler::incrementl(Address dst, int value) {
7795 if (value == min_jint) {addl(dst, value) ; return; }
7796 if (value < 0) { decrementl(dst, -value); return; }
7797 if (value == 0) { ; return; }
7798 if (value == 1 && UseIncDec) { incl(dst) ; return; }
7799 /* else */ { addl(dst, value) ; return; }
7800 }
7802 void MacroAssembler::jump(AddressLiteral dst) {
7803 if (reachable(dst)) {
7804 jmp_literal(dst.target(), dst.rspec());
7805 } else {
7806 lea(rscratch1, dst);
7807 jmp(rscratch1);
7808 }
7809 }
7811 void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst) {
7812 if (reachable(dst)) {
7813 InstructionMark im(this);
7814 relocate(dst.reloc());
7815 const int short_size = 2;
7816 const int long_size = 6;
7817 int offs = (intptr_t)dst.target() - ((intptr_t)_code_pos);
7818 if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) {
7819 // 0111 tttn #8-bit disp
7820 emit_byte(0x70 | cc);
7821 emit_byte((offs - short_size) & 0xFF);
7822 } else {
7823 // 0000 1111 1000 tttn #32-bit disp
7824 emit_byte(0x0F);
7825 emit_byte(0x80 | cc);
7826 emit_long(offs - long_size);
7827 }
7828 } else {
7829 #ifdef ASSERT
7830 warning("reversing conditional branch");
7831 #endif /* ASSERT */
7832 Label skip;
7833 jccb(reverse[cc], skip);
7834 lea(rscratch1, dst);
7835 Assembler::jmp(rscratch1);
7836 bind(skip);
7837 }
7838 }
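// Encoding reference for the reachable case above:
//   short form: 0x70|cc, disp8          (short_size == 2 bytes)
//   long form : 0x0F, 0x80|cc, disp32   (long_size  == 6 bytes)
// The displacement is relative to the end of the instruction, hence the
// (offs - short_size) / (offs - long_size) adjustments.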
7840 void MacroAssembler::ldmxcsr(AddressLiteral src) {
7841 if (reachable(src)) {
7842 Assembler::ldmxcsr(as_Address(src));
7843 } else {
7844 lea(rscratch1, src);
7845 Assembler::ldmxcsr(Address(rscratch1, 0));
7846 }
7847 }
7849 int MacroAssembler::load_signed_byte(Register dst, Address src) {
7850 int off;
7851 if (LP64_ONLY(true ||) VM_Version::is_P6()) {
7852 off = offset();
7853 movsbl(dst, src); // movsxb
7854 } else {
7855 off = load_unsigned_byte(dst, src);
7856 shll(dst, 24);
7857 sarl(dst, 24);
7858 }
7859 return off;
7860 }
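// Example of the shift pair in the non-P6 path: for a loaded byte 0x80,
//   shll(dst, 24) -> 0x80000000;  sarl(dst, 24) -> 0xFFFFFF80 == -128,
// reproducing in two instructions what movsbl does in one.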
7862 // Note: load_signed_short used to be called load_signed_word.
7863 // Although the 'w' in x86 opcodes refers to the term "word" in the assembler
7864 // manual, which means 16 bits, that usage is found nowhere in HotSpot code.
7865 // The term "word" in HotSpot means a 32- or 64-bit machine word.
7866 int MacroAssembler::load_signed_short(Register dst, Address src) {
7867 int off;
7868 if (LP64_ONLY(true ||) VM_Version::is_P6()) {
7869 // This is dubious: it seems safe to do a signed 16 => 64 bit
7870 // version, but this is what 64-bit has always done. This seems to imply
7871 // that callers only use 32 bits worth of the result.
7872 off = offset();
7873 movswl(dst, src); // movsxw
7874 } else {
7875 off = load_unsigned_short(dst, src);
7876 shll(dst, 16);
7877 sarl(dst, 16);
7878 }
7879 return off;
7880 }
7882 int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
7883 // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
7884 // and "3.9 Partial Register Penalties", p. 22.
7885 int off;
7886 if (LP64_ONLY(true || ) VM_Version::is_P6() || src.uses(dst)) {
7887 off = offset();
7888 movzbl(dst, src); // movzxb
7889 } else {
7890 xorl(dst, dst);
7891 off = offset();
7892 movb(dst, src);
7893 }
7894 return off;
7895 }
7897 // Note: load_unsigned_short used to be called load_unsigned_word.
7898 int MacroAssembler::load_unsigned_short(Register dst, Address src) {
7899 // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
7900 // and "3.9 Partial Register Penalties", p. 22.
7901 int off;
7902 if (LP64_ONLY(true ||) VM_Version::is_P6() || src.uses(dst)) {
7903 off = offset();
7904 movzwl(dst, src); // movzxw
7905 } else {
7906 xorl(dst, dst);
7907 off = offset();
7908 movw(dst, src);
7909 }
7910 return off;
7911 }
7913 void MacroAssembler::load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, Register dst2) {
7914 switch (size_in_bytes) {
7915 #ifndef _LP64
7916 case 8:
7917 assert(dst2 != noreg, "second dest register required");
7918 movl(dst, src);
7919 movl(dst2, src.plus_disp(BytesPerInt));
7920 break;
7921 #else
7922 case 8: movq(dst, src); break;
7923 #endif
7924 case 4: movl(dst, src); break;
7925 case 2: is_signed ? load_signed_short(dst, src) : load_unsigned_short(dst, src); break;
7926 case 1: is_signed ? load_signed_byte( dst, src) : load_unsigned_byte( dst, src); break;
7927 default: ShouldNotReachHere();
7928 }
7929 }
7931 void MacroAssembler::store_sized_value(Address dst, Register src, size_t size_in_bytes, Register src2) {
7932 switch (size_in_bytes) {
7933 #ifndef _LP64
7934 case 8:
7935 assert(src2 != noreg, "second source register required");
7936 movl(dst, src);
7937 movl(dst.plus_disp(BytesPerInt), src2);
7938 break;
7939 #else
7940 case 8: movq(dst, src); break;
7941 #endif
7942 case 4: movl(dst, src); break;
7943 case 2: movw(dst, src); break;
7944 case 1: movb(dst, src); break;
7945 default: ShouldNotReachHere();
7946 }
7947 }
7949 void MacroAssembler::mov32(AddressLiteral dst, Register src) {
7950 if (reachable(dst)) {
7951 movl(as_Address(dst), src);
7952 } else {
7953 lea(rscratch1, dst);
7954 movl(Address(rscratch1, 0), src);
7955 }
7956 }
7958 void MacroAssembler::mov32(Register dst, AddressLiteral src) {
7959 if (reachable(src)) {
7960 movl(dst, as_Address(src));
7961 } else {
7962 lea(rscratch1, src);
7963 movl(dst, Address(rscratch1, 0));
7964 }
7965 }
7967 // C++ bool manipulation
7969 void MacroAssembler::movbool(Register dst, Address src) {
7970 if(sizeof(bool) == 1)
7971 movb(dst, src);
7972 else if(sizeof(bool) == 2)
7973 movw(dst, src);
7974 else if(sizeof(bool) == 4)
7975 movl(dst, src);
7976 else
7977 // unsupported
7978 ShouldNotReachHere();
7979 }
7981 void MacroAssembler::movbool(Address dst, bool boolconst) {
7982 if(sizeof(bool) == 1)
7983 movb(dst, (int) boolconst);
7984 else if(sizeof(bool) == 2)
7985 movw(dst, (int) boolconst);
7986 else if(sizeof(bool) == 4)
7987 movl(dst, (int) boolconst);
7988 else
7989 // unsupported
7990 ShouldNotReachHere();
7991 }
7993 void MacroAssembler::movbool(Address dst, Register src) {
7994 if(sizeof(bool) == 1)
7995 movb(dst, src);
7996 else if(sizeof(bool) == 2)
7997 movw(dst, src);
7998 else if(sizeof(bool) == 4)
7999 movl(dst, src);
8000 else
8001 // unsupported
8002 ShouldNotReachHere();
8003 }
8005 void MacroAssembler::movbyte(ArrayAddress dst, int src) {
8006 movb(as_Address(dst), src);
8007 }
8009 void MacroAssembler::movdl(XMMRegister dst, AddressLiteral src) {
8010 if (reachable(src)) {
8011 movdl(dst, as_Address(src));
8012 } else {
8013 lea(rscratch1, src);
8014 movdl(dst, Address(rscratch1, 0));
8015 }
8016 }
8018 void MacroAssembler::movq(XMMRegister dst, AddressLiteral src) {
8019 if (reachable(src)) {
8020 movq(dst, as_Address(src));
8021 } else {
8022 lea(rscratch1, src);
8023 movq(dst, Address(rscratch1, 0));
8024 }
8025 }
8027 void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src) {
8028 if (reachable(src)) {
8029 if (UseXmmLoadAndClearUpper) {
8030 movsd (dst, as_Address(src));
8031 } else {
8032 movlpd(dst, as_Address(src));
8033 }
8034 } else {
8035 lea(rscratch1, src);
8036 if (UseXmmLoadAndClearUpper) {
8037 movsd (dst, Address(rscratch1, 0));
8038 } else {
8039 movlpd(dst, Address(rscratch1, 0));
8040 }
8041 }
8042 }
8044 void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src) {
8045 if (reachable(src)) {
8046 movss(dst, as_Address(src));
8047 } else {
8048 lea(rscratch1, src);
8049 movss(dst, Address(rscratch1, 0));
8050 }
8051 }
8053 void MacroAssembler::movptr(Register dst, Register src) {
8054 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
8055 }
8057 void MacroAssembler::movptr(Register dst, Address src) {
8058 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
8059 }
8061 // src should NEVER be a real pointer. Use AddressLiteral for true pointers
8062 void MacroAssembler::movptr(Register dst, intptr_t src) {
8063 LP64_ONLY(mov64(dst, src)) NOT_LP64(movl(dst, src));
8064 }
8066 void MacroAssembler::movptr(Address dst, Register src) {
8067 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
8068 }
8070 void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src) {
8071 if (reachable(src)) {
8072 Assembler::movsd(dst, as_Address(src));
8073 } else {
8074 lea(rscratch1, src);
8075 Assembler::movsd(dst, Address(rscratch1, 0));
8076 }
8077 }
8079 void MacroAssembler::movss(XMMRegister dst, AddressLiteral src) {
8080 if (reachable(src)) {
8081 Assembler::movss(dst, as_Address(src));
8082 } else {
8083 lea(rscratch1, src);
8084 Assembler::movss(dst, Address(rscratch1, 0));
8085 }
8086 }
8088 void MacroAssembler::mulsd(XMMRegister dst, AddressLiteral src) {
8089 if (reachable(src)) {
8090 Assembler::mulsd(dst, as_Address(src));
8091 } else {
8092 lea(rscratch1, src);
8093 Assembler::mulsd(dst, Address(rscratch1, 0));
8094 }
8095 }
8097 void MacroAssembler::mulss(XMMRegister dst, AddressLiteral src) {
8098 if (reachable(src)) {
8099 Assembler::mulss(dst, as_Address(src));
8100 } else {
8101 lea(rscratch1, src);
8102 Assembler::mulss(dst, Address(rscratch1, 0));
8103 }
8104 }
8106 void MacroAssembler::null_check(Register reg, int offset) {
8107 if (needs_explicit_null_check(offset)) {
8108 // provoke OS NULL exception if reg = NULL by
8109 // accessing M[reg] w/o changing any (non-CC) registers
8110 // NOTE: cmpl is plenty here to provoke a segv
8111 cmpptr(rax, Address(reg, 0));
8112 // Note: should probably use testl(rax, Address(reg, 0));
8113 // may be shorter code (however, this version of
8114 // testl needs to be implemented first)
8115 } else {
8116 // nothing to do, (later) access of M[reg + offset]
8117 // will provoke OS NULL exception if reg = NULL
8118 }
8119 }
8121 void MacroAssembler::os_breakpoint() {
8122 // instead of directly emitting a breakpoint, call os:breakpoint for better debugability
8123 // (e.g., MSVC can't call ps() otherwise)
8124 call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
8125 }
8127 void MacroAssembler::pop_CPU_state() {
8128 pop_FPU_state();
8129 pop_IU_state();
8130 }
8132 void MacroAssembler::pop_FPU_state() {
8133 NOT_LP64(frstor(Address(rsp, 0));)
8134 LP64_ONLY(fxrstor(Address(rsp, 0));)
8135 addptr(rsp, FPUStateSizeInWords * wordSize);
8136 }
8138 void MacroAssembler::pop_IU_state() {
8139 popa();
8140 LP64_ONLY(addq(rsp, 8));
8141 popf();
8142 }
8144 // Save Integer and Float state
8145 // Warning: Stack must be 16 byte aligned (64bit)
8146 void MacroAssembler::push_CPU_state() {
8147 push_IU_state();
8148 push_FPU_state();
8149 }
8151 void MacroAssembler::push_FPU_state() {
8152 subptr(rsp, FPUStateSizeInWords * wordSize);
8153 #ifndef _LP64
8154 fnsave(Address(rsp, 0));
8155 fwait();
8156 #else
8157 fxsave(Address(rsp, 0));
8158 #endif // LP64
8159 }
8161 void MacroAssembler::push_IU_state() {
8162 // Push flags first because pusha kills them
8163 pushf();
8164 // Make sure rsp stays 16-byte aligned
8165 LP64_ONLY(subq(rsp, 8));
8166 pusha();
8167 }
8169 void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp, bool clear_pc) {
8170 // determine java_thread register
8171 if (!java_thread->is_valid()) {
8172 java_thread = rdi;
8173 get_thread(java_thread);
8174 }
8175 // we must set sp to zero to clear frame
8176 movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
8177 if (clear_fp) {
8178 movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
8179 }
8181 if (clear_pc)
8182 movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
8184 }
8186 void MacroAssembler::restore_rax(Register tmp) {
8187 if (tmp == noreg) pop(rax);
8188 else if (tmp != rax) mov(rax, tmp);
8189 }
8191 void MacroAssembler::round_to(Register reg, int modulus) {
8192 addptr(reg, modulus - 1);
8193 andptr(reg, -modulus);
8194 }
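// Example: round_to(reg, 8) computes (reg + 7) & -8, so 13 -> 16 and
// 16 -> 16; modulus must be a power of two for the mask to be correct.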
8196 void MacroAssembler::save_rax(Register tmp) {
8197 if (tmp == noreg) push(rax);
8198 else if (tmp != rax) mov(tmp, rax);
8199 }
8201 // Write serialization page so VM thread can do a pseudo remote membar.
8202 // We use the current thread pointer to calculate a thread specific
8203 // offset to write to within the page. This minimizes bus traffic
8204 // due to cache line collision.
8205 void MacroAssembler::serialize_memory(Register thread, Register tmp) {
8206 movl(tmp, thread);
8207 shrl(tmp, os::get_serialize_page_shift_count());
8208 andl(tmp, (os::vm_page_size() - sizeof(int)));
8210 Address index(noreg, tmp, Address::times_1);
8211 ExternalAddress page(os::get_memory_serialize_page());
8213 // Size of store must match masking code above
8214 movl(as_Address(ArrayAddress(page, index)), tmp);
8215 }
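// Sketch of the offset computation above, assuming a 4096-byte page:
//   tmp = (thread >> get_serialize_page_shift_count()) & (4096 - sizeof(int))
// so distinct threads tend to store to distinct int-aligned slots of the
// shared serialization page, reducing cache-line contention.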
8217 // Calls to C land
8218 //
8219 // When entering C land, the rbp & rsp of the last Java frame have to be recorded
8220 // in the (thread-local) JavaThread object. When leaving C land, the last Java fp
8221 // has to be reset to 0. This is required to allow proper stack traversal.
8222 void MacroAssembler::set_last_Java_frame(Register java_thread,
8223 Register last_java_sp,
8224 Register last_java_fp,
8225 address last_java_pc) {
8226 // determine java_thread register
8227 if (!java_thread->is_valid()) {
8228 java_thread = rdi;
8229 get_thread(java_thread);
8230 }
8231 // determine last_java_sp register
8232 if (!last_java_sp->is_valid()) {
8233 last_java_sp = rsp;
8234 }
8236 // last_java_fp is optional
8238 if (last_java_fp->is_valid()) {
8239 movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), last_java_fp);
8240 }
8242 // last_java_pc is optional
8244 if (last_java_pc != NULL) {
8245 lea(Address(java_thread,
8246 JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()),
8247 InternalAddress(last_java_pc));
8249 }
8250 movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
8251 }
8253 void MacroAssembler::shlptr(Register dst, int imm8) {
8254 LP64_ONLY(shlq(dst, imm8)) NOT_LP64(shll(dst, imm8));
8255 }
8257 void MacroAssembler::shrptr(Register dst, int imm8) {
8258 LP64_ONLY(shrq(dst, imm8)) NOT_LP64(shrl(dst, imm8));
8259 }
8261 void MacroAssembler::sign_extend_byte(Register reg) {
8262 if (LP64_ONLY(true ||) (VM_Version::is_P6() && reg->has_byte_register())) {
8263 movsbl(reg, reg); // movsxb
8264 } else {
8265 shll(reg, 24);
8266 sarl(reg, 24);
8267 }
8268 }
8270 void MacroAssembler::sign_extend_short(Register reg) {
8271 if (LP64_ONLY(true ||) VM_Version::is_P6()) {
8272 movswl(reg, reg); // movsxw
8273 } else {
8274 shll(reg, 16);
8275 sarl(reg, 16);
8276 }
8277 }
8279 void MacroAssembler::testl(Register dst, AddressLiteral src) {
8280 assert(reachable(src), "Address should be reachable");
8281 testl(dst, as_Address(src));
8282 }
8284 void MacroAssembler::sqrtsd(XMMRegister dst, AddressLiteral src) {
8285 if (reachable(src)) {
8286 Assembler::sqrtsd(dst, as_Address(src));
8287 } else {
8288 lea(rscratch1, src);
8289 Assembler::sqrtsd(dst, Address(rscratch1, 0));
8290 }
8291 }
8293 void MacroAssembler::sqrtss(XMMRegister dst, AddressLiteral src) {
8294 if (reachable(src)) {
8295 Assembler::sqrtss(dst, as_Address(src));
8296 } else {
8297 lea(rscratch1, src);
8298 Assembler::sqrtss(dst, Address(rscratch1, 0));
8299 }
8300 }
8302 void MacroAssembler::subsd(XMMRegister dst, AddressLiteral src) {
8303 if (reachable(src)) {
8304 Assembler::subsd(dst, as_Address(src));
8305 } else {
8306 lea(rscratch1, src);
8307 Assembler::subsd(dst, Address(rscratch1, 0));
8308 }
8309 }
8311 void MacroAssembler::subss(XMMRegister dst, AddressLiteral src) {
8312 if (reachable(src)) {
8313 Assembler::subss(dst, as_Address(src));
8314 } else {
8315 lea(rscratch1, src);
8316 Assembler::subss(dst, Address(rscratch1, 0));
8317 }
8318 }
8320 void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src) {
8321 if (reachable(src)) {
8322 Assembler::ucomisd(dst, as_Address(src));
8323 } else {
8324 lea(rscratch1, src);
8325 Assembler::ucomisd(dst, Address(rscratch1, 0));
8326 }
8327 }
8329 void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src) {
8330 if (reachable(src)) {
8331 Assembler::ucomiss(dst, as_Address(src));
8332 } else {
8333 lea(rscratch1, src);
8334 Assembler::ucomiss(dst, Address(rscratch1, 0));
8335 }
8336 }
8338 void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src) {
8339 // Used in sign-bit flipping with aligned address.
8340 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
8341 if (reachable(src)) {
8342 Assembler::xorpd(dst, as_Address(src));
8343 } else {
8344 lea(rscratch1, src);
8345 Assembler::xorpd(dst, Address(rscratch1, 0));
8346 }
8347 }
8349 void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src) {
8350 // Used in sign-bit flipping with aligned address.
8351 assert((UseAVX > 0) || (((intptr_t)src.target() & 15) == 0), "SSE mode requires address alignment 16 bytes");
8352 if (reachable(src)) {
8353 Assembler::xorps(dst, as_Address(src));
8354 } else {
8355 lea(rscratch1, src);
8356 Assembler::xorps(dst, Address(rscratch1, 0));
8357 }
8358 }
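// Typical use of the two helpers above: xor-ing dst with a 16-byte-aligned
// constant mask such as 0x8000000000000000 (for a double) flips the sign
// bit, i.e. implements scalar floating-point negation.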
8360 // AVX 3-operands instructions
8362 void MacroAssembler::vaddsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
8363 if (reachable(src)) {
8364 vaddsd(dst, nds, as_Address(src));
8365 } else {
8366 lea(rscratch1, src);
8367 vaddsd(dst, nds, Address(rscratch1, 0));
8368 }
8369 }
8371 void MacroAssembler::vaddss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
8372 if (reachable(src)) {
8373 vaddss(dst, nds, as_Address(src));
8374 } else {
8375 lea(rscratch1, src);
8376 vaddss(dst, nds, Address(rscratch1, 0));
8377 }
8378 }
8380 void MacroAssembler::vandpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256) {
8381 if (reachable(src)) {
8382 vandpd(dst, nds, as_Address(src), vector256);
8383 } else {
8384 lea(rscratch1, src);
8385 vandpd(dst, nds, Address(rscratch1, 0), vector256);
8386 }
8387 }
8389 void MacroAssembler::vandps(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256) {
8390 if (reachable(src)) {
8391 vandps(dst, nds, as_Address(src), vector256);
8392 } else {
8393 lea(rscratch1, src);
8394 vandps(dst, nds, Address(rscratch1, 0), vector256);
8395 }
8396 }
8398 void MacroAssembler::vdivsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
8399 if (reachable(src)) {
8400 vdivsd(dst, nds, as_Address(src));
8401 } else {
8402 lea(rscratch1, src);
8403 vdivsd(dst, nds, Address(rscratch1, 0));
8404 }
8405 }
8407 void MacroAssembler::vdivss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
8408 if (reachable(src)) {
8409 vdivss(dst, nds, as_Address(src));
8410 } else {
8411 lea(rscratch1, src);
8412 vdivss(dst, nds, Address(rscratch1, 0));
8413 }
8414 }
8416 void MacroAssembler::vmulsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
8417 if (reachable(src)) {
8418 vmulsd(dst, nds, as_Address(src));
8419 } else {
8420 lea(rscratch1, src);
8421 vmulsd(dst, nds, Address(rscratch1, 0));
8422 }
8423 }
8425 void MacroAssembler::vmulss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
8426 if (reachable(src)) {
8427 vmulss(dst, nds, as_Address(src));
8428 } else {
8429 lea(rscratch1, src);
8430 vmulss(dst, nds, Address(rscratch1, 0));
8431 }
8432 }
8434 void MacroAssembler::vsubsd(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
8435 if (reachable(src)) {
8436 vsubsd(dst, nds, as_Address(src));
8437 } else {
8438 lea(rscratch1, src);
8439 vsubsd(dst, nds, Address(rscratch1, 0));
8440 }
8441 }
8443 void MacroAssembler::vsubss(XMMRegister dst, XMMRegister nds, AddressLiteral src) {
8444 if (reachable(src)) {
8445 vsubss(dst, nds, as_Address(src));
8446 } else {
8447 lea(rscratch1, src);
8448 vsubss(dst, nds, Address(rscratch1, 0));
8449 }
8450 }
8452 void MacroAssembler::vxorpd(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256) {
8453 if (reachable(src)) {
8454 vxorpd(dst, nds, as_Address(src), vector256);
8455 } else {
8456 lea(rscratch1, src);
8457 vxorpd(dst, nds, Address(rscratch1, 0), vector256);
8458 }
8459 }
8461 void MacroAssembler::vxorps(XMMRegister dst, XMMRegister nds, AddressLiteral src, bool vector256) {
8462 if (reachable(src)) {
8463 vxorps(dst, nds, as_Address(src), vector256);
8464 } else {
8465 lea(rscratch1, src);
8466 vxorps(dst, nds, Address(rscratch1, 0), vector256);
8467 }
8468 }
8471 //////////////////////////////////////////////////////////////////////////////////
8472 #ifndef SERIALGC
8474 void MacroAssembler::g1_write_barrier_pre(Register obj,
8475 Register pre_val,
8476 Register thread,
8477 Register tmp,
8478 bool tosca_live,
8479 bool expand_call) {
8481 // If expand_call is true then we expand the call_VM_leaf macro
8482 // directly to skip generating the check by
8483 // InterpreterMacroAssembler::call_VM_leaf_base that checks _last_sp.
8485 #ifdef _LP64
8486 assert(thread == r15_thread, "must be");
8487 #endif // _LP64
8489 Label done;
8490 Label runtime;
8492 assert(pre_val != noreg, "check this code");
8494 if (obj != noreg) {
8495 assert_different_registers(obj, pre_val, tmp);
8496 assert(pre_val != rax, "check this code");
8497 }
8499 Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
8500 PtrQueue::byte_offset_of_active()));
8501 Address index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
8502 PtrQueue::byte_offset_of_index()));
8503 Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
8504 PtrQueue::byte_offset_of_buf()));
8507 // Is marking active?
8508 if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
8509 cmpl(in_progress, 0);
8510 } else {
8511 assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
8512 cmpb(in_progress, 0);
8513 }
8514 jcc(Assembler::equal, done);
8516 // Do we need to load the previous value?
8517 if (obj != noreg) {
8518 load_heap_oop(pre_val, Address(obj, 0));
8519 }
8521 // Is the previous value null?
8522 cmpptr(pre_val, (int32_t) NULL_WORD);
8523 jcc(Assembler::equal, done);
8525 // Can we store original value in the thread's buffer?
8526 // Is index == 0?
8527 // (The index field is typed as size_t.)
8529 movptr(tmp, index); // tmp := *index_adr
8530 cmpptr(tmp, 0); // tmp == 0?
8531 jcc(Assembler::equal, runtime); // If yes, goto runtime
8533 subptr(tmp, wordSize); // tmp := tmp - wordSize
8534 movptr(index, tmp); // *index_adr := tmp
8535 addptr(tmp, buffer); // tmp := tmp + *buffer_adr
8537 // Record the previous value
8538 movptr(Address(tmp, 0), pre_val);
8539 jmp(done);
8541 bind(runtime);
8542 // save the live input values
8543 if(tosca_live) push(rax);
8545 if (obj != noreg && obj != rax)
8546 push(obj);
8548 if (pre_val != rax)
8549 push(pre_val);
8551 // Calling the runtime using the regular call_VM_leaf mechanism generates
8552 // code (generated by InterpreterMacroAssembler::call_VM_leaf_base)
8553 // that checks that the *(ebp+frame::interpreter_frame_last_sp) == NULL.
8554 //
8555 // If we are generating the pre-barrier without a frame (e.g. in the
8556 // intrinsified Reference.get() routine) then ebp might be pointing to
8557 // the caller frame and so this check will most likely fail at runtime.
8558 //
8559 // Expanding the call directly bypasses the generation of the check.
8560 // So when we do not have a full interpreter frame on the stack,
8561 // expand_call should be passed true.
8563 NOT_LP64( push(thread); )
8565 if (expand_call) {
8566 LP64_ONLY( assert(pre_val != c_rarg1, "smashed arg"); )
8567 pass_arg1(this, thread);
8568 pass_arg0(this, pre_val);
8569 MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), 2);
8570 } else {
8571 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), pre_val, thread);
8572 }
8574 NOT_LP64( pop(thread); )
8576 // restore the live input values
8577 if (pre_val != rax)
8578 pop(pre_val);
8580 if (obj != noreg && obj != rax)
8581 pop(obj);
8583 if(tosca_live) pop(rax);
8585 bind(done);
8586 }
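// In pseudocode, the SATB pre-barrier above is roughly:
//   if (marking_active) {
//     if (obj != noreg) pre_val = *obj;        // load previous value
//     if (pre_val != NULL) {
//       if (index != 0) {                      // room in thread-local buffer
//         index -= wordSize; buf[index] = pre_val;
//       } else {
//         g1_wb_pre(pre_val, thread);          // runtime slow path
//       }
//     }
//   }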
8588 void MacroAssembler::g1_write_barrier_post(Register store_addr,
8589 Register new_val,
8590 Register thread,
8591 Register tmp,
8592 Register tmp2) {
8593 #ifdef _LP64
8594 assert(thread == r15_thread, "must be");
8595 #endif // _LP64
8597 Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
8598 PtrQueue::byte_offset_of_index()));
8599 Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
8600 PtrQueue::byte_offset_of_buf()));
8602 BarrierSet* bs = Universe::heap()->barrier_set();
8603 CardTableModRefBS* ct = (CardTableModRefBS*)bs;
8604 Label done;
8605 Label runtime;
8607 // Does store cross heap regions?
8609 movptr(tmp, store_addr);
8610 xorptr(tmp, new_val);
8611 shrptr(tmp, HeapRegion::LogOfHRGrainBytes);
8612 jcc(Assembler::equal, done);
8614 // crosses regions, storing NULL?
8616 cmpptr(new_val, (int32_t) NULL_WORD);
8617 jcc(Assembler::equal, done);
8619 // storing region crossing non-NULL, is card already dirty?
8621 ExternalAddress cardtable((address) ct->byte_map_base);
8622 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
8623 #ifdef _LP64
8624 const Register card_addr = tmp;
8626 movq(card_addr, store_addr);
8627 shrq(card_addr, CardTableModRefBS::card_shift);
8629 lea(tmp2, cardtable);
8631 // get the address of the card
8632 addq(card_addr, tmp2);
8633 #else
8634 const Register card_index = tmp;
8636 movl(card_index, store_addr);
8637 shrl(card_index, CardTableModRefBS::card_shift);
8639 Address index(noreg, card_index, Address::times_1);
8640 const Register card_addr = tmp;
8641 lea(card_addr, as_Address(ArrayAddress(cardtable, index)));
8642 #endif
8643 cmpb(Address(card_addr, 0), 0);
8644 jcc(Assembler::equal, done);
8646 // storing a region-crossing, non-NULL oop and the card is clean:
8647 // dirty the card and log it.
8649 movb(Address(card_addr, 0), 0);
8651 cmpl(queue_index, 0);
8652 jcc(Assembler::equal, runtime);
8653 subl(queue_index, wordSize);
8654 movptr(tmp2, buffer);
8655 #ifdef _LP64
8656 movslq(rscratch1, queue_index);
8657 addq(tmp2, rscratch1);
8658 movq(Address(tmp2, 0), card_addr);
8659 #else
8660 addl(tmp2, queue_index);
8661 movl(Address(tmp2, 0), card_index);
8662 #endif
8663 jmp(done);
8665 bind(runtime);
8666 // save the live input values
8667 push(store_addr);
8668 push(new_val);
8669 #ifdef _LP64
8670 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, r15_thread);
8671 #else
8672 push(thread);
8673 call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
8674 pop(thread);
8675 #endif
8676 pop(new_val);
8677 pop(store_addr);
8679 bind(done);
8680 }
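// Roughly, the post-barrier above performs:
//   if (((store_addr ^ new_val) >> LogOfHRGrainBytes) == 0) return; // same region
//   if (new_val == NULL) return;
//   card = &byte_map_base[store_addr >> card_shift];
//   if (*card == 0) return;                    // 0 denotes an already-dirty card
//   *card = 0;                                 // dirty it
//   enqueue the card in the thread's dirty-card queue, falling back to
//   g1_wb_post when the queue is full.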
8682 #endif // SERIALGC
8683 //////////////////////////////////////////////////////////////////////////////////
8686 void MacroAssembler::store_check(Register obj) {
8687 // Does a store check for the oop in register obj. The content of
8688 // register obj is destroyed afterwards.
8689 store_check_part_1(obj);
8690 store_check_part_2(obj);
8691 }
8693 void MacroAssembler::store_check(Register obj, Address dst) {
8694 store_check(obj);
8695 }
8698 // split the store check operation so that other instructions can be scheduled in between
8699 void MacroAssembler::store_check_part_1(Register obj) {
8700 BarrierSet* bs = Universe::heap()->barrier_set();
8701 assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
8702 shrptr(obj, CardTableModRefBS::card_shift);
8703 }
8705 void MacroAssembler::store_check_part_2(Register obj) {
8706 BarrierSet* bs = Universe::heap()->barrier_set();
8707 assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
8708 CardTableModRefBS* ct = (CardTableModRefBS*)bs;
8709 assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
8711 // The calculation for byte_map_base is as follows:
8712 // byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
8713 // So this essentially converts an address to a displacement and
8714 // it will never need to be relocated. On 64bit however the value may be too
8715 // large for a 32bit displacement
8717 intptr_t disp = (intptr_t) ct->byte_map_base;
8718 if (is_simm32(disp)) {
8719 Address cardtable(noreg, obj, Address::times_1, disp);
8720 movb(cardtable, 0);
8721 } else {
8722 // By doing it as an ExternalAddress disp could be converted to a rip-relative
8723 // displacement and done in a single instruction given favorable mapping and
8724 // a smarter version of as_Address. Worst case it is two instructions, which
8725 // is no worse than loading disp into a register and using a simple
8726 // Address() as above.
8727 // We can't use ExternalAddress as the only style since if disp == 0 we'll
8728 // assert, since NULL isn't acceptable in a reloc (see 6644928). In any case
8729 // in some cases we'll get a single instruction version.
8731 ExternalAddress cardtable((address)disp);
8732 Address index(noreg, obj, Address::times_1);
8733 movb(as_Address(ArrayAddress(cardtable, index)), 0);
8734 }
8735 }
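// Card-table sketch for the two parts above (assuming the usual 512-byte
// cards, card_shift == 9):
//   part 1: obj = obj >> 9;                    // card index
//   part 2: byte_map_base[obj] = 0;            // 0 == dirty
// byte_map_base is pre-biased by -(low_bound >> 9), so the shifted address
// indexes it directly, as the displacement comment above explains.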
8737 void MacroAssembler::subptr(Register dst, int32_t imm32) {
8738 LP64_ONLY(subq(dst, imm32)) NOT_LP64(subl(dst, imm32));
8739 }
8741 // Force generation of a 4-byte immediate value even if it fits into 8 bits
8742 void MacroAssembler::subptr_imm32(Register dst, int32_t imm32) {
8743 LP64_ONLY(subq_imm32(dst, imm32)) NOT_LP64(subl_imm32(dst, imm32));
8744 }
8746 void MacroAssembler::subptr(Register dst, Register src) {
8747 LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src));
8748 }
8750 // C++ bool manipulation
8751 void MacroAssembler::testbool(Register dst) {
8752 if(sizeof(bool) == 1)
8753 testb(dst, 0xff);
8754 else if(sizeof(bool) == 2) {
8755 // testw implementation needed for two byte bools
8756 ShouldNotReachHere();
8757 } else if(sizeof(bool) == 4)
8758 testl(dst, dst);
8759 else
8760 // unsupported
8761 ShouldNotReachHere();
8762 }
8764 void MacroAssembler::testptr(Register dst, Register src) {
8765 LP64_ONLY(testq(dst, src)) NOT_LP64(testl(dst, src));
8766 }
8768 // Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
8769 void MacroAssembler::tlab_allocate(Register obj,
8770 Register var_size_in_bytes,
8771 int con_size_in_bytes,
8772 Register t1,
8773 Register t2,
8774 Label& slow_case) {
8775 assert_different_registers(obj, t1, t2);
8776 assert_different_registers(obj, var_size_in_bytes, t1);
8777 Register end = t2;
8778 Register thread = NOT_LP64(t1) LP64_ONLY(r15_thread);
8780 verify_tlab();
8782 NOT_LP64(get_thread(thread));
8784 movptr(obj, Address(thread, JavaThread::tlab_top_offset()));
8785 if (var_size_in_bytes == noreg) {
8786 lea(end, Address(obj, con_size_in_bytes));
8787 } else {
8788 lea(end, Address(obj, var_size_in_bytes, Address::times_1));
8789 }
8790 cmpptr(end, Address(thread, JavaThread::tlab_end_offset()));
8791 jcc(Assembler::above, slow_case);
8793 // update the tlab top pointer
8794 movptr(Address(thread, JavaThread::tlab_top_offset()), end);
8796 // recover var_size_in_bytes if necessary
8797 if (var_size_in_bytes == end) {
8798 subptr(var_size_in_bytes, obj);
8799 }
8800 verify_tlab();
8801 }
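// TLAB fast path above, as a pseudocode sketch:
//   obj = thread->tlab_top;
//   end = obj + size;
//   if (end > thread->tlab_end) goto slow_case;
//   thread->tlab_top = end;   // no CAS needed: the TLAB is thread-private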
8803 // Preserves rbx and rdx.
8804 Register MacroAssembler::tlab_refill(Label& retry,
8805 Label& try_eden,
8806 Label& slow_case) {
8807 Register top = rax;
8808 Register t1 = rcx;
8809 Register t2 = rsi;
8810 Register thread_reg = NOT_LP64(rdi) LP64_ONLY(r15_thread);
8811 assert_different_registers(top, thread_reg, t1, t2, /* preserve: */ rbx, rdx);
8812 Label do_refill, discard_tlab;
8814 if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
8815 // No allocation in the shared eden.
8816 jmp(slow_case);
8817 }
8819 NOT_LP64(get_thread(thread_reg));
8821 movptr(top, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
8822 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));
8824 // calculate amount of free space
8825 subptr(t1, top);
8826 shrptr(t1, LogHeapWordSize);
8828 // Retain tlab and allocate object in shared space if
8829 // the amount free in the tlab is too large to discard.
8830 cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())));
8831 jcc(Assembler::lessEqual, discard_tlab);
8833 // Retain
8834 // %%% yuck as movptr...
8835 movptr(t2, (int32_t) ThreadLocalAllocBuffer::refill_waste_limit_increment());
8836 addptr(Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())), t2);
8837 if (TLABStats) {
8838 // increment number of slow_allocations
8839 addl(Address(thread_reg, in_bytes(JavaThread::tlab_slow_allocations_offset())), 1);
8840 }
8841 jmp(try_eden);
8843 bind(discard_tlab);
8844 if (TLABStats) {
8845 // increment number of refills
8846 addl(Address(thread_reg, in_bytes(JavaThread::tlab_number_of_refills_offset())), 1);
8847 // accumulate wastage -- t1 is amount free in tlab
8848 addl(Address(thread_reg, in_bytes(JavaThread::tlab_fast_refill_waste_offset())), t1);
8849 }
8851 // if tlab is currently allocated (top or end != null) then
8852 // fill [top, end + alignment_reserve) with array object
8853 testptr(top, top);
8854 jcc(Assembler::zero, do_refill);
8856 // set up the mark word
8857 movptr(Address(top, oopDesc::mark_offset_in_bytes()), (intptr_t)markOopDesc::prototype()->copy_set_hash(0x2));
8858 // set the length to the remaining space
8859 subptr(t1, typeArrayOopDesc::header_size(T_INT));
8860 addptr(t1, (int32_t)ThreadLocalAllocBuffer::alignment_reserve());
8861 shlptr(t1, log2_intptr(HeapWordSize/sizeof(jint)));
8862 movl(Address(top, arrayOopDesc::length_offset_in_bytes()), t1);
8863 // set klass to intArrayKlass
8864 // dubious reloc: why not an oop reloc?
8865 movptr(t1, ExternalAddress((address)Universe::intArrayKlassObj_addr()));
8866 // store klass last; concurrent GCs assume the length is valid if the
8867 // klass field is not null.
8868 store_klass(top, t1);
8870 movptr(t1, top);
8871 subptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())));
8872 incr_allocated_bytes(thread_reg, t1, 0);
8874 // refill the tlab with an eden allocation
8875 bind(do_refill);
8876 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
8877 shlptr(t1, LogHeapWordSize);
8878 // allocate new tlab, address returned in top
8879 eden_allocate(top, t1, 0, t2, slow_case);
8881 // Check that t1 was preserved in eden_allocate.
8882 #ifdef ASSERT
8883 if (UseTLAB) {
8884 Label ok;
8885 Register tsize = rsi;
8886 assert_different_registers(tsize, thread_reg, t1);
8887 push(tsize);
8888 movptr(tsize, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
8889 shlptr(tsize, LogHeapWordSize);
8890 cmpptr(t1, tsize);
8891 jcc(Assembler::equal, ok);
8892 STOP("assert(t1 != tlab size)");
8893 should_not_reach_here();
8895 bind(ok);
8896 pop(tsize);
8897 }
8898 #endif
8899 movptr(Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())), top);
8900 movptr(Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())), top);
8901 addptr(top, t1);
8902 subptr(top, (int32_t)ThreadLocalAllocBuffer::alignment_reserve_in_bytes());
8903 movptr(Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())), top);
8904 verify_tlab();
8905 jmp(retry);
8907 return thread_reg; // for use by caller
8908 }
8910 void MacroAssembler::incr_allocated_bytes(Register thread,
8911 Register var_size_in_bytes,
8912 int con_size_in_bytes,
8913 Register t1) {
8914 if (!thread->is_valid()) {
8915 #ifdef _LP64
8916 thread = r15_thread;
8917 #else
8918 assert(t1->is_valid(), "need temp reg");
8919 thread = t1;
8920 get_thread(thread);
8921 #endif
8922 }
8924 #ifdef _LP64
8925 if (var_size_in_bytes->is_valid()) {
8926 addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
8927 } else {
8928 addq(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
8929 }
8930 #else
8931 if (var_size_in_bytes->is_valid()) {
8932 addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), var_size_in_bytes);
8933 } else {
8934 addl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())), con_size_in_bytes);
8935 }
8936 adcl(Address(thread, in_bytes(JavaThread::allocated_bytes_offset())+4), 0);
8937 #endif
8938 }
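// On 32-bit, the addl/adcl pair above forms a 64-bit add of the byte count:
//   low  += size;      // addl sets CF on carry-out of the low word
//   high += 0 + CF;    // adcl folds the carry into the high word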
8940 void MacroAssembler::fp_runtime_fallback(address runtime_entry, int nb_args, int num_fpu_regs_in_use) {
8941 pusha();
8943 // if we are coming from c1, xmm registers may be live
8944 int off = 0;
8945 if (UseSSE == 1) {
8946 subptr(rsp, sizeof(jdouble)*8);
8947 movflt(Address(rsp,off++*sizeof(jdouble)),xmm0);
8948 movflt(Address(rsp,off++*sizeof(jdouble)),xmm1);
8949 movflt(Address(rsp,off++*sizeof(jdouble)),xmm2);
8950 movflt(Address(rsp,off++*sizeof(jdouble)),xmm3);
8951 movflt(Address(rsp,off++*sizeof(jdouble)),xmm4);
8952 movflt(Address(rsp,off++*sizeof(jdouble)),xmm5);
8953 movflt(Address(rsp,off++*sizeof(jdouble)),xmm6);
8954 movflt(Address(rsp,off++*sizeof(jdouble)),xmm7);
8955 } else if (UseSSE >= 2) {
8956 #ifdef COMPILER2
8957 if (MaxVectorSize > 16) {
8958 assert(UseAVX > 0, "256bit vectors are supported only with AVX");
8959 // Save the upper halves of the YMM registers
8960 subptr(rsp, 16 * LP64_ONLY(16) NOT_LP64(8));
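// 16 bytes per upper half: 16 XMM registers on LP64, 8 on 32-bit.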
8961 vextractf128h(Address(rsp, 0),xmm0);
8962 vextractf128h(Address(rsp, 16),xmm1);
8963 vextractf128h(Address(rsp, 32),xmm2);
8964 vextractf128h(Address(rsp, 48),xmm3);
8965 vextractf128h(Address(rsp, 64),xmm4);
8966 vextractf128h(Address(rsp, 80),xmm5);
8967 vextractf128h(Address(rsp, 96),xmm6);
8968 vextractf128h(Address(rsp,112),xmm7);
8969 #ifdef _LP64
8970 vextractf128h(Address(rsp,128),xmm8);
8971 vextractf128h(Address(rsp,144),xmm9);
8972 vextractf128h(Address(rsp,160),xmm10);
8973 vextractf128h(Address(rsp,176),xmm11);
8974 vextractf128h(Address(rsp,192),xmm12);
8975 vextractf128h(Address(rsp,208),xmm13);
8976 vextractf128h(Address(rsp,224),xmm14);
8977 vextractf128h(Address(rsp,240),xmm15);
8978 #endif
8979 }
8980 #endif
8981 // Save the whole 128-bit (16-byte) XMM registers
8982 subptr(rsp, 16 * LP64_ONLY(16) NOT_LP64(8));
8983 movdqu(Address(rsp,off++*16),xmm0);
8984 movdqu(Address(rsp,off++*16),xmm1);
8985 movdqu(Address(rsp,off++*16),xmm2);
8986 movdqu(Address(rsp,off++*16),xmm3);
8987 movdqu(Address(rsp,off++*16),xmm4);
8988 movdqu(Address(rsp,off++*16),xmm5);
8989 movdqu(Address(rsp,off++*16),xmm6);
8990 movdqu(Address(rsp,off++*16),xmm7);
8991 #ifdef _LP64
8992 movdqu(Address(rsp,off++*16),xmm8);
8993 movdqu(Address(rsp,off++*16),xmm9);
8994 movdqu(Address(rsp,off++*16),xmm10);
8995 movdqu(Address(rsp,off++*16),xmm11);
8996 movdqu(Address(rsp,off++*16),xmm12);
8997 movdqu(Address(rsp,off++*16),xmm13);
8998 movdqu(Address(rsp,off++*16),xmm14);
8999 movdqu(Address(rsp,off++*16),xmm15);
9000 #endif
9001 }
9003 // Preserve registers across runtime call
9004 int incoming_argument_and_return_value_offset = -1;
9005 if (num_fpu_regs_in_use > 1) {
9006 // Must preserve all other FPU regs (could alternatively convert
9007 // SharedRuntime::dsin, dcos etc. into assembly routines known not to trash
9008 // FPU state, but cannot trust the C compiler)
9009 NEEDS_CLEANUP;
9010 // NOTE that in this case we also push the incoming argument(s) to
9011 // the stack and restore it later; we also use this stack slot to
9012 // hold the return value from dsin, dcos etc.
9013 for (int i = 0; i < num_fpu_regs_in_use; i++) {
9014 subptr(rsp, sizeof(jdouble));
9015 fstp_d(Address(rsp, 0));
9016 }
9017 incoming_argument_and_return_value_offset = sizeof(jdouble)*(num_fpu_regs_in_use-1);
9018 for (int i = nb_args-1; i >= 0; i--) {
9019 fld_d(Address(rsp, incoming_argument_and_return_value_offset-i*sizeof(jdouble)));
9020 }
9021 }
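// Spill the x87 argument(s) to the stack; on LP64 they are reloaded into xmm0/xmm1 below.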
9023 subptr(rsp, nb_args*sizeof(jdouble));
9024 for (int i = 0; i < nb_args; i++) {
9025 fstp_d(Address(rsp, i*sizeof(jdouble)));
9026 }
9028 #ifdef _LP64
9029 if (nb_args > 0) {
9030 movdbl(xmm0, Address(rsp, 0));
9031 }
9032 if (nb_args > 1) {
9033 movdbl(xmm1, Address(rsp, sizeof(jdouble)));
9034 }
9035 assert(nb_args <= 2, "unsupported number of args");
9036 #endif // _LP64
9038 // NOTE: we must not use call_VM_leaf here because that requires a
9039 // complete interpreter frame in debug mode -- same bug as 4387334
9040 // MacroAssembler::call_VM_leaf_base is perfectly safe and will
9041 // follow the proper 64-bit ABI
9043 NEEDS_CLEANUP;
9044 // Need to add stack banging before this runtime call if it needs to
9045 // be taken; however, there is no generic stack banging routine at
9046 // the MacroAssembler level
9048 MacroAssembler::call_VM_leaf_base(runtime_entry, 0);
9050 #ifdef _LP64
9051 movsd(Address(rsp, 0), xmm0);
9052 fld_d(Address(rsp, 0));
9053 #endif // _LP64
9054 addptr(rsp, sizeof(jdouble) * nb_args);
9055 if (num_fpu_regs_in_use > 1) {
9056 // Must save return value to stack and then restore entire FPU
9057 // stack except incoming arguments
9058 fstp_d(Address(rsp, incoming_argument_and_return_value_offset));
9059 for (int i = 0; i < num_fpu_regs_in_use - nb_args; i++) {
9060 fld_d(Address(rsp, 0));
9061 addptr(rsp, sizeof(jdouble));
9062 }
9063 fld_d(Address(rsp, (nb_args-1)*sizeof(jdouble)));
9064 addptr(rsp, sizeof(jdouble) * nb_args);
9065 }
9067 off = 0;
9068 if (UseSSE == 1) {
9069 movflt(xmm0, Address(rsp,off++*sizeof(jdouble)));
9070 movflt(xmm1, Address(rsp,off++*sizeof(jdouble)));
9071 movflt(xmm2, Address(rsp,off++*sizeof(jdouble)));
9072 movflt(xmm3, Address(rsp,off++*sizeof(jdouble)));
9073 movflt(xmm4, Address(rsp,off++*sizeof(jdouble)));
9074 movflt(xmm5, Address(rsp,off++*sizeof(jdouble)));
9075 movflt(xmm6, Address(rsp,off++*sizeof(jdouble)));
9076 movflt(xmm7, Address(rsp,off++*sizeof(jdouble)));
9077 addptr(rsp, sizeof(jdouble)*8);
9078 } else if (UseSSE >= 2) {
9079 // Restore the whole 128-bit (16-byte) XMM registers
9080 movdqu(xmm0, Address(rsp,off++*16));
9081 movdqu(xmm1, Address(rsp,off++*16));
9082 movdqu(xmm2, Address(rsp,off++*16));
9083 movdqu(xmm3, Address(rsp,off++*16));
9084 movdqu(xmm4, Address(rsp,off++*16));
9085 movdqu(xmm5, Address(rsp,off++*16));
9086 movdqu(xmm6, Address(rsp,off++*16));
9087 movdqu(xmm7, Address(rsp,off++*16));
9088 #ifdef _LP64
9089 movdqu(xmm8, Address(rsp,off++*16));
9090 movdqu(xmm9, Address(rsp,off++*16));
9091 movdqu(xmm10, Address(rsp,off++*16));
9092 movdqu(xmm11, Address(rsp,off++*16));
9093 movdqu(xmm12, Address(rsp,off++*16));
9094 movdqu(xmm13, Address(rsp,off++*16));
9095 movdqu(xmm14, Address(rsp,off++*16));
9096 movdqu(xmm15, Address(rsp,off++*16));
9097 #endif
9098 addptr(rsp, 16 * LP64_ONLY(16) NOT_LP64(8));
9099 #ifdef COMPILER2
9100 if (MaxVectorSize > 16) {
9101 // Restore the upper halves of the YMM registers.
9102 vinsertf128h(xmm0, Address(rsp, 0));
9103 vinsertf128h(xmm1, Address(rsp, 16));
9104 vinsertf128h(xmm2, Address(rsp, 32));
9105 vinsertf128h(xmm3, Address(rsp, 48));
9106 vinsertf128h(xmm4, Address(rsp, 64));
9107 vinsertf128h(xmm5, Address(rsp, 80));
9108 vinsertf128h(xmm6, Address(rsp, 96));
9109 vinsertf128h(xmm7, Address(rsp,112));
9110 #ifdef _LP64
9111 vinsertf128h(xmm8, Address(rsp,128));
9112 vinsertf128h(xmm9, Address(rsp,144));
9113 vinsertf128h(xmm10, Address(rsp,160));
9114 vinsertf128h(xmm11, Address(rsp,176));
9115 vinsertf128h(xmm12, Address(rsp,192));
9116 vinsertf128h(xmm13, Address(rsp,208));
9117 vinsertf128h(xmm14, Address(rsp,224));
9118 vinsertf128h(xmm15, Address(rsp,240));
9119 #endif
9120 addptr(rsp, 16 * LP64_ONLY(16) NOT_LP64(8));
9121 }
9122 #endif
9123 }
9124 popa();
9125 }
9127 static const double pi_4 = 0.7853981633974483;
9129 void MacroAssembler::trigfunc(char trig, int num_fpu_regs_in_use) {
9130 // A hand-coded argument reduction for values in fabs(pi/4, pi/2)
9131 // was attempted in this code; unfortunately it appears that the
9132 // switch to 80-bit precision and back causes this to be
9133 // unprofitable compared with simply performing a runtime call if
9134 // the argument is out of the (-pi/4, pi/4) range.
9136 Register tmp = noreg;
9137 if (!VM_Version::supports_cmov()) {
9138 // fcmp needs a temporary, so preserve rbx
9139 tmp = rbx;
9140 push(tmp);
9141 }
9143 Label slow_case, done;
9145 ExternalAddress pi4_adr = (address)&pi_4;
9146 if (reachable(pi4_adr)) {
9147 // x ?<= pi/4
9148 fld_d(pi4_adr);
9149 fld_s(1); // Stack: X PI/4 X
9150 fabs(); // Stack: |X| PI/4 X
9151 fcmp(tmp);
9152 jcc(Assembler::above, slow_case);
9154 // fastest case: -pi/4 <= x <= pi/4
9155 switch(trig) {
9156 case 's':
9157 fsin();
9158 break;
9159 case 'c':
9160 fcos();
9161 break;
9162 case 't':
9163 ftan();
9164 break;
9165 default:
9166 assert(false, "bad intrinsic");
9167 break;
9168 }
9169 jmp(done);
9170 }
9172 // slow case: runtime call
9173 bind(slow_case);
9175 switch(trig) {
9176 case 's':
9177 {
9178 fp_runtime_fallback(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), 1, num_fpu_regs_in_use);
9179 }
9180 break;
9181 case 'c':
9182 {
9183 fp_runtime_fallback(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), 1, num_fpu_regs_in_use);
9184 }
9185 break;
9186 case 't':
9187 {
9188 fp_runtime_fallback(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), 1, num_fpu_regs_in_use);
9189 }
9190 break;
9191 default:
9192 assert(false, "bad intrinsic");
9193 break;
9194 }
9196 // Come here with result in F-TOS
9197 bind(done);
9199 if (tmp != noreg) {
9200 pop(tmp);
9201 }
9202 }
9205 // Look up the method for a megamorphic invokeinterface call.
9206 // The target method is determined by <intf_klass, itable_index>.
9207 // The receiver klass is in recv_klass.
9208 // On success, the result will be in method_result, and execution falls through.
9209 // On failure, execution transfers to the given label.
9210 void MacroAssembler::lookup_interface_method(Register recv_klass,
9211 Register intf_klass,
9212 RegisterOrConstant itable_index,
9213 Register method_result,
9214 Register scan_temp,
9215 Label& L_no_such_interface) {
9216 assert_different_registers(recv_klass, intf_klass, method_result, scan_temp);
9217 assert(itable_index.is_constant() || itable_index.as_register() == method_result,
9218 "caller must use same register for non-constant itable index as for method");
9220 // Compute start of first itableOffsetEntry (which is at the end of the vtable)
9221 int vtable_base = InstanceKlass::vtable_start_offset() * wordSize;
9222 int itentry_off = itableMethodEntry::method_offset_in_bytes();
9223 int scan_step = itableOffsetEntry::size() * wordSize;
9224 int vte_size = vtableEntry::size() * wordSize;
9225 Address::ScaleFactor times_vte_scale = Address::times_ptr;
9226 assert(vte_size == wordSize, "else adjust times_vte_scale");
9228 movl(scan_temp, Address(recv_klass, InstanceKlass::vtable_length_offset() * wordSize));
9230 // %%% Could store the aligned, prescaled offset in the klassoop.
9231 lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
9232 if (HeapWordsPerLong > 1) {
9233 // Round up to align_object_offset boundary
9234 // see code for InstanceKlass::start_of_itable!
9235 round_to(scan_temp, BytesPerLong);
9236 }
9238 // Adjust recv_klass by scaled itable_index, so we can free itable_index.
9239 assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
9240 lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));
9242 // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
9243 // if (scan->interface() == intf) {
9244 // result = (klass + scan->offset() + itable_index);
9245 // }
9246 // }
9247 Label search, found_method;
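// The scan loop is peeled once: the first compare branches to found_method on a hit,
// while the final compare inverts the test so that a hit falls through.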
9249 for (int peel = 1; peel >= 0; peel--) {
9250 movptr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset_in_bytes()));
9251 cmpptr(intf_klass, method_result);
9253 if (peel) {
9254 jccb(Assembler::equal, found_method);
9255 } else {
9256 jccb(Assembler::notEqual, search);
9257 // (invert the test to fall through to found_method...)
9258 }
9260 if (!peel) break;
9262 bind(search);
9264 // Check that the previous entry is non-null. A null entry means that
9265 // the receiver class doesn't implement the interface, and wasn't the
9266 // same as when the caller was compiled.
9267 testptr(method_result, method_result);
9268 jcc(Assembler::zero, L_no_such_interface);
9269 addptr(scan_temp, scan_step);
9270 }
9272 bind(found_method);
9274 // Got a hit.
9275 movl(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset_in_bytes()));
9276 movptr(method_result, Address(recv_klass, scan_temp, Address::times_1));
9277 }
9280 // virtual method calling
9281 void MacroAssembler::lookup_virtual_method(Register recv_klass,
9282 RegisterOrConstant vtable_index,
9283 Register method_result) {
9284 const int base = InstanceKlass::vtable_start_offset() * wordSize;
9285 assert(vtableEntry::size() * wordSize == wordSize, "else adjust the scaling in the code below");
9286 Address vtable_entry_addr(recv_klass,
9287 vtable_index, Address::times_ptr,
9288 base + vtableEntry::method_offset_in_bytes());
9289 movptr(method_result, vtable_entry_addr);
9290 }
9293 void MacroAssembler::check_klass_subtype(Register sub_klass,
9294 Register super_klass,
9295 Register temp_reg,
9296 Label& L_success) {
9297 Label L_failure;
9298 check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg, &L_success, &L_failure, NULL);
9299 check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, NULL);
9300 bind(L_failure);
9301 }
9304 void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
9305 Register super_klass,
9306 Register temp_reg,
9307 Label* L_success,
9308 Label* L_failure,
9309 Label* L_slow_path,
9310 RegisterOrConstant super_check_offset) {
9311 assert_different_registers(sub_klass, super_klass, temp_reg);
9312 bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
9313 if (super_check_offset.is_register()) {
9314 assert_different_registers(sub_klass, super_klass,
9315 super_check_offset.as_register());
9316 } else if (must_load_sco) {
9317 assert(temp_reg != noreg, "supply either a temp or a register offset");
9318 }
9320 Label L_fallthrough;
9321 int label_nulls = 0;
9322 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
9323 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
9324 if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
9325 assert(label_nulls <= 1, "at most one NULL in the batch");
9327 int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
9328 int sco_offset = in_bytes(Klass::super_check_offset_offset());
9329 Address super_check_offset_addr(super_klass, sco_offset);
9331 // Hacked jcc, which "knows" that L_fallthrough, at least, is in
9332 // range of a jccb. If this routine grows larger, reconsider at
9333 // least some of these.
9334 #define local_jcc(assembler_cond, label) \
9335 if (&(label) == &L_fallthrough) jccb(assembler_cond, label); \
9336 else jcc( assembler_cond, label) /*omit semi*/
9338 // Hacked jmp, which may only be used just before L_fallthrough.
9339 #define final_jmp(label) \
9340 if (&(label) == &L_fallthrough) { /*do nothing*/ } \
9341 else jmp(label) /*omit semi*/
9343 // If the pointers are equal, we are done (e.g., String[] elements).
9344 // This self-check enables sharing of secondary supertype arrays among
9345 // non-primary types such as array-of-interface. Otherwise, each such
9346 // type would need its own customized secondary supertype array (SSA).
9347 // We move this check to the front of the fast path because many
9348 // type checks are in fact trivially successful in this manner,
9349 // so we get a nicely predicted branch right at the start of the check.
9350 cmpptr(sub_klass, super_klass);
9351 local_jcc(Assembler::equal, *L_success);
9353 // Check the supertype display:
9354 if (must_load_sco) {
9355 // Positive movl does right thing on LP64.
9356 movl(temp_reg, super_check_offset_addr);
9357 super_check_offset = RegisterOrConstant(temp_reg);
9358 }
9359 Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0);
9360 cmpptr(super_klass, super_check_addr); // load displayed supertype
9362 // This check has worked decisively for primary supers.
9363 // Secondary supers are sought in the super_cache ('super_cache_addr').
9364 // (Secondary supers are interfaces and very deeply nested subtypes.)
9365 // This works in the same check above because of a tricky aliasing
9366 // between the super_cache and the primary super display elements.
9367 // (The 'super_check_addr' can address either, as the case requires.)
9368 // Note that the cache is updated below if it does not help us find
9369 // what we need immediately.
9370 // So if it was a primary super, we can just fail immediately.
9371 // Otherwise, it's the slow path for us (no success at this point).
9373 if (super_check_offset.is_register()) {
9374 local_jcc(Assembler::equal, *L_success);
9375 cmpl(super_check_offset.as_register(), sc_offset);
9376 if (L_failure == &L_fallthrough) {
9377 local_jcc(Assembler::equal, *L_slow_path);
9378 } else {
9379 local_jcc(Assembler::notEqual, *L_failure);
9380 final_jmp(*L_slow_path);
9381 }
9382 } else if (super_check_offset.as_constant() == sc_offset) {
9383 // Need a slow path; fast failure is impossible.
9384 if (L_slow_path == &L_fallthrough) {
9385 local_jcc(Assembler::equal, *L_success);
9386 } else {
9387 local_jcc(Assembler::notEqual, *L_slow_path);
9388 final_jmp(*L_success);
9389 }
9390 } else {
9391 // No slow path; it's a fast decision.
9392 if (L_failure == &L_fallthrough) {
9393 local_jcc(Assembler::equal, *L_success);
9394 } else {
9395 local_jcc(Assembler::notEqual, *L_failure);
9396 final_jmp(*L_success);
9397 }
9398 }
9400 bind(L_fallthrough);
9402 #undef local_jcc
9403 #undef final_jmp
9404 }
9407 void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
9408 Register super_klass,
9409 Register temp_reg,
9410 Register temp2_reg,
9411 Label* L_success,
9412 Label* L_failure,
9413 bool set_cond_codes) {
9414 assert_different_registers(sub_klass, super_klass, temp_reg);
9415 if (temp2_reg != noreg)
9416 assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg);
9417 #define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)
9419 Label L_fallthrough;
9420 int label_nulls = 0;
9421 if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
9422 if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
9423 assert(label_nulls <= 1, "at most one NULL in the batch");
9425 // a couple of useful fields in sub_klass:
9426 int ss_offset = in_bytes(Klass::secondary_supers_offset());
9427 int sc_offset = in_bytes(Klass::secondary_super_cache_offset());
9428 Address secondary_supers_addr(sub_klass, ss_offset);
9429 Address super_cache_addr( sub_klass, sc_offset);
9431 // Do a linear scan of the secondary super-klass chain.
9432 // This code is rarely used, so simplicity is a virtue here.
9433 // The repne_scan instruction uses fixed registers, which we must spill.
9434 // Don't worry too much about pre-existing connections with the input regs.
9436 assert(sub_klass != rax, "killed reg"); // killed by mov(rax, super)
9437 assert(sub_klass != rcx, "killed reg"); // killed by lea(rcx, &pst_counter)
9439 // Get super_klass value into rax (even if it was in rdi or rcx).
9440 bool pushed_rax = false, pushed_rcx = false, pushed_rdi = false;
9441 if (super_klass != rax || UseCompressedOops) {
9442 if (!IS_A_TEMP(rax)) { push(rax); pushed_rax = true; }
9443 mov(rax, super_klass);
9444 }
9445 if (!IS_A_TEMP(rcx)) { push(rcx); pushed_rcx = true; }
9446 if (!IS_A_TEMP(rdi)) { push(rdi); pushed_rdi = true; }
9448 #ifndef PRODUCT
9449 int* pst_counter = &SharedRuntime::_partial_subtype_ctr;
9450 ExternalAddress pst_counter_addr((address) pst_counter);
9451 NOT_LP64( incrementl(pst_counter_addr) );
9452 LP64_ONLY( lea(rcx, pst_counter_addr) );
9453 LP64_ONLY( incrementl(Address(rcx, 0)) );
9454 #endif //PRODUCT
9456 // We will consult the secondary-super array.
9457 movptr(rdi, secondary_supers_addr);
9458 // Load the array length. (Positive movl does right thing on LP64.)
9459 movl(rcx, Address(rdi, Array<Klass*>::length_offset_in_bytes()));
9460 // Skip to start of data.
9461 addptr(rdi, Array<Klass*>::base_offset_in_bytes());
9463 // Scan RCX words at [RDI] for an occurrence of RAX.
9464 // Set NZ/Z based on last compare.
9465 // The Z flag will not be set by 'repne' if RCX == 0, since 'repne' does
9466 // not change flags (only the repeated scas instruction sets them).
9467 // Set Z = 0 (not equal) before 'repne' to indicate that class was not found.
9469 testptr(rax,rax); // Set Z = 0
9470 repne_scan();
9472 // Unspill the temp. registers:
9473 if (pushed_rdi) pop(rdi);
9474 if (pushed_rcx) pop(rcx);
9475 if (pushed_rax) pop(rax);
9477 if (set_cond_codes) {
9478 // Special hack for the AD files: rdi is guaranteed non-zero.
9479 assert(!pushed_rdi, "rdi must be left non-NULL");
9480 // Also, the condition codes are properly set Z/NZ on succeed/failure.
9481 }
9483 if (L_failure == &L_fallthrough)
9484 jccb(Assembler::notEqual, *L_failure);
9485 else jcc(Assembler::notEqual, *L_failure);
9487 // Success. Cache the super we found and proceed in triumph.
9488 movptr(super_cache_addr, super_klass);
9490 if (L_success != &L_fallthrough) {
9491 jmp(*L_success);
9492 }
9494 #undef IS_A_TEMP
9496 bind(L_fallthrough);
9497 }
9500 void MacroAssembler::cmov32(Condition cc, Register dst, Address src) {
9501 if (VM_Version::supports_cmov()) {
9502 cmovl(cc, dst, src);
9503 } else {
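// No CMOV on this CPU: branch around the move instead.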
9504 Label L;
9505 jccb(negate_condition(cc), L);
9506 movl(dst, src);
9507 bind(L);
9508 }
9509 }
9511 void MacroAssembler::cmov32(Condition cc, Register dst, Register src) {
9512 if (VM_Version::supports_cmov()) {
9513 cmovl(cc, dst, src);
9514 } else {
9515 Label L;
9516 jccb(negate_condition(cc), L);
9517 movl(dst, src);
9518 bind(L);
9519 }
9520 }
9522 void MacroAssembler::verify_oop(Register reg, const char* s) {
9523 if (!VerifyOops) return;
9525 // Pass register number to verify_oop_subroutine
9526 char* b = new char[strlen(s) + 50];
9527 sprintf(b, "verify_oop: %s: %s", reg->name(), s);
9528 BLOCK_COMMENT("verify_oop {");
9529 #ifdef _LP64
9530 push(rscratch1); // save r10, trashed by movptr()
9531 #endif
9532 push(rax); // save rax,
9533 push(reg); // pass register argument
9534 ExternalAddress buffer((address) b);
9535 // avoid using pushptr, as it modifies scratch registers
9536 // and our contract is not to modify anything
9537 movptr(rax, buffer.addr());
9538 push(rax);
9539 // call indirectly to solve generation ordering problem
9540 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
9541 call(rax);
9542 // Caller pops the arguments (oop, message) and restores rax, r10
9543 BLOCK_COMMENT("} verify_oop");
9544 }
9547 RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
9548 Register tmp,
9549 int offset) {
9550 intptr_t value = *delayed_value_addr;
9551 if (value != 0)
9552 return RegisterOrConstant(value + offset);
9554 // load indirectly to solve generation ordering problem
9555 movptr(tmp, ExternalAddress((address) delayed_value_addr));
9557 #ifdef ASSERT
9558 { Label L;
9559 testptr(tmp, tmp);
9560 if (WizardMode) {
9561 jcc(Assembler::notZero, L);
9562 char* buf = new char[40];
9563 sprintf(buf, "DelayedValue="INTPTR_FORMAT, delayed_value_addr[1]);
9564 STOP(buf);
9565 } else {
9566 jccb(Assembler::notZero, L);
9567 hlt();
9568 }
9569 bind(L);
9570 }
9571 #endif
9573 if (offset != 0)
9574 addptr(tmp, offset);
9576 return RegisterOrConstant(tmp);
9577 }
9580 Address MacroAssembler::argument_address(RegisterOrConstant arg_slot,
9581 int extra_slot_offset) {
9582 // cf. TemplateTable::prepare_invoke(), if (load_receiver).
9583 int stackElementSize = Interpreter::stackElementSize;
9584 int offset = Interpreter::expr_offset_in_bytes(extra_slot_offset+0);
9585 #ifdef ASSERT
9586 int offset1 = Interpreter::expr_offset_in_bytes(extra_slot_offset+1);
9587 assert(offset1 - offset == stackElementSize, "correct arithmetic");
9588 #endif
9589 Register scale_reg = noreg;
9590 Address::ScaleFactor scale_factor = Address::no_scale;
9591 if (arg_slot.is_constant()) {
9592 offset += arg_slot.as_constant() * stackElementSize;
9593 } else {
9594 scale_reg = arg_slot.as_register();
9595 scale_factor = Address::times(stackElementSize);
9596 }
9597 offset += wordSize; // return PC is on stack
9598 return Address(rsp, scale_reg, scale_factor, offset);
9599 }
9602 void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
9603 if (!VerifyOops) return;
9605 // Address adjust(addr.base(), addr.index(), addr.scale(), addr.disp() + BytesPerWord);
9606 // Pass register number to verify_oop_subroutine
9607 char* b = new char[strlen(s) + 50];
9608 sprintf(b, "verify_oop_addr: %s", s);
9610 #ifdef _LP64
9611 push(rscratch1); // save r10, trashed by movptr()
9612 #endif
9613 push(rax); // save rax,
9614 // addr may contain rsp so we will have to adjust it based on the push
9615 // we just did (and on 64 bit we do two pushes)
9616 // NOTE: the 64-bit code seemed to have a bug in that it did movq(addr, rax), which
9617 // stores rax into addr, the reverse of what was intended.
9618 if (addr.uses(rsp)) {
9619 lea(rax, addr);
9620 pushptr(Address(rax, LP64_ONLY(2 *) BytesPerWord));
9621 } else {
9622 pushptr(addr);
9623 }
9625 ExternalAddress buffer((address) b);
9626 // pass msg argument
9627 // avoid using pushptr, as it modifies scratch registers
9628 // and our contract is not to modify anything
9629 movptr(rax, buffer.addr());
9630 push(rax);
9632 // call indirectly to solve generation ordering problem
9633 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
9634 call(rax);
9635 // Caller pops the arguments (addr, message) and restores rax, r10.
9636 }
9638 void MacroAssembler::verify_tlab() {
9639 #ifdef ASSERT
9640 if (UseTLAB && VerifyOops) {
9641 Label next, ok;
9642 Register t1 = rsi;
9643 Register thread_reg = NOT_LP64(rbx) LP64_ONLY(r15_thread);
9645 push(t1);
9646 NOT_LP64(push(thread_reg));
9647 NOT_LP64(get_thread(thread_reg));
9649 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
9650 cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())));
9651 jcc(Assembler::aboveEqual, next);
9652 STOP("assert(top >= start)");
9653 should_not_reach_here();
9655 bind(next);
9656 movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));
9657 cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
9658 jcc(Assembler::aboveEqual, ok);
9659 STOP("assert(top <= end)");
9660 should_not_reach_here();
9662 bind(ok);
9663 NOT_LP64(pop(thread_reg));
9664 pop(t1);
9665 }
9666 #endif
9667 }
9669 class ControlWord {
9670 public:
9671 int32_t _value;
9673 int rounding_control() const { return (_value >> 10) & 3 ; }
9674 int precision_control() const { return (_value >> 8) & 3 ; }
9675 bool precision() const { return ((_value >> 5) & 1) != 0; }
9676 bool underflow() const { return ((_value >> 4) & 1) != 0; }
9677 bool overflow() const { return ((_value >> 3) & 1) != 0; }
9678 bool zero_divide() const { return ((_value >> 2) & 1) != 0; }
9679 bool denormalized() const { return ((_value >> 1) & 1) != 0; }
9680 bool invalid() const { return ((_value >> 0) & 1) != 0; }
9682 void print() const {
9683 // rounding control
9684 const char* rc;
9685 switch (rounding_control()) {
9686 case 0: rc = "round near"; break;
9687 case 1: rc = "round down"; break;
9688 case 2: rc = "round up "; break;
9689 case 3: rc = "chop "; break;
9690 };
9691 // precision control
9692 const char* pc;
9693 switch (precision_control()) {
9694 case 0: pc = "24 bits "; break;
9695 case 1: pc = "reserved"; break;
9696 case 2: pc = "53 bits "; break;
9697 case 3: pc = "64 bits "; break;
9698 };
9699 // flags
9700 char f[9];
9701 f[0] = ' ';
9702 f[1] = ' ';
9703 f[2] = (precision ()) ? 'P' : 'p';
9704 f[3] = (underflow ()) ? 'U' : 'u';
9705 f[4] = (overflow ()) ? 'O' : 'o';
9706 f[5] = (zero_divide ()) ? 'Z' : 'z';
9707 f[6] = (denormalized()) ? 'D' : 'd';
9708 f[7] = (invalid ()) ? 'I' : 'i';
9709 f[8] = '\x0';
9710 // output
9711 printf("%04x masks = %s, %s, %s", _value & 0xFFFF, f, rc, pc);
9712 }
9714 };
9716 class StatusWord {
9717 public:
9718 int32_t _value;
9720 bool busy() const { return ((_value >> 15) & 1) != 0; }
9721 bool C3() const { return ((_value >> 14) & 1) != 0; }
9722 bool C2() const { return ((_value >> 10) & 1) != 0; }
9723 bool C1() const { return ((_value >> 9) & 1) != 0; }
9724 bool C0() const { return ((_value >> 8) & 1) != 0; }
9725 int top() const { return (_value >> 11) & 7 ; }
9726 bool error_status() const { return ((_value >> 7) & 1) != 0; }
9727 bool stack_fault() const { return ((_value >> 6) & 1) != 0; }
9728 bool precision() const { return ((_value >> 5) & 1) != 0; }
9729 bool underflow() const { return ((_value >> 4) & 1) != 0; }
9730 bool overflow() const { return ((_value >> 3) & 1) != 0; }
9731 bool zero_divide() const { return ((_value >> 2) & 1) != 0; }
9732 bool denormalized() const { return ((_value >> 1) & 1) != 0; }
9733 bool invalid() const { return ((_value >> 0) & 1) != 0; }
9735 void print() const {
9736 // condition codes
9737 char c[5];
9738 c[0] = (C3()) ? '3' : '-';
9739 c[1] = (C2()) ? '2' : '-';
9740 c[2] = (C1()) ? '1' : '-';
9741 c[3] = (C0()) ? '0' : '-';
9742 c[4] = '\x0';
9743 // flags
9744 char f[9];
9745 f[0] = (error_status()) ? 'E' : '-';
9746 f[1] = (stack_fault ()) ? 'S' : '-';
9747 f[2] = (precision ()) ? 'P' : '-';
9748 f[3] = (underflow ()) ? 'U' : '-';
9749 f[4] = (overflow ()) ? 'O' : '-';
9750 f[5] = (zero_divide ()) ? 'Z' : '-';
9751 f[6] = (denormalized()) ? 'D' : '-';
9752 f[7] = (invalid ()) ? 'I' : '-';
9753 f[8] = '\x0';
9754 // output
9755 printf("%04x flags = %s, cc = %s, top = %d", _value & 0xFFFF, f, c, top());
9756 }
9758 };
9760 class TagWord {
9761 public:
9762 int32_t _value;
9764 int tag_at(int i) const { return (_value >> (i*2)) & 3; }
9766 void print() const {
9767 printf("%04x", _value & 0xFFFF);
9768 }
9770 };
9772 class FPU_Register {
9773 public:
9774 int32_t _m0;
9775 int32_t _m1;
9776 int16_t _ex;
9778 bool is_indefinite() const {
9779 return _ex == -1 && _m1 == (int32_t)0xC0000000 && _m0 == 0;
9780 }
9782 void print() const {
9783 char sign = (_ex < 0) ? '-' : '+';
9784 const char* kind = (_ex == 0x7FFF || _ex == (int16_t)-1) ? "NaN" : " ";
9785 printf("%c%04hx.%08x%08x %s", sign, _ex, _m1, _m0, kind);
9786 };
9788 };
9790 class FPU_State {
9791 public:
9792 enum {
9793 register_size = 10,
9794 number_of_registers = 8,
9795 register_mask = 7
9796 };
9798 ControlWord _control_word;
9799 StatusWord _status_word;
9800 TagWord _tag_word;
9801 int32_t _error_offset;
9802 int32_t _error_selector;
9803 int32_t _data_offset;
9804 int32_t _data_selector;
9805 int8_t _register[register_size * number_of_registers];
9807 int tag_for_st(int i) const { return _tag_word.tag_at((_status_word.top() + i) & register_mask); }
9808 FPU_Register* st(int i) const { return (FPU_Register*)&_register[register_size * i]; }
9810 const char* tag_as_string(int tag) const {
9811 switch (tag) {
9812 case 0: return "valid";
9813 case 1: return "zero";
9814 case 2: return "special";
9815 case 3: return "empty";
9816 }
9817 ShouldNotReachHere();
9818 return NULL;
9819 }
9821 void print() const {
9822 // print computation registers
9823 { int t = _status_word.top();
9824 for (int i = 0; i < number_of_registers; i++) {
9825 int j = (i - t) & register_mask;
9826 printf("%c r%d = ST%d = ", (j == 0 ? '*' : ' '), i, j);
9827 st(j)->print();
9828 printf(" %s\n", tag_as_string(_tag_word.tag_at(i)));
9829 }
9830 }
9831 printf("\n");
9832 // print control registers
9833 printf("ctrl = "); _control_word.print(); printf("\n");
9834 printf("stat = "); _status_word .print(); printf("\n");
9835 printf("tags = "); _tag_word .print(); printf("\n");
9836 }
9838 };
9840 class Flag_Register {
9841 public:
9842 int32_t _value;
9844 bool overflow() const { return ((_value >> 11) & 1) != 0; }
9845 bool direction() const { return ((_value >> 10) & 1) != 0; }
9846 bool sign() const { return ((_value >> 7) & 1) != 0; }
9847 bool zero() const { return ((_value >> 6) & 1) != 0; }
9848 bool auxiliary_carry() const { return ((_value >> 4) & 1) != 0; }
9849 bool parity() const { return ((_value >> 2) & 1) != 0; }
9850 bool carry() const { return ((_value >> 0) & 1) != 0; }
9852 void print() const {
9853 // flags
9854 char f[8];
9855 f[0] = (overflow ()) ? 'O' : '-';
9856 f[1] = (direction ()) ? 'D' : '-';
9857 f[2] = (sign ()) ? 'S' : '-';
9858 f[3] = (zero ()) ? 'Z' : '-';
9859 f[4] = (auxiliary_carry()) ? 'A' : '-';
9860 f[5] = (parity ()) ? 'P' : '-';
9861 f[6] = (carry ()) ? 'C' : '-';
9862 f[7] = '\x0';
9863 // output
9864 printf("%08x flags = %s", _value, f);
9865 }
9867 };
9869 class IU_Register {
9870 public:
9871 int32_t _value;
9873 void print() const {
9874 printf("%08x %11d", _value, _value);
9875 }
9877 };
9879 class IU_State {
9880 public:
9881 Flag_Register _eflags;
9882 IU_Register _rdi;
9883 IU_Register _rsi;
9884 IU_Register _rbp;
9885 IU_Register _rsp;
9886 IU_Register _rbx;
9887 IU_Register _rdx;
9888 IU_Register _rcx;
9889 IU_Register _rax;
9891 void print() const {
9892 // computation registers
9893 printf("rax, = "); _rax.print(); printf("\n");
9894 printf("rbx, = "); _rbx.print(); printf("\n");
9895 printf("rcx = "); _rcx.print(); printf("\n");
9896 printf("rdx = "); _rdx.print(); printf("\n");
9897 printf("rdi = "); _rdi.print(); printf("\n");
9898 printf("rsi = "); _rsi.print(); printf("\n");
9899 printf("rbp, = "); _rbp.print(); printf("\n");
9900 printf("rsp = "); _rsp.print(); printf("\n");
9901 printf("\n");
9902 // control registers
9903 printf("flgs = "); _eflags.print(); printf("\n");
9904 }
9905 };
9908 class CPU_State {
9909 public:
9910 FPU_State _fpu_state;
9911 IU_State _iu_state;
9913 void print() const {
9914 printf("--------------------------------------------------\n");
9915 _iu_state .print();
9916 printf("\n");
9917 _fpu_state.print();
9918 printf("--------------------------------------------------\n");
9919 }
9921 };
9924 static void _print_CPU_state(CPU_State* state) {
9925 state->print();
9926 };
9929 void MacroAssembler::print_CPU_state() {
9930 push_CPU_state();
9931 push(rsp); // pass CPU state
9932 call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state)));
9933 addptr(rsp, wordSize); // discard argument
9934 pop_CPU_state();
9935 }
9938 static bool _verify_FPU(int stack_depth, char* s, CPU_State* state) {
9939 static int counter = 0;
9940 FPU_State* fs = &state->_fpu_state;
9941 counter++;
9942 // For leaf calls, only verify that the top few elements remain empty.
9943 // We only need 1 empty at the top for C2 code.
9944 if( stack_depth < 0 ) {
9945 if( fs->tag_for_st(7) != 3 ) {
9946 printf("FPR7 not empty\n");
9947 state->print();
9948 assert(false, "error");
9949 return false;
9950 }
9951 return true; // All other stack states do not matter
9952 }
9954 assert((fs->_control_word._value & 0xffff) == StubRoutines::_fpu_cntrl_wrd_std,
9955 "bad FPU control word");
9957 // compute stack depth
9958 int i = 0;
9959 while (i < FPU_State::number_of_registers && fs->tag_for_st(i) < 3) i++;
9960 int d = i;
9961 while (i < FPU_State::number_of_registers && fs->tag_for_st(i) == 3) i++;
9962 // verify findings
9963 if (i != FPU_State::number_of_registers) {
9964 // stack not contiguous
9965 printf("%s: stack not contiguous at ST%d\n", s, i);
9966 state->print();
9967 assert(false, "error");
9968 return false;
9969 }
9970 // check if computed stack depth corresponds to expected stack depth
9971 if (stack_depth < 0) {
9972 // expected stack depth is -stack_depth or less
9973 if (d > -stack_depth) {
9974 // too many elements on the stack
9975 printf("%s: <= %d stack elements expected but found %d\n", s, -stack_depth, d);
9976 state->print();
9977 assert(false, "error");
9978 return false;
9979 }
9980 } else {
9981 // expected stack depth is stack_depth
9982 if (d != stack_depth) {
9983 // wrong stack depth
9984 printf("%s: %d stack elements expected but found %d\n", s, stack_depth, d);
9985 state->print();
9986 assert(false, "error");
9987 return false;
9988 }
9989 }
9990 // everything is cool
9991 return true;
9992 }
9995 void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
9996 if (!VerifyFPU) return;
9997 push_CPU_state();
9998 push(rsp); // pass CPU state
9999 ExternalAddress msg((address) s);
10000 // pass message string s
10001 pushptr(msg.addr());
10002 push(stack_depth); // pass stack depth
10003 call(RuntimeAddress(CAST_FROM_FN_PTR(address, _verify_FPU)));
10004 addptr(rsp, 3 * wordSize); // discard arguments
10005 // check for error
10006 { Label L;
10007 testl(rax, rax);
10008 jcc(Assembler::notZero, L);
10009 int3(); // break if error condition
10010 bind(L);
10011 }
10012 pop_CPU_state();
10013 }
10015 void MacroAssembler::load_klass(Register dst, Register src) {
10016 #ifdef _LP64
10017 if (UseCompressedKlassPointers) {
10018 movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
10019 decode_heap_oop_not_null(dst);
10020 } else
10021 #endif
10022 movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
10023 }
10025 void MacroAssembler::load_prototype_header(Register dst, Register src) {
10026 #ifdef _LP64
10027 if (UseCompressedKlassPointers) {
10028 assert (Universe::heap() != NULL, "java heap should be initialized");
10029 movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
10030 if (Universe::narrow_oop_shift() != 0) {
10031 assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
10032 if (LogMinObjAlignmentInBytes == Address::times_8) {
10033 movq(dst, Address(r12_heapbase, dst, Address::times_8, Klass::prototype_header_offset()));
10034 } else {
10035 // OK to use shift since we don't need to preserve flags.
10036 shlq(dst, LogMinObjAlignmentInBytes);
10037 movq(dst, Address(r12_heapbase, dst, Address::times_1, Klass::prototype_header_offset()));
10038 }
10039 } else {
10040 movq(dst, Address(dst, Klass::prototype_header_offset()));
10041 }
10042 } else
10043 #endif
10044 {
10045 movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
10046 movptr(dst, Address(dst, Klass::prototype_header_offset()));
10047 }
10048 }
10050 void MacroAssembler::store_klass(Register dst, Register src) {
10051 #ifdef _LP64
10052 if (UseCompressedKlassPointers) {
10053 encode_heap_oop_not_null(src);
10054 movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
10055 } else
10056 #endif
10057 movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
10058 }
10060 void MacroAssembler::load_heap_oop(Register dst, Address src) {
10061 #ifdef _LP64
10062 // FIXME: Must change all places where we try to load the klass.
10063 if (UseCompressedOops) {
10064 movl(dst, src);
10065 decode_heap_oop(dst);
10066 } else
10067 #endif
10068 movptr(dst, src);
10069 }
10071 // Does no verification; generates fixed-size code
10072 void MacroAssembler::load_heap_oop_not_null(Register dst, Address src) {
10073 #ifdef _LP64
10074 if (UseCompressedOops) {
10075 movl(dst, src);
10076 decode_heap_oop_not_null(dst);
10077 } else
10078 #endif
10079 movptr(dst, src);
10080 }
10082 void MacroAssembler::store_heap_oop(Address dst, Register src) {
10083 #ifdef _LP64
10084 if (UseCompressedOops) {
10085 assert(!dst.uses(src), "not enough registers");
10086 encode_heap_oop(src);
10087 movl(dst, src);
10088 } else
10089 #endif
10090 movptr(dst, src);
10091 }
10093 void MacroAssembler::cmp_heap_oop(Register src1, Address src2, Register tmp) {
10094 assert_different_registers(src1, tmp);
10095 #ifdef _LP64
10096 if (UseCompressedOops) {
10097 bool did_push = false;
10098 if (tmp == noreg) {
10099 tmp = rax;
10100 push(tmp);
10101 did_push = true;
10102 assert(!src2.uses(rsp), "can't push");
10103 }
10104 load_heap_oop(tmp, src2);
10105 cmpptr(src1, tmp);
10106 if (did_push) pop(tmp);
10107 } else
10108 #endif
10109 cmpptr(src1, src2);
10110 }
10112 // Used for storing NULLs.
10113 void MacroAssembler::store_heap_oop_null(Address dst) {
10114 #ifdef _LP64
10115 if (UseCompressedOops) {
10116 movl(dst, (int32_t)NULL_WORD);
10117 } else {
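// movslq stores a sign-extended 32-bit immediate, writing a full 64-bit null word.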
10118 movslq(dst, (int32_t)NULL_WORD);
10119 }
10120 #else
10121 movl(dst, (int32_t)NULL_WORD);
10122 #endif
10123 }
10125 #ifdef _LP64
10126 void MacroAssembler::store_klass_gap(Register dst, Register src) {
10127 if (UseCompressedKlassPointers) {
10128 // Store to klass gap in destination
10129 movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
10130 }
10131 }
10133 #ifdef ASSERT
10134 void MacroAssembler::verify_heapbase(const char* msg) {
10135 assert (UseCompressedOops, "should be compressed");
10136 assert (Universe::heap() != NULL, "java heap should be initialized");
10137 if (CheckCompressedOops) {
10138 Label ok;
10139 push(rscratch1); // cmpptr trashes rscratch1
10140 cmpptr(r12_heapbase, ExternalAddress((address)Universe::narrow_oop_base_addr()));
10141 jcc(Assembler::equal, ok);
10142 STOP(msg);
10143 bind(ok);
10144 pop(rscratch1);
10145 }
10146 }
10147 #endif
10149 // Algorithm must match oop.inline.hpp encode_heap_oop.
10150 void MacroAssembler::encode_heap_oop(Register r) {
10151 #ifdef ASSERT
10152 verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");
10153 #endif
10154 verify_oop(r, "broken oop in encode_heap_oop");
10155 if (Universe::narrow_oop_base() == NULL) {
10156 if (Universe::narrow_oop_shift() != 0) {
10157 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
10158 shrq(r, LogMinObjAlignmentInBytes);
10159 }
10160 return;
10161 }
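// Map null to null: substitute the heap base so the subtract below yields zero.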
10162 testq(r, r);
10163 cmovq(Assembler::equal, r, r12_heapbase);
10164 subq(r, r12_heapbase);
10165 shrq(r, LogMinObjAlignmentInBytes);
10166 }
10168 void MacroAssembler::encode_heap_oop_not_null(Register r) {
10169 #ifdef ASSERT
10170 verify_heapbase("MacroAssembler::encode_heap_oop_not_null: heap base corrupted?");
10171 if (CheckCompressedOops) {
10172 Label ok;
10173 testq(r, r);
10174 jcc(Assembler::notEqual, ok);
10175 STOP("null oop passed to encode_heap_oop_not_null");
10176 bind(ok);
10177 }
10178 #endif
10179 verify_oop(r, "broken oop in encode_heap_oop_not_null");
10180 if (Universe::narrow_oop_base() != NULL) {
10181 subq(r, r12_heapbase);
10182 }
10183 if (Universe::narrow_oop_shift() != 0) {
10184 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
10185 shrq(r, LogMinObjAlignmentInBytes);
10186 }
10187 }
10189 void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
10190 #ifdef ASSERT
10191 verify_heapbase("MacroAssembler::encode_heap_oop_not_null2: heap base corrupted?");
10192 if (CheckCompressedOops) {
10193 Label ok;
10194 testq(src, src);
10195 jcc(Assembler::notEqual, ok);
10196 STOP("null oop passed to encode_heap_oop_not_null2");
10197 bind(ok);
10198 }
10199 #endif
10200 verify_oop(src, "broken oop in encode_heap_oop_not_null2");
10201 if (dst != src) {
10202 movq(dst, src);
10203 }
10204 if (Universe::narrow_oop_base() != NULL) {
10205 subq(dst, r12_heapbase);
10206 }
10207 if (Universe::narrow_oop_shift() != 0) {
10208 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
10209 shrq(dst, LogMinObjAlignmentInBytes);
10210 }
10211 }
10213 void MacroAssembler::decode_heap_oop(Register r) {
10214 #ifdef ASSERT
10215 verify_heapbase("MacroAssembler::decode_heap_oop: heap base corrupted?");
10216 #endif
10217 if (Universe::narrow_oop_base() == NULL) {
10218 if (Universe::narrow_oop_shift() != 0) {
10219 assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
10220 shlq(r, LogMinObjAlignmentInBytes);
10221 }
10222 } else {
10223 Label done;
10224 shlq(r, LogMinObjAlignmentInBytes);
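// shlq sets ZF when the narrow oop is null; skip the base add to keep null as null.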
10225 jccb(Assembler::equal, done);
10226 addq(r, r12_heapbase);
10227 bind(done);
10228 }
10229 verify_oop(r, "broken oop in decode_heap_oop");
10230 }
10232 void MacroAssembler::decode_heap_oop_not_null(Register r) {
10233 // Note: it will change flags
10234 assert (UseCompressedOops, "should only be used for compressed headers");
10235 assert (Universe::heap() != NULL, "java heap should be initialized");
10236 // Cannot assert, unverified entry point counts instructions (see .ad file)
10237 // vtableStubs also counts instructions in pd_code_size_limit.
10238 // Also do not verify_oop as this is called by verify_oop.
10239 if (Universe::narrow_oop_shift() != 0) {
10240 assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
10241 shlq(r, LogMinObjAlignmentInBytes);
10242 if (Universe::narrow_oop_base() != NULL) {
10243 addq(r, r12_heapbase);
10244 }
10245 } else {
10246 assert (Universe::narrow_oop_base() == NULL, "sanity");
10247 }
10248 }
10250 void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
10251 // Note: it will change flags
10252 assert (UseCompressedOops, "should only be used for compressed headers");
10253 assert (Universe::heap() != NULL, "java heap should be initialized");
10254 // Cannot assert, unverified entry point counts instructions (see .ad file)
10255 // vtableStubs also counts instructions in pd_code_size_limit.
10256 // Also do not verify_oop as this is called by verify_oop.
10257 if (Universe::narrow_oop_shift() != 0) {
10258 assert(LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
10259 if (LogMinObjAlignmentInBytes == Address::times_8) {
10260 leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
10261 } else {
10262 if (dst != src) {
10263 movq(dst, src);
10264 }
10265 shlq(dst, LogMinObjAlignmentInBytes);
10266 if (Universe::narrow_oop_base() != NULL) {
10267 addq(dst, r12_heapbase);
10268 }
10269 }
10270 } else {
10271 assert (Universe::narrow_oop_base() == NULL, "sanity");
10272 if (dst != src) {
10273 movq(dst, src);
10274 }
10275 }
10276 }
10278 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
10279 assert (UseCompressedOops, "should only be used for compressed headers");
10280 assert (Universe::heap() != NULL, "java heap should be initialized");
10281 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
10282 int oop_index = oop_recorder()->find_index(obj);
10283 RelocationHolder rspec = oop_Relocation::spec(oop_index);
10284 mov_narrow_oop(dst, oop_index, rspec);
10285 }
10287 void MacroAssembler::set_narrow_oop(Address dst, jobject obj) {
10288 assert (UseCompressedOops, "should only be used for compressed headers");
10289 assert (Universe::heap() != NULL, "java heap should be initialized");
10290 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
10291 int oop_index = oop_recorder()->find_index(obj);
10292 RelocationHolder rspec = oop_Relocation::spec(oop_index);
10293 mov_narrow_oop(dst, oop_index, rspec);
10294 }
10296 void MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) {
10297 assert (UseCompressedOops, "should only be used for compressed headers");
10298 assert (Universe::heap() != NULL, "java heap should be initialized");
10299 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
10300 int oop_index = oop_recorder()->find_index(obj);
10301 RelocationHolder rspec = oop_Relocation::spec(oop_index);
10302 Assembler::cmp_narrow_oop(dst, oop_index, rspec);
10303 }
10305 void MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) {
10306 assert (UseCompressedOops, "should only be used for compressed headers");
10307 assert (Universe::heap() != NULL, "java heap should be initialized");
10308 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
10309 int oop_index = oop_recorder()->find_index(obj);
10310 RelocationHolder rspec = oop_Relocation::spec(oop_index);
10311 Assembler::cmp_narrow_oop(dst, oop_index, rspec);
10312 }
10314 void MacroAssembler::reinit_heapbase() {
10315 if (UseCompressedOops) {
10316 movptr(r12_heapbase, ExternalAddress((address)Universe::narrow_oop_base_addr()));
10317 }
10318 }
10319 #endif // _LP64
10322 // C2 compiled method's prolog code.
10323 void MacroAssembler::verified_entry(int framesize, bool stack_bang, bool fp_mode_24b) {
10325 // WARNING: Initial instruction MUST be 5 bytes or longer so that
10326 // NativeJump::patch_verified_entry will be able to patch out the entry
10327 // code safely. The push to verify stack depth is ok at 5 bytes,
10328 // the frame allocation can be either 3 or 6 bytes. So if we don't do
10329 // stack bang then we must use the 6 byte frame allocation even if
10330 // we have no frame. :-(
10332 assert((framesize & (StackAlignmentInBytes-1)) == 0, "frame size not aligned");
10333 // Remove word for return addr
10334 framesize -= wordSize;
10336 // Calls to C2R adapters often do not accept exceptional returns.
10337 // We require their callers to bang for them. But be careful, because
10338 // some VM calls (such as call site linkage) can use several kilobytes of
10339 // stack. But the stack safety zone should account for that.
10340 // See bugs 4446381, 4468289, 4497237.
10341 if (stack_bang) {
10342 generate_stack_overflow_check(framesize);
10344 // We always push rbp, so that on return to interpreter rbp, will be
10345 // restored correctly and we can correct the stack.
10346 push(rbp);
10347 // Remove word for ebp
10348 framesize -= wordSize;
10350 // Create frame
10351 if (framesize) {
10352 subptr(rsp, framesize);
10353 }
10354 } else {
10355 // Create frame (force generation of a 4 byte immediate value)
10356 subptr_imm32(rsp, framesize);
10358 // Save RBP register now.
10359 framesize -= wordSize;
10360 movptr(Address(rsp, framesize), rbp);
10361 }
10363 if (VerifyStackAtCalls) { // Majik cookie to verify stack depth
10364 framesize -= wordSize;
10365 movptr(Address(rsp, framesize), (int32_t)0xbadb100d);
10366 }
10368 #ifndef _LP64
10369 // If method sets FPU control word do it now
10370 if (fp_mode_24b) {
10371 fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_24()));
10372 }
10373 if (UseSSE >= 2 && VerifyFPU) {
10374 verify_FPU(0, "FPU stack must be clean on entry");
10375 }
10376 #endif
10378 #ifdef ASSERT
10379 if (VerifyStackAtCalls) {
10380 Label L;
10381 push(rax);
10382 mov(rax, rsp);
10383 andptr(rax, StackAlignmentInBytes-1);
10384 cmpptr(rax, StackAlignmentInBytes-wordSize);
10385 pop(rax);
10386 jcc(Assembler::equal, L);
10387 STOP("Stack is not properly aligned!");
10388 bind(L);
10389 }
10390 #endif
10392 }
10395 // IndexOf for constant substrings with size >= 8 chars
10396 // which don't need to be loaded through stack.
10397 void MacroAssembler::string_indexofC8(Register str1, Register str2,
10398 Register cnt1, Register cnt2,
10399 int int_cnt2, Register result,
10400 XMMRegister vec, Register tmp) {
10401 ShortBranchVerifier sbv(this);
10402 assert(UseSSE42Intrinsics, "SSE4.2 is required");
10404 // This method uses the pcmpestri instruction with bound registers
10405 // inputs:
10406 // xmm - substring
10407 // rax - substring length (elements count)
10408 // mem - scanned string
10409 // rdx - string length (elements count)
10410 // 0xd - mode: 1100 (substring search) + 01 (unsigned shorts)
10411 // outputs:
10412 // rcx - matched index in string
10413 assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri");
10415 Label RELOAD_SUBSTR, SCAN_TO_SUBSTR, SCAN_SUBSTR,
10416 RET_FOUND, RET_NOT_FOUND, EXIT, FOUND_SUBSTR,
10417 MATCH_SUBSTR_HEAD, RELOAD_STR, FOUND_CANDIDATE;
10419 // Note, inline_string_indexOf() generates checks:
10420 // if (substr.count > string.count) return -1;
10421 // if (substr.count == 0) return 0;
10422 assert(int_cnt2 >= 8, "this code is used only for cnt2 >= 8 chars");
10424 // Load substring.
10425 movdqu(vec, Address(str2, 0));
10426 movl(cnt2, int_cnt2);
10427 movptr(result, str1); // string addr
10429 if (int_cnt2 > 8) {
10430 jmpb(SCAN_TO_SUBSTR);
10432 // Reload substr for rescan; this code
10433 // is executed only for large substrings (> 8 chars)
10434 bind(RELOAD_SUBSTR);
10435 movdqu(vec, Address(str2, 0));
10436 negptr(cnt2); // Jumped here with negative cnt2, convert to positive
10438 bind(RELOAD_STR);
10439 // We came here after the beginning of the substring was
10440 // matched but the rest of it was not, so we need to search
10441 // again. Start from the next element after the previous match.
10443 // cnt2 is the number of remaining substring elements and
10444 // cnt1 is the number of remaining string elements when the compare failed.
10445 // Restored cnt1 = cnt1 - cnt2 + int_cnt2
10446 subl(cnt1, cnt2);
10447 addl(cnt1, int_cnt2);
10448 movl(cnt2, int_cnt2); // Now restore cnt2
10450 decrementl(cnt1); // Shift to next element
10451 cmpl(cnt1, cnt2);
10452 jccb(Assembler::negative, RET_NOT_FOUND); // Fewer chars left than the substring
10454 addptr(result, 2);
10456 } // (int_cnt2 > 8)
10458 // Scan string for start of substr in 16-byte vectors
10459 bind(SCAN_TO_SUBSTR);
10460 pcmpestri(vec, Address(result, 0), 0x0d);
10461 jccb(Assembler::below, FOUND_CANDIDATE); // CF == 1
10462 subl(cnt1, 8);
10463 jccb(Assembler::lessEqual, RET_NOT_FOUND); // Scanned full string
10464 cmpl(cnt1, cnt2);
10465 jccb(Assembler::negative, RET_NOT_FOUND); // Fewer chars left than the substring
10466 addptr(result, 16);
10467 jmpb(SCAN_TO_SUBSTR);
10469 // Found a potential substr
10470 bind(FOUND_CANDIDATE);
10471 // Matched whole vector if first element matched (tmp(rcx) == 0).
10472 if (int_cnt2 == 8) {
10473 jccb(Assembler::overflow, RET_FOUND); // OF == 1
10474 } else { // int_cnt2 > 8
10475 jccb(Assembler::overflow, FOUND_SUBSTR);
10476 }
10477 // After pcmpestri tmp(rcx) contains matched element index
10478 // Compute start addr of substr
10479 lea(result, Address(result, tmp, Address::times_2));
10481 // Make sure string is still long enough
10482 subl(cnt1, tmp);
10483 cmpl(cnt1, cnt2);
10484 if (int_cnt2 == 8) {
10485 jccb(Assembler::greaterEqual, SCAN_TO_SUBSTR);
10486 } else { // int_cnt2 > 8
10487 jccb(Assembler::greaterEqual, MATCH_SUBSTR_HEAD);
10488 }
10489 // Fewer chars left than the substring.
10491 bind(RET_NOT_FOUND);
10492 movl(result, -1);
10493 jmpb(EXIT);
10495 if (int_cnt2 > 8) {
10496 // This code is optimized for the case when whole substring
10497 // is matched if its head is matched.
10498 bind(MATCH_SUBSTR_HEAD);
10499 pcmpestri(vec, Address(result, 0), 0x0d);
10500 // Reload only the string if it does not match
10501 jccb(Assembler::noOverflow, RELOAD_STR); // OF == 0
10503 Label CONT_SCAN_SUBSTR;
10504 // Compare the rest of substring (> 8 chars).
10505 bind(FOUND_SUBSTR);
10506 // First 8 chars are already matched.
10507 negptr(cnt2);
10508 addptr(cnt2, 8);
10510 bind(SCAN_SUBSTR);
10511 subl(cnt1, 8);
10512 cmpl(cnt2, -8); // Do not read beyond substring
10513 jccb(Assembler::lessEqual, CONT_SCAN_SUBSTR);
10514 // Back-up strings to avoid reading beyond substring:
10515 // cnt1 = cnt1 - cnt2 + 8
10516 addl(cnt1, cnt2); // cnt2 is negative
10517 addl(cnt1, 8);
10518 movl(cnt2, 8); negptr(cnt2);
10519 bind(CONT_SCAN_SUBSTR);
10520 if (int_cnt2 < (int)G) {
10521 movdqu(vec, Address(str2, cnt2, Address::times_2, int_cnt2*2));
10522 pcmpestri(vec, Address(result, cnt2, Address::times_2, int_cnt2*2), 0x0d);
10523 } else {
10524 // calculate index in register to avoid integer overflow (int_cnt2*2)
10525 movl(tmp, int_cnt2);
10526 addptr(tmp, cnt2);
10527 movdqu(vec, Address(str2, tmp, Address::times_2, 0));
10528 pcmpestri(vec, Address(result, tmp, Address::times_2, 0), 0x0d);
10529 }
10530 // Need to reload the string pointers if the whole vector did not match
10531 jcc(Assembler::noOverflow, RELOAD_SUBSTR); // OF == 0
10532 addptr(cnt2, 8);
10533 jcc(Assembler::negative, SCAN_SUBSTR);
10534 // Fall through if found full substring
10536 } // (int_cnt2 > 8)
10538 bind(RET_FOUND);
10539 // Found result if we matched full small substring.
10540 // Compute substr offset
10541 subptr(result, str1);
10542 shrl(result, 1); // index
10543 bind(EXIT);
10545 } // string_indexofC8
10547 // Small strings are loaded through stack if they cross page boundary.
10548 void MacroAssembler::string_indexof(Register str1, Register str2,
10549 Register cnt1, Register cnt2,
10550 int int_cnt2, Register result,
10551 XMMRegister vec, Register tmp) {
10552 ShortBranchVerifier sbv(this);
10553 assert(UseSSE42Intrinsics, "SSE4.2 is required");
10554 //
10555 // int_cnt2 is length of small (< 8 chars) constant substring
10556 // or (-1) for a non-constant substring, in which case its length
10557 // is in cnt2 register.
10558 //
10559 // Note, inline_string_indexOf() generates checks:
10560 // if (substr.count > string.count) return -1;
10561 // if (substr.count == 0) return 0;
10562 //
10563 assert(int_cnt2 == -1 || (0 < int_cnt2 && int_cnt2 < 8), "should be != 0");
10565 // This method uses the pcmpestri instruction with bound registers
10566 // inputs:
10567 // xmm - substring
10568 // rax - substring length (elements count)
10569 // mem - scanned string
10570 // rdx - string length (elements count)
10571 // 0xd - mode: 1100 (substring search) + 01 (unsigned shorts)
10572 // outputs:
10573 // rcx - matched index in string
10574 assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri");
10576 Label RELOAD_SUBSTR, SCAN_TO_SUBSTR, SCAN_SUBSTR, ADJUST_STR,
10577 RET_FOUND, RET_NOT_FOUND, CLEANUP, FOUND_SUBSTR,
10578 FOUND_CANDIDATE;
10580 { //========================================================
10581 // We don't know where these strings are located
10582 // and we can't read beyond them. Load them through stack.
10583 Label BIG_STRINGS, CHECK_STR, COPY_SUBSTR, COPY_STR;
10585 movptr(tmp, rsp); // save old SP
10587 if (int_cnt2 > 0) { // small (< 8 chars) constant substring
10588 if (int_cnt2 == 1) { // One char
10589 load_unsigned_short(result, Address(str2, 0));
10590 movdl(vec, result); // move 32 bits
10591 } else if (int_cnt2 == 2) { // Two chars
10592 movdl(vec, Address(str2, 0)); // move 32 bits
10593 } else if (int_cnt2 == 4) { // Four chars
10594 movq(vec, Address(str2, 0)); // move 64 bits
10595 } else { // cnt2 = { 3, 5, 6, 7 }
10596 // Array header size is 12 bytes in 32-bit VM
10597 // + 6 bytes for 3 chars == 18 bytes,
10598 // enough space to load vec and shift.
10599 assert(HeapWordSize*TypeArrayKlass::header_size() >= 12,"sanity");
10600 movdqu(vec, Address(str2, (int_cnt2*2)-16));
10601 psrldq(vec, 16-(int_cnt2*2));
10602 }
10603 } else { // not constant substring
10604 cmpl(cnt2, 8);
10605 jccb(Assembler::aboveEqual, BIG_STRINGS); // Both strings are big enough
10607 // We can read beyond the string if str+16 does not cross a page boundary,
10608 // since heaps are aligned and mapped by pages.
10609 assert(os::vm_page_size() < (int)G, "default page should be small");
10610 movl(result, str2); // We need only low 32 bits
10611 andl(result, (os::vm_page_size()-1));
10612 cmpl(result, (os::vm_page_size()-16));
10613 jccb(Assembler::belowEqual, CHECK_STR);
10615 // Move small strings to the stack to allow loading 16 bytes into vec.
10616 subptr(rsp, 16);
10617 int stk_offset = wordSize-2;
10618 push(cnt2);
10620 bind(COPY_SUBSTR);
10621 load_unsigned_short(result, Address(str2, cnt2, Address::times_2, -2));
10622 movw(Address(rsp, cnt2, Address::times_2, stk_offset), result);
10623 decrement(cnt2);
10624 jccb(Assembler::notZero, COPY_SUBSTR);
10626 pop(cnt2);
10627 movptr(str2, rsp); // New substring address
10628 } // non constant
10630 bind(CHECK_STR);
10631 cmpl(cnt1, 8);
10632 jccb(Assembler::aboveEqual, BIG_STRINGS);
10634 // Check cross page boundary.
10635 movl(result, str1); // We need only low 32 bits
10636 andl(result, (os::vm_page_size()-1));
10637 cmpl(result, (os::vm_page_size()-16));
10638 jccb(Assembler::belowEqual, BIG_STRINGS);
10640 subptr(rsp, 16);
10641 int stk_offset = -2;
10642 if (int_cnt2 < 0) { // not constant
10643 push(cnt2);
10644 stk_offset += wordSize;
10645 }
10646 movl(cnt2, cnt1);
10648 bind(COPY_STR);
10649 load_unsigned_short(result, Address(str1, cnt2, Address::times_2, -2));
10650 movw(Address(rsp, cnt2, Address::times_2, stk_offset), result);
10651 decrement(cnt2);
10652 jccb(Assembler::notZero, COPY_STR);
10654 if (int_cnt2 < 0) { // not constant
10655 pop(cnt2);
10656 }
10657 movptr(str1, rsp); // New string address
10659 bind(BIG_STRINGS);
10660 // Load substring.
10661 if (int_cnt2 < 0) { // -1
10662 movdqu(vec, Address(str2, 0));
10663 push(cnt2); // substr count
10664 push(str2); // substr addr
10665 push(str1); // string addr
10666 } else {
10667 // Small (< 8 chars) constant substrings are loaded already.
10668 movl(cnt2, int_cnt2);
10669 }
10670 push(tmp); // original SP
10672 } // Finished loading
10674 //========================================================
10675 // Start search
10676 //
10678 movptr(result, str1); // string addr
10680 if (int_cnt2 < 0) { // Only for non constant substring
10681 jmpb(SCAN_TO_SUBSTR);
10683 // SP saved at sp+0
10684 // String saved at sp+1*wordSize
10685 // Substr saved at sp+2*wordSize
10686 // Substr count saved at sp+3*wordSize
10688 // Reload substr for rescan, this code
10689 // is executed only for large substrings (> 8 chars)
10690 bind(RELOAD_SUBSTR);
10691 movptr(str2, Address(rsp, 2*wordSize));
10692 movl(cnt2, Address(rsp, 3*wordSize));
10693 movdqu(vec, Address(str2, 0));
10694 // We came here after the beginning of the substring was
10695 // matched but the rest of it was not, so we need to search
10696 // again. Start from the next element after the previous match.
10697 subptr(str1, result); // Restore counter
10698 shrl(str1, 1);
10699 addl(cnt1, str1);
10700 decrementl(cnt1); // Shift to next element
10701 cmpl(cnt1, cnt2);
10702 jccb(Assembler::negative, RET_NOT_FOUND); // Fewer chars left than the substring
10704 addptr(result, 2);
10705 } // non constant
10707 // Scan string for start of substr in 16-byte vectors
10708 bind(SCAN_TO_SUBSTR);
10709 assert(cnt1 == rdx && cnt2 == rax && tmp == rcx, "pcmpestri");
10710 pcmpestri(vec, Address(result, 0), 0x0d);
10711 jccb(Assembler::below, FOUND_CANDIDATE); // CF == 1
10712 subl(cnt1, 8);
10713 jccb(Assembler::lessEqual, RET_NOT_FOUND); // Scanned full string
10714 cmpl(cnt1, cnt2);
10715 jccb(Assembler::negative, RET_NOT_FOUND); // Fewer chars left than the substring
10716 addptr(result, 16);
10718 bind(ADJUST_STR);
10719 cmpl(cnt1, 8); // Do not read beyond string
10720 jccb(Assembler::greaterEqual, SCAN_TO_SUBSTR);
10721 // Back-up string to avoid reading beyond string.
10722 lea(result, Address(result, cnt1, Address::times_2, -16));
10723 movl(cnt1, 8);
10724 jmpb(SCAN_TO_SUBSTR);
10726 // Found a potential substr
10727 bind(FOUND_CANDIDATE);
10728 // After pcmpestri tmp(rcx) contains matched element index
10730 // Make sure string is still long enough
10731 subl(cnt1, tmp);
10732 cmpl(cnt1, cnt2);
10733 jccb(Assembler::greaterEqual, FOUND_SUBSTR);
10734 // Fewer chars left than the substring.
10736 bind(RET_NOT_FOUND);
10737 movl(result, -1);
10738 jmpb(CLEANUP);
10740 bind(FOUND_SUBSTR);
10741 // Compute start addr of substr
10742 lea(result, Address(result, tmp, Address::times_2));
10744 if (int_cnt2 > 0) { // Constant substring
10745 // Repeat search for small substring (< 8 chars)
10746 // from new point without reloading substring.
10747 // Have to check that we don't read beyond string.
10748 cmpl(tmp, 8-int_cnt2);
10749 jccb(Assembler::greater, ADJUST_STR);
10750 // Fall through if matched whole substring.
10751 } else { // non constant
10752 assert(int_cnt2 == -1, "should be != 0");
10754 addl(tmp, cnt2);
10755 // Found result if we matched whole substring.
10756 cmpl(tmp, 8);
10757 jccb(Assembler::lessEqual, RET_FOUND);
10759 // Repeat search for small substring (<= 8 chars)
10760 // from new point 'str1' without reloading substring.
10761 cmpl(cnt2, 8);
10762 // Have to check that we don't read beyond string.
10763 jccb(Assembler::lessEqual, ADJUST_STR);
10765 Label CHECK_NEXT, CONT_SCAN_SUBSTR, RET_FOUND_LONG;
10766 // Compare the rest of substring (> 8 chars).
10767 movptr(str1, result);
10769 cmpl(tmp, cnt2);
10770 // First 8 chars are already matched.
10771 jccb(Assembler::equal, CHECK_NEXT);
10773 bind(SCAN_SUBSTR);
10774 pcmpestri(vec, Address(str1, 0), 0x0d);
10775 // Need to reload the string pointers if the whole vector did not match
10776 jcc(Assembler::noOverflow, RELOAD_SUBSTR); // OF == 0
10778 bind(CHECK_NEXT);
10779 subl(cnt2, 8);
10780 jccb(Assembler::lessEqual, RET_FOUND_LONG); // Found full substring
10781 addptr(str1, 16);
10782 addptr(str2, 16);
10783 subl(cnt1, 8);
10784 cmpl(cnt2, 8); // Do not read beyond substring
10785 jccb(Assembler::greaterEqual, CONT_SCAN_SUBSTR);
10786 // Back-up strings to avoid reading beyond substring.
10787 lea(str2, Address(str2, cnt2, Address::times_2, -16));
10788 lea(str1, Address(str1, cnt2, Address::times_2, -16));
10789 subl(cnt1, cnt2);
10790 movl(cnt2, 8);
10791 addl(cnt1, 8);
10792 bind(CONT_SCAN_SUBSTR);
10793 movdqu(vec, Address(str2, 0));
10794 jmpb(SCAN_SUBSTR);
10796 bind(RET_FOUND_LONG);
10797 movptr(str1, Address(rsp, wordSize));
10798 } // non constant
10800 bind(RET_FOUND);
10801 // Compute substr offset
10802 subptr(result, str1);
10803 shrl(result, 1); // index
10805 bind(CLEANUP);
10806 pop(rsp); // restore SP
10808 } // string_indexof
10810 // Compare strings.
10811 void MacroAssembler::string_compare(Register str1, Register str2,
10812 Register cnt1, Register cnt2, Register result,
10813 XMMRegister vec1) {
10814 ShortBranchVerifier sbv(this);
10815 Label LENGTH_DIFF_LABEL, POP_LABEL, DONE_LABEL, WHILE_HEAD_LABEL;
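// A sketch (not generated code) of the result, in Java-like pseudocode:
//
//   int min = Math.min(cnt1, cnt2);
//   for (int i = 0; i < min; i++) {
//     if (str1[i] != str2[i]) return str1[i] - str2[i];
//   }
//   return cnt1 - cnt2;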
10817 // Compute the minimum of the string lengths, and push the
10818 // difference of the string lengths onto the stack.
10819 // The conditional move leaves min(cnt1, cnt2) in cnt2.
10820 movl(result, cnt1);
10821 subl(cnt1, cnt2);
10822 push(cnt1);
10823 cmov32(Assembler::lessEqual, cnt2, result);
10825 // Is the minimum length zero?
10826 testl(cnt2, cnt2);
10827 jcc(Assembler::zero, LENGTH_DIFF_LABEL);
10829 // Load first characters
10830 load_unsigned_short(result, Address(str1, 0));
10831 load_unsigned_short(cnt1, Address(str2, 0));
10833 // Compare first characters
10834 subl(result, cnt1);
10835 jcc(Assembler::notZero, POP_LABEL);
10836 decrementl(cnt2);
10837 jcc(Assembler::zero, LENGTH_DIFF_LABEL);
10839 {
10840 // After comparing the first character, check whether the strings are equivalent
10841 Label LSkip2;
10842 // Check if the strings start at same location
10843 cmpptr(str1, str2);
10844 jccb(Assembler::notEqual, LSkip2);
10846 // Check if the length difference is zero (from stack)
10847 cmpl(Address(rsp, 0), 0x0);
10848 jcc(Assembler::equal, LENGTH_DIFF_LABEL);
10850 // Strings might not be equivalent
10851 bind(LSkip2);
10852 }
10854 Address::ScaleFactor scale = Address::times_2;
10855 int stride = 8;
10857 // Advance to next element
10858 addptr(str1, 16/stride);
10859 addptr(str2, 16/stride);
10861 if (UseSSE42Intrinsics) {
10862 Label COMPARE_WIDE_VECTORS, VECTOR_NOT_EQUAL, COMPARE_TAIL;
10863 int pcmpmask = 0x19;
10864 // Setup to compare 16-byte vectors
10865 movl(result, cnt2);
10866 andl(cnt2, ~(stride - 1)); // cnt2 holds the vector count
10867 jccb(Assembler::zero, COMPARE_TAIL);
10869 lea(str1, Address(str1, result, scale));
10870 lea(str2, Address(str2, result, scale));
10871 negptr(result);
10873 // pcmpestri
10874 // inputs:
10875 // vec1- substring
10876 // rax - negative string length (elements count)
10877 // mem - scanned string
10878 // rdx - string length (elements count)
10879 // pcmpmask - cmp mode: 11000 (string compare with negated result)
10880 // + 00 (unsigned bytes) or + 01 (unsigned shorts)
10881 // outputs:
10882 // rcx - first mismatched element index
10883 assert(result == rax && cnt2 == rdx && cnt1 == rcx, "pcmpestri");
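// In this negated-result mode CF == 1 iff some element of the current
// 8-char window differs, with cnt1(rcx) holding the index of the first
// mismatch; CF == 0 means the whole window compared equal.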
10885 bind(COMPARE_WIDE_VECTORS);
10886 movdqu(vec1, Address(str1, result, scale));
10887 pcmpestri(vec1, Address(str2, result, scale), pcmpmask);
10888 // After pcmpestri cnt1(rcx) contains mismatched element index
10890 jccb(Assembler::below, VECTOR_NOT_EQUAL); // CF==1
10891 addptr(result, stride);
10892 subptr(cnt2, stride);
10893 jccb(Assembler::notZero, COMPARE_WIDE_VECTORS);
10895 // compare wide vectors tail
10896 testl(result, result);
10897 jccb(Assembler::zero, LENGTH_DIFF_LABEL);
10899 movl(cnt2, stride);
10900 movl(result, stride);
10901 negptr(result);
10902 movdqu(vec1, Address(str1, result, scale));
10903 pcmpestri(vec1, Address(str2, result, scale), pcmpmask);
10904 jccb(Assembler::aboveEqual, LENGTH_DIFF_LABEL);
10906 // Mismatched characters in the vectors
10907 bind(VECTOR_NOT_EQUAL);
10908 addptr(result, cnt1);
10909 movptr(cnt2, result);
10910 load_unsigned_short(result, Address(str1, cnt2, scale));
10911 load_unsigned_short(cnt1, Address(str2, cnt2, scale));
10912 subl(result, cnt1);
10913 jmpb(POP_LABEL);
10915 bind(COMPARE_TAIL); // limit is zero
10916 movl(cnt2, result);
10917 // Fallthru to tail compare
10918 }
10920 // Shift str2 and str1 to the end of the arrays, negate min
10921 lea(str1, Address(str1, cnt2, scale, 0));
10922 lea(str2, Address(str2, cnt2, scale, 0));
10923 negptr(cnt2);
10925 // Compare the rest of the elements
10926 bind(WHILE_HEAD_LABEL);
10927 load_unsigned_short(result, Address(str1, cnt2, scale, 0));
10928 load_unsigned_short(cnt1, Address(str2, cnt2, scale, 0));
10929 subl(result, cnt1);
10930 jccb(Assembler::notZero, POP_LABEL);
10931 increment(cnt2);
10932 jccb(Assembler::notZero, WHILE_HEAD_LABEL);
10934 // Strings are equal up to min length. Return the length difference.
10935 bind(LENGTH_DIFF_LABEL);
10936 pop(result);
10937 jmpb(DONE_LABEL);
10939 // Discard the stored length difference
10940 bind(POP_LABEL);
10941 pop(cnt1);
10943 // That's it
10944 bind(DONE_LABEL);
10945 }
10947 // Compare char[] arrays aligned to 4 bytes or substrings.
10948 void MacroAssembler::char_arrays_equals(bool is_array_equ, Register ary1, Register ary2,
10949 Register limit, Register result, Register chr,
10950 XMMRegister vec1, XMMRegister vec2) {
10951 ShortBranchVerifier sbv(this);
10952 Label TRUE_LABEL, FALSE_LABEL, DONE, COMPARE_VECTORS, COMPARE_CHAR;
10954 int length_offset = arrayOopDesc::length_offset_in_bytes();
10955 int base_offset = arrayOopDesc::base_offset_in_bytes(T_CHAR);
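// A sketch (not generated code) of the result for is_array_equ, in
// Java-like pseudocode:
//
//   if (ary1 == ary2) return 1;
//   if (ary1 == null || ary2 == null) return 0;
//   if (ary1.length != ary2.length) return 0;
//   for (int i = 0; i < ary1.length; i++) {
//     if (ary1[i] != ary2[i]) return 0;
//   }
//   return 1;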
10957 // Check the input args
10958 cmpptr(ary1, ary2);
10959 jcc(Assembler::equal, TRUE_LABEL);
10961 if (is_array_equ) {
10962 // Need additional checks for arrays_equals.
10963 testptr(ary1, ary1);
10964 jcc(Assembler::zero, FALSE_LABEL);
10965 testptr(ary2, ary2);
10966 jcc(Assembler::zero, FALSE_LABEL);
10968 // Check the lengths
10969 movl(limit, Address(ary1, length_offset));
10970 cmpl(limit, Address(ary2, length_offset));
10971 jcc(Assembler::notEqual, FALSE_LABEL);
10972 }
10974 // count == 0
10975 testl(limit, limit);
10976 jcc(Assembler::zero, TRUE_LABEL);
10978 if (is_array_equ) {
10979 // Load array address
10980 lea(ary1, Address(ary1, base_offset));
10981 lea(ary2, Address(ary2, base_offset));
10982 }
10984 shll(limit, 1); // convert char count to byte count (still != 0)
10985 movl(result, limit); // copy
10987 if (UseSSE42Intrinsics) {
10988 // With SSE4.2, use double quad vector compare
10989 Label COMPARE_WIDE_VECTORS, COMPARE_TAIL;
10991 // Compare 16-byte vectors
10992 andl(result, 0x0000000e); // tail count (in bytes)
10993 andl(limit, 0xfffffff0); // vector count (in bytes)
10994 jccb(Assembler::zero, COMPARE_TAIL);
10996 lea(ary1, Address(ary1, limit, Address::times_1));
10997 lea(ary2, Address(ary2, limit, Address::times_1));
10998 negptr(limit);
11000 bind(COMPARE_WIDE_VECTORS);
11001 movdqu(vec1, Address(ary1, limit, Address::times_1));
11002 movdqu(vec2, Address(ary2, limit, Address::times_1));
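// vec1 ^ vec2 is all-zero iff the two 16-byte chunks are identical;
// ptest below sets ZF on an all-zero value.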
11003 pxor(vec1, vec2);
11005 ptest(vec1, vec1);
11006 jccb(Assembler::notZero, FALSE_LABEL);
11007 addptr(limit, 16);
11008 jcc(Assembler::notZero, COMPARE_WIDE_VECTORS);
11010 testl(result, result);
11011 jccb(Assembler::zero, TRUE_LABEL);
11013 movdqu(vec1, Address(ary1, result, Address::times_1, -16));
11014 movdqu(vec2, Address(ary2, result, Address::times_1, -16));
11015 pxor(vec1, vec2);
11017 ptest(vec1, vec1);
11018 jccb(Assembler::notZero, FALSE_LABEL);
11019 jmpb(TRUE_LABEL);
11021 bind(COMPARE_TAIL); // limit is zero
11022 movl(limit, result);
11023 // Fallthru to tail compare
11024 }
11026 // Compare 4-byte vectors
11027 andl(limit, 0xfffffffc); // vector count (in bytes)
11028 jccb(Assembler::zero, COMPARE_CHAR);
11030 lea(ary1, Address(ary1, limit, Address::times_1));
11031 lea(ary2, Address(ary2, limit, Address::times_1));
11032 negptr(limit);
11034 bind(COMPARE_VECTORS);
11035 movl(chr, Address(ary1, limit, Address::times_1));
11036 cmpl(chr, Address(ary2, limit, Address::times_1));
11037 jccb(Assembler::notEqual, FALSE_LABEL);
11038 addptr(limit, 4);
11039 jcc(Assembler::notZero, COMPARE_VECTORS);
11041 // Compare trailing char (final 2 bytes), if any
11042 bind(COMPARE_CHAR);
11043 testl(result, 0x2); // tail char
11044 jccb(Assembler::zero, TRUE_LABEL);
11045 load_unsigned_short(chr, Address(ary1, 0));
11046 load_unsigned_short(limit, Address(ary2, 0));
11047 cmpl(chr, limit);
11048 jccb(Assembler::notEqual, FALSE_LABEL);
11050 bind(TRUE_LABEL);
11051 movl(result, 1); // return true
11052 jmpb(DONE);
11054 bind(FALSE_LABEL);
11055 xorl(result, result); // return false
11057 // That's it
11058 bind(DONE);
11059 }
11061 void MacroAssembler::generate_fill(BasicType t, bool aligned,
11062 Register to, Register value, Register count,
11063 Register rtmp, XMMRegister xtmp) {
11064 ShortBranchVerifier sbv(this);
11065 assert_different_registers(to, value, count, rtmp);
11066 Label L_exit, L_skip_align1, L_skip_align2, L_fill_byte;
11067 Label L_fill_2_bytes, L_fill_4_bytes;
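// A sketch (not generated code) of the effect, in C-like pseudocode,
// where T is jbyte, jshort or jint according to 't':
//
//   for (int i = 0; i < count; i++) {
//     ((T*)to)[i] = (T)value;
//   }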
11069 int shift = -1;
11070 switch (t) {
11071 case T_BYTE:
11072 shift = 2;
11073 break;
11074 case T_SHORT:
11075 shift = 1;
11076 break;
11077 case T_INT:
11078 shift = 0;
11079 break;
11080 default: ShouldNotReachHere();
11081 }
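// shift is log2 of the number of elements in a 32-bit word, so
// (1 << shift) elements span 4 bytes and (8 << shift) elements span
// a 32-byte chunk.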
11083 if (t == T_BYTE) {
11084 andl(value, 0xff);
11085 movl(rtmp, value);
11086 shll(rtmp, 8);
11087 orl(value, rtmp);
11088 }
11089 if (t == T_SHORT) {
11090 andl(value, 0xffff);
11091 }
11092 if (t == T_BYTE || t == T_SHORT) {
11093 movl(rtmp, value);
11094 shll(rtmp, 16);
11095 orl(value, rtmp);
11096 }
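// Illustration: a T_BYTE value of 0xAB becomes 0x0000ABAB after the
// first or, then 0xABABABAB after the second, so one 32-bit store
// fills four elements.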
11098 cmpl(count, 2<<shift); // Short arrays (< 8 bytes) fill by element
11099 jcc(Assembler::below, L_fill_4_bytes); // use unsigned cmp
11100 if (!UseUnalignedLoadStores && !aligned && (t == T_BYTE || t == T_SHORT)) {
11101 // align the destination address to a 4-byte boundary
11102 if (t == T_BYTE) {
11103 // One-byte misalignment happens only for byte arrays
11104 testptr(to, 1);
11105 jccb(Assembler::zero, L_skip_align1);
11106 movb(Address(to, 0), value);
11107 increment(to);
11108 decrement(count);
11109 BIND(L_skip_align1);
11110 }
11111 // Two-byte misalignment happens only for byte and short (char) arrays
11112 testptr(to, 2);
11113 jccb(Assembler::zero, L_skip_align2);
11114 movw(Address(to, 0), value);
11115 addptr(to, 2);
11116 subl(count, 1<<(shift-1));
11117 BIND(L_skip_align2);
11118 }
11119 if (UseSSE < 2) {
11120 Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
11121 // Fill 32-byte chunks
11122 subl(count, 8 << shift);
11123 jcc(Assembler::less, L_check_fill_8_bytes);
11124 align(16);
11126 BIND(L_fill_32_bytes_loop);
11128 for (int i = 0; i < 32; i += 4) {
11129 movl(Address(to, i), value);
11130 }
11132 addptr(to, 32);
11133 subl(count, 8 << shift);
11134 jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
11135 BIND(L_check_fill_8_bytes);
11136 addl(count, 8 << shift);
11137 jccb(Assembler::zero, L_exit);
11138 jmpb(L_fill_8_bytes);
11140 //
11141 // length is too short, just fill qwords
11142 //
11143 BIND(L_fill_8_bytes_loop);
11144 movl(Address(to, 0), value);
11145 movl(Address(to, 4), value);
11146 addptr(to, 8);
11147 BIND(L_fill_8_bytes);
11148 subl(count, 1 << (shift + 1));
11149 jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
11150 // fall through to fill 4 bytes
11151 } else {
11152 Label L_fill_32_bytes;
11153 if (!UseUnalignedLoadStores) {
11154 // align to 8 bytes; we know we are 4-byte aligned to start
11155 testptr(to, 4);
11156 jccb(Assembler::zero, L_fill_32_bytes);
11157 movl(Address(to, 0), value);
11158 addptr(to, 4);
11159 subl(count, 1<<shift);
11160 }
11161 BIND(L_fill_32_bytes);
11162 {
11163 assert( UseSSE >= 2, "supported cpu only" );
11164 Label L_fill_32_bytes_loop, L_check_fill_8_bytes, L_fill_8_bytes_loop, L_fill_8_bytes;
11165 // Fill 32-byte chunks
11166 movdl(xtmp, value);
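// pshufd with an order byte of 0 broadcasts the low 32-bit lane of
// xtmp into all four lanes.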
11167 pshufd(xtmp, xtmp, 0);
11169 subl(count, 8 << shift);
11170 jcc(Assembler::less, L_check_fill_8_bytes);
11171 align(16);
11173 BIND(L_fill_32_bytes_loop);
11175 if (UseUnalignedLoadStores) {
11176 movdqu(Address(to, 0), xtmp);
11177 movdqu(Address(to, 16), xtmp);
11178 } else {
11179 movq(Address(to, 0), xtmp);
11180 movq(Address(to, 8), xtmp);
11181 movq(Address(to, 16), xtmp);
11182 movq(Address(to, 24), xtmp);
11183 }
11185 addptr(to, 32);
11186 subl(count, 8 << shift);
11187 jcc(Assembler::greaterEqual, L_fill_32_bytes_loop);
11188 BIND(L_check_fill_8_bytes);
11189 addl(count, 8 << shift);
11190 jccb(Assembler::zero, L_exit);
11191 jmpb(L_fill_8_bytes);
11193 //
11194 // length is too short, just fill qwords
11195 //
11196 BIND(L_fill_8_bytes_loop);
11197 movq(Address(to, 0), xtmp);
11198 addptr(to, 8);
11199 BIND(L_fill_8_bytes);
11200 subl(count, 1 << (shift + 1));
11201 jcc(Assembler::greaterEqual, L_fill_8_bytes_loop);
11202 }
11203 }
11204 // fill trailing 4 bytes
11205 BIND(L_fill_4_bytes);
11206 testl(count, 1<<shift);
11207 jccb(Assembler::zero, L_fill_2_bytes);
11208 movl(Address(to, 0), value);
11209 if (t == T_BYTE || t == T_SHORT) {
11210 addptr(to, 4);
11211 BIND(L_fill_2_bytes);
11212 // fill trailing 2 bytes
11213 testl(count, 1<<(shift-1));
11214 jccb(Assembler::zero, L_fill_byte);
11215 movw(Address(to, 0), value);
11216 if (t == T_BYTE) {
11217 addptr(to, 2);
11218 BIND(L_fill_byte);
11219 // fill trailing byte
11220 testl(count, 1);
11221 jccb(Assembler::zero, L_exit);
11222 movb(Address(to, 0), value);
11223 } else {
11224 BIND(L_fill_byte);
11225 }
11226 } else {
11227 BIND(L_fill_2_bytes);
11228 }
11229 BIND(L_exit);
11230 }
11231 #undef BIND
11232 #undef BLOCK_COMMENT
11235 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
11236 switch (cond) {
11237 // Note some conditions are synonyms for others
11238 case Assembler::zero: return Assembler::notZero;
11239 case Assembler::notZero: return Assembler::zero;
11240 case Assembler::less: return Assembler::greaterEqual;
11241 case Assembler::lessEqual: return Assembler::greater;
11242 case Assembler::greater: return Assembler::lessEqual;
11243 case Assembler::greaterEqual: return Assembler::less;
11244 case Assembler::below: return Assembler::aboveEqual;
11245 case Assembler::belowEqual: return Assembler::above;
11246 case Assembler::above: return Assembler::belowEqual;
11247 case Assembler::aboveEqual: return Assembler::below;
11248 case Assembler::overflow: return Assembler::noOverflow;
11249 case Assembler::noOverflow: return Assembler::overflow;
11250 case Assembler::negative: return Assembler::positive;
11251 case Assembler::positive: return Assembler::negative;
11252 case Assembler::parity: return Assembler::noParity;
11253 case Assembler::noParity: return Assembler::parity;
11254 }
11255 ShouldNotReachHere(); return Assembler::overflow;
11256 }
11258 SkipIfEqual::SkipIfEqual(
11259 MacroAssembler* masm, const bool* flag_addr, bool value) {
11260 _masm = masm;
11261 _masm->cmp8(ExternalAddress((address)flag_addr), value);
11262 _masm->jcc(Assembler::equal, _label);
11263 }
11265 SkipIfEqual::~SkipIfEqual() {
11266 _masm->bind(_label);
11267 }
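// Typical use (a sketch; the flag name is illustrative): code emitted
// between construction and destruction is skipped at runtime when the
// watched flag equals 'value':
//
//   { SkipIfEqual skip(masm, &SomeBoolFlag, false);
//     // emitted code here executes only when SomeBoolFlag is true
//   }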