Tue, 14 Oct 2008 15:10:26 -0700
6532536: Optimize arraycopy stubs for Intel cpus
Summary: Use SSE2 movdqu in arraycopy stubs on the newest Intel CPUs
Reviewed-by: rasbold
1 /*
2 * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
25 #include "incls/_precompiled.incl"
26 #include "incls/_assembler_x86.cpp.incl"
28 // Implementation of AddressLiteral
// Record `target` and attach the relocation spec implied by `rtype`.
// The literal is treated as an rvalue by default (_is_lval = false).
AddressLiteral::AddressLiteral(address target, relocInfo::relocType rtype) {
  _is_lval = false;
  _target = target;
  switch (rtype) {
  case relocInfo::oop_type:
    // Oops are a special case. Normally they would be their own section
    // but in cases like icBuffer they are literals in the code stream that
    // we don't have a section for. We use none so that we get a literal address
    // which is always patchable.
    break;
  case relocInfo::external_word_type:
    _rspec = external_word_Relocation::spec(target);
    break;
  case relocInfo::internal_word_type:
    _rspec = internal_word_Relocation::spec(target);
    break;
  case relocInfo::opt_virtual_call_type:
    _rspec = opt_virtual_call_Relocation::spec();
    break;
  case relocInfo::static_call_type:
    _rspec = static_call_Relocation::spec();
    break;
  case relocInfo::runtime_call_type:
    _rspec = runtime_call_Relocation::spec();
    break;
  case relocInfo::poll_type:
  case relocInfo::poll_return_type:
    _rspec = Relocation::spec_simple(rtype);
    break;
  case relocInfo::none:
    // No relocation: leave _rspec as the default (empty) holder.
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}
67 // Implementation of Address
69 #ifdef _LP64
// 64-bit stub: array addresses cannot be folded into a single Address here.
Address Address::make_array(ArrayAddress adr) {
  // Not implementable on 64bit machines
  // Should have been handled higher up the call chain.
  ShouldNotReachHere();
  return Address();
}
// exceedingly dangerous constructor: builds a base/index-less Address from a
// raw displacement plus a relocation derived from `loc` and `rtype`.
// "Dangerous" because the caller must guarantee disp/loc are consistent.
Address::Address(int disp, address loc, relocInfo::relocType rtype) {
  _base  = noreg;
  _index = noreg;
  _scale = no_scale;
  _disp  = disp;
  switch (rtype) {
    case relocInfo::external_word_type:
      _rspec = external_word_Relocation::spec(loc);
      break;
    case relocInfo::internal_word_type:
      _rspec = internal_word_Relocation::spec(loc);
      break;
    case relocInfo::runtime_call_type:
      // HMM
      _rspec = runtime_call_Relocation::spec();
      break;
    case relocInfo::poll_type:
    case relocInfo::poll_return_type:
      _rspec = Relocation::spec_simple(rtype);
      break;
    case relocInfo::none:
      break;
    default:
      ShouldNotReachHere();
  }
}
105 #else // LP64
// 32-bit: fold an ArrayAddress (literal base + scaled index) into one Address
// whose disp is the literal base address, carrying the base's relocation.
Address Address::make_array(ArrayAddress adr) {
  AddressLiteral base = adr.base();
  Address index = adr.index();
  assert(index._disp == 0, "must not have disp"); // maybe it can?
  Address array(index._base, index._index, index._scale, (intptr_t) base.target());
  array._rspec = base._rspec;
  return array;
}
// exceedingly dangerous constructor: absolute address `loc` becomes the
// displacement, with the caller-supplied relocation spec attached as-is.
Address::Address(address loc, RelocationHolder spec) {
  _base  = noreg;
  _index = noreg;
  _scale = no_scale;
  _disp  = (intptr_t) loc;
  _rspec = spec;
}
125 #endif // _LP64
129 // Convert the raw encoding form into the form expected by the constructor for
130 // Address. An index of 4 (rsp) corresponds to having no index, so convert
131 // that to noreg for the Address constructor.
132 Address Address::make_raw(int base, int index, int scale, int disp) {
133 bool valid_index = index != rsp->encoding();
134 if (valid_index) {
135 Address madr(as_Register(base), as_Register(index), (Address::ScaleFactor)scale, in_ByteSize(disp));
136 return madr;
137 } else {
138 Address madr(as_Register(base), noreg, Address::no_scale, in_ByteSize(disp));
139 return madr;
140 }
141 }
143 // Implementation of Assembler
145 int AbstractAssembler::code_fill_byte() {
146 return (u_char)'\xF4'; // hlt
147 }
149 // make this go away someday
150 void Assembler::emit_data(jint data, relocInfo::relocType rtype, int format) {
151 if (rtype == relocInfo::none)
152 emit_long(data);
153 else emit_data(data, Relocation::spec_simple(rtype), format);
154 }
// Emit a 32-bit word with an attached relocation.  Must run inside an
// InstructionMark so the relocation can anchor to the instruction start.
void Assembler::emit_data(jint data, RelocationHolder const& rspec, int format) {
  assert(imm_operand == 0, "default format must be immediate in this file");
  assert(inst_mark() != NULL, "must be inside InstructionMark");
  if (rspec.type() !=  relocInfo::none) {
#ifdef ASSERT
    check_relocation(rspec, format);
#endif
    // Do not use AbstractAssembler::relocate, which is not intended for
    // embedded words. Instead, relocate to the enclosing instruction.

    // hack. call32 is too wide for mask so use disp32
    if (format == call32_operand)
      code_section()->relocate(inst_mark(), rspec, disp32_operand);
    else
      code_section()->relocate(inst_mark(), rspec, format);
  }
  emit_long(data);
}
175 static int encode(Register r) {
176 int enc = r->encoding();
177 if (enc >= 8) {
178 enc -= 8;
179 }
180 return enc;
181 }
183 static int encode(XMMRegister r) {
184 int enc = r->encoding();
185 if (enc >= 8) {
186 enc -= 8;
187 }
188 return enc;
189 }
// Emit a byte-sized register/imm8 arithmetic instruction:
// opcode, ModRM (reg form), then the 8-bit immediate.
void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
  assert(dst->has_byte_register(), "must have byte register");
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert(isByte(imm8), "not a byte");
  assert((op1 & 0x01) == 0, "should be 8bit operation");
  emit_byte(op1);
  emit_byte(op2 | encode(dst));
  emit_byte(imm8);
}
// Emit a 32-bit register/immediate arithmetic instruction, using the
// sign-extended imm8 form when the immediate fits in one byte.
void Assembler::emit_arith(int op1, int op2, Register dst, int32_t imm32) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  if (is8bit(imm32)) {
    emit_byte(op1 | 0x02); // set sign bit
    emit_byte(op2 | encode(dst));
    emit_byte(imm32 & 0xFF);
  } else {
    emit_byte(op1);
    emit_byte(op2 | encode(dst));
    emit_long(imm32);
  }
}
// immediate-to-memory forms
// Emit an arithmetic op with a memory destination and an immediate,
// preferring the sign-extended imm8 encoding when it fits.  The third
// emit_operand argument is the trailing-immediate size (for RIP-rel fixups).
void Assembler::emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32) {
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  if (is8bit(imm32)) {
    emit_byte(op1 | 0x02); // set sign bit
    emit_operand(rm, adr, 1);
    emit_byte(imm32 & 0xFF);
  } else {
    emit_byte(op1);
    emit_operand(rm, adr, 4);
    emit_long(imm32);
  }
}
// 32-bit only: arithmetic op with an embedded oop immediate, recorded with
// an oop relocation so the GC can patch it.
void Assembler::emit_arith(int op1, int op2, Register dst, jobject obj) {
  LP64_ONLY(ShouldNotReachHere());
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  InstructionMark im(this);
  emit_byte(op1);
  emit_byte(op2 | encode(dst));
  emit_data((intptr_t)obj, relocInfo::oop_type, 0);
}
// Register-register arithmetic: opcode + ModRM with dst in the reg field
// and src in the r/m field.
void Assembler::emit_arith(int op1, int op2, Register dst, Register src) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  emit_byte(op1);
  emit_byte(op2 | encode(dst) << 3 | encode(src));
}
// Core memory-operand emitter: writes the ModRM byte, optional SIB byte, and
// any displacement for [base + index*scale + disp], with `reg` supplying the
// ModRM reg field.  `rip_relative_correction` accounts for immediate bytes
// that follow the disp32 when forming a 64-bit RIP-relative displacement.
void Assembler::emit_operand(Register reg, Register base, Register index,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec,
                             int rip_relative_correction) {
  relocInfo::relocType rtype = (relocInfo::relocType) rspec.type();

  // Encode the registers as needed in the fields they are used in
  int regenc = encode(reg) << 3;
  int indexenc = index->is_valid() ? encode(index) << 3 : 0;
  int baseenc = base->is_valid() ? encode(base) : 0;

  if (base->is_valid()) {
    if (index->is_valid()) {
      assert(scale != Address::no_scale, "inconsistent address");
      // [base + index*scale + disp]
      // rbp/r13 as base cannot use the no-disp form (that encoding means
      // disp32-only), hence the extra base checks below.
      if (disp == 0 && rtype == relocInfo::none &&
          base != rbp LP64_ONLY(&& base != r13)) {
        // [base + index*scale]
        // [00 reg 100][ss index base]
        assert(index != rsp, "illegal addressing mode");
        emit_byte(0x04 | regenc);
        emit_byte(scale << 6 | indexenc | baseenc);
      } else if (is8bit(disp) && rtype == relocInfo::none) {
        // [base + index*scale + imm8]
        // [01 reg 100][ss index base] imm8
        assert(index != rsp, "illegal addressing mode");
        emit_byte(0x44 | regenc);
        emit_byte(scale << 6 | indexenc | baseenc);
        emit_byte(disp & 0xFF);
      } else {
        // [base + index*scale + disp32]
        // [10 reg 100][ss index base] disp32
        assert(index != rsp, "illegal addressing mode");
        emit_byte(0x84 | regenc);
        emit_byte(scale << 6 | indexenc | baseenc);
        emit_data(disp, rspec, disp32_operand);
      }
    } else if (base == rsp LP64_ONLY(|| base == r12)) {
      // [rsp + disp] -- rsp/r12 as base always requires a SIB byte
      if (disp == 0 && rtype == relocInfo::none) {
        // [rsp]
        // [00 reg 100][00 100 100]
        emit_byte(0x04 | regenc);
        emit_byte(0x24);
      } else if (is8bit(disp) && rtype == relocInfo::none) {
        // [rsp + imm8]
        // [01 reg 100][00 100 100] disp8
        emit_byte(0x44 | regenc);
        emit_byte(0x24);
        emit_byte(disp & 0xFF);
      } else {
        // [rsp + imm32]
        // [10 reg 100][00 100 100] disp32
        emit_byte(0x84 | regenc);
        emit_byte(0x24);
        emit_data(disp, rspec, disp32_operand);
      }
    } else {
      // [base + disp]
      assert(base != rsp LP64_ONLY(&& base != r12), "illegal addressing mode");
      if (disp == 0 && rtype == relocInfo::none &&
          base != rbp LP64_ONLY(&& base != r13)) {
        // [base]
        // [00 reg base]
        emit_byte(0x00 | regenc | baseenc);
      } else if (is8bit(disp) && rtype == relocInfo::none) {
        // [base + disp8]
        // [01 reg base] disp8
        emit_byte(0x40 | regenc | baseenc);
        emit_byte(disp & 0xFF);
      } else {
        // [base + disp32]
        // [10 reg base] disp32
        emit_byte(0x80 | regenc | baseenc);
        emit_data(disp, rspec, disp32_operand);
      }
    }
  } else {
    if (index->is_valid()) {
      assert(scale != Address::no_scale, "inconsistent address");
      // [index*scale + disp]
      // [00 reg 100][ss index 101] disp32
      assert(index != rsp, "illegal addressing mode");
      emit_byte(0x04 | regenc);
      emit_byte(scale << 6 | indexenc | 0x05);
      emit_data(disp, rspec, disp32_operand);
    } else if (rtype != relocInfo::none ) {
      // [disp] (64bit) RIP-RELATIVE (32bit) abs
      // [00 000 101] disp32
      emit_byte(0x05 | regenc);
      // Note that the RIP-rel. correction applies to the generated
      // disp field, but _not_ to the target address in the rspec.

      // disp was created by converting the target address minus the pc
      // at the start of the instruction. That needs more correction here.
      // intptr_t disp = target - next_ip;
      assert(inst_mark() != NULL, "must be inside InstructionMark");
      address next_ip = pc() + sizeof(int32_t) + rip_relative_correction;
      int64_t adjusted = disp;
      // Do rip-rel adjustment for 64bit
      LP64_ONLY(adjusted -= (next_ip - inst_mark()));
      assert(is_simm32(adjusted),
             "must be 32bit offset (RIP relative address)");
      emit_data((int32_t) adjusted, rspec, disp32_operand);

    } else {
      // 32bit never did this, did everything as the rip-rel/disp code above
      // [disp] ABSOLUTE
      // [00 reg 100][00 100 101] disp32
      emit_byte(0x04 | regenc);
      emit_byte(0x25);
      emit_data(disp, rspec, disp32_operand);
    }
  }
}
// XMM variant: the ModRM reg field is encoded identically for XMM registers,
// so delegate to the general-register emitter.
void Assembler::emit_operand(XMMRegister reg, Register base, Register index,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec) {
  emit_operand((Register)reg, base, index, scale, disp, rspec);
}
// Secret local extension to Assembler::WhichOperand:
#define end_pc_operand (_WhichOperand_limit)

// Partial x86 instruction decoder.  Walks prefixes and the opcode map for the
// subset of instructions this assembler emits, then parses the ModRM/SIB
// bytes to locate the requested 32/64-bit operand (or the instruction end).
// Only instructions actually produced by this file are handled; anything
// else hits ShouldNotReachHere().
address Assembler::locate_operand(address inst, WhichOperand which) {
  // Decode the given instruction, and return the address of
  // an embedded 32-bit operand word.

  // If "which" is disp32_operand, selects the displacement portion
  // of an effective address specifier.
  // If "which" is imm64_operand, selects the trailing immediate constant.
  // If "which" is call32_operand, selects the displacement of a call or jump.
  // Caller is responsible for ensuring that there is such an operand,
  // and that it is 32/64 bits wide.

  // If "which" is end_pc_operand, find the end of the instruction.

  address ip = inst;
  bool is_64bit = false;
  debug_only(bool has_disp32 = false);
  int tail_size = 0; // other random bytes (#32, #16, etc.) at end of insn

 again_after_prefix:
  switch (0xFF & *ip++) {

  // These convenience macros generate groups of "case" labels for the switch.
#define REP4(x) (x)+0: case (x)+1: case (x)+2: case (x)+3
#define REP8(x) (x)+0: case (x)+1: case (x)+2: case (x)+3: \
             case (x)+4: case (x)+5: case (x)+6: case (x)+7
#define REP16(x) REP8((x)+0): \
              case REP8((x)+8)

  case CS_segment:
  case SS_segment:
  case DS_segment:
  case ES_segment:
  case FS_segment:
  case GS_segment:
    // Seems dubious
    LP64_ONLY(assert(false, "shouldn't have that prefix"));
    assert(ip == inst+1, "only one prefix allowed");
    goto again_after_prefix;

  case 0x67:
  case REX:
  case REX_B:
  case REX_X:
  case REX_XB:
  case REX_R:
  case REX_RB:
  case REX_RX:
  case REX_RXB:
    NOT_LP64(assert(false, "64bit prefixes"));
    goto again_after_prefix;

  case REX_W:
  case REX_WB:
  case REX_WX:
  case REX_WXB:
  case REX_WR:
  case REX_WRB:
  case REX_WRX:
  case REX_WRXB:
    NOT_LP64(assert(false, "64bit prefixes"));
    // REX.W widens the operand (affects immediate size for movq below).
    is_64bit = true;
    goto again_after_prefix;

  case 0xFF: // pushq a; decl a; incl a; call a; jmp a
  case 0x88: // movb a, r
  case 0x89: // movl a, r
  case 0x8A: // movb r, a
  case 0x8B: // movl r, a
  case 0x8F: // popl a
    debug_only(has_disp32 = true);
    break;

  case 0x68: // pushq #32
    if (which == end_pc_operand) {
      return ip + 4;
    }
    assert(which == imm_operand && !is_64bit, "pushl has no disp32 or 64bit immediate");
    return ip; // not produced by emit_operand

  case 0x66: // movw ... (size prefix)
   again_after_size_prefix2:
    switch (0xFF & *ip++) {
    case REX:
    case REX_B:
    case REX_X:
    case REX_XB:
    case REX_R:
    case REX_RB:
    case REX_RX:
    case REX_RXB:
    case REX_W:
    case REX_WB:
    case REX_WX:
    case REX_WXB:
    case REX_WR:
    case REX_WRB:
    case REX_WRX:
    case REX_WRXB:
      NOT_LP64(assert(false, "64bit prefix found"));
      goto again_after_size_prefix2;
    case 0x8B: // movw r, a
    case 0x89: // movw a, r
      debug_only(has_disp32 = true);
      break;
    case 0xC7: // movw a, #16
      debug_only(has_disp32 = true);
      tail_size = 2;  // the imm16
      break;
    case 0x0F: // several SSE/SSE2 variants
      ip--;    // reparse the 0x0F
      goto again_after_prefix;
    default:
      ShouldNotReachHere();
    }
    break;

  case REP8(0xB8): // movl/q r, #32/#64(oop?)
    if (which == end_pc_operand)  return ip + (is_64bit ? 8 : 4);
    // these asserts are somewhat nonsensical
#ifndef _LP64
    assert(which == imm_operand || which == disp32_operand, "");
#else
    assert((which == call32_operand || which == imm_operand) && is_64bit ||
           which == narrow_oop_operand && !is_64bit, "");
#endif // _LP64
    return ip;

  case 0x69: // imul r, a, #32
  case 0xC7: // movl a, #32(oop?)
    tail_size = 4;
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x0F: // movx..., etc.
    switch (0xFF & *ip++) {
    case 0x12: // movlps
    case 0x28: // movaps
    case 0x2E: // ucomiss
    case 0x2F: // comiss
    case 0x54: // andps
    case 0x55: // andnps
    case 0x56: // orps
    case 0x57: // xorps
    case 0x6E: // movd
    case 0x7E: // movd
    case 0xAE: // ldmxcsr a
      // 64bit side says it these have both operands but that doesn't
      // appear to be true
      debug_only(has_disp32 = true);
      break;

    case 0xAD: // shrd r, a, %cl
    case 0xAF: // imul r, a
    case 0xBE: // movsbl r, a (movsxb)
    case 0xBF: // movswl r, a (movsxw)
    case 0xB6: // movzbl r, a (movzxb)
    case 0xB7: // movzwl r, a (movzxw)
    case REP16(0x40): // cmovl cc, r, a
    case 0xB0: // cmpxchgb
    case 0xB1: // cmpxchg
    case 0xC1: // xaddl
    case 0xC7: // cmpxchg8
    case REP16(0x90): // setcc a
      debug_only(has_disp32 = true);
      // fall out of the switch to decode the address
      break;

    case 0xAC: // shrd r, a, #8
      debug_only(has_disp32 = true);
      tail_size = 1;  // the imm8
      break;

    case REP16(0x80): // jcc rdisp32
      if (which == end_pc_operand)  return ip + 4;
      assert(which == call32_operand, "jcc has no disp32 or imm");
      return ip;
    default:
      ShouldNotReachHere();
    }
    break;

  case 0x81: // addl a, #32; addl r, #32
    // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
    // on 32bit in the case of cmpl, the imm might be an oop
    tail_size = 4;
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x83: // addl a, #8; addl r, #8
    // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
    debug_only(has_disp32 = true); // has both kinds of operands!
    tail_size = 1;
    break;

  case 0x9B:
    switch (0xFF & *ip++) {
    case 0xD9: // fnstcw a
      debug_only(has_disp32 = true);
      break;
    default:
      ShouldNotReachHere();
    }
    break;

  case REP4(0x00): // addb a, r; addl a, r; addb r, a; addl r, a
  case REP4(0x10): // adc...
  case REP4(0x20): // and...
  case REP4(0x30): // xor...
  case REP4(0x08): // or...
  case REP4(0x18): // sbb...
  case REP4(0x28): // sub...
  case 0xF7: // mull a
  case 0x8D: // lea r, a
  case 0x87: // xchg r, a
  case REP4(0x38): // cmp...
  case 0x85: // test r, a
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0xC1: // sal a, #8; sar a, #8; shl a, #8; shr a, #8
  case 0xC6: // movb a, #8
  case 0x80: // cmpb a, #8
  case 0x6B: // imul r, a, #8
    debug_only(has_disp32 = true); // has both kinds of operands!
    tail_size = 1; // the imm8
    break;

  case 0xE8: // call rdisp32
  case 0xE9: // jmp  rdisp32
    if (which == end_pc_operand)  return ip + 4;
    assert(which == call32_operand, "call has no disp32 or imm");
    return ip;

  case 0xD1: // sal a, 1; sar a, 1; shl a, 1; shr a, 1
  case 0xD3: // sal a, %cl; sar a, %cl; shl a, %cl; shr a, %cl
  case 0xD9: // fld_s a; fst_s a; fstp_s a; fldcw a
  case 0xDD: // fld_d a; fst_d a; fstp_d a
  case 0xDB: // fild_s a; fistp_s a; fld_x a; fstp_x a
  case 0xDF: // fild_d a; fistp_d a
  case 0xD8: // fadd_s a; fsubr_s a; fmul_s a; fdivr_s a; fcomp_s a
  case 0xDC: // fadd_d a; fsubr_d a; fmul_d a; fdivr_d a; fcomp_d a
  case 0xDE: // faddp_d a; fsubrp_d a; fmulp_d a; fdivrp_d a; fcompp_d a
    debug_only(has_disp32 = true);
    break;

  case 0xF3: // For SSE
  case 0xF2: // For SSE2
    switch (0xFF & *ip++) {
    case REX:
    case REX_B:
    case REX_X:
    case REX_XB:
    case REX_R:
    case REX_RB:
    case REX_RX:
    case REX_RXB:
    case REX_W:
    case REX_WB:
    case REX_WX:
    case REX_WXB:
    case REX_WR:
    case REX_WRB:
    case REX_WRX:
    case REX_WRXB:
      NOT_LP64(assert(false, "found 64bit prefix"));
      ip++;
    default:
      ip++;
    }
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  default:
    ShouldNotReachHere();

#undef REP8
#undef REP16
  }

  assert(which != call32_operand, "instruction is not a call, jmp, or jcc");
#ifdef _LP64
  assert(which != imm_operand, "instruction is not a movq reg, imm64");
#else
  // assert(which != imm_operand || has_imm32, "instruction has no imm32 field");
  assert(which != imm_operand || has_disp32, "instruction has no imm32 field");
#endif // LP64
  assert(which != disp32_operand || has_disp32, "instruction has no disp32 field");

  // parse the output of emit_operand
  int op2 = 0xFF & *ip++;
  int base = op2 & 0x07;
  int op3 = -1;
  const int b100 = 4;
  const int b101 = 5;
  if (base == b100 && (op2 >> 6) != 3) {
    // Mod != 11 and r/m == 100 means a SIB byte follows.
    op3 = 0xFF & *ip++;
    base = op3 & 0x07;   // refetch the base
  }
  // now ip points at the disp (if any)

  switch (op2 >> 6) {
  case 0:
    // [00 reg  100][ss index base]
    // [00 reg  100][00   100  esp]
    // [00 reg base]
    // [00 reg  100][ss index  101][disp32]
    // [00 reg  101]               [disp32]

    if (base == b101) {
      if (which == disp32_operand)
        return ip;      // caller wants the disp32
      ip += 4;          // skip the disp32
    }
    break;

  case 1:
    // [01 reg  100][ss index base][disp8]
    // [01 reg  100][00   100  esp][disp8]
    // [01 reg base]               [disp8]
    ip += 1;            // skip the disp8
    break;

  case 2:
    // [10 reg  100][ss index base][disp32]
    // [10 reg  100][00   100  esp][disp32]
    // [10 reg base]               [disp32]
    if (which == disp32_operand)
      return ip;        // caller wants the disp32
    ip += 4;            // skip the disp32
    break;

  case 3:
    // [11 reg base]  (not a memory addressing mode)
    break;
  }

  if (which == end_pc_operand) {
    return ip + tail_size;
  }

#ifdef _LP64
  assert(false, "fix locate_operand");
#else
  assert(which == imm_operand, "instruction has only an imm field");
#endif // LP64
  return ip;
}
// Return the address just past `inst` by asking locate_operand for the
// instruction end.
address Assembler::locate_next_instruction(address inst) {
  // Secretly share code with locate_operand:
  return locate_operand(inst, end_pc_operand);
}
733 #ifdef ASSERT
// Debug-only sanity check: verify that the operand a relocation describes is
// exactly where the assembler is about to emit it (i.e. at the current pc).
void Assembler::check_relocation(RelocationHolder const& rspec, int format) {
  address inst = inst_mark();
  assert(inst != NULL && inst < pc(), "must point to beginning of instruction");
  address opnd;

  Relocation* r = rspec.reloc();
  if (r->type() == relocInfo::none) {
    return;
  } else if (r->is_call() || format == call32_operand) {
    // assert(format == imm32_operand, "cannot specify a nonzero format");
    opnd = locate_operand(inst, call32_operand);
  } else if (r->is_data()) {
    assert(format == imm_operand || format == disp32_operand
           LP64_ONLY(|| format == narrow_oop_operand), "format ok");
    opnd = locate_operand(inst, (WhichOperand)format);
  } else {
    assert(format == imm_operand, "cannot specify a format");
    return;
  }
  assert(opnd == pc(), "must put operand where relocs can find it");
}
755 #endif // ASSERT
// Emit a memory operand restricted to the 32-bit register set (no REX-
// extended registers allowed in reg, base, or index).
void Assembler::emit_operand32(Register reg, Address adr) {
  assert(reg->encoding() < 8, "no extended registers");
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec);
}
// Convenience wrapper: unpack an Address into the core emitter's fields.
void Assembler::emit_operand(Register reg, Address adr,
                             int rip_relative_correction) {
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec,
               rip_relative_correction);
}
// XMM convenience wrapper: unpack an Address into the core emitter's fields.
void Assembler::emit_operand(XMMRegister reg, Address adr) {
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec);
}
// MMX operations
// MMX memory operand; MMX registers cannot take REX-extended base/index.
void Assembler::emit_operand(MMXRegister reg, Address adr) {
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
}
// work around gcc (3.2.1-7a) bug
// Argument-order variant of the MMX overload above; same emission.
void Assembler::emit_operand(Address adr, MMXRegister reg) {
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
}
// Emit a two-byte x87 instruction whose second byte selects FPU stack slot i.
void Assembler::emit_farith(int b1, int b2, int i) {
  assert(isByte(b1) && isByte(b2), "wrong opcode");
  assert(0 <= i &&  i < 8, "illegal stack offset");
  emit_byte(b1);
  emit_byte(b2 + i);
}
797 // Now the Assembler instruction (identical for 32/64 bits)
// adcl dst, imm32 -- add with carry, register destination.
void Assembler::adcl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xD0, dst, imm32);
}
// adcl dst, [src] -- add with carry from memory.
void Assembler::adcl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x13);
  emit_operand(dst, src);
}
// adcl dst, src -- add with carry, register-register.
void Assembler::adcl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x13, 0xC0, dst, src);
}
// addl [dst], imm32 -- add immediate to memory.
void Assembler::addl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rax, dst, imm32);
}
// addl [dst], src -- add register into memory.
void Assembler::addl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_byte(0x01);
  emit_operand(src, dst);
}
// addl dst, imm32 -- add immediate to register.
void Assembler::addl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xC0, dst, imm32);
}
// addl dst, [src] -- add memory into register.
void Assembler::addl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x03);
  emit_operand(dst, src);
}
// addl dst, src -- add, register-register.
void Assembler::addl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x03, 0xC0, dst, src);
}
// Emit a 4-byte multi-byte NOP (0F 1F /0 with an 8-bit displacement).
void Assembler::addr_nop_4() {
  // 4 bytes: NOP DWORD PTR [EAX+0]
  emit_byte(0x0F);
  emit_byte(0x1F);
  emit_byte(0x40); // emit_rm(cbuf, 0x1, EAX_enc, EAX_enc);
  emit_byte(0);    // 8-bits offset (1 byte)
}
// Emit a 5-byte multi-byte NOP (0F 1F with SIB and an 8-bit displacement).
void Assembler::addr_nop_5() {
  // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bits offset
  emit_byte(0x0F);
  emit_byte(0x1F);
  emit_byte(0x44); // emit_rm(cbuf, 0x1, EAX_enc, 0x4);
  emit_byte(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  emit_byte(0);    // 8-bits offset (1 byte)
}
// Emit a 7-byte multi-byte NOP (0F 1F /0 with a 32-bit displacement).
void Assembler::addr_nop_7() {
  // 7 bytes: NOP DWORD PTR [EAX+0] 32-bits offset
  emit_byte(0x0F);
  emit_byte(0x1F);
  emit_byte(0x80); // emit_rm(cbuf, 0x2, EAX_enc, EAX_enc);
  emit_long(0);    // 32-bits offset (4 bytes)
}
// Emit an 8-byte multi-byte NOP (0F 1F with SIB and a 32-bit displacement).
void Assembler::addr_nop_8() {
  // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bits offset
  emit_byte(0x0F);
  emit_byte(0x1F);
  emit_byte(0x84); // emit_rm(cbuf, 0x2, EAX_enc, 0x4);
  emit_byte(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  emit_long(0);    // 32-bits offset (4 bytes)
}
// addsd dst, src -- scalar double add (F2 0F 58), register-register.
void Assembler::addsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x58);
  emit_byte(0xC0 | encode);
}
// addsd dst, [src] -- scalar double add from memory.
void Assembler::addsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0xF2);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x58);
  emit_operand(dst, src);
}
// addss dst, src -- scalar float add (F3 0F 58), register-register.
void Assembler::addss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x58);
  emit_byte(0xC0 | encode);
}
// addss dst, [src] -- scalar float add from memory.
void Assembler::addss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x58);
  emit_operand(dst, src);
}
// andl dst, imm32 -- bitwise AND immediate into register.
void Assembler::andl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xE0, dst, imm32);
}
// andl dst, [src] -- bitwise AND memory into register.
void Assembler::andl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x23);
  emit_operand(dst, src);
}
// andl dst, src -- bitwise AND, register-register.
void Assembler::andl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x23, 0xC0, dst, src);
}
// andpd dst, [src] -- packed-double bitwise AND (66 0F 54) from memory.
void Assembler::andpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0x66);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x54);
  emit_operand(dst, src);
}
// bswap reg -- reverse the byte order of a 32-bit register (0F C8+r).
void Assembler::bswapl(Register reg) { // bswap
  int encode = prefix_and_encode(reg->encoding());
  emit_byte(0x0F);
  emit_byte(0xC8 | encode);
}
// call to a Label.  For a bound label emit the backward disp32 directly;
// otherwise register a patch site so the disp32 is filled in on bind.
void Assembler::call(Label& L, relocInfo::relocType rtype) {
  // suspect disp32 is always good
  int operand = LP64_ONLY(disp32_operand) NOT_LP64(imm_operand);

  if (L.is_bound()) {
    const int long_size = 5;  // size of "E8 disp32"
    int offs = (int)( target(L) - pc() );
    assert(offs <= 0, "assembler error");
    InstructionMark im(this);
    // 1110 1000 #32-bit disp
    emit_byte(0xE8);
    emit_data(offs - long_size, rtype, operand);
  } else {
    InstructionMark im(this);
    // 1110 1000 #32-bit disp
    L.add_patch_at(code(), locator());

    emit_byte(0xE8);
    emit_data(int(0), rtype, operand);  // placeholder, patched on bind
  }
}
// call dst -- indirect call through a register (FF /2).
void Assembler::call(Register dst) {
  // This was originally using a 32bit register encoding
  // and surely we want 64bit!
  // this is a 32bit encoding but in 64bit mode the default
  // operand size is 64bit so there is no need for the
  // wide prefix. So prefix only happens if we use the
  // new registers. Much like push/pop.
  int x = offset();
  // this may be true but dbx disassembles it as if it
  // were 32bits...
  // int encode = prefix_and_encode(dst->encoding());
  // if (offset() != x) assert(dst->encoding() >= 8, "what?");
  int encode = prefixq_and_encode(dst->encoding());

  emit_byte(0xFF);
  emit_byte(0xD0 | encode);
}
// call [adr] -- indirect call through memory (FF /2; rdx supplies the /2).
void Assembler::call(Address adr) {
  InstructionMark im(this);
  prefix(adr);
  emit_byte(0xFF);
  emit_operand(rdx, adr);
}
// Direct call to an absolute entry point, emitted as E8 with a pc-relative
// disp32 and the caller-supplied relocation.
void Assembler::call_literal(address entry, RelocationHolder const& rspec) {
  assert(entry != NULL, "call most probably wrong");
  InstructionMark im(this);
  emit_byte(0xE8);
  // disp is relative to the end of the 5-byte call instruction.
  intptr_t disp = entry - (_code_pos + sizeof(int32_t));
  assert(is_simm32(disp), "must be 32bit offset (call2)");
  // Technically, should use call32_operand, but this format is
  // implied by the fact that we're emitting a call instruction.

  int operand = LP64_ONLY(disp32_operand) NOT_LP64(call32_operand);
  emit_data((int) disp, rspec, operand);
}
// cdq -- sign-extend eax into edx:eax (opcode 0x99).
void Assembler::cdql() {
  emit_byte(0x99);
}
// cmovcc dst, src -- conditional move, register-register (0F 40+cc).
void Assembler::cmovl(Condition cc, Register dst, Register src) {
  NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x40 | cc);
  emit_byte(0xC0 | encode);
}
// cmovcc dst, [src] -- conditional move from memory.
void Assembler::cmovl(Condition cc, Register dst, Address src) {
  NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x40 | cc);
  emit_operand(dst, src);
}
// cmpb [dst], imm8 -- byte compare of memory with immediate (80 /7).
void Assembler::cmpb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0x80);
  emit_operand(rdi, dst, 1);
  emit_byte(imm8);
}
// cmpl [dst], imm32 -- compare memory with immediate (81 /7).
void Assembler::cmpl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0x81);
  emit_operand(rdi, dst, 4);
  emit_long(imm32);
}
// cmpl dst, imm32 — final encoding (0x81 /7, possibly shortened) chosen by emit_arith.
void Assembler::cmpl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xF8, dst, imm32);
}
// cmpl dst, src: 3B /r.
void Assembler::cmpl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x3B, 0xC0, dst, src);
}
// cmpl dst, [src]: 3B /r.
void Assembler::cmpl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x3B);
  emit_operand(dst, src);
}
// cmpw [dst], imm16: 66 81 /7 iw. No REX is emitted, hence the assert
// that the address uses no extended registers.
void Assembler::cmpw(Address dst, int imm16) {
  InstructionMark im(this);
  assert(!dst.base_needs_rex() && !dst.index_needs_rex(), "no extended registers");
  emit_byte(0x66); // operand-size override: 16-bit operation
  emit_byte(0x81);
  emit_operand(rdi, dst, 2);
  emit_word(imm16);
}
// The 32-bit cmpxchg compares the value at adr with the contents of rax,
// and stores reg into adr if so; otherwise, the value at adr is loaded into rax,.
// The ZF is set if the compared values were equal, and cleared otherwise.
void Assembler::cmpxchgl(Register reg, Address adr) { // cmpxchg
  if (Atomics & 2) {
    // Diagnostic mode selected by bit 1 of the Atomics flag:
    // caveat: no instructionmark, so this isn't relocatable.
    // Emit a synthetic, non-atomic, CAS equivalent.
    // Beware. The synthetic form sets all ICCs, not just ZF.
    // cmpxchg r,[m] is equivalent to rax, = CAS (m, rax, r)
    cmpl(rax, adr);
    movl(rax, adr);
    if (reg != rax) {
      Label L ;
      jcc(Assembler::notEqual, L);
      movl(adr, reg);
      bind(L);
    }
  } else {
    // Hardware cmpxchg: 0F B1 /r.
    InstructionMark im(this);
    prefix(adr, reg);
    emit_byte(0x0F);
    emit_byte(0xB1);
    emit_operand(reg, adr);
  }
}
// comisd dst, [src]: 66 0F 2F /r — scalar double compare, sets EFLAGS.
// Implemented as a 0x66-prefixed comiss.
void Assembler::comisd(XMMRegister dst, Address src) {
  // NOTE: dbx seems to decode this as comiss even though the
  // 0x66 is there. Strangly ucomisd comes out correct
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0x66);
  comiss(dst, src);
}
// comiss dst, [src]: 0F 2F /r — scalar single compare, sets EFLAGS.
void Assembler::comiss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x2F);
  emit_operand(dst, src);
}
// cvtdq2pd dst, src: F3 0F E6 /r — packed int32 -> packed double.
void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF3); // mandatory prefix, emitted before any REX
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0xE6);
  emit_byte(0xC0 | encode);
}
// cvtdq2ps dst, src: 0F 5B /r — packed int32 -> packed single.
void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x5B);
  emit_byte(0xC0 | encode);
}
// cvtsd2ss dst, src: F2 0F 5A /r — scalar double -> scalar single.
void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x5A);
  emit_byte(0xC0 | encode);
}
// cvtsi2sd dst, src (32-bit source): F2 0F 2A /r — int32 -> scalar double.
void Assembler::cvtsi2sdl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2A);
  emit_byte(0xC0 | encode);
}
// cvtsi2ss dst, src (32-bit source): F3 0F 2A /r — int32 -> scalar single.
void Assembler::cvtsi2ssl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2A);
  emit_byte(0xC0 | encode);
}
// cvtss2sd dst, src: F3 0F 5A /r — scalar single -> scalar double.
void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x5A);
  emit_byte(0xC0 | encode);
}
// cvttsd2si dst, src (32-bit result): F2 0F 2C /r — truncating double -> int32.
void Assembler::cvttsd2sil(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2C);
  emit_byte(0xC0 | encode);
}
// cvttss2si dst, src (32-bit result): F3 0F 2C /r — truncating single -> int32.
void Assembler::cvttss2sil(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2C);
  emit_byte(0xC0 | encode);
}
// decl [dst]: FF /1.
void Assembler::decl(Address dst) {
  // Don't use it directly. Use MacroAssembler::decrement() instead.
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0xFF);
  emit_operand(rcx, dst); // rcx == /1 opcode extension
}
// divsd dst, [src]: F2 0F 5E /r — scalar double divide.
void Assembler::divsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0xF2);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x5E);
  emit_operand(dst, src);
}
// divsd dst, src: F2 0F 5E /r — scalar double divide, register form.
void Assembler::divsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x5E);
  emit_byte(0xC0 | encode);
}
// divss dst, [src]: F3 0F 5E /r — scalar single divide.
void Assembler::divss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x5E);
  emit_operand(dst, src);
}
// divss dst, src: F3 0F 5E /r — scalar single divide, register form.
void Assembler::divss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x5E);
  emit_byte(0xC0 | encode);
}
// emms: 0F 77 — clear MMX state so x87 FP can be used again.
void Assembler::emms() {
  NOT_LP64(assert(VM_Version::supports_mmx(), ""));
  emit_byte(0x0F);
  emit_byte(0x77);
}
// hlt: F4 — halt the processor (used for unreachable/padding code).
void Assembler::hlt() {
  emit_byte(0xF4);
}
// idivl src: F7 /7 — signed divide of edx:eax by src.
void Assembler::idivl(Register src) {
  int encode = prefix_and_encode(src->encoding());
  emit_byte(0xF7);
  emit_byte(0xF8 | encode); // modrm 11 111 reg (/7)
}
// imull dst, src: 0F AF /r — two-operand signed multiply.
void Assembler::imull(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0xAF);
  emit_byte(0xC0 | encode);
}
// imull dst, src, imm: 6B /r ib (8-bit immediate) or 69 /r id (32-bit).
void Assembler::imull(Register dst, Register src, int value) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  if (is8bit(value)) {
    // Short form with sign-extended 8-bit immediate.
    emit_byte(0x6B);
    emit_byte(0xC0 | encode);
    emit_byte(value);
  } else {
    emit_byte(0x69);
    emit_byte(0xC0 | encode);
    emit_long(value);
  }
}
// incl [dst]: FF /0.
void Assembler::incl(Address dst) {
  // Don't use it directly. Use MacroAssembler::increment() instead.
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0xFF);
  emit_operand(rax, dst); // rax == /0 opcode extension
}
// Conditional jump to label L. For a bound label with no relocation and a
// displacement that fits in 8 bits, emits the short form 70+cc cb;
// otherwise the near form 0F 80+cc cd. An unbound label always gets the
// 6-byte near form so it can be patched once the label is bound.
void Assembler::jcc(Condition cc, Label& L, relocInfo::relocType rtype) {
  InstructionMark im(this);
  relocate(rtype);
  assert((0 <= cc) && (cc < 16), "illegal cc");
  if (L.is_bound()) {
    address dst = target(L);
    assert(dst != NULL, "jcc most probably wrong");
    const int short_size = 2;
    const int long_size = 6;
    // Displacements are relative to the end of the instruction,
    // hence the '- short_size' / '- long_size' corrections.
    intptr_t offs = (intptr_t)dst - (intptr_t)_code_pos;
    if (rtype == relocInfo::none && is8bit(offs - short_size)) {
      // 0111 tttn #8-bit disp
      emit_byte(0x70 | cc);
      emit_byte((offs - short_size) & 0xFF);
    } else {
      // 0000 1111 1000 tttn #32-bit disp
      assert(is_simm32(offs - long_size),
             "must be 32bit offset (call4)");
      emit_byte(0x0F);
      emit_byte(0x80 | cc);
      emit_long(offs - long_size);
    }
  } else {
    // Note: could eliminate cond. jumps to this jump if condition
    // is the same however, seems to be rather unlikely case.
    // Note: use jccb() if label to be bound is very close to get
    // an 8-bit displacement
    L.add_patch_at(code(), locator());
    emit_byte(0x0F);
    emit_byte(0x80 | cc);
    emit_long(0); // placeholder, patched when L is bound
  }
}
// Short-form conditional jump (70+cc cb) only. The caller asserts the
// target is within an 8-bit displacement; an unbound label gets a
// one-byte placeholder patched later.
void Assembler::jccb(Condition cc, Label& L) {
  if (L.is_bound()) {
    const int short_size = 2;
    address entry = target(L);
    // Displacement is relative to the end of the 2-byte instruction.
    assert(is8bit((intptr_t)entry - ((intptr_t)_code_pos + short_size)),
           "Dispacement too large for a short jmp");
    intptr_t offs = (intptr_t)entry - (intptr_t)_code_pos;
    // 0111 tttn #8-bit disp
    emit_byte(0x70 | cc);
    emit_byte((offs - short_size) & 0xFF);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    emit_byte(0x70 | cc);
    emit_byte(0); // placeholder, patched when L is bound
  }
}
// Indirect jump through a memory operand: FF /4.
void Assembler::jmp(Address adr) {
  InstructionMark im(this);
  prefix(adr);
  emit_byte(0xFF);
  emit_operand(rsp, adr); // rsp encodes the /4 opcode extension, not an operand
}
// Unconditional jump to label L: short form EB cb when the bound target
// fits in 8 bits and there is no relocation, otherwise near form E9 cd.
void Assembler::jmp(Label& L, relocInfo::relocType rtype) {
  if (L.is_bound()) {
    address entry = target(L);
    assert(entry != NULL, "jmp most probably wrong");
    InstructionMark im(this);
    const int short_size = 2;
    const int long_size = 5;
    // Displacements are relative to the end of the instruction.
    intptr_t offs = entry - _code_pos;
    if (rtype == relocInfo::none && is8bit(offs - short_size)) {
      emit_byte(0xEB);
      emit_byte((offs - short_size) & 0xFF);
    } else {
      emit_byte(0xE9);
      emit_long(offs - long_size);
    }
  } else {
    // By default, forward jumps are always 32-bit displacements, since
    // we can't yet know where the label will be bound. If you're sure that
    // the forward jump will not run beyond 256 bytes, use jmpb to
    // force an 8-bit displacement.
    InstructionMark im(this);
    relocate(rtype);
    L.add_patch_at(code(), locator());
    emit_byte(0xE9);
    emit_long(0); // placeholder, patched when L is bound
  }
}
// Indirect jump through a register: FF /4, modrm 11 100 reg.
void Assembler::jmp(Register entry) {
  int encode = prefix_and_encode(entry->encoding());
  emit_byte(0xFF);
  emit_byte(0xE0 | encode);
}
// Direct relative jump (E9 rel32) to 'dest', recording 'rspec' so the
// displacement can be relocated/patched later.
void Assembler::jmp_literal(address dest, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_byte(0xE9);
  assert(dest != NULL, "must have a target");
  // Displacement is relative to the end of this 5-byte instruction.
  intptr_t disp = dest - (_code_pos + sizeof(int32_t));
  assert(is_simm32(disp), "must be 32bit offset (jmp)");
  emit_data(disp, rspec.reloc(), call32_operand);
}
1385 void Assembler::jmpb(Label& L) {
1386 if (L.is_bound()) {
1387 const int short_size = 2;
1388 address entry = target(L);
1389 assert(is8bit((entry - _code_pos) + short_size),
1390 "Dispacement too large for a short jmp");
1391 assert(entry != NULL, "jmp most probably wrong");
1392 intptr_t offs = entry - _code_pos;
1393 emit_byte(0xEB);
1394 emit_byte((offs - short_size) & 0xFF);
1395 } else {
1396 InstructionMark im(this);
1397 L.add_patch_at(code(), locator());
1398 emit_byte(0xEB);
1399 emit_byte(0);
1400 }
1401 }
// ldmxcsr [src]: 0F AE /2 — load the MXCSR control/status register.
void Assembler::ldmxcsr( Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  prefix(src);
  emit_byte(0x0F);
  emit_byte(0xAE);
  emit_operand(as_Register(2), src); // register 2 == /2 opcode extension
}
// leal dst, [src]: 8D /r — 32-bit address computation. On 64-bit the
// 0x67 address-size prefix forces 32-bit address arithmetic.
void Assembler::leal(Register dst, Address src) {
  InstructionMark im(this);
#ifdef _LP64
  emit_byte(0x67); // addr32
  prefix(src, dst);
#endif // LP64
  emit_byte(0x8D);
  emit_operand(dst, src);
}
// Emit the LOCK prefix (F0). With bit 0 of the Atomics diagnostic flag
// set, a NOP (90) is emitted instead so lock overhead can be measured.
void Assembler::lock() {
  if (Atomics & 1) {
    // Emit either nothing, a NOP, or a NOP: prefix
    emit_byte(0x90) ;
  } else {
    emit_byte(0xF0);
  }
}
// Serializes memory.
void Assembler::mfence() {
  // Memory barriers are only needed on multiprocessors
  if (os::is_MP()) {
    if( LP64_ONLY(true ||) VM_Version::supports_sse2() ) {
      // SSE2 is implied on 64-bit, so LP64 always takes this path.
      emit_byte( 0x0F ); // MFENCE; faster blows no regs
      emit_byte( 0xAE );
      emit_byte( 0xF0 );
    } else {
      // All usable chips support "locked" instructions which suffice
      // as barriers, and are much faster than the alternative of
      // using cpuid instruction. We use here a locked add [esp],0.
      // This is conveniently otherwise a no-op except for blowing
      // flags (which we save and restore.)
      pushf();                // Save eflags register
      lock();
      addl(Address(rsp, 0), 0);// Assert the lock# signal here
      popf();                 // Restore eflags register
    }
  }
}
// Pointer-sized register move: movq on 64-bit, movl on 32-bit.
void Assembler::mov(Register dst, Register src) {
  LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
}
1457 void Assembler::movapd(XMMRegister dst, XMMRegister src) {
1458 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1459 int dstenc = dst->encoding();
1460 int srcenc = src->encoding();
1461 emit_byte(0x66);
1462 if (dstenc < 8) {
1463 if (srcenc >= 8) {
1464 prefix(REX_B);
1465 srcenc -= 8;
1466 }
1467 } else {
1468 if (srcenc < 8) {
1469 prefix(REX_R);
1470 } else {
1471 prefix(REX_RB);
1472 srcenc -= 8;
1473 }
1474 dstenc -= 8;
1475 }
1476 emit_byte(0x0F);
1477 emit_byte(0x28);
1478 emit_byte(0xC0 | dstenc << 3 | srcenc);
1479 }
1481 void Assembler::movaps(XMMRegister dst, XMMRegister src) {
1482 NOT_LP64(assert(VM_Version::supports_sse(), ""));
1483 int dstenc = dst->encoding();
1484 int srcenc = src->encoding();
1485 if (dstenc < 8) {
1486 if (srcenc >= 8) {
1487 prefix(REX_B);
1488 srcenc -= 8;
1489 }
1490 } else {
1491 if (srcenc < 8) {
1492 prefix(REX_R);
1493 } else {
1494 prefix(REX_RB);
1495 srcenc -= 8;
1496 }
1497 dstenc -= 8;
1498 }
1499 emit_byte(0x0F);
1500 emit_byte(0x28);
1501 emit_byte(0xC0 | dstenc << 3 | srcenc);
1502 }
// movb dst, [src]: 8A /r — byte load.
void Assembler::movb(Register dst, Address src) {
  NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
  InstructionMark im(this);
  prefix(src, dst, true); // 'true' requests byte-register prefixing
  emit_byte(0x8A);
  emit_operand(dst, src);
}
// movb [dst], imm8: C6 /0 ib.
void Assembler::movb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0xC6);
  emit_operand(rax, dst, 1); // rax == /0; trailing 1 = size of the imm8
  emit_byte(imm8);
}
// movb [dst], src: 88 /r — byte store.
void Assembler::movb(Address dst, Register src) {
  assert(src->has_byte_register(), "must have byte register");
  InstructionMark im(this);
  prefix(dst, src, true); // 'true' requests byte-register prefixing
  emit_byte(0x88);
  emit_operand(src, dst);
}
// movd dst(xmm), src(gpr): 66 0F 6E /r — 32-bit GPR -> XMM.
void Assembler::movdl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0x66);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x6E);
  emit_byte(0xC0 | encode);
}
// movd dst(gpr), src(xmm): 66 0F 7E /r — XMM -> 32-bit GPR.
void Assembler::movdl(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0x66);
  // swap src/dst to get correct prefix: for 0F 7E the XMM register is
  // the ModRM 'reg' field and the GPR is 'rm'.
  int encode = prefix_and_encode(src->encoding(), dst->encoding());
  emit_byte(0x0F);
  emit_byte(0x7E);
  emit_byte(0xC0 | encode);
}
// movdqa dst, [src]: 66 0F 6F /r — aligned 128-bit load.
void Assembler::movdqa(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0x66);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x6F);
  emit_operand(dst, src);
}
1559 void Assembler::movdqa(XMMRegister dst, XMMRegister src) {
1560 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1561 emit_byte(0x66);
1562 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
1563 emit_byte(0x0F);
1564 emit_byte(0x6F);
1565 emit_byte(0xC0 | encode);
1566 }
// movdqa [dst], src: 66 0F 7F /r — aligned 128-bit store.
void Assembler::movdqa(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0x66);
  prefix(dst, src);
  emit_byte(0x0F);
  emit_byte(0x7F);
  emit_operand(src, dst);
}
// movdqu dst, [src]: F3 0F 6F /r — unaligned 128-bit load.
void Assembler::movdqu(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x6F);
  emit_operand(dst, src);
}
1588 void Assembler::movdqu(XMMRegister dst, XMMRegister src) {
1589 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1590 emit_byte(0xF3);
1591 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
1592 emit_byte(0x0F);
1593 emit_byte(0x6F);
1594 emit_byte(0xC0 | encode);
1595 }
// movdqu [dst], src: F3 0F 7F /r — unaligned 128-bit store.
void Assembler::movdqu(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(dst, src);
  emit_byte(0x0F);
  emit_byte(0x7F);
  emit_operand(src, dst);
}
// Uses zero extension on 64bit

// movl dst, imm32: B8+rd id.
void Assembler::movl(Register dst, int32_t imm32) {
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xB8 | encode); // register is encoded in the opcode byte
  emit_long(imm32);
}
// movl dst, src: 8B /r.
void Assembler::movl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x8B);
  emit_byte(0xC0 | encode);
}
// movl dst, [src]: 8B /r.
void Assembler::movl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x8B);
  emit_operand(dst, src);
}
// movl [dst], imm32: C7 /0 id.
void Assembler::movl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0xC7);
  emit_operand(rax, dst, 4); // rax == /0; trailing 4 = size of the imm32
  emit_long(imm32);
}
// movl [dst], src: 89 /r.
void Assembler::movl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_byte(0x89);
  emit_operand(src, dst);
}
// New cpus require to use movsd and movss to avoid partial register stall
// when loading from memory. But for old Opteron use movlpd instead of movsd.
// The selection is done in MacroAssembler::movdbl() and movflt().

// movlpd dst, [src]: 66 0F 12 /r — load 64 bits into the low half of dst.
void Assembler::movlpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0x66);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x12);
  emit_operand(dst, src);
}
// movq dst(mm), [src]: 0F 6F /r — 64-bit MMX load.
void Assembler::movq( MMXRegister dst, Address src ) {
  assert( VM_Version::supports_mmx(), "" );
  emit_byte(0x0F);
  emit_byte(0x6F);
  emit_operand(dst, src);
}
// movq [dst], src(mm): 0F 7F /r — 64-bit MMX store.
void Assembler::movq( Address dst, MMXRegister src ) {
  assert( VM_Version::supports_mmx(), "" );
  emit_byte(0x0F);
  emit_byte(0x7F);
  // workaround gcc (3.2.1-7a) bug
  // In that version of gcc with only an emit_operand(MMX, Address)
  // gcc will tail jump and try and reverse the parameters completely
  // obliterating dst in the process. By having a version available
  // that doesn't need to swap the args at the tail jump the bug is
  // avoided.
  emit_operand(dst, src);
}
// movq dst(xmm), [src]: F3 0F 7E /r — 64-bit load, upper half zeroed.
void Assembler::movq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x7E);
  emit_operand(dst, src);
}
// movq [dst], src(xmm): 66 0F D6 /r — 64-bit store of the low half.
void Assembler::movq(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0x66);
  prefix(dst, src);
  emit_byte(0x0F);
  emit_byte(0xD6);
  emit_operand(src, dst);
}
// movsx dst, byte [src]: 0F BE /r — sign-extending byte load.
void Assembler::movsbl(Register dst, Address src) { // movsxb
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0xBE);
  emit_operand(dst, src);
}
// movsx dst, src(byte reg): 0F BE /r — sign-extend a byte register.
void Assembler::movsbl(Register dst, Register src) { // movsxb
  NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
  int encode = prefix_and_encode(dst->encoding(), src->encoding(), true);
  emit_byte(0x0F);
  emit_byte(0xBE);
  emit_byte(0xC0 | encode);
}
// movsd dst, src: F2 0F 10 /r — scalar double register move.
void Assembler::movsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x10);
  emit_byte(0xC0 | encode);
}
// movsd dst, [src]: F2 0F 10 /r — scalar double load.
void Assembler::movsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0xF2);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x10);
  emit_operand(dst, src);
}
// movsd [dst], src: F2 0F 11 /r — scalar double store.
void Assembler::movsd(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0xF2);
  prefix(dst, src);
  emit_byte(0x0F);
  emit_byte(0x11);
  emit_operand(src, dst);
}
// movss dst, src: F3 0F 10 /r — scalar single register move.
void Assembler::movss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x10);
  emit_byte(0xC0 | encode);
}
// movss dst, [src]: F3 0F 10 /r — scalar single load.
void Assembler::movss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x10);
  emit_operand(dst, src);
}
// movss [dst], src: F3 0F 11 /r — scalar single store.
void Assembler::movss(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(dst, src);
  emit_byte(0x0F);
  emit_byte(0x11);
  emit_operand(src, dst);
}
// movsx dst, word [src]: 0F BF /r — sign-extending 16-bit load.
void Assembler::movswl(Register dst, Address src) { // movsxw
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0xBF);
  emit_operand(dst, src);
}
// movsx dst, src(word): 0F BF /r — sign-extend a 16-bit register.
void Assembler::movswl(Register dst, Register src) { // movsxw
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0xBF);
  emit_byte(0xC0 | encode);
}
// movw [dst], imm16: 66 C7 /0 iw.
void Assembler::movw(Address dst, int imm16) {
  InstructionMark im(this);
  emit_byte(0x66); // switch to 16-bit mode
  prefix(dst);
  emit_byte(0xC7);
  emit_operand(rax, dst, 2); // rax == /0; trailing 2 = size of the imm16
  emit_word(imm16);
}
// movw dst, [src]: 66 8B /r — 16-bit load.
void Assembler::movw(Register dst, Address src) {
  InstructionMark im(this);
  emit_byte(0x66); // operand-size override: 16-bit operation
  prefix(src, dst);
  emit_byte(0x8B);
  emit_operand(dst, src);
}
// movw [dst], src: 66 89 /r — 16-bit store.
void Assembler::movw(Address dst, Register src) {
  InstructionMark im(this);
  emit_byte(0x66); // operand-size override: 16-bit operation
  prefix(dst, src);
  emit_byte(0x89);
  emit_operand(src, dst);
}
// movzx dst, byte [src]: 0F B6 /r — zero-extending byte load.
void Assembler::movzbl(Register dst, Address src) { // movzxb
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0xB6);
  emit_operand(dst, src);
}
// movzx dst, src(byte reg): 0F B6 /r — zero-extend a byte register.
void Assembler::movzbl(Register dst, Register src) { // movzxb
  NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
  int encode = prefix_and_encode(dst->encoding(), src->encoding(), true);
  emit_byte(0x0F);
  emit_byte(0xB6);
  emit_byte(0xC0 | encode);
}
// movzx dst, word [src]: 0F B7 /r — zero-extending 16-bit load.
void Assembler::movzwl(Register dst, Address src) { // movzxw
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0xB7);
  emit_operand(dst, src);
}
// movzx dst, src(word): 0F B7 /r — zero-extend a 16-bit register.
void Assembler::movzwl(Register dst, Register src) { // movzxw
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0xB7);
  emit_byte(0xC0 | encode);
}
// mull [src]: F7 /4 — unsigned multiply of eax by memory operand.
void Assembler::mull(Address src) {
  InstructionMark im(this);
  prefix(src);
  emit_byte(0xF7);
  emit_operand(rsp, src); // rsp encodes the /4 opcode extension
}
// mull src: F7 /4 — unsigned multiply of eax by src.
void Assembler::mull(Register src) {
  int encode = prefix_and_encode(src->encoding());
  emit_byte(0xF7);
  emit_byte(0xE0 | encode); // modrm 11 100 reg (/4)
}
// mulsd dst, [src]: F2 0F 59 /r — scalar double multiply.
void Assembler::mulsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0xF2);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x59);
  emit_operand(dst, src);
}
// mulsd dst, src: F2 0F 59 /r — scalar double multiply, register form.
void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x59);
  emit_byte(0xC0 | encode);
}
// mulss dst, [src]: F3 0F 59 /r — scalar single multiply.
void Assembler::mulss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x59);
  emit_operand(dst, src);
}
// mulss dst, src: F3 0F 59 /r — scalar single multiply, register form.
void Assembler::mulss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x59);
  emit_byte(0xC0 | encode);
}
// negl dst: F7 /3 — two's-complement negate.
void Assembler::negl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xF7);
  emit_byte(0xD8 | encode); // modrm 11 011 reg (/3)
}
// Emit 'i' bytes of padding. In debug builds this is always a run of
// single-byte nops; in product builds multi-byte address nops are used
// when UseAddressNop is on, with CPU-specific sequences for Intel and
// AMD, falling back to 0x66-prefixed nops otherwise. The switch
// statements below rely on deliberate case fall-through to build up
// the prefix bytes.
void Assembler::nop(int i) {
#ifdef ASSERT
  assert(i > 0, " ");
  // The fancy nops aren't currently recognized by debuggers making it a
  // pain to disassemble code while debugging. If asserts are on clearly
  // speed is not an issue so simply use the single byte traditional nop
  // to do alignment.
  for (; i > 0 ; i--) emit_byte(0x90);
  return;
#endif // ASSERT
  if (UseAddressNop && VM_Version::is_intel()) {
    //
    // Using multi-bytes nops "0x0F 0x1F [address]" for Intel
    // 1: 0x90
    // 2: 0x66 0x90
    // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
    // 4: 0x0F 0x1F 0x40 0x00
    // 5: 0x0F 0x1F 0x44 0x00 0x00
    // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00
    // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // The rest coding is Intel specific - don't use consecutive address nops
    // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    while(i >= 15) {
      // For Intel don't generate consecutive addess nops (mix with regular nops)
      i -= 15;
      emit_byte(0x66);   // size prefix
      emit_byte(0x66);   // size prefix
      emit_byte(0x66);   // size prefix
      addr_nop_8();
      emit_byte(0x66);   // size prefix
      emit_byte(0x66);   // size prefix
      emit_byte(0x66);   // size prefix
      emit_byte(0x90);   // nop
    }
    switch (i) {         // cases fall through to accumulate prefixes
      case 14:
        emit_byte(0x66); // size prefix
      case 13:
        emit_byte(0x66); // size prefix
      case 12:
        addr_nop_8();
        emit_byte(0x66); // size prefix
        emit_byte(0x66); // size prefix
        emit_byte(0x66); // size prefix
        emit_byte(0x90); // nop
        break;
      case 11:
        emit_byte(0x66); // size prefix
      case 10:
        emit_byte(0x66); // size prefix
      case 9:
        emit_byte(0x66); // size prefix
      case 8:
        addr_nop_8();
        break;
      case 7:
        addr_nop_7();
        break;
      case 6:
        emit_byte(0x66); // size prefix
      case 5:
        addr_nop_5();
        break;
      case 4:
        addr_nop_4();
        break;
      case 3:
        // Don't use "0x0F 0x1F 0x00" - need patching safe padding
        emit_byte(0x66); // size prefix
      case 2:
        emit_byte(0x66); // size prefix
      case 1:
        emit_byte(0x90); // nop
        break;
      default:
        assert(i == 0, " ");
    }
    return;
  }
  if (UseAddressNop && VM_Version::is_amd()) {
    //
    // Using multi-bytes nops "0x0F 0x1F [address]" for AMD.
    // 1: 0x90
    // 2: 0x66 0x90
    // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
    // 4: 0x0F 0x1F 0x40 0x00
    // 5: 0x0F 0x1F 0x44 0x00 0x00
    // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00
    // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // The rest coding is AMD specific - use consecutive address nops
    // 12: 0x66 0x0F 0x1F 0x44 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
    // 13: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
    // 14: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    // 15: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    // 16: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // Size prefixes (0x66) are added for larger sizes
    while(i >= 22) {
      i -= 11;
      emit_byte(0x66); // size prefix
      emit_byte(0x66); // size prefix
      emit_byte(0x66); // size prefix
      addr_nop_8();
    }
    // Generate first nop for size between 21-12
    switch (i) {       // cases fall through to accumulate prefixes
      case 21:
        i -= 1;
        emit_byte(0x66); // size prefix
      case 20:
      case 19:
        i -= 1;
        emit_byte(0x66); // size prefix
      case 18:
      case 17:
        i -= 1;
        emit_byte(0x66); // size prefix
      case 16:
      case 15:
        i -= 8;
        addr_nop_8();
        break;
      case 14:
      case 13:
        i -= 7;
        addr_nop_7();
        break;
      case 12:
        i -= 6;
        emit_byte(0x66); // size prefix
        addr_nop_5();
        break;
      default:
        assert(i < 12, " ");
    }
    // Generate second nop for size between 11-1
    switch (i) {       // cases fall through to accumulate prefixes
      case 11:
        emit_byte(0x66); // size prefix
      case 10:
        emit_byte(0x66); // size prefix
      case 9:
        emit_byte(0x66); // size prefix
      case 8:
        addr_nop_8();
        break;
      case 7:
        addr_nop_7();
        break;
      case 6:
        emit_byte(0x66); // size prefix
      case 5:
        addr_nop_5();
        break;
      case 4:
        addr_nop_4();
        break;
      case 3:
        // Don't use "0x0F 0x1F 0x00" - need patching safe padding
        emit_byte(0x66); // size prefix
      case 2:
        emit_byte(0x66); // size prefix
      case 1:
        emit_byte(0x90); // nop
        break;
      default:
        assert(i == 0, " ");
    }
    return;
  }
  // Using nops with size prefixes "0x66 0x90".
  // From AMD Optimization Guide:
  // 1: 0x90
  // 2: 0x66 0x90
  // 3: 0x66 0x66 0x90
  // 4: 0x66 0x66 0x66 0x90
  // 5: 0x66 0x66 0x90 0x66 0x90
  // 6: 0x66 0x66 0x90 0x66 0x66 0x90
  // 7: 0x66 0x66 0x66 0x90 0x66 0x66 0x90
  // 8: 0x66 0x66 0x66 0x90 0x66 0x66 0x66 0x90
  // 9: 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
  // 10: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
  //
  while(i > 12) {
    i -= 4;
    emit_byte(0x66); // size prefix
    emit_byte(0x66);
    emit_byte(0x66);
    emit_byte(0x90); // nop
  }
  // 1 - 12 nops
  if(i > 8) {
    if(i > 9) {
      i -= 1;
      emit_byte(0x66);
    }
    i -= 3;
    emit_byte(0x66);
    emit_byte(0x66);
    emit_byte(0x90);
  }
  // 1 - 8 nops
  if(i > 4) {
    if(i > 6) {
      i -= 1;
      emit_byte(0x66);
    }
    i -= 3;
    emit_byte(0x66);
    emit_byte(0x66);
    emit_byte(0x90);
  }
  switch (i) {       // cases fall through to accumulate prefixes
    case 4:
      emit_byte(0x66);
    case 3:
      emit_byte(0x66);
    case 2:
      emit_byte(0x66);
    case 1:
      emit_byte(0x90);
      break;
    default:
      assert(i == 0, " ");
  }
}
// notl dst: F7 /2 — bitwise complement.
void Assembler::notl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xF7);
  emit_byte(0xD0 | encode ); // modrm 11 010 reg (/2)
}
// orl [dst], imm32: 81 /1 id.
void Assembler::orl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0x81);
  emit_operand(rcx, dst, 4); // rcx == /1; trailing 4 = size of the imm32
  emit_long(imm32);
}
// orl dst, imm32 — final encoding (0x81 /1, possibly shortened) chosen by emit_arith.
void Assembler::orl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xC8, dst, imm32);
}
2167 void Assembler::orl(Register dst, Address src) {
2168 InstructionMark im(this);
2169 prefix(src, dst);
2170 emit_byte(0x0B);
2171 emit_operand(dst, src);
2172 }
2175 void Assembler::orl(Register dst, Register src) {
2176 (void) prefix_and_encode(dst->encoding(), src->encoding());
2177 emit_arith(0x0B, 0xC0, dst, src);
2178 }
2180 // generic
2181 void Assembler::pop(Register dst) {
2182 int encode = prefix_and_encode(dst->encoding());
2183 emit_byte(0x58 | encode);
2184 }
2186 void Assembler::popf() {
2187 emit_byte(0x9D);
2188 }
2190 void Assembler::popl(Address dst) {
2191 // NOTE: this will adjust stack by 8byte on 64bits
2192 InstructionMark im(this);
2193 prefix(dst);
2194 emit_byte(0x8F);
2195 emit_operand(rax, dst);
2196 }
2198 void Assembler::prefetch_prefix(Address src) {
2199 prefix(src);
2200 emit_byte(0x0F);
2201 }
2203 void Assembler::prefetchnta(Address src) {
2204 NOT_LP64(assert(VM_Version::supports_sse2(), "must support"));
2205 InstructionMark im(this);
2206 prefetch_prefix(src);
2207 emit_byte(0x18);
2208 emit_operand(rax, src); // 0, src
2209 }
2211 void Assembler::prefetchr(Address src) {
2212 NOT_LP64(assert(VM_Version::supports_3dnow(), "must support"));
2213 InstructionMark im(this);
2214 prefetch_prefix(src);
2215 emit_byte(0x0D);
2216 emit_operand(rax, src); // 0, src
2217 }
2219 void Assembler::prefetcht0(Address src) {
2220 NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
2221 InstructionMark im(this);
2222 prefetch_prefix(src);
2223 emit_byte(0x18);
2224 emit_operand(rcx, src); // 1, src
2225 }
2227 void Assembler::prefetcht1(Address src) {
2228 NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
2229 InstructionMark im(this);
2230 prefetch_prefix(src);
2231 emit_byte(0x18);
2232 emit_operand(rdx, src); // 2, src
2233 }
2235 void Assembler::prefetcht2(Address src) {
2236 NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
2237 InstructionMark im(this);
2238 prefetch_prefix(src);
2239 emit_byte(0x18);
2240 emit_operand(rbx, src); // 3, src
2241 }
2243 void Assembler::prefetchw(Address src) {
2244 NOT_LP64(assert(VM_Version::supports_3dnow(), "must support"));
2245 InstructionMark im(this);
2246 prefetch_prefix(src);
2247 emit_byte(0x0D);
2248 emit_operand(rcx, src); // 1, src
2249 }
2251 void Assembler::prefix(Prefix p) {
2252 a_byte(p);
2253 }
// PSHUFD xmm, xmm, imm8 (66 0F 70 /r ib): shuffle packed doublewords.
// The 0x66 operand-size prefix is emitted before prefix_and_encode so that
// it precedes any REX byte, as the encoding rules require.
void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));

  emit_byte(0x66);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x70);
  emit_byte(0xC0 | encode);
  emit_byte(mode & 0xFF);
}

// PSHUFD xmm, m128, imm8.
void Assembler::pshufd(XMMRegister dst, Address src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));

  InstructionMark im(this);
  emit_byte(0x66);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x70);
  emit_operand(dst, src);
  emit_byte(mode & 0xFF);
}

// PSHUFLW xmm, xmm, imm8 (F2 0F 70 /r ib): shuffle the low packed words.
void Assembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));

  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x70);
  emit_byte(0xC0 | encode);
  emit_byte(mode & 0xFF);
}

// PSHUFLW xmm, m128, imm8.
void Assembler::pshuflw(XMMRegister dst, Address src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));

  InstructionMark im(this);
  emit_byte(0xF2);
  prefix(src, dst); // QQ new
  emit_byte(0x0F);
  emit_byte(0x70);
  emit_operand(dst, src);
  emit_byte(mode & 0xFF);
}
2306 void Assembler::psrlq(XMMRegister dst, int shift) {
2307 // HMM Table D-1 says sse2 or mmx
2308 NOT_LP64(assert(VM_Version::supports_sse(), ""));
2310 int encode = prefixq_and_encode(xmm2->encoding(), dst->encoding());
2311 emit_byte(0x66);
2312 emit_byte(0x0F);
2313 emit_byte(0x73);
2314 emit_byte(0xC0 | encode);
2315 emit_byte(shift);
2316 }
// PUNPCKLBW xmm, xmm (66 0F 60 /r): interleave the low-order bytes.
void Assembler::punpcklbw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0x66);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x60);
  emit_byte(0xC0 | encode);
}

// PUSH imm32 (68 id).
void Assembler::push(int32_t imm32) {
  // in 64bits we push 64bits onto the stack but only
  // take a 32bit immediate
  emit_byte(0x68);
  emit_long(imm32);
}

// PUSH r32/r64 (50+rd).
void Assembler::push(Register src) {
  int encode = prefix_and_encode(src->encoding());

  emit_byte(0x50 | encode);
}

// PUSHF/PUSHFQ (9C): push the flags register.
void Assembler::pushf() {
  emit_byte(0x9C);
}

// PUSH m (FF /6); rsi supplies the /6 reg field.
void Assembler::pushl(Address src) {
  // Note this will push 64bit on 64bit
  InstructionMark im(this);
  prefix(src);
  emit_byte(0xFF);
  emit_operand(rsi, src);
}

// PXOR xmm, m128 (66 0F EF /r): bitwise XOR with a memory operand.
void Assembler::pxor(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0x66);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0xEF);
  emit_operand(dst, src);
}
2362 void Assembler::pxor(XMMRegister dst, XMMRegister src) {
2363 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
2364 InstructionMark im(this);
2365 emit_byte(0x66);
2366 int encode = prefix_and_encode(dst->encoding(), src->encoding());
2367 emit_byte(0x0F);
2368 emit_byte(0xEF);
2369 emit_byte(0xC0 | encode);
2370 }
// RCL r32, imm8: rotate left through carry (D1 /2 for a count of 1,
// otherwise C1 /2 ib).
void Assembler::rcll(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_byte(0xD1);
    emit_byte(0xD0 | encode);
  } else {
    emit_byte(0xC1);
    emit_byte(0xD0 | encode);
    emit_byte(imm8);
  }
}

// copies data from [esi] to [edi] using rcx pointer sized words
// generic
void Assembler::rep_mov() {
  emit_byte(0xF3);  // REP
  // MOVSQ
  LP64_ONLY(prefix(REX_W));
  emit_byte(0xA5);
}

// sets rcx pointer sized words with rax, value at [edi]
// generic
void Assembler::rep_set() { // rep_set
  emit_byte(0xF3);  // REP
  // STOSQ
  LP64_ONLY(prefix(REX_W));
  emit_byte(0xAB);
}

// scans rcx pointer sized words at [edi] for occurrence of rax,
// generic
void Assembler::repne_scan() { // repne_scan
  emit_byte(0xF2);  // REPNE
  // SCASQ
  LP64_ONLY(prefix(REX_W));
  emit_byte(0xAF);
}

#ifdef _LP64
// scans rcx 4 byte words at [edi] for occurrence of rax,
// generic
void Assembler::repne_scanl() { // repne_scan
  emit_byte(0xF2);  // REPNE
  // SCASL
  emit_byte(0xAF);
}
#endif

// RET near (C3), or RET imm16 (C2 iw) to also pop imm16 bytes of arguments.
void Assembler::ret(int imm16) {
  if (imm16 == 0) {
    emit_byte(0xC3);
  } else {
    emit_byte(0xC2);
    emit_word(imm16);
  }
}
// SAHF (9E): store AH into the low byte of the flags register.
void Assembler::sahf() {
#ifdef _LP64
  // Not supported in 64bit mode
  ShouldNotReachHere();
#endif
  // Debug builds trap above on 64-bit; the byte is still emitted after the
  // (no-op in product builds) guard.
  emit_byte(0x9E);
}

// SAR r32, imm8: arithmetic right shift (D1 /7 for a count of 1,
// otherwise C1 /7 ib).
void Assembler::sarl(Register dst, int imm8) {
  int encode = prefix_and_encode(dst->encoding());
  assert(isShiftCount(imm8), "illegal shift count");
  if (imm8 == 1) {
    emit_byte(0xD1);
    emit_byte(0xF8 | encode);
  } else {
    emit_byte(0xC1);
    emit_byte(0xF8 | encode);
    emit_byte(imm8);
  }
}

// SAR r32, CL (D3 /7).
void Assembler::sarl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xD3);
  emit_byte(0xF8 | encode);
}

// SBB [dst], imm32; emit_arith_operand picks the 8-bit-immediate form when
// possible, with rbx supplying the /3 reg field.
void Assembler::sbbl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rbx, dst, imm32);
}

// SBB r32, imm32.
void Assembler::sbbl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xD8, dst, imm32);
}

// SBB r32, m32 (1B /r).
void Assembler::sbbl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x1B);
  emit_operand(dst, src);
}

// SBB r32, r32 (1B /r).
void Assembler::sbbl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x1B, 0xC0, dst, src);
}

// SETcc r8 (0F 90+cc): set dst's low byte to 0/1 from condition cc.
// byteinst=true so the REX-only byte registers get their prefix on 64-bit.
void Assembler::setb(Condition cc, Register dst) {
  assert(0 <= cc && cc < 16, "illegal cc");
  int encode = prefix_and_encode(dst->encoding(), true);
  emit_byte(0x0F);
  emit_byte(0x90 | cc);
  emit_byte(0xC0 | encode);
}

// SHL r32, imm8 (D1 /4 for a count of 1, otherwise C1 /4 ib).
void Assembler::shll(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
  if (imm8 == 1 ) {
    emit_byte(0xD1);
    emit_byte(0xE0 | encode);
  } else {
    emit_byte(0xC1);
    emit_byte(0xE0 | encode);
    emit_byte(imm8);
  }
}

// SHL r32, CL (D3 /4).
void Assembler::shll(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xD3);
  emit_byte(0xE0 | encode);
}

// SHR r32, imm8 (C1 /5 ib). Unlike sarl/shll above there is no one-byte
// D1 /5 special case for imm8 == 1; the encoding is equivalent, just one
// byte longer.
void Assembler::shrl(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xC1);
  emit_byte(0xE8 | encode);
  emit_byte(imm8);
}

// SHR r32, CL (D3 /5).
void Assembler::shrl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xD3);
  emit_byte(0xE8 | encode);
}

// copies a single word from [esi] to [edi]
void Assembler::smovl() {
  emit_byte(0xA5);
}

// SQRTSD xmm, xmm (F2 0F 51 /r): scalar double-precision square root.
void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
  // HMM Table D-1 says sse2
  // NOT_LP64(assert(VM_Version::supports_sse(), ""));
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x51);
  emit_byte(0xC0 | encode);
}
// STMXCSR m32 (0F AE /3): store the MXCSR control/status register.
void Assembler::stmxcsr( Address dst) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0x0F);
  emit_byte(0xAE);
  emit_operand(as_Register(3), dst);  // /3 opcode extension
}

// SUB [dst], imm32: 83 /5 ib when the immediate fits in a signed byte,
// otherwise 81 /5 id; rbp supplies the /5 reg field.
void Assembler::subl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  if (is8bit(imm32)) {
    emit_byte(0x83);
    emit_operand(rbp, dst, 1);
    emit_byte(imm32 & 0xFF);
  } else {
    emit_byte(0x81);
    emit_operand(rbp, dst, 4);
    emit_long(imm32);
  }
}

// SUB r32, imm32.
void Assembler::subl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xE8, dst, imm32);
}

// SUB [dst], r32 (29 /r).
void Assembler::subl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_byte(0x29);
  emit_operand(src, dst);
}

// SUB r32, m32 (2B /r).
void Assembler::subl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x2B);
  emit_operand(dst, src);
}

// SUB r32, r32 (2B /r).
void Assembler::subl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x2B, 0xC0, dst, src);
}

// SUBSD xmm, xmm (F2 0F 5C /r): scalar double subtract.
void Assembler::subsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x5C);
  emit_byte(0xC0 | encode);
}

// SUBSD xmm, m64.
void Assembler::subsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0xF2);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x5C);
  emit_operand(dst, src);
}

// SUBSS xmm, xmm (F3 0F 5C /r): scalar single subtract.
void Assembler::subss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x5C);
  emit_byte(0xC0 | encode);
}

// SUBSS xmm, m32.
void Assembler::subss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x5C);
  emit_operand(dst, src);
}

// TEST r8, imm8 (F6 /0 ib); byteinst=true for the REX-only byte registers.
void Assembler::testb(Register dst, int imm8) {
  NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
  (void) prefix_and_encode(dst->encoding(), true);
  emit_arith_b(0xF6, 0xC0, dst, imm8);
}

// TEST r32, imm32: short form A9 id when dst is rax (encoding 0),
// otherwise F7 /0 id.
void Assembler::testl(Register dst, int32_t imm32) {
  // not using emit_arith because test
  // doesn't support sign-extension of
  // 8bit operands
  int encode = dst->encoding();
  if (encode == 0) {
    emit_byte(0xA9);
  } else {
    encode = prefix_and_encode(encode);
    emit_byte(0xF7);
    emit_byte(0xC0 | encode);
  }
  emit_long(imm32);
}

// TEST r32, r32 (85 /r).
void Assembler::testl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x85, 0xC0, dst, src);
}

// TEST r32, m32 (85 /r).
void Assembler::testl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x85);
  emit_operand(dst, src);
}
// UCOMISD xmm, m64 (66 0F 2E /r): delegates to ucomiss after the 0x66 prefix.
// NOTE(review): the 0x66 is emitted before ucomiss() creates its
// InstructionMark, so the mark lands one byte into the instruction --
// confirm no caller relies on the mark covering the whole encoding.
void Assembler::ucomisd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0x66);
  ucomiss(dst, src);
}

// UCOMISD xmm, xmm (66 0F 2E /r).
void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0x66);
  ucomiss(dst, src);
}

// UCOMISS xmm, m32 (0F 2E /r): unordered scalar compare, sets EFLAGS.
void Assembler::ucomiss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));

  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x2E);
  emit_operand(dst, src);
}

// UCOMISS xmm, xmm (0F 2E /r).
void Assembler::ucomiss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2E);
  emit_byte(0xC0 | encode);
}

// XADD [dst], r32 (0F C1 /r): exchange and add.
void Assembler::xaddl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_byte(0x0F);
  emit_byte(0xC1);
  emit_operand(src, dst);
}

// XCHG r32, m32 (87 /r).
void Assembler::xchgl(Register dst, Address src) { // xchg
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x87);
  emit_operand(dst, src);
}

// XCHG r32, r32 (87 /r).
void Assembler::xchgl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x87);
  emit_byte(0xc0 | encode);
}

// XOR r32, imm32.
void Assembler::xorl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xF0, dst, imm32);
}

// XOR r32, m32 (33 /r).
void Assembler::xorl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x33);
  emit_operand(dst, src);
}

// XOR r32, r32 (33 /r).
void Assembler::xorl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x33, 0xC0, dst, src);
}

// XORPD xmm, xmm (66 0F 57 /r): delegates to xorps after the 0x66 prefix.
void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0x66);
  xorps(dst, src);
}

// XORPD xmm, m128 (66 0F 57 /r).
void Assembler::xorpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0x66);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x57);
  emit_operand(dst, src);
}

// XORPS xmm, xmm (0F 57 /r): bitwise XOR of packed singles.
void Assembler::xorps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x57);
  emit_byte(0xC0 | encode);
}

// XORPS xmm, m128 (0F 57 /r).
void Assembler::xorps(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x57);
  emit_operand(dst, src);
}
2760 #ifndef _LP64
2761 // 32bit only pieces of the assembler
// CMP r32, imm32 (81 /7) with relocated immediate data.
void Assembler::cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec) {
  // NO PREFIX AS NEVER 64BIT
  InstructionMark im(this);
  emit_byte(0x81);
  emit_byte(0xF8 | src1->encoding());  // mod=11, /7 opcode extension
  emit_data(imm32, rspec, 0);
}

// CMP m32, imm32 (81 /7) with relocated immediate data; rdi supplies the /7.
void Assembler::cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec) {
  // NO PREFIX AS NEVER 64BIT (not even 32bit versions of 64bit regs
  InstructionMark im(this);
  emit_byte(0x81);
  emit_operand(rdi, src1);
  emit_data(imm32, rspec, 0);
}

// The 64-bit (32bit platform) cmpxchg compares the value at adr with the contents of rdx:rax,
// and stores rcx:rbx into adr if so; otherwise, the value at adr is loaded
// into rdx:rax. The ZF is set if the compared values were equal, and cleared otherwise.
void Assembler::cmpxchg8(Address adr) {
  InstructionMark im(this);
  emit_byte(0x0F);
  emit_byte(0xc7);
  emit_operand(rcx, adr);  // 0F C7 /1: CMPXCHG8B m64
}

// One-byte DEC r32 (48+rd) -- 32-bit only; on 64-bit 0x48+rd is a REX prefix.
void Assembler::decl(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementl() instead.
  emit_byte(0x48 | dst->encoding());
}
2794 #endif // _LP64
2796 // 64bit typically doesn't use the x87 but needs to for the trig funcs
// FABS (D9 E1): ST(0) <- |ST(0)|.
void Assembler::fabs() {
  emit_byte(0xD9);
  emit_byte(0xE1);
}

// FADD with ST(i), D8-encoded form.
void Assembler::fadd(int i) {
  emit_farith(0xD8, 0xC0, i);
}

// FADD m64 (DC /0); rax supplies the /0 opcode extension.
void Assembler::fadd_d(Address src) {
  InstructionMark im(this);
  emit_byte(0xDC);
  emit_operand32(rax, src);
}

// FADD m32 (D8 /0).
void Assembler::fadd_s(Address src) {
  InstructionMark im(this);
  emit_byte(0xD8);
  emit_operand32(rax, src);
}

// FADD, DC-encoded (accumulate-into-ST(i)) form.
void Assembler::fadda(int i) {
  emit_farith(0xDC, 0xC0, i);
}

// FADDP: add and pop the register stack.
void Assembler::faddp(int i) {
  emit_farith(0xDE, 0xC0, i);
}

// FCHS (D9 E0): negate ST(0).
void Assembler::fchs() {
  emit_byte(0xD9);
  emit_byte(0xE0);
}

// FCOM ST(i): compare ST(0) with ST(i).
void Assembler::fcom(int i) {
  emit_farith(0xD8, 0xD0, i);
}

// FCOMP ST(i): compare and pop.
void Assembler::fcomp(int i) {
  emit_farith(0xD8, 0xD8, i);
}

// FCOMP m64 (DC /3).
void Assembler::fcomp_d(Address src) {
  InstructionMark im(this);
  emit_byte(0xDC);
  emit_operand32(rbx, src);
}

// FCOMP m32 (D8 /3).
void Assembler::fcomp_s(Address src) {
  InstructionMark im(this);
  emit_byte(0xD8);
  emit_operand32(rbx, src);
}

// FCOMPP (DE D9): compare ST(0) with ST(1), pop both.
void Assembler::fcompp() {
  emit_byte(0xDE);
  emit_byte(0xD9);
}

// FCOS (D9 FF): ST(0) <- cos(ST(0)).
void Assembler::fcos() {
  emit_byte(0xD9);
  emit_byte(0xFF);
}

// FDECSTP (D9 F6): decrement the register-stack top pointer.
void Assembler::fdecstp() {
  emit_byte(0xD9);
  emit_byte(0xF6);
}

// FDIV with ST(i), D8-encoded form.
void Assembler::fdiv(int i) {
  emit_farith(0xD8, 0xF0, i);
}

// FDIV m64 (DC /6).
void Assembler::fdiv_d(Address src) {
  InstructionMark im(this);
  emit_byte(0xDC);
  emit_operand32(rsi, src);
}

// FDIV m32 (D8 /6).
void Assembler::fdiv_s(Address src) {
  InstructionMark im(this);
  emit_byte(0xD8);
  emit_operand32(rsi, src);
}

// FDIV, DC-encoded (result-into-ST(i)) form.
void Assembler::fdiva(int i) {
  emit_farith(0xDC, 0xF8, i);
}

// Note: The Intel manual (Pentium Processor User's Manual, Vol.3, 1994)
// is erroneous for some of the floating-point instructions below.

void Assembler::fdivp(int i) {
  emit_farith(0xDE, 0xF8, i); // ST(0) <- ST(0) / ST(1) and pop (Intel manual wrong)
}

// FDIVR with ST(i), D8-encoded form.
void Assembler::fdivr(int i) {
  emit_farith(0xD8, 0xF8, i);
}

// FDIVR m64 (DC /7).
void Assembler::fdivr_d(Address src) {
  InstructionMark im(this);
  emit_byte(0xDC);
  emit_operand32(rdi, src);
}

// FDIVR m32 (D8 /7).
void Assembler::fdivr_s(Address src) {
  InstructionMark im(this);
  emit_byte(0xD8);
  emit_operand32(rdi, src);
}

// FDIVR, DC-encoded (result-into-ST(i)) form.
void Assembler::fdivra(int i) {
  emit_farith(0xDC, 0xF0, i);
}

void Assembler::fdivrp(int i) {
  emit_farith(0xDE, 0xF0, i); // ST(0) <- ST(1) / ST(0) and pop (Intel manual wrong)
}

// FFREE ST(i) (DD C0+i): mark the register as empty.
void Assembler::ffree(int i) {
  emit_farith(0xDD, 0xC0, i);
}
// FILD m64int (DF /5): push a 64-bit integer onto the FP stack.
void Assembler::fild_d(Address adr) {
  InstructionMark im(this);
  emit_byte(0xDF);
  emit_operand32(rbp, adr);
}

// FILD m32int (DB /0).
void Assembler::fild_s(Address adr) {
  InstructionMark im(this);
  emit_byte(0xDB);
  emit_operand32(rax, adr);
}

// FINCSTP (D9 F7): increment the register-stack top pointer.
void Assembler::fincstp() {
  emit_byte(0xD9);
  emit_byte(0xF7);
}

// FINIT (9B DB E3): wait, then initialize the FPU.
void Assembler::finit() {
  emit_byte(0x9B);
  emit_byte(0xDB);
  emit_byte(0xE3);
}

// FIST m32int (DB /2): store ST(0) as a 32-bit integer.
void Assembler::fist_s(Address adr) {
  InstructionMark im(this);
  emit_byte(0xDB);
  emit_operand32(rdx, adr);
}

// FISTP m64int (DF /7): store ST(0) as a 64-bit integer and pop.
void Assembler::fistp_d(Address adr) {
  InstructionMark im(this);
  emit_byte(0xDF);
  emit_operand32(rdi, adr);
}

// FISTP m32int (DB /3).
void Assembler::fistp_s(Address adr) {
  InstructionMark im(this);
  emit_byte(0xDB);
  emit_operand32(rbx, adr);
}

// FLD1 (D9 E8): push +1.0.
void Assembler::fld1() {
  emit_byte(0xD9);
  emit_byte(0xE8);
}

// FLD m64 (DD /0).
void Assembler::fld_d(Address adr) {
  InstructionMark im(this);
  emit_byte(0xDD);
  emit_operand32(rax, adr);
}

// FLD m32 (D9 /0).
void Assembler::fld_s(Address adr) {
  InstructionMark im(this);
  emit_byte(0xD9);
  emit_operand32(rax, adr);
}

// FLD ST(index) (D9 C0+index).
void Assembler::fld_s(int index) {
  emit_farith(0xD9, 0xC0, index);
}

// FLD m80fp (DB /5): push an 80-bit extended value.
void Assembler::fld_x(Address adr) {
  InstructionMark im(this);
  emit_byte(0xDB);
  emit_operand32(rbp, adr);
}

// FLDCW m16 (D9 /5): load the FPU control word.
void Assembler::fldcw(Address src) {
  InstructionMark im(this);
  emit_byte(0xd9);
  emit_operand32(rbp, src);
}

// FLDENV m (D9 /4): load the FPU environment.
void Assembler::fldenv(Address src) {
  InstructionMark im(this);
  emit_byte(0xD9);
  emit_operand32(rsp, src);
}

// FLDLG2 (D9 EC): push log10(2).
void Assembler::fldlg2() {
  emit_byte(0xD9);
  emit_byte(0xEC);
}

// FLDLN2 (D9 ED): push ln(2).
void Assembler::fldln2() {
  emit_byte(0xD9);
  emit_byte(0xED);
}

// FLDZ (D9 EE): push +0.0.
void Assembler::fldz() {
  emit_byte(0xD9);
  emit_byte(0xEE);
}

// ST(0) <- ln(ST(0)), computed as ln(2) * log2(x) via fyl2x.
void Assembler::flog() {
  fldln2();
  fxch();
  fyl2x();
}

// ST(0) <- log10(ST(0)), computed as log10(2) * log2(x) via fyl2x.
void Assembler::flog10() {
  fldlg2();
  fxch();
  fyl2x();
}
// FMUL with ST(i), D8-encoded form.
void Assembler::fmul(int i) {
  emit_farith(0xD8, 0xC8, i);
}

// FMUL m64 (DC /1).
void Assembler::fmul_d(Address src) {
  InstructionMark im(this);
  emit_byte(0xDC);
  emit_operand32(rcx, src);
}

// FMUL m32 (D8 /1).
void Assembler::fmul_s(Address src) {
  InstructionMark im(this);
  emit_byte(0xD8);
  emit_operand32(rcx, src);
}

// FMUL, DC-encoded (accumulate-into-ST(i)) form.
void Assembler::fmula(int i) {
  emit_farith(0xDC, 0xC8, i);
}

// FMULP: multiply and pop the register stack.
void Assembler::fmulp(int i) {
  emit_farith(0xDE, 0xC8, i);
}

// FNSAVE m (DD /6): save FPU state without a preceding wait.
void Assembler::fnsave(Address dst) {
  InstructionMark im(this);
  emit_byte(0xDD);
  emit_operand32(rsi, dst);
}

// Store the FPU control word (D9 /7).
// NOTE(review): this emits the 9B wait prefix, i.e. the wait form FSTCW,
// despite the no-wait "fnstcw" name -- confirm that is intentional.
void Assembler::fnstcw(Address src) {
  InstructionMark im(this);
  emit_byte(0x9B);
  emit_byte(0xD9);
  emit_operand32(rdi, src);
}

// FNSTSW AX (DF E0): store the FPU status word into ax.
void Assembler::fnstsw_ax() {
  emit_byte(0xdF);
  emit_byte(0xE0);
}

// FPREM (D9 F8): partial remainder of ST(0) / ST(1).
void Assembler::fprem() {
  emit_byte(0xD9);
  emit_byte(0xF8);
}

// FPREM1 (D9 F5): IEEE partial remainder.
void Assembler::fprem1() {
  emit_byte(0xD9);
  emit_byte(0xF5);
}

// FRSTOR m (DD /4): restore FPU state saved by fnsave.
void Assembler::frstor(Address src) {
  InstructionMark im(this);
  emit_byte(0xDD);
  emit_operand32(rsp, src);
}

// FSIN (D9 FE): ST(0) <- sin(ST(0)).
void Assembler::fsin() {
  emit_byte(0xD9);
  emit_byte(0xFE);
}

// FSQRT (D9 FA): ST(0) <- sqrt(ST(0)).
void Assembler::fsqrt() {
  emit_byte(0xD9);
  emit_byte(0xFA);
}

// FST m64 (DD /2).
void Assembler::fst_d(Address adr) {
  InstructionMark im(this);
  emit_byte(0xDD);
  emit_operand32(rdx, adr);
}

// FST m32 (D9 /2).
void Assembler::fst_s(Address adr) {
  InstructionMark im(this);
  emit_byte(0xD9);
  emit_operand32(rdx, adr);
}

// FSTP m64 (DD /3): store and pop.
void Assembler::fstp_d(Address adr) {
  InstructionMark im(this);
  emit_byte(0xDD);
  emit_operand32(rbx, adr);
}

// FSTP ST(index) (DD D8+index).
void Assembler::fstp_d(int index) {
  emit_farith(0xDD, 0xD8, index);
}

// FSTP m32 (D9 /3).
void Assembler::fstp_s(Address adr) {
  InstructionMark im(this);
  emit_byte(0xD9);
  emit_operand32(rbx, adr);
}

// FSTP m80fp (DB /7): store the 80-bit extended value and pop.
void Assembler::fstp_x(Address adr) {
  InstructionMark im(this);
  emit_byte(0xDB);
  emit_operand32(rdi, adr);
}
// FSUB with ST(i), D8-encoded form.
void Assembler::fsub(int i) {
  emit_farith(0xD8, 0xE0, i);
}

// FSUB m64 (DC /4).
void Assembler::fsub_d(Address src) {
  InstructionMark im(this);
  emit_byte(0xDC);
  emit_operand32(rsp, src);
}

// FSUB m32 (D8 /4).
void Assembler::fsub_s(Address src) {
  InstructionMark im(this);
  emit_byte(0xD8);
  emit_operand32(rsp, src);
}

// FSUB, DC-encoded (result-into-ST(i)) form.
void Assembler::fsuba(int i) {
  emit_farith(0xDC, 0xE8, i);
}

void Assembler::fsubp(int i) {
  emit_farith(0xDE, 0xE8, i); // ST(0) <- ST(0) - ST(1) and pop (Intel manual wrong)
}

// FSUBR with ST(i), D8-encoded form.
void Assembler::fsubr(int i) {
  emit_farith(0xD8, 0xE8, i);
}

// FSUBR m64 (DC /5).
void Assembler::fsubr_d(Address src) {
  InstructionMark im(this);
  emit_byte(0xDC);
  emit_operand32(rbp, src);
}

// FSUBR m32 (D8 /5).
void Assembler::fsubr_s(Address src) {
  InstructionMark im(this);
  emit_byte(0xD8);
  emit_operand32(rbp, src);
}

// FSUBR, DC-encoded (result-into-ST(i)) form.
void Assembler::fsubra(int i) {
  emit_farith(0xDC, 0xE0, i);
}

void Assembler::fsubrp(int i) {
  emit_farith(0xDE, 0xE0, i); // ST(0) <- ST(1) - ST(0) and pop (Intel manual wrong)
}

// FPTAN (D9 F2) computes tan(ST(0)) and pushes 1.0; the trailing
// FSTP ST(0) (DD D8) pops that 1.0 so only the tangent remains.
void Assembler::ftan() {
  emit_byte(0xD9);
  emit_byte(0xF2);
  emit_byte(0xDD);
  emit_byte(0xD8);
}

// FTST (D9 E4): compare ST(0) against +0.0.
void Assembler::ftst() {
  emit_byte(0xD9);
  emit_byte(0xE4);
}

// FUCOMI ST(0), ST(i) (DB E8+i): unordered compare, sets EFLAGS directly.
void Assembler::fucomi(int i) {
  // make sure the instruction is supported (introduced for P6, together with cmov)
  guarantee(VM_Version::supports_cmov(), "illegal instruction");
  emit_farith(0xDB, 0xE8, i);
}

// FUCOMIP (DF E8+i): as fucomi, then pop.
void Assembler::fucomip(int i) {
  // make sure the instruction is supported (introduced for P6, together with cmov)
  guarantee(VM_Version::supports_cmov(), "illegal instruction");
  emit_farith(0xDF, 0xE8, i);
}

// FWAIT (9B): wait for pending unmasked FP exceptions.
void Assembler::fwait() {
  emit_byte(0x9B);
}

// FXCH ST(i) (D9 C8+i): exchange ST(0) with ST(i).
void Assembler::fxch(int i) {
  emit_farith(0xD9, 0xC8, i);
}

// FYL2X (D9 F1): ST(1) <- ST(1) * log2(ST(0)), then pop.
void Assembler::fyl2x() {
  emit_byte(0xD9);
  emit_byte(0xF1);
}
// MOV r32, imm32 (B8+rd id) with relocated immediate data; 'format'
// describes the embedded data to the relocation machinery.
void Assembler::mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec, int format) {
  InstructionMark im(this);
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xB8 | encode);
  emit_data((int)imm32, rspec, format);
}
3224 #ifndef _LP64
// One-byte INC r32 (40+rd) -- 32-bit only; on 64-bit 0x40+rd is a REX prefix.
void Assembler::incl(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementl() instead.
  emit_byte(0x40 | dst->encoding());
}

// On 32-bit, lea is just leal.
void Assembler::lea(Register dst, Address src) {
  leal(dst, src);
}

// MOV m32, imm32 (C7 /0) with relocated immediate data.
void Assembler::mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_byte(0xC7);
  emit_operand(rax, dst);
  emit_data((int)imm32, rspec, 0);
}

// POPAD (61): pop all eight general registers.
void Assembler::popa() { // 32bit
  emit_byte(0x61);
}

// PUSH imm32 (68 id) with relocated immediate data.
void Assembler::push_literal32(int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_byte(0x68);
  emit_data(imm32, rspec, 0);
}

// PUSHAD (60): push all eight general registers.
void Assembler::pusha() { // 32bit
  emit_byte(0x60);
}

// SETNE r8 (0F 95); SETcc ignores the reg field of the ModRM byte.
void Assembler::set_byte_if_not_zero(Register dst) {
  emit_byte(0x0F);
  emit_byte(0x95);
  emit_byte(0xE0 | dst->encoding());
}

// SHLD dst, src, CL (0F A5 /r): double-precision shift left.
void Assembler::shldl(Register dst, Register src) {
  emit_byte(0x0F);
  emit_byte(0xA5);
  emit_byte(0xC0 | src->encoding() << 3 | dst->encoding());
}

// SHRD dst, src, CL (0F AD /r): double-precision shift right.
void Assembler::shrdl(Register dst, Register src) {
  emit_byte(0x0F);
  emit_byte(0xAD);
  emit_byte(0xC0 | src->encoding() << 3 | dst->encoding());
}
3275 #else // LP64
3277 // 64bit only pieces of the assembler
3278 // This should only be used by 64bit instructions that can use rip-relative
3279 // it cannot be used by instructions that want an immediate value.
// Returns true when 'adr' can be addressed with a rip-relative 32-bit
// displacement from anywhere code might be placed in the code cache;
// returning false forces callers to materialize a 64-bit literal instead.
bool Assembler::reachable(AddressLiteral adr) {
  int64_t disp;
  // None will force a 64bit literal to the code stream. Likely a placeholder
  // for something that will be patched later and we need to certain it will
  // always be reachable.
  if (adr.reloc() == relocInfo::none) {
    return false;
  }
  if (adr.reloc() == relocInfo::internal_word_type) {
    // This should be rip relative and easily reachable.
    return true;
  }
  if (adr.reloc() == relocInfo::virtual_call_type ||
      adr.reloc() == relocInfo::opt_virtual_call_type ||
      adr.reloc() == relocInfo::static_call_type ||
      adr.reloc() == relocInfo::static_stub_type ) {
    // This should be rip relative within the code cache and easily
    // reachable until we get huge code caches. (At which point
    // ic code is going to have issues).
    return true;
  }
  if (adr.reloc() != relocInfo::external_word_type &&
      adr.reloc() != relocInfo::poll_return_type &&  // these are really external_word but need special
      adr.reloc() != relocInfo::poll_type &&         // relocs to identify them
      adr.reloc() != relocInfo::runtime_call_type ) {
    return false;
  }

  // Stress the correction code
  if (ForceUnreachable) {
    // Must be runtimecall reloc, see if it is in the codecache
    // Flipping stuff in the codecache to be unreachable causes issues
    // with things like inline caches where the additional instructions
    // are not handled.
    if (CodeCache::find_blob(adr._target) == NULL) {
      return false;
    }
  }
  // For external_word_type/runtime_call_type if it is reachable from where we
  // are now (possibly a temp buffer) and where we might end up
  // anywhere in the codeCache then we are always reachable.
  // This would have to change if we ever save/restore shared code
  // to be more pessimistic.

  // Check both extremes of the code cache, since the final placement of
  // this code is not known yet.
  disp = (int64_t)adr._target - ((int64_t)CodeCache::low_bound() + sizeof(int));
  if (!is_simm32(disp)) return false;
  disp = (int64_t)adr._target - ((int64_t)CodeCache::high_bound() + sizeof(int));
  if (!is_simm32(disp)) return false;

  disp = (int64_t)adr._target - ((int64_t)_code_pos + sizeof(int));

  // Because rip relative is a disp + address_of_next_instruction and we
  // don't know the value of address_of_next_instruction we apply a fudge factor
  // to make sure we will be ok no matter the size of the instruction we get placed into.
  // We don't have to fudge the checks above here because they are already worst case.

  // 12 == override/rex byte, opcode byte, rm byte, sib byte, a 4-byte disp , 4-byte literal
  // + 4 because better safe than sorry.
  const int fudge = 12 + 4;
  if (disp < 0) {
    disp -= fudge;
  } else {
    disp += fudge;
  }
  return is_simm32(disp);
}
// Emit a 64-bit data word, wrapping a bare relocType into a RelocationHolder
// when relocation is required.
void Assembler::emit_data64(jlong data,
                            relocInfo::relocType rtype,
                            int format) {
  if (rtype == relocInfo::none) {
    emit_long64(data);
  } else {
    emit_data64(data, Relocation::spec_simple(rtype), format);
  }
}

// Emit a relocated 64-bit data word. The relocation is attached to the
// enclosing instruction's mark rather than to the data itself.
void Assembler::emit_data64(jlong data,
                            RelocationHolder const& rspec,
                            int format) {
  assert(imm_operand == 0, "default format must be immediate in this file");
  assert(imm_operand == format, "must be immediate");
  assert(inst_mark() != NULL, "must be inside InstructionMark");
  // Do not use AbstractAssembler::relocate, which is not intended for
  // embedded words. Instead, relocate to the enclosing instruction.
  code_section()->relocate(inst_mark(), rspec, format);
#ifdef ASSERT
  check_relocation(rspec, format);
#endif
  emit_long64(data);
}
// REX prefix helpers: emit the REX byte needed for the given register
// encoding(s) and return the encoding reduced to its low three bits for use
// in the ModRM byte. The *q variants always emit REX.W for 64-bit operand
// size.

int Assembler::prefix_and_encode(int reg_enc, bool byteinst) {
  if (reg_enc >= 8) {
    prefix(REX_B);             // r8-r15 need REX.B
    reg_enc -= 8;
  } else if (byteinst && reg_enc >= 4) {
    prefix(REX);               // spl/bpl/sil/dil need a bare REX as byte regs
  }
  return reg_enc;
}

int Assembler::prefixq_and_encode(int reg_enc) {
  if (reg_enc < 8) {
    prefix(REX_W);
  } else {
    prefix(REX_WB);
    reg_enc -= 8;
  }
  return reg_enc;
}

// Two-register form: returns dst in the ModRM reg field (bits 5:3) and src
// in the rm field (bits 2:0).
int Assembler::prefix_and_encode(int dst_enc, int src_enc, bool byteinst) {
  if (dst_enc < 8) {
    if (src_enc >= 8) {
      prefix(REX_B);
      src_enc -= 8;
    } else if (byteinst && src_enc >= 4) {
      prefix(REX);
    }
  } else {
    if (src_enc < 8) {
      prefix(REX_R);
    } else {
      prefix(REX_RB);
      src_enc -= 8;
    }
    dst_enc -= 8;
  }
  return dst_enc << 3 | src_enc;
}

// As above but with REX.W set for a 64-bit operand size.
int Assembler::prefixq_and_encode(int dst_enc, int src_enc) {
  if (dst_enc < 8) {
    if (src_enc < 8) {
      prefix(REX_W);
    } else {
      prefix(REX_WB);
      src_enc -= 8;
    }
  } else {
    if (src_enc < 8) {
      prefix(REX_WR);
    } else {
      prefix(REX_WRB);
      src_enc -= 8;
    }
    dst_enc -= 8;
  }
  return dst_enc << 3 | src_enc;
}

// Emit REX.B if the register is r8-r15.
void Assembler::prefix(Register reg) {
  if (reg->encoding() >= 8) {
    prefix(REX_B);
  }
}

// Emit whatever REX prefix the base/index registers of 'adr' require, if any.
void Assembler::prefix(Address adr) {
  if (adr.base_needs_rex()) {
    if (adr.index_needs_rex()) {
      prefix(REX_XB);
    } else {
      prefix(REX_B);
    }
  } else {
    if (adr.index_needs_rex()) {
      prefix(REX_X);
    }
  }
}

// Like prefix(Address) but always with REX.W for a 64-bit operand size.
void Assembler::prefixq(Address adr) {
  if (adr.base_needs_rex()) {
    if (adr.index_needs_rex()) {
      prefix(REX_WXB);
    } else {
      prefix(REX_WB);
    }
  } else {
    if (adr.index_needs_rex()) {
      prefix(REX_WX);
    } else {
      prefix(REX_W);
    }
  }
}
// Emit the REX prefix (if any) for an instruction with register operand
// 'reg' (ModRM reg field, extended by REX.R) and memory operand 'adr'
// (base extended by REX.B, index by REX.X).
// NOTE(review): the 'byteinst' parameter is never consulted — the bare REX
// for encodings 4-7 (spl/bpl/sil/dil) is emitted even for non-byte
// instructions. That extra prefix byte is architecturally harmless but
// inconsistent with prefix_and_encode(), which guards the same case with
// 'byteinst &&'; confirm before changing, since it alters instruction
// lengths.
void Assembler::prefix(Address adr, Register reg, bool byteinst) {
  if (reg->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_XB);
      } else {
        prefix(REX_B);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_X);
      } else if (reg->encoding() >= 4 ) {
        prefix(REX);
      }
    }
  } else {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_RXB);
      } else {
        prefix(REX_RB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_RX);
      } else {
        prefix(REX_R);
      }
    }
  }
}
3502 void Assembler::prefixq(Address adr, Register src) {
3503 if (src->encoding() < 8) {
3504 if (adr.base_needs_rex()) {
3505 if (adr.index_needs_rex()) {
3506 prefix(REX_WXB);
3507 } else {
3508 prefix(REX_WB);
3509 }
3510 } else {
3511 if (adr.index_needs_rex()) {
3512 prefix(REX_WX);
3513 } else {
3514 prefix(REX_W);
3515 }
3516 }
3517 } else {
3518 if (adr.base_needs_rex()) {
3519 if (adr.index_needs_rex()) {
3520 prefix(REX_WRXB);
3521 } else {
3522 prefix(REX_WRB);
3523 }
3524 } else {
3525 if (adr.index_needs_rex()) {
3526 prefix(REX_WRX);
3527 } else {
3528 prefix(REX_WR);
3529 }
3530 }
3531 }
3532 }
3534 void Assembler::prefix(Address adr, XMMRegister reg) {
3535 if (reg->encoding() < 8) {
3536 if (adr.base_needs_rex()) {
3537 if (adr.index_needs_rex()) {
3538 prefix(REX_XB);
3539 } else {
3540 prefix(REX_B);
3541 }
3542 } else {
3543 if (adr.index_needs_rex()) {
3544 prefix(REX_X);
3545 }
3546 }
3547 } else {
3548 if (adr.base_needs_rex()) {
3549 if (adr.index_needs_rex()) {
3550 prefix(REX_RXB);
3551 } else {
3552 prefix(REX_RB);
3553 }
3554 } else {
3555 if (adr.index_needs_rex()) {
3556 prefix(REX_RX);
3557 } else {
3558 prefix(REX_R);
3559 }
3560 }
3561 }
3562 }
// ADC r64, imm32 — 0x81 /2 with a sign-extended immediate (emit_arith may
// pick the short 0x83 form for 8-bit values).
void Assembler::adcq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xD0, dst, imm32);
}

// ADC r64, m64 — opcode 0x13 (reg <- reg + r/m + CF).
void Assembler::adcq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x13);
  emit_operand(dst, src);
}
3576 void Assembler::adcq(Register dst, Register src) {
3577 (int) prefixq_and_encode(dst->encoding(), src->encoding());
3578 emit_arith(0x13, 0xC0, dst, src);
3579 }
// ADD m64, imm32 — 0x81 /0 (emit_arith_operand selects the 0x83 short
// form when the immediate fits in 8 bits).
void Assembler::addq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rax, dst,imm32);
}

// ADD m64, r64 — opcode 0x01.
void Assembler::addq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_byte(0x01);
  emit_operand(src, dst);
}

// ADD r64, imm32 — 0x81 /0 register form.
void Assembler::addq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xC0, dst, imm32);
}

// ADD r64, m64 — opcode 0x03.
void Assembler::addq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x03);
  emit_operand(dst, src);
}

// ADD r64, r64 — opcode 0x03, register ModRM via emit_arith.
void Assembler::addq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x03, 0xC0, dst, src);
}

// AND r64, imm32 — 0x81 /4.
void Assembler::andq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xE0, dst, imm32);
}

// AND r64, m64 — opcode 0x23.
void Assembler::andq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x23);
  emit_operand(dst, src);
}
3623 void Assembler::andq(Register dst, Register src) {
3624 (int) prefixq_and_encode(dst->encoding(), src->encoding());
3625 emit_arith(0x23, 0xC0, dst, src);
3626 }
// BSWAP r64 — 0x0F 0xC8+rd, byte-reverses the 8-byte register.
void Assembler::bswapq(Register reg) {
  int encode = prefixq_and_encode(reg->encoding());
  emit_byte(0x0F);
  emit_byte(0xC8 | encode);
}

// CQO — sign-extend rax into rdx:rax (0x99 with REX.W).
void Assembler::cdqq() {
  prefix(REX_W);
  emit_byte(0x99);
}

// CLFLUSH m8 — 0x0F 0xAE /7 (rdi supplies the /7 extension digit).
void Assembler::clflush(Address adr) {
  prefix(adr);
  emit_byte(0x0F);
  emit_byte(0xAE);
  emit_operand(rdi, adr);
}

// CMOVcc r64, r64 — 0x0F 0x40+cc.
void Assembler::cmovq(Condition cc, Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x40 | cc);
  emit_byte(0xC0 | encode);
}

// CMOVcc r64, m64 — 0x0F 0x40+cc with a memory operand.
void Assembler::cmovq(Condition cc, Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x0F);
  emit_byte(0x40 | cc);
  emit_operand(dst, src);
}

// CMP m64, imm32 — 0x81 /7 (rdi supplies /7); the trailing 4 tells
// emit_operand a 4-byte immediate follows, for rip-relative fixups.
void Assembler::cmpq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_byte(0x81);
  emit_operand(rdi, dst, 4);
  emit_long(imm32);
}

// CMP r64, imm32 — 0x81 /7 register form.
void Assembler::cmpq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xF8, dst, imm32);
}
3674 void Assembler::cmpq(Address dst, Register src) {
3675 InstructionMark im(this);
3676 prefixq(dst, src);
3677 emit_byte(0x3B);
3678 emit_operand(src, dst);
3679 }
// CMP r64, r64 — opcode 0x3B, register ModRM via emit_arith.
void Assembler::cmpq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x3B, 0xC0, dst, src);
}

// CMP r64, m64 — opcode 0x3B (flags reflect dst - [src]).
void Assembler::cmpq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x3B);
  emit_operand(dst, src);
}

// CMPXCHG m64, r64 — 0x0F 0xB1; caller emits LOCK separately if needed.
void Assembler::cmpxchgq(Register reg, Address adr) {
  InstructionMark im(this);
  prefixq(adr, reg);
  emit_byte(0x0F);
  emit_byte(0xB1);
  emit_operand(reg, adr);
}

// CVTSI2SD xmm, r64 — 0xF2 0x0F 0x2A; the F2 prefix must precede REX.
void Assembler::cvtsi2sdq(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2A);
  emit_byte(0xC0 | encode);
}

// CVTSI2SS xmm, r64 — 0xF3 0x0F 0x2A.
void Assembler::cvtsi2ssq(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_byte(0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2A);
  emit_byte(0xC0 | encode);
}

// CVTTSD2SI r64, xmm — 0xF2 0x0F 0x2C (truncating conversion).
void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2C);
  emit_byte(0xC0 | encode);
}

// CVTTSS2SI r64, xmm — 0xF3 0x0F 0x2C (truncating conversion).
void Assembler::cvttss2siq(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_byte(0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2C);
  emit_byte(0xC0 | encode);
}
// DEC r32 — 0xFF /1 two-byte form.
void Assembler::decl(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementl() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xFF);
  emit_byte(0xC8 | encode);
}

// DEC r64 — 0xFF /1 with REX.W.
void Assembler::decq(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementq() instead.
  // Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xFF);
  emit_byte(0xC8 | encode);
}

// DEC m64 — 0xFF /1 (rcx supplies the /1 extension digit).
void Assembler::decq(Address dst) {
  // Don't use it directly. Use MacroAssembler::decrementq() instead.
  InstructionMark im(this);
  prefixq(dst);
  emit_byte(0xFF);
  emit_operand(rcx, dst);
}

// FXRSTOR m512 — 0x0F 0xAE /1; REX.W selects the 64-bit image format.
void Assembler::fxrstor(Address src) {
  prefixq(src);
  emit_byte(0x0F);
  emit_byte(0xAE);
  emit_operand(as_Register(1), src);
}

// FXSAVE m512 — 0x0F 0xAE /0; REX.W selects the 64-bit image format.
void Assembler::fxsave(Address dst) {
  prefixq(dst);
  emit_byte(0x0F);
  emit_byte(0xAE);
  emit_operand(as_Register(0), dst);
}

// IDIV r64 — 0xF7 /7; divides rdx:rax by src.
void Assembler::idivq(Register src) {
  int encode = prefixq_and_encode(src->encoding());
  emit_byte(0xF7);
  emit_byte(0xF8 | encode);
}

// IMUL r64, r64 — 0x0F 0xAF two-operand form.
void Assembler::imulq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0xAF);
  emit_byte(0xC0 | encode);
}
3788 void Assembler::imulq(Register dst, Register src, int value) {
3789 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
3790 if (is8bit(value)) {
3791 emit_byte(0x6B);
3792 emit_byte(0xC0 | encode);
3793 emit_byte(value);
3794 } else {
3795 emit_byte(0x69);
3796 emit_byte(0xC0 | encode);
3797 emit_long(value);
3798 }
3799 }
// INC r32 — 0xFF /0 two-byte form.
void Assembler::incl(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementl() instead.
  // Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xFF);
  emit_byte(0xC0 | encode);
}

// INC r64 — 0xFF /0 with REX.W.
void Assembler::incq(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementq() instead.
  // Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xFF);
  emit_byte(0xC0 | encode);
}

// INC m64 — 0xFF /0 (rax supplies the /0 extension digit).
void Assembler::incq(Address dst) {
  // Don't use it directly. Use MacroAssembler::incrementq() instead.
  InstructionMark im(this);
  prefixq(dst);
  emit_byte(0xFF);
  emit_operand(rax, dst);
}

// Pointer-width LEA; on 64-bit this is just leaq.
void Assembler::lea(Register dst, Address src) {
  leaq(dst, src);
}

// LEA r64, m — opcode 0x8D.
void Assembler::leaq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x8D);
  emit_operand(dst, src);
}

// MOV r64, imm64 — 0xB8+rd with a full 8-byte immediate, no relocation.
void Assembler::mov64(Register dst, int64_t imm64) {
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xB8 | encode);
  emit_long64(imm64);
}

// MOV r64, imm64 where the immediate carries a relocation (e.g. an oop or
// external address embedded in the code stream).
void Assembler::mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xB8 | encode);
  emit_data64(imm64, rspec);
}

// MOVQ xmm, r64 — 66 REX.W 0x0F 0x6E (GPR -> XMM).
void Assembler::movdq(XMMRegister dst, Register src) {
  // table D-1 says MMX/SSE2
  NOT_LP64(assert(VM_Version::supports_sse2() || VM_Version::supports_mmx(), ""));
  emit_byte(0x66);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x6E);
  emit_byte(0xC0 | encode);
}

// MOVQ r64, xmm — 66 REX.W 0x0F 0x7E (XMM -> GPR); for 0x7E the XMM reg
// occupies the ModRM reg field, hence the swapped prefix arguments.
void Assembler::movdq(Register dst, XMMRegister src) {
  // table D-1 says MMX/SSE2
  NOT_LP64(assert(VM_Version::supports_sse2() || VM_Version::supports_mmx(), ""));
  emit_byte(0x66);
  // swap src/dst to get correct prefix
  int encode = prefixq_and_encode(src->encoding(), dst->encoding());
  emit_byte(0x0F);
  emit_byte(0x7E);
  emit_byte(0xC0 | encode);
}

// MOV r64, r64 — opcode 0x8B.
void Assembler::movq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x8B);
  emit_byte(0xC0 | encode);
}

// MOV r64, m64 — opcode 0x8B.
void Assembler::movq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x8B);
  emit_operand(dst, src);
}

// MOV m64, r64 — opcode 0x89.
void Assembler::movq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_byte(0x89);
  emit_operand(src, dst);
}
// MOVSLQ r64, imm32 — DISABLED: guarded by ShouldNotReachHere() because
// the observed encodings were wrong (see dbx output below).
// NOTE(review): even if re-enabled, "emit_byte(0xC7 | encode)" looks wrong
// — it fuses the opcode and ModRM bytes; the expected sequence would be
// emit_byte(0xC7); emit_byte(0xC0 | encode). Confirm against the SDM
// before enabling.
void Assembler::movslq(Register dst, int32_t imm32) {
  // dbx shows movslq(rcx, 3) as movq $0x0000000049000000,(%rbx)
  // and movslq(r8, 3); as movl $0x0000000048000000,(%rbx)
  // as a result we shouldn't use until tested at runtime...
  ShouldNotReachHere();
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xC7 | encode);
  emit_long(imm32);
}

// MOV m64, imm32 (sign-extended) — 0xC7 /0; the immediate must already
// fit in 32 bits.
void Assembler::movslq(Address dst, int32_t imm32) {
  assert(is_simm32(imm32), "lost bits");
  InstructionMark im(this);
  prefixq(dst);
  emit_byte(0xC7);
  emit_operand(rax, dst, 4);
  emit_long(imm32);
}

// MOVSXD r64, m32 — opcode 0x63 (sign-extend 32 -> 64).
void Assembler::movslq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x63);
  emit_operand(dst, src);
}

// MOVSXD r64, r32 — opcode 0x63 register form.
void Assembler::movslq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x63);
  emit_byte(0xC0 | encode);
}
// NEG r64 — 0xF7 /3 (two's-complement negate).
void Assembler::negq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xF7);
  emit_byte(0xD8 | encode);
}

// NOT r64 — 0xF7 /2 (one's-complement).
void Assembler::notq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xF7);
  emit_byte(0xD0 | encode);
}

// OR m64, imm32 — 0x81 /1 (rcx supplies /1); the trailing 4 tells
// emit_operand a 4-byte immediate follows, for rip-relative fixups.
void Assembler::orq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_byte(0x81);
  emit_operand(rcx, dst, 4);
  emit_long(imm32);
}

// OR r64, imm32 — 0x81 /1 register form.
void Assembler::orq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xC8, dst, imm32);
}

// OR r64, m64 — opcode 0x0B.
void Assembler::orq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x0B);
  emit_operand(dst, src);
}

// OR r64, r64 — opcode 0x0B, register ModRM via emit_arith.
void Assembler::orq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x0B, 0xC0, dst, src);
}
// Restore all 15 GPRs (rsp excluded) from the 16-slot frame laid out by
// pusha() below, then release the frame. Slot 11 (rsp's position) is
// skipped; the layouts must stay in sync.
void Assembler::popa() { // 64bit
  movq(r15, Address(rsp, 0));
  movq(r14, Address(rsp, wordSize));
  movq(r13, Address(rsp, 2 * wordSize));
  movq(r12, Address(rsp, 3 * wordSize));
  movq(r11, Address(rsp, 4 * wordSize));
  movq(r10, Address(rsp, 5 * wordSize));
  movq(r9,  Address(rsp, 6 * wordSize));
  movq(r8,  Address(rsp, 7 * wordSize));
  movq(rdi, Address(rsp, 8 * wordSize));
  movq(rsi, Address(rsp, 9 * wordSize));
  movq(rbp, Address(rsp, 10 * wordSize));
  // skip rsp
  movq(rbx, Address(rsp, 12 * wordSize));
  movq(rdx, Address(rsp, 13 * wordSize));
  movq(rcx, Address(rsp, 14 * wordSize));
  movq(rax, Address(rsp, 15 * wordSize));

  addq(rsp, 16 * wordSize);
}

// POP m64 — opcode 0x8F /0.
void Assembler::popq(Address dst) {
  InstructionMark im(this);
  prefixq(dst);
  emit_byte(0x8F);
  emit_operand(rax, dst);
}

// Save all 15 GPRs (rsp excluded) into a 16-slot frame. The original rsp
// is stashed below the new frame first; -5 * wordSize lands it in slot 11
// (the skipped rsp slot) once rsp has been dropped by 16 words.
void Assembler::pusha() { // 64bit
  // we have to store original rsp. ABI says that 128 bytes
  // below rsp are local scratch.
  movq(Address(rsp, -5 * wordSize), rsp);

  subq(rsp, 16 * wordSize);

  movq(Address(rsp, 15 * wordSize), rax);
  movq(Address(rsp, 14 * wordSize), rcx);
  movq(Address(rsp, 13 * wordSize), rdx);
  movq(Address(rsp, 12 * wordSize), rbx);
  // skip rsp
  movq(Address(rsp, 10 * wordSize), rbp);
  movq(Address(rsp, 9 * wordSize), rsi);
  movq(Address(rsp, 8 * wordSize), rdi);
  movq(Address(rsp, 7 * wordSize), r8);
  movq(Address(rsp, 6 * wordSize), r9);
  movq(Address(rsp, 5 * wordSize), r10);
  movq(Address(rsp, 4 * wordSize), r11);
  movq(Address(rsp, 3 * wordSize), r12);
  movq(Address(rsp, 2 * wordSize), r13);
  movq(Address(rsp, wordSize), r14);
  movq(Address(rsp, 0), r15);
}

// PUSH m64 — 0xFF /6 (rsi supplies the /6 extension digit).
void Assembler::pushq(Address src) {
  InstructionMark im(this);
  prefixq(src);
  emit_byte(0xFF);
  emit_operand(rsi, src);
}
4021 void Assembler::rclq(Register dst, int imm8) {
4022 assert(isShiftCount(imm8 >> 1), "illegal shift count");
4023 int encode = prefixq_and_encode(dst->encoding());
4024 if (imm8 == 1) {
4025 emit_byte(0xD1);
4026 emit_byte(0xD0 | encode);
4027 } else {
4028 emit_byte(0xC1);
4029 emit_byte(0xD0 | encode);
4030 emit_byte(imm8);
4031 }
4032 }
4033 void Assembler::sarq(Register dst, int imm8) {
4034 assert(isShiftCount(imm8 >> 1), "illegal shift count");
4035 int encode = prefixq_and_encode(dst->encoding());
4036 if (imm8 == 1) {
4037 emit_byte(0xD1);
4038 emit_byte(0xF8 | encode);
4039 } else {
4040 emit_byte(0xC1);
4041 emit_byte(0xF8 | encode);
4042 emit_byte(imm8);
4043 }
4044 }
// SAR r64, cl — 0xD3 /7 (count in cl).
void Assembler::sarq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xD3);
  emit_byte(0xF8 | encode);
}

// SBB m64, imm32 — 0x81 /3 (rbx supplies /3) via emit_arith_operand.
void Assembler::sbbq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rbx, dst, imm32);
}

// SBB r64, imm32 — 0x81 /3 register form.
void Assembler::sbbq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xD8, dst, imm32);
}

// SBB r64, m64 — opcode 0x1B.
void Assembler::sbbq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x1B);
  emit_operand(dst, src);
}

// SBB r64, r64 — opcode 0x1B, register ModRM via emit_arith.
void Assembler::sbbq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x1B, 0xC0, dst, src);
}
4074 void Assembler::shlq(Register dst, int imm8) {
4075 assert(isShiftCount(imm8 >> 1), "illegal shift count");
4076 int encode = prefixq_and_encode(dst->encoding());
4077 if (imm8 == 1) {
4078 emit_byte(0xD1);
4079 emit_byte(0xE0 | encode);
4080 } else {
4081 emit_byte(0xC1);
4082 emit_byte(0xE0 | encode);
4083 emit_byte(imm8);
4084 }
4085 }
// SHL r64, cl — 0xD3 /4 (count in cl).
void Assembler::shlq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xD3);
  emit_byte(0xE0 | encode);
}

// SHR r64, imm8 — 0xC1 /5 with explicit count byte.
// NOTE(review): unlike sarq/shlq there is no 0xD1 short form for a count
// of 1 — functionally identical, just one byte longer; confirm whether
// any caller depends on the fixed length before "fixing" this.
void Assembler::shrq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xC1);
  emit_byte(0xE8 | encode);
  emit_byte(imm8);
}

// SHR r64, cl — 0xD3 /5 (count in cl).
void Assembler::shrq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xD3);
  emit_byte(0xE8 | encode);
}

// SQRTSD xmm, m64 — 0xF2 0x0F 0x51; the F2 prefix precedes any REX.
void Assembler::sqrtsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0xF2);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x51);
  emit_operand(dst, src);
}
4117 void Assembler::subq(Address dst, int32_t imm32) {
4118 InstructionMark im(this);
4119 prefixq(dst);
4120 if (is8bit(imm32)) {
4121 emit_byte(0x83);
4122 emit_operand(rbp, dst, 1);
4123 emit_byte(imm32 & 0xFF);
4124 } else {
4125 emit_byte(0x81);
4126 emit_operand(rbp, dst, 4);
4127 emit_long(imm32);
4128 }
4129 }
// SUB r64, imm32 — 0x81 /5 register form.
void Assembler::subq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xE8, dst, imm32);
}

// SUB m64, r64 — opcode 0x29.
void Assembler::subq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_byte(0x29);
  emit_operand(src, dst);
}

// SUB r64, m64 — opcode 0x2B.
void Assembler::subq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x2B);
  emit_operand(dst, src);
}

// SUB r64, r64 — opcode 0x2B, register ModRM via emit_arith.
void Assembler::subq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x2B, 0xC0, dst, src);
}

// TEST r64, imm32 — uses the short 0xA9 form for rax (encoding 0),
// otherwise 0xF7 /0 with a register ModRM.
void Assembler::testq(Register dst, int32_t imm32) {
  // not using emit_arith because test
  // doesn't support sign-extension of
  // 8bit operands
  int encode = dst->encoding();
  if (encode == 0) {
    prefix(REX_W);
    emit_byte(0xA9);
  } else {
    encode = prefixq_and_encode(encode);
    emit_byte(0xF7);
    emit_byte(0xC0 | encode);
  }
  emit_long(imm32);
}

// TEST r64, r64 — opcode 0x85, register ModRM via emit_arith.
void Assembler::testq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x85, 0xC0, dst, src);
}

// XADD m64, r64 — 0x0F 0xC1; caller emits LOCK separately if needed.
void Assembler::xaddq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_byte(0x0F);
  emit_byte(0xC1);
  emit_operand(src, dst);
}

// XCHG r64, m64 — opcode 0x87 (implicitly locked by the CPU).
void Assembler::xchgq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x87);
  emit_operand(dst, src);
}

// XCHG r64, r64 — opcode 0x87 register form.
void Assembler::xchgq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x87);
  emit_byte(0xc0 | encode);
}

// XOR r64, r64 — opcode 0x33, register ModRM via emit_arith.
void Assembler::xorq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x33, 0xC0, dst, src);
}

// XOR r64, m64 — opcode 0x33.
void Assembler::xorq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x33);
  emit_operand(dst, src);
}
4209 #endif // !LP64
// Table mapping each x86 condition code to its logical negation, indexed
// by the condition's 4-bit encoding (reverse[cc] == !cc). Used to invert
// branches; note the table intentionally pairs the aliased encodings
// (e.g. below/carrySet, equal/zero) as the comments indicate.
static Assembler::Condition reverse[] = {
    Assembler::noOverflow     /* overflow      = 0x0 */ ,
    Assembler::overflow       /* noOverflow    = 0x1 */ ,
    Assembler::aboveEqual     /* carrySet      = 0x2, below         = 0x2 */ ,
    Assembler::below          /* aboveEqual    = 0x3, carryClear    = 0x3 */ ,
    Assembler::notZero        /* zero          = 0x4, equal         = 0x4 */ ,
    Assembler::zero           /* notZero       = 0x5, notEqual      = 0x5 */ ,
    Assembler::above          /* belowEqual    = 0x6 */ ,
    Assembler::belowEqual     /* above         = 0x7 */ ,
    Assembler::positive       /* negative      = 0x8 */ ,
    Assembler::negative       /* positive      = 0x9 */ ,
    Assembler::noParity       /* parity        = 0xa */ ,
    Assembler::parity         /* noParity      = 0xb */ ,
    Assembler::greaterEqual   /* less          = 0xc */ ,
    Assembler::less           /* greaterEqual  = 0xd */ ,
    Assembler::greater        /* lessEqual     = 0xe */ ,
    Assembler::lessEqual      /* greater       = 0xf, */

};
4232 // Implementation of MacroAssembler
4234 // First all the versions that have distinct versions depending on 32/64 bit
4235 // Unless the difference is trivial (1 line or so).
4237 #ifndef _LP64
4239 // 32bit versions
// 32-bit: an AddressLiteral can be used as a direct absolute Address,
// carrying its relocation spec along.
Address MacroAssembler::as_Address(AddressLiteral adr) {
  return Address(adr.target(), adr.rspec());
}

// 32-bit: build an array-indexing Address directly from an ArrayAddress.
Address MacroAssembler::as_Address(ArrayAddress adr) {
  return Address::make_array(adr);
}
// Attempt to acquire the biased lock on obj_reg (32-bit version).
//
// On the fast path (object already biased to this thread) falls through to
// 'done'. Otherwise tries to acquire, rebias, or revoke the bias with a
// CAS on the mark word, jumping to *slow_case (if non-NULL) when a CAS
// loses, or falling through to cas_label for the normal locking path.
//
// lock_reg  - holds the on-stack BasicLock; its slot 0 is used as scratch
//             to spill the mark word (saved_mark_addr).
// obj_reg   - object being locked.
// swap_reg  - must be rax (cmpxchg requirement); may already contain the
//             mark word if swap_reg_contains_mark.
// tmp_reg   - optional scratch; when noreg, lock_reg is borrowed and
//             push/pop'd around each use (need_tmp_reg).
// counters  - optional BiasedLockingCounters to bump.
// Returns the code offset of the instruction that performs the implicit
// null check of obj_reg (the first mark-word load), or -1 semantics per
// the swap_reg_contains_mark path below.
int MacroAssembler::biased_locking_enter(Register lock_reg,
                                         Register obj_reg,
                                         Register swap_reg,
                                         Register tmp_reg,
                                         bool swap_reg_contains_mark,
                                         Label& done,
                                         Label* slow_case,
                                         BiasedLockingCounters* counters) {
  assert(UseBiasedLocking, "why call this otherwise?");
  assert(swap_reg == rax, "swap_reg must be rax, for cmpxchg");
  assert_different_registers(lock_reg, obj_reg, swap_reg);

  if (PrintBiasedLockingStatistics && counters == NULL)
    counters = BiasedLocking::counters();

  bool need_tmp_reg = false;
  if (tmp_reg == noreg) {
    need_tmp_reg = true;
    tmp_reg = lock_reg;
  } else {
    assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
  }
  assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
  Address mark_addr      (obj_reg, oopDesc::mark_offset_in_bytes());
  Address klass_addr     (obj_reg, oopDesc::klass_offset_in_bytes());
  Address saved_mark_addr(lock_reg, 0);

  // Biased locking
  // See whether the lock is currently biased toward our thread and
  // whether the epoch is still valid
  // Note that the runtime guarantees sufficient alignment of JavaThread
  // pointers to allow age to be placed into low bits
  // First check to see whether biasing is even enabled for this object
  Label cas_label;
  int null_check_offset = -1;
  if (!swap_reg_contains_mark) {
    null_check_offset = offset();
    movl(swap_reg, mark_addr);
  }
  if (need_tmp_reg) {
    push(tmp_reg);
  }
  movl(tmp_reg, swap_reg);
  andl(tmp_reg, markOopDesc::biased_lock_mask_in_place);
  cmpl(tmp_reg, markOopDesc::biased_lock_pattern);
  if (need_tmp_reg) {
    pop(tmp_reg);
  }
  jcc(Assembler::notEqual, cas_label);
  // The bias pattern is present in the object's header. Need to check
  // whether the bias owner and the epoch are both still current.
  // Note that because there is no current thread register on x86 we
  // need to store off the mark word we read out of the object to
  // avoid reloading it and needing to recheck invariants below. This
  // store is unfortunate but it makes the overall code shorter and
  // simpler.
  movl(saved_mark_addr, swap_reg);
  if (need_tmp_reg) {
    push(tmp_reg);
  }
  get_thread(tmp_reg);
  xorl(swap_reg, tmp_reg);
  if (swap_reg_contains_mark) {
    null_check_offset = offset();
  }
  movl(tmp_reg, klass_addr);
  xorl(swap_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
  andl(swap_reg, ~((int) markOopDesc::age_mask_in_place));
  if (need_tmp_reg) {
    pop(tmp_reg);
  }
  if (counters != NULL) {
    cond_inc32(Assembler::zero,
               ExternalAddress((address)counters->biased_lock_entry_count_addr()));
  }
  jcc(Assembler::equal, done);

  Label try_revoke_bias;
  Label try_rebias;

  // At this point we know that the header has the bias pattern and
  // that we are not the bias owner in the current epoch. We need to
  // figure out more details about the state of the header in order to
  // know what operations can be legally performed on the object's
  // header.

  // If the low three bits in the xor result aren't clear, that means
  // the prototype header is no longer biased and we have to revoke
  // the bias on this object.
  testl(swap_reg, markOopDesc::biased_lock_mask_in_place);
  jcc(Assembler::notZero, try_revoke_bias);

  // Biasing is still enabled for this data type. See whether the
  // epoch of the current bias is still valid, meaning that the epoch
  // bits of the mark word are equal to the epoch bits of the
  // prototype header. (Note that the prototype header's epoch bits
  // only change at a safepoint.) If not, attempt to rebias the object
  // toward the current thread. Note that we must be absolutely sure
  // that the current epoch is invalid in order to do this because
  // otherwise the manipulations it performs on the mark word are
  // illegal.
  testl(swap_reg, markOopDesc::epoch_mask_in_place);
  jcc(Assembler::notZero, try_rebias);

  // The epoch of the current bias is still valid but we know nothing
  // about the owner; it might be set or it might be clear. Try to
  // acquire the bias of the object using an atomic operation. If this
  // fails we will go in to the runtime to revoke the object's bias.
  // Note that we first construct the presumed unbiased header so we
  // don't accidentally blow away another thread's valid bias.
  movl(swap_reg, saved_mark_addr);
  andl(swap_reg,
       markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
  if (need_tmp_reg) {
    push(tmp_reg);
  }
  get_thread(tmp_reg);
  orl(tmp_reg, swap_reg);
  if (os::is_MP()) {
    lock();
  }
  cmpxchgptr(tmp_reg, Address(obj_reg, 0));
  if (need_tmp_reg) {
    pop(tmp_reg);
  }
  // If the biasing toward our thread failed, this means that
  // another thread succeeded in biasing it toward itself and we
  // need to revoke that bias. The revocation will occur in the
  // interpreter runtime in the slow case.
  if (counters != NULL) {
    cond_inc32(Assembler::zero,
               ExternalAddress((address)counters->anonymously_biased_lock_entry_count_addr()));
  }
  if (slow_case != NULL) {
    jcc(Assembler::notZero, *slow_case);
  }
  jmp(done);

  bind(try_rebias);
  // At this point we know the epoch has expired, meaning that the
  // current "bias owner", if any, is actually invalid. Under these
  // circumstances _only_, we are allowed to use the current header's
  // value as the comparison value when doing the cas to acquire the
  // bias in the current epoch. In other words, we allow transfer of
  // the bias from one thread to another directly in this situation.
  //
  // FIXME: due to a lack of registers we currently blow away the age
  // bits in this situation. Should attempt to preserve them.
  if (need_tmp_reg) {
    push(tmp_reg);
  }
  get_thread(tmp_reg);
  movl(swap_reg, klass_addr);
  orl(tmp_reg, Address(swap_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
  movl(swap_reg, saved_mark_addr);
  if (os::is_MP()) {
    lock();
  }
  cmpxchgptr(tmp_reg, Address(obj_reg, 0));
  if (need_tmp_reg) {
    pop(tmp_reg);
  }
  // If the biasing toward our thread failed, then another thread
  // succeeded in biasing it toward itself and we need to revoke that
  // bias. The revocation will occur in the runtime in the slow case.
  if (counters != NULL) {
    cond_inc32(Assembler::zero,
               ExternalAddress((address)counters->rebiased_lock_entry_count_addr()));
  }
  if (slow_case != NULL) {
    jcc(Assembler::notZero, *slow_case);
  }
  jmp(done);

  bind(try_revoke_bias);
  // The prototype mark in the klass doesn't have the bias bit set any
  // more, indicating that objects of this data type are not supposed
  // to be biased any more. We are going to try to reset the mark of
  // this object to the prototype value and fall through to the
  // CAS-based locking scheme. Note that if our CAS fails, it means
  // that another thread raced us for the privilege of revoking the
  // bias of this particular object, so it's okay to continue in the
  // normal locking code.
  //
  // FIXME: due to a lack of registers we currently blow away the age
  // bits in this situation. Should attempt to preserve them.
  movl(swap_reg, saved_mark_addr);
  if (need_tmp_reg) {
    push(tmp_reg);
  }
  movl(tmp_reg, klass_addr);
  movl(tmp_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
  if (os::is_MP()) {
    lock();
  }
  cmpxchgptr(tmp_reg, Address(obj_reg, 0));
  if (need_tmp_reg) {
    pop(tmp_reg);
  }
  // Fall through to the normal CAS-based lock, because no matter what
  // the result of the above CAS, some thread must have succeeded in
  // removing the bias bit from the object's header.
  if (counters != NULL) {
    cond_inc32(Assembler::zero,
               ExternalAddress((address)counters->revoked_lock_entry_count_addr()));
  }

  bind(cas_label);

  return null_check_offset;
}
// Call a leaf C runtime entry point (no Java frame, no safepoint poll).
// 32-bit: arguments were pushed on the stack by pass_arg*, so pop them
// off again once the call returns.
void MacroAssembler::call_VM_leaf_base(address entry_point,
                                       int number_of_arguments) {
  call(RuntimeAddress(entry_point));
  increment(rsp, number_of_arguments * wordSize); // discard pushed arguments
}
// Compare a memory operand against an oop constant.  The oop is embedded
// as a 32-bit immediate and registered with an oop relocation so the GC
// can find and update it when the object moves.
void MacroAssembler::cmpoop(Address src1, jobject obj) {
  cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
}
// Compare a register against an oop constant embedded as a relocated
// 32-bit immediate (see the Address overload above for why).
void MacroAssembler::cmpoop(Register src1, jobject obj) {
  cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
}
4474 void MacroAssembler::extend_sign(Register hi, Register lo) {
4475 // According to Intel Doc. AP-526, "Integer Divide", p.18.
4476 if (VM_Version::is_P6() && hi == rdx && lo == rax) {
4477 cdql();
4478 } else {
4479 movl(hi, lo);
4480 sarl(hi, 31);
4481 }
4482 }
// Emit a 5-byte no-op built from four ignored segment-override prefixes
// plus a one-byte nop.  Being a single instruction, it can be replaced
// atomically when patching a method entry.
void MacroAssembler::fat_nop() {
  // A 5 byte nop that is safe for patching (see patch_verified_entry)
  emit_byte(0x26); // es:
  emit_byte(0x2e); // cs:
  emit_byte(0x64); // fs:
  emit_byte(0x65); // gs:
  emit_byte(0x90); // nop
}
// Jump to L if the FPU condition flag C2 is set.  fnstsw_ax copies the
// FPU status word into ax and sahf transfers ah into the CPU flags,
// where C2 lands in the parity flag.  rax is preserved around the
// sequence via tmp.
void MacroAssembler::jC2(Register tmp, Label& L) {
  // set parity bit if FPU flag C2 is set (via rax)
  save_rax(tmp);
  fwait(); fnstsw_ax();
  sahf();
  restore_rax(tmp);
  // branch
  jcc(Assembler::parity, L);
}
// Jump to L if the FPU condition flag C2 is clear — the inverse of
// jC2 above (same status-word-to-flags transfer, opposite branch).
void MacroAssembler::jnC2(Register tmp, Label& L) {
  // set parity bit if FPU flag C2 is set (via rax)
  save_rax(tmp);
  fwait(); fnstsw_ax();
  sahf();
  restore_rax(tmp);
  // branch
  jcc(Assembler::noParity, L);
}
4513 // 32bit can do a case table jump in one instruction but we no longer allow the base
4514 // to be installed in the Address class
// Indirect jump through a jump-table entry; on 32-bit the table base
// can be folded into the addressing mode directly.
void MacroAssembler::jump(ArrayAddress entry) {
  jmp(as_Address(entry));
}
4519 // Note: y_lo will be destroyed
// Three-way compare of two 64-bit values held in 32-bit register pairs.
// Leaves -1, 0 or 1 in x_hi.  High words compare signed; low words
// compare unsigned (below), per two's-complement long ordering.
void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
  // Long compare for Java (semantics as described in JVM spec.)
  Label high, low, done;

  cmpl(x_hi, y_hi);
  jcc(Assembler::less, low);
  jcc(Assembler::greater, high);
  // x_hi is the return register
  xorl(x_hi, x_hi);                // high words equal: result hinges on low words
  cmpl(x_lo, y_lo);
  jcc(Assembler::below, low);      // unsigned compare for the low words
  jcc(Assembler::equal, done);     // fully equal -> 0

  bind(high);
  xorl(x_hi, x_hi);
  increment(x_hi);                 // x > y -> 1
  jmp(done);

  bind(low);
  xorl(x_hi, x_hi);
  decrementl(x_hi);                // x < y -> -1

  bind(done);
}
// Load the literal's address (not its contents) into dst, carrying the
// literal's relocation info so the address can be patched.
void MacroAssembler::lea(Register dst, AddressLiteral src) {
  mov_literal32(dst, (int32_t)src.target(), src.rspec());
}
// Store the literal's address into a memory location.
void MacroAssembler::lea(Address dst, AddressLiteral adr) {
  // leal(dst, as_Address(adr));
  // see note in movl as to why we must use a move
  mov_literal32(dst, (int32_t) adr.target(), adr.rspec());
}
// Tear down the current frame: restore rsp from rbp, then pop the
// saved frame pointer (explicit mov/pop rather than the leave opcode).
void MacroAssembler::leave() {
  mov(rsp, rbp);
  pop(rbp);
}
void MacroAssembler::lmul(int x_rsp_offset, int y_rsp_offset) {
  // Multiplication of two Java long values stored on the stack
  // as illustrated below. Result is in rdx:rax.
  //
  // rsp ---> [  ??  ] \               \
  //            ....    | y_rsp_offset |
  //          [ y_lo ]  / (in bytes)   | x_rsp_offset
  //          [ y_hi ]                 | (in bytes)
  //            ....                   |
  //          [ x_lo ]                 /
  //          [ x_hi ]
  //            ....
  //
  // Basic idea: lo(result) = lo(x_lo * y_lo)
  //             hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) + lo(x_lo * y_hi)
  // Clobbers rax, rbx, rcx, rdx.
  Address x_hi(rsp, x_rsp_offset + wordSize); Address x_lo(rsp, x_rsp_offset);
  Address y_hi(rsp, y_rsp_offset + wordSize); Address y_lo(rsp, y_rsp_offset);
  Label quick;
  // load x_hi, y_hi and check if quick
  // multiplication is possible
  movl(rbx, x_hi);
  movl(rcx, y_hi);
  movl(rax, rbx);     // rax = x_hi, ready for the first partial product
  orl(rbx, rcx);      // rbx, = 0 <=> x_hi = 0 and y_hi = 0
  jcc(Assembler::zero, quick);   // if rbx, = 0 do quick multiply
  // do full multiplication
  // 1st step
  mull(y_lo);         // x_hi * y_lo
  movl(rbx, rax);     // save lo(x_hi * y_lo) in rbx,
  // 2nd step
  movl(rax, x_lo);
  mull(rcx);          // x_lo * y_hi
  addl(rbx, rax);     // add lo(x_lo * y_hi) to rbx,
  // 3rd step
  bind(quick);        // note: rbx, = 0 if quick multiply!
  movl(rax, x_lo);
  mull(y_lo);         // x_lo * y_lo
  addl(rdx, rbx);     // correct hi(x_lo * y_lo)
}
// Two's-complement negate of the 64-bit value in the hi:lo pair:
// negate the low word, propagate the borrow into hi, then negate hi.
void MacroAssembler::lneg(Register hi, Register lo) {
  negl(lo);
  adcl(hi, 0);   // fold in the borrow from the low-word negate
  negl(hi);
}
void MacroAssembler::lshl(Register hi, Register lo) {
  // Java shift left long support (semantics as described in JVM spec., p.305)
  // (basic idea for shift counts s >= n: x << s == (x << n) << (s - n))
  // shift value is in rcx !
  assert(hi != rcx, "must not use rcx");
  assert(lo != rcx, "must not use rcx");
  const Register s = rcx;                        // shift count
  const int      n = BitsPerWord;
  Label L;
  andl(s, 0x3f);                     // s := s & 0x3f (s < 0x40)
  cmpl(s, n);                        // if (s < n)
  jcc(Assembler::less, L);           // else (s >= n)
  movl(hi, lo);                      // x := x << n  (low word becomes high word)
  xorl(lo, lo);
  // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
  bind(L);                           // s (mod n) < n
  shldl(hi, lo);                     // x := x << s  (shift hi, filling from lo)
  shll(lo);
}
void MacroAssembler::lshr(Register hi, Register lo, bool sign_extension) {
  // Java shift right long support (semantics as described in JVM spec., p.306 & p.310)
  // (basic idea for shift counts s >= n: x >> s == (x >> n) >> (s - n))
  // sign_extension selects arithmetic (>>) vs logical (>>>) shift.
  assert(hi != rcx, "must not use rcx");
  assert(lo != rcx, "must not use rcx");
  const Register s = rcx;                        // shift count
  const int      n = BitsPerWord;
  Label L;
  andl(s, 0x3f);                     // s := s & 0x3f (s < 0x40)
  cmpl(s, n);                        // if (s < n)
  jcc(Assembler::less, L);           // else (s >= n)
  movl(lo, hi);                      // x := x >> n  (high word becomes low word)
  if (sign_extension) sarl(hi, 31);
  else                xorl(hi, hi);
  // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
  bind(L);                           // s (mod n) < n
  shrdl(lo, hi);                     // x := x >> s  (shift lo, filling from hi)
  if (sign_extension) sarl(hi);
  else                shrl(hi);
}
// Load an oop constant into a register as a relocated 32-bit immediate.
void MacroAssembler::movoop(Register dst, jobject obj) {
  mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
}
// Store an oop constant to memory as a relocated 32-bit immediate.
void MacroAssembler::movoop(Address dst, jobject obj) {
  mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
}
// Load a pointer-sized value described by an AddressLiteral:
// an lval loads the (relocated) address itself, otherwise the
// contents at that address.
void MacroAssembler::movptr(Register dst, AddressLiteral src) {
  if (src.is_lval()) {
    mov_literal32(dst, (intptr_t)src.target(), src.rspec());
  } else {
    movl(dst, as_Address(src));
  }
}
// Store a register into an array slot (32-bit: plain movl).
void MacroAssembler::movptr(ArrayAddress dst, Register src) {
  movl(as_Address(dst), src);
}
// Load a register from an array slot (32-bit: plain movl).
void MacroAssembler::movptr(Register dst, ArrayAddress src) {
  movl(dst, as_Address(src));
}
4672 // src should NEVER be a real pointer. Use AddressLiteral for true pointers
// Store a pointer-sized immediate; on 32-bit intptr_t fits in a movl.
void MacroAssembler::movptr(Address dst, intptr_t src) {
  movl(dst, src);
}
// Load a scalar double from a literal address (32-bit: always directly
// addressable, no reachability check needed).
void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src) {
  movsd(dst, as_Address(src));
}
// Restore the registers saved by push_callee_saved_registers, in
// reverse order of the pushes.
void MacroAssembler::pop_callee_saved_registers() {
  pop(rcx);
  pop(rdx);
  pop(rdi);
  pop(rsi);
}
// Pop a double from the stack onto the FPU register stack, then
// release the two 32-bit words it occupied.
void MacroAssembler::pop_fTOS() {
  fld_d(Address(rsp, 0));
  addl(rsp, 2 * wordSize);
}
// Save rsi/rdi/rdx/rcx; matched by pop_callee_saved_registers above.
void MacroAssembler::push_callee_saved_registers() {
  push(rsi);
  push(rdi);
  push(rdx);
  push(rcx);
}
// Spill the FPU top-of-stack double onto the CPU stack (pops the FPU
// stack); matched by pop_fTOS above.
void MacroAssembler::push_fTOS() {
  subl(rsp, 2 * wordSize);
  fstp_d(Address(rsp, 0));
}
// Push an oop constant as a relocated 32-bit immediate.
void MacroAssembler::pushoop(jobject obj) {
  push_literal32((int32_t)obj, oop_Relocation::spec_for_immediate());
}
// Push either the literal's address (lval) or the word it points at.
void MacroAssembler::pushptr(AddressLiteral src) {
  if (src.is_lval()) {
    push_literal32((int32_t)src.target(), src.rspec());
  } else {
    pushl(as_Address(src));
  }
}
// Materialize 0/1 in dst from the current zero flag: clear the whole
// word first (xorl does not disturb ZF semantics needed by setb? —
// note xorl DOES clobber flags; callers must set flags via setb's
// source condition after this pattern is emitted as-is, i.e. setb
// reads the flags produced BEFORE this call only through the condition
// captured at emit time).  NOTE(review): xorl modifies EFLAGS, so this
// relies on set_byte_if_not_zero using Assembler::notZero computed from
// xorl's result being avoided — confirm intended flag source at callers.
void MacroAssembler::set_word_if_not_zero(Register dst) {
  xorl(dst, dst);
  set_byte_if_not_zero(dst);
}
// 32-bit C calling convention: outgoing arguments go on the stack.
static void pass_arg0(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}
// See pass_arg0: all arguments are stack-passed on 32-bit.
static void pass_arg1(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}
// See pass_arg0: all arguments are stack-passed on 32-bit.
static void pass_arg2(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}
// See pass_arg0: all arguments are stack-passed on 32-bit.
static void pass_arg3(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}
4741 #ifndef PRODUCT
4742 extern "C" void findpc(intptr_t x);
4743 #endif
4745 void MacroAssembler::debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg) {
4746 // In order to get locks to work, we need to fake a in_VM state
4747 JavaThread* thread = JavaThread::current();
4748 JavaThreadState saved_state = thread->thread_state();
4749 thread->set_thread_state(_thread_in_vm);
4750 if (ShowMessageBoxOnError) {
4751 JavaThread* thread = JavaThread::current();
4752 JavaThreadState saved_state = thread->thread_state();
4753 thread->set_thread_state(_thread_in_vm);
4754 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
4755 ttyLocker ttyl;
4756 BytecodeCounter::print();
4757 }
4758 // To see where a verify_oop failed, get $ebx+40/X for this frame.
4759 // This is the value of eip which points to where verify_oop will return.
4760 if (os::message_box(msg, "Execution stopped, print registers?")) {
4761 ttyLocker ttyl;
4762 tty->print_cr("eip = 0x%08x", eip);
4763 #ifndef PRODUCT
4764 tty->cr();
4765 findpc(eip);
4766 tty->cr();
4767 #endif
4768 tty->print_cr("rax, = 0x%08x", rax);
4769 tty->print_cr("rbx, = 0x%08x", rbx);
4770 tty->print_cr("rcx = 0x%08x", rcx);
4771 tty->print_cr("rdx = 0x%08x", rdx);
4772 tty->print_cr("rdi = 0x%08x", rdi);
4773 tty->print_cr("rsi = 0x%08x", rsi);
4774 tty->print_cr("rbp, = 0x%08x", rbp);
4775 tty->print_cr("rsp = 0x%08x", rsp);
4776 BREAKPOINT;
4777 }
4778 } else {
4779 ttyLocker ttyl;
4780 ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
4781 assert(false, "DEBUG MESSAGE");
4782 }
4783 ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
4784 }
// Emit code that halts the VM with a message: pushes the message
// address and the current eip (via a call to the next instruction),
// saves all registers, then calls debug32 above.
void MacroAssembler::stop(const char* msg) {
  ExternalAddress message((address)msg);
  // push address of message
  pushptr(message.addr());
  { Label L; call(L, relocInfo::none); bind(L); }     // push eip
  pusha();                                            // push registers
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32)));
  hlt();
}
// Emit code that prints a warning via the C runtime 'warning' function
// and continues; full CPU state is preserved around the call.
void MacroAssembler::warn(const char* msg) {
  push_CPU_state();

  ExternalAddress message((address) msg);
  // push address of message
  pushptr(message.addr());

  call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));
  addl(rsp, wordSize);       // discard argument
  pop_CPU_state();
}
4808 #else // _LP64
4810 // 64 bit versions
// Convert a literal to an Address (64-bit): encoded rip-relative, so
// the target must be within 32-bit displacement range (reachable).
Address MacroAssembler::as_Address(AddressLiteral adr) {
  // amd64 always does this as a pc-rel
  // we can be absolute or disp based on the instruction type
  // jmp/call are displacements others are absolute
  assert(!adr.is_lval(), "must be rval");
  assert(reachable(adr), "must be");
  return Address((int32_t)(intptr_t)(adr.target() - pc()), adr.target(), adr.reloc());

}
// Convert an array address to an Address (64-bit): the base cannot be
// encoded directly, so materialize it in rscratch1 first.  Note this
// emits code (the lea) as a side effect.
Address MacroAssembler::as_Address(ArrayAddress adr) {
  AddressLiteral base = adr.base();
  lea(rscratch1, base);
  Address index = adr.index();
  assert(index._disp == 0, "must not have disp"); // maybe it can?
  Address array(rscratch1, index._index, index._scale, index._disp);
  return array;
}
4831 int MacroAssembler::biased_locking_enter(Register lock_reg,
4832 Register obj_reg,
4833 Register swap_reg,
4834 Register tmp_reg,
4835 bool swap_reg_contains_mark,
4836 Label& done,
4837 Label* slow_case,
4838 BiasedLockingCounters* counters) {
4839 assert(UseBiasedLocking, "why call this otherwise?");
4840 assert(swap_reg == rax, "swap_reg must be rax for cmpxchgq");
4841 assert(tmp_reg != noreg, "tmp_reg must be supplied");
4842 assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
4843 assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
4844 Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes());
4845 Address saved_mark_addr(lock_reg, 0);
4847 if (PrintBiasedLockingStatistics && counters == NULL)
4848 counters = BiasedLocking::counters();
4850 // Biased locking
4851 // See whether the lock is currently biased toward our thread and
4852 // whether the epoch is still valid
4853 // Note that the runtime guarantees sufficient alignment of JavaThread
4854 // pointers to allow age to be placed into low bits
4855 // First check to see whether biasing is even enabled for this object
4856 Label cas_label;
4857 int null_check_offset = -1;
4858 if (!swap_reg_contains_mark) {
4859 null_check_offset = offset();
4860 movq(swap_reg, mark_addr);
4861 }
4862 movq(tmp_reg, swap_reg);
4863 andq(tmp_reg, markOopDesc::biased_lock_mask_in_place);
4864 cmpq(tmp_reg, markOopDesc::biased_lock_pattern);
4865 jcc(Assembler::notEqual, cas_label);
4866 // The bias pattern is present in the object's header. Need to check
4867 // whether the bias owner and the epoch are both still current.
4868 load_prototype_header(tmp_reg, obj_reg);
4869 orq(tmp_reg, r15_thread);
4870 xorq(tmp_reg, swap_reg);
4871 andq(tmp_reg, ~((int) markOopDesc::age_mask_in_place));
4872 if (counters != NULL) {
4873 cond_inc32(Assembler::zero,
4874 ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr()));
4875 }
4876 jcc(Assembler::equal, done);
4878 Label try_revoke_bias;
4879 Label try_rebias;
4881 // At this point we know that the header has the bias pattern and
4882 // that we are not the bias owner in the current epoch. We need to
4883 // figure out more details about the state of the header in order to
4884 // know what operations can be legally performed on the object's
4885 // header.
4887 // If the low three bits in the xor result aren't clear, that means
4888 // the prototype header is no longer biased and we have to revoke
4889 // the bias on this object.
4890 testq(tmp_reg, markOopDesc::biased_lock_mask_in_place);
4891 jcc(Assembler::notZero, try_revoke_bias);
4893 // Biasing is still enabled for this data type. See whether the
4894 // epoch of the current bias is still valid, meaning that the epoch
4895 // bits of the mark word are equal to the epoch bits of the
4896 // prototype header. (Note that the prototype header's epoch bits
4897 // only change at a safepoint.) If not, attempt to rebias the object
4898 // toward the current thread. Note that we must be absolutely sure
4899 // that the current epoch is invalid in order to do this because
4900 // otherwise the manipulations it performs on the mark word are
4901 // illegal.
4902 testq(tmp_reg, markOopDesc::epoch_mask_in_place);
4903 jcc(Assembler::notZero, try_rebias);
4905 // The epoch of the current bias is still valid but we know nothing
4906 // about the owner; it might be set or it might be clear. Try to
4907 // acquire the bias of the object using an atomic operation. If this
4908 // fails we will go in to the runtime to revoke the object's bias.
4909 // Note that we first construct the presumed unbiased header so we
4910 // don't accidentally blow away another thread's valid bias.
4911 andq(swap_reg,
4912 markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
4913 movq(tmp_reg, swap_reg);
4914 orq(tmp_reg, r15_thread);
4915 if (os::is_MP()) {
4916 lock();
4917 }
4918 cmpxchgq(tmp_reg, Address(obj_reg, 0));
4919 // If the biasing toward our thread failed, this means that
4920 // another thread succeeded in biasing it toward itself and we
4921 // need to revoke that bias. The revocation will occur in the
4922 // interpreter runtime in the slow case.
4923 if (counters != NULL) {
4924 cond_inc32(Assembler::zero,
4925 ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr()));
4926 }
4927 if (slow_case != NULL) {
4928 jcc(Assembler::notZero, *slow_case);
4929 }
4930 jmp(done);
4932 bind(try_rebias);
4933 // At this point we know the epoch has expired, meaning that the
4934 // current "bias owner", if any, is actually invalid. Under these
4935 // circumstances _only_, we are allowed to use the current header's
4936 // value as the comparison value when doing the cas to acquire the
4937 // bias in the current epoch. In other words, we allow transfer of
4938 // the bias from one thread to another directly in this situation.
4939 //
4940 // FIXME: due to a lack of registers we currently blow away the age
4941 // bits in this situation. Should attempt to preserve them.
4942 load_prototype_header(tmp_reg, obj_reg);
4943 orq(tmp_reg, r15_thread);
4944 if (os::is_MP()) {
4945 lock();
4946 }
4947 cmpxchgq(tmp_reg, Address(obj_reg, 0));
4948 // If the biasing toward our thread failed, then another thread
4949 // succeeded in biasing it toward itself and we need to revoke that
4950 // bias. The revocation will occur in the runtime in the slow case.
4951 if (counters != NULL) {
4952 cond_inc32(Assembler::zero,
4953 ExternalAddress((address) counters->rebiased_lock_entry_count_addr()));
4954 }
4955 if (slow_case != NULL) {
4956 jcc(Assembler::notZero, *slow_case);
4957 }
4958 jmp(done);
4960 bind(try_revoke_bias);
4961 // The prototype mark in the klass doesn't have the bias bit set any
4962 // more, indicating that objects of this data type are not supposed
4963 // to be biased any more. We are going to try to reset the mark of
4964 // this object to the prototype value and fall through to the
4965 // CAS-based locking scheme. Note that if our CAS fails, it means
4966 // that another thread raced us for the privilege of revoking the
4967 // bias of this particular object, so it's okay to continue in the
4968 // normal locking code.
4969 //
4970 // FIXME: due to a lack of registers we currently blow away the age
4971 // bits in this situation. Should attempt to preserve them.
4972 load_prototype_header(tmp_reg, obj_reg);
4973 if (os::is_MP()) {
4974 lock();
4975 }
4976 cmpxchgq(tmp_reg, Address(obj_reg, 0));
4977 // Fall through to the normal CAS-based lock, because no matter what
4978 // the result of the above CAS, some thread must have succeeded in
4979 // removing the bias bit from the object's header.
4980 if (counters != NULL) {
4981 cond_inc32(Assembler::zero,
4982 ExternalAddress((address) counters->revoked_lock_entry_count_addr()));
4983 }
4985 bind(cas_label);
4987 return null_check_offset;
4988 }
// Call a leaf C runtime entry point (64-bit).  Arguments are already in
// the C argument registers; this only aligns the stack to 16 bytes as
// the ABI requires and, on Windows, reserves the register-arg shadow area.
void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) {
  Label L, E;

#ifdef _WIN64
  // Windows always allocates space for its register args
  assert(num_args <= 4, "only register arguments supported");
  subq(rsp,  frame::arg_reg_save_area_bytes);
#endif

  // Align stack if necessary
  testl(rsp, 15);
  jcc(Assembler::zero, L);

  subq(rsp, 8);    // misaligned: push 8 bytes so the call sees a 16-byte boundary
  {
    call(RuntimeAddress(entry_point));
  }
  addq(rsp, 8);
  jmp(E);

  bind(L);        // already aligned
  {
    call(RuntimeAddress(entry_point));
  }

  bind(E);

#ifdef _WIN64
  // restore stack pointer
  addq(rsp, frame::arg_reg_save_area_bytes);
#endif

}
// 64-bit compare of src1 against a literal's contents.  Uses a direct
// rip-relative compare when the literal is reachable, otherwise loads
// the address into the scratch register and compares indirectly.
void MacroAssembler::cmp64(Register src1, AddressLiteral src2) {
  assert(!src2.is_lval(), "should use cmpptr");

  if (reachable(src2)) {
    cmpq(src1, as_Address(src2));
  } else {
    lea(rscratch1, src2);
    Assembler::cmpq(src1, Address(rscratch1, 0));
  }
}
int MacroAssembler::corrected_idivq(Register reg) {
  // Full implementation of Java ldiv and lrem; checks for special
  // case as described in JVM spec., p.243 & p.271. The function
  // returns the (pc) offset of the idivl instruction - may be needed
  // for implicit exceptions.
  //
  //         normal case                           special case
  //
  // input : rax: dividend                         min_long
  //         reg: divisor   (may not be eax/edx)   -1
  //
  // output: rax: quotient  (= rax idiv reg)       min_long
  //         rdx: remainder (= rax irem reg)       0
  // (min_long / -1 would overflow and trap in idivq, so it is special-cased.)
  assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
  static const int64_t min_long = 0x8000000000000000;
  Label normal_case, special_case;

  // check for special case
  cmp64(rax, ExternalAddress((address) &min_long));
  jcc(Assembler::notEqual, normal_case);
  xorl(rdx, rdx); // prepare rdx for possible special case (where
                  // remainder = 0)
  cmpq(reg, -1);
  jcc(Assembler::equal, special_case);

  // handle normal case
  bind(normal_case);
  cdqq();               // sign-extend rax into rdx:rax for the divide
  int idivq_offset = offset();
  idivq(reg);

  // normal and special case exit
  bind(special_case);

  return idivq_offset;
}
5072 void MacroAssembler::decrementq(Register reg, int value) {
5073 if (value == min_jint) { subq(reg, value); return; }
5074 if (value < 0) { incrementq(reg, -value); return; }
5075 if (value == 0) { ; return; }
5076 if (value == 1 && UseIncDec) { decq(reg) ; return; }
5077 /* else */ { subq(reg, value) ; return; }
5078 }
5080 void MacroAssembler::decrementq(Address dst, int value) {
5081 if (value == min_jint) { subq(dst, value); return; }
5082 if (value < 0) { incrementq(dst, -value); return; }
5083 if (value == 0) { ; return; }
5084 if (value == 1 && UseIncDec) { decq(dst) ; return; }
5085 /* else */ { subq(dst, value) ; return; }
5086 }
// Emit a 5-byte no-op (66 66 90 66 90) that can be patched atomically.
void MacroAssembler::fat_nop() {
  // A 5 byte nop that is safe for patching (see patch_verified_entry)
  // Recommended sequence from 'Software Optimization Guide for the AMD
  // Hammer Processor'
  emit_byte(0x66);
  emit_byte(0x66);
  emit_byte(0x90);
  emit_byte(0x66);
  emit_byte(0x90);
}
5099 void MacroAssembler::incrementq(Register reg, int value) {
5100 if (value == min_jint) { addq(reg, value); return; }
5101 if (value < 0) { decrementq(reg, -value); return; }
5102 if (value == 0) { ; return; }
5103 if (value == 1 && UseIncDec) { incq(reg) ; return; }
5104 /* else */ { addq(reg, value) ; return; }
5105 }
5107 void MacroAssembler::incrementq(Address dst, int value) {
5108 if (value == min_jint) { addq(dst, value); return; }
5109 if (value < 0) { decrementq(dst, -value); return; }
5110 if (value == 0) { ; return; }
5111 if (value == 1 && UseIncDec) { incq(dst) ; return; }
5112 /* else */ { addq(dst, value) ; return; }
5113 }
5115 // 32bit can do a case table jump in one instruction but we no longer allow the base
5116 // to be installed in the Address class
// Case-table jump (64-bit): the table base must first be materialized
// in rscratch1, then the indexed entry is jumped through.
void MacroAssembler::jump(ArrayAddress entry) {
  lea(rscratch1, entry.base());
  Address dispatch = entry.index();
  assert(dispatch._base == noreg, "must be");
  dispatch._base = rscratch1;
  jmp(dispatch);
}
// Not used on 64-bit: longs live in single registers, so the paired
// form is meaningless here.  Guarded by ShouldNotReachHere.
void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
  ShouldNotReachHere(); // 64bit doesn't use two regs
  cmpq(x_lo, y_lo);
}
// Load the literal's 64-bit address (not its contents) into dst,
// carrying the relocation info.
void MacroAssembler::lea(Register dst, AddressLiteral src) {
    mov_literal64(dst, (intptr_t)src.target(), src.rspec());
}
// Store the literal's 64-bit address into memory, going through the
// scratch register since there is no 64-bit immediate-to-memory move.
void MacroAssembler::lea(Address dst, AddressLiteral adr) {
  mov_literal64(rscratch1, (intptr_t)adr.target(), adr.rspec());
  movptr(dst, rscratch1);
}
// Tear down the current frame using the single-byte LEAVE opcode.
void MacroAssembler::leave() {
  // %%% is this really better? Why not on 32bit too?
  emit_byte(0xC9); // LEAVE
}
// Not used on 64-bit: longs live in single registers (see lcmp2int).
void MacroAssembler::lneg(Register hi, Register lo) {
  ShouldNotReachHere(); // 64bit doesn't use two regs
  negq(lo);
}
// Load an oop constant into a register as a relocated 64-bit immediate.
void MacroAssembler::movoop(Register dst, jobject obj) {
  mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate());
}
// Store an oop constant to memory: no 64-bit immediate stores exist,
// so stage the value in the scratch register.
void MacroAssembler::movoop(Address dst, jobject obj) {
  mov_literal64(rscratch1, (intptr_t)obj, oop_Relocation::spec_for_immediate());
  movq(dst, rscratch1);
}
5158 void MacroAssembler::movptr(Register dst, AddressLiteral src) {
5159 if (src.is_lval()) {
5160 mov_literal64(dst, (intptr_t)src.target(), src.rspec());
5161 } else {
5162 if (reachable(src)) {
5163 movq(dst, as_Address(src));
5164 } else {
5165 lea(rscratch1, src);
5166 movq(dst, Address(rscratch1,0));
5167 }
5168 }
5169 }
// Store a register into an array slot (as_Address materializes the
// base in rscratch1 as a side effect).
void MacroAssembler::movptr(ArrayAddress dst, Register src) {
  movq(as_Address(dst), src);
}
// Load a register from an array slot (as_Address materializes the
// base in rscratch1 as a side effect).
void MacroAssembler::movptr(Register dst, ArrayAddress src) {
  movq(dst, as_Address(src));
}
5179 // src should NEVER be a real pointer. Use AddressLiteral for true pointers
// Store a full 64-bit immediate to memory via the scratch register
// (x86-64 has no 64-bit immediate-to-memory move).
void MacroAssembler::movptr(Address dst, intptr_t src) {
  mov64(rscratch1, src);
  movq(dst, rscratch1);
}
5185 // These are mostly for initializing NULL
// Store a small (32-bit) constant sign-extended to 64 bits in memory.
void MacroAssembler::movptr(Address dst, int32_t src) {
  movslq(dst, src);
}
// Load a small (32-bit) constant into a register, widened to 64 bits.
void MacroAssembler::movptr(Register dst, int32_t src) {
  mov64(dst, (intptr_t)src);
}
// Push an oop constant: stage the relocated 64-bit immediate in
// rscratch1 (no 64-bit push-immediate exists), then push it.
void MacroAssembler::pushoop(jobject obj) {
  movoop(rscratch1, obj);
  push(rscratch1);
}
// Push either the literal's address (lval) or the word it points at;
// both paths go through rscratch1.
void MacroAssembler::pushptr(AddressLiteral src) {
  lea(rscratch1, src);
  if (src.is_lval()) {
    push(rscratch1);                  // push the address itself
  } else {
    pushq(Address(rscratch1, 0));     // push the pointed-to word
  }
}
// Clear the thread's last-Java-frame anchor.  The sp is always
// cleared (that is what marks the anchor invalid); fp and pc are
// cleared on request.
void MacroAssembler::reset_last_Java_frame(bool clear_fp,
                                           bool clear_pc) {
  // we must set sp to zero to clear frame
  movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), (int32_t)NULL_WORD);
  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), (int32_t)NULL_WORD);
  }

  if (clear_pc) {
    movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), (int32_t)NULL_WORD);
  }
}
// Record the current Java frame in the thread's anchor so stack
// walkers can find it.  sp defaults to rsp; fp and pc are optional.
// Note: sp is stored last, since a valid sp is what marks the anchor
// as set.
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address last_java_pc) {
  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = rsp;
  }

  // last_java_fp is optional
  if (last_java_fp->is_valid()) {
    movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()),
           last_java_fp);
  }

  // last_java_pc is optional
  if (last_java_pc != NULL) {
    Address java_pc(r15_thread,
                    JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
    lea(rscratch1, InternalAddress(last_java_pc));
    movptr(java_pc, rscratch1);
  }

  movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
}
// 64-bit C calling convention: move into the first argument register
// unless it is already there.
static void pass_arg0(MacroAssembler* masm, Register arg) {
  if (c_rarg0 != arg ) {
    masm->mov(c_rarg0, arg);
  }
}
// Move into the second C argument register unless already there.
static void pass_arg1(MacroAssembler* masm, Register arg) {
  if (c_rarg1 != arg ) {
    masm->mov(c_rarg1, arg);
  }
}
// Move into the third C argument register unless already there.
static void pass_arg2(MacroAssembler* masm, Register arg) {
  if (c_rarg2 != arg ) {
    masm->mov(c_rarg2, arg);
  }
}
// Move into the fourth C argument register unless already there.
static void pass_arg3(MacroAssembler* masm, Register arg) {
  if (c_rarg3 != arg ) {
    masm->mov(c_rarg3, arg);
  }
}
// Emit code that halts the VM with a message: saves all registers with
// pusha, passes message / faulting rip / register-array pointer in the
// C argument registers, aligns the stack, and calls debug64 below.
void MacroAssembler::stop(const char* msg) {
  address rip = pc();
  pusha(); // get regs on stack
  lea(c_rarg0, ExternalAddress((address) msg));
  lea(c_rarg1, InternalAddress(rip));
  movq(c_rarg2, rsp); // pass pointer to regs array
  andq(rsp, -16); // align stack as required by ABI
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
  hlt();
}
// Emit code that prints a warning via the C runtime 'warning' function
// and continues.  r12 is used to remember the original rsp across the
// 16-byte stack alignment; full CPU state is saved around the call.
void MacroAssembler::warn(const char* msg) {
  push(r12);
  movq(r12, rsp);
  andq(rsp, -16);     // align stack as required by push_CPU_state and call

  push_CPU_state();   // keeps alignment at 16 bytes
  lea(c_rarg0, ExternalAddress((address) msg));
  call_VM_leaf(CAST_FROM_FN_PTR(address, warning), c_rarg0);
  pop_CPU_state();

  movq(rsp, r12);     // restore original rsp
  pop(r12);
}
5297 #ifndef PRODUCT
5298 extern "C" void findpc(intptr_t x);
5299 #endif
// Runtime debug trap for 64-bit: called from stop() with the message,
// the faulting rip, and a pointer to the pusha-saved register array
// (indices reflect pusha's push order, r15 first at regs[0] up to rax
// at regs[15]).  Pops up a message box when ShowMessageBoxOnError,
// otherwise just prints the message.
void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
  // In order to get locks to work, we need to fake a in_VM state
  if (ShowMessageBoxOnError ) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
#ifndef PRODUCT
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      ttyLocker ttyl;
      BytecodeCounter::print();
    }
#endif
    // To see where a verify_oop failed, get $ebx+40/X for this frame.
    // XXX correct this offset for amd64
    // This is the value of eip which points to where verify_oop will return.
    if (os::message_box(msg, "Execution stopped, print registers?")) {
      ttyLocker ttyl;
      tty->print_cr("rip = 0x%016lx", pc);
#ifndef PRODUCT
      tty->cr();
      findpc(pc);
      tty->cr();
#endif
      tty->print_cr("rax = 0x%016lx", regs[15]);
      tty->print_cr("rbx = 0x%016lx", regs[12]);
      tty->print_cr("rcx = 0x%016lx", regs[14]);
      tty->print_cr("rdx = 0x%016lx", regs[13]);
      tty->print_cr("rdi = 0x%016lx", regs[8]);
      tty->print_cr("rsi = 0x%016lx", regs[9]);
      tty->print_cr("rbp = 0x%016lx", regs[10]);
      tty->print_cr("rsp = 0x%016lx", regs[11]);
      tty->print_cr("r8  = 0x%016lx", regs[7]);
      tty->print_cr("r9  = 0x%016lx", regs[6]);
      tty->print_cr("r10 = 0x%016lx", regs[5]);
      tty->print_cr("r11 = 0x%016lx", regs[4]);
      tty->print_cr("r12 = 0x%016lx", regs[3]);
      tty->print_cr("r13 = 0x%016lx", regs[2]);
      tty->print_cr("r14 = 0x%016lx", regs[1]);
      tty->print_cr("r15 = 0x%016lx", regs[0]);
      BREAKPOINT;
    }
    ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
  } else {
    ttyLocker ttyl;
    ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n",
                    msg);
  }
}
5350 #endif // _LP64
5352 // Now versions that are common to 32/64 bit
// Pointer-width add of an immediate: addq on 64-bit, addl on 32-bit.
void MacroAssembler::addptr(Register dst, int32_t imm32) {
  LP64_ONLY(addq(dst, imm32)) NOT_LP64(addl(dst, imm32));
}
// Pointer-width register add: addq on 64-bit, addl on 32-bit.
void MacroAssembler::addptr(Register dst, Register src) {
  LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
}
// Pointer-width add into memory: addq on 64-bit, addl on 32-bit.
void MacroAssembler::addptr(Address dst, Register src) {
  LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
}
// Pad the code stream with nops until the current offset is a
// multiple of 'modulus'.
void MacroAssembler::align(int modulus) {
  if (offset() % modulus != 0) {
    nop(modulus - (offset() % modulus));
  }
}
// Packed-double bitwise AND against a literal-addressed operand.
void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src) {
  andpd(dst, as_Address(src));
}
// Pointer-width AND with an immediate: andq on 64-bit, andl on 32-bit.
void MacroAssembler::andptr(Register dst, int32_t imm32) {
  LP64_ONLY(andq(dst, imm32)) NOT_LP64(andl(dst, imm32));
}
// Atomically increment a 32-bit counter in memory.  EFLAGS are saved
// and restored around the locked increment so callers' flag state is
// preserved (used by cond_inc32).
void MacroAssembler::atomic_incl(AddressLiteral counter_addr) {
  pushf();
  if (os::is_MP())
    lock();            // lock prefix only needed on multiprocessor
  incrementl(counter_addr);
  popf();
}
5388 // Writes to stack successive pages until offset reached to check for
5389 // stack overflow + shadow pages. This clobbers tmp.
5390 void MacroAssembler::bang_stack_size(Register size, Register tmp) {
5391 movptr(tmp, rsp);
5392 // Bang stack for total size given plus shadow page size.
5393 // Bang one page at a time because large size can bang beyond yellow and
5394 // red zones.
5395 Label loop;
5396 bind(loop);
5397 movl(Address(tmp, (-os::vm_page_size())), size );
5398 subptr(tmp, os::vm_page_size());
5399 subl(size, os::vm_page_size());
5400 jcc(Assembler::greater, loop);
5402 // Bang down shadow pages too.
5403 // The -1 because we already subtracted 1 page.
5404 for (int i = 0; i< StackShadowPages-1; i++) {
5405 // this could be any sized move but this is can be a debugging crumb
5406 // so the bigger the better.
5407 movptr(Address(tmp, (-i*os::vm_page_size())), size );
5408 }
5409 }
// Release a biased lock: if the mark word carries the biased pattern,
// unlocking is a no-op and we branch straight to 'done'.
void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) {
  assert(UseBiasedLocking, "why call this otherwise?");

  // Check for biased locking unlock case, which is a no-op
  // Note: we do not have to check the thread ID for two reasons.
  // First, the interpreter checks for IllegalMonitorStateException at
  // a higher level. Second, if the bias was revoked while we held the
  // lock, the object could not be rebiased toward another thread, so
  // the bias bit would be clear.
  movptr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
  andptr(temp_reg, markOopDesc::biased_lock_mask_in_place);
  cmpptr(temp_reg, markOopDesc::biased_lock_pattern);
  jcc(Assembler::equal, done);
}
// Normalize a C-style boolean in x to exactly 0 or 1.
void MacroAssembler::c2bool(Register x) {
  // implements x == 0 ? 0 : 1
  // note: must only look at least-significant byte of x
  //       since C-style booleans are stored in one byte
  //       only! (was bug)
  andl(x, 0xFF);                 // andl also sets ZF, consumed by setb below
  setb(Assembler::notZero, x);
}
// Wouldn't need if AddressLiteral version had new name
void MacroAssembler::call(Label& L, relocInfo::relocType rtype) {
  Assembler::call(L, rtype);
}

// Indirect call through a register.
void MacroAssembler::call(Register entry) {
  Assembler::call(entry);
}

// Call to a literal target; if the target is out of direct-call range
// (64-bit only), go indirect through rscratch1.
void MacroAssembler::call(AddressLiteral entry) {
  if (reachable(entry)) {
    Assembler::call_literal(entry.target(), entry.rspec());
  } else {
    lea(rscratch1, entry);
    Assembler::call(rscratch1);
  }
}
// Implementation of call_VM versions
//
// These overloads (0-3 register arguments, implicit last_java_sp) all
// use the same trick: a near call to local label C pushes the return
// pc, which call_VM_helper later uses as the last_Java_pc/last_Java_sp
// anchor; the jmp to E skips over that out-of-line call sequence.

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);

  // Args are passed in reverse order so an earlier arg is never moved
  // into a c_rarg register a later arg still occupies (64-bit).
  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));

  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
  ret(0);

  bind(E);
}

void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);

  LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
  LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
  pass_arg3(this, arg_3);

  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
  pass_arg2(this, arg_2);

  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
  ret(0);

  bind(E);
}
// call_VM overloads with an explicit last_java_sp (0-3 register args).
// These forward to call_VM_base directly; arguments are staged in
// reverse order so no c_rarg register is clobbered before it is read.

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             int number_of_arguments,
                             bool check_exceptions) {
  Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
  call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {

  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}

void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
  LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
  pass_arg3(this, arg_3);
  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}
// Common tail of all call_VM flavors: set up the last-Java-frame
// anchor, call into the VM at entry_point, restore the thread pointer,
// clear the anchor, and optionally check for pending exceptions and
// fetch an oop result. java_thread/last_java_sp may be noreg, in which
// case defaults are chosen below.
void MacroAssembler::call_VM_base(Register oop_result,
                                  Register java_thread,
                                  Register last_java_sp,
                                  address  entry_point,
                                  int      number_of_arguments,
                                  bool     check_exceptions) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
#ifdef _LP64
    java_thread = r15_thread;
#else
    java_thread = rdi;
    get_thread(java_thread);
#endif // LP64
  }
  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = rsp;
  }
  // debugging support
  assert(number_of_arguments >= 0   , "cannot have negative number of arguments");
  LP64_ONLY(assert(java_thread == r15_thread, "unexpected register"));
  assert(java_thread != oop_result  , "cannot use the same register for java_thread & oop_result");
  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

  // push java thread (becomes first argument of C function)

  NOT_LP64(push(java_thread); number_of_arguments++);
  LP64_ONLY(mov(c_rarg0, r15_thread));

  // set last Java frame before call
  assert(last_java_sp != rbp, "can't use ebp/rbp");

  // Only interpreter should have to set fp
  set_last_Java_frame(java_thread, last_java_sp, rbp, NULL);

  // do the call, remove parameters
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);

  // restore the thread (cannot use the pushed argument since arguments
  // may be overwritten by C code generated by an optimizing compiler);
  // however can use the register value directly if it is callee saved.
  if (LP64_ONLY(true ||) java_thread == rdi || java_thread == rsi) {
    // rdi & rsi (also r15) are callee saved -> nothing to do
#ifdef ASSERT
    guarantee(java_thread != rax, "change this code");
    push(rax);
    { Label L;
      get_thread(rax);
      cmpptr(java_thread, rax);
      jcc(Assembler::equal, L);
      stop("MacroAssembler::call_VM_base: rdi not callee saved?");
      bind(L);
    }
    pop(rax);
#endif
  } else {
    get_thread(java_thread);
  }
  // reset last Java frame
  // Only interpreter should have to clear fp
  reset_last_Java_frame(java_thread, true, false);

#ifndef CC_INTERP
   // C++ interp handles this in the interpreter
  check_and_handle_popframe(java_thread);
  check_and_handle_earlyret(java_thread);
#endif /* CC_INTERP */

  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
#ifndef _LP64
    jump_cc(Assembler::notEqual,
            RuntimeAddress(StubRoutines::forward_exception_entry()));
#else
    // This used to conditionally jump to forward_exception however it is
    // possible if we relocate that the branch will not reach. So we must jump
    // around so we can always reach

    Label ok;
    jcc(Assembler::equal, ok);
    jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    bind(ok);
#endif // LP64
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    movptr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
    movptr(Address(java_thread, JavaThread::vm_result_offset()), (int32_t)NULL_WORD);
    verify_oop(oop_result, "broken oop in call_VM_base");
  }
}
// Compute last_Java_sp (in rax) for the out-of-line call_VM sequence,
// then delegate to call_VM_base.
void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {

  // Calculate the value for last_Java_sp
  // somewhat subtle. call_VM does an intermediate call
  // which places a return address on the stack just under the
  // stack pointer as the user finished with it. This allows
  // us to retrieve last_Java_pc from last_Java_sp[-1].
  // On 32bit we then have to push additional args on the stack to accomplish
  // the actual requested call. On 64bit call_VM only can use register args
  // so the only extra space is the return address that call_VM created.
  // This hopefully explains the calculations here.

#ifdef _LP64
  // We've pushed one address, correct last_Java_sp
  lea(rax, Address(rsp, wordSize));
#else
  lea(rax, Address(rsp, (1 + number_of_arguments) * wordSize));
#endif // LP64

  call_VM_base(oop_result, noreg, rax, entry_point, number_of_arguments, check_exceptions);

}
// call_VM_leaf overloads (0-3 register args): call a VM leaf routine,
// i.e. one that needs no last-Java-frame anchor or exception check.
// Args are staged in reverse order to avoid clobbering c_rarg
// registers that still hold incoming arguments (64-bit).

void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
  call_VM_leaf_base(entry_point, number_of_arguments);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 1);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {

  LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 2);
}

void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
  LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
  pass_arg2(this, arg_2);
  LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 3);
}
// Intentionally empty here; overridden by the interpreter's
// MacroAssembler subclass where early-return/popframe handling applies.
void MacroAssembler::check_and_handle_earlyret(Register java_thread) {
}

void MacroAssembler::check_and_handle_popframe(Register java_thread) {
}
// 32-bit compare overloads. The AddressLiteral forms fall back to an
// indirect compare through rscratch1 when the literal is out of
// rip-relative range (64-bit only; reachable() is always true on 32-bit).

void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm) {
  if (reachable(src1)) {
    cmpl(as_Address(src1), imm);
  } else {
    lea(rscratch1, src1);
    cmpl(Address(rscratch1, 0), imm);
  }
}

void MacroAssembler::cmp32(Register src1, AddressLiteral src2) {
  assert(!src2.is_lval(), "use cmpptr");
  if (reachable(src2)) {
    cmpl(src1, as_Address(src2));
  } else {
    lea(rscratch1, src2);
    cmpl(src1, Address(rscratch1, 0));
  }
}

void MacroAssembler::cmp32(Register src1, int32_t imm) {
  Assembler::cmpl(src1, imm);
}

void MacroAssembler::cmp32(Register src1, Address src2) {
  Assembler::cmpl(src1, src2);
}
// Compare two doubles and materialize the Java-style result
// (-1 / 0 / +1) in dst. ucomisd sets PF on an unordered (NaN)
// comparison; unordered_is_less picks whether NaN compares as
// less-than (dcmpl) or greater-than (dcmpg).
void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
  ucomisd(opr1, opr2);

  Label L;
  if (unordered_is_less) {
    movl(dst, -1);
    jcc(Assembler::parity, L);
    jcc(Assembler::below , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    increment(dst);
  } else { // unordered is greater
    movl(dst, 1);
    jcc(Assembler::parity, L);
    jcc(Assembler::above , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    decrementl(dst);
  }
  bind(L);
}

// Single-precision variant of cmpsd2int: same -1/0/+1 protocol with
// the unordered (NaN) case steered by unordered_is_less.
void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
  ucomiss(opr1, opr2);

  Label L;
  if (unordered_is_less) {
    movl(dst, -1);
    jcc(Assembler::parity, L);
    jcc(Assembler::below , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    increment(dst);
  } else { // unordered is greater
    movl(dst, 1);
    jcc(Assembler::parity, L);
    jcc(Assembler::above , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    decrementl(dst);
  }
  bind(L);
}
// Byte compare of the memory at a literal address against imm,
// with the usual rscratch1 fallback for unreachable literals (64-bit).
void MacroAssembler::cmp8(AddressLiteral src1, int imm) {
  if (reachable(src1)) {
    cmpb(as_Address(src1), imm);
  } else {
    lea(rscratch1, src1);
    cmpb(Address(rscratch1, 0), imm);
  }
}
// Pointer-width compare against an AddressLiteral. is_lval() means
// "compare against the literal address itself" rather than against
// the value stored at it.
void MacroAssembler::cmpptr(Register src1, AddressLiteral src2) {
#ifdef _LP64
  if (src2.is_lval()) {
    movptr(rscratch1, src2);
    Assembler::cmpq(src1, rscratch1);
  } else if (reachable(src2)) {
    cmpq(src1, as_Address(src2));
  } else {
    lea(rscratch1, src2);
    Assembler::cmpq(src1, Address(rscratch1, 0));
  }
#else
  if (src2.is_lval()) {
    cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
  } else {
    cmpl(src1, as_Address(src2));
  }
#endif // _LP64
}

// Compare memory at src1 against a literal address value (lval only;
// a true mem-mem compare is not encodable on x86).
void MacroAssembler::cmpptr(Address src1, AddressLiteral src2) {
  assert(src2.is_lval(), "not a mem-mem compare");
#ifdef _LP64
  // moves src2's literal address
  movptr(rscratch1, src2);
  Assembler::cmpq(src1, rscratch1);
#else
  cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
#endif // _LP64
}
// lock-prefixed pointer-width cmpxchg of reg against memory at a
// literal address (indirect through rscratch1 if unreachable).
// Implicitly uses rax as the compare value, per the cmpxchg ISA.
void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr) {
  if (reachable(adr)) {
    if (os::is_MP())
      lock();
    cmpxchgptr(reg, as_Address(adr));
  } else {
    lea(rscratch1, adr);
    if (os::is_MP())
      lock();
    cmpxchgptr(reg, Address(rscratch1, 0));
  }
}

// Pointer-width cmpxchg (cmpxchgq on 64-bit, cmpxchgl on 32-bit).
void MacroAssembler::cmpxchgptr(Register reg, Address adr) {
  LP64_ONLY(cmpxchgq(reg, adr)) NOT_LP64(cmpxchgl(reg, adr));
}
5859 void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src) {
5860 comisd(dst, as_Address(src));
5861 }
5863 void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src) {
5864 comiss(dst, as_Address(src));
5865 }
// Atomically increment the 32-bit counter at counter_addr iff cond
// holds; implemented by branching around the increment on !cond.
void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr) {
  Condition negated_cond = negate_condition(cond);
  Label L;
  jcc(negated_cond, L);
  atomic_incl(counter_addr);
  bind(L);
}
int MacroAssembler::corrected_idivl(Register reg) {
  // Full implementation of Java idiv and irem; checks for
  // special case as described in JVM spec., p.243 & p.271.
  // The function returns the (pc) offset of the idivl
  // instruction - may be needed for implicit exceptions.
  //
  //         normal case                           special case
  //
  // input : rax,: dividend                         min_int
  //         reg: divisor   (may not be rax,/rdx)   -1
  //
  // output: rax,: quotient  (= rax, idiv reg)       min_int
  //         rdx: remainder (= rax, irem reg)       0
  // (min_int / -1 would overflow and raise #DE; the JVM spec requires
  //  quotient = min_int, remainder = 0 instead, handled here.)
  assert(reg != rax && reg != rdx, "reg cannot be rax, or rdx register");
  const int min_int = 0x80000000;
  Label normal_case, special_case;

  // check for special case
  cmpl(rax, min_int);
  jcc(Assembler::notEqual, normal_case);
  xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0)
  cmpl(reg, -1);
  jcc(Assembler::equal, special_case);

  // handle normal case
  bind(normal_case);
  cdql();          // sign-extend rax into rdx:rax before idiv
  int idivl_offset = offset();
  idivl(reg);

  // normal and special case exit
  bind(special_case);

  return idivl_offset;
}
// Decrement a 32-bit register/memory word by value, choosing the
// cheapest encoding (decl for 1 when UseIncDec, subl otherwise).
// min_jint is handled first because -min_jint overflows.

void MacroAssembler::decrementl(Register reg, int value) {
  if (value == min_jint) {subl(reg, value) ; return; }
  if (value <  0) { incrementl(reg, -value); return; }
  if (value ==  0) {                        ; return; }
  if (value ==  1 && UseIncDec) { decl(reg) ; return; }
  /* else */      { subl(reg, value)       ; return; }
}

void MacroAssembler::decrementl(Address dst, int value) {
  if (value == min_jint) {subl(dst, value) ; return; }
  if (value <  0) { incrementl(dst, -value); return; }
  if (value ==  0) {                        ; return; }
  if (value ==  1 && UseIncDec) { decl(dst) ; return; }
  /* else */      { subl(dst, value)       ; return; }
}
// Signed division of reg by 2^shift_value via arithmetic shift.
// For negative dividends, (2^shift - 1) is added first so the shift
// rounds toward zero like idiv, not toward negative infinity.
void MacroAssembler::division_with_shift (Register reg, int shift_value) {
  assert (shift_value > 0, "illegal shift value");
  Label _is_positive;
  testl (reg, reg);
  jcc (Assembler::positive, _is_positive);
  int offset = (1 << shift_value) - 1 ;

  if (offset == 1) {
    incrementl(reg);
  } else {
    addl(reg, offset);
  }

  bind (_is_positive);
  sarl(reg, shift_value);
}
// !defined(COMPILER2) is because of stupid core builds
#if !defined(_LP64) || defined(COMPILER1) || !defined(COMPILER2)
// Discard all eight x87 stack registers: emms does it in one shot on
// MMX-capable CPUs, otherwise free each slot individually.
void MacroAssembler::empty_FPU_stack() {
  if (VM_Version::supports_mmx()) {
    emms();
  } else {
    for (int i = 8; i-- > 0; ) ffree(i);
  }
}
#endif // !LP64 || C1 || !C2
// Defines obj, preserves var_size_in_bytes
// Inline (lock-free) eden allocation: CAS the shared heap-top pointer
// forward by the object size, retrying on contention; branch to
// slow_case on wrap-around, heap exhaustion, or when inline contiguous
// allocation is unavailable. t1 is clobbered (holds the new top).
void MacroAssembler::eden_allocate(Register obj,
                                   Register var_size_in_bytes,
                                   int con_size_in_bytes,
                                   Register t1,
                                   Label& slow_case) {
  assert(obj == rax, "obj must be in rax, for cmpxchg");
  assert_different_registers(obj, var_size_in_bytes, t1);
  if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
    jmp(slow_case);
  } else {
    Register end = t1;
    Label retry;
    bind(retry);
    ExternalAddress heap_top((address) Universe::heap()->top_addr());
    movptr(obj, heap_top);
    if (var_size_in_bytes == noreg) {
      lea(end, Address(obj, con_size_in_bytes));
    } else {
      lea(end, Address(obj, var_size_in_bytes, Address::times_1));
    }
    // if end < obj then we wrapped around => object too long => slow case
    cmpptr(end, obj);
    jcc(Assembler::below, slow_case);
    cmpptr(end, ExternalAddress((address) Universe::heap()->end_addr()));
    jcc(Assembler::above, slow_case);
    // Compare obj with the top addr, and if still equal, store the new top addr in
    // end at the address of the top addr pointer. Sets ZF if was equal, and clears
    // it otherwise. Use lock prefix for atomicity on MPs.
    locked_cmpxchgptr(end, heap_top);
    jcc(Assembler::notEqual, retry);
  }
}
// Standard frame prologue: save caller's frame pointer, establish ours.
void MacroAssembler::enter() {
  push(rbp);
  mov(rbp, rsp);
}
// Compare ST0 against ST1, popping both.
void MacroAssembler::fcmp(Register tmp) {
  fcmp(tmp, 1, true, true);
}

// Compare ST0 against ST(index), optionally popping one or both
// operands, and leave the result in EFLAGS. On CPUs with cmov the
// fucomi family writes EFLAGS directly; otherwise the x87 status word
// is transferred via rax (fnstsw/sahf), so a temp is required.
void MacroAssembler::fcmp(Register tmp, int index, bool pop_left, bool pop_right) {
  assert(!pop_right || pop_left, "usage error");
  if (VM_Version::supports_cmov()) {
    assert(tmp == noreg, "unneeded temp");
    if (pop_left) {
      fucomip(index);
    } else {
      fucomi(index);
    }
    if (pop_right) {
      fpop();
    }
  } else {
    assert(tmp != noreg, "need temp");
    if (pop_left) {
      if (pop_right) {
        fcompp();
      } else {
        fcomp(index);
      }
    } else {
      fcom(index);
    }
    // convert FPU condition into eflags condition via rax,
    save_rax(tmp);
    fwait(); fnstsw_ax();
    sahf();
    restore_rax(tmp);
  }
  // condition codes set as follows:
  //
  // CF (corresponds to C0) if x < y
  // PF (corresponds to C2) if unordered
  // ZF (corresponds to C3) if x = y
}
// x87 compare of ST0 vs ST1 (both popped) producing -1/0/+1 in dst.
void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less) {
  fcmp2int(dst, unordered_is_less, 1, true, true);
}

// x87 compare producing the Java-style -1/0/+1 result in dst; the
// parity flag marks an unordered (NaN) compare, steered by
// unordered_is_less (fcmpl vs fcmpg semantics).
void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right) {
  fcmp(VM_Version::supports_cmov() ? noreg : dst, index, pop_left, pop_right);
  Label L;
  if (unordered_is_less) {
    movl(dst, -1);
    jcc(Assembler::parity, L);
    jcc(Assembler::below , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    increment(dst);
  } else { // unordered is greater
    movl(dst, 1);
    jcc(Assembler::parity, L);
    jcc(Assembler::above , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    decrementl(dst);
  }
  bind(L);
}
// x87 loads from literal addresses. NOTE(review): these call
// as_Address() without a reachable() check, unlike the SSE literal
// helpers; presumably fine because the x87 paths are 32-bit-only in
// practice (reachable() is always true there) — confirm before reuse
// on 64-bit.

void MacroAssembler::fld_d(AddressLiteral src) {
  fld_d(as_Address(src));
}

void MacroAssembler::fld_s(AddressLiteral src) {
  fld_s(as_Address(src));
}

void MacroAssembler::fld_x(AddressLiteral src) {
  Assembler::fld_x(as_Address(src));
}

// Load the x87 control word from a literal address.
void MacroAssembler::fldcw(AddressLiteral src) {
  Assembler::fldcw(as_Address(src));
}
// Pop the x87 stack: free ST0 and advance the top-of-stack pointer.
void MacroAssembler::fpop() {
  ffree();
  fincstp();
}
// IEEE remainder of ST0 by ST1 via fprem, which only reduces partially
// per iteration for large exponent differences; loop until the status
// word's C2 bit (incomplete-reduction, 0x400) clears. tmp shelters the
// caller's rax across the fnstsw_ax transfers.
void MacroAssembler::fremr(Register tmp) {
  save_rax(tmp);
  { Label L;
    bind(L);
    fprem();
    fwait(); fnstsw_ax();
#ifdef _LP64
    testl(rax, 0x400);
    jcc(Assembler::notEqual, L);
#else
    sahf();
    jcc(Assembler::parity, L);
#endif // _LP64
  }
  restore_rax(tmp);
  // Result is in ST0.
  // Note: fxch & fpop to get rid of ST1
  // (otherwise FPU stack could overflow eventually)
  fxch(1);
  fpop();
}
// Increment helpers. The AddressLiteral form falls back to rscratch1
// for unreachable literals (64-bit); the immediate forms pick the
// cheapest encoding (incl for 1 when UseIncDec, addl otherwise), with
// min_jint handled first because -min_jint overflows.

void MacroAssembler::incrementl(AddressLiteral dst) {
  if (reachable(dst)) {
    incrementl(as_Address(dst));
  } else {
    lea(rscratch1, dst);
    incrementl(Address(rscratch1, 0));
  }
}

void MacroAssembler::incrementl(ArrayAddress dst) {
  incrementl(as_Address(dst));
}

void MacroAssembler::incrementl(Register reg, int value) {
  if (value == min_jint) {addl(reg, value) ; return; }
  if (value <  0) { decrementl(reg, -value); return; }
  if (value ==  0) {                        ; return; }
  if (value ==  1 && UseIncDec) { incl(reg) ; return; }
  /* else */      { addl(reg, value)       ; return; }
}

void MacroAssembler::incrementl(Address dst, int value) {
  if (value == min_jint) {addl(dst, value) ; return; }
  if (value <  0) { decrementl(dst, -value); return; }
  if (value ==  0) {                        ; return; }
  if (value ==  1 && UseIncDec) { incl(dst) ; return; }
  /* else */      { addl(dst, value)       ; return; }
}
// Unconditional jump to a literal target; indirect through rscratch1
// when the target is out of direct-jump range (64-bit only).
void MacroAssembler::jump(AddressLiteral dst) {
  if (reachable(dst)) {
    jmp_literal(dst.target(), dst.rspec());
  } else {
    lea(rscratch1, dst);
    jmp(rscratch1);
  }
}
// Conditional jump to a literal target. A reachable target is encoded
// directly (short jcc when the 8-bit displacement fits, else the
// 6-byte form). An unreachable target (64-bit) cannot be a direct jcc,
// so the condition is reversed to branch around an indirect jmp.
void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst) {
  if (reachable(dst)) {
    InstructionMark im(this);
    relocate(dst.reloc());
    const int short_size = 2;
    const int long_size = 6;
    int offs = (intptr_t)dst.target() - ((intptr_t)_code_pos);
    if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) {
      // 0111 tttn #8-bit disp
      emit_byte(0x70 | cc);
      emit_byte((offs - short_size) & 0xFF);
    } else {
      // 0000 1111 1000 tttn #32-bit disp
      emit_byte(0x0F);
      emit_byte(0x80 | cc);
      emit_long(offs - long_size);
    }
  } else {
#ifdef ASSERT
    warning("reversing conditional branch");
#endif /* ASSERT */
    Label skip;
    jccb(reverse[cc], skip);
    lea(rscratch1, dst);
    Assembler::jmp(rscratch1);
    bind(skip);
  }
}
// Load the SSE control/status register (MXCSR) from a literal address,
// indirect through rscratch1 if the literal is unreachable (64-bit).
void MacroAssembler::ldmxcsr(AddressLiteral src) {
  if (reachable(src)) {
    Assembler::ldmxcsr(as_Address(src));
  } else {
    lea(rscratch1, src);
    Assembler::ldmxcsr(Address(rscratch1, 0));
  }
}
// Sign-extending loads. Each returns the code offset of the loading
// instruction so callers can register implicit-null-check PCs.
// Pre-P6 32-bit CPUs avoid movsx (slow there) via a shift pair.

int MacroAssembler::load_signed_byte(Register dst, Address src) {
  int off;
  if (LP64_ONLY(true ||) VM_Version::is_P6()) {
    off = offset();
    movsbl(dst, src); // movsxb
  } else {
    off = load_unsigned_byte(dst, src);
    shll(dst, 24);
    sarl(dst, 24);
  }
  return off;
}

// word => int32 which seems bad for 64bit
int MacroAssembler::load_signed_word(Register dst, Address src) {
  int off;
  if (LP64_ONLY(true ||) VM_Version::is_P6()) {
    // This is dubious to me since it seems safe to do a signed 16 => 64 bit
    // version but this is what 64bit has always done. This seems to imply
    // that users are only using 32bits worth.
    off = offset();
    movswl(dst, src); // movsxw
  } else {
    off = load_unsigned_word(dst, src);
    shll(dst, 16);
    sarl(dst, 16);
  }
  return off;
}
// Zero-extending loads; return the code offset of the loading
// instruction (for implicit-null-check registration). On pre-P6
// 32-bit CPUs a xor+partial-register move is used instead of movzx —
// but not when src uses dst, since the xor would clobber the address.

int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
  // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
  // and "3.9 Partial Register Penalties", p. 22).
  int off;
  if (LP64_ONLY(true || ) VM_Version::is_P6() || src.uses(dst)) {
    off = offset();
    movzbl(dst, src); // movzxb
  } else {
    xorl(dst, dst);
    off = offset();
    movb(dst, src);
  }
  return off;
}

int MacroAssembler::load_unsigned_word(Register dst, Address src) {
  // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
  // and "3.9 Partial Register Penalties", p. 22).
  int off;
  if (LP64_ONLY(true ||) VM_Version::is_P6() || src.uses(dst)) {
    off = offset();
    movzwl(dst, src); // movzxw
  } else {
    xorl(dst, dst);
    off = offset();
    movw(dst, src);
  }
  return off;
}
// 32-bit moves to/from memory at a literal address, with the usual
// rscratch1 fallback when the literal is unreachable (64-bit).

void MacroAssembler::mov32(AddressLiteral dst, Register src) {
  if (reachable(dst)) {
    movl(as_Address(dst), src);
  } else {
    lea(rscratch1, dst);
    movl(Address(rscratch1, 0), src);
  }
}

void MacroAssembler::mov32(Register dst, AddressLiteral src) {
  if (reachable(src)) {
    movl(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    movl(dst, Address(rscratch1, 0));
  }
}
// C++ bool manipulation
// sizeof(bool) is implementation-defined, so the operand width is
// selected at compile time; an unexpected size is a build error.

void MacroAssembler::movbool(Register dst, Address src) {
  if(sizeof(bool) == 1)
    movb(dst, src);
  else if(sizeof(bool) == 2)
    movw(dst, src);
  else if(sizeof(bool) == 4)
    movl(dst, src);
  else
    // unsupported
    ShouldNotReachHere();
}

void MacroAssembler::movbool(Address dst, bool boolconst) {
  if(sizeof(bool) == 1)
    movb(dst, (int) boolconst);
  else if(sizeof(bool) == 2)
    movw(dst, (int) boolconst);
  else if(sizeof(bool) == 4)
    movl(dst, (int) boolconst);
  else
    // unsupported
    ShouldNotReachHere();
}

void MacroAssembler::movbool(Address dst, Register src) {
  if(sizeof(bool) == 1)
    movb(dst, src);
  else if(sizeof(bool) == 2)
    movw(dst, src);
  else if(sizeof(bool) == 4)
    movl(dst, src);
  else
    // unsupported
    ShouldNotReachHere();
}
// Store the immediate byte src into the array element at dst.
void MacroAssembler::movbyte(ArrayAddress dst, int src) {
  movb(as_Address(dst), src);
}
// Load a double from a literal address. movsd also zeroes the upper
// half of the xmm register (avoids a false dependency) but is slower
// on some CPUs, hence the UseXmmLoadAndClearUpper switch to movlpd.
void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    if (UseXmmLoadAndClearUpper) {
      movsd (dst, as_Address(src));
    } else {
      movlpd(dst, as_Address(src));
    }
  } else {
    lea(rscratch1, src);
    if (UseXmmLoadAndClearUpper) {
      movsd (dst, Address(rscratch1, 0));
    } else {
      movlpd(dst, Address(rscratch1, 0));
    }
  }
}

// Load a float from a literal address (rscratch1 fallback as above).
void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    movss(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    movss(dst, Address(rscratch1, 0));
  }
}
// Pointer-width moves (movq on 64-bit, movl on 32-bit).

void MacroAssembler::movptr(Register dst, Register src) {
  LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
}

void MacroAssembler::movptr(Register dst, Address src) {
  LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
}

// src should NEVER be a real pointer. Use AddressLiteral for true pointers
void MacroAssembler::movptr(Register dst, intptr_t src) {
  LP64_ONLY(mov64(dst, src)) NOT_LP64(movl(dst, src));
}

void MacroAssembler::movptr(Address dst, Register src) {
  LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
}
// Scalar single-precision load from a literal address, with the
// rscratch1 fallback for unreachable literals (64-bit).
void MacroAssembler::movss(XMMRegister dst, AddressLiteral src) {
  if (reachable(src)) {
    movss(dst, as_Address(src));
  } else {
    lea(rscratch1, src);
    movss(dst, Address(rscratch1, 0));
  }
}
// Null check of reg. Small offsets can rely on the eventual faulting
// access to the protected first page; offsets past that page need an
// explicit probe of M[reg] here so the SEGV happens at a known pc.
void MacroAssembler::null_check(Register reg, int offset) {
  if (needs_explicit_null_check(offset)) {
    // provoke OS NULL exception if reg = NULL by
    // accessing M[reg] w/o changing any (non-CC) registers
    // NOTE: cmpl is plenty here to provoke a segv
    cmpptr(rax, Address(reg, 0));
    // Note: should probably use testl(rax, Address(reg, 0));
    //       may be shorter code (however, this version of
    //       testl needs to be implemented first)
  } else {
    // nothing to do, (later) access of M[reg + offset]
    // will provoke OS NULL exception if reg = NULL
  }
}
void MacroAssembler::os_breakpoint() {
  // instead of directly emitting a breakpoint, call os:breakpoint for better debugability
  // (e.g., MSVC can't call ps() otherwise)
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
}
// Restore state saved by push_CPU_state (FPU first: it sits above the
// integer state on the stack, mirroring the push order).
void MacroAssembler::pop_CPU_state() {
  pop_FPU_state();
  pop_IU_state();
}

// Restore FPU/SSE state (frstor on 32-bit, fxrstor on 64-bit) and
// release the stack area push_FPU_state reserved.
void MacroAssembler::pop_FPU_state() {
  NOT_LP64(frstor(Address(rsp, 0));)
  LP64_ONLY(fxrstor(Address(rsp, 0));)
  addptr(rsp, FPUStateSizeInWords * wordSize);
}

// Restore integer registers and flags; the addq undoes the 8-byte
// alignment pad added by push_IU_state on 64-bit.
void MacroAssembler::pop_IU_state() {
  popa();
  LP64_ONLY(addq(rsp, 8));
  popf();
}
// Save Integer and Float state
// Warning: Stack must be 16 byte aligned (64bit)
void MacroAssembler::push_CPU_state() {
  push_IU_state();
  push_FPU_state();
}

// Reserve stack space and save FPU/SSE state (fnsave on 32-bit,
// fxsave on 64-bit; fxsave requires the 16-byte alignment above).
void MacroAssembler::push_FPU_state() {
  subptr(rsp, FPUStateSizeInWords * wordSize);
#ifndef _LP64
  fnsave(Address(rsp, 0));
  fwait();
#else
  fxsave(Address(rsp, 0));
#endif // LP64
}

void MacroAssembler::push_IU_state() {
  // Push flags first because pusha kills them
  pushf();
  // Make sure rsp stays 16-byte aligned
  LP64_ONLY(subq(rsp, 8));
  pusha();
}
// Clear the thread's last-Java-frame anchor (sp always; fp and pc
// optionally) so stack walkers no longer see a Java frame.
void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp, bool clear_pc) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
    java_thread = rdi;
    get_thread(java_thread);
  }
  // we must set sp to zero to clear frame
  movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), (int32_t)NULL_WORD);
  if (clear_fp) {
    movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), (int32_t)NULL_WORD);
  }

  if (clear_pc)
    movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), (int32_t)NULL_WORD);

}
// Restore rax from tmp, or from the stack if no temp was supplied
// (counterpart of save_rax below).
void MacroAssembler::restore_rax(Register tmp) {
  if (tmp == noreg) pop(rax);
  else if (tmp != rax) mov(rax, tmp);
}

// Round reg up to the next multiple of modulus (modulus must be a
// power of two for the mask to work).
void MacroAssembler::round_to(Register reg, int modulus) {
  addptr(reg, modulus - 1);
  andptr(reg, -modulus);
}

// Stash rax in tmp, or push it if the caller has no free register.
void MacroAssembler::save_rax(Register tmp) {
  if (tmp == noreg) push(rax);
  else if (tmp != rax) mov(tmp, rax);
}
// Write serialization page so VM thread can do a pseudo remote membar.
// We use the current thread pointer to calculate a thread specific
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
void MacroAssembler::serialize_memory(Register thread, Register tmp) {
  movl(tmp, thread);
  shrl(tmp, os::get_serialize_page_shift_count());
  andl(tmp, (os::vm_page_size() - sizeof(int)));   // keep offset word-aligned within the page

  Address index(noreg, tmp, Address::times_1);
  ExternalAddress page(os::get_memory_serialize_page());

  movptr(ArrayAddress(page, index), tmp);
}
// Calls to C land
//
// When entering C land, the rbp, & rsp of the last Java frame have to be recorded
// in the (thread-local) JavaThread object. When leaving C land, the last Java fp
// has to be reset to 0. This is required to allow proper stack traversal.
// sp is stored last: once it is non-null the frame is considered walkable.
void MacroAssembler::set_last_Java_frame(Register java_thread,
                                         Register last_java_sp,
                                         Register last_java_fp,
                                         address  last_java_pc) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
    java_thread = rdi;
    get_thread(java_thread);
  }
  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = rsp;
  }

  // last_java_fp is optional

  if (last_java_fp->is_valid()) {
    movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), last_java_fp);
  }

  // last_java_pc is optional

  if (last_java_pc != NULL) {
    lea(Address(java_thread,
                 JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()),
        InternalAddress(last_java_pc));

  }
  movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
}
6501 void MacroAssembler::shlptr(Register dst, int imm8) {
6502 LP64_ONLY(shlq(dst, imm8)) NOT_LP64(shll(dst, imm8));
6503 }
6505 void MacroAssembler::shrptr(Register dst, int imm8) {
6506 LP64_ONLY(shrq(dst, imm8)) NOT_LP64(shrl(dst, imm8));
6507 }
// Sign-extend the low byte of reg into the full 32-bit register.
// Always uses movsx on 64-bit; on 32-bit it requires P6+ and a register
// with a byte-addressable encoding, otherwise falls back to a shift pair.
void MacroAssembler::sign_extend_byte(Register reg) {
  if (LP64_ONLY(true ||) (VM_Version::is_P6() && reg->has_byte_register())) {
    movsbl(reg, reg); // movsxb
  } else {
    // shift the byte into the top, then arithmetic-shift it back down
    shll(reg, 24);
    sarl(reg, 24);
  }
}
// Sign-extend the low 16 bits of reg into the full 32-bit register.
// Always uses movsx on 64-bit or P6+; otherwise falls back to a shift pair.
void MacroAssembler::sign_extend_short(Register reg) {
  if (LP64_ONLY(true ||) VM_Version::is_P6()) {
    movswl(reg, reg); // movsxw
  } else {
    // shift the halfword into the top, then arithmetic-shift it back down
    shll(reg, 16);
    sarl(reg, 16);
  }
}
6527 //////////////////////////////////////////////////////////////////////////////////
6528 #ifndef SERIALGC
// G1 SATB pre-barrier: when concurrent marking is in progress, record the
// previous (about to be overwritten) value of the field at (obj, 0) in the
// thread-local SATB queue, falling back to the SharedRuntime::g1_wb_pre
// runtime call when the queue buffer has no room (index == 0).
// On LP64 the thread is implicitly r15_thread; on 32-bit it is passed in.
// tmp and tmp2 are scratch.  tosca_live indicates rax holds a live value
// that must survive the runtime-call path.
void MacroAssembler::g1_write_barrier_pre(Register obj,
#ifndef _LP64
                                          Register thread,
#endif
                                          Register tmp,
                                          Register tmp2,
                                          bool tosca_live) {
  LP64_ONLY(Register thread = r15_thread;)
  Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                       PtrQueue::byte_offset_of_active()));

  Address index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                 PtrQueue::byte_offset_of_index()));
  Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                  PtrQueue::byte_offset_of_buf()));

  Label done;
  Label runtime;

  // if (!marking_in_progress) goto done;
  if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
    cmpl(in_progress, 0);
  } else {
    assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
    cmpb(in_progress, 0);
  }
  jcc(Assembler::equal, done);

  // if (x.f == NULL) goto done;
  cmpptr(Address(obj, 0), NULL_WORD);
  jcc(Assembler::equal, done);

  // Can we store original value in the thread's buffer?
  // (index == 0 means no room; take the runtime path)

  LP64_ONLY(movslq(tmp, index);)
  movptr(tmp2, Address(obj, 0));
#ifdef _LP64
  cmpq(tmp, 0);
#else
  cmpl(index, 0);
#endif
  jcc(Assembler::equal, runtime);
  // Room available: decrement the index by one word and store the old value
  // at buffer + new_index.
#ifdef _LP64
  subq(tmp, wordSize);
  movl(index, tmp);
  addq(tmp, buffer);
#else
  subl(index, wordSize);
  movl(tmp, buffer);
  addl(tmp, index);
#endif
  movptr(Address(tmp, 0), tmp2);
  jmp(done);
  bind(runtime);
  // save the live input values
  if(tosca_live) push(rax);
  push(obj);
#ifdef _LP64
  movq(c_rarg0, Address(obj, 0));
  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), c_rarg0, r15_thread);
#else
  push(thread);
  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), tmp2, thread);
  pop(thread);
#endif
  pop(obj);
  if(tosca_live) pop(rax);
  bind(done);

}
// G1 post-barrier: after new_val has been stored at store_addr, dirty the
// covering card and enqueue its address on the thread-local dirty card
// queue -- but only for stores that cross heap regions, store non-NULL, and
// hit a card that is not already dirty.  Falls back to the
// SharedRuntime::g1_wb_post runtime call when the queue is full.
// On LP64 the thread is implicitly r15_thread; on 32-bit it is passed in.
void MacroAssembler::g1_write_barrier_post(Register store_addr,
                                           Register new_val,
#ifndef _LP64
                                           Register thread,
#endif
                                           Register tmp,
                                           Register tmp2) {

  LP64_ONLY(Register thread = r15_thread;)
  Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                       PtrQueue::byte_offset_of_index()));
  Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                  PtrQueue::byte_offset_of_buf()));
  BarrierSet* bs = Universe::heap()->barrier_set();
  CardTableModRefBS* ct = (CardTableModRefBS*)bs;
  Label done;
  Label runtime;

  // Does store cross heap regions?
  // (xor of the two addresses is zero above the region-size bits iff both
  // lie in the same region; shr sets ZF accordingly)

  movptr(tmp, store_addr);
  xorptr(tmp, new_val);
  shrptr(tmp, HeapRegion::LogOfHRGrainBytes);
  jcc(Assembler::equal, done);

  // crosses regions, storing NULL?

  cmpptr(new_val, (int32_t) NULL_WORD);
  jcc(Assembler::equal, done);

  // storing region crossing non-NULL, is card already dirty?

  ExternalAddress cardtable((address) ct->byte_map_base);
  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
#ifdef _LP64
  const Register card_addr = tmp;

  movq(card_addr, store_addr);
  shrq(card_addr, CardTableModRefBS::card_shift);

  lea(tmp2, cardtable);

  // get the address of the card
  addq(card_addr, tmp2);
#else
  const Register card_index = tmp;

  movl(card_index, store_addr);
  shrl(card_index, CardTableModRefBS::card_shift);

  // card_addr aliases card_index (both are tmp); the lea below replaces the
  // card index with the card's absolute address.
  Address index(noreg, card_index, Address::times_1);
  const Register card_addr = tmp;
  lea(card_addr, as_Address(ArrayAddress(cardtable, index)));
#endif
  // 0 marks a dirty card (see the movb below); skip if already dirty.
  cmpb(Address(card_addr, 0), 0);
  jcc(Assembler::equal, done);

  // storing a region crossing, non-NULL oop, card is clean.
  // dirty card and log.

  movb(Address(card_addr, 0), 0);

  // Enqueue the card address unless the queue is full (index == 0), in
  // which case hand off to the runtime.
  cmpl(queue_index, 0);
  jcc(Assembler::equal, runtime);
  subl(queue_index, wordSize);
  movptr(tmp2, buffer);
#ifdef _LP64
  movslq(rscratch1, queue_index);
  addq(tmp2, rscratch1);
  movq(Address(tmp2, 0), card_addr);
#else
  addl(tmp2, queue_index);
  movl(Address(tmp2, 0), card_index);
#endif
  jmp(done);

  bind(runtime);
  // save the live input values
  push(store_addr);
  push(new_val);
#ifdef _LP64
  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, r15_thread);
#else
  push(thread);
  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
  pop(thread);
#endif
  pop(new_val);
  pop(store_addr);

  bind(done);

}
6696 #endif // SERIALGC
6697 //////////////////////////////////////////////////////////////////////////////////
// Card-table store check: dirty the card covering the object in obj.
void MacroAssembler::store_check(Register obj) {
  // Does a store check for the oop in register obj. The content of
  // register obj is destroyed afterwards.
  store_check_part_1(obj);
  store_check_part_2(obj);
}
// Address-taking variant; dst is ignored because the card is derived from
// the object address alone.
void MacroAssembler::store_check(Register obj, Address dst) {
  store_check(obj);
}
// split the store check operation so that other instructions can be scheduled inbetween
// Part 1: convert the object address in obj into a card-table index.
void MacroAssembler::store_check_part_1(Register obj) {
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
  shrptr(obj, CardTableModRefBS::card_shift);
}
// Part 2: store 0 (dirty) into the card byte indexed by obj, which was
// already shifted to a card index by store_check_part_1.
void MacroAssembler::store_check_part_2(Register obj) {
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
  CardTableModRefBS* ct = (CardTableModRefBS*)bs;
  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

  // The calculation for byte_map_base is as follows:
  // byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  // So this essentially converts an address to a displacement and
  // it will never need to be relocated. On 64bit however the value may be too
  // large for a 32bit displacement

  intptr_t disp = (intptr_t) ct->byte_map_base;
  if (is_simm32(disp)) {
    // byte_map_base fits in a 32-bit displacement: single movb.
    Address cardtable(noreg, obj, Address::times_1, disp);
    movb(cardtable, 0);
  } else {
    // By doing it as an ExternalAddress disp could be converted to a rip-relative
    // displacement and done in a single instruction given favorable mapping and
    // a smarter version of as_Address. Worst case it is two instructions which
    // is no worse off then loading disp into a register and doing as a simple
    // Address() as above.
    // We can't do as ExternalAddress as the only style since if disp == 0 we'll
    // assert since NULL isn't acceptable in a reloci (see 6644928). In any case
    // in some cases we'll get a single instruction version.

    ExternalAddress cardtable((address)disp);
    Address index(noreg, obj, Address::times_1);
    movb(as_Address(ArrayAddress(cardtable, index)), 0);
  }
}
6751 void MacroAssembler::subptr(Register dst, int32_t imm32) {
6752 LP64_ONLY(subq(dst, imm32)) NOT_LP64(subl(dst, imm32));
6753 }
6755 void MacroAssembler::subptr(Register dst, Register src) {
6756 LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src));
6757 }
// 32-bit test of src1 against a memory operand given as an AddressLiteral,
// materializing the address in rscratch1 when it is not directly encodable.
void MacroAssembler::test32(Register src1, AddressLiteral src2) {
  // src2 must be rval

  if (reachable(src2)) {
    testl(src1, as_Address(src2));
  } else {
    lea(rscratch1, src2);
    testl(src1, Address(rscratch1, 0));
  }
}
6770 // C++ bool manipulation
6771 void MacroAssembler::testbool(Register dst) {
6772 if(sizeof(bool) == 1)
6773 testb(dst, 0xff);
6774 else if(sizeof(bool) == 2) {
6775 // testw implementation needed for two byte bools
6776 ShouldNotReachHere();
6777 } else if(sizeof(bool) == 4)
6778 testl(dst, dst);
6779 else
6780 // unsupported
6781 ShouldNotReachHere();
6782 }
6784 void MacroAssembler::testptr(Register dst, Register src) {
6785 LP64_ONLY(testq(dst, src)) NOT_LP64(testl(dst, src));
6786 }
// Inline TLAB allocation: bump-allocate con_size_in_bytes (or, if a valid
// register is given, var_size_in_bytes) from the current thread's TLAB,
// leaving the start of the new object in obj.  Jumps to slow_case when the
// TLAB has insufficient room.
// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
void MacroAssembler::tlab_allocate(Register obj,
                                   Register var_size_in_bytes,
                                   int con_size_in_bytes,
                                   Register t1,
                                   Register t2,
                                   Label& slow_case) {
  assert_different_registers(obj, t1, t2);
  assert_different_registers(obj, var_size_in_bytes, t1);
  Register end = t2;
  Register thread = NOT_LP64(t1) LP64_ONLY(r15_thread);

  verify_tlab();

  NOT_LP64(get_thread(thread));

  // obj = top; end = top + size
  movptr(obj, Address(thread, JavaThread::tlab_top_offset()));
  if (var_size_in_bytes == noreg) {
    lea(end, Address(obj, con_size_in_bytes));
  } else {
    lea(end, Address(obj, var_size_in_bytes, Address::times_1));
  }
  cmpptr(end, Address(thread, JavaThread::tlab_end_offset()));
  jcc(Assembler::above, slow_case);

  // update the tlab top pointer
  movptr(Address(thread, JavaThread::tlab_top_offset()), end);

  // recover var_size_in_bytes if necessary
  // (end == obj + size, so subtracting obj restores the size)
  if (var_size_in_bytes == end) {
    subptr(var_size_in_bytes, obj);
  }
  verify_tlab();
}
// Preserves rbx, and rdx.
// Refill the current thread's TLAB from eden.  Branches to try_eden when
// the remaining free space in the TLAB is too large to discard (the TLAB is
// retained and allocation goes to the shared eden instead), and to
// slow_case when inline contiguous allocation is unavailable or eden
// allocation fails.  Jumps back to retry on success.
void MacroAssembler::tlab_refill(Label& retry,
                                 Label& try_eden,
                                 Label& slow_case) {
  Register top = rax;
  Register t1  = rcx;
  Register t2  = rsi;
  Register thread_reg = NOT_LP64(rdi) LP64_ONLY(r15_thread);
  assert_different_registers(top, thread_reg, t1, t2, /* preserve: */ rbx, rdx);
  Label do_refill, discard_tlab;

  if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
    // No allocation in the shared eden.
    jmp(slow_case);
  }

  NOT_LP64(get_thread(thread_reg));

  movptr(top, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
  movptr(t1,  Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));

  // calculate amount of free space
  subptr(t1, top);
  shrptr(t1, LogHeapWordSize);

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())));
  jcc(Assembler::lessEqual, discard_tlab);

  // Retain
  // %%% yuck as movptr...
  movptr(t2, (int32_t) ThreadLocalAllocBuffer::refill_waste_limit_increment());
  addptr(Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())), t2);
  if (TLABStats) {
    // increment number of slow_allocations
    addl(Address(thread_reg, in_bytes(JavaThread::tlab_slow_allocations_offset())), 1);
  }
  jmp(try_eden);

  bind(discard_tlab);
  if (TLABStats) {
    // increment number of refills
    addl(Address(thread_reg, in_bytes(JavaThread::tlab_number_of_refills_offset())), 1);
    // accumulate wastage -- t1 is amount free in tlab
    addl(Address(thread_reg, in_bytes(JavaThread::tlab_fast_refill_waste_offset())), t1);
  }

  // if tlab is currently allocated (top or end != null) then
  // fill [top, end + alignment_reserve) with array object
  testptr (top, top);
  jcc(Assembler::zero, do_refill);

  // set up the mark word
  movptr(Address(top, oopDesc::mark_offset_in_bytes()), (intptr_t)markOopDesc::prototype()->copy_set_hash(0x2));
  // set the length to the remaining space
  subptr(t1, typeArrayOopDesc::header_size(T_INT));
  addptr(t1, (int32_t)ThreadLocalAllocBuffer::alignment_reserve());
  shlptr(t1, log2_intptr(HeapWordSize/sizeof(jint)));
  movptr(Address(top, arrayOopDesc::length_offset_in_bytes()), t1);
  // set klass to intArrayKlass
  // dubious reloc why not an oop reloc?
  movptr(t1, ExternalAddress((address) Universe::intArrayKlassObj_addr()));
  // store klass last.  concurrent gcs assumes klass length is valid if
  // klass field is not null.
  store_klass(top, t1);

  // refill the tlab with an eden allocation
  bind(do_refill);
  movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
  shlptr(t1, LogHeapWordSize);
  // add object_size ??
  eden_allocate(top, t1, 0, t2, slow_case);

  // Check that t1 was preserved in eden_allocate.
#ifdef ASSERT
  if (UseTLAB) {
    Label ok;
    Register tsize = rsi;
    assert_different_registers(tsize, thread_reg, t1);
    push(tsize);
    movptr(tsize, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
    shlptr(tsize, LogHeapWordSize);
    cmpptr(t1, tsize);
    jcc(Assembler::equal, ok);
    stop("assert(t1 != tlab size)");
    should_not_reach_here();

    bind(ok);
    pop(tsize);
  }
#endif
  // Install the new TLAB bounds: [top, top + t1 - alignment_reserve).
  movptr(Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())), top);
  movptr(Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())), top);
  addptr(top, t1);
  subptr(top, (int32_t)ThreadLocalAllocBuffer::alignment_reserve_in_bytes());
  movptr(Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())), top);
  verify_tlab();
  jmp(retry);
}
// pi/4 -- the argument-range threshold used by trigfunc() below to decide
// between the hardware fsin/fcos/ftan path and the runtime call.
static const double pi_4 = 0.7853981633974483;
// Compute sin ('s'), cos ('c') or tan ('t') of the value on the x87 stack
// top, leaving the result in F-TOS.  Arguments in [-pi/4, pi/4] use the
// hardware instruction directly; larger arguments go through the
// SharedRuntime dsin/dcos/dtan calls.  num_fpu_regs_in_use says how many
// FPU stack slots are live and must be preserved across that call.
void MacroAssembler::trigfunc(char trig, int num_fpu_regs_in_use) {
  // A hand-coded argument reduction for values in fabs(pi/4, pi/2)
  // was attempted in this code; unfortunately it appears that the
  // switch to 80-bit precision and back causes this to be
  // unprofitable compared with simply performing a runtime call if
  // the argument is out of the (-pi/4, pi/4) range.

  Register tmp = noreg;
  if (!VM_Version::supports_cmov()) {
    // fcmp needs a temporary so preserve rbx,
    tmp = rbx;
    push(tmp);
  }

  Label slow_case, done;

  // x ?<= pi/4
  fld_d(ExternalAddress((address)&pi_4));
  fld_s(1);                // Stack:  X  PI/4  X
  fabs();                  // Stack: |X|  PI/4  X
  fcmp(tmp);
  jcc(Assembler::above, slow_case);

  // fastest case: -pi/4 <= x <= pi/4
  switch(trig) {
  case 's':
    fsin();
    break;
  case 'c':
    fcos();
    break;
  case 't':
    ftan();
    break;
  default:
    assert(false, "bad intrinsic");
    break;
  }
  jmp(done);

  // slow case: runtime call
  bind(slow_case);
  // Preserve registers across runtime call
  // (pusha saves only the integer registers; FPU regs are spilled by hand
  // below when more than one is live)
  pusha();
  int incoming_argument_and_return_value_offset = -1;
  if (num_fpu_regs_in_use > 1) {
    // Must preserve all other FPU regs (could alternatively convert
    // SharedRuntime::dsin and dcos into assembly routines known not to trash
    // FPU state, but can not trust C compiler)
    NEEDS_CLEANUP;
    // NOTE that in this case we also push the incoming argument to
    // the stack and restore it later; we also use this stack slot to
    // hold the return value from dsin or dcos.
    for (int i = 0; i < num_fpu_regs_in_use; i++) {
      subptr(rsp, sizeof(jdouble));
      fstp_d(Address(rsp, 0));
    }
    incoming_argument_and_return_value_offset = sizeof(jdouble)*(num_fpu_regs_in_use-1);
    fld_d(Address(rsp, incoming_argument_and_return_value_offset));
  }
  // Spill the argument; on LP64 it is passed in xmm0 instead of on the stack.
  subptr(rsp, sizeof(jdouble));
  fstp_d(Address(rsp, 0));
#ifdef _LP64
  movdbl(xmm0, Address(rsp, 0));
#endif // _LP64

  // NOTE: we must not use call_VM_leaf here because that requires a
  // complete interpreter frame in debug mode -- same bug as 4387334
  // MacroAssembler::call_VM_leaf_base is perfectly safe and will
  // do proper 64bit abi

  NEEDS_CLEANUP;
  // Need to add stack banging before this runtime call if it needs to
  // be taken; however, there is no generic stack banging routine at
  // the MacroAssembler level
  switch(trig) {
  case 's':
    {
      MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), 0);
    }
    break;
  case 'c':
    {
      MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), 0);
    }
    break;
  case 't':
    {
      MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), 0);
    }
    break;
  default:
    assert(false, "bad intrinsic");
    break;
  }
#ifdef _LP64
  // Result comes back in xmm0; route it onto the x87 stack.
  movsd(Address(rsp, 0), xmm0);
  fld_d(Address(rsp, 0));
#endif // _LP64
  addptr(rsp, sizeof(jdouble));
  if (num_fpu_regs_in_use > 1) {
    // Must save return value to stack and then restore entire FPU stack
    fstp_d(Address(rsp, incoming_argument_and_return_value_offset));
    for (int i = 0; i < num_fpu_regs_in_use; i++) {
      fld_d(Address(rsp, 0));
      addptr(rsp, sizeof(jdouble));
    }
  }
  popa();

  // Come here with result in F-TOS
  bind(done);

  if (tmp != noreg) {
    pop(tmp);
  }
}
7045 void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src) {
7046 ucomisd(dst, as_Address(src));
7047 }
7049 void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src) {
7050 ucomiss(dst, as_Address(src));
7051 }
7053 void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src) {
7054 if (reachable(src)) {
7055 xorpd(dst, as_Address(src));
7056 } else {
7057 lea(rscratch1, src);
7058 xorpd(dst, Address(rscratch1, 0));
7059 }
7060 }
7062 void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src) {
7063 if (reachable(src)) {
7064 xorps(dst, as_Address(src));
7065 } else {
7066 lea(rscratch1, src);
7067 xorps(dst, Address(rscratch1, 0));
7068 }
7069 }
// Emit a call to the verify_oop stub for the oop in reg, tagged with
// message s (guarded by the VerifyOops flag).  The stub is expected to pop
// the pushed arguments and restore rax.
void MacroAssembler::verify_oop(Register reg, const char* s) {
  if (!VerifyOops) return;

  // Pass register number to verify_oop_subroutine
  // NOTE(review): b is never freed on purpose -- its address is embedded in
  // the generated code below and must remain valid for the code's lifetime.
  char* b = new char[strlen(s) + 50];
  sprintf(b, "verify_oop: %s: %s", reg->name(), s);
  push(rax);                          // save rax,
  push(reg);                          // pass register argument
  ExternalAddress buffer((address) b);
  // avoid using pushptr, as it modifies scratch registers
  // and our contract is not to modify anything
  movptr(rax, buffer.addr());
  push(rax);
  // call indirectly to solve generation ordering problem
  movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
  call(rax);
}
// Emit a call to the verify_oop stub for the oop stored at addr, tagged
// with message s (guarded by the VerifyOops flag).
void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
  if (!VerifyOops) return;

  // Address adjust(addr.base(), addr.index(), addr.scale(), addr.disp() + BytesPerWord);
  // Pass register number to verify_oop_subroutine
  // NOTE(review): b is never freed on purpose -- its address is embedded in
  // the generated code below and must remain valid for the code's lifetime.
  char* b = new char[strlen(s) + 50];
  sprintf(b, "verify_oop_addr: %s", s);

  push(rax);                          // save rax,
  // addr may contain rsp so we will have to adjust it based on the push
  // we just did
  // NOTE: 64bit seemed to have had a bug in that it did movq(addr, rax); which
  // stores rax into addr which is backwards of what was intended.
  if (addr.uses(rsp)) {
    lea(rax, addr);
    pushptr(Address(rax, BytesPerWord));
  } else {
    pushptr(addr);
  }

  ExternalAddress buffer((address) b);
  // pass msg argument
  // avoid using pushptr, as it modifies scratch registers
  // and our contract is not to modify anything
  movptr(rax, buffer.addr());
  push(rax);

  // call indirectly to solve generation ordering problem
  movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
  call(rax);
  // Caller pops the arguments and restores rax, from the stack
}
// Debug-only sanity check of the current thread's TLAB pointers:
// start <= top <= end.  Emits nothing unless both UseTLAB and VerifyOops
// are set, and only in ASSERT builds.
void MacroAssembler::verify_tlab() {
#ifdef ASSERT
  if (UseTLAB && VerifyOops) {
    Label next, ok;
    Register t1 = rsi;
    Register thread_reg = NOT_LP64(rbx) LP64_ONLY(r15_thread);

    push(t1);
    NOT_LP64(push(thread_reg));
    NOT_LP64(get_thread(thread_reg));

    movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
    cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())));
    jcc(Assembler::aboveEqual, next);
    stop("assert(top >= start)");
    should_not_reach_here();

    bind(next);
    movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));
    cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
    jcc(Assembler::aboveEqual, ok);
    stop("assert(top <= end)");
    should_not_reach_here();

    bind(ok);
    NOT_LP64(pop(thread_reg));
    pop(t1);
  }
#endif
}
// x87 FPU control word: rounding control, precision control and the
// exception mask bits, with a printable summary for debugging.
class ControlWord {
 public:
  int32_t _value;

  int  rounding_control()  const { return (_value >> 10) & 3; }
  int  precision_control() const { return (_value >>  8) & 3; }
  bool precision()         const { return (_value & (1 << 5)) != 0; }
  bool underflow()         const { return (_value & (1 << 4)) != 0; }
  bool overflow()          const { return (_value & (1 << 3)) != 0; }
  bool zero_divide()       const { return (_value & (1 << 2)) != 0; }
  bool denormalized()      const { return (_value & (1 << 1)) != 0; }
  bool invalid()           const { return (_value & (1 << 0)) != 0; }

  void print() const {
    // rounding control / precision control names, indexed by field value
    static const char* const rc_name[] = { "round near", "round down", "round up ", "chop " };
    static const char* const pc_name[] = { "24 bits ", "reserved", "53 bits ", "64 bits " };
    // exception masks: upper-case letter when the mask bit is set
    char fl[9];
    fl[0] = ' ';
    fl[1] = ' ';
    fl[2] = precision   () ? 'P' : 'p';
    fl[3] = underflow   () ? 'U' : 'u';
    fl[4] = overflow    () ? 'O' : 'o';
    fl[5] = zero_divide () ? 'Z' : 'z';
    fl[6] = denormalized() ? 'D' : 'd';
    fl[7] = invalid     () ? 'I' : 'i';
    fl[8] = '\x0';
    // output
    printf("%04x masks = %s, %s, %s", _value & 0xFFFF, fl,
           rc_name[rounding_control()], pc_name[precision_control()]);
  }

};
// x87 FPU status word: busy flag, condition codes C0-C3, top-of-stack
// pointer and the exception flag bits, with a printable summary.
class StatusWord {
 public:
  int32_t _value;

  bool busy()         const { return (_value & (1 << 15)) != 0; }
  bool C3()           const { return (_value & (1 << 14)) != 0; }
  bool C2()           const { return (_value & (1 << 10)) != 0; }
  bool C1()           const { return (_value & (1 <<  9)) != 0; }
  bool C0()           const { return (_value & (1 <<  8)) != 0; }
  int  top()          const { return (_value >> 11) & 7; }
  bool error_status() const { return (_value & (1 << 7)) != 0; }
  bool stack_fault()  const { return (_value & (1 << 6)) != 0; }
  bool precision()    const { return (_value & (1 << 5)) != 0; }
  bool underflow()    const { return (_value & (1 << 4)) != 0; }
  bool overflow()     const { return (_value & (1 << 3)) != 0; }
  bool zero_divide()  const { return (_value & (1 << 2)) != 0; }
  bool denormalized() const { return (_value & (1 << 1)) != 0; }
  bool invalid()      const { return (_value & (1 << 0)) != 0; }

  void print() const {
    // condition codes ('-' when clear)
    char cc[5];
    cc[0] = C3() ? '3' : '-';
    cc[1] = C2() ? '2' : '-';
    cc[2] = C1() ? '1' : '-';
    cc[3] = C0() ? '0' : '-';
    cc[4] = '\x0';
    // exception flags ('-' when clear)
    char fl[9];
    fl[0] = error_status() ? 'E' : '-';
    fl[1] = stack_fault () ? 'S' : '-';
    fl[2] = precision   () ? 'P' : '-';
    fl[3] = underflow   () ? 'U' : '-';
    fl[4] = overflow    () ? 'O' : '-';
    fl[5] = zero_divide () ? 'Z' : '-';
    fl[6] = denormalized() ? 'D' : '-';
    fl[7] = invalid     () ? 'I' : '-';
    fl[8] = '\x0';
    // output
    printf("%04x flags = %s, cc = %s, top = %d", _value & 0xFFFF, fl, cc, top());
  }

};
// x87 FPU tag word: two tag bits per physical register.
class TagWord {
 public:
  int32_t _value;

  // Tag bits for physical register i.
  int tag_at(int i) const { return (_value >> (2 * i)) & 3; }

  void print() const {
    printf("%04x", _value & 0xFFFF);
  }

};
// One 80-bit x87 register: 64-bit mantissa split over two 32-bit words plus
// a 16-bit sign/exponent word.
class FPU_Register {
 public:
  int32_t _m0;
  int32_t _m1;
  int16_t _ex;

  // True iff the register holds the x87 "indefinite" bit pattern
  // (sign/exponent all ones, mantissa 0xC0000000_00000000).
  bool is_indefinite() const {
    return _ex == -1 && _m1 == (int32_t)0xC0000000 && _m0 == 0;
  }

  void print() const {
    const char sign   = (_ex < 0) ? '-' : '+';
    const bool is_nan = (_ex == 0x7FFF || _ex == (int16_t)-1);
    printf("%c%04hx.%08x%08x %s", sign, _ex, _m1, _m0, is_nan ? "NaN" : " ");
  }

};
// Snapshot of the full x87 FPU state: control/status/tag words, error and
// data pointers, and the eight 80-bit registers, laid out to match the
// memory image the state-save writes.
class FPU_State {
public:
  enum {
    register_size       = 10,   // bytes per 80-bit register image
    number_of_registers =  8,
    register_mask       =  7
  };

  ControlWord  _control_word;
  StatusWord   _status_word;
  TagWord      _tag_word;
  int32_t      _error_offset;
  int32_t      _error_selector;
  int32_t      _data_offset;
  int32_t      _data_selector;
  int8_t       _register[register_size * number_of_registers];

  // Tag of stack-relative register ST(i): physical index is top + i,
  // wrapped by register_mask.
  int tag_for_st(int i) const { return _tag_word.tag_at((_status_word.top() + i) & register_mask); }
  // Physical register i (NOT stack-relative).
  FPU_Register* st(int i) const { return (FPU_Register*)&_register[register_size * i]; }

  const char* tag_as_string(int tag) const {
    switch (tag) {
      case 0: return "valid";
      case 1: return "zero";
      case 2: return "special";
      case 3: return "empty";
    }
    // NOTE(review): no ';' after the macro call -- relies on the macro
    // expansion forming a complete statement.
    ShouldNotReachHere()
    return NULL;
  }

  void print() const {
    // print computation registers
    { int t = _status_word.top();
      for (int i = 0; i < number_of_registers; i++) {
        // j is the stack position of physical register i
        int j = (i - t) & register_mask;
        printf("%c r%d = ST%d = ", (j == 0 ? '*' : ' '), i, j);
        st(j)->print();
        printf(" %s\n", tag_as_string(_tag_word.tag_at(i)));
      }
    }
    printf("\n");
    // print control registers
    printf("ctrl = "); _control_word.print(); printf("\n");
    printf("stat = "); _status_word .print(); printf("\n");
    printf("tags = "); _tag_word    .print(); printf("\n");
  }

};
// Integer-unit flags register (eflags) with accessors for the individual
// condition bits and a printable summary.
class Flag_Register {
 public:
  int32_t _value;

  bool overflow()        const { return (_value & (1 << 11)) != 0; }
  bool direction()       const { return (_value & (1 << 10)) != 0; }
  bool sign()            const { return (_value & (1 <<  7)) != 0; }
  bool zero()            const { return (_value & (1 <<  6)) != 0; }
  bool auxiliary_carry() const { return (_value & (1 <<  4)) != 0; }
  bool parity()          const { return (_value & (1 <<  2)) != 0; }
  bool carry()           const { return (_value & (1 <<  0)) != 0; }

  void print() const {
    // one letter per flag, '-' when clear
    char fl[8];
    fl[0] = overflow       () ? 'O' : '-';
    fl[1] = direction      () ? 'D' : '-';
    fl[2] = sign           () ? 'S' : '-';
    fl[3] = zero           () ? 'Z' : '-';
    fl[4] = auxiliary_carry() ? 'A' : '-';
    fl[5] = parity         () ? 'P' : '-';
    fl[6] = carry          () ? 'C' : '-';
    fl[7] = '\x0';
    // output
    printf("%08x flags = %s", _value, fl);
  }

};
// A single 32-bit integer register value, printed as hex and decimal.
class IU_Register {
 public:
  int32_t _value;

  void print() const {
    printf("%08x %11d", _value, _value);
  }

};
// Saved integer-unit state: eflags plus the eight general registers.
// NOTE(review): field order (eflags first, then rdi..rax) presumably
// mirrors the pushf/pusha save layout in push_CPU_state -- confirm there.
class IU_State {
 public:
  Flag_Register _eflags;
  IU_Register   _rdi;
  IU_Register   _rsi;
  IU_Register   _rbp;
  IU_Register   _rsp;
  IU_Register   _rbx;
  IU_Register   _rdx;
  IU_Register   _rcx;
  IU_Register   _rax;

  void print() const {
    // computation registers
    printf("rax,  = "); _rax.print(); printf("\n");
    printf("rbx,  = "); _rbx.print(); printf("\n");
    printf("rcx  = "); _rcx.print(); printf("\n");
    printf("rdx  = "); _rdx.print(); printf("\n");
    printf("rdi  = "); _rdi.print(); printf("\n");
    printf("rsi  = "); _rsi.print(); printf("\n");
    printf("rbp,  = "); _rbp.print(); printf("\n");
    printf("rsp  = "); _rsp.print(); printf("\n");
    printf("\n");
    // control registers
    printf("flgs = "); _eflags.print(); printf("\n");
  }
};
// Complete saved CPU state (x87 FPU state followed by the integer unit
// state) as handed to _print_CPU_state / _verify_FPU below.
class CPU_State {
 public:
  FPU_State _fpu_state;
  IU_State  _iu_state;

  void print() const {
    printf("--------------------------------------------------\n");
    _iu_state .print();
    printf("\n");
    _fpu_state.print();
    printf("--------------------------------------------------\n");
  }

};
// Runtime leaf target for print_CPU_state(): just forwards to
// CPU_State::print().
static void _print_CPU_state(CPU_State* state) {
  state->print();
};
// Emit code that dumps the current CPU state: save the full state on the
// stack, pass its address (rsp) to _print_CPU_state, then restore.
void MacroAssembler::print_CPU_state() {
  push_CPU_state();
  push(rsp);                // pass CPU state
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state)));
  addptr(rsp, wordSize);    // discard argument
  pop_CPU_state();
}
// Runtime leaf called from MacroAssembler::verify_FPU with a saved CPU
// state.  Checks that the x87 tag word describes a contiguous register
// stack of the expected depth; prints the state, asserts and returns false
// on any violation.  A negative stack_depth only requires the top slot
// (ST7) to be empty.
static bool _verify_FPU(int stack_depth, char* s, CPU_State* state) {
  static int counter = 0;
  FPU_State* fs = &state->_fpu_state;
  counter++;
  // For leaf calls, only verify that the top few elements remain empty.
  // We only need 1 empty at the top for C2 code.
  if( stack_depth < 0 ) {
    if( fs->tag_for_st(7) != 3 ) {
      printf("FPR7 not empty\n");
      state->print();
      assert(false, "error");
      return false;
    }
    return true;                // All other stack states do not matter
  }

  assert((fs->_control_word._value & 0xffff) == StubRoutines::_fpu_cntrl_wrd_std,
         "bad FPU control word");

  // compute stack depth
  int i = 0;
  while (i < FPU_State::number_of_registers && fs->tag_for_st(i)  < 3) i++;
  int d = i;
  while (i < FPU_State::number_of_registers && fs->tag_for_st(i) == 3) i++;
  // verify findings
  if (i != FPU_State::number_of_registers) {
    // stack not contiguous
    printf("%s: stack not contiguous at ST%d\n", s, i);
    state->print();
    assert(false, "error");
    return false;
  }
  // check if computed stack depth corresponds to expected stack depth
  // NOTE(review): the stack_depth < 0 arm below is unreachable -- that case
  // already returned at the top of this function; only the else arm runs.
  if (stack_depth < 0) {
    // expected stack depth is -stack_depth or less
    if (d > -stack_depth) {
      // too many elements on the stack
      printf("%s: <= %d stack elements expected but found %d\n", s, -stack_depth, d);
      state->print();
      assert(false, "error");
      return false;
    }
  } else {
    // expected stack depth is stack_depth
    if (d != stack_depth) {
      // wrong stack depth
      printf("%s: %d stack elements expected but found %d\n", s, stack_depth, d);
      state->print();
      assert(false, "error");
      return false;
    }
  }
  // everything is cool
  return true;
}
// Emit code that verifies the FPU stack depth at this point in generated
// code (guarded by the VerifyFPU flag): save the CPU state, call
// _verify_FPU(stack_depth, s, state), and break into the debugger (int3)
// when it reports an error.
void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
  if (!VerifyFPU) return;
  push_CPU_state();
  push(rsp);                // pass CPU state
  ExternalAddress msg((address) s);
  // pass message string s
  pushptr(msg.addr());
  push(stack_depth);        // pass stack depth
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, _verify_FPU)));
  addptr(rsp, 3 * wordSize);   // discard arguments
  // check for error
  { Label L;
    testl(rax, rax);
    jcc(Assembler::notZero, L);
    int3();                  // break if error condition
    bind(L);
  }
  pop_CPU_state();
}
// Load the klass pointer of the object in src into dst, decompressing the
// narrow (32-bit) form when compressed oops are enabled on LP64.
void MacroAssembler::load_klass(Register dst, Register src) {
#ifdef _LP64
  if (UseCompressedOops) {
    movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
    decode_heap_oop_not_null(dst);
  } else
#endif
  movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
}
// Load the prototype header of src's klass into dst.  With compressed oops
// the narrow klass value is used directly as a scaled index off
// r12_heapbase, avoiding a separate decompression step.
void MacroAssembler::load_prototype_header(Register dst, Register src) {
#ifdef _LP64
  if (UseCompressedOops) {
    movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
    movq(dst, Address(r12_heapbase, dst, Address::times_8, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
  } else
#endif
  {
    movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
    movptr(dst, Address(dst, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
  }
}
7523 void MacroAssembler::store_klass(Register dst, Register src) {
7524 #ifdef _LP64
7525 if (UseCompressedOops) {
7526 encode_heap_oop_not_null(src);
7527 movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
7528 } else
7529 #endif
7530 movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
7531 }
7533 #ifdef _LP64
7534 void MacroAssembler::store_klass_gap(Register dst, Register src) {
7535 if (UseCompressedOops) {
7536 // Store to klass gap in destination
7537 movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
7538 }
7539 }
7541 void MacroAssembler::load_heap_oop(Register dst, Address src) {
7542 if (UseCompressedOops) {
7543 movl(dst, src);
7544 decode_heap_oop(dst);
7545 } else {
7546 movq(dst, src);
7547 }
7548 }
7550 void MacroAssembler::store_heap_oop(Address dst, Register src) {
7551 if (UseCompressedOops) {
7552 assert(!dst.uses(src), "not enough registers");
7553 encode_heap_oop(src);
7554 movl(dst, src);
7555 } else {
7556 movq(dst, src);
7557 }
7558 }
// Algorithm must match oop.inline.hpp encode_heap_oop.
// Compress the oop in r in place: r = (r - heap_base) >> LogMinObjAlignment.
// A NULL oop is first replaced by the heap base via cmov so it encodes to
// narrow value 0.
void MacroAssembler::encode_heap_oop(Register r) {
  assert (UseCompressedOops, "should be compressed");
#ifdef ASSERT
  if (CheckCompressedOops) {
    // Sanity check that r12 still holds the heap base.
    Label ok;
    push(rscratch1); // cmpptr trashes rscratch1
    cmpptr(r12_heapbase, ExternalAddress((address)Universe::heap_base_addr()));
    jcc(Assembler::equal, ok);
    stop("MacroAssembler::encode_heap_oop: heap base corrupted?");
    bind(ok);
    pop(rscratch1);
  }
#endif
  verify_oop(r, "broken oop in encode_heap_oop");
  // If r is NULL, substitute the heap base so the subtraction below yields 0.
  testq(r, r);
  cmovq(Assembler::equal, r, r12_heapbase);
  subq(r, r12_heapbase);
  shrq(r, LogMinObjAlignmentInBytes);
}
// Compress a known-non-NULL oop in r in place (skips the NULL/cmov handling
// that encode_heap_oop needs).
void MacroAssembler::encode_heap_oop_not_null(Register r) {
  assert (UseCompressedOops, "should be compressed");
#ifdef ASSERT
  if (CheckCompressedOops) {
    // Catch callers that violate the not-NULL contract.
    Label ok;
    testq(r, r);
    jcc(Assembler::notEqual, ok);
    stop("null oop passed to encode_heap_oop_not_null");
    bind(ok);
  }
#endif
  verify_oop(r, "broken oop in encode_heap_oop_not_null");
  subq(r, r12_heapbase);
  shrq(r, LogMinObjAlignmentInBytes);
}
// Two-register variant: compress the known-non-NULL oop in src into dst,
// leaving src unchanged when dst != src.
void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
  assert (UseCompressedOops, "should be compressed");
#ifdef ASSERT
  if (CheckCompressedOops) {
    // Catch callers that violate the not-NULL contract.
    Label ok;
    testq(src, src);
    jcc(Assembler::notEqual, ok);
    stop("null oop passed to encode_heap_oop_not_null2");
    bind(ok);
  }
#endif
  verify_oop(src, "broken oop in encode_heap_oop_not_null2");
  // Copy only when the registers differ; the encode then runs on dst.
  if (dst != src) {
    movq(dst, src);
  }
  subq(dst, r12_heapbase);
  shrq(dst, LogMinObjAlignmentInBytes);
}
// Decompress the narrow oop in r in place: r = (r << LogMinObjAlignment) +
// heap_base, mapping narrow 0 back to NULL.
void MacroAssembler::decode_heap_oop(Register r) {
  assert (UseCompressedOops, "should be compressed");
#ifdef ASSERT
  if (CheckCompressedOops) {
    // Sanity check that r12 still holds the heap base.
    Label ok;
    push(rscratch1);
    cmpptr(r12_heapbase,
           ExternalAddress((address)Universe::heap_base_addr()));
    jcc(Assembler::equal, ok);
    stop("MacroAssembler::decode_heap_oop: heap base corrupted?");
    bind(ok);
    pop(rscratch1);
  }
#endif

  Label done;
  // shlq sets ZF when the shifted result is zero, so the jccb below skips
  // the base addition for a narrow NULL (0), leaving r == NULL.
  shlq(r, LogMinObjAlignmentInBytes);
  jccb(Assembler::equal, done);
  addq(r, r12_heapbase);
#if 0
  // alternate decoding probably a wash.
  testq(r, r);
  jccb(Assembler::equal, done);
  leaq(r, Address(r12_heapbase, r, Address::times_8, 0));
#endif
  bind(done);
  verify_oop(r, "broken oop in decode_heap_oop");
}
// Decompress a known-non-NULL narrow oop in place with a single lea:
// r = heap_base + r * 8 (valid because the shift equals times_8 scaling).
void MacroAssembler::decode_heap_oop_not_null(Register r) {
  assert (UseCompressedOops, "should only be used for compressed headers");
  // Cannot assert, unverified entry point counts instructions (see .ad file)
  // vtableStubs also counts instructions in pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
  assert(Address::times_8 == LogMinObjAlignmentInBytes, "decode alg wrong");
  leaq(r, Address(r12_heapbase, r, Address::times_8, 0));
}
// Two-register variant: decompress the known-non-NULL narrow oop in src into
// dst with a single lea; src is left unchanged.
void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
  assert (UseCompressedOops, "should only be used for compressed headers");
  // Cannot assert, unverified entry point counts instructions (see .ad file)
  // vtableStubs also counts instructions in pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
  assert(Address::times_8 == LogMinObjAlignmentInBytes, "decode alg wrong");
  leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
}
// Load the narrow-oop form of obj into dst.  The oop is registered with the
// OopRecorder and emitted as a 32-bit immediate carrying an oop relocation
// with narrow_oop_operand format, so later phases can locate and patch it.
void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);
  mov_literal32(dst, oop_index, rspec, narrow_oop_operand);
}
7670 void MacroAssembler::reinit_heapbase() {
7671 if (UseCompressedOops) {
7672 movptr(r12_heapbase, ExternalAddress((address)Universe::heap_base_addr()));
7673 }
7674 }
7675 #endif // _LP64
7677 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
7678 switch (cond) {
7679 // Note some conditions are synonyms for others
7680 case Assembler::zero: return Assembler::notZero;
7681 case Assembler::notZero: return Assembler::zero;
7682 case Assembler::less: return Assembler::greaterEqual;
7683 case Assembler::lessEqual: return Assembler::greater;
7684 case Assembler::greater: return Assembler::lessEqual;
7685 case Assembler::greaterEqual: return Assembler::less;
7686 case Assembler::below: return Assembler::aboveEqual;
7687 case Assembler::belowEqual: return Assembler::above;
7688 case Assembler::above: return Assembler::belowEqual;
7689 case Assembler::aboveEqual: return Assembler::below;
7690 case Assembler::overflow: return Assembler::noOverflow;
7691 case Assembler::noOverflow: return Assembler::overflow;
7692 case Assembler::negative: return Assembler::positive;
7693 case Assembler::positive: return Assembler::negative;
7694 case Assembler::parity: return Assembler::noParity;
7695 case Assembler::noParity: return Assembler::parity;
7696 }
7697 ShouldNotReachHere(); return Assembler::overflow;
7698 }
// RAII guard: code emitted between construction and destruction is skipped
// at run time when the byte flag at flag_addr equals value.  The constructor
// emits the compare and the conditional jump past the guarded code.
SkipIfEqual::SkipIfEqual(
    MacroAssembler* masm, const bool* flag_addr, bool value) {
  _masm = masm;
  _masm->cmp8(ExternalAddress((address)flag_addr), value);
  _masm->jcc(Assembler::equal, _label);
}
// Bind the skip target, ending the conditionally-skipped region.
SkipIfEqual::~SkipIfEqual() {
  _masm->bind(_label);
}