Tue, 31 Mar 2009 14:07:08 -0700
6761600: Use sse 4.2 in intrinsics
Summary: Use SSE 4.2 in intrinsics for String.{compareTo/equals/indexOf} and Arrays.equals.
Reviewed-by: kvn, never, jrose
1 /*
2 * Copyright 1997-2009 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
25 #include "incls/_precompiled.incl"
26 #include "incls/_assembler_x86.cpp.incl"
28 // Implementation of AddressLiteral
// Build an AddressLiteral for 'target' and attach the relocation record
// implied by 'rtype' so the embedded address can be found and patched later.
AddressLiteral::AddressLiteral(address target, relocInfo::relocType rtype) {
  _is_lval = false;
  _target = target;
  switch (rtype) {
  case relocInfo::oop_type:
    // Oops are a special case. Normally they would be their own section
    // but in cases like icBuffer they are literals in the code stream that
    // we don't have a section for. We use none so that we get a literal address
    // which is always patchable.
    break;
  case relocInfo::external_word_type:
    _rspec = external_word_Relocation::spec(target);
    break;
  case relocInfo::internal_word_type:
    _rspec = internal_word_Relocation::spec(target);
    break;
  case relocInfo::opt_virtual_call_type:
    _rspec = opt_virtual_call_Relocation::spec();
    break;
  case relocInfo::static_call_type:
    _rspec = static_call_Relocation::spec();
    break;
  case relocInfo::runtime_call_type:
    _rspec = runtime_call_Relocation::spec();
    break;
  case relocInfo::poll_type:
  case relocInfo::poll_return_type:
    _rspec = Relocation::spec_simple(rtype);
    break;
  case relocInfo::none:
    // No relocation record: a plain, non-relocated literal.
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}
67 // Implementation of Address
69 #ifdef _LP64
// 64-bit variant: ArrayAddress cannot be expressed as a single x86-64
// addressing mode, so this must never be called on LP64.
Address Address::make_array(ArrayAddress adr) {
  // Not implementable on 64bit machines
  // Should have been handled higher up the call chain.
  ShouldNotReachHere();
  return Address();
}
78 // exceedingly dangerous constructor
// exceedingly dangerous constructor
// Builds an absolute Address (no base/index) whose 32-bit displacement
// is 'disp', with a relocation derived from 'rtype' against location 'loc'.
Address::Address(int disp, address loc, relocInfo::relocType rtype) {
  _base  = noreg;
  _index = noreg;
  _scale = no_scale;
  _disp  = disp;
  switch (rtype) {
    case relocInfo::external_word_type:
      _rspec = external_word_Relocation::spec(loc);
      break;
    case relocInfo::internal_word_type:
      _rspec = internal_word_Relocation::spec(loc);
      break;
    case relocInfo::runtime_call_type:
      // HMM
      _rspec = runtime_call_Relocation::spec();
      break;
    case relocInfo::poll_type:
    case relocInfo::poll_return_type:
      _rspec = Relocation::spec_simple(rtype);
      break;
    case relocInfo::none:
      break;
    default:
      ShouldNotReachHere();
  }
}
105 #else // LP64
// 32-bit variant: fold an ArrayAddress (literal base + scaled index) into a
// single Address by using the literal's target as the displacement and
// carrying over its relocation spec.
Address Address::make_array(ArrayAddress adr) {
  AddressLiteral base = adr.base();
  Address index = adr.index();
  assert(index._disp == 0, "must not have disp"); // maybe it can?
  Address array(index._base, index._index, index._scale, (intptr_t) base.target());
  array._rspec = base._rspec;
  return array;
}
116 // exceedingly dangerous constructor
// exceedingly dangerous constructor
// Absolute address form: the raw code address becomes the displacement and
// the caller-supplied relocation spec is attached verbatim.
Address::Address(address loc, RelocationHolder spec) {
  _base  = noreg;
  _index = noreg;
  _scale = no_scale;
  _disp  = (intptr_t) loc;
  _rspec = spec;
}
125 #endif // _LP64
129 // Convert the raw encoding form into the form expected by the constructor for
130 // Address. An index of 4 (rsp) corresponds to having no index, so convert
131 // that to noreg for the Address constructor.
132 Address Address::make_raw(int base, int index, int scale, int disp, bool disp_is_oop) {
133 RelocationHolder rspec;
134 if (disp_is_oop) {
135 rspec = Relocation::spec_simple(relocInfo::oop_type);
136 }
137 bool valid_index = index != rsp->encoding();
138 if (valid_index) {
139 Address madr(as_Register(base), as_Register(index), (Address::ScaleFactor)scale, in_ByteSize(disp));
140 madr._rspec = rspec;
141 return madr;
142 } else {
143 Address madr(as_Register(base), noreg, Address::no_scale, in_ByteSize(disp));
144 madr._rspec = rspec;
145 return madr;
146 }
147 }
149 // Implementation of Assembler
151 int AbstractAssembler::code_fill_byte() {
152 return (u_char)'\xF4'; // hlt
153 }
155 // make this go away someday
156 void Assembler::emit_data(jint data, relocInfo::relocType rtype, int format) {
157 if (rtype == relocInfo::none)
158 emit_long(data);
159 else emit_data(data, Relocation::spec_simple(rtype), format);
160 }
// Emit a 32-bit datum with an attached relocation. Must be called inside an
// InstructionMark scope: the relocation is recorded against the enclosing
// instruction's start (inst_mark), not the datum itself.
void Assembler::emit_data(jint data, RelocationHolder const& rspec, int format) {
  assert(imm_operand == 0, "default format must be immediate in this file");
  assert(inst_mark() != NULL, "must be inside InstructionMark");
  if (rspec.type() !=  relocInfo::none) {
#ifdef ASSERT
    check_relocation(rspec, format);
#endif
    // Do not use AbstractAssembler::relocate, which is not intended for
    // embedded words.  Instead, relocate to the enclosing instruction.

    // hack. call32 is too wide for mask so use disp32
    if (format == call32_operand)
      code_section()->relocate(inst_mark(), rspec, disp32_operand);
    else
      code_section()->relocate(inst_mark(), rspec, format);
  }
  emit_long(data);
}
181 static int encode(Register r) {
182 int enc = r->encoding();
183 if (enc >= 8) {
184 enc -= 8;
185 }
186 return enc;
187 }
189 static int encode(XMMRegister r) {
190 int enc = r->encoding();
191 if (enc >= 8) {
192 enc -= 8;
193 }
194 return enc;
195 }
// Emit a byte-sized arithmetic op: opcode, ModRM (op2 | register), imm8.
// op1's low bit clear marks the 8-bit form of the opcode.
void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
  assert(dst->has_byte_register(), "must have byte register");
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert(isByte(imm8), "not a byte");
  assert((op1 & 0x01) == 0, "should be 8bit operation");
  emit_byte(op1);
  emit_byte(op2 | encode(dst));
  emit_byte(imm8);
}
// Emit a 32-bit arithmetic op with immediate. If the immediate fits in a
// byte, use the sign-extended short form (opcode | 0x02) to save 3 bytes.
void Assembler::emit_arith(int op1, int op2, Register dst, int32_t imm32) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  if (is8bit(imm32)) {
    emit_byte(op1 | 0x02); // set sign bit
    emit_byte(op2 | encode(dst));
    emit_byte(imm32 & 0xFF);
  } else {
    emit_byte(op1);
    emit_byte(op2 | encode(dst));
    emit_long(imm32);
  }
}
223 // immediate-to-memory forms
// immediate-to-memory forms
// Same short/long immediate selection as emit_arith, but the destination is
// a memory operand; the 1/4 passed to emit_operand is the immediate's size
// (needed for correct RIP-relative displacement adjustment).
void Assembler::emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32) {
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  if (is8bit(imm32)) {
    emit_byte(op1 | 0x02); // set sign bit
    emit_operand(rm, adr, 1);
    emit_byte(imm32 & 0xFF);
  } else {
    emit_byte(op1);
    emit_operand(rm, adr, 4);
    emit_long(imm32);
  }
}
// 32-bit-only: arithmetic op whose immediate is an oop literal embedded in
// the code stream (emitted with an oop relocation so the GC can patch it).
void Assembler::emit_arith(int op1, int op2, Register dst, jobject obj) {
  LP64_ONLY(ShouldNotReachHere());
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  InstructionMark im(this);
  emit_byte(op1);
  emit_byte(op2 | encode(dst));
  emit_data((intptr_t)obj, relocInfo::oop_type, 0);
}
// Register-register arithmetic op: opcode then ModRM with dst in the reg
// field (bits 5:3) and src in the r/m field (bits 2:0).
void Assembler::emit_arith(int op1, int op2, Register dst, Register src) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  emit_byte(op1);
  emit_byte(op2 | encode(dst) << 3 | encode(src));
}
// Emit the ModRM (and SIB, displacement) bytes for a memory operand built
// from base/index/scale/disp, with 'reg' in the ModRM reg field. Handles all
// x86 addressing-mode special cases: rsp/r12 as base forces a SIB byte,
// rbp/r13 as base cannot use the disp-less form, and a relocated disp always
// uses the 32-bit displacement so it stays patchable.
// 'rip_relative_correction' accounts for immediate bytes that follow the
// displacement when computing the 64-bit RIP-relative offset.
void Assembler::emit_operand(Register reg, Register base, Register index,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec,
                             int rip_relative_correction) {
  relocInfo::relocType rtype = (relocInfo::relocType) rspec.type();

  // Encode the registers as needed in the fields they are used in

  int regenc = encode(reg) << 3;
  int indexenc = index->is_valid() ? encode(index) << 3 : 0;
  int baseenc = base->is_valid() ? encode(base) : 0;

  if (base->is_valid()) {
    if (index->is_valid()) {
      assert(scale != Address::no_scale, "inconsistent address");
      // [base + index*scale + disp]
      if (disp == 0 && rtype == relocInfo::none  &&
          base != rbp LP64_ONLY(&& base != r13)) {
        // [base + index*scale]
        // [00 reg 100][ss index base]
        assert(index != rsp, "illegal addressing mode");
        emit_byte(0x04 | regenc);
        emit_byte(scale << 6 | indexenc | baseenc);
      } else if (is8bit(disp) && rtype == relocInfo::none) {
        // [base + index*scale + imm8]
        // [01 reg 100][ss index base] imm8
        assert(index != rsp, "illegal addressing mode");
        emit_byte(0x44 | regenc);
        emit_byte(scale << 6 | indexenc | baseenc);
        emit_byte(disp & 0xFF);
      } else {
        // [base + index*scale + disp32]
        // [10 reg 100][ss index base] disp32
        assert(index != rsp, "illegal addressing mode");
        emit_byte(0x84 | regenc);
        emit_byte(scale << 6 | indexenc | baseenc);
        emit_data(disp, rspec, disp32_operand);
      }
    } else if (base == rsp LP64_ONLY(|| base == r12)) {
      // [rsp + disp]
      if (disp == 0 && rtype == relocInfo::none) {
        // [rsp]
        // [00 reg 100][00 100 100]
        emit_byte(0x04 | regenc);
        emit_byte(0x24);
      } else if (is8bit(disp) && rtype == relocInfo::none) {
        // [rsp + imm8]
        // [01 reg 100][00 100 100] disp8
        emit_byte(0x44 | regenc);
        emit_byte(0x24);
        emit_byte(disp & 0xFF);
      } else {
        // [rsp + imm32]
        // [10 reg 100][00 100 100] disp32
        emit_byte(0x84 | regenc);
        emit_byte(0x24);
        emit_data(disp, rspec, disp32_operand);
      }
    } else {
      // [base + disp]
      assert(base != rsp LP64_ONLY(&& base != r12), "illegal addressing mode");
      if (disp == 0 && rtype == relocInfo::none &&
          base != rbp LP64_ONLY(&& base != r13)) {
        // [base]
        // [00 reg base]
        emit_byte(0x00 | regenc | baseenc);
      } else if (is8bit(disp) && rtype == relocInfo::none) {
        // [base + disp8]
        // [01 reg base] disp8
        emit_byte(0x40 | regenc | baseenc);
        emit_byte(disp & 0xFF);
      } else {
        // [base + disp32]
        // [10 reg base] disp32
        emit_byte(0x80 | regenc | baseenc);
        emit_data(disp, rspec, disp32_operand);
      }
    }
  } else {
    if (index->is_valid()) {
      assert(scale != Address::no_scale, "inconsistent address");
      // [index*scale + disp]
      // [00 reg 100][ss index 101] disp32
      assert(index != rsp, "illegal addressing mode");
      emit_byte(0x04 | regenc);
      emit_byte(scale << 6 | indexenc | 0x05);
      emit_data(disp, rspec, disp32_operand);
    } else if (rtype != relocInfo::none ) {
      // [disp] (64bit) RIP-RELATIVE (32bit) abs
      // [00 000 101] disp32

      emit_byte(0x05 | regenc);
      // Note that the RIP-rel. correction applies to the generated
      // disp field, but _not_ to the target address in the rspec.

      // disp was created by converting the target address minus the pc
      // at the start of the instruction. That needs more correction here.
      // intptr_t disp = target - next_ip;
      assert(inst_mark() != NULL, "must be inside InstructionMark");
      address next_ip = pc() + sizeof(int32_t) + rip_relative_correction;
      int64_t adjusted = disp;
      // Do rip-rel adjustment for 64bit
      LP64_ONLY(adjusted -=  (next_ip - inst_mark()));
      assert(is_simm32(adjusted),
             "must be 32bit offset (RIP relative address)");
      emit_data((int32_t) adjusted, rspec, disp32_operand);

    } else {
      // 32bit never did this, did everything as the rip-rel/disp code above
      // [disp] ABSOLUTE
      // [00 reg 100][00 100 101] disp32
      emit_byte(0x04 | regenc);
      emit_byte(0x25);
      emit_data(disp, rspec, disp32_operand);
    }
  }
}
// XMM-destination variant: the ModRM encoding is identical, so reuse the
// general-purpose register path via a cast.
void Assembler::emit_operand(XMMRegister reg, Register base, Register index,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec) {
  emit_operand((Register)reg, base, index, scale, disp, rspec);
}
381 // Secret local extension to Assembler::WhichOperand:
382 #define end_pc_operand (_WhichOperand_limit)
// Partial x86 instruction decoder: walks the bytes of the instruction at
// 'inst' far enough to find the requested 32/64-bit operand (or, for
// end_pc_operand, the instruction's end). Only the instruction shapes this
// assembler actually emits are recognized; anything else hits
// ShouldNotReachHere().
address Assembler::locate_operand(address inst, WhichOperand which) {
  // Decode the given instruction, and return the address of
  // an embedded 32-bit operand word.

  // If "which" is disp32_operand, selects the displacement portion
  // of an effective address specifier.
  // If "which" is imm64_operand, selects the trailing immediate constant.
  // If "which" is call32_operand, selects the displacement of a call or jump.
  // Caller is responsible for ensuring that there is such an operand,
  // and that it is 32/64 bits wide.

  // If "which" is end_pc_operand, find the end of the instruction.

  address ip = inst;
  bool is_64bit = false;
  debug_only(bool has_disp32 = false);
  int tail_size = 0; // other random bytes (#32, #16, etc.) at end of insn

  again_after_prefix:
  switch (0xFF & *ip++) {

  // These convenience macros generate groups of "case" labels for the switch.
#define REP4(x) (x)+0: case (x)+1: case (x)+2: case (x)+3

#define REP8(x) (x)+0: case (x)+1: case (x)+2: case (x)+3: \
             case (x)+4: case (x)+5: case (x)+6: case (x)+7

#define REP16(x) REP8((x)+0): \
              case REP8((x)+8)

  case CS_segment:
  case SS_segment:
  case DS_segment:
  case ES_segment:
  case FS_segment:
  case GS_segment:
    // Seems dubious
    LP64_ONLY(assert(false, "shouldn't have that prefix"));
    assert(ip == inst+1, "only one prefix allowed");
    goto again_after_prefix;

  case 0x67:
  case REX:
  case REX_B:
  case REX_X:
  case REX_XB:
  case REX_R:
  case REX_RB:
  case REX_RX:
  case REX_RXB:
    NOT_LP64(assert(false, "64bit prefixes"));
    goto again_after_prefix;

  case REX_W:
  case REX_WB:
  case REX_WX:
  case REX_WXB:
  case REX_WR:
  case REX_WRB:
  case REX_WRX:
  case REX_WRXB:
    NOT_LP64(assert(false, "64bit prefixes"));
    // REX.W widens the operand (e.g. mov r, imm64 below)
    is_64bit = true;
    goto again_after_prefix;

  case 0xFF: // pushq a; decl a; incl a; call a; jmp a
  case 0x88: // movb a, r
  case 0x89: // movl a, r
  case 0x8A: // movb r, a
  case 0x8B: // movl r, a
  case 0x8F: // popl a
    debug_only(has_disp32 = true);
    break;

  case 0x68: // pushq #32
    if (which == end_pc_operand) {
      return ip + 4;
    }
    assert(which == imm_operand && !is_64bit, "pushl has no disp32 or 64bit immediate");
    return ip; // not produced by emit_operand

  case 0x66: // movw ... (size prefix)
    again_after_size_prefix2:
    switch (0xFF & *ip++) {
    case REX:
    case REX_B:
    case REX_X:
    case REX_XB:
    case REX_R:
    case REX_RB:
    case REX_RX:
    case REX_RXB:
    case REX_W:
    case REX_WB:
    case REX_WX:
    case REX_WXB:
    case REX_WR:
    case REX_WRB:
    case REX_WRX:
    case REX_WRXB:
      NOT_LP64(assert(false, "64bit prefix found"));
      goto again_after_size_prefix2;
    case 0x8B: // movw r, a
    case 0x89: // movw a, r
      debug_only(has_disp32 = true);
      break;
    case 0xC7: // movw a, #16
      debug_only(has_disp32 = true);
      tail_size = 2;  // the imm16
      break;
    case 0x0F: // several SSE/SSE2 variants
      ip--;    // reparse the 0x0F
      goto again_after_prefix;
    default:
      ShouldNotReachHere();
    }
    break;

  case REP8(0xB8): // movl/q r, #32/#64(oop?)
    if (which == end_pc_operand)  return ip + (is_64bit ? 8 : 4);
    // these asserts are somewhat nonsensical
#ifndef _LP64
    assert(which == imm_operand || which == disp32_operand, "");
#else
    assert((which == call32_operand || which == imm_operand) && is_64bit ||
           which == narrow_oop_operand && !is_64bit, "");
#endif // _LP64
    return ip;

  case 0x69: // imul r, a, #32
  case 0xC7: // movl a, #32(oop?)
    tail_size = 4;
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x0F: // movx..., etc.
    switch (0xFF & *ip++) {
    case 0x12: // movlps
    case 0x28: // movaps
    case 0x2E: // ucomiss
    case 0x2F: // comiss
    case 0x54: // andps
    case 0x55: // andnps
    case 0x56: // orps
    case 0x57: // xorps
    case 0x6E: // movd
    case 0x7E: // movd
    case 0xAE: // ldmxcsr a
      // 64bit side says it these have both operands but that doesn't
      // appear to be true
      debug_only(has_disp32 = true);
      break;

    case 0xAD: // shrd r, a, %cl
    case 0xAF: // imul r, a
    case 0xBE: // movsbl r, a (movsxb)
    case 0xBF: // movswl r, a (movsxw)
    case 0xB6: // movzbl r, a (movzxb)
    case 0xB7: // movzwl r, a (movzxw)
    case REP16(0x40): // cmovl cc, r, a
    case 0xB0: // cmpxchgb
    case 0xB1: // cmpxchg
    case 0xC1: // xaddl
    case 0xC7: // cmpxchg8
    case REP16(0x90): // setcc a
      debug_only(has_disp32 = true);
      // fall out of the switch to decode the address
      break;

    case 0xAC: // shrd r, a, #8
      debug_only(has_disp32 = true);
      tail_size = 1;  // the imm8
      break;

    case REP16(0x80): // jcc rdisp32
      if (which == end_pc_operand)  return ip + 4;
      assert(which == call32_operand, "jcc has no disp32 or imm");
      return ip;
    default:
      ShouldNotReachHere();
    }
    break;

  case 0x81: // addl a, #32; addl r, #32
    // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
    // on 32bit in the case of cmpl, the imm might be an oop
    tail_size = 4;
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x83: // addl a, #8; addl r, #8
    // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
    debug_only(has_disp32 = true); // has both kinds of operands!
    tail_size = 1;
    break;

  case 0x9B:
    switch (0xFF & *ip++) {
    case 0xD9: // fnstcw a
      debug_only(has_disp32 = true);
      break;
    default:
      ShouldNotReachHere();
    }
    break;

  case REP4(0x00): // addb a, r; addl a, r; addb r, a; addl r, a
  case REP4(0x10): // adc...
  case REP4(0x20): // and...
  case REP4(0x30): // xor...
  case REP4(0x08): // or...
  case REP4(0x18): // sbb...
  case REP4(0x28): // sub...
  case 0xF7: // mull a
  case 0x8D: // lea r, a
  case 0x87: // xchg r, a
  case REP4(0x38): // cmp...
  case 0x85: // test r, a
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0xC1: // sal a, #8; sar a, #8; shl a, #8; shr a, #8
  case 0xC6: // movb a, #8
  case 0x80: // cmpb a, #8
  case 0x6B: // imul r, a, #8
    debug_only(has_disp32 = true); // has both kinds of operands!
    tail_size = 1; // the imm8
    break;

  case 0xE8: // call rdisp32
  case 0xE9: // jmp  rdisp32
    if (which == end_pc_operand)  return ip + 4;
    assert(which == call32_operand, "call has no disp32 or imm");
    return ip;

  case 0xD1: // sal a, 1; sar a, 1; shl a, 1; shr a, 1
  case 0xD3: // sal a, %cl; sar a, %cl; shl a, %cl; shr a, %cl
  case 0xD9: // fld_s a; fst_s a; fstp_s a; fldcw a
  case 0xDD: // fld_d a; fst_d a; fstp_d a
  case 0xDB: // fild_s a; fistp_s a; fld_x a; fstp_x a
  case 0xDF: // fild_d a; fistp_d a
  case 0xD8: // fadd_s a; fsubr_s a; fmul_s a; fdivr_s a; fcomp_s a
  case 0xDC: // fadd_d a; fsubr_d a; fmul_d a; fdivr_d a; fcomp_d a
  case 0xDE: // faddp_d a; fsubrp_d a; fmulp_d a; fdivrp_d a; fcompp_d a
    debug_only(has_disp32 = true);
    break;

  case 0xF0: // Lock
    assert(os::is_MP(), "only on MP");
    goto again_after_prefix;

  case 0xF3: // For SSE
  case 0xF2: // For SSE2
    switch (0xFF & *ip++) {
    case REX:
    case REX_B:
    case REX_X:
    case REX_XB:
    case REX_R:
    case REX_RB:
    case REX_RX:
    case REX_RXB:
    case REX_W:
    case REX_WB:
    case REX_WX:
    case REX_WXB:
    case REX_WR:
    case REX_WRB:
    case REX_WRX:
    case REX_WRXB:
      NOT_LP64(assert(false, "found 64bit prefix"));
      ip++;
      // deliberate fall-through: after a REX prefix there is still a
      // two-byte 0F opcode to skip, so the extra ip++ below is wanted
    default:
      ip++;
    }
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  default:
    ShouldNotReachHere();

    // NOTE(review): REP4 is intentionally left defined here; only REP8 and
    // REP16 are undefined, matching the original code.
#undef REP8
#undef REP16
  }

  assert(which != call32_operand, "instruction is not a call, jmp, or jcc");
#ifdef _LP64
  assert(which != imm_operand, "instruction is not a movq reg, imm64");
#else
  // assert(which != imm_operand || has_imm32, "instruction has no imm32 field");
  assert(which != imm_operand || has_disp32, "instruction has no imm32 field");
#endif // LP64
  assert(which != disp32_operand || has_disp32, "instruction has no disp32 field");

  // parse the output of emit_operand
  int op2 = 0xFF & *ip++;
  int base = op2 & 0x07;
  int op3 = -1;
  const int b100 = 4;
  const int b101 = 5;
  if (base == b100 && (op2 >> 6) != 3) {
    op3 = 0xFF & *ip++;
    base = op3 & 0x07;   // refetch the base
  }
  // now ip points at the disp (if any)

  switch (op2 >> 6) {
  case 0:
    // [00 reg  100][ss index base]
    // [00 reg  100][00   100  esp]
    // [00 reg base]
    // [00 reg  100][ss index  101][disp32]
    // [00 reg  101]               [disp32]

    if (base == b101) {
      if (which == disp32_operand)
        return ip;              // caller wants the disp32
      ip += 4;                  // skip the disp32
    }
    break;

  case 1:
    // [01 reg  100][ss index base][disp8]
    // [01 reg  100][00   100  esp][disp8]
    // [01 reg base]               [disp8]
    ip += 1;                    // skip the disp8
    break;

  case 2:
    // [10 reg  100][ss index base][disp32]
    // [10 reg  100][00   100  esp][disp32]
    // [10 reg base]               [disp32]
    if (which == disp32_operand)
      return ip;                // caller wants the disp32
    ip += 4;                    // skip the disp32
    break;

  case 3:
    // [11 reg base]  (not a memory addressing mode)
    break;
  }

  if (which == end_pc_operand) {
    return ip + tail_size;
  }

#ifdef _LP64
  assert(which == narrow_oop_operand && !is_64bit, "instruction is not a movl adr, imm32");
#else
  assert(which == imm_operand, "instruction has only an imm field");
#endif // LP64
  return ip;
}
// Return the address of the instruction following 'inst' by asking
// locate_operand to decode to the end of the instruction.
address Assembler::locate_next_instruction(address inst) {
  // Secretly share code with locate_operand:
  return locate_operand(inst, end_pc_operand);
}
#ifdef ASSERT
// Debug-only sanity check: verify that the relocation about to be recorded
// points at the operand we just emitted, by re-decoding the instruction at
// inst_mark() and comparing the located operand against the current pc.
void Assembler::check_relocation(RelocationHolder const& rspec, int format) {
  address inst = inst_mark();
  assert(inst != NULL && inst < pc(), "must point to beginning of instruction");
  address opnd;

  Relocation* r = rspec.reloc();
  if (r->type() == relocInfo::none) {
    return;
  } else if (r->is_call() || format == call32_operand) {
    // assert(format == imm32_operand, "cannot specify a nonzero format");
    opnd = locate_operand(inst, call32_operand);
  } else if (r->is_data()) {
    assert(format == imm_operand || format == disp32_operand
           LP64_ONLY(|| format == narrow_oop_operand), "format ok");
    opnd = locate_operand(inst, (WhichOperand)format);
  } else {
    assert(format == imm_operand, "cannot specify a format");
    return;
  }
  assert(opnd == pc(), "must put operand where relocs can find it");
}
#endif // ASSERT
// Memory-operand emission restricted to the legacy (non-REX) register set;
// asserts guard against accidentally needing an extension prefix.
void Assembler::emit_operand32(Register reg, Address adr) {
  assert(reg->encoding() < 8, "no extended registers");
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec);
}
// Unpack an Address into its components and forward to the full
// emit_operand, threading through the RIP-relative correction.
void Assembler::emit_operand(Register reg, Address adr,
                             int rip_relative_correction) {
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec,
               rip_relative_correction);
}
// XMM-register form of the Address unpacking forwarder.
void Assembler::emit_operand(XMMRegister reg, Address adr) {
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec);
}
786 // MMX operations
// MMX operations
// MMX registers never carry REX extensions, hence the asserts.
void Assembler::emit_operand(MMXRegister reg, Address adr) {
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
}
792 // work around gcc (3.2.1-7a) bug
// work around gcc (3.2.1-7a) bug
// Argument-order-swapped duplicate of the MMX overload above.
void Assembler::emit_operand(Address adr, MMXRegister reg) {
  assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
  emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
}
// Emit a two-byte x87 floating-point instruction; 'i' selects the FPU
// stack register ST(i) by being added to the second opcode byte.
void Assembler::emit_farith(int b1, int b2, int i) {
  assert(isByte(b1) && isByte(b2), "wrong opcode");
  assert(0 <= i &&  i < 8, "illegal stack offset");
  emit_byte(b1);
  emit_byte(b2 + i);
}
807 // Now the Assembler instruction (identical for 32/64 bits)
// adc dst, imm32 -- add with carry, immediate form (via emit_arith's
// short/long immediate selection).
void Assembler::adcl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xD0, dst, imm32);
}
// adc dst, [src] -- add with carry from memory (opcode 0x13).
void Assembler::adcl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x13);
  emit_operand(dst, src);
}
// adc dst, src -- register-register add with carry.
void Assembler::adcl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x13, 0xC0, dst, src);
}
// add [dst], imm32 -- immediate-to-memory add.
void Assembler::addl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rax, dst, imm32);
}
// add [dst], src -- register-to-memory add (opcode 0x01).
void Assembler::addl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_byte(0x01);
  emit_operand(src, dst);
}
// add dst, imm32 -- immediate-to-register add.
void Assembler::addl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xC0, dst, imm32);
}
// add dst, [src] -- memory-to-register add (opcode 0x03).
void Assembler::addl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x03);
  emit_operand(dst, src);
}
// add dst, src -- register-register add.
void Assembler::addl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x03, 0xC0, dst, src);
}
// Emit a 4-byte multi-byte NOP (0F 1F /0 with an 8-bit displacement);
// used to pad code to alignment without executing real work.
void Assembler::addr_nop_4() {
  // 4 bytes: NOP DWORD PTR [EAX+0]
  emit_byte(0x0F);
  emit_byte(0x1F);
  emit_byte(0x40); // emit_rm(cbuf, 0x1, EAX_enc, EAX_enc);
  emit_byte(0);    // 8-bits offset (1 byte)
}
// Emit a 5-byte multi-byte NOP (adds a SIB byte to the 4-byte form).
void Assembler::addr_nop_5() {
  // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bits offset
  emit_byte(0x0F);
  emit_byte(0x1F);
  emit_byte(0x44); // emit_rm(cbuf, 0x1, EAX_enc, 0x4);
  emit_byte(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  emit_byte(0);    // 8-bits offset (1 byte)
}
// Emit a 7-byte multi-byte NOP (32-bit displacement form).
void Assembler::addr_nop_7() {
  // 7 bytes: NOP DWORD PTR [EAX+0] 32-bits offset
  emit_byte(0x0F);
  emit_byte(0x1F);
  emit_byte(0x80); // emit_rm(cbuf, 0x2, EAX_enc, EAX_enc);
  emit_long(0);    // 32-bits offset (4 bytes)
}
// Emit an 8-byte multi-byte NOP (SIB byte plus 32-bit displacement).
void Assembler::addr_nop_8() {
  // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bits offset
  emit_byte(0x0F);
  emit_byte(0x1F);
  emit_byte(0x84); // emit_rm(cbuf, 0x2, EAX_enc, 0x4);
  emit_byte(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  emit_long(0);    // 32-bits offset (4 bytes)
}
// addsd dst, src -- scalar double add (F2 0F 58), register-register form.
// The F2 prefix must precede the REX prefix emitted by prefix_and_encode.
void Assembler::addsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x58);
  emit_byte(0xC0 | encode);
}
// addsd dst, [src] -- scalar double add from memory.
void Assembler::addsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0xF2);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x58);
  emit_operand(dst, src);
}
// addss dst, src -- scalar single add (F3 0F 58), register-register form.
void Assembler::addss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x58);
  emit_byte(0xC0 | encode);
}
// addss dst, [src] -- scalar single add from memory.
void Assembler::addss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x58);
  emit_operand(dst, src);
}
// and dst, imm32 -- immediate bitwise AND.
void Assembler::andl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xE0, dst, imm32);
}
// and dst, [src] -- bitwise AND from memory (opcode 0x23).
void Assembler::andl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x23);
  emit_operand(dst, src);
}
// and dst, src -- register-register bitwise AND.
void Assembler::andl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x23, 0xC0, dst, src);
}
// andpd dst, [src] -- packed-double bitwise AND (66 0F 54).
void Assembler::andpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0x66);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x54);
  emit_operand(dst, src);
}
// bswap reg -- byte-swap a 32-bit register (0F C8+reg).
void Assembler::bswapl(Register reg) { // bswap
  int encode = prefix_and_encode(reg->encoding());
  emit_byte(0x0F);
  emit_byte(0xC8 | encode);
}
// call Label -- near call with a 32-bit relative displacement (E8).
// For a bound label, the backward displacement is computed now; otherwise a
// zero placeholder is emitted and a patch record is queued on the label.
void Assembler::call(Label& L, relocInfo::relocType rtype) {
  // suspect disp32 is always good
  int operand = LP64_ONLY(disp32_operand) NOT_LP64(imm_operand);

  if (L.is_bound()) {
    const int long_size = 5; // E8 + disp32
    int offs = (int)( target(L) - pc() );
    assert(offs <= 0, "assembler error");
    InstructionMark im(this);
    // 1110 1000 #32-bit disp
    emit_byte(0xE8);
    emit_data(offs - long_size, rtype, operand);
  } else {
    InstructionMark im(this);
    // 1110 1000 #32-bit disp
    L.add_patch_at(code(), locator());

    emit_byte(0xE8);
    emit_data(int(0), rtype, operand);
  }
}
// call reg -- indirect call through a register (FF /2).
void Assembler::call(Register dst) {
  // This was originally using a 32bit register encoding
  // and surely we want 64bit!
  // this is a 32bit encoding but in 64bit mode the default
  // operand size is 64bit so there is no need for the
  // wide prefix. So prefix only happens if we use the
  // new registers. Much like push/pop.
  int x = offset();
  // this may be true but dbx disassembles it as if it
  // were 32bits...
  // int encode = prefix_and_encode(dst->encoding());
  // if (offset() != x) assert(dst->encoding() >= 8, "what?");
  int encode = prefixq_and_encode(dst->encoding());

  emit_byte(0xFF);
  emit_byte(0xD0 | encode);
}
// call [adr] -- indirect call through memory (FF /2; rdx supplies the
// required /2 opcode-extension value in the ModRM reg field).
void Assembler::call(Address adr) {
  InstructionMark im(this);
  prefix(adr);
  emit_byte(0xFF);
  emit_operand(rdx, adr);
}
// call entry -- direct call to an absolute address, emitted as E8 plus the
// 32-bit pc-relative displacement, with the caller's relocation attached.
void Assembler::call_literal(address entry, RelocationHolder const& rspec) {
  assert(entry != NULL, "call most probably wrong");
  InstructionMark im(this);
  emit_byte(0xE8);
  // Displacement is relative to the end of this 5-byte instruction.
  intptr_t disp = entry - (_code_pos + sizeof(int32_t));
  assert(is_simm32(disp), "must be 32bit offset (call2)");
  // Technically, should use call32_operand, but this format is
  // implied by the fact that we're emitting a call instruction.

  int operand = LP64_ONLY(disp32_operand) NOT_LP64(call32_operand);
  emit_data((int) disp, rspec, operand);
}
// cdq -- sign-extend eax into edx:eax (opcode 0x99).
void Assembler::cdql() {
  emit_byte(0x99);
}
// cmovcc dst, src -- conditional move, register-register (0F 40+cc).
void Assembler::cmovl(Condition cc, Register dst, Register src) {
  NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x40 | cc);
  emit_byte(0xC0 | encode);
}
// CMOVcc r32, m32: 0F 40+cc /r (memory form).
void Assembler::cmovl(Condition cc, Register dst, Address src) {
  NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x40 | cc);
  emit_operand(dst, src);
}
// CMP byte ptr [mem], imm8: 80 /7 ib (rdi encodes the /7 digit).
void Assembler::cmpb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0x80);
  emit_operand(rdi, dst, 1);
  emit_byte(imm8);
}
// CMP dword ptr [mem], imm32: 81 /7 id.
void Assembler::cmpl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0x81);
  emit_operand(rdi, dst, 4);
  emit_long(imm32);
}
// CMP r32, imm: emit_arith picks the imm8/imm32 form (0x83/0x81, /7 via 0xF8).
void Assembler::cmpl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xF8, dst, imm32);
}
// CMP r32, r32: 3B /r.
void Assembler::cmpl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x3B, 0xC0, dst, src);
}
// CMP r32, m32: 3B /r (memory form).
void Assembler::cmpl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x3B);
  emit_operand(dst, src);
}
// CMP word ptr [mem], imm16: 66 81 /7 iw. No REX prefix is emitted here,
// so extended (r8-r15) base/index registers are disallowed by the assert.
void Assembler::cmpw(Address dst, int imm16) {
  InstructionMark im(this);
  assert(!dst.base_needs_rex() && !dst.index_needs_rex(), "no extended registers");
  emit_byte(0x66);
  emit_byte(0x81);
  emit_operand(rdi, dst, 2);
  emit_word(imm16);
}
// The 32-bit cmpxchg compares the value at adr with the contents of rax,
// and stores reg into adr if so; otherwise, the value at adr is loaded into rax,.
// The ZF is set if the compared values were equal, and cleared otherwise.
// Real encoding is 0F B1 /r; the (Atomics & 2) diagnostic mode emits a
// non-atomic instruction sequence with equivalent data flow instead.
void Assembler::cmpxchgl(Register reg, Address adr) { // cmpxchg
  if (Atomics & 2) {
    // caveat: no instructionmark, so this isn't relocatable.
    // Emit a synthetic, non-atomic, CAS equivalent.
    // Beware. The synthetic form sets all ICCs, not just ZF.
    // cmpxchg r,[m] is equivalent to rax, = CAS (m, rax, r)
    cmpl(rax, adr);
    movl(rax, adr);
    if (reg != rax) {
      Label L ;
      jcc(Assembler::notEqual, L);
      movl(adr, reg);
      bind(L);
    }
  } else {
    InstructionMark im(this);
    prefix(adr, reg);
    emit_byte(0x0F);
    emit_byte(0xB1);
    emit_operand(reg, adr);
  }
}
// COMISD xmm, m64: 66 0F 2F /r — the 0x66 prefix in front of the COMISS
// encoding selects the double-precision form.
void Assembler::comisd(XMMRegister dst, Address src) {
  // NOTE: dbx seems to decode this as comiss even though the
  // 0x66 is there. Strangly ucomisd comes out correct
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0x66);
  comiss(dst, src);
}
// COMISS xmm, m32: 0F 2F /r (ordered scalar-single compare, sets EFLAGS).
void Assembler::comiss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x2F);
  emit_operand(dst, src);
}
// CVTDQ2PD xmm, xmm: F3 0F E6 /r.
void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0xE6);
  emit_byte(0xC0 | encode);
}
// CVTDQ2PS xmm, xmm: 0F 5B /r.
void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x5B);
  emit_byte(0xC0 | encode);
}
// CVTSD2SS xmm, xmm: F2 0F 5A /r.
void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x5A);
  emit_byte(0xC0 | encode);
}
// CVTSI2SD xmm, r32: F2 0F 2A /r (32-bit source, hence the "l" suffix).
void Assembler::cvtsi2sdl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2A);
  emit_byte(0xC0 | encode);
}
// CVTSI2SS xmm, r32: F3 0F 2A /r.
void Assembler::cvtsi2ssl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2A);
  emit_byte(0xC0 | encode);
}
// CVTSS2SD xmm, xmm: F3 0F 5A /r.
void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x5A);
  emit_byte(0xC0 | encode);
}
// CVTTSD2SI r32, xmm: F2 0F 2C /r (truncating conversion).
void Assembler::cvttsd2sil(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2C);
  emit_byte(0xC0 | encode);
}
// CVTTSS2SI r32, xmm: F3 0F 2C /r (truncating conversion).
void Assembler::cvttss2sil(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2C);
  emit_byte(0xC0 | encode);
}
// DEC dword ptr [mem]: FF /1 (rcx encodes the /1 digit).
void Assembler::decl(Address dst) {
  // Don't use it directly. Use MacroAssembler::decrement() instead.
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0xFF);
  emit_operand(rcx, dst);
}
// DIVSD xmm, m64: F2 0F 5E /r.
void Assembler::divsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0xF2);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x5E);
  emit_operand(dst, src);
}
// DIVSD xmm, xmm: F2 0F 5E /r (register form).
void Assembler::divsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x5E);
  emit_byte(0xC0 | encode);
}
// DIVSS xmm, m32: F3 0F 5E /r.
void Assembler::divss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x5E);
  emit_operand(dst, src);
}
// DIVSS xmm, xmm: F3 0F 5E /r (register form).
void Assembler::divss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x5E);
  emit_byte(0xC0 | encode);
}
// EMMS (0F 77): empty MMX state so the x87 stack is usable again.
void Assembler::emms() {
  NOT_LP64(assert(VM_Version::supports_mmx(), ""));
  emit_byte(0x0F);
  emit_byte(0x77);
}
// HLT (0xF4).
void Assembler::hlt() {
  emit_byte(0xF4);
}
// IDIV r32: F7 /7 (0xF8 | reg); divides EDX:EAX by src.
void Assembler::idivl(Register src) {
  int encode = prefix_and_encode(src->encoding());
  emit_byte(0xF7);
  emit_byte(0xF8 | encode);
}
// IMUL r32, r32: 0F AF /r.
void Assembler::imull(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0xAF);
  emit_byte(0xC0 | encode);
}
1271 void Assembler::imull(Register dst, Register src, int value) {
1272 int encode = prefix_and_encode(dst->encoding(), src->encoding());
1273 if (is8bit(value)) {
1274 emit_byte(0x6B);
1275 emit_byte(0xC0 | encode);
1276 emit_byte(value);
1277 } else {
1278 emit_byte(0x69);
1279 emit_byte(0xC0 | encode);
1280 emit_long(value);
1281 }
1282 }
// INC dword ptr [mem]: FF /0 (rax encodes the /0 digit).
void Assembler::incl(Address dst) {
  // Don't use it directly. Use MacroAssembler::increment() instead.
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0xFF);
  emit_operand(rax, dst);
}
// Conditional jump to a label. Bound labels get the short form
// (70+cc rel8) when the displacement fits and no relocation is needed,
// otherwise the long form (0F 80+cc rel32). Unbound labels always
// reserve the 6-byte long form and register a patch site.
void Assembler::jcc(Condition cc, Label& L, relocInfo::relocType rtype) {
  InstructionMark im(this);
  relocate(rtype);
  assert((0 <= cc) && (cc < 16), "illegal cc");
  if (L.is_bound()) {
    address dst = target(L);
    assert(dst != NULL, "jcc most probably wrong");
    const int short_size = 2;
    const int long_size = 6;
    // Displacements are relative to the end of the instruction,
    // hence the short_size/long_size adjustments below.
    intptr_t offs = (intptr_t)dst - (intptr_t)_code_pos;
    if (rtype == relocInfo::none && is8bit(offs - short_size)) {
      // 0111 tttn #8-bit disp
      emit_byte(0x70 | cc);
      emit_byte((offs - short_size) & 0xFF);
    } else {
      // 0000 1111 1000 tttn #32-bit disp
      assert(is_simm32(offs - long_size),
             "must be 32bit offset (call4)");
      emit_byte(0x0F);
      emit_byte(0x80 | cc);
      emit_long(offs - long_size);
    }
  } else {
    // Note: could eliminate cond. jumps to this jump if condition
    // is the same however, seems to be rather unlikely case.
    // Note: use jccb() if label to be bound is very close to get
    // an 8-bit displacement
    L.add_patch_at(code(), locator());
    emit_byte(0x0F);
    emit_byte(0x80 | cc);
    emit_long(0);
  }
}
// Short conditional jump: 70+cc rel8. For unbound labels the 8-bit
// displacement is patched in later; the caller guarantees the target
// will be within range.
void Assembler::jccb(Condition cc, Label& L) {
  if (L.is_bound()) {
    const int short_size = 2;
    address entry = target(L);
    // Check the displacement that is actually emitted (relative to the
    // end of the 2-byte instruction).
    assert(is8bit((intptr_t)entry - ((intptr_t)_code_pos + short_size)),
           "Dispacement too large for a short jmp");
    intptr_t offs = (intptr_t)entry - (intptr_t)_code_pos;
    // 0111 tttn #8-bit disp
    emit_byte(0x70 | cc);
    emit_byte((offs - short_size) & 0xFF);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    emit_byte(0x70 | cc);
    emit_byte(0);
  }
}
// Indirect jump through memory: FF /4 (rsp encodes the /4 digit).
void Assembler::jmp(Address adr) {
  InstructionMark im(this);
  prefix(adr);
  emit_byte(0xFF);
  emit_operand(rsp, adr);
}
// Unconditional jump to a label: EB rel8 (short) when bound, in range,
// and not relocated; otherwise E9 rel32. Unbound labels reserve the
// 5-byte long form and register a patch site.
void Assembler::jmp(Label& L, relocInfo::relocType rtype) {
  if (L.is_bound()) {
    address entry = target(L);
    assert(entry != NULL, "jmp most probably wrong");
    InstructionMark im(this);
    const int short_size = 2;
    const int long_size = 5;
    // Displacements are relative to the end of the instruction.
    intptr_t offs = entry - _code_pos;
    if (rtype == relocInfo::none && is8bit(offs - short_size)) {
      emit_byte(0xEB);
      emit_byte((offs - short_size) & 0xFF);
    } else {
      emit_byte(0xE9);
      emit_long(offs - long_size);
    }
  } else {
    // By default, forward jumps are always 32-bit displacements, since
    // we can't yet know where the label will be bound. If you're sure that
    // the forward jump will not run beyond 256 bytes, use jmpb to
    // force an 8-bit displacement.
    InstructionMark im(this);
    relocate(rtype);
    L.add_patch_at(code(), locator());
    emit_byte(0xE9);
    emit_long(0);
  }
}
// Indirect jump through a register: FF /4 (0xE0 | reg).
void Assembler::jmp(Register entry) {
  int encode = prefix_and_encode(entry->encoding());
  emit_byte(0xFF);
  emit_byte(0xE0 | encode);
}
// Direct relative jump: E9 rel32, with relocation info on the displacement.
void Assembler::jmp_literal(address dest, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_byte(0xE9);
  assert(dest != NULL, "must have a target");
  // Displacement is relative to the end of the 5-byte instruction.
  intptr_t disp = dest - (_code_pos + sizeof(int32_t));
  assert(is_simm32(disp), "must be 32bit offset (jmp)");
  emit_data(disp, rspec.reloc(), call32_operand);
}
1395 void Assembler::jmpb(Label& L) {
1396 if (L.is_bound()) {
1397 const int short_size = 2;
1398 address entry = target(L);
1399 assert(is8bit((entry - _code_pos) + short_size),
1400 "Dispacement too large for a short jmp");
1401 assert(entry != NULL, "jmp most probably wrong");
1402 intptr_t offs = entry - _code_pos;
1403 emit_byte(0xEB);
1404 emit_byte((offs - short_size) & 0xFF);
1405 } else {
1406 InstructionMark im(this);
1407 L.add_patch_at(code(), locator());
1408 emit_byte(0xEB);
1409 emit_byte(0);
1410 }
1411 }
// LDMXCSR m32: 0F AE /2 — load the SSE control/status register.
void Assembler::ldmxcsr( Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  prefix(src);
  emit_byte(0x0F);
  emit_byte(0xAE);
  emit_operand(as_Register(2), src);  // /2 opcode extension
}
// LEA r32, m: 8D /r. On 64-bit the 0x67 address-size prefix is emitted
// first so the address computation is done in 32 bits.
void Assembler::leal(Register dst, Address src) {
  InstructionMark im(this);
#ifdef _LP64
  emit_byte(0x67); // addr32
  prefix(src, dst);
#endif // LP64
  emit_byte(0x8D);
  emit_operand(dst, src);
}
1432 void Assembler::lock() {
1433 if (Atomics & 1) {
1434 // Emit either nothing, a NOP, or a NOP: prefix
1435 emit_byte(0x90) ;
1436 } else {
1437 emit_byte(0xF0);
1438 }
1439 }
// Emit mfence instruction: 0F AE F0 (full memory fence).
void Assembler::mfence() {
  NOT_LP64(assert(VM_Version::supports_sse2(), "unsupported");)
  emit_byte( 0x0F );
  emit_byte( 0xAE );
  emit_byte( 0xF0 );
}
// Pointer-width register move: movq on 64-bit builds, movl on 32-bit.
void Assembler::mov(Register dst, Register src) {
  LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
}
1453 void Assembler::movapd(XMMRegister dst, XMMRegister src) {
1454 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1455 int dstenc = dst->encoding();
1456 int srcenc = src->encoding();
1457 emit_byte(0x66);
1458 if (dstenc < 8) {
1459 if (srcenc >= 8) {
1460 prefix(REX_B);
1461 srcenc -= 8;
1462 }
1463 } else {
1464 if (srcenc < 8) {
1465 prefix(REX_R);
1466 } else {
1467 prefix(REX_RB);
1468 srcenc -= 8;
1469 }
1470 dstenc -= 8;
1471 }
1472 emit_byte(0x0F);
1473 emit_byte(0x28);
1474 emit_byte(0xC0 | dstenc << 3 | srcenc);
1475 }
1477 void Assembler::movaps(XMMRegister dst, XMMRegister src) {
1478 NOT_LP64(assert(VM_Version::supports_sse(), ""));
1479 int dstenc = dst->encoding();
1480 int srcenc = src->encoding();
1481 if (dstenc < 8) {
1482 if (srcenc >= 8) {
1483 prefix(REX_B);
1484 srcenc -= 8;
1485 }
1486 } else {
1487 if (srcenc < 8) {
1488 prefix(REX_R);
1489 } else {
1490 prefix(REX_RB);
1491 srcenc -= 8;
1492 }
1493 dstenc -= 8;
1494 }
1495 emit_byte(0x0F);
1496 emit_byte(0x28);
1497 emit_byte(0xC0 | dstenc << 3 | srcenc);
1498 }
// MOV r8, m8: 8A /r. The 'true' flag asks prefix() for byte-register
// handling (REX needed for sil/dil/bpl/spl on 64-bit).
void Assembler::movb(Register dst, Address src) {
  NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
  InstructionMark im(this);
  prefix(src, dst, true);
  emit_byte(0x8A);
  emit_operand(dst, src);
}
// MOV byte ptr [mem], imm8: C6 /0 ib.
void Assembler::movb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0xC6);
  emit_operand(rax, dst, 1);
  emit_byte(imm8);
}
// MOV m8, r8: 88 /r (byte-register prefix handling enabled).
void Assembler::movb(Address dst, Register src) {
  assert(src->has_byte_register(), "must have byte register");
  InstructionMark im(this);
  prefix(dst, src, true);
  emit_byte(0x88);
  emit_operand(src, dst);
}
// MOVD xmm, r32: 66 0F 6E /r.
void Assembler::movdl(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0x66);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x6E);
  emit_byte(0xC0 | encode);
}
// MOVD r32, xmm: 66 0F 7E /r — the xmm register goes in the reg field,
// hence the swapped encoding order below.
void Assembler::movdl(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0x66);
  // swap src/dst to get correct prefix
  int encode = prefix_and_encode(src->encoding(), dst->encoding());
  emit_byte(0x0F);
  emit_byte(0x7E);
  emit_byte(0xC0 | encode);
}
// MOVDQA xmm, m128: 66 0F 6F /r (aligned 128-bit load).
void Assembler::movdqa(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0x66);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x6F);
  emit_operand(dst, src);
}
1555 void Assembler::movdqa(XMMRegister dst, XMMRegister src) {
1556 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1557 emit_byte(0x66);
1558 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
1559 emit_byte(0x0F);
1560 emit_byte(0x6F);
1561 emit_byte(0xC0 | encode);
1562 }
// MOVDQA m128, xmm: 66 0F 7F /r (aligned 128-bit store).
void Assembler::movdqa(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0x66);
  prefix(dst, src);
  emit_byte(0x0F);
  emit_byte(0x7F);
  emit_operand(src, dst);
}
// MOVDQU xmm, m128: F3 0F 6F /r (unaligned 128-bit load).
void Assembler::movdqu(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x6F);
  emit_operand(dst, src);
}
1584 void Assembler::movdqu(XMMRegister dst, XMMRegister src) {
1585 NOT_LP64(assert(VM_Version::supports_sse2(), ""));
1586 emit_byte(0xF3);
1587 int encode = prefixq_and_encode(dst->encoding(), src->encoding());
1588 emit_byte(0x0F);
1589 emit_byte(0x6F);
1590 emit_byte(0xC0 | encode);
1591 }
// MOVDQU m128, xmm: F3 0F 7F /r (unaligned 128-bit store).
void Assembler::movdqu(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(dst, src);
  emit_byte(0x0F);
  emit_byte(0x7F);
  emit_operand(src, dst);
}
// Uses zero extension on 64bit

// MOV r32, imm32: B8+rd id.
void Assembler::movl(Register dst, int32_t imm32) {
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xB8 | encode);
  emit_long(imm32);
}
// MOV r32, r32: 8B /r.
void Assembler::movl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x8B);
  emit_byte(0xC0 | encode);
}
// MOV r32, m32: 8B /r (memory form).
void Assembler::movl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x8B);
  emit_operand(dst, src);
}
// MOV dword ptr [mem], imm32: C7 /0 id.
void Assembler::movl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0xC7);
  emit_operand(rax, dst, 4);
  emit_long(imm32);
}
// MOV m32, r32: 89 /r.
void Assembler::movl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_byte(0x89);
  emit_operand(src, dst);
}
// New cpus require to use movsd and movss to avoid partial register stall
// when loading from memory. But for old Opteron use movlpd instead of movsd.
// The selection is done in MacroAssembler::movdbl() and movflt().

// MOVLPD xmm, m64: 66 0F 12 /r (loads the low quadword, high preserved).
void Assembler::movlpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0x66);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x12);
  emit_operand(dst, src);
}
// MOVQ mmx, m64: 0F 6F /r (MMX form, no prefix).
void Assembler::movq( MMXRegister dst, Address src ) {
  assert( VM_Version::supports_mmx(), "" );
  emit_byte(0x0F);
  emit_byte(0x6F);
  emit_operand(dst, src);
}
// MOVQ m64, mmx: 0F 7F /r (MMX store form).
void Assembler::movq( Address dst, MMXRegister src ) {
  assert( VM_Version::supports_mmx(), "" );
  emit_byte(0x0F);
  emit_byte(0x7F);
  // workaround gcc (3.2.1-7a) bug
  // In that version of gcc with only an emit_operand(MMX, Address)
  // gcc will tail jump and try and reverse the parameters completely
  // obliterating dst in the process. By having a version available
  // that doesn't need to swap the args at the tail jump the bug is
  // avoided.
  emit_operand(dst, src);
}
// MOVQ xmm, m64: F3 0F 7E /r (loads low quadword, zeroes the high half).
void Assembler::movq(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x7E);
  emit_operand(dst, src);
}
// MOVQ m64, xmm: 66 0F D6 /r (stores the low quadword).
void Assembler::movq(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0x66);
  prefix(dst, src);
  emit_byte(0x0F);
  emit_byte(0xD6);
  emit_operand(src, dst);
}
// MOVSX r32, m8: 0F BE /r (sign-extending byte load).
void Assembler::movsbl(Register dst, Address src) { // movsxb
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0xBE);
  emit_operand(dst, src);
}
// MOVSX r32, r8: 0F BE /r (byte-register prefix handling enabled).
void Assembler::movsbl(Register dst, Register src) { // movsxb
  NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
  int encode = prefix_and_encode(dst->encoding(), src->encoding(), true);
  emit_byte(0x0F);
  emit_byte(0xBE);
  emit_byte(0xC0 | encode);
}
// MOVSD xmm, xmm: F2 0F 10 /r.
void Assembler::movsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x10);
  emit_byte(0xC0 | encode);
}
// MOVSD xmm, m64: F2 0F 10 /r (load form).
void Assembler::movsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0xF2);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x10);
  emit_operand(dst, src);
}
// MOVSD m64, xmm: F2 0F 11 /r (store form).
void Assembler::movsd(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0xF2);
  prefix(dst, src);
  emit_byte(0x0F);
  emit_byte(0x11);
  emit_operand(src, dst);
}
// MOVSS xmm, xmm: F3 0F 10 /r.
void Assembler::movss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x10);
  emit_byte(0xC0 | encode);
}
// MOVSS xmm, m32: F3 0F 10 /r (load form).
void Assembler::movss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x10);
  emit_operand(dst, src);
}
// MOVSS m32, xmm: F3 0F 11 /r (store form).
void Assembler::movss(Address dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(dst, src);
  emit_byte(0x0F);
  emit_byte(0x11);
  emit_operand(src, dst);
}
// MOVSX r32, m16: 0F BF /r (sign-extending word load).
void Assembler::movswl(Register dst, Address src) { // movsxw
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0xBF);
  emit_operand(dst, src);
}
// MOVSX r32, r16: 0F BF /r.
void Assembler::movswl(Register dst, Register src) { // movsxw
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0xBF);
  emit_byte(0xC0 | encode);
}
// MOV word ptr [mem], imm16: 66 C7 /0 iw.
void Assembler::movw(Address dst, int imm16) {
  InstructionMark im(this);
  emit_byte(0x66); // switch to 16-bit mode
  prefix(dst);
  emit_byte(0xC7);
  emit_operand(rax, dst, 2);
  emit_word(imm16);
}
// MOV r16, m16: 66 8B /r.
void Assembler::movw(Register dst, Address src) {
  InstructionMark im(this);
  emit_byte(0x66);
  prefix(src, dst);
  emit_byte(0x8B);
  emit_operand(dst, src);
}
// MOV m16, r16: 66 89 /r.
void Assembler::movw(Address dst, Register src) {
  InstructionMark im(this);
  emit_byte(0x66);
  prefix(dst, src);
  emit_byte(0x89);
  emit_operand(src, dst);
}
// MOVZX r32, m8: 0F B6 /r (zero-extending byte load).
void Assembler::movzbl(Register dst, Address src) { // movzxb
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0xB6);
  emit_operand(dst, src);
}
// MOVZX r32, r8: 0F B6 /r (byte-register prefix handling enabled).
void Assembler::movzbl(Register dst, Register src) { // movzxb
  NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
  int encode = prefix_and_encode(dst->encoding(), src->encoding(), true);
  emit_byte(0x0F);
  emit_byte(0xB6);
  emit_byte(0xC0 | encode);
}
// MOVZX r32, m16: 0F B7 /r (zero-extending word load).
void Assembler::movzwl(Register dst, Address src) { // movzxw
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0xB7);
  emit_operand(dst, src);
}
// MOVZX r32, r16: 0F B7 /r.
void Assembler::movzwl(Register dst, Register src) { // movzxw
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0xB7);
  emit_byte(0xC0 | encode);
}
// MUL dword ptr [mem]: F7 /4 (rsp encodes the /4 digit);
// multiplies EAX by the operand into EDX:EAX.
void Assembler::mull(Address src) {
  InstructionMark im(this);
  prefix(src);
  emit_byte(0xF7);
  emit_operand(rsp, src);
}
// MUL r32: F7 /4 (0xE0 | reg).
void Assembler::mull(Register src) {
  int encode = prefix_and_encode(src->encoding());
  emit_byte(0xF7);
  emit_byte(0xE0 | encode);
}
// MULSD xmm, m64: F2 0F 59 /r.
void Assembler::mulsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0xF2);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x59);
  emit_operand(dst, src);
}
// MULSD xmm, xmm: F2 0F 59 /r (register form).
void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x59);
  emit_byte(0xC0 | encode);
}
// MULSS xmm, m32: F3 0F 59 /r.
void Assembler::mulss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x59);
  emit_operand(dst, src);
}
// MULSS xmm, xmm: F3 0F 59 /r (register form).
void Assembler::mulss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x59);
  emit_byte(0xC0 | encode);
}
// NEG r32: F7 /3 (0xD8 | reg).
void Assembler::negl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xF7);
  emit_byte(0xD8 | encode);
}
// Emit i bytes of nop padding. Debug builds always use single-byte 0x90
// nops so disassembly stays readable; product builds pick the densest
// encoding the CPU vendor recommends (multi-byte 0F 1F "address nops"
// for Intel/AMD with UseAddressNop, 0x66-prefixed nops otherwise).
// The switch cases below deliberately fall through to accumulate
// 0x66 size prefixes in front of a shared nop tail.
void Assembler::nop(int i) {
#ifdef ASSERT
  assert(i > 0, " ");
  // The fancy nops aren't currently recognized by debuggers making it a
  // pain to disassemble code while debugging. If asserts are on clearly
  // speed is not an issue so simply use the single byte traditional nop
  // to do alignment.

  for (; i > 0 ; i--) emit_byte(0x90);
  return;

#endif // ASSERT

  if (UseAddressNop && VM_Version::is_intel()) {
    //
    // Using multi-bytes nops "0x0F 0x1F [address]" for Intel
    //  1: 0x90
    //  2: 0x66 0x90
    //  3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
    //  4: 0x0F 0x1F 0x40 0x00
    //  5: 0x0F 0x1F 0x44 0x00 0x00
    //  6: 0x66 0x0F 0x1F 0x44 0x00 0x00
    //  7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    //  8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    //  9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00

    // The rest coding is Intel specific - don't use consecutive address nops

    // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90

    while(i >= 15) {
      // For Intel don't generate consecutive addess nops (mix with regular nops)
      i -= 15;
      emit_byte(0x66);   // size prefix
      emit_byte(0x66);   // size prefix
      emit_byte(0x66);   // size prefix
      addr_nop_8();
      emit_byte(0x66);   // size prefix
      emit_byte(0x66);   // size prefix
      emit_byte(0x66);   // size prefix
      emit_byte(0x90);   // nop
    }
    switch (i) {
      case 14:
        emit_byte(0x66); // size prefix
      case 13:
        emit_byte(0x66); // size prefix
      case 12:
        addr_nop_8();
        emit_byte(0x66); // size prefix
        emit_byte(0x66); // size prefix
        emit_byte(0x66); // size prefix
        emit_byte(0x90); // nop
        break;
      case 11:
        emit_byte(0x66); // size prefix
      case 10:
        emit_byte(0x66); // size prefix
      case 9:
        emit_byte(0x66); // size prefix
      case 8:
        addr_nop_8();
        break;
      case 7:
        addr_nop_7();
        break;
      case 6:
        emit_byte(0x66); // size prefix
      case 5:
        addr_nop_5();
        break;
      case 4:
        addr_nop_4();
        break;
      case 3:
        // Don't use "0x0F 0x1F 0x00" - need patching safe padding
        emit_byte(0x66); // size prefix
      case 2:
        emit_byte(0x66); // size prefix
      case 1:
        emit_byte(0x90); // nop
        break;
      default:
        assert(i == 0, " ");
    }
    return;
  }
  if (UseAddressNop && VM_Version::is_amd()) {
    //
    // Using multi-bytes nops "0x0F 0x1F [address]" for AMD.
    //  1: 0x90
    //  2: 0x66 0x90
    //  3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
    //  4: 0x0F 0x1F 0x40 0x00
    //  5: 0x0F 0x1F 0x44 0x00 0x00
    //  6: 0x66 0x0F 0x1F 0x44 0x00 0x00
    //  7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    //  8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    //  9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00

    // The rest coding is AMD specific - use consecutive address nops

    // 12: 0x66 0x0F 0x1F 0x44 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
    // 13: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
    // 14: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    // 15: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    // 16: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    //     Size prefixes (0x66) are added for larger sizes

    while(i >= 22) {
      i -= 11;
      emit_byte(0x66); // size prefix
      emit_byte(0x66); // size prefix
      emit_byte(0x66); // size prefix
      addr_nop_8();
    }
    // Generate first nop for size between 21-12
    switch (i) {
      case 21:
        i -= 1;
        emit_byte(0x66); // size prefix
      case 20:
      case 19:
        i -= 1;
        emit_byte(0x66); // size prefix
      case 18:
      case 17:
        i -= 1;
        emit_byte(0x66); // size prefix
      case 16:
      case 15:
        i -= 8;
        addr_nop_8();
        break;
      case 14:
      case 13:
        i -= 7;
        addr_nop_7();
        break;
      case 12:
        i -= 6;
        emit_byte(0x66); // size prefix
        addr_nop_5();
        break;
      default:
        assert(i < 12, " ");
    }

    // Generate second nop for size between 11-1
    switch (i) {
      case 11:
        emit_byte(0x66); // size prefix
      case 10:
        emit_byte(0x66); // size prefix
      case 9:
        emit_byte(0x66); // size prefix
      case 8:
        addr_nop_8();
        break;
      case 7:
        addr_nop_7();
        break;
      case 6:
        emit_byte(0x66); // size prefix
      case 5:
        addr_nop_5();
        break;
      case 4:
        addr_nop_4();
        break;
      case 3:
        // Don't use "0x0F 0x1F 0x00" - need patching safe padding
        emit_byte(0x66); // size prefix
      case 2:
        emit_byte(0x66); // size prefix
      case 1:
        emit_byte(0x90); // nop
        break;
      default:
        assert(i == 0, " ");
    }
    return;
  }

  // Using nops with size prefixes "0x66 0x90".
  // From AMD Optimization Guide:
  //  1: 0x90
  //  2: 0x66 0x90
  //  3: 0x66 0x66 0x90
  //  4: 0x66 0x66 0x66 0x90
  //  5: 0x66 0x66 0x90 0x66 0x90
  //  6: 0x66 0x66 0x90 0x66 0x66 0x90
  //  7: 0x66 0x66 0x66 0x90 0x66 0x66 0x90
  //  8: 0x66 0x66 0x66 0x90 0x66 0x66 0x66 0x90
  //  9: 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
  // 10: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
  //
  while(i > 12) {
    i -= 4;
    emit_byte(0x66); // size prefix
    emit_byte(0x66);
    emit_byte(0x66);
    emit_byte(0x90); // nop
  }
  // 1 - 12 nops
  if(i > 8) {
    if(i > 9) {
      i -= 1;
      emit_byte(0x66);
    }
    i -= 3;
    emit_byte(0x66);
    emit_byte(0x66);
    emit_byte(0x90);
  }
  // 1 - 8 nops
  if(i > 4) {
    if(i > 6) {
      i -= 1;
      emit_byte(0x66);
    }
    i -= 3;
    emit_byte(0x66);
    emit_byte(0x66);
    emit_byte(0x90);
  }
  switch (i) {
    case 4:
      emit_byte(0x66);
    case 3:
      emit_byte(0x66);
    case 2:
      emit_byte(0x66);
    case 1:
      emit_byte(0x90);
      break;
    default:
      assert(i == 0, " ");
  }
}
// NOT r32: F7 /2 (0xD0 | reg).
void Assembler::notl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xF7);
  emit_byte(0xD0 | encode );
}
// OR dword ptr [mem], imm32: 81 /1 id (rcx encodes the /1 digit).
void Assembler::orl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0x81);
  emit_operand(rcx, dst, 4);
  emit_long(imm32);
}
// OR r32, imm: emit_arith picks the imm8/imm32 form (0x83/0x81, /1 via 0xC8).
void Assembler::orl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xC8, dst, imm32);
}
// OR r32, m32: 0B /r.
void Assembler::orl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x0B);
  emit_operand(dst, src);
}
// OR r32, r32: 0B /r.
void Assembler::orl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x0B, 0xC0, dst, src);
}
// PCMPESTRI xmm1, m128, imm8: 66 0F 3A 61 /r ib (SSE4.2).
// Explicit-length packed string compare; operand lengths come from
// rax/rdx and the result index is returned in rcx.
void Assembler::pcmpestri(XMMRegister dst, Address src, int imm8) {
  assert(VM_Version::supports_sse4_2(), "");

  InstructionMark im(this);
  emit_byte(0x66);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x3A);
  emit_byte(0x61);
  emit_operand(dst, src);
  emit_byte(imm8);
}

// Register-register form of PCMPESTRI.
// NOTE(review): prefixq_and_encode() sets REX.W, which makes the
// lengths come from the full 64-bit rax/rdx rather than eax/edx.
// Harmless while callers keep the upper halves zero — confirm intended.
void Assembler::pcmpestri(XMMRegister dst, XMMRegister src, int imm8) {
  assert(VM_Version::supports_sse4_2(), "");

  emit_byte(0x66);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x3A);
  emit_byte(0x61);
  emit_byte(0xC0 | encode);
  emit_byte(imm8);
}
// generic
// POP r64/r32: 58+rd.
void Assembler::pop(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0x58 | encode);
}

// POPCNT r32, r/m32: F3 0F B8 /r.  The F3 prefix must precede any REX.
void Assembler::popcntl(Register dst, Address src) {
  assert(VM_Version::supports_popcnt(), "must support");
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0xB8);
  emit_operand(dst, src);
}

void Assembler::popcntl(Register dst, Register src) {
  assert(VM_Version::supports_popcnt(), "must support");
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0xB8);
  emit_byte(0xC0 | encode);
}

// POPF/POPFQ: 9D.
void Assembler::popf() {
  emit_byte(0x9D);
}

// POP m: 8F /0 (rax supplies the /0 reg-field only).
void Assembler::popl(Address dst) {
  // NOTE: this will adjust stack by 8byte on 64bits
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0x8F);
  emit_operand(rax, dst);
}
// Shared address prefix + 0x0F escape byte for the PREFETCH* family.
void Assembler::prefetch_prefix(Address src) {
  prefix(src);
  emit_byte(0x0F);
}

// PREFETCHNTA: 0F 18 /0 — non-temporal hint.
void Assembler::prefetchnta(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_byte(0x18);
  emit_operand(rax, src); // 0, src
}

// 3DNow! PREFETCH: 0F 0D /0.
void Assembler::prefetchr(Address src) {
  NOT_LP64(assert(VM_Version::supports_3dnow(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_byte(0x0D);
  emit_operand(rax, src); // 0, src
}

// PREFETCHT0: 0F 18 /1 — prefetch into all cache levels.
void Assembler::prefetcht0(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_byte(0x18);
  emit_operand(rcx, src); // 1, src
}

// PREFETCHT1: 0F 18 /2.
void Assembler::prefetcht1(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_byte(0x18);
  emit_operand(rdx, src); // 2, src
}

// PREFETCHT2: 0F 18 /3.
void Assembler::prefetcht2(Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_byte(0x18);
  emit_operand(rbx, src); // 3, src
}

// 3DNow! PREFETCHW: 0F 0D /1 — prefetch with intent to write.
void Assembler::prefetchw(Address src) {
  NOT_LP64(assert(VM_Version::supports_3dnow(), "must support"));
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_byte(0x0D);
  emit_operand(rcx, src); // 1, src
}
// Emit a raw prefix byte (REX_*, segment override, etc.).
void Assembler::prefix(Prefix p) {
  a_byte(p);
}
// PSHUFD xmm, xmm, imm8: 66 0F 70 /r ib — shuffle packed doublewords.
void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));

  emit_byte(0x66);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x70);
  emit_byte(0xC0 | encode);
  emit_byte(mode & 0xFF);

}

void Assembler::pshufd(XMMRegister dst, Address src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));

  InstructionMark im(this);
  emit_byte(0x66);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x70);
  emit_operand(dst, src);
  emit_byte(mode & 0xFF);
}

// PSHUFLW xmm, xmm, imm8: F2 0F 70 /r ib — shuffle low packed words.
void Assembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));

  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x70);
  emit_byte(0xC0 | encode);
  emit_byte(mode & 0xFF);
}

void Assembler::pshuflw(XMMRegister dst, Address src, int mode) {
  assert(isByte(mode), "invalid value");
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));

  InstructionMark im(this);
  emit_byte(0xF2);
  prefix(src, dst); // QQ new
  emit_byte(0x0F);
  emit_byte(0x70);
  emit_operand(dst, src);
  emit_byte(mode & 0xFF);
}
2346 void Assembler::psrlq(XMMRegister dst, int shift) {
2347 // HMM Table D-1 says sse2 or mmx
2348 NOT_LP64(assert(VM_Version::supports_sse(), ""));
2350 int encode = prefixq_and_encode(xmm2->encoding(), dst->encoding());
2351 emit_byte(0x66);
2352 emit_byte(0x0F);
2353 emit_byte(0x73);
2354 emit_byte(0xC0 | encode);
2355 emit_byte(shift);
2356 }
// PTEST xmm, m128: 66 0F 38 17 /r (SSE4.1) — sets ZF/CF from AND/ANDN.
void Assembler::ptest(XMMRegister dst, Address src) {
  assert(VM_Version::supports_sse4_1(), "");

  InstructionMark im(this);
  emit_byte(0x66);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x38);
  emit_byte(0x17);
  emit_operand(dst, src);
}

// Register-register form of PTEST.
// NOTE(review): prefixq_and_encode() adds REX.W, which PTEST ignores;
// a plain prefix_and_encode() would encode one byte shorter.
void Assembler::ptest(XMMRegister dst, XMMRegister src) {
  assert(VM_Version::supports_sse4_1(), "");

  emit_byte(0x66);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x38);
  emit_byte(0x17);
  emit_byte(0xC0 | encode);
}

// PUNPCKLBW xmm, xmm: 66 0F 60 /r — interleave low bytes.
void Assembler::punpcklbw(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0x66);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x60);
  emit_byte(0xC0 | encode);
}
// PUSH imm32: 68 id.
void Assembler::push(int32_t imm32) {
  // in 64bits we push 64bits onto the stack but only
  // take a 32bit immediate
  emit_byte(0x68);
  emit_long(imm32);
}

// PUSH r: 50+rd.
void Assembler::push(Register src) {
  int encode = prefix_and_encode(src->encoding());

  emit_byte(0x50 | encode);
}

// PUSHF/PUSHFQ: 9C.
void Assembler::pushf() {
  emit_byte(0x9C);
}

// PUSH m: FF /6 (rsi supplies the /6 reg-field only).
void Assembler::pushl(Address src) {
  // Note this will push 64bit on 64bit
  InstructionMark im(this);
  prefix(src);
  emit_byte(0xFF);
  emit_operand(rsi, src);
}
// PXOR xmm, m128: 66 0F EF /r.
void Assembler::pxor(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0x66);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0xEF);
  emit_operand(dst, src);
}

// Register-register form of PXOR.
// NOTE(review): the InstructionMark is unnecessary here — no relocatable
// data is emitted (other reg-reg forms in this file omit it); harmless.
void Assembler::pxor(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0x66);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0xEF);
  emit_byte(0xC0 | encode);
}
2435 void Assembler::rcll(Register dst, int imm8) {
2436 assert(isShiftCount(imm8), "illegal shift count");
2437 int encode = prefix_and_encode(dst->encoding());
2438 if (imm8 == 1) {
2439 emit_byte(0xD1);
2440 emit_byte(0xD0 | encode);
2441 } else {
2442 emit_byte(0xC1);
2443 emit_byte(0xD0 | encode);
2444 emit_byte(imm8);
2445 }
2446 }
// copies data from [esi] to [edi] using rcx pointer sized words
// generic
// REP MOVS: F3 (REX.W) A5.
void Assembler::rep_mov() {
  emit_byte(0xF3);
  // MOVSQ
  LP64_ONLY(prefix(REX_W));
  emit_byte(0xA5);
}

// sets rcx pointer sized words with rax, value at [edi]
// generic
// REP STOS: F3 (REX.W) AB.
void Assembler::rep_set() { // rep_set
  emit_byte(0xF3);
  // STOSQ
  LP64_ONLY(prefix(REX_W));
  emit_byte(0xAB);
}

// scans rcx pointer sized words at [edi] for occurance of rax,
// generic
// REPNE SCAS: F2 (REX.W) AF.
void Assembler::repne_scan() { // repne_scan
  emit_byte(0xF2);
  // SCASQ
  LP64_ONLY(prefix(REX_W));
  emit_byte(0xAF);
}

#ifdef _LP64
// scans rcx 4 byte words at [edi] for occurance of rax,
// generic
// 32-bit-element variant: no REX.W, so SCASD is executed.
void Assembler::repne_scanl() { // repne_scan
  emit_byte(0xF2);
  // SCASL
  emit_byte(0xAF);
}
#endif
2485 void Assembler::ret(int imm16) {
2486 if (imm16 == 0) {
2487 emit_byte(0xC3);
2488 } else {
2489 emit_byte(0xC2);
2490 emit_word(imm16);
2491 }
2492 }
// SAHF: 9E — load flags from AH.  Not available in 64-bit mode here.
void Assembler::sahf() {
#ifdef _LP64
  // Not supported in 64bit mode
  ShouldNotReachHere();
#endif
  emit_byte(0x9E);
}

// SAR r/m32, imm8: D1 /7 for count 1, else C1 /7 ib.
void Assembler::sarl(Register dst, int imm8) {
  int encode = prefix_and_encode(dst->encoding());
  assert(isShiftCount(imm8), "illegal shift count");
  if (imm8 == 1) {
    emit_byte(0xD1);
    emit_byte(0xF8 | encode);
  } else {
    emit_byte(0xC1);
    emit_byte(0xF8 | encode);
    emit_byte(imm8);
  }
}

// SAR r/m32, cl: D3 /7.
void Assembler::sarl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xD3);
  emit_byte(0xF8 | encode);
}

// SBB m32, imm32: 81/83 /3 (rbx supplies the /3 reg-field only).
void Assembler::sbbl(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rbx, dst, imm32);
}

void Assembler::sbbl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xD8, dst, imm32);
}

// SBB r32, r/m32: 1B /r.
void Assembler::sbbl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x1B);
  emit_operand(dst, src);
}

void Assembler::sbbl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x1B, 0xC0, dst, src);
}

// SETcc r/m8: 0F 90+cc /0.  byteinst=true so spl..dil get a REX prefix.
void Assembler::setb(Condition cc, Register dst) {
  assert(0 <= cc && cc < 16, "illegal cc");
  int encode = prefix_and_encode(dst->encoding(), true);
  emit_byte(0x0F);
  emit_byte(0x90 | cc);
  emit_byte(0xC0 | encode);
}
// SHL r/m32, imm8: D1 /4 for count 1, else C1 /4 ib.
void Assembler::shll(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
  if (imm8 == 1 ) {
    emit_byte(0xD1);
    emit_byte(0xE0 | encode);
  } else {
    emit_byte(0xC1);
    emit_byte(0xE0 | encode);
    emit_byte(imm8);
  }
}

// SHL r/m32, cl: D3 /4.
void Assembler::shll(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xD3);
  emit_byte(0xE0 | encode);
}

// SHR r/m32, imm8: C1 /5 ib (no short form emitted for count 1 here).
void Assembler::shrl(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xC1);
  emit_byte(0xE8 | encode);
  emit_byte(imm8);
}

// SHR r/m32, cl: D3 /5.
void Assembler::shrl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xD3);
  emit_byte(0xE8 | encode);
}

// copies a single word from [esi] to [edi]
// MOVS: A5.
void Assembler::smovl() {
  emit_byte(0xA5);
}
// SQRTSD xmm, xmm: F2 0F 51 /r — scalar double-precision square root.
void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
  // HMM Table D-1 says sse2
  // NOT_LP64(assert(VM_Version::supports_sse(), ""));
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x51);
  emit_byte(0xC0 | encode);
}

// STMXCSR m32: 0F AE /3 — store the MXCSR control/status register.
void Assembler::stmxcsr( Address dst) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0x0F);
  emit_byte(0xAE);
  emit_operand(as_Register(3), dst);
}
2611 void Assembler::subl(Address dst, int32_t imm32) {
2612 InstructionMark im(this);
2613 prefix(dst);
2614 if (is8bit(imm32)) {
2615 emit_byte(0x83);
2616 emit_operand(rbp, dst, 1);
2617 emit_byte(imm32 & 0xFF);
2618 } else {
2619 emit_byte(0x81);
2620 emit_operand(rbp, dst, 4);
2621 emit_long(imm32);
2622 }
2623 }
// SUB r32, imm32 — emit_arith picks the sign-extended imm8 form when possible.
void Assembler::subl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xE8, dst, imm32);
}

// SUB r/m32, r32: 29 /r.
void Assembler::subl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_byte(0x29);
  emit_operand(src, dst);
}

// SUB r32, r/m32: 2B /r.
void Assembler::subl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x2B);
  emit_operand(dst, src);
}

void Assembler::subl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x2B, 0xC0, dst, src);
}

// SUBSD xmm, xmm: F2 0F 5C /r — scalar double subtract.
void Assembler::subsd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x5C);
  emit_byte(0xC0 | encode);
}

void Assembler::subsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0xF2);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x5C);
  emit_operand(dst, src);
}

// SUBSS xmm, xmm: F3 0F 5C /r — scalar single subtract.
void Assembler::subss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x5C);
  emit_byte(0xC0 | encode);
}

void Assembler::subss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x5C);
  emit_operand(dst, src);
}
// TEST r/m8, imm8: F6 /0 ib.  byteinst=true so spl..dil get a REX prefix.
void Assembler::testb(Register dst, int imm8) {
  NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
  (void) prefix_and_encode(dst->encoding(), true);
  emit_arith_b(0xF6, 0xC0, dst, imm8);
}
2693 void Assembler::testl(Register dst, int32_t imm32) {
2694 // not using emit_arith because test
2695 // doesn't support sign-extension of
2696 // 8bit operands
2697 int encode = dst->encoding();
2698 if (encode == 0) {
2699 emit_byte(0xA9);
2700 } else {
2701 encode = prefix_and_encode(encode);
2702 emit_byte(0xF7);
2703 emit_byte(0xC0 | encode);
2704 }
2705 emit_long(imm32);
2706 }
// TEST r/m32, r32: 85 /r.
void Assembler::testl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x85, 0xC0, dst, src);
}

void Assembler::testl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x85);
  emit_operand(dst, src);
}

// UCOMISD xmm, m64: 66 0F 2E /r — implemented as 0x66 + ucomiss.
void Assembler::ucomisd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0x66);
  ucomiss(dst, src);
}

void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0x66);
  ucomiss(dst, src);
}

// UCOMISS xmm, m32: 0F 2E /r — unordered scalar compare, sets EFLAGS.
void Assembler::ucomiss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));

  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x2E);
  emit_operand(dst, src);
}

void Assembler::ucomiss(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2E);
  emit_byte(0xC0 | encode);
}
// XADD r/m32, r32: 0F C1 /r — exchange and add.
void Assembler::xaddl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_byte(0x0F);
  emit_byte(0xC1);
  emit_operand(src, dst);
}

// XCHG r32, r/m32: 87 /r (implicitly locked when a memory operand is used).
void Assembler::xchgl(Register dst, Address src) { // xchg
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x87);
  emit_operand(dst, src);
}

void Assembler::xchgl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x87);
  emit_byte(0xc0 | encode);
}

// XOR r32, imm32 — emit_arith picks the sign-extended imm8 form when possible.
void Assembler::xorl(Register dst, int32_t imm32) {
  prefix(dst);
  emit_arith(0x81, 0xF0, dst, imm32);
}

// XOR r32, r/m32: 33 /r.
void Assembler::xorl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x33);
  emit_operand(dst, src);
}

void Assembler::xorl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x33, 0xC0, dst, src);
}

// XORPD xmm, xmm: 66 0F 57 /r — implemented as 0x66 + xorps.
void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0x66);
  xorps(dst, src);
}

void Assembler::xorpd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0x66);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x57);
  emit_operand(dst, src);
}

// XORPS xmm, xmm: 0F 57 /r.
void Assembler::xorps(XMMRegister dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x57);
  emit_byte(0xC0 | encode);
}

void Assembler::xorps(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x57);
  emit_operand(dst, src);
}
#ifndef _LP64
// 32bit only pieces of the assembler

// CMP r32, imm32 with a relocated immediate: 81 /7 id.
void Assembler::cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec) {
  // NO PREFIX AS NEVER 64BIT
  InstructionMark im(this);
  emit_byte(0x81);
  emit_byte(0xF8 | src1->encoding());
  emit_data(imm32, rspec, 0);
}

// CMP m32, imm32 with a relocated immediate (rdi supplies the /7 reg-field).
void Assembler::cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec) {
  // NO PREFIX AS NEVER 64BIT (not even 32bit versions of 64bit regs
  InstructionMark im(this);
  emit_byte(0x81);
  emit_operand(rdi, src1);
  emit_data(imm32, rspec, 0);
}

// The 64-bit (32bit platform) cmpxchg compares the value at adr with the contents of rdx:rax,
// and stores rcx:rbx into adr if so; otherwise, the value at adr is loaded
// into rdx:rax. The ZF is set if the compared values were equal, and cleared otherwise.
// CMPXCHG8B m64: 0F C7 /1.
void Assembler::cmpxchg8(Address adr) {
  InstructionMark im(this);
  emit_byte(0x0F);
  emit_byte(0xc7);
  emit_operand(rcx, adr);
}

// DEC r32: 48+rd (this opcode is the REX prefix range on 64-bit).
void Assembler::decl(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementl() instead.
  emit_byte(0x48 | dst->encoding());
}

#endif // _LP64
// 64bit typically doesn't use the x87 but needs to for the trig funcs
//
// x87 floating-point stack instructions.  emit_farith(b1, b2, i) emits
// the two-byte form b1, b2+i where i selects stack register ST(i).
// In the memory forms the general register passed to emit_operand32()
// only supplies the /digit (reg) field of the ModRM byte; it is not an
// operand of the instruction.

void Assembler::fabs() {
  emit_byte(0xD9);
  emit_byte(0xE1);
}

void Assembler::fadd(int i) {
  emit_farith(0xD8, 0xC0, i);
}

// FADD m64fp: DC /0.
void Assembler::fadd_d(Address src) {
  InstructionMark im(this);
  emit_byte(0xDC);
  emit_operand32(rax, src);
}

// FADD m32fp: D8 /0.
void Assembler::fadd_s(Address src) {
  InstructionMark im(this);
  emit_byte(0xD8);
  emit_operand32(rax, src);
}

// FADD ST(i), ST(0).
void Assembler::fadda(int i) {
  emit_farith(0xDC, 0xC0, i);
}

void Assembler::faddp(int i) {
  emit_farith(0xDE, 0xC0, i);
}

void Assembler::fchs() {
  emit_byte(0xD9);
  emit_byte(0xE0);
}

void Assembler::fcom(int i) {
  emit_farith(0xD8, 0xD0, i);
}

void Assembler::fcomp(int i) {
  emit_farith(0xD8, 0xD8, i);
}

// FCOMP m64fp: DC /3.
void Assembler::fcomp_d(Address src) {
  InstructionMark im(this);
  emit_byte(0xDC);
  emit_operand32(rbx, src);
}

// FCOMP m32fp: D8 /3.
void Assembler::fcomp_s(Address src) {
  InstructionMark im(this);
  emit_byte(0xD8);
  emit_operand32(rbx, src);
}

void Assembler::fcompp() {
  emit_byte(0xDE);
  emit_byte(0xD9);
}

void Assembler::fcos() {
  emit_byte(0xD9);
  emit_byte(0xFF);
}

void Assembler::fdecstp() {
  emit_byte(0xD9);
  emit_byte(0xF6);
}

void Assembler::fdiv(int i) {
  emit_farith(0xD8, 0xF0, i);
}

// FDIV m64fp: DC /6.
void Assembler::fdiv_d(Address src) {
  InstructionMark im(this);
  emit_byte(0xDC);
  emit_operand32(rsi, src);
}

// FDIV m32fp: D8 /6.
void Assembler::fdiv_s(Address src) {
  InstructionMark im(this);
  emit_byte(0xD8);
  emit_operand32(rsi, src);
}

void Assembler::fdiva(int i) {
  emit_farith(0xDC, 0xF8, i);
}

// Note: The Intel manual (Pentium Processor User's Manual, Vol.3, 1994)
// is erroneous for some of the floating-point instructions below.

void Assembler::fdivp(int i) {
  emit_farith(0xDE, 0xF8, i);                    // ST(0) <- ST(0) / ST(1) and pop (Intel manual wrong)
}

void Assembler::fdivr(int i) {
  emit_farith(0xD8, 0xF8, i);
}

// FDIVR m64fp: DC /7.
void Assembler::fdivr_d(Address src) {
  InstructionMark im(this);
  emit_byte(0xDC);
  emit_operand32(rdi, src);
}

// FDIVR m32fp: D8 /7.
void Assembler::fdivr_s(Address src) {
  InstructionMark im(this);
  emit_byte(0xD8);
  emit_operand32(rdi, src);
}

void Assembler::fdivra(int i) {
  emit_farith(0xDC, 0xF0, i);
}

void Assembler::fdivrp(int i) {
  emit_farith(0xDE, 0xF0, i);                    // ST(0) <- ST(1) / ST(0) and pop (Intel manual wrong)
}

void Assembler::ffree(int i) {
  emit_farith(0xDD, 0xC0, i);
}

// FILD m64int: DF /5.
void Assembler::fild_d(Address adr) {
  InstructionMark im(this);
  emit_byte(0xDF);
  emit_operand32(rbp, adr);
}

// FILD m32int: DB /0.
void Assembler::fild_s(Address adr) {
  InstructionMark im(this);
  emit_byte(0xDB);
  emit_operand32(rax, adr);
}

void Assembler::fincstp() {
  emit_byte(0xD9);
  emit_byte(0xF7);
}

// FINIT (with wait prefix 9B).
void Assembler::finit() {
  emit_byte(0x9B);
  emit_byte(0xDB);
  emit_byte(0xE3);
}

// FIST m32int: DB /2.
void Assembler::fist_s(Address adr) {
  InstructionMark im(this);
  emit_byte(0xDB);
  emit_operand32(rdx, adr);
}

// FISTP m64int: DF /7.
void Assembler::fistp_d(Address adr) {
  InstructionMark im(this);
  emit_byte(0xDF);
  emit_operand32(rdi, adr);
}

// FISTP m32int: DB /3.
void Assembler::fistp_s(Address adr) {
  InstructionMark im(this);
  emit_byte(0xDB);
  emit_operand32(rbx, adr);
}

void Assembler::fld1() {
  emit_byte(0xD9);
  emit_byte(0xE8);
}

// FLD m64fp: DD /0.
void Assembler::fld_d(Address adr) {
  InstructionMark im(this);
  emit_byte(0xDD);
  emit_operand32(rax, adr);
}

// FLD m32fp: D9 /0.
void Assembler::fld_s(Address adr) {
  InstructionMark im(this);
  emit_byte(0xD9);
  emit_operand32(rax, adr);
}

// FLD ST(index).
void Assembler::fld_s(int index) {
  emit_farith(0xD9, 0xC0, index);
}

// FLD m80fp (extended precision): DB /5.
void Assembler::fld_x(Address adr) {
  InstructionMark im(this);
  emit_byte(0xDB);
  emit_operand32(rbp, adr);
}

// FLDCW m16: D9 /5 — load the x87 control word.
void Assembler::fldcw(Address src) {
  InstructionMark im(this);
  emit_byte(0xd9);
  emit_operand32(rbp, src);
}

// FLDENV m: D9 /4.
void Assembler::fldenv(Address src) {
  InstructionMark im(this);
  emit_byte(0xD9);
  emit_operand32(rsp, src);
}

void Assembler::fldlg2() {
  emit_byte(0xD9);
  emit_byte(0xEC);
}

void Assembler::fldln2() {
  emit_byte(0xD9);
  emit_byte(0xED);
}

void Assembler::fldz() {
  emit_byte(0xD9);
  emit_byte(0xEE);
}

// ln(x) = ln(2) * log2(x), computed via fyl2x.
void Assembler::flog() {
  fldln2();
  fxch();
  fyl2x();
}

// log10(x) = log10(2) * log2(x), computed via fyl2x.
void Assembler::flog10() {
  fldlg2();
  fxch();
  fyl2x();
}

void Assembler::fmul(int i) {
  emit_farith(0xD8, 0xC8, i);
}

// FMUL m64fp: DC /1.
void Assembler::fmul_d(Address src) {
  InstructionMark im(this);
  emit_byte(0xDC);
  emit_operand32(rcx, src);
}

// FMUL m32fp: D8 /1.
void Assembler::fmul_s(Address src) {
  InstructionMark im(this);
  emit_byte(0xD8);
  emit_operand32(rcx, src);
}

void Assembler::fmula(int i) {
  emit_farith(0xDC, 0xC8, i);
}

void Assembler::fmulp(int i) {
  emit_farith(0xDE, 0xC8, i);
}

// FNSAVE m: DD /6 — save x87 state without waiting.
void Assembler::fnsave(Address dst) {
  InstructionMark im(this);
  emit_byte(0xDD);
  emit_operand32(rsi, dst);
}

// FSTCW m16: 9B D9 /7 (wait prefix + store control word).
void Assembler::fnstcw(Address src) {
  InstructionMark im(this);
  emit_byte(0x9B);
  emit_byte(0xD9);
  emit_operand32(rdi, src);
}

// FNSTSW ax: DF E0 — store status word to ax.
void Assembler::fnstsw_ax() {
  emit_byte(0xdF);
  emit_byte(0xE0);
}

void Assembler::fprem() {
  emit_byte(0xD9);
  emit_byte(0xF8);
}

void Assembler::fprem1() {
  emit_byte(0xD9);
  emit_byte(0xF5);
}

// FRSTOR m: DD /4.
void Assembler::frstor(Address src) {
  InstructionMark im(this);
  emit_byte(0xDD);
  emit_operand32(rsp, src);
}

void Assembler::fsin() {
  emit_byte(0xD9);
  emit_byte(0xFE);
}

void Assembler::fsqrt() {
  emit_byte(0xD9);
  emit_byte(0xFA);
}

// FST m64fp: DD /2.
void Assembler::fst_d(Address adr) {
  InstructionMark im(this);
  emit_byte(0xDD);
  emit_operand32(rdx, adr);
}

// FST m32fp: D9 /2.
void Assembler::fst_s(Address adr) {
  InstructionMark im(this);
  emit_byte(0xD9);
  emit_operand32(rdx, adr);
}

// FSTP m64fp: DD /3.
void Assembler::fstp_d(Address adr) {
  InstructionMark im(this);
  emit_byte(0xDD);
  emit_operand32(rbx, adr);
}

// FSTP ST(index).
void Assembler::fstp_d(int index) {
  emit_farith(0xDD, 0xD8, index);
}

// FSTP m32fp: D9 /3.
void Assembler::fstp_s(Address adr) {
  InstructionMark im(this);
  emit_byte(0xD9);
  emit_operand32(rbx, adr);
}

// FSTP m80fp (extended precision): DB /7.
void Assembler::fstp_x(Address adr) {
  InstructionMark im(this);
  emit_byte(0xDB);
  emit_operand32(rdi, adr);
}

void Assembler::fsub(int i) {
  emit_farith(0xD8, 0xE0, i);
}

// FSUB m64fp: DC /4.
void Assembler::fsub_d(Address src) {
  InstructionMark im(this);
  emit_byte(0xDC);
  emit_operand32(rsp, src);
}

// FSUB m32fp: D8 /4.
void Assembler::fsub_s(Address src) {
  InstructionMark im(this);
  emit_byte(0xD8);
  emit_operand32(rsp, src);
}

void Assembler::fsuba(int i) {
  emit_farith(0xDC, 0xE8, i);
}

void Assembler::fsubp(int i) {
  emit_farith(0xDE, 0xE8, i);                    // ST(0) <- ST(0) - ST(1) and pop (Intel manual wrong)
}

void Assembler::fsubr(int i) {
  emit_farith(0xD8, 0xE8, i);
}

// FSUBR m64fp: DC /5.
void Assembler::fsubr_d(Address src) {
  InstructionMark im(this);
  emit_byte(0xDC);
  emit_operand32(rbp, src);
}

// FSUBR m32fp: D8 /5.
void Assembler::fsubr_s(Address src) {
  InstructionMark im(this);
  emit_byte(0xD8);
  emit_operand32(rbp, src);
}

void Assembler::fsubra(int i) {
  emit_farith(0xDC, 0xE0, i);
}

void Assembler::fsubrp(int i) {
  emit_farith(0xDE, 0xE0, i);                    // ST(0) <- ST(1) - ST(0) and pop (Intel manual wrong)
}

// FPTAN followed by FSTP ST(0) to discard the pushed 1.0.
void Assembler::ftan() {
  emit_byte(0xD9);
  emit_byte(0xF2);
  emit_byte(0xDD);
  emit_byte(0xD8);
}

void Assembler::ftst() {
  emit_byte(0xD9);
  emit_byte(0xE4);
}

void Assembler::fucomi(int i) {
  // make sure the instruction is supported (introduced for P6, together with cmov)
  guarantee(VM_Version::supports_cmov(), "illegal instruction");
  emit_farith(0xDB, 0xE8, i);
}

void Assembler::fucomip(int i) {
  // make sure the instruction is supported (introduced for P6, together with cmov)
  guarantee(VM_Version::supports_cmov(), "illegal instruction");
  emit_farith(0xDF, 0xE8, i);
}

void Assembler::fwait() {
  emit_byte(0x9B);
}

void Assembler::fxch(int i) {
  emit_farith(0xD9, 0xC8, i);
}

// FYL2X: ST(1) <- ST(1) * log2(ST(0)), pop.
void Assembler::fyl2x() {
  emit_byte(0xD9);
  emit_byte(0xF1);
}
#ifndef _LP64

// INC r32: 40+rd (this opcode range is the REX prefixes on 64-bit).
void Assembler::incl(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementl() instead.
  emit_byte(0x40 | dst->encoding());
}

// On 32-bit, lea is just the 32-bit form.
void Assembler::lea(Register dst, Address src) {
  leal(dst, src);
}

// MOV m32, imm32 with a relocated immediate: C7 /0 id.
void Assembler::mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_byte(0xC7);
  emit_operand(rax, dst);
  emit_data((int)imm32, rspec, 0);
}

// MOV r32, imm32 with a relocated immediate: B8+rd id.
void Assembler::mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xB8 | encode);
  emit_data((int)imm32, rspec, 0);
}

// POPAD: 61.
void Assembler::popa() { // 32bit
  emit_byte(0x61);
}

// PUSH imm32 with a relocated immediate: 68 id.
void Assembler::push_literal32(int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_byte(0x68);
  emit_data(imm32, rspec, 0);
}

// PUSHAD: 60.
void Assembler::pusha() { // 32bit
  emit_byte(0x60);
}

// SETNZ al..bh: 0F 95 /0 (no REX on 32-bit, so raw encoding is used).
void Assembler::set_byte_if_not_zero(Register dst) {
  emit_byte(0x0F);
  emit_byte(0x95);
  emit_byte(0xE0 | dst->encoding());
}

// SHLD r32, r32, cl: 0F A5 /r.
void Assembler::shldl(Register dst, Register src) {
  emit_byte(0x0F);
  emit_byte(0xA5);
  emit_byte(0xC0 | src->encoding() << 3 | dst->encoding());
}

// SHRD r32, r32, cl: 0F AD /r.
void Assembler::shrdl(Register dst, Register src) {
  emit_byte(0x0F);
  emit_byte(0xAD);
  emit_byte(0xC0 | src->encoding() << 3 | dst->encoding());
}
3338 #else // LP64
3340 // 64bit only pieces of the assembler
3341 // This should only be used by 64bit instructions that can use rip-relative
3342 // it cannot be used by instructions that want an immediate value.
// Returns true if adr can be reached with a rip-relative 32-bit
// displacement from anywhere code might be placed in the code cache;
// false forces the caller to materialize a 64-bit address literal.
bool Assembler::reachable(AddressLiteral adr) {
  int64_t disp;
  // None will force a 64bit literal to the code stream. Likely a placeholder
  // for something that will be patched later and we need to certain it will
  // always be reachable.
  if (adr.reloc() == relocInfo::none) {
    return false;
  }
  if (adr.reloc() == relocInfo::internal_word_type) {
    // This should be rip relative and easily reachable.
    return true;
  }
  if (adr.reloc() == relocInfo::virtual_call_type ||
      adr.reloc() == relocInfo::opt_virtual_call_type ||
      adr.reloc() == relocInfo::static_call_type ||
      adr.reloc() == relocInfo::static_stub_type ) {
    // This should be rip relative within the code cache and easily
    // reachable until we get huge code caches. (At which point
    // ic code is going to have issues).
    return true;
  }
  if (adr.reloc() != relocInfo::external_word_type &&
      adr.reloc() != relocInfo::poll_return_type &&  // these are really external_word but need special
      adr.reloc() != relocInfo::poll_type &&         // relocs to identify them
      adr.reloc() != relocInfo::runtime_call_type ) {
    return false;
  }

  // Stress the correction code
  if (ForceUnreachable) {
    // Must be runtimecall reloc, see if it is in the codecache
    // Flipping stuff in the codecache to be unreachable causes issues
    // with things like inline caches where the additional instructions
    // are not handled.
    if (CodeCache::find_blob(adr._target) == NULL) {
      return false;
    }
  }
  // For external_word_type/runtime_call_type if it is reachable from where we
  // are now (possibly a temp buffer) and where we might end up
  // anywhere in the codeCache then we are always reachable.
  // This would have to change if we ever save/restore shared code
  // to be more pessimistic.

  // Check reachability from both ends of the code cache (worst case).
  disp = (int64_t)adr._target - ((int64_t)CodeCache::low_bound() + sizeof(int));
  if (!is_simm32(disp)) return false;
  disp = (int64_t)adr._target - ((int64_t)CodeCache::high_bound() + sizeof(int));
  if (!is_simm32(disp)) return false;

  disp = (int64_t)adr._target - ((int64_t)_code_pos + sizeof(int));

  // Because rip relative is a disp + address_of_next_instruction and we
  // don't know the value of address_of_next_instruction we apply a fudge factor
  // to make sure we will be ok no matter the size of the instruction we get placed into.
  // We don't have to fudge the checks above here because they are already worst case.

  // 12 == override/rex byte, opcode byte, rm byte, sib byte, a 4-byte disp , 4-byte literal
  // + 4 because better safe than sorry.
  const int fudge = 12 + 4;
  if (disp < 0) {
    disp -= fudge;
  } else {
    disp += fudge;
  }
  return is_simm32(disp);
}
3411 void Assembler::emit_data64(jlong data,
3412 relocInfo::relocType rtype,
3413 int format) {
3414 if (rtype == relocInfo::none) {
3415 emit_long64(data);
3416 } else {
3417 emit_data64(data, Relocation::spec_simple(rtype), format);
3418 }
3419 }
// Emit a 64-bit data word with relocation info. The relocation is recorded
// at the instruction mark (start of the enclosing instruction), not at the
// embedded word itself, so the caller must be inside an InstructionMark.
void Assembler::emit_data64(jlong data,
                            RelocationHolder const& rspec,
                            int format) {
  assert(imm_operand == 0, "default format must be immediate in this file");
  assert(imm_operand == format, "must be immediate");
  assert(inst_mark() != NULL, "must be inside InstructionMark");
  // Do not use AbstractAssembler::relocate, which is not intended for
  // embedded words. Instead, relocate to the enclosing instruction.
  code_section()->relocate(inst_mark(), rspec, format);
#ifdef ASSERT
  check_relocation(rspec, format);
#endif
  emit_long64(data);
}
3436 int Assembler::prefix_and_encode(int reg_enc, bool byteinst) {
3437 if (reg_enc >= 8) {
3438 prefix(REX_B);
3439 reg_enc -= 8;
3440 } else if (byteinst && reg_enc >= 4) {
3441 prefix(REX);
3442 }
3443 return reg_enc;
3444 }
3446 int Assembler::prefixq_and_encode(int reg_enc) {
3447 if (reg_enc < 8) {
3448 prefix(REX_W);
3449 } else {
3450 prefix(REX_WB);
3451 reg_enc -= 8;
3452 }
3453 return reg_enc;
3454 }
3456 int Assembler::prefix_and_encode(int dst_enc, int src_enc, bool byteinst) {
3457 if (dst_enc < 8) {
3458 if (src_enc >= 8) {
3459 prefix(REX_B);
3460 src_enc -= 8;
3461 } else if (byteinst && src_enc >= 4) {
3462 prefix(REX);
3463 }
3464 } else {
3465 if (src_enc < 8) {
3466 prefix(REX_R);
3467 } else {
3468 prefix(REX_RB);
3469 src_enc -= 8;
3470 }
3471 dst_enc -= 8;
3472 }
3473 return dst_enc << 3 | src_enc;
3474 }
3476 int Assembler::prefixq_and_encode(int dst_enc, int src_enc) {
3477 if (dst_enc < 8) {
3478 if (src_enc < 8) {
3479 prefix(REX_W);
3480 } else {
3481 prefix(REX_WB);
3482 src_enc -= 8;
3483 }
3484 } else {
3485 if (src_enc < 8) {
3486 prefix(REX_WR);
3487 } else {
3488 prefix(REX_WRB);
3489 src_enc -= 8;
3490 }
3491 dst_enc -= 8;
3492 }
3493 return dst_enc << 3 | src_enc;
3494 }
// Emit REX.B when reg is one of the extended registers (r8-r15).
void Assembler::prefix(Register reg) {
  if (reg->encoding() >= 8) {
    prefix(REX_B);
  }
}
3502 void Assembler::prefix(Address adr) {
3503 if (adr.base_needs_rex()) {
3504 if (adr.index_needs_rex()) {
3505 prefix(REX_XB);
3506 } else {
3507 prefix(REX_B);
3508 }
3509 } else {
3510 if (adr.index_needs_rex()) {
3511 prefix(REX_X);
3512 }
3513 }
3514 }
3516 void Assembler::prefixq(Address adr) {
3517 if (adr.base_needs_rex()) {
3518 if (adr.index_needs_rex()) {
3519 prefix(REX_WXB);
3520 } else {
3521 prefix(REX_WB);
3522 }
3523 } else {
3524 if (adr.index_needs_rex()) {
3525 prefix(REX_WX);
3526 } else {
3527 prefix(REX_W);
3528 }
3529 }
3530 }
3533 void Assembler::prefix(Address adr, Register reg, bool byteinst) {
3534 if (reg->encoding() < 8) {
3535 if (adr.base_needs_rex()) {
3536 if (adr.index_needs_rex()) {
3537 prefix(REX_XB);
3538 } else {
3539 prefix(REX_B);
3540 }
3541 } else {
3542 if (adr.index_needs_rex()) {
3543 prefix(REX_X);
3544 } else if (reg->encoding() >= 4 ) {
3545 prefix(REX);
3546 }
3547 }
3548 } else {
3549 if (adr.base_needs_rex()) {
3550 if (adr.index_needs_rex()) {
3551 prefix(REX_RXB);
3552 } else {
3553 prefix(REX_RB);
3554 }
3555 } else {
3556 if (adr.index_needs_rex()) {
3557 prefix(REX_RX);
3558 } else {
3559 prefix(REX_R);
3560 }
3561 }
3562 }
3563 }
3565 void Assembler::prefixq(Address adr, Register src) {
3566 if (src->encoding() < 8) {
3567 if (adr.base_needs_rex()) {
3568 if (adr.index_needs_rex()) {
3569 prefix(REX_WXB);
3570 } else {
3571 prefix(REX_WB);
3572 }
3573 } else {
3574 if (adr.index_needs_rex()) {
3575 prefix(REX_WX);
3576 } else {
3577 prefix(REX_W);
3578 }
3579 }
3580 } else {
3581 if (adr.base_needs_rex()) {
3582 if (adr.index_needs_rex()) {
3583 prefix(REX_WRXB);
3584 } else {
3585 prefix(REX_WRB);
3586 }
3587 } else {
3588 if (adr.index_needs_rex()) {
3589 prefix(REX_WRX);
3590 } else {
3591 prefix(REX_WR);
3592 }
3593 }
3594 }
3595 }
3597 void Assembler::prefix(Address adr, XMMRegister reg) {
3598 if (reg->encoding() < 8) {
3599 if (adr.base_needs_rex()) {
3600 if (adr.index_needs_rex()) {
3601 prefix(REX_XB);
3602 } else {
3603 prefix(REX_B);
3604 }
3605 } else {
3606 if (adr.index_needs_rex()) {
3607 prefix(REX_X);
3608 }
3609 }
3610 } else {
3611 if (adr.base_needs_rex()) {
3612 if (adr.index_needs_rex()) {
3613 prefix(REX_RXB);
3614 } else {
3615 prefix(REX_RB);
3616 }
3617 } else {
3618 if (adr.index_needs_rex()) {
3619 prefix(REX_RX);
3620 } else {
3621 prefix(REX_R);
3622 }
3623 }
3624 }
3625 }
// 64-bit add-with-carry of a sign-extended 32-bit immediate (0x81 /2).
void Assembler::adcq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xD0, dst, imm32);
}

// 64-bit add-with-carry from memory (0x13: ADC r64, r/m64).
void Assembler::adcq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x13);
  emit_operand(dst, src);
}
3639 void Assembler::adcq(Register dst, Register src) {
3640 (int) prefixq_and_encode(dst->encoding(), src->encoding());
3641 emit_arith(0x13, 0xC0, dst, src);
3642 }
// 64-bit add of a sign-extended 32-bit immediate to memory (0x81 /0).
void Assembler::addq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  // rax's encoding (0) supplies the /0 opcode-extension field.
  emit_arith_operand(0x81, rax, dst,imm32);
}

// 64-bit add of a register to memory (0x01: ADD r/m64, r64).
void Assembler::addq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_byte(0x01);
  emit_operand(src, dst);
}

// 64-bit add of a sign-extended 32-bit immediate to a register (0x81 /0).
void Assembler::addq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xC0, dst, imm32);
}

// 64-bit add from memory (0x03: ADD r64, r/m64).
void Assembler::addq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x03);
  emit_operand(dst, src);
}

// 64-bit add, register-register.
void Assembler::addq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x03, 0xC0, dst, src);
}
// 64-bit AND with a sign-extended 32-bit immediate (0x81 /4).
void Assembler::andq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xE0, dst, imm32);
}

// 64-bit AND from memory (0x23: AND r64, r/m64).
void Assembler::andq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x23);
  emit_operand(dst, src);
}
3686 void Assembler::andq(Register dst, Register src) {
3687 (int) prefixq_and_encode(dst->encoding(), src->encoding());
3688 emit_arith(0x23, 0xC0, dst, src);
3689 }
// Byte-swap a 64-bit register (0F C8+r with REX.W).
void Assembler::bswapq(Register reg) {
  int encode = prefixq_and_encode(reg->encoding());
  emit_byte(0x0F);
  emit_byte(0xC8 | encode);
}

// CQO: sign-extend rax into rdx:rax (REX.W 99).
void Assembler::cdqq() {
  prefix(REX_W);
  emit_byte(0x99);
}

// CLFLUSH m8 (0F AE /7) -- flush the cache line containing adr.
void Assembler::clflush(Address adr) {
  prefix(adr);
  emit_byte(0x0F);
  emit_byte(0xAE);
  // rdi's encoding (7) supplies the /7 opcode-extension field.
  emit_operand(rdi, adr);
}
// 64-bit conditional move, register-register (0F 40+cc).
void Assembler::cmovq(Condition cc, Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x40 | cc);
  emit_byte(0xC0 | encode);
}

// 64-bit conditional move from memory (0F 40+cc).
void Assembler::cmovq(Condition cc, Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x0F);
  emit_byte(0x40 | cc);
  emit_operand(dst, src);
}
// 64-bit compare of memory with a sign-extended 32-bit immediate (0x81 /7).
void Assembler::cmpq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_byte(0x81);
  // rdi's encoding (7) supplies the /7 opcode-extension field.
  emit_operand(rdi, dst, 4);
  emit_long(imm32);
}

// 64-bit compare of a register with a sign-extended 32-bit immediate.
void Assembler::cmpq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xF8, dst, imm32);
}

void Assembler::cmpq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  // NOTE(review): 0x3B is CMP r64, r/m64, i.e. this encodes
  // "cmp src, [dst]" -- the operand order is reversed relative to the
  // (dst, src) argument order; the 0x39 form would encode "cmp [dst], src".
  // Harmless for equality tests but inverts signed/unsigned ordering
  // conditions. Verify how callers use the flags before changing.
  emit_byte(0x3B);
  emit_operand(src, dst);
}

// 64-bit compare, register-register.
void Assembler::cmpq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x3B, 0xC0, dst, src);
}

// 64-bit compare of a register with memory (0x3B: CMP r64, r/m64).
void Assembler::cmpq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x3B);
  emit_operand(dst, src);
}
// CMPXCHG r/m64, r64 (0F B1): compare rax with [adr]; if equal store reg,
// else load [adr] into rax. Caller adds any LOCK prefix separately.
void Assembler::cmpxchgq(Register reg, Address adr) {
  InstructionMark im(this);
  prefixq(adr, reg);
  emit_byte(0x0F);
  emit_byte(0xB1);
  emit_operand(reg, adr);
}
// CVTSI2SD with a 64-bit integer source (F2 REX.W 0F 2A).
// Note: the F2 mandatory prefix must precede the REX prefix.
void Assembler::cvtsi2sdq(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2A);
  emit_byte(0xC0 | encode);
}

// CVTSI2SS with a 64-bit integer source (F3 REX.W 0F 2A).
void Assembler::cvtsi2ssq(XMMRegister dst, Register src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_byte(0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2A);
  emit_byte(0xC0 | encode);
}

// CVTTSD2SI, truncating, 64-bit integer destination (F2 REX.W 0F 2C).
void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF2);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2C);
  emit_byte(0xC0 | encode);
}

// CVTTSS2SI, truncating, 64-bit integer destination (F3 REX.W 0F 2C).
void Assembler::cvttss2siq(Register dst, XMMRegister src) {
  NOT_LP64(assert(VM_Version::supports_sse(), ""));
  emit_byte(0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2C);
  emit_byte(0xC0 | encode);
}
// 32-bit decrement (FF /1, modrm 0xC8|r).
void Assembler::decl(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementl() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xFF);
  emit_byte(0xC8 | encode);
}

// 64-bit decrement of a register (FF /1 with REX.W).
void Assembler::decq(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementq() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xFF);
  emit_byte(0xC8 | encode);
}

// 64-bit decrement of a memory operand (FF /1; rcx encodes the /1 field).
void Assembler::decq(Address dst) {
  // Don't use it directly. Use MacroAssembler::decrementq() instead.
  InstructionMark im(this);
  prefixq(dst);
  emit_byte(0xFF);
  emit_operand(rcx, dst);
}
// FXRSTOR m512byte (0F AE /1): restore x87/SSE state from memory.
void Assembler::fxrstor(Address src) {
  prefixq(src);
  emit_byte(0x0F);
  emit_byte(0xAE);
  // register encoding 1 supplies the /1 opcode-extension field
  emit_operand(as_Register(1), src);
}

// FXSAVE m512byte (0F AE /0): save x87/SSE state to memory.
void Assembler::fxsave(Address dst) {
  prefixq(dst);
  emit_byte(0x0F);
  emit_byte(0xAE);
  // register encoding 0 supplies the /0 opcode-extension field
  emit_operand(as_Register(0), dst);
}
// 64-bit signed divide rdx:rax by src (F7 /7).
void Assembler::idivq(Register src) {
  int encode = prefixq_and_encode(src->encoding());
  emit_byte(0xF7);
  emit_byte(0xF8 | encode);
}

// 64-bit signed multiply, two-operand form (0F AF).
void Assembler::imulq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0xAF);
  emit_byte(0xC0 | encode);
}

// 64-bit signed multiply by immediate: dst = src * value.
// Uses the short 6B form when the immediate fits in a signed byte.
void Assembler::imulq(Register dst, Register src, int value) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  if (is8bit(value)) {
    emit_byte(0x6B);
    emit_byte(0xC0 | encode);
    emit_byte(value);
  } else {
    emit_byte(0x69);
    emit_byte(0xC0 | encode);
    emit_long(value);
  }
}
// 32-bit increment (FF /0, modrm 0xC0|r).
void Assembler::incl(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementl() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xFF);
  emit_byte(0xC0 | encode);
}

// 64-bit increment of a register (FF /0 with REX.W).
void Assembler::incq(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementq() instead.
  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xFF);
  emit_byte(0xC0 | encode);
}

// 64-bit increment of a memory operand (FF /0; rax encodes the /0 field).
void Assembler::incq(Address dst) {
  // Don't use it directly. Use MacroAssembler::incrementq() instead.
  InstructionMark im(this);
  prefixq(dst);
  emit_byte(0xFF);
  emit_operand(rax, dst);
}
// Pointer-sized lea: on 64-bit this is simply leaq.
void Assembler::lea(Register dst, Address src) {
  leaq(dst, src);
}

// Load effective address, 64-bit (8D).
void Assembler::leaq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x8D);
  emit_operand(dst, src);
}
// Load a full 64-bit immediate into a register (REX.W B8+r imm64).
void Assembler::mov64(Register dst, int64_t imm64) {
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xB8 | encode);
  emit_long64(imm64);
}

// Same as mov64, but the embedded 64-bit word carries relocation info.
void Assembler::mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xB8 | encode);
  emit_data64(imm64, rspec);
}
// Load a 32-bit compressed-oop immediate into a register (B8+r imm32),
// tagging the immediate with narrow_oop_operand relocation format.
void Assembler::mov_narrow_oop(Register dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xB8 | encode);
  emit_data((int)imm32, rspec, narrow_oop_operand);
}

// Store a 32-bit compressed-oop immediate to memory (C7 /0 imm32).
void Assembler::mov_narrow_oop(Address dst, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0xC7);
  emit_operand(rax, dst, 4);
  emit_data((int)imm32, rspec, narrow_oop_operand);
}

// Compare a register with a 32-bit compressed-oop immediate (0x81 /7).
void Assembler::cmp_narrow_oop(Register src1, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefix_and_encode(src1->encoding());
  emit_byte(0x81);
  emit_byte(0xF8 | encode);
  emit_data((int)imm32, rspec, narrow_oop_operand);
}

// Compare memory with a 32-bit compressed-oop immediate (0x81 /7).
void Assembler::cmp_narrow_oop(Address src1, int32_t imm32, RelocationHolder const& rspec) {
  InstructionMark im(this);
  prefix(src1);
  emit_byte(0x81);
  emit_operand(rax, src1, 4);
  emit_data((int)imm32, rspec, narrow_oop_operand);
}
// Move a 64-bit GPR into the low quadword of an XMM register
// (66 REX.W 0F 6E). The 66 prefix must precede REX.
void Assembler::movdq(XMMRegister dst, Register src) {
  // table D-1 says MMX/SSE2
  NOT_LP64(assert(VM_Version::supports_sse2() || VM_Version::supports_mmx(), ""));
  emit_byte(0x66);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x6E);
  emit_byte(0xC0 | encode);
}

// Move the low quadword of an XMM register into a GPR (66 REX.W 0F 7E).
// For 7E the XMM register sits in ModRM.reg, hence the swapped arguments.
void Assembler::movdq(Register dst, XMMRegister src) {
  // table D-1 says MMX/SSE2
  NOT_LP64(assert(VM_Version::supports_sse2() || VM_Version::supports_mmx(), ""));
  emit_byte(0x66);
  // swap src/dst to get correct prefix
  int encode = prefixq_and_encode(src->encoding(), dst->encoding());
  emit_byte(0x0F);
  emit_byte(0x7E);
  emit_byte(0xC0 | encode);
}
// 64-bit register-register move (8B).
void Assembler::movq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x8B);
  emit_byte(0xC0 | encode);
}

// 64-bit load from memory (8B: MOV r64, r/m64).
void Assembler::movq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x8B);
  emit_operand(dst, src);
}

// 64-bit store to memory (89: MOV r/m64, r64).
void Assembler::movq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_byte(0x89);
  emit_operand(src, dst);
}
// Sign-extend a byte from memory into a 64-bit register (0F BE).
void Assembler::movsbq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x0F);
  emit_byte(0xBE);
  emit_operand(dst, src);
}

// Sign-extend the low byte of src into dst, 64-bit (0F BE).
void Assembler::movsbq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0xBE);
  emit_byte(0xC0 | encode);
}
// Deliberately disabled: the emitted encoding is wrong (see comment),
// so this guards against accidental use.
void Assembler::movslq(Register dst, int32_t imm32) {
  // dbx shows movslq(rcx, 3) as movq $0x0000000049000000,(%rbx)
  // and movslq(r8, 3); as movl $0x0000000048000000,(%rbx)
  // as a result we shouldn't use until tested at runtime...
  ShouldNotReachHere();
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xC7 | encode);
  emit_long(imm32);
}

// Store a sign-extended 32-bit immediate into a 64-bit memory slot (C7 /0).
void Assembler::movslq(Address dst, int32_t imm32) {
  assert(is_simm32(imm32), "lost bits");
  InstructionMark im(this);
  prefixq(dst);
  emit_byte(0xC7);
  emit_operand(rax, dst, 4);
  emit_long(imm32);
}

// MOVSXD: sign-extend a 32-bit memory operand into a 64-bit register (63).
void Assembler::movslq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x63);
  emit_operand(dst, src);
}

// MOVSXD, register-register form.
void Assembler::movslq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x63);
  emit_byte(0xC0 | encode);
}
// Sign-extend a 16-bit memory operand into a 64-bit register (0F BF).
void Assembler::movswq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x0F);
  emit_byte(0xBF);
  emit_operand(dst, src);
}

// Sign-extend the low word of src into dst, 64-bit (0F BF).
void Assembler::movswq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0xBF);
  emit_byte(0xC0 | encode);
}
// Zero-extend a byte from memory into a 64-bit register (0F B6).
void Assembler::movzbq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x0F);
  emit_byte(0xB6);
  emit_operand(dst, src);
}

// Zero-extend the low byte of src into dst, 64-bit (0F B6).
void Assembler::movzbq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0xB6);
  emit_byte(0xC0 | encode);
}
// Zero-extend a 16-bit memory operand into a 64-bit register (0F B7).
void Assembler::movzwq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x0F);
  emit_byte(0xB7);
  emit_operand(dst, src);
}

// Zero-extend the low word of src into dst, 64-bit (0F B7).
void Assembler::movzwq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0xB7);
  emit_byte(0xC0 | encode);
}
// 64-bit two's-complement negate (F7 /3).
void Assembler::negq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xF7);
  emit_byte(0xD8 | encode);
}

// 64-bit one's-complement NOT (F7 /2).
void Assembler::notq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xF7);
  emit_byte(0xD0 | encode);
}
// 64-bit OR of memory with a sign-extended 32-bit immediate (0x81 /1).
void Assembler::orq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_byte(0x81);
  // rcx's encoding (1) supplies the /1 opcode-extension field.
  emit_operand(rcx, dst, 4);
  emit_long(imm32);
}

// 64-bit OR of a register with a sign-extended 32-bit immediate.
void Assembler::orq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xC8, dst, imm32);
}

// 64-bit OR from memory (0x0B: OR r64, r/m64).
void Assembler::orq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x0B);
  emit_operand(dst, src);
}

// 64-bit OR, register-register.
void Assembler::orq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x0B, 0xC0, dst, src);
}
// Restore all 16 GPRs saved by pusha() (64-bit: there is no POPA
// instruction, so the frame layout is reloaded slot by slot).
// Slot 11 -- rsp's slot -- is deliberately skipped; rsp is recovered by
// the final addq instead.
void Assembler::popa() { // 64bit
  movq(r15, Address(rsp, 0));
  movq(r14, Address(rsp, wordSize));
  movq(r13, Address(rsp, 2 * wordSize));
  movq(r12, Address(rsp, 3 * wordSize));
  movq(r11, Address(rsp, 4 * wordSize));
  movq(r10, Address(rsp, 5 * wordSize));
  movq(r9,  Address(rsp, 6 * wordSize));
  movq(r8,  Address(rsp, 7 * wordSize));
  movq(rdi, Address(rsp, 8 * wordSize));
  movq(rsi, Address(rsp, 9 * wordSize));
  movq(rbp, Address(rsp, 10 * wordSize));
  // skip rsp
  movq(rbx, Address(rsp, 12 * wordSize));
  movq(rdx, Address(rsp, 13 * wordSize));
  movq(rcx, Address(rsp, 14 * wordSize));
  movq(rax, Address(rsp, 15 * wordSize));

  addq(rsp, 16 * wordSize);
}
// POPCNT r64, m64 (F3 REX.W 0F B8): population count of a memory operand.
// The F3 mandatory prefix must precede the REX prefix.
void Assembler::popcntq(Register dst, Address src) {
  assert(VM_Version::supports_popcnt(), "must support");
  InstructionMark im(this);
  emit_byte(0xF3);
  prefixq(src, dst);
  emit_byte(0x0F);
  emit_byte(0xB8);
  emit_operand(dst, src);
}

// POPCNT r64, r64 (F3 REX.W 0F B8), register-register form.
void Assembler::popcntq(Register dst, Register src) {
  assert(VM_Version::supports_popcnt(), "must support");
  emit_byte(0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0xB8);
  emit_byte(0xC0 | encode);
}
// POP into memory (8F /0; rax encodes the /0 opcode-extension field).
void Assembler::popq(Address dst) {
  InstructionMark im(this);
  prefixq(dst);
  emit_byte(0x8F);
  emit_operand(rax, dst);
}
// Save all 16 GPRs (64-bit: there is no PUSHA instruction, so a 16-slot
// frame is built by hand). rsp is stored first at -5*wordSize -- after the
// subsequent subq that address is slot 11, the slot popa() skips.
void Assembler::pusha() { // 64bit
  // we have to store original rsp.  ABI says that 128 bytes
  // below rsp are local scratch.
  movq(Address(rsp, -5 * wordSize), rsp);

  subq(rsp, 16 * wordSize);

  movq(Address(rsp, 15 * wordSize), rax);
  movq(Address(rsp, 14 * wordSize), rcx);
  movq(Address(rsp, 13 * wordSize), rdx);
  movq(Address(rsp, 12 * wordSize), rbx);
  // skip rsp
  movq(Address(rsp, 10 * wordSize), rbp);
  movq(Address(rsp, 9 * wordSize), rsi);
  movq(Address(rsp, 8 * wordSize), rdi);
  movq(Address(rsp, 7 * wordSize), r8);
  movq(Address(rsp, 6 * wordSize), r9);
  movq(Address(rsp, 5 * wordSize), r10);
  movq(Address(rsp, 4 * wordSize), r11);
  movq(Address(rsp, 3 * wordSize), r12);
  movq(Address(rsp, 2 * wordSize), r13);
  movq(Address(rsp, wordSize), r14);
  movq(Address(rsp, 0), r15);
}
// PUSH a memory operand (FF /6; rsi's encoding 6 supplies the /6 field).
void Assembler::pushq(Address src) {
  InstructionMark im(this);
  prefixq(src);
  emit_byte(0xFF);
  emit_operand(rsi, src);
}
// 64-bit rotate-through-carry left by an immediate (D1/C1 /2).
// Uses the one-byte D1 form when the count is 1.
// The imm8 >> 1 presumably widens a 32-bit count check to admit 64-bit
// shift counts up to 63 -- confirm against isShiftCount's definition.
void Assembler::rclq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_byte(0xD1);
    emit_byte(0xD0 | encode);
  } else {
    emit_byte(0xC1);
    emit_byte(0xD0 | encode);
    emit_byte(imm8);
  }
}

// 64-bit arithmetic shift right by an immediate (D1/C1 /7),
// with the one-byte D1 short form for a count of 1.
void Assembler::sarq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_byte(0xD1);
    emit_byte(0xF8 | encode);
  } else {
    emit_byte(0xC1);
    emit_byte(0xF8 | encode);
    emit_byte(imm8);
  }
}

// 64-bit arithmetic shift right by cl (D3 /7).
void Assembler::sarq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xD3);
  emit_byte(0xF8 | encode);
}
// 64-bit subtract-with-borrow of an immediate from memory (0x81 /3;
// rbx's encoding 3 supplies the /3 opcode-extension field).
void Assembler::sbbq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rbx, dst, imm32);
}

// 64-bit subtract-with-borrow of a sign-extended 32-bit immediate.
void Assembler::sbbq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xD8, dst, imm32);
}

// 64-bit subtract-with-borrow from memory (0x1B: SBB r64, r/m64).
void Assembler::sbbq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x1B);
  emit_operand(dst, src);
}

// 64-bit subtract-with-borrow, register-register.
void Assembler::sbbq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x1B, 0xC0, dst, src);
}
// 64-bit shift left by an immediate (D1/C1 /4), with the one-byte D1
// short form for a count of 1.
void Assembler::shlq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_byte(0xD1);
    emit_byte(0xE0 | encode);
  } else {
    emit_byte(0xC1);
    emit_byte(0xE0 | encode);
    emit_byte(imm8);
  }
}

// 64-bit shift left by cl (D3 /4).
void Assembler::shlq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xD3);
  emit_byte(0xE0 | encode);
}
4266 void Assembler::shrq(Register dst, int imm8) {
4267 assert(isShiftCount(imm8 >> 1), "illegal shift count");
4268 int encode = prefixq_and_encode(dst->encoding());
4269 emit_byte(0xC1);
4270 emit_byte(0xE8 | encode);
4271 emit_byte(imm8);
4272 }
// 64-bit logical shift right by cl (D3 /5).
void Assembler::shrq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xD3);
  emit_byte(0xE8 | encode);
}
// SQRTSD xmm, m64 (F2 0F 51): scalar double-precision square root.
// The F2 mandatory prefix must precede any REX prefix.
void Assembler::sqrtsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0xF2);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x51);
  emit_operand(dst, src);
}
// 64-bit subtract of an immediate from memory (/5; rbp's encoding 5
// supplies the opcode-extension field). Uses the sign-extended 8-bit
// 0x83 form when the immediate fits in a signed byte.
void Assembler::subq(Address dst, int32_t imm32) {
  InstructionMark im(this);
  prefixq(dst);
  if (is8bit(imm32)) {
    emit_byte(0x83);
    emit_operand(rbp, dst, 1);
    emit_byte(imm32 & 0xFF);
  } else {
    emit_byte(0x81);
    emit_operand(rbp, dst, 4);
    emit_long(imm32);
  }
}

// 64-bit subtract of a sign-extended 32-bit immediate from a register.
void Assembler::subq(Register dst, int32_t imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xE8, dst, imm32);
}

// 64-bit subtract of a register from memory (0x29: SUB r/m64, r64).
void Assembler::subq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_byte(0x29);
  emit_operand(src, dst);
}

// 64-bit subtract of a memory operand (0x2B: SUB r64, r/m64).
void Assembler::subq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x2B);
  emit_operand(dst, src);
}

// 64-bit subtract, register-register.
void Assembler::subq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x2B, 0xC0, dst, src);
}
// 64-bit TEST of a register against a 32-bit immediate.
void Assembler::testq(Register dst, int32_t imm32) {
  // not using emit_arith because test
  // doesn't support sign-extension of
  // 8bit operands
  int encode = dst->encoding();
  if (encode == 0) {
    // rax has a dedicated short form: REX.W A9 imm32.
    prefix(REX_W);
    emit_byte(0xA9);
  } else {
    // general form: F7 /0 imm32
    encode = prefixq_and_encode(encode);
    emit_byte(0xF7);
    emit_byte(0xC0 | encode);
  }
  emit_long(imm32);
}

// 64-bit TEST, register-register (0x85).
void Assembler::testq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x85, 0xC0, dst, src);
}
// XADD r/m64, r64 (0F C1): exchange-and-add. Caller adds any LOCK prefix.
void Assembler::xaddq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_byte(0x0F);
  emit_byte(0xC1);
  emit_operand(src, dst);
}

// XCHG with memory (0x87); implicitly locked by the processor.
void Assembler::xchgq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x87);
  emit_operand(dst, src);
}

// XCHG, register-register (0x87).
void Assembler::xchgq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x87);
  emit_byte(0xc0 | encode);
}
// 64-bit XOR, register-register (0x33).
void Assembler::xorq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x33, 0xC0, dst, src);
}

// 64-bit XOR from memory (0x33: XOR r64, r/m64).
void Assembler::xorq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x33);
  emit_operand(dst, src);
}
4382 #endif // !LP64
// Maps each x86 condition code to its logical negation, indexed by the
// condition's 4-bit encoding (noted in the trailing comments).
static Assembler::Condition reverse[] = {
    Assembler::noOverflow     /* overflow      = 0x0 */ ,
    Assembler::overflow       /* noOverflow    = 0x1 */ ,
    Assembler::aboveEqual     /* carrySet      = 0x2, below         = 0x2 */ ,
    Assembler::below          /* aboveEqual    = 0x3, carryClear    = 0x3 */ ,
    Assembler::notZero        /* zero          = 0x4, equal         = 0x4 */ ,
    Assembler::zero           /* notZero       = 0x5, notEqual      = 0x5 */ ,
    Assembler::above          /* belowEqual    = 0x6 */ ,
    Assembler::belowEqual     /* above         = 0x7 */ ,
    Assembler::positive       /* negative      = 0x8 */ ,
    Assembler::negative       /* positive      = 0x9 */ ,
    Assembler::noParity       /* parity        = 0xa */ ,
    Assembler::parity         /* noParity      = 0xb */ ,
    Assembler::greaterEqual   /* less          = 0xc */ ,
    Assembler::less           /* greaterEqual  = 0xd */ ,
    Assembler::greater        /* lessEqual     = 0xe */ ,
    Assembler::lessEqual      /* greater       = 0xf, */

};
4405 // Implementation of MacroAssembler
4407 // First all the versions that have distinct versions depending on 32/64 bit
4408 // Unless the difference is trivial (1 line or so).
4410 #ifndef _LP64
4412 // 32bit versions
// 32-bit: an AddressLiteral is always directly addressable, so it maps
// straight to an absolute Address carrying the same relocation spec.
Address MacroAssembler::as_Address(AddressLiteral adr) {
  return Address(adr.target(), adr.rspec());
}

// 32-bit: build an Address for an array element from an ArrayAddress.
Address MacroAssembler::as_Address(ArrayAddress adr) {
  return Address::make_array(adr);
}
4422 int MacroAssembler::biased_locking_enter(Register lock_reg,
4423 Register obj_reg,
4424 Register swap_reg,
4425 Register tmp_reg,
4426 bool swap_reg_contains_mark,
4427 Label& done,
4428 Label* slow_case,
4429 BiasedLockingCounters* counters) {
4430 assert(UseBiasedLocking, "why call this otherwise?");
4431 assert(swap_reg == rax, "swap_reg must be rax, for cmpxchg");
4432 assert_different_registers(lock_reg, obj_reg, swap_reg);
4434 if (PrintBiasedLockingStatistics && counters == NULL)
4435 counters = BiasedLocking::counters();
4437 bool need_tmp_reg = false;
4438 if (tmp_reg == noreg) {
4439 need_tmp_reg = true;
4440 tmp_reg = lock_reg;
4441 } else {
4442 assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
4443 }
4444 assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
4445 Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes());
4446 Address klass_addr (obj_reg, oopDesc::klass_offset_in_bytes());
4447 Address saved_mark_addr(lock_reg, 0);
4449 // Biased locking
4450 // See whether the lock is currently biased toward our thread and
4451 // whether the epoch is still valid
4452 // Note that the runtime guarantees sufficient alignment of JavaThread
4453 // pointers to allow age to be placed into low bits
4454 // First check to see whether biasing is even enabled for this object
4455 Label cas_label;
4456 int null_check_offset = -1;
4457 if (!swap_reg_contains_mark) {
4458 null_check_offset = offset();
4459 movl(swap_reg, mark_addr);
4460 }
4461 if (need_tmp_reg) {
4462 push(tmp_reg);
4463 }
4464 movl(tmp_reg, swap_reg);
4465 andl(tmp_reg, markOopDesc::biased_lock_mask_in_place);
4466 cmpl(tmp_reg, markOopDesc::biased_lock_pattern);
4467 if (need_tmp_reg) {
4468 pop(tmp_reg);
4469 }
4470 jcc(Assembler::notEqual, cas_label);
4471 // The bias pattern is present in the object's header. Need to check
4472 // whether the bias owner and the epoch are both still current.
4473 // Note that because there is no current thread register on x86 we
4474 // need to store off the mark word we read out of the object to
4475 // avoid reloading it and needing to recheck invariants below. This
4476 // store is unfortunate but it makes the overall code shorter and
4477 // simpler.
4478 movl(saved_mark_addr, swap_reg);
4479 if (need_tmp_reg) {
4480 push(tmp_reg);
4481 }
4482 get_thread(tmp_reg);
4483 xorl(swap_reg, tmp_reg);
4484 if (swap_reg_contains_mark) {
4485 null_check_offset = offset();
4486 }
4487 movl(tmp_reg, klass_addr);
4488 xorl(swap_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
4489 andl(swap_reg, ~((int) markOopDesc::age_mask_in_place));
4490 if (need_tmp_reg) {
4491 pop(tmp_reg);
4492 }
4493 if (counters != NULL) {
4494 cond_inc32(Assembler::zero,
4495 ExternalAddress((address)counters->biased_lock_entry_count_addr()));
4496 }
4497 jcc(Assembler::equal, done);
4499 Label try_revoke_bias;
4500 Label try_rebias;
4502 // At this point we know that the header has the bias pattern and
4503 // that we are not the bias owner in the current epoch. We need to
4504 // figure out more details about the state of the header in order to
4505 // know what operations can be legally performed on the object's
4506 // header.
4508 // If the low three bits in the xor result aren't clear, that means
4509 // the prototype header is no longer biased and we have to revoke
4510 // the bias on this object.
4511 testl(swap_reg, markOopDesc::biased_lock_mask_in_place);
4512 jcc(Assembler::notZero, try_revoke_bias);
4514 // Biasing is still enabled for this data type. See whether the
4515 // epoch of the current bias is still valid, meaning that the epoch
4516 // bits of the mark word are equal to the epoch bits of the
4517 // prototype header. (Note that the prototype header's epoch bits
4518 // only change at a safepoint.) If not, attempt to rebias the object
4519 // toward the current thread. Note that we must be absolutely sure
4520 // that the current epoch is invalid in order to do this because
4521 // otherwise the manipulations it performs on the mark word are
4522 // illegal.
4523 testl(swap_reg, markOopDesc::epoch_mask_in_place);
4524 jcc(Assembler::notZero, try_rebias);
4526 // The epoch of the current bias is still valid but we know nothing
4527 // about the owner; it might be set or it might be clear. Try to
4528 // acquire the bias of the object using an atomic operation. If this
4529 // fails we will go in to the runtime to revoke the object's bias.
4530 // Note that we first construct the presumed unbiased header so we
4531 // don't accidentally blow away another thread's valid bias.
4532 movl(swap_reg, saved_mark_addr);
4533 andl(swap_reg,
4534 markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
4535 if (need_tmp_reg) {
4536 push(tmp_reg);
4537 }
4538 get_thread(tmp_reg);
4539 orl(tmp_reg, swap_reg);
4540 if (os::is_MP()) {
4541 lock();
4542 }
4543 cmpxchgptr(tmp_reg, Address(obj_reg, 0));
4544 if (need_tmp_reg) {
4545 pop(tmp_reg);
4546 }
4547 // If the biasing toward our thread failed, this means that
4548 // another thread succeeded in biasing it toward itself and we
4549 // need to revoke that bias. The revocation will occur in the
4550 // interpreter runtime in the slow case.
4551 if (counters != NULL) {
4552 cond_inc32(Assembler::zero,
4553 ExternalAddress((address)counters->anonymously_biased_lock_entry_count_addr()));
4554 }
4555 if (slow_case != NULL) {
4556 jcc(Assembler::notZero, *slow_case);
4557 }
4558 jmp(done);
4560 bind(try_rebias);
4561 // At this point we know the epoch has expired, meaning that the
4562 // current "bias owner", if any, is actually invalid. Under these
4563 // circumstances _only_, we are allowed to use the current header's
4564 // value as the comparison value when doing the cas to acquire the
4565 // bias in the current epoch. In other words, we allow transfer of
4566 // the bias from one thread to another directly in this situation.
4567 //
4568 // FIXME: due to a lack of registers we currently blow away the age
4569 // bits in this situation. Should attempt to preserve them.
4570 if (need_tmp_reg) {
4571 push(tmp_reg);
4572 }
4573 get_thread(tmp_reg);
4574 movl(swap_reg, klass_addr);
4575 orl(tmp_reg, Address(swap_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
4576 movl(swap_reg, saved_mark_addr);
4577 if (os::is_MP()) {
4578 lock();
4579 }
4580 cmpxchgptr(tmp_reg, Address(obj_reg, 0));
4581 if (need_tmp_reg) {
4582 pop(tmp_reg);
4583 }
4584 // If the biasing toward our thread failed, then another thread
4585 // succeeded in biasing it toward itself and we need to revoke that
4586 // bias. The revocation will occur in the runtime in the slow case.
4587 if (counters != NULL) {
4588 cond_inc32(Assembler::zero,
4589 ExternalAddress((address)counters->rebiased_lock_entry_count_addr()));
4590 }
4591 if (slow_case != NULL) {
4592 jcc(Assembler::notZero, *slow_case);
4593 }
4594 jmp(done);
4596 bind(try_revoke_bias);
4597 // The prototype mark in the klass doesn't have the bias bit set any
4598 // more, indicating that objects of this data type are not supposed
4599 // to be biased any more. We are going to try to reset the mark of
4600 // this object to the prototype value and fall through to the
4601 // CAS-based locking scheme. Note that if our CAS fails, it means
4602 // that another thread raced us for the privilege of revoking the
4603 // bias of this particular object, so it's okay to continue in the
4604 // normal locking code.
4605 //
4606 // FIXME: due to a lack of registers we currently blow away the age
4607 // bits in this situation. Should attempt to preserve them.
4608 movl(swap_reg, saved_mark_addr);
4609 if (need_tmp_reg) {
4610 push(tmp_reg);
4611 }
4612 movl(tmp_reg, klass_addr);
4613 movl(tmp_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
4614 if (os::is_MP()) {
4615 lock();
4616 }
4617 cmpxchgptr(tmp_reg, Address(obj_reg, 0));
4618 if (need_tmp_reg) {
4619 pop(tmp_reg);
4620 }
4621 // Fall through to the normal CAS-based lock, because no matter what
4622 // the result of the above CAS, some thread must have succeeded in
4623 // removing the bias bit from the object's header.
4624 if (counters != NULL) {
4625 cond_inc32(Assembler::zero,
4626 ExternalAddress((address)counters->revoked_lock_entry_count_addr()));
4627 }
4629 bind(cas_label);
4631 return null_check_offset;
4632 }
// Call a VM-leaf (no Java frame, no GC interaction) runtime entry point.
// The caller has already pushed the C arguments; pop them after the call
// by bumping rsp (cdecl-style caller cleanup).
void MacroAssembler::call_VM_leaf_base(address entry_point,
                                       int number_of_arguments) {
  call(RuntimeAddress(entry_point));
  increment(rsp, number_of_arguments * wordSize);
}
// Compare a memory operand against an oop constant. The oop is embedded as
// a 32-bit immediate with an oop relocation so the GC can patch it.
void MacroAssembler::cmpoop(Address src1, jobject obj) {
  cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
}
// Compare a register against an oop constant (32-bit immediate with an
// oop relocation so the GC can patch the embedded pointer).
void MacroAssembler::cmpoop(Register src1, jobject obj) {
  cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
}
// Sign-extend the 32-bit value in lo into hi, producing the hi:lo register
// pair expected by idiv.
void MacroAssembler::extend_sign(Register hi, Register lo) {
  // According to Intel Doc. AP-526, "Integer Divide", p.18.
  if (VM_Version::is_P6() && hi == rdx && lo == rax) {
    // CDQ sign-extends eax into edx in a single instruction.
    cdql();
  } else {
    movl(hi, lo);
    sarl(hi, 31);  // replicate lo's sign bit through all of hi
  }
}
void MacroAssembler::fat_nop() {
  // A 5 byte nop that is safe for patching (see patch_verified_entry).
  // Built from four segment-override prefixes on a one-byte nop so the
  // whole sequence is a single instruction and can be overwritten atomically.
  emit_byte(0x26); // es:
  emit_byte(0x2e); // cs:
  emit_byte(0x64); // fs:
  emit_byte(0x65); // gs:
  emit_byte(0x90);
}
// Branch to L if FPU condition flag C2 is set. fnstsw_ax copies the FPU
// status word into ax and sahf moves C2 into the CPU parity flag; rax is
// preserved via save/restore through tmp.
void MacroAssembler::jC2(Register tmp, Label& L) {
  // set parity bit if FPU flag C2 is set (via rax)
  save_rax(tmp);
  fwait(); fnstsw_ax();
  sahf();
  restore_rax(tmp);
  // branch
  jcc(Assembler::parity, L);
}
// Branch to L if FPU condition flag C2 is clear (inverse of jC2).
void MacroAssembler::jnC2(Register tmp, Label& L) {
  // set parity bit if FPU flag C2 is set (via rax)
  save_rax(tmp);
  fwait(); fnstsw_ax();
  sahf();
  restore_rax(tmp);
  // branch
  jcc(Assembler::noParity, L);
}
4686 // 32bit can do a case table jump in one instruction but we no longer allow the base
4687 // to be installed in the Address class
// Indirect jump through a dispatch (case) table entry; on 32-bit this is a
// single jmp since the table address can be encoded directly.
void MacroAssembler::jump(ArrayAddress entry) {
  jmp(as_Address(entry));
}
4692 // Note: y_lo will be destroyed
4693 void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
4694 // Long compare for Java (semantics as described in JVM spec.)
4695 Label high, low, done;
4697 cmpl(x_hi, y_hi);
4698 jcc(Assembler::less, low);
4699 jcc(Assembler::greater, high);
4700 // x_hi is the return register
4701 xorl(x_hi, x_hi);
4702 cmpl(x_lo, y_lo);
4703 jcc(Assembler::below, low);
4704 jcc(Assembler::equal, done);
4706 bind(high);
4707 xorl(x_hi, x_hi);
4708 increment(x_hi);
4709 jmp(done);
4711 bind(low);
4712 xorl(x_hi, x_hi);
4713 decrementl(x_hi);
4715 bind(done);
4716 }
// Load the literal address (with its relocation) into dst.
void MacroAssembler::lea(Register dst, AddressLiteral src) {
  mov_literal32(dst, (int32_t)src.target(), src.rspec());
}
// Store the literal address (with its relocation) into a memory operand.
void MacroAssembler::lea(Address dst, AddressLiteral adr) {
  // leal(dst, as_Address(adr));
  // see note in movl as to why we must use a move
  mov_literal32(dst, (int32_t) adr.target(), adr.rspec());
}
// Tear down the current frame: restore rsp from rbp, then pop the saved rbp.
void MacroAssembler::leave() {
  mov(rsp, rbp);
  pop(rbp);
}
void MacroAssembler::lmul(int x_rsp_offset, int y_rsp_offset) {
  // Multiplication of two Java long values stored on the stack
  // as illustrated below. Result is in rdx:rax.
  // Clobbers rax, rbx, rcx and rdx.
  //
  // rsp ---> [ ?? ] \               \
  //            ....  | y_rsp_offset  |
  //          [ y_lo ] /  (in bytes)  | x_rsp_offset
  //          [ y_hi ]                | (in bytes)
  //            ....                  |
  //          [ x_lo ]               /
  //          [ x_hi ]
  //            ....
  //
  // Basic idea: lo(result) = lo(x_lo * y_lo)
  //             hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) + lo(x_lo * y_hi)
  Address x_hi(rsp, x_rsp_offset + wordSize); Address x_lo(rsp, x_rsp_offset);
  Address y_hi(rsp, y_rsp_offset + wordSize); Address y_lo(rsp, y_rsp_offset);
  Label quick;
  // load x_hi, y_hi and check if quick
  // multiplication is possible
  movl(rbx, x_hi);
  movl(rcx, y_hi);
  movl(rax, rbx);
  orl(rbx, rcx);                                 // rbx, = 0 <=> x_hi = 0 and y_hi = 0
  jcc(Assembler::zero, quick);                   // if rbx, = 0 do quick multiply
  // do full multiplication
  // 1st step
  mull(y_lo);                                    // x_hi * y_lo
  movl(rbx, rax);                                // save lo(x_hi * y_lo) in rbx,
  // 2nd step
  movl(rax, x_lo);
  mull(rcx);                                     // x_lo * y_hi
  addl(rbx, rax);                                // add lo(x_lo * y_hi) to rbx,
  // 3rd step
  bind(quick);                                   // note: rbx, = 0 if quick multiply!
  movl(rax, x_lo);
  mull(y_lo);                                    // x_lo * y_lo
  addl(rdx, rbx);                                // correct hi(x_lo * y_lo)
}
// Two's-complement negate the 64-bit value in hi:lo.
// -(x) = ~(x) + 1 is implemented as neg(lo); hi = -(hi + carry).
void MacroAssembler::lneg(Register hi, Register lo) {
  negl(lo);
  adcl(hi, 0);
  negl(hi);
}
4779 void MacroAssembler::lshl(Register hi, Register lo) {
4780 // Java shift left long support (semantics as described in JVM spec., p.305)
4781 // (basic idea for shift counts s >= n: x << s == (x << n) << (s - n))
4782 // shift value is in rcx !
4783 assert(hi != rcx, "must not use rcx");
4784 assert(lo != rcx, "must not use rcx");
4785 const Register s = rcx; // shift count
4786 const int n = BitsPerWord;
4787 Label L;
4788 andl(s, 0x3f); // s := s & 0x3f (s < 0x40)
4789 cmpl(s, n); // if (s < n)
4790 jcc(Assembler::less, L); // else (s >= n)
4791 movl(hi, lo); // x := x << n
4792 xorl(lo, lo);
4793 // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
4794 bind(L); // s (mod n) < n
4795 shldl(hi, lo); // x := x << s
4796 shll(lo);
4797 }
4800 void MacroAssembler::lshr(Register hi, Register lo, bool sign_extension) {
4801 // Java shift right long support (semantics as described in JVM spec., p.306 & p.310)
4802 // (basic idea for shift counts s >= n: x >> s == (x >> n) >> (s - n))
4803 assert(hi != rcx, "must not use rcx");
4804 assert(lo != rcx, "must not use rcx");
4805 const Register s = rcx; // shift count
4806 const int n = BitsPerWord;
4807 Label L;
4808 andl(s, 0x3f); // s := s & 0x3f (s < 0x40)
4809 cmpl(s, n); // if (s < n)
4810 jcc(Assembler::less, L); // else (s >= n)
4811 movl(lo, hi); // x := x >> n
4812 if (sign_extension) sarl(hi, 31);
4813 else xorl(hi, hi);
4814 // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
4815 bind(L); // s (mod n) < n
4816 shrdl(lo, hi); // x := x >> s
4817 if (sign_extension) sarl(hi);
4818 else shrl(hi);
4819 }
// Load an oop constant into dst as a patchable 32-bit immediate.
void MacroAssembler::movoop(Register dst, jobject obj) {
  mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
}
// Store an oop constant to memory as a patchable 32-bit immediate.
void MacroAssembler::movoop(Address dst, jobject obj) {
  mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
}
// Load a pointer-sized value from an AddressLiteral: for an lval the literal
// address itself is materialized (with relocation), otherwise the word at
// that address is loaded.
void MacroAssembler::movptr(Register dst, AddressLiteral src) {
  if (src.is_lval()) {
    mov_literal32(dst, (intptr_t)src.target(), src.rspec());
  } else {
    movl(dst, as_Address(src));
  }
}
// Store a pointer-sized register into an array (table) slot.
void MacroAssembler::movptr(ArrayAddress dst, Register src) {
  movl(as_Address(dst), src);
}
// Load a pointer-sized value from an array (table) slot.
void MacroAssembler::movptr(Register dst, ArrayAddress src) {
  movl(dst, as_Address(src));
}
// src should NEVER be a real pointer. Use AddressLiteral for true pointers
// (real pointers need relocations; a plain immediate would not be patchable).
void MacroAssembler::movptr(Address dst, intptr_t src) {
  movl(dst, src);
}
// Load a double from a literal address; on 32-bit every absolute address is
// directly encodable, so no reachability check is needed.
void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src) {
  movsd(dst, as_Address(src));
}
// Restore the registers saved by push_callee_saved_registers (reverse order).
void MacroAssembler::pop_callee_saved_registers() {
  pop(rcx);
  pop(rdx);
  pop(rdi);
  pop(rsi);
}
// Reload the FPU top-of-stack double from the CPU stack (two 32-bit words)
// and release the stack slots. Inverse of push_fTOS.
void MacroAssembler::pop_fTOS() {
  fld_d(Address(rsp, 0));
  addl(rsp, 2 * wordSize);
}
// Save the registers that pop_callee_saved_registers restores.
void MacroAssembler::push_callee_saved_registers() {
  push(rsi);
  push(rdi);
  push(rdx);
  push(rcx);
}
// Spill the FPU top-of-stack double to the CPU stack (pops the FPU stack).
// Inverse of pop_fTOS.
void MacroAssembler::push_fTOS() {
  subl(rsp, 2 * wordSize);
  fstp_d(Address(rsp, 0));
}
// Push an oop constant as a patchable 32-bit immediate.
void MacroAssembler::pushoop(jobject obj) {
  push_literal32((int32_t)obj, oop_Relocation::spec_for_immediate());
}
// Push either the literal address itself (lval, with relocation) or the
// word stored at that address.
void MacroAssembler::pushptr(AddressLiteral src) {
  if (src.is_lval()) {
    push_literal32((int32_t)src.target(), src.rspec());
  } else {
    pushl(as_Address(src));
  }
}
// dst := (ZF clear ? 1 : 0) as a full word. Clearing dst first is safe
// because xor of a register with itself does not change ZF's prior use here
// only via set_byte_if_not_zero, which reads the flags set by the caller.
// NOTE(review): relies on xorl not being placed between the caller's compare
// and the setcc in set_byte_if_not_zero changing the tested flag — xor does
// set flags, so callers must test a condition preserved by it; verify usage.
void MacroAssembler::set_word_if_not_zero(Register dst) {
  xorl(dst, dst);
  set_byte_if_not_zero(dst);
}
// 32-bit C calling convention passes arguments on the stack.
static void pass_arg0(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}
// 32-bit C calling convention passes arguments on the stack.
static void pass_arg1(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}
// 32-bit C calling convention passes arguments on the stack.
static void pass_arg2(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}
// 32-bit C calling convention passes arguments on the stack.
static void pass_arg3(MacroAssembler* masm, Register arg) {
  masm->push(arg);
}
4914 #ifndef PRODUCT
4915 extern "C" void findpc(intptr_t x);
4916 #endif
4918 void MacroAssembler::debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg) {
4919 // In order to get locks to work, we need to fake a in_VM state
4920 JavaThread* thread = JavaThread::current();
4921 JavaThreadState saved_state = thread->thread_state();
4922 thread->set_thread_state(_thread_in_vm);
4923 if (ShowMessageBoxOnError) {
4924 JavaThread* thread = JavaThread::current();
4925 JavaThreadState saved_state = thread->thread_state();
4926 thread->set_thread_state(_thread_in_vm);
4927 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
4928 ttyLocker ttyl;
4929 BytecodeCounter::print();
4930 }
4931 // To see where a verify_oop failed, get $ebx+40/X for this frame.
4932 // This is the value of eip which points to where verify_oop will return.
4933 if (os::message_box(msg, "Execution stopped, print registers?")) {
4934 ttyLocker ttyl;
4935 tty->print_cr("eip = 0x%08x", eip);
4936 #ifndef PRODUCT
4937 tty->cr();
4938 findpc(eip);
4939 tty->cr();
4940 #endif
4941 tty->print_cr("rax, = 0x%08x", rax);
4942 tty->print_cr("rbx, = 0x%08x", rbx);
4943 tty->print_cr("rcx = 0x%08x", rcx);
4944 tty->print_cr("rdx = 0x%08x", rdx);
4945 tty->print_cr("rdi = 0x%08x", rdi);
4946 tty->print_cr("rsi = 0x%08x", rsi);
4947 tty->print_cr("rbp, = 0x%08x", rbp);
4948 tty->print_cr("rsp = 0x%08x", rsp);
4949 BREAKPOINT;
4950 }
4951 } else {
4952 ttyLocker ttyl;
4953 ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
4954 assert(false, "DEBUG MESSAGE");
4955 }
4956 ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
4957 }
// Emit code that halts execution with a message: builds the argument list
// for debug32 (message, eip via a call-to-next-instruction trick, then all
// registers via pusha) and traps with hlt if debug32 returns.
void MacroAssembler::stop(const char* msg) {
  ExternalAddress message((address)msg);
  // push address of message
  pushptr(message.addr());
  { Label L; call(L, relocInfo::none); bind(L); }  // push eip
  pusha();                                         // push registers
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32)));
  hlt();
}
// Emit code that prints a warning message via the VM's warning() routine
// and continues execution; all CPU state is preserved around the call.
void MacroAssembler::warn(const char* msg) {
  push_CPU_state();

  ExternalAddress message((address) msg);
  // push address of message
  pushptr(message.addr());

  call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));
  addl(rsp, wordSize);       // discard argument
  pop_CPU_state();
}
4981 #else // _LP64
4983 // 64 bit versions
Address MacroAssembler::as_Address(AddressLiteral adr) {
  // amd64 always does this as a pc-rel
  // we can be absolute or disp based on the instruction type
  // jmp/call are displacements others are absolute
  assert(!adr.is_lval(), "must be rval");
  assert(reachable(adr), "must be");  // target must be within rip-relative range
  return Address((int32_t)(intptr_t)(adr.target() - pc()), adr.target(), adr.reloc());
}
// Turn an ArrayAddress into a regular Address by materializing the table
// base in rscratch1 (64-bit addresses don't fit in a displacement).
Address MacroAssembler::as_Address(ArrayAddress adr) {
  AddressLiteral base = adr.base();
  lea(rscratch1, base);
  Address index = adr.index();
  assert(index._disp == 0, "must not have disp"); // maybe it can?
  Address array(rscratch1, index._index, index._scale, index._disp);
  return array;
}
5004 int MacroAssembler::biased_locking_enter(Register lock_reg,
5005 Register obj_reg,
5006 Register swap_reg,
5007 Register tmp_reg,
5008 bool swap_reg_contains_mark,
5009 Label& done,
5010 Label* slow_case,
5011 BiasedLockingCounters* counters) {
5012 assert(UseBiasedLocking, "why call this otherwise?");
5013 assert(swap_reg == rax, "swap_reg must be rax for cmpxchgq");
5014 assert(tmp_reg != noreg, "tmp_reg must be supplied");
5015 assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
5016 assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
5017 Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes());
5018 Address saved_mark_addr(lock_reg, 0);
5020 if (PrintBiasedLockingStatistics && counters == NULL)
5021 counters = BiasedLocking::counters();
5023 // Biased locking
5024 // See whether the lock is currently biased toward our thread and
5025 // whether the epoch is still valid
5026 // Note that the runtime guarantees sufficient alignment of JavaThread
5027 // pointers to allow age to be placed into low bits
5028 // First check to see whether biasing is even enabled for this object
5029 Label cas_label;
5030 int null_check_offset = -1;
5031 if (!swap_reg_contains_mark) {
5032 null_check_offset = offset();
5033 movq(swap_reg, mark_addr);
5034 }
5035 movq(tmp_reg, swap_reg);
5036 andq(tmp_reg, markOopDesc::biased_lock_mask_in_place);
5037 cmpq(tmp_reg, markOopDesc::biased_lock_pattern);
5038 jcc(Assembler::notEqual, cas_label);
5039 // The bias pattern is present in the object's header. Need to check
5040 // whether the bias owner and the epoch are both still current.
5041 load_prototype_header(tmp_reg, obj_reg);
5042 orq(tmp_reg, r15_thread);
5043 xorq(tmp_reg, swap_reg);
5044 andq(tmp_reg, ~((int) markOopDesc::age_mask_in_place));
5045 if (counters != NULL) {
5046 cond_inc32(Assembler::zero,
5047 ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr()));
5048 }
5049 jcc(Assembler::equal, done);
5051 Label try_revoke_bias;
5052 Label try_rebias;
5054 // At this point we know that the header has the bias pattern and
5055 // that we are not the bias owner in the current epoch. We need to
5056 // figure out more details about the state of the header in order to
5057 // know what operations can be legally performed on the object's
5058 // header.
5060 // If the low three bits in the xor result aren't clear, that means
5061 // the prototype header is no longer biased and we have to revoke
5062 // the bias on this object.
5063 testq(tmp_reg, markOopDesc::biased_lock_mask_in_place);
5064 jcc(Assembler::notZero, try_revoke_bias);
5066 // Biasing is still enabled for this data type. See whether the
5067 // epoch of the current bias is still valid, meaning that the epoch
5068 // bits of the mark word are equal to the epoch bits of the
5069 // prototype header. (Note that the prototype header's epoch bits
5070 // only change at a safepoint.) If not, attempt to rebias the object
5071 // toward the current thread. Note that we must be absolutely sure
5072 // that the current epoch is invalid in order to do this because
5073 // otherwise the manipulations it performs on the mark word are
5074 // illegal.
5075 testq(tmp_reg, markOopDesc::epoch_mask_in_place);
5076 jcc(Assembler::notZero, try_rebias);
5078 // The epoch of the current bias is still valid but we know nothing
5079 // about the owner; it might be set or it might be clear. Try to
5080 // acquire the bias of the object using an atomic operation. If this
5081 // fails we will go in to the runtime to revoke the object's bias.
5082 // Note that we first construct the presumed unbiased header so we
5083 // don't accidentally blow away another thread's valid bias.
5084 andq(swap_reg,
5085 markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
5086 movq(tmp_reg, swap_reg);
5087 orq(tmp_reg, r15_thread);
5088 if (os::is_MP()) {
5089 lock();
5090 }
5091 cmpxchgq(tmp_reg, Address(obj_reg, 0));
5092 // If the biasing toward our thread failed, this means that
5093 // another thread succeeded in biasing it toward itself and we
5094 // need to revoke that bias. The revocation will occur in the
5095 // interpreter runtime in the slow case.
5096 if (counters != NULL) {
5097 cond_inc32(Assembler::zero,
5098 ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr()));
5099 }
5100 if (slow_case != NULL) {
5101 jcc(Assembler::notZero, *slow_case);
5102 }
5103 jmp(done);
5105 bind(try_rebias);
5106 // At this point we know the epoch has expired, meaning that the
5107 // current "bias owner", if any, is actually invalid. Under these
5108 // circumstances _only_, we are allowed to use the current header's
5109 // value as the comparison value when doing the cas to acquire the
5110 // bias in the current epoch. In other words, we allow transfer of
5111 // the bias from one thread to another directly in this situation.
5112 //
5113 // FIXME: due to a lack of registers we currently blow away the age
5114 // bits in this situation. Should attempt to preserve them.
5115 load_prototype_header(tmp_reg, obj_reg);
5116 orq(tmp_reg, r15_thread);
5117 if (os::is_MP()) {
5118 lock();
5119 }
5120 cmpxchgq(tmp_reg, Address(obj_reg, 0));
5121 // If the biasing toward our thread failed, then another thread
5122 // succeeded in biasing it toward itself and we need to revoke that
5123 // bias. The revocation will occur in the runtime in the slow case.
5124 if (counters != NULL) {
5125 cond_inc32(Assembler::zero,
5126 ExternalAddress((address) counters->rebiased_lock_entry_count_addr()));
5127 }
5128 if (slow_case != NULL) {
5129 jcc(Assembler::notZero, *slow_case);
5130 }
5131 jmp(done);
5133 bind(try_revoke_bias);
5134 // The prototype mark in the klass doesn't have the bias bit set any
5135 // more, indicating that objects of this data type are not supposed
5136 // to be biased any more. We are going to try to reset the mark of
5137 // this object to the prototype value and fall through to the
5138 // CAS-based locking scheme. Note that if our CAS fails, it means
5139 // that another thread raced us for the privilege of revoking the
5140 // bias of this particular object, so it's okay to continue in the
5141 // normal locking code.
5142 //
5143 // FIXME: due to a lack of registers we currently blow away the age
5144 // bits in this situation. Should attempt to preserve them.
5145 load_prototype_header(tmp_reg, obj_reg);
5146 if (os::is_MP()) {
5147 lock();
5148 }
5149 cmpxchgq(tmp_reg, Address(obj_reg, 0));
5150 // Fall through to the normal CAS-based lock, because no matter what
5151 // the result of the above CAS, some thread must have succeeded in
5152 // removing the bias bit from the object's header.
5153 if (counters != NULL) {
5154 cond_inc32(Assembler::zero,
5155 ExternalAddress((address) counters->revoked_lock_entry_count_addr()));
5156 }
5158 bind(cas_label);
5160 return null_check_offset;
5161 }
// Call a VM-leaf runtime entry point with the stack aligned to 16 bytes as
// required by the AMD64 ABI. Arguments are already in registers.
void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) {
  Label L, E;

#ifdef _WIN64
  // Windows always allocates space for its register args
  assert(num_args <= 4, "only register arguments supported");
  subq(rsp, frame::arg_reg_save_area_bytes);
#endif

  // Align stack if necessary
  testl(rsp, 15);
  jcc(Assembler::zero, L);

  // rsp is 8 mod 16: push one extra word so the call sees a 16-byte
  // aligned stack, then undo it afterwards.
  subq(rsp, 8);
  {
    call(RuntimeAddress(entry_point));
  }
  addq(rsp, 8);
  jmp(E);

  bind(L);
  {
    call(RuntimeAddress(entry_point));
  }

  bind(E);

#ifdef _WIN64
  // restore stack pointer
  addq(rsp, frame::arg_reg_save_area_bytes);
#endif
}
// 64-bit compare of src1 against the value stored at a literal address.
// If the address is out of rip-relative range, it is materialized in
// rscratch1 first.
void MacroAssembler::cmp64(Register src1, AddressLiteral src2) {
  assert(!src2.is_lval(), "should use cmpptr");

  if (reachable(src2)) {
    cmpq(src1, as_Address(src2));
  } else {
    lea(rscratch1, src2);
    Assembler::cmpq(src1, Address(rscratch1, 0));
  }
}
int MacroAssembler::corrected_idivq(Register reg) {
  // Full implementation of Java ldiv and lrem; checks for special
  // case as described in JVM spec., p.243 & p.271. The function
  // returns the (pc) offset of the idivl instruction - may be needed
  // for implicit exceptions.
  //
  //         normal case                           special case
  //
  // input : rax: dividend                         min_long
  //         reg: divisor   (may not be eax/edx)   -1
  //
  // output: rax: quotient  (= rax idiv reg)       min_long
  //         rdx: remainder (= rax irem reg)       0
  assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
  static const int64_t min_long = 0x8000000000000000;
  Label normal_case, special_case;

  // check for special case: min_long / -1 would trap in idiv,
  // so it is handled by hand (result: quotient = min_long, remainder = 0)
  cmp64(rax, ExternalAddress((address) &min_long));
  jcc(Assembler::notEqual, normal_case);
  xorl(rdx, rdx); // prepare rdx for possible special case (where
                  // remainder = 0)
  cmpq(reg, -1);
  jcc(Assembler::equal, special_case);

  // handle normal case
  bind(normal_case);
  cdqq();         // sign-extend rax into rdx:rax for idiv
  int idivq_offset = offset();
  idivq(reg);

  // normal and special case exit
  bind(special_case);

  return idivq_offset;
}
5245 void MacroAssembler::decrementq(Register reg, int value) {
5246 if (value == min_jint) { subq(reg, value); return; }
5247 if (value < 0) { incrementq(reg, -value); return; }
5248 if (value == 0) { ; return; }
5249 if (value == 1 && UseIncDec) { decq(reg) ; return; }
5250 /* else */ { subq(reg, value) ; return; }
5251 }
5253 void MacroAssembler::decrementq(Address dst, int value) {
5254 if (value == min_jint) { subq(dst, value); return; }
5255 if (value < 0) { incrementq(dst, -value); return; }
5256 if (value == 0) { ; return; }
5257 if (value == 1 && UseIncDec) { decq(dst) ; return; }
5258 /* else */ { subq(dst, value) ; return; }
5259 }
void MacroAssembler::fat_nop() {
  // A 5 byte nop that is safe for patching (see patch_verified_entry)
  // Recommened sequence from 'Software Optimization Guide for the AMD
  // Hammer Processor'
  // (two operand-size-prefixed nops: 66 66 90 and 66 90, totalling 5 bytes)
  emit_byte(0x66);
  emit_byte(0x66);
  emit_byte(0x90);
  emit_byte(0x66);
  emit_byte(0x90);
}
5272 void MacroAssembler::incrementq(Register reg, int value) {
5273 if (value == min_jint) { addq(reg, value); return; }
5274 if (value < 0) { decrementq(reg, -value); return; }
5275 if (value == 0) { ; return; }
5276 if (value == 1 && UseIncDec) { incq(reg) ; return; }
5277 /* else */ { addq(reg, value) ; return; }
5278 }
5280 void MacroAssembler::incrementq(Address dst, int value) {
5281 if (value == min_jint) { addq(dst, value); return; }
5282 if (value < 0) { decrementq(dst, -value); return; }
5283 if (value == 0) { ; return; }
5284 if (value == 1 && UseIncDec) { incq(dst) ; return; }
5285 /* else */ { addq(dst, value) ; return; }
5286 }
5288 // 32bit can do a case table jump in one instruction but we no longer allow the base
5289 // to be installed in the Address class
// Case-table jump: materialize the table base in rscratch1 (64-bit
// addresses can't live in an Address displacement) and jump through the
// indexed slot.
void MacroAssembler::jump(ArrayAddress entry) {
  lea(rscratch1, entry.base());
  Address dispatch = entry.index();
  assert(dispatch._base == noreg, "must be");
  dispatch._base = rscratch1;
  jmp(dispatch);
}
// Long compare via register pairs is a 32-bit-only concept; must not be
// reached on 64-bit.
void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
  ShouldNotReachHere(); // 64bit doesn't use two regs
  cmpq(x_lo, y_lo);
}
// Load the 64-bit literal address (with its relocation) into dst.
void MacroAssembler::lea(Register dst, AddressLiteral src) {
  mov_literal64(dst, (intptr_t)src.target(), src.rspec());
}
// Store the 64-bit literal address into memory, staged through rscratch1
// since there is no 64-bit immediate-to-memory move.
void MacroAssembler::lea(Address dst, AddressLiteral adr) {
  mov_literal64(rscratch1, (intptr_t)adr.target(), adr.rspec());
  movptr(dst, rscratch1);
}
// Tear down the current frame with the one-byte LEAVE instruction.
void MacroAssembler::leave() {
  // %%% is this really better? Why not on 32bit too?
  emit_byte(0xC9); // LEAVE
}
// Register-pair negate is a 32-bit-only concept; must not be reached here.
void MacroAssembler::lneg(Register hi, Register lo) {
  ShouldNotReachHere(); // 64bit doesn't use two regs
  negq(lo);
}
// Load an oop constant into dst as a patchable 64-bit immediate.
void MacroAssembler::movoop(Register dst, jobject obj) {
  mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate());
}
// Store an oop constant to memory, staged through rscratch1 because there
// is no 64-bit immediate-to-memory move.
void MacroAssembler::movoop(Address dst, jobject obj) {
  mov_literal64(rscratch1, (intptr_t)obj, oop_Relocation::spec_for_immediate());
  movq(dst, rscratch1);
}
// Load a pointer-sized value from an AddressLiteral: for an lval the 64-bit
// literal address itself is materialized; otherwise the word at that
// address is loaded, going through rscratch1 when the address is out of
// rip-relative range.
void MacroAssembler::movptr(Register dst, AddressLiteral src) {
  if (src.is_lval()) {
    mov_literal64(dst, (intptr_t)src.target(), src.rspec());
  } else {
    if (reachable(src)) {
      movq(dst, as_Address(src));
    } else {
      lea(rscratch1, src);
      movq(dst, Address(rscratch1,0));
    }
  }
}
// Store a pointer-sized register into an array (table) slot.
void MacroAssembler::movptr(ArrayAddress dst, Register src) {
  movq(as_Address(dst), src);
}
// Load a pointer-sized value from an array (table) slot.
void MacroAssembler::movptr(Register dst, ArrayAddress src) {
  movq(dst, as_Address(src));
}
5352 // src should NEVER be a real pointer. Use AddressLiteral for true pointers
// src should NEVER be a real pointer. Use AddressLiteral for true pointers.
// Staged through rscratch1 since there is no 64-bit immediate-to-memory move.
void MacroAssembler::movptr(Address dst, intptr_t src) {
  mov64(rscratch1, src);
  movq(dst, rscratch1);
}
5358 // These are mostly for initializing NULL
// Store a sign-extended 32-bit immediate as a pointer word (mostly used to
// initialize fields to NULL).
void MacroAssembler::movptr(Address dst, int32_t src) {
  movslq(dst, src);
}
// Load a sign-extended 32-bit immediate as a pointer word (mostly used to
// initialize registers to NULL).
void MacroAssembler::movptr(Register dst, int32_t src) {
  mov64(dst, (intptr_t)src);
}
// Push an oop constant, staged through rscratch1 since a 64-bit immediate
// cannot be pushed directly.
void MacroAssembler::pushoop(jobject obj) {
  movoop(rscratch1, obj);
  push(rscratch1);
}
// Push either the literal address itself (lval) or the word stored at that
// address; the address is always materialized in rscratch1 first.
void MacroAssembler::pushptr(AddressLiteral src) {
  lea(rscratch1, src);
  if (src.is_lval()) {
    push(rscratch1);
  } else {
    pushq(Address(rscratch1, 0));
  }
}
// Clear the thread's last-Java-frame anchor. last_Java_sp is always
// cleared; fp and pc are cleared on request.
void MacroAssembler::reset_last_Java_frame(bool clear_fp,
                                           bool clear_pc) {
  // we must set sp to zero to clear frame
  movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
  }

  if (clear_pc) {
    movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
  }
}
// Record the current Java frame in the thread's anchor so the runtime can
// walk the stack. fp and pc are optional; sp defaults to rsp and is stored
// last so the anchor only becomes visible once fully initialized.
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address last_java_pc) {
  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = rsp;
  }

  // last_java_fp is optional
  if (last_java_fp->is_valid()) {
    movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()),
           last_java_fp);
  }

  // last_java_pc is optional
  if (last_java_pc != NULL) {
    Address java_pc(r15_thread,
                    JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
    lea(rscratch1, InternalAddress(last_java_pc));
    movptr(java_pc, rscratch1);
  }

  movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
}
5421 static void pass_arg0(MacroAssembler* masm, Register arg) {
5422 if (c_rarg0 != arg ) {
5423 masm->mov(c_rarg0, arg);
5424 }
5425 }
5427 static void pass_arg1(MacroAssembler* masm, Register arg) {
5428 if (c_rarg1 != arg ) {
5429 masm->mov(c_rarg1, arg);
5430 }
5431 }
5433 static void pass_arg2(MacroAssembler* masm, Register arg) {
5434 if (c_rarg2 != arg ) {
5435 masm->mov(c_rarg2, arg);
5436 }
5437 }
5439 static void pass_arg3(MacroAssembler* masm, Register arg) {
5440 if (c_rarg3 != arg ) {
5441 masm->mov(c_rarg3, arg);
5442 }
5443 }
// Emit code that halts execution with a message: passes the message, the
// faulting rip and a pointer to the pusha'd register array to debug64, then
// traps with hlt if it returns.
void MacroAssembler::stop(const char* msg) {
  address rip = pc();
  pusha();                  // get regs on stack
  lea(c_rarg0, ExternalAddress((address) msg));
  lea(c_rarg1, InternalAddress(rip));
  movq(c_rarg2, rsp);       // pass pointer to regs array
  andq(rsp, -16);           // align stack as required by ABI
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
  hlt();
}
// Emit code that prints a warning via the VM's warning() without
// disturbing the caller: r12 preserves the original rsp across the
// 16-byte re-alignment, and push/pop_CPU_state save/restore all
// registers around the leaf call.
void MacroAssembler::warn(const char* msg) {
  push(r12);
  movq(r12, rsp);
  andq(rsp, -16);     // align stack as required by push_CPU_state and call

  push_CPU_state();   // keeps alignment at 16 bytes
  lea(c_rarg0, ExternalAddress((address) msg));
  call_VM_leaf(CAST_FROM_FN_PTR(address, warning), c_rarg0);
  pop_CPU_state();

  movq(rsp, r12);
  pop(r12);
}
5470 #ifndef PRODUCT
5471 extern "C" void findpc(intptr_t x);
5472 #endif
// Runtime target of MacroAssembler::stop() on 64-bit.  'msg' is the stop
// message, 'pc' the code address where the stop was planted, and 'regs'
// points at the register save area built by pusha() — hence the reversed
// indices below (rax was pushed first, so it sits at the highest index).
void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
  // In order to get locks to work, we need to fake a in_VM state
  if (ShowMessageBoxOnError ) {
    JavaThread* thread = JavaThread::current();
    JavaThreadState saved_state = thread->thread_state();
    thread->set_thread_state(_thread_in_vm);
#ifndef PRODUCT
    if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
      ttyLocker ttyl;
      BytecodeCounter::print();
    }
#endif
    // To see where a verify_oop failed, get $ebx+40/X for this frame.
    // XXX correct this offset for amd64
    // This is the value of eip which points to where verify_oop will return.
    if (os::message_box(msg, "Execution stopped, print registers?")) {
      ttyLocker ttyl;
      tty->print_cr("rip = 0x%016lx", pc);
#ifndef PRODUCT
      tty->cr();
      findpc(pc);
      tty->cr();
#endif
      // Indices mirror pusha's push order in reverse.
      tty->print_cr("rax = 0x%016lx", regs[15]);
      tty->print_cr("rbx = 0x%016lx", regs[12]);
      tty->print_cr("rcx = 0x%016lx", regs[14]);
      tty->print_cr("rdx = 0x%016lx", regs[13]);
      tty->print_cr("rdi = 0x%016lx", regs[8]);
      tty->print_cr("rsi = 0x%016lx", regs[9]);
      tty->print_cr("rbp = 0x%016lx", regs[10]);
      tty->print_cr("rsp = 0x%016lx", regs[11]);
      tty->print_cr("r8  = 0x%016lx", regs[7]);
      tty->print_cr("r9  = 0x%016lx", regs[6]);
      tty->print_cr("r10 = 0x%016lx", regs[5]);
      tty->print_cr("r11 = 0x%016lx", regs[4]);
      tty->print_cr("r12 = 0x%016lx", regs[3]);
      tty->print_cr("r13 = 0x%016lx", regs[2]);
      tty->print_cr("r14 = 0x%016lx", regs[1]);
      tty->print_cr("r15 = 0x%016lx", regs[0]);
      BREAKPOINT;
    }
    // Restore the state we faked above before returning.
    ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
  } else {
    ttyLocker ttyl;
    ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n",
                    msg);
  }
}
5523 #endif // _LP64
5525 // Now versions that are common to 32/64 bit
5527 void MacroAssembler::addptr(Register dst, int32_t imm32) {
5528 LP64_ONLY(addq(dst, imm32)) NOT_LP64(addl(dst, imm32));
5529 }
5531 void MacroAssembler::addptr(Register dst, Register src) {
5532 LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
5533 }
5535 void MacroAssembler::addptr(Address dst, Register src) {
5536 LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
5537 }
5539 void MacroAssembler::align(int modulus) {
5540 if (offset() % modulus != 0) {
5541 nop(modulus - (offset() % modulus));
5542 }
5543 }
5545 void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src) {
5546 andpd(dst, as_Address(src));
5547 }
5549 void MacroAssembler::andptr(Register dst, int32_t imm32) {
5550 LP64_ONLY(andq(dst, imm32)) NOT_LP64(andl(dst, imm32));
5551 }
5553 void MacroAssembler::atomic_incl(AddressLiteral counter_addr) {
5554 pushf();
5555 if (os::is_MP())
5556 lock();
5557 incrementl(counter_addr);
5558 popf();
5559 }
// Writes to stack successive pages until offset reached to check for
// stack overflow + shadow pages. This clobbers tmp.
// 'size' is the frame size to bang down from rsp, in bytes; a page fault
// in here surfaces as a StackOverflowError rather than a random crash
// later.
void MacroAssembler::bang_stack_size(Register size, Register tmp) {
  movptr(tmp, rsp);
  // Bang stack for total size given plus shadow page size.
  // Bang one page at a time because large size can bang beyond yellow and
  // red zones.
  Label loop;
  bind(loop);
  // Store 'size' itself as the banging value — any value works; this one
  // leaves a recognizable debugging crumb.
  movl(Address(tmp, (-os::vm_page_size())), size );
  subptr(tmp, os::vm_page_size());
  subl(size, os::vm_page_size());
  jcc(Assembler::greater, loop);

  // Bang down shadow pages too.
  // The -1 because we already subtracted 1 page.
  for (int i = 0; i< StackShadowPages-1; i++) {
    // this could be any sized move but this is can be a debugging crumb
    // so the bigger the better.
    movptr(Address(tmp, (-i*os::vm_page_size())), size );
  }
}
// Biased-locking unlock fast path: if obj_reg's mark word carries the
// biased-lock pattern, unlocking is a no-op and we branch to 'done'.
// temp_reg is clobbered.
void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) {
  assert(UseBiasedLocking, "why call this otherwise?");

  // Check for biased locking unlock case, which is a no-op
  // Note: we do not have to check the thread ID for two reasons.
  // First, the interpreter checks for IllegalMonitorStateException at
  // a higher level. Second, if the bias was revoked while we held the
  // lock, the object could not be rebiased toward another thread, so
  // the bias bit would be clear.
  movptr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
  andptr(temp_reg, markOopDesc::biased_lock_mask_in_place);
  cmpptr(temp_reg, markOopDesc::biased_lock_pattern);
  jcc(Assembler::equal, done);
}
// Normalize a C-style boolean in 'x' to exactly 0 or 1.
void MacroAssembler::c2bool(Register x) {
  // implements x == 0 ? 0 : 1
  // note: must only look at least-significant byte of x
  //       since C-style booleans are stored in one byte
  //       only! (was bug)
  andl(x, 0xFF);
  setb(Assembler::notZero, x);
}
// Wouldn't need if AddressLiteral version had new name
// Forward to the base assembler: call to a (possibly unbound) label.
void MacroAssembler::call(Label& L, relocInfo::relocType rtype) {
  Assembler::call(L, rtype);
}
// Indirect call through 'entry'.
void MacroAssembler::call(Register entry) {
  Assembler::call(entry);
}
5617 void MacroAssembler::call(AddressLiteral entry) {
5618 if (reachable(entry)) {
5619 Assembler::call_literal(entry.target(), entry.rspec());
5620 } else {
5621 lea(rscratch1, entry);
5622 Assembler::call(rscratch1);
5623 }
5624 }
5626 // Implementation of call_VM versions
// call_VM with no Java arguments.  The call-C/jmp-E trick plants a return
// address on the stack which call_VM_helper later uses as last_Java_pc
// (see the comment in call_VM_helper); straight-line execution skips the
// helper body.
void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
  ret(0);

  bind(E);
}
// call_VM with one Java argument.  See the zero-argument overload for an
// explanation of the call-C/jmp-E pattern.
void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
  ret(0);

  bind(E);
}
// call_VM with two Java arguments.  Arguments are placed last-to-first so
// an earlier pass cannot clobber a value still needed by a later one; the
// assert documents the remaining hazard.
void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);

  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));

  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
  ret(0);

  bind(E);
}
// call_VM with three Java arguments.  Arguments are placed last-to-first;
// the asserts guard against a later pass smashing an earlier source
// register.
void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  Label C, E;
  call(C, relocInfo::none);
  jmp(E);

  bind(C);

  LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
  LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
  pass_arg3(this, arg_3);

  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
  pass_arg2(this, arg_2);

  pass_arg1(this, arg_1);
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
  ret(0);

  bind(E);
}
5705 void MacroAssembler::call_VM(Register oop_result,
5706 Register last_java_sp,
5707 address entry_point,
5708 int number_of_arguments,
5709 bool check_exceptions) {
5710 Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
5711 call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
5712 }
// call_VM with an explicit last_java_sp and one Java argument.
void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}
// call_VM with an explicit last_java_sp and two Java arguments.
// Arguments are placed last-to-first to avoid clobbering; see the assert.
void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {

  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}
// call_VM with an explicit last_java_sp and three Java arguments.
// Arguments are placed last-to-first to avoid clobbering; see the asserts.
void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
  LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
  pass_arg3(this, arg_3);
  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
  pass_arg2(this, arg_2);
  pass_arg1(this, arg_1);
  call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}
// Workhorse behind every call_VM variant: sets up the last-Java frame,
// calls 'entry_point' in the VM with the thread as implicit first
// argument, then tears the frame down, forwards any pending exception,
// and fetches the oop result (if oop_result is valid) out of the thread.
void MacroAssembler::call_VM_base(Register oop_result,
                                  Register java_thread,
                                  Register last_java_sp,
                                  address entry_point,
                                  int number_of_arguments,
                                  bool check_exceptions) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
#ifdef _LP64
    java_thread = r15_thread;
#else
    java_thread = rdi;
    get_thread(java_thread);
#endif // LP64
  }
  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = rsp;
  }
  // debugging support
  assert(number_of_arguments >= 0   , "cannot have negative number of arguments");
  LP64_ONLY(assert(java_thread == r15_thread, "unexpected register"));
  assert(java_thread != oop_result  , "cannot use the same register for java_thread & oop_result");
  assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");

  // push java thread (becomes first argument of C function)

  NOT_LP64(push(java_thread); number_of_arguments++);
  LP64_ONLY(mov(c_rarg0, r15_thread));

  // set last Java frame before call

  assert(last_java_sp != rbp, "can't use ebp/rbp");

  // Only interpreter should have to set fp
  set_last_Java_frame(java_thread, last_java_sp, rbp, NULL);

  // do the call, remove parameters
  MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);

  // restore the thread (cannot use the pushed argument since arguments
  // may be overwritten by C code generated by an optimizing compiler);
  // however can use the register value directly if it is callee saved.
  if (LP64_ONLY(true ||) java_thread == rdi || java_thread == rsi) {
    // rdi & rsi (also r15) are callee saved -> nothing to do
#ifdef ASSERT
    // Debug build: verify the callee really did preserve the thread register.
    guarantee(java_thread != rax, "change this code");
    push(rax);
    { Label L;
      get_thread(rax);
      cmpptr(java_thread, rax);
      jcc(Assembler::equal, L);
      stop("MacroAssembler::call_VM_base: rdi not callee saved?");
      bind(L);
    }
    pop(rax);
#endif
  } else {
    get_thread(java_thread);
  }
  // reset last Java frame
  // Only interpreter should have to clear fp
  reset_last_Java_frame(java_thread, true, false);

#ifndef CC_INTERP
   // C++ interp handles this in the interpreter
  check_and_handle_popframe(java_thread);
  check_and_handle_earlyret(java_thread);
#endif /* CC_INTERP */

  if (check_exceptions) {
    // check for pending exceptions (java_thread is set upon return)
    cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
#ifndef _LP64
    jump_cc(Assembler::notEqual,
            RuntimeAddress(StubRoutines::forward_exception_entry()));
#else
    // This used to conditionally jump to forward_exception however it is
    // possible if we relocate that the branch will not reach. So we must jump
    // around so we can always reach

    Label ok;
    jcc(Assembler::equal, ok);
    jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    bind(ok);
#endif // LP64
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    movptr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
    movptr(Address(java_thread, JavaThread::vm_result_offset()), NULL_WORD);
    verify_oop(oop_result, "broken oop in call_VM_base");
  }
}
// Computes last_Java_sp for the call_VM entry sequence and forwards to
// call_VM_base.
void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {

  // Calculate the value for last_Java_sp
  // somewhat subtle. call_VM does an intermediate call
  // which places a return address on the stack just under the
  // stack pointer as the user finished with it. This allows
  // us to retrieve last_Java_pc from last_Java_sp[-1].
  // On 32bit we then have to push additional args on the stack to accomplish
  // the actual requested call. On 64bit call_VM only can use register args
  // so the only extra space is the return address that call_VM created.
  // This hopefully explains the calculations here.

#ifdef _LP64
  // We've pushed one address, correct last_Java_sp
  lea(rax, Address(rsp, wordSize));
#else
  lea(rax, Address(rsp, (1 + number_of_arguments) * wordSize));
#endif // LP64

  call_VM_base(oop_result, noreg, rax, entry_point, number_of_arguments, check_exceptions);

}
// Leaf call into the VM (no Java frame anchor, no exception check).
void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
  call_VM_leaf_base(entry_point, number_of_arguments);
}
// Leaf call with one argument.
void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 1);
}
// Leaf call with two arguments, placed last-to-first to avoid clobbering.
void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {

  LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 2);
}
// Leaf call with three arguments, placed last-to-first to avoid clobbering.
void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
  LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
  LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
  pass_arg2(this, arg_2);
  LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
  pass_arg1(this, arg_1);
  pass_arg0(this, arg_0);
  call_VM_leaf(entry_point, 3);
}
// Intentionally empty hook called after every call_VM; presumably
// overridden by an interpreter-specific assembler — TODO confirm.
void MacroAssembler::check_and_handle_earlyret(Register java_thread) {
}
// Intentionally empty hook called after every call_VM; presumably
// overridden by an interpreter-specific assembler — TODO confirm.
void MacroAssembler::check_and_handle_popframe(Register java_thread) {
}
5903 void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm) {
5904 if (reachable(src1)) {
5905 cmpl(as_Address(src1), imm);
5906 } else {
5907 lea(rscratch1, src1);
5908 cmpl(Address(rscratch1, 0), imm);
5909 }
5910 }
5912 void MacroAssembler::cmp32(Register src1, AddressLiteral src2) {
5913 assert(!src2.is_lval(), "use cmpptr");
5914 if (reachable(src2)) {
5915 cmpl(src1, as_Address(src2));
5916 } else {
5917 lea(rscratch1, src2);
5918 cmpl(src1, Address(rscratch1, 0));
5919 }
5920 }
// 32-bit register-immediate compare.
void MacroAssembler::cmp32(Register src1, int32_t imm) {
  Assembler::cmpl(src1, imm);
}
// 32-bit register-memory compare.
void MacroAssembler::cmp32(Register src1, Address src2) {
  Assembler::cmpl(src1, src2);
}
// Compare two doubles and materialize the Java-style -1/0/+1 result in
// 'dst'.  ucomisd sets ZF/PF/CF: PF flags an unordered (NaN) compare,
// which is mapped to -1 or +1 according to 'unordered_is_less' (dcmpl
// vs. dcmpg semantics).
void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
  ucomisd(opr1, opr2);

  Label L;
  if (unordered_is_less) {
    movl(dst, -1);
    jcc(Assembler::parity, L);
    jcc(Assembler::below , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    increment(dst);
  } else { // unordered is greater
    movl(dst, 1);
    jcc(Assembler::parity, L);
    jcc(Assembler::above , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    decrementl(dst);
  }
  bind(L);
}
// Single-precision sibling of cmpsd2int: compare two floats and leave
// -1/0/+1 in 'dst'; NaN maps to -1 or +1 per 'unordered_is_less'
// (fcmpl vs. fcmpg semantics).
void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
  ucomiss(opr1, opr2);

  Label L;
  if (unordered_is_less) {
    movl(dst, -1);
    jcc(Assembler::parity, L);
    jcc(Assembler::below , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    increment(dst);
  } else { // unordered is greater
    movl(dst, 1);
    jcc(Assembler::parity, L);
    jcc(Assembler::above , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    decrementl(dst);
  }
  bind(L);
}
5975 void MacroAssembler::cmp8(AddressLiteral src1, int imm) {
5976 if (reachable(src1)) {
5977 cmpb(as_Address(src1), imm);
5978 } else {
5979 lea(rscratch1, src1);
5980 cmpb(Address(rscratch1, 0), imm);
5981 }
5982 }
// Pointer-sized compare of a register against an AddressLiteral.  An lval
// literal means "compare against the address itself"; otherwise compare
// against the value stored at the literal's location.
void MacroAssembler::cmpptr(Register src1, AddressLiteral src2) {
#ifdef _LP64
  if (src2.is_lval()) {
    // Comparing against the 64-bit literal address: materialize it first.
    movptr(rscratch1, src2);
    Assembler::cmpq(src1, rscratch1);
  } else if (reachable(src2)) {
    cmpq(src1, as_Address(src2));
  } else {
    // Literal out of RIP range: go through rscratch1.
    lea(rscratch1, src2);
    Assembler::cmpq(src1, Address(rscratch1, 0));
  }
#else
  if (src2.is_lval()) {
    cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
  } else {
    cmpl(src1, as_Address(src2));
  }
#endif // _LP64
}
// Compare the pointer stored at 'src1' against a literal address.
// Only lval literals make sense here (mem-mem value compares do not
// exist on x86).
void MacroAssembler::cmpptr(Address src1, AddressLiteral src2) {
  assert(src2.is_lval(), "not a mem-mem compare");
#ifdef _LP64
  // moves src2's literal address
  movptr(rscratch1, src2);
  Assembler::cmpq(src1, rscratch1);
#else
  cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
#endif // _LP64
}
6015 void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr) {
6016 if (reachable(adr)) {
6017 if (os::is_MP())
6018 lock();
6019 cmpxchgptr(reg, as_Address(adr));
6020 } else {
6021 lea(rscratch1, adr);
6022 if (os::is_MP())
6023 lock();
6024 cmpxchgptr(reg, Address(rscratch1, 0));
6025 }
6026 }
6028 void MacroAssembler::cmpxchgptr(Register reg, Address adr) {
6029 LP64_ONLY(cmpxchgq(reg, adr)) NOT_LP64(cmpxchgl(reg, adr));
6030 }
6032 void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src) {
6033 comisd(dst, as_Address(src));
6034 }
6036 void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src) {
6037 comiss(dst, as_Address(src));
6038 }
// Atomically increment the counter at 'counter_addr' iff condition 'cond'
// holds, by jumping around the increment on the negated condition.
void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr) {
  Condition negated_cond = negate_condition(cond);
  Label L;
  jcc(negated_cond, L);
  atomic_incl(counter_addr);
  bind(L);
}
int MacroAssembler::corrected_idivl(Register reg) {
  // Full implementation of Java idiv and irem; checks for
  // special case as described in JVM spec., p.243 & p.271.
  // The function returns the (pc) offset of the idivl
  // instruction - may be needed for implicit exceptions.
  //
  //         normal case                           special case
  //
  // input : rax,: dividend                         min_int
  //         reg: divisor   (may not be rax,/rdx)   -1
  //
  // output: rax,: quotient  (= rax, idiv reg)       min_int
  //         rdx: remainder (= rax, irem reg)       0
  // (min_int / -1 would overflow and raise #DE on x86, so it is
  // special-cased to return min_int with remainder 0.)
  assert(reg != rax && reg != rdx, "reg cannot be rax, or rdx register");
  const int min_int = 0x80000000;
  Label normal_case, special_case;

  // check for special case
  cmpl(rax, min_int);
  jcc(Assembler::notEqual, normal_case);
  xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0)
  cmpl(reg, -1);
  jcc(Assembler::equal, special_case);

  // handle normal case
  bind(normal_case);
  cdql();           // sign-extend rax into rdx:rax as idivl requires
  int idivl_offset = offset();
  idivl(reg);

  // normal and special case exit
  bind(special_case);

  return idivl_offset;
}
6087 void MacroAssembler::decrementl(Register reg, int value) {
6088 if (value == min_jint) {subl(reg, value) ; return; }
6089 if (value < 0) { incrementl(reg, -value); return; }
6090 if (value == 0) { ; return; }
6091 if (value == 1 && UseIncDec) { decl(reg) ; return; }
6092 /* else */ { subl(reg, value) ; return; }
6093 }
6095 void MacroAssembler::decrementl(Address dst, int value) {
6096 if (value == min_jint) {subl(dst, value) ; return; }
6097 if (value < 0) { incrementl(dst, -value); return; }
6098 if (value == 0) { ; return; }
6099 if (value == 1 && UseIncDec) { decl(dst) ; return; }
6100 /* else */ { subl(dst, value) ; return; }
6101 }
// Signed division by a power of two via arithmetic shift.  A plain sar
// rounds toward -infinity, so negative dividends are first biased by
// (2^shift - 1) to get Java's round-toward-zero semantics.
void MacroAssembler::division_with_shift (Register reg, int shift_value) {
  assert (shift_value > 0, "illegal shift value");
  Label _is_positive;
  testl (reg, reg);
  jcc (Assembler::positive, _is_positive);
  int offset = (1 << shift_value) - 1 ;

  if (offset == 1) {
    incrementl(reg);
  } else {
    addl(reg, offset);
  }

  bind (_is_positive);
  sarl(reg, shift_value);
}
6120 // !defined(COMPILER2) is because of stupid core builds
6121 #if !defined(_LP64) || defined(COMPILER1) || !defined(COMPILER2)
6122 void MacroAssembler::empty_FPU_stack() {
6123 if (VM_Version::supports_mmx()) {
6124 emms();
6125 } else {
6126 for (int i = 8; i-- > 0; ) ffree(i);
6127 }
6128 }
6129 #endif // !LP64 || C1 || !C2
// Defines obj, preserves var_size_in_bytes
// Inline eden allocation via a CAS retry loop on the shared heap top:
// load top, compute the new end (constant or variable size), bounds-check
// against heap end, then cmpxchg the new top in place.  Falls through on
// success; branches to slow_case when inline allocation is unavailable,
// the size wraps, or the heap is exhausted.
void MacroAssembler::eden_allocate(Register obj,
                                   Register var_size_in_bytes,
                                   int con_size_in_bytes,
                                   Register t1,
                                   Label& slow_case) {
  assert(obj == rax, "obj must be in rax, for cmpxchg");
  assert_different_registers(obj, var_size_in_bytes, t1);
  if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
    jmp(slow_case);
  } else {
    Register end = t1;
    Label retry;
    bind(retry);
    ExternalAddress heap_top((address) Universe::heap()->top_addr());
    movptr(obj, heap_top);
    if (var_size_in_bytes == noreg) {
      lea(end, Address(obj, con_size_in_bytes));
    } else {
      lea(end, Address(obj, var_size_in_bytes, Address::times_1));
    }
    // if end < obj then we wrapped around => object too long => slow case
    cmpptr(end, obj);
    jcc(Assembler::below, slow_case);
    cmpptr(end, ExternalAddress((address) Universe::heap()->end_addr()));
    jcc(Assembler::above, slow_case);
    // Compare obj with the top addr, and if still equal, store the new top addr in
    // end at the address of the top addr pointer. Sets ZF if was equal, and clears
    // it otherwise. Use lock prefix for atomicity on MPs.
    locked_cmpxchgptr(end, heap_top);
    jcc(Assembler::notEqual, retry);
  }
}
// Standard frame prologue: save the caller's frame pointer and establish
// a new one.
void MacroAssembler::enter() {
  push(rbp);
  mov(rbp, rsp);
}
// x87 compare of ST0 against ST1, popping both (common case).
void MacroAssembler::fcmp(Register tmp) {
  fcmp(tmp, 1, true, true);
}
// x87 compare of ST0 against ST(index), optionally popping one or both
// operands, leaving the result in the eflags (see table at the bottom).
// On pre-P6 hardware without fucomi, the FPU status word is moved to the
// flags by hand via rax (preserved through 'tmp').
void MacroAssembler::fcmp(Register tmp, int index, bool pop_left, bool pop_right) {
  assert(!pop_right || pop_left, "usage error");
  if (VM_Version::supports_cmov()) {
    assert(tmp == noreg, "unneeded temp");
    if (pop_left) {
      fucomip(index);
    } else {
      fucomi(index);
    }
    if (pop_right) {
      fpop();
    }
  } else {
    assert(tmp != noreg, "need temp");
    if (pop_left) {
      if (pop_right) {
        fcompp();
      } else {
        fcomp(index);
      }
    } else {
      fcom(index);
    }
    // convert FPU condition into eflags condition via rax,
    save_rax(tmp);
    fwait(); fnstsw_ax();
    sahf();
    restore_rax(tmp);
  }
  // condition codes set as follows:
  //
  // CF (corresponds to C0) if x < y
  // PF (corresponds to C2) if unordered
  // ZF (corresponds to C3) if x = y
}
// x87 compare of ST0/ST1 producing -1/0/+1 in dst (common case: pop both).
void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less) {
  fcmp2int(dst, unordered_is_less, 1, true, true);
}
// x87 compare producing the Java-style -1/0/+1 result in 'dst'.
// PF (unordered/NaN) maps to -1 or +1 per 'unordered_is_less'
// (fcmpl vs. fcmpg semantics); flag meanings are set by fcmp above.
void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right) {
  fcmp(VM_Version::supports_cmov() ? noreg : dst, index, pop_left, pop_right);
  Label L;
  if (unordered_is_less) {
    movl(dst, -1);
    jcc(Assembler::parity, L);
    jcc(Assembler::below , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    increment(dst);
  } else { // unordered is greater
    movl(dst, 1);
    jcc(Assembler::parity, L);
    jcc(Assembler::above , L);
    movl(dst, 0);
    jcc(Assembler::equal , L);
    decrementl(dst);
  }
  bind(L);
}
// Load a double literal onto the x87 stack.
// NOTE(review): no reachable() fallback here, unlike movss/cmp32 —
// presumably fine because x87 paths are 32-bit where everything is
// reachable; confirm before using from 64-bit code.
void MacroAssembler::fld_d(AddressLiteral src) {
  fld_d(as_Address(src));
}
// Load a float literal onto the x87 stack (no reachable() fallback;
// see note on fld_d).
void MacroAssembler::fld_s(AddressLiteral src) {
  fld_s(as_Address(src));
}
// Load an 80-bit extended literal onto the x87 stack (no reachable()
// fallback; see note on fld_d).
void MacroAssembler::fld_x(AddressLiteral src) {
  Assembler::fld_x(as_Address(src));
}
// Load the x87 control word from a literal location (no reachable()
// fallback; see note on fld_d).
void MacroAssembler::fldcw(AddressLiteral src) {
  Assembler::fldcw(as_Address(src));
}
// Pop the x87 stack: mark ST0 free, then bump the stack top.
void MacroAssembler::fpop() {
  ffree();
  fincstp();
}
// IEEE remainder of ST0 by ST1 via fprem.  fprem performs only partial
// reduction per iteration and reports "incomplete" in status-word bit C2
// (0x400), so loop until it clears.  rax is clobbered by fnstsw and
// preserved through 'tmp'.
void MacroAssembler::fremr(Register tmp) {
  save_rax(tmp);
  { Label L;
    bind(L);
    fprem();
    fwait(); fnstsw_ax();
#ifdef _LP64
    // Test C2 directly (sahf is not universally available on early amd64).
    testl(rax, 0x400);
    jcc(Assembler::notEqual, L);
#else
    sahf();
    jcc(Assembler::parity, L);
#endif // _LP64
  }
  restore_rax(tmp);
  // Result is in ST0.
  // Note: fxch & fpop to get rid of ST1
  // (otherwise FPU stack could overflow eventually)
  fxch(1);
  fpop();
}
6280 void MacroAssembler::incrementl(AddressLiteral dst) {
6281 if (reachable(dst)) {
6282 incrementl(as_Address(dst));
6283 } else {
6284 lea(rscratch1, dst);
6285 incrementl(Address(rscratch1, 0));
6286 }
6287 }
// Increment the 32-bit value at an array-element address.
void MacroAssembler::incrementl(ArrayAddress dst) {
  incrementl(as_Address(dst));
}
6293 void MacroAssembler::incrementl(Register reg, int value) {
6294 if (value == min_jint) {addl(reg, value) ; return; }
6295 if (value < 0) { decrementl(reg, -value); return; }
6296 if (value == 0) { ; return; }
6297 if (value == 1 && UseIncDec) { incl(reg) ; return; }
6298 /* else */ { addl(reg, value) ; return; }
6299 }
6301 void MacroAssembler::incrementl(Address dst, int value) {
6302 if (value == min_jint) {addl(dst, value) ; return; }
6303 if (value < 0) { decrementl(dst, -value); return; }
6304 if (value == 0) { ; return; }
6305 if (value == 1 && UseIncDec) { incl(dst) ; return; }
6306 /* else */ { addl(dst, value) ; return; }
6307 }
6309 void MacroAssembler::jump(AddressLiteral dst) {
6310 if (reachable(dst)) {
6311 jmp_literal(dst.target(), dst.rspec());
6312 } else {
6313 lea(rscratch1, dst);
6314 jmp(rscratch1);
6315 }
6316 }
// Conditional jump to a literal target.  When reachable, the branch is
// emitted by hand (short 8-bit form when the displacement fits and no
// relocation is needed, long 32-bit form otherwise).  When out of range,
// the condition is reversed to hop over an indirect jmp through rscratch1.
void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst) {
  if (reachable(dst)) {
    InstructionMark im(this);
    relocate(dst.reloc());
    const int short_size = 2;
    const int long_size = 6;
    // Displacement is measured from the end of the emitted instruction.
    int offs = (intptr_t)dst.target() - ((intptr_t)_code_pos);
    if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) {
      // 0111 tttn #8-bit disp
      emit_byte(0x70 | cc);
      emit_byte((offs - short_size) & 0xFF);
    } else {
      // 0000 1111 1000 tttn #32-bit disp
      emit_byte(0x0F);
      emit_byte(0x80 | cc);
      emit_long(offs - long_size);
    }
  } else {
#ifdef ASSERT
    warning("reversing conditional branch");
#endif /* ASSERT */
    Label skip;
    jccb(reverse[cc], skip);
    lea(rscratch1, dst);
    Assembler::jmp(rscratch1);
    bind(skip);
  }
}
6347 void MacroAssembler::ldmxcsr(AddressLiteral src) {
6348 if (reachable(src)) {
6349 Assembler::ldmxcsr(as_Address(src));
6350 } else {
6351 lea(rscratch1, src);
6352 Assembler::ldmxcsr(Address(rscratch1, 0));
6353 }
6354 }
// Load a sign-extended byte into dst; returns the code offset of the
// actual load (needed for implicit null-check bookkeeping).  Pre-P6
// 32-bit hardware lacks movsx, so it sign-extends via shifts instead.
int MacroAssembler::load_signed_byte(Register dst, Address src) {
  int off;
  if (LP64_ONLY(true ||) VM_Version::is_P6()) {
    off = offset();
    movsbl(dst, src); // movsxb
  } else {
    off = load_unsigned_byte(dst, src);
    shll(dst, 24);
    sarl(dst, 24);
  }
  return off;
}
// Note: load_signed_short used to be called load_signed_word.
// Although the 'w' in x86 opcodes refers to the term "word" in the assembler
// manual, which means 16 bits, that usage is found nowhere in HotSpot code.
// The term "word" in HotSpot means a 32- or 64-bit machine word.
// Load a sign-extended 16-bit value into dst; returns the code offset of
// the actual load (for implicit null-check bookkeeping).
int MacroAssembler::load_signed_short(Register dst, Address src) {
  int off;
  if (LP64_ONLY(true ||) VM_Version::is_P6()) {
    // This is dubious to me since it seems safe to do a signed 16 => 64 bit
    // version but this is what 64bit has always done. This seems to imply
    // that users are only using 32bits worth.
    off = offset();
    movswl(dst, src); // movsxw
  } else {
    off = load_unsigned_short(dst, src);
    shll(dst, 16);
    sarl(dst, 16);
  }
  return off;
}
// Load a zero-extended byte into dst; returns the code offset of the
// actual load.  The pre-P6 path zeroes dst first, so it cannot be used
// when src addresses through dst (hence the src.uses(dst) check).
int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
  // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
  // and "3.9 Partial Register Penalties", p. 22).
  int off;
  if (LP64_ONLY(true || ) VM_Version::is_P6() || src.uses(dst)) {
    off = offset();
    movzbl(dst, src); // movzxb
  } else {
    xorl(dst, dst);
    off = offset();
    movb(dst, src);
  }
  return off;
}
// Note: load_unsigned_short used to be called load_unsigned_word.
// Load a zero-extended 16-bit value into dst; returns the code offset of
// the actual load.  As with load_unsigned_byte, the xor path cannot be
// used when src addresses through dst.
int MacroAssembler::load_unsigned_short(Register dst, Address src) {
  // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
  // and "3.9 Partial Register Penalties", p. 22).
  int off;
  if (LP64_ONLY(true ||) VM_Version::is_P6() || src.uses(dst)) {
    off = offset();
    movzwl(dst, src); // movzxw
  } else {
    xorl(dst, dst);
    off = offset();
    movw(dst, src);
  }
  return off;
}
// Load a value of 'size_in_bytes' (1, 2, 4 or 8) from src into dst with
// the requested extension.  The switch key encodes signedness by XORing
// the size with -1, i.e. signed cases match the one's-complement (~size)
// labels and unsigned cases the plain size labels.
void MacroAssembler::load_sized_value(Register dst, Address src,
                                      int size_in_bytes, bool is_signed) {
  switch (size_in_bytes ^ (is_signed ? -1 : 0)) {
#ifndef _LP64
  // For case 8, caller is responsible for manually loading
  // the second word into another register.
  case ~8:  // fall through:
  case  8:  movl(                dst, src ); break;
#else
  case ~8:  // fall through:
  case  8:  movq(                dst, src ); break;
#endif
  case ~4:  // fall through:
  case  4:  movl(                dst, src ); break;
  case ~2:  load_signed_short(   dst, src ); break;
  case  2:  load_unsigned_short( dst, src ); break;
  case ~1:  load_signed_byte(    dst, src ); break;
  case  1:  load_unsigned_byte(  dst, src ); break;
  default:  ShouldNotReachHere();
  }
}
6442 void MacroAssembler::mov32(AddressLiteral dst, Register src) {
6443 if (reachable(dst)) {
6444 movl(as_Address(dst), src);
6445 } else {
6446 lea(rscratch1, dst);
6447 movl(Address(rscratch1, 0), src);
6448 }
6449 }
6451 void MacroAssembler::mov32(Register dst, AddressLiteral src) {
6452 if (reachable(src)) {
6453 movl(dst, as_Address(src));
6454 } else {
6455 lea(rscratch1, src);
6456 movl(dst, Address(rscratch1, 0));
6457 }
6458 }
6460 // C++ bool manipulation
6462 void MacroAssembler::movbool(Register dst, Address src) {
6463 if(sizeof(bool) == 1)
6464 movb(dst, src);
6465 else if(sizeof(bool) == 2)
6466 movw(dst, src);
6467 else if(sizeof(bool) == 4)
6468 movl(dst, src);
6469 else
6470 // unsupported
6471 ShouldNotReachHere();
6472 }
6474 void MacroAssembler::movbool(Address dst, bool boolconst) {
6475 if(sizeof(bool) == 1)
6476 movb(dst, (int) boolconst);
6477 else if(sizeof(bool) == 2)
6478 movw(dst, (int) boolconst);
6479 else if(sizeof(bool) == 4)
6480 movl(dst, (int) boolconst);
6481 else
6482 // unsupported
6483 ShouldNotReachHere();
6484 }
6486 void MacroAssembler::movbool(Address dst, Register src) {
6487 if(sizeof(bool) == 1)
6488 movb(dst, src);
6489 else if(sizeof(bool) == 2)
6490 movw(dst, src);
6491 else if(sizeof(bool) == 4)
6492 movl(dst, src);
6493 else
6494 // unsupported
6495 ShouldNotReachHere();
6496 }
// Store the byte constant 'src' at an array-element address.
void MacroAssembler::movbyte(ArrayAddress dst, int src) {
  movb(as_Address(dst), src);
}
6502 void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src) {
6503 if (reachable(src)) {
6504 if (UseXmmLoadAndClearUpper) {
6505 movsd (dst, as_Address(src));
6506 } else {
6507 movlpd(dst, as_Address(src));
6508 }
6509 } else {
6510 lea(rscratch1, src);
6511 if (UseXmmLoadAndClearUpper) {
6512 movsd (dst, Address(rscratch1, 0));
6513 } else {
6514 movlpd(dst, Address(rscratch1, 0));
6515 }
6516 }
6517 }
6519 void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src) {
6520 if (reachable(src)) {
6521 movss(dst, as_Address(src));
6522 } else {
6523 lea(rscratch1, src);
6524 movss(dst, Address(rscratch1, 0));
6525 }
6526 }
6528 void MacroAssembler::movptr(Register dst, Register src) {
6529 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
6530 }
6532 void MacroAssembler::movptr(Register dst, Address src) {
6533 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
6534 }
6536 // src should NEVER be a real pointer. Use AddressLiteral for true pointers
6537 void MacroAssembler::movptr(Register dst, intptr_t src) {
6538 LP64_ONLY(mov64(dst, src)) NOT_LP64(movl(dst, src));
6539 }
6541 void MacroAssembler::movptr(Address dst, Register src) {
6542 LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
6543 }
6545 void MacroAssembler::movss(XMMRegister dst, AddressLiteral src) {
6546 if (reachable(src)) {
6547 movss(dst, as_Address(src));
6548 } else {
6549 lea(rscratch1, src);
6550 movss(dst, Address(rscratch1, 0));
6551 }
6552 }
6554 void MacroAssembler::null_check(Register reg, int offset) {
6555 if (needs_explicit_null_check(offset)) {
6556 // provoke OS NULL exception if reg = NULL by
6557 // accessing M[reg] w/o changing any (non-CC) registers
6558 // NOTE: cmpl is plenty here to provoke a segv
6559 cmpptr(rax, Address(reg, 0));
6560 // Note: should probably use testl(rax, Address(reg, 0));
6561 // may be shorter code (however, this version of
6562 // testl needs to be implemented first)
6563 } else {
6564 // nothing to do, (later) access of M[reg + offset]
6565 // will provoke OS NULL exception if reg = NULL
6566 }
6567 }
// Emit a call into the VM's breakpoint handler rather than a raw int3.
void MacroAssembler::os_breakpoint() {
  // instead of directly emitting a breakpoint, call os:breakpoint for better debugability
  // (e.g., MSVC can't call ps() otherwise)
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
}
// Restore FPU then integer state in the reverse order of push_CPU_state.
void MacroAssembler::pop_CPU_state() {
  pop_FPU_state();
  pop_IU_state();
}
6580 void MacroAssembler::pop_FPU_state() {
6581 NOT_LP64(frstor(Address(rsp, 0));)
6582 LP64_ONLY(fxrstor(Address(rsp, 0));)
6583 addptr(rsp, FPUStateSizeInWords * wordSize);
6584 }
6586 void MacroAssembler::pop_IU_state() {
6587 popa();
6588 LP64_ONLY(addq(rsp, 8));
6589 popf();
6590 }
// Save Integer and Float state
// Warning: Stack must be 16 byte aligned (64bit)
void MacroAssembler::push_CPU_state() {
  push_IU_state();
  push_FPU_state();
}
// Save FPU (and, on LP64 via fxsave, SSE) state into a freshly reserved
// stack area of FPUStateSizeInWords words.
void MacroAssembler::push_FPU_state() {
  subptr(rsp, FPUStateSizeInWords * wordSize);
#ifndef _LP64
  fnsave(Address(rsp, 0));
  fwait();  // fnsave does not wait for pending exceptions; fwait does
#else
  fxsave(Address(rsp, 0));
#endif // LP64
}
6609 void MacroAssembler::push_IU_state() {
6610 // Push flags first because pusha kills them
6611 pushf();
6612 // Make sure rsp stays 16-byte aligned
6613 LP64_ONLY(subq(rsp, 8));
6614 pusha();
6615 }
// Clear the last-Java-frame anchor fields in the JavaThread, making the
// current frame invisible to stack walkers. fp/pc are cleared on request.
void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp, bool clear_pc) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
    java_thread = rdi;
    get_thread(java_thread);
  }
  // we must set sp to zero to clear frame
  movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
  if (clear_fp) {
    movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
  }

  if (clear_pc)
    movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);

}
6634 void MacroAssembler::restore_rax(Register tmp) {
6635 if (tmp == noreg) pop(rax);
6636 else if (tmp != rax) mov(rax, tmp);
6637 }
// Round reg up to the next multiple of modulus (modulus must be a power
// of two for the mask below to work).
void MacroAssembler::round_to(Register reg, int modulus) {
  addptr(reg, modulus - 1);
  andptr(reg, -modulus);
}
6644 void MacroAssembler::save_rax(Register tmp) {
6645 if (tmp == noreg) push(rax);
6646 else if (tmp != rax) mov(tmp, rax);
6647 }
// Write serialization page so VM thread can do a pseudo remote membar.
// We use the current thread pointer to calculate a thread specific
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
void MacroAssembler::serialize_memory(Register thread, Register tmp) {
  // hash the thread pointer down to an int-aligned offset within the page
  movl(tmp, thread);
  shrl(tmp, os::get_serialize_page_shift_count());
  andl(tmp, (os::vm_page_size() - sizeof(int)));

  Address index(noreg, tmp, Address::times_1);
  ExternalAddress page(os::get_memory_serialize_page());

  // Size of store must match masking code above
  movl(as_Address(ArrayAddress(page, index)), tmp);
}
// Calls to C land
//
// When entering C land, the rbp, & rsp of the last Java frame have to be recorded
// in the (thread-local) JavaThread object. When leaving C land, the last Java fp
// has to be reset to 0. This is required to allow proper stack traversal.
void MacroAssembler::set_last_Java_frame(Register java_thread,
                                         Register last_java_sp,
                                         Register last_java_fp,
                                         address  last_java_pc) {
  // determine java_thread register
  if (!java_thread->is_valid()) {
    java_thread = rdi;
    get_thread(java_thread);
  }
  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = rsp;
  }

  // last_java_fp is optional

  if (last_java_fp->is_valid()) {
    movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), last_java_fp);
  }

  // last_java_pc is optional

  if (last_java_pc != NULL) {
    lea(Address(java_thread,
                 JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()),
        InternalAddress(last_java_pc));

  }
  // store sp last; NOTE(review): a walker presumably keys off a non-null sp,
  // so fp/pc must already be in place by this point — confirm before reordering
  movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
}
6701 void MacroAssembler::shlptr(Register dst, int imm8) {
6702 LP64_ONLY(shlq(dst, imm8)) NOT_LP64(shll(dst, imm8));
6703 }
6705 void MacroAssembler::shrptr(Register dst, int imm8) {
6706 LP64_ONLY(shrq(dst, imm8)) NOT_LP64(shrl(dst, imm8));
6707 }
// Sign-extend the low byte of reg into the full 32-bit register.
void MacroAssembler::sign_extend_byte(Register reg) {
  // movsbl needs a byte-addressable source register and (on 32-bit) a P6+
  // CPU; otherwise fall back to a shift pair. LP64 always takes movsbl.
  if (LP64_ONLY(true ||) (VM_Version::is_P6() && reg->has_byte_register())) {
    movsbl(reg, reg); // movsxb
  } else {
    shll(reg, 24);
    sarl(reg, 24);
  }
}
// Sign-extend the low 16 bits of reg into the full 32-bit register.
void MacroAssembler::sign_extend_short(Register reg) {
  // movswl requires P6+ on 32-bit; otherwise use a shift pair. LP64 always
  // takes movswl.
  if (LP64_ONLY(true ||) VM_Version::is_P6()) {
    movswl(reg, reg); // movsxw
  } else {
    shll(reg, 16);
    sarl(reg, 16);
  }
}
6727 //////////////////////////////////////////////////////////////////////////////////
6728 #ifndef SERIALGC
// G1 SATB pre-write barrier: if concurrent marking is active, record the
// value currently stored at [obj] in the thread-local SATB queue (fast
// path) or hand it to the runtime when the queue buffer is full.
void MacroAssembler::g1_write_barrier_pre(Register obj,
#ifndef _LP64
                                          Register thread,
#endif
                                          Register tmp,
                                          Register tmp2,
                                          bool tosca_live) {
  LP64_ONLY(Register thread = r15_thread;)
  Address in_progress(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                       PtrQueue::byte_offset_of_active()));

  Address index(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                 PtrQueue::byte_offset_of_index()));
  Address buffer(thread, in_bytes(JavaThread::satb_mark_queue_offset() +
                                  PtrQueue::byte_offset_of_buf()));


  Label done;
  Label runtime;

  // if (!marking_in_progress) goto done;
  if (in_bytes(PtrQueue::byte_width_of_active()) == 4) {
    cmpl(in_progress, 0);
  } else {
    assert(in_bytes(PtrQueue::byte_width_of_active()) == 1, "Assumption");
    cmpb(in_progress, 0);
  }
  jcc(Assembler::equal, done);

  // if (x.f == NULL) goto done;  (no need to log a NULL previous value)
  cmpptr(Address(obj, 0), NULL_WORD);
  jcc(Assembler::equal, done);

  // Can we store original value in the thread's buffer?

  LP64_ONLY(movslq(tmp, index);)
  movptr(tmp2, Address(obj, 0));
#ifdef _LP64
  cmpq(tmp, 0);
#else
  cmpl(index, 0);
#endif
  jcc(Assembler::equal, runtime);  // index == 0 means the buffer is full
#ifdef _LP64
  subq(tmp, wordSize);
  movl(index, tmp);
  addq(tmp, buffer);
#else
  subl(index, wordSize);
  movl(tmp, buffer);
  addl(tmp, index);
#endif
  // store the previous value at buffer[index]
  movptr(Address(tmp, 0), tmp2);
  jmp(done);
  bind(runtime);
  // save the live input values
  if(tosca_live) push(rax);
  push(obj);
#ifdef _LP64
  movq(c_rarg0, Address(obj, 0));
  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), c_rarg0, r15_thread);
#else
  push(thread);
  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_pre), tmp2, thread);
  pop(thread);
#endif
  pop(obj);
  if(tosca_live) pop(rax);
  bind(done);

}
// G1 post-write barrier: for a region-crossing, non-NULL store, dirty the
// card for store_addr and enqueue it in the thread's dirty-card queue,
// falling back to the runtime when the queue buffer is full.
void MacroAssembler::g1_write_barrier_post(Register store_addr,
                                           Register new_val,
#ifndef _LP64
                                           Register thread,
#endif
                                           Register tmp,
                                           Register tmp2) {

  LP64_ONLY(Register thread = r15_thread;)
  Address queue_index(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                       PtrQueue::byte_offset_of_index()));
  Address buffer(thread, in_bytes(JavaThread::dirty_card_queue_offset() +
                                  PtrQueue::byte_offset_of_buf()));
  BarrierSet* bs = Universe::heap()->barrier_set();
  CardTableModRefBS* ct = (CardTableModRefBS*)bs;
  Label done;
  Label runtime;

  // Does store cross heap regions?
  // (xor of the two addresses has a bit above the region-size shift iff
  // they lie in different regions)

  movptr(tmp, store_addr);
  xorptr(tmp, new_val);
  shrptr(tmp, HeapRegion::LogOfHRGrainBytes);
  jcc(Assembler::equal, done);

  // crosses regions, storing NULL?

  cmpptr(new_val, (int32_t) NULL_WORD);
  jcc(Assembler::equal, done);

  // storing region crossing non-NULL, is card already dirty?

  ExternalAddress cardtable((address) ct->byte_map_base);
  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
#ifdef _LP64
  const Register card_addr = tmp;

  movq(card_addr, store_addr);
  shrq(card_addr, CardTableModRefBS::card_shift);

  lea(tmp2, cardtable);

  // get the address of the card
  addq(card_addr, tmp2);
#else
  const Register card_index = tmp;

  movl(card_index, store_addr);
  shrl(card_index, CardTableModRefBS::card_shift);

  Address index(noreg, card_index, Address::times_1);
  const Register card_addr = tmp;
  lea(card_addr, as_Address(ArrayAddress(cardtable, index)));
#endif
  cmpb(Address(card_addr, 0), 0);
  jcc(Assembler::equal, done);

  // storing a region crossing, non-NULL oop, card is clean.
  // dirty card and log.

  movb(Address(card_addr, 0), 0);

  cmpl(queue_index, 0);
  jcc(Assembler::equal, runtime);  // queue buffer full: go to runtime
  subl(queue_index, wordSize);
  movptr(tmp2, buffer);
#ifdef _LP64
  movslq(rscratch1, queue_index);
  addq(tmp2, rscratch1);
  movq(Address(tmp2, 0), card_addr);
#else
  addl(tmp2, queue_index);
  movl(Address(tmp2, 0), card_index);
#endif
  jmp(done);

  bind(runtime);
  // save the live input values
  push(store_addr);
  push(new_val);
#ifdef _LP64
  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, r15_thread);
#else
  push(thread);
  call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::g1_wb_post), card_addr, thread);
  pop(thread);
#endif
  pop(new_val);
  pop(store_addr);

  bind(done);

}
6896 #endif // SERIALGC
6897 //////////////////////////////////////////////////////////////////////////////////
// Card-table store check (dirty the card covering obj).
void MacroAssembler::store_check(Register obj) {
  // Does a store check for the oop in register obj. The content of
  // register obj is destroyed afterwards.
  store_check_part_1(obj);
  store_check_part_2(obj);
}
// Variant taking the store destination; dst is unused here because the
// card index is derived from obj alone.
void MacroAssembler::store_check(Register obj, Address dst) {
  store_check(obj);
}
// split the store check operation so that other instructions can be scheduled inbetween
// Part 1: turn the oop in obj into its card index (destroys obj).
void MacroAssembler::store_check_part_1(Register obj) {
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
  shrptr(obj, CardTableModRefBS::card_shift);
}
// Part 2: mark the card whose index is in obj (as produced by part 1)
// as dirty by storing 0 into the card table byte.
void MacroAssembler::store_check_part_2(Register obj) {
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
  CardTableModRefBS* ct = (CardTableModRefBS*)bs;
  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

  // The calculation for byte_map_base is as follows:
  // byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  // So this essentially converts an address to a displacement and
  // it will never need to be relocated. On 64bit however the value may be too
  // large for a 32bit displacement

  intptr_t disp = (intptr_t) ct->byte_map_base;
  if (is_simm32(disp)) {
    Address cardtable(noreg, obj, Address::times_1, disp);
    movb(cardtable, 0);
  } else {
    // By doing it as an ExternalAddress disp could be converted to a rip-relative
    // displacement and done in a single instruction given favorable mapping and
    // a smarter version of as_Address. Worst case it is two instructions which
    // is no worse off then loading disp into a register and doing as a simple
    // Address() as above.
    // We can't do as ExternalAddress as the only style since if disp == 0 we'll
    // assert since NULL isn't acceptable in a reloci (see 6644928). In any case
    // in some cases we'll get a single instruction version.

    ExternalAddress cardtable((address)disp);
    Address index(noreg, obj, Address::times_1);
    movb(as_Address(ArrayAddress(cardtable, index)), 0);
  }
}
6951 void MacroAssembler::subptr(Register dst, int32_t imm32) {
6952 LP64_ONLY(subq(dst, imm32)) NOT_LP64(subl(dst, imm32));
6953 }
6955 void MacroAssembler::subptr(Register dst, Register src) {
6956 LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src));
6957 }
6959 void MacroAssembler::test32(Register src1, AddressLiteral src2) {
6960 // src2 must be rval
6962 if (reachable(src2)) {
6963 testl(src1, as_Address(src2));
6964 } else {
6965 lea(rscratch1, src2);
6966 testl(src1, Address(rscratch1, 0));
6967 }
6968 }
6970 // C++ bool manipulation
6971 void MacroAssembler::testbool(Register dst) {
6972 if(sizeof(bool) == 1)
6973 testb(dst, 0xff);
6974 else if(sizeof(bool) == 2) {
6975 // testw implementation needed for two byte bools
6976 ShouldNotReachHere();
6977 } else if(sizeof(bool) == 4)
6978 testl(dst, dst);
6979 else
6980 // unsupported
6981 ShouldNotReachHere();
6982 }
6984 void MacroAssembler::testptr(Register dst, Register src) {
6985 LP64_ONLY(testq(dst, src)) NOT_LP64(testl(dst, src));
6986 }
// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
// Bump-pointer allocation from the thread-local allocation buffer; jumps to
// slow_case when the TLAB does not have room.
void MacroAssembler::tlab_allocate(Register obj,
                                   Register var_size_in_bytes,
                                   int con_size_in_bytes,
                                   Register t1,
                                   Register t2,
                                   Label& slow_case) {
  assert_different_registers(obj, t1, t2);
  assert_different_registers(obj, var_size_in_bytes, t1);
  Register end = t2;
  Register thread = NOT_LP64(t1) LP64_ONLY(r15_thread);

  verify_tlab();

  NOT_LP64(get_thread(thread));

  // obj = current TLAB top; end = obj + size
  movptr(obj, Address(thread, JavaThread::tlab_top_offset()));
  if (var_size_in_bytes == noreg) {
    lea(end, Address(obj, con_size_in_bytes));
  } else {
    lea(end, Address(obj, var_size_in_bytes, Address::times_1));
  }
  cmpptr(end, Address(thread, JavaThread::tlab_end_offset()));
  jcc(Assembler::above, slow_case);

  // update the tlab top pointer
  movptr(Address(thread, JavaThread::tlab_top_offset()), end);

  // recover var_size_in_bytes if necessary (caller passed t2 as the size)
  if (var_size_in_bytes == end) {
    subptr(var_size_in_bytes, obj);
  }
  verify_tlab();
}
// Preserves rbx, and rdx.
// Refill the thread's TLAB from eden: either retain the current TLAB (if
// the remaining space is worth keeping) and allocate in shared space, or
// discard it (filling the leftover with a dummy int array so the heap
// stays parseable) and carve a fresh TLAB out of eden.
void MacroAssembler::tlab_refill(Label& retry,
                                 Label& try_eden,
                                 Label& slow_case) {
  Register top = rax;
  Register t1  = rcx;
  Register t2  = rsi;
  Register thread_reg = NOT_LP64(rdi) LP64_ONLY(r15_thread);
  assert_different_registers(top, thread_reg, t1, t2, /* preserve: */ rbx, rdx);
  Label do_refill, discard_tlab;

  if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
    // No allocation in the shared eden.
    jmp(slow_case);
  }

  NOT_LP64(get_thread(thread_reg));

  movptr(top, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
  movptr(t1,  Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));

  // calculate amount of free space
  subptr(t1, top);
  shrptr(t1, LogHeapWordSize);

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())));
  jcc(Assembler::lessEqual, discard_tlab);

  // Retain
  // %%% yuck as movptr...
  movptr(t2, (int32_t) ThreadLocalAllocBuffer::refill_waste_limit_increment());
  addptr(Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())), t2);
  if (TLABStats) {
    // increment number of slow_allocations
    addl(Address(thread_reg, in_bytes(JavaThread::tlab_slow_allocations_offset())), 1);
  }
  jmp(try_eden);

  bind(discard_tlab);
  if (TLABStats) {
    // increment number of refills
    addl(Address(thread_reg, in_bytes(JavaThread::tlab_number_of_refills_offset())), 1);
    // accumulate wastage -- t1 is amount free in tlab
    addl(Address(thread_reg, in_bytes(JavaThread::tlab_fast_refill_waste_offset())), t1);
  }

  // if tlab is currently allocated (top or end != null) then
  // fill [top, end + alignment_reserve) with array object
  testptr (top, top);
  jcc(Assembler::zero, do_refill);

  // set up the mark word
  movptr(Address(top, oopDesc::mark_offset_in_bytes()), (intptr_t)markOopDesc::prototype()->copy_set_hash(0x2));
  // set the length to the remaining space
  subptr(t1, typeArrayOopDesc::header_size(T_INT));
  addptr(t1, (int32_t)ThreadLocalAllocBuffer::alignment_reserve());
  shlptr(t1, log2_intptr(HeapWordSize/sizeof(jint)));
  movptr(Address(top, arrayOopDesc::length_offset_in_bytes()), t1);
  // set klass to intArrayKlass
  // dubious reloc why not an oop reloc?
  movptr(t1, ExternalAddress((address) Universe::intArrayKlassObj_addr()));
  // store klass last.  concurrent gcs assumes klass length is valid if
  // klass field is not null.
  store_klass(top, t1);

  // refill the tlab with an eden allocation
  bind(do_refill);
  movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
  shlptr(t1, LogHeapWordSize);
  // add object_size ??
  eden_allocate(top, t1, 0, t2, slow_case);

  // Check that t1 was preserved in eden_allocate.
#ifdef ASSERT
  if (UseTLAB) {
    Label ok;
    Register tsize = rsi;
    assert_different_registers(tsize, thread_reg, t1);
    push(tsize);
    movptr(tsize, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
    shlptr(tsize, LogHeapWordSize);
    cmpptr(t1, tsize);
    jcc(Assembler::equal, ok);
    stop("assert(t1 != tlab size)");
    should_not_reach_here();

    bind(ok);
    pop(tsize);
  }
#endif
  // install the new TLAB bounds, leaving the alignment reserve at the end
  movptr(Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())), top);
  movptr(Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())), top);
  addptr(top, t1);
  subptr(top, (int32_t)ThreadLocalAllocBuffer::alignment_reserve_in_bytes());
  movptr(Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())), top);
  verify_tlab();
  jmp(retry);
}
// pi/4, used as the fast-path range bound below.
static const double pi_4 = 0.7853981633974483;

// Compute sin/cos/tan of the value on the FPU stack top, in place.
// trig selects the function ('s'/'c'/'t'); |x| <= pi/4 uses the FPU
// instruction directly, anything else calls into SharedRuntime.
void MacroAssembler::trigfunc(char trig, int num_fpu_regs_in_use) {
  // A hand-coded argument reduction for values in fabs(pi/4, pi/2)
  // was attempted in this code; unfortunately it appears that the
  // switch to 80-bit precision and back causes this to be
  // unprofitable compared with simply performing a runtime call if
  // the argument is out of the (-pi/4, pi/4) range.

  Register tmp = noreg;
  if (!VM_Version::supports_cmov()) {
    // fcmp needs a temporary so preserve rbx,
    tmp = rbx;
    push(tmp);
  }

  Label slow_case, done;

  ExternalAddress pi4_adr = (address)&pi_4;
  if (reachable(pi4_adr)) {
    // x ?<= pi/4
    fld_d(pi4_adr);
    fld_s(1);                // Stack:  X  PI/4  X
    fabs();                  // Stack: |X| PI/4  X
    fcmp(tmp);
    jcc(Assembler::above, slow_case);

    // fastest case: -pi/4 <= x <= pi/4
    switch(trig) {
    case 's':
      fsin();
      break;
    case 'c':
      fcos();
      break;
    case 't':
      ftan();
      break;
    default:
      assert(false, "bad intrinsic");
      break;
    }
    jmp(done);
  }

  // slow case: runtime call
  bind(slow_case);
  // Preserve registers across runtime call
  pusha();
  int incoming_argument_and_return_value_offset = -1;
  if (num_fpu_regs_in_use > 1) {
    // Must preserve all other FPU regs (could alternatively convert
    // SharedRuntime::dsin and dcos into assembly routines known not to trash
    // FPU state, but can not trust C compiler)
    NEEDS_CLEANUP;
    // NOTE that in this case we also push the incoming argument to
    // the stack and restore it later; we also use this stack slot to
    // hold the return value from dsin or dcos.
    for (int i = 0; i < num_fpu_regs_in_use; i++) {
      subptr(rsp, sizeof(jdouble));
      fstp_d(Address(rsp, 0));
    }
    incoming_argument_and_return_value_offset = sizeof(jdouble)*(num_fpu_regs_in_use-1);
    fld_d(Address(rsp, incoming_argument_and_return_value_offset));
  }
  subptr(rsp, sizeof(jdouble));
  fstp_d(Address(rsp, 0));
#ifdef _LP64
  // 64-bit ABI passes the argument in xmm0, not on the FPU stack
  movdbl(xmm0, Address(rsp, 0));
#endif // _LP64

  // NOTE: we must not use call_VM_leaf here because that requires a
  // complete interpreter frame in debug mode -- same bug as 4387334
  // MacroAssembler::call_VM_leaf_base is perfectly safe and will
  // do proper 64bit abi

  NEEDS_CLEANUP;
  // Need to add stack banging before this runtime call if it needs to
  // be taken; however, there is no generic stack banging routine at
  // the MacroAssembler level
  switch(trig) {
  case 's':
    {
      MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), 0);
    }
    break;
  case 'c':
    {
      MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), 0);
    }
    break;
  case 't':
    {
      MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), 0);
    }
    break;
  default:
    assert(false, "bad intrinsic");
    break;
  }
#ifdef _LP64
  // move the xmm0 result back onto the FPU stack
  movsd(Address(rsp, 0), xmm0);
  fld_d(Address(rsp, 0));
#endif // _LP64
  addptr(rsp, sizeof(jdouble));
  if (num_fpu_regs_in_use > 1) {
    // Must save return value to stack and then restore entire FPU stack
    fstp_d(Address(rsp, incoming_argument_and_return_value_offset));
    for (int i = 0; i < num_fpu_regs_in_use; i++) {
      fld_d(Address(rsp, 0));
      addptr(rsp, sizeof(jdouble));
    }
  }
  popa();

  // Come here with result in F-TOS
  bind(done);

  if (tmp != noreg) {
    pop(tmp);
  }
}
// Look up the method for a megamorphic invokeinterface call.
// The target method is determined by <intf_klass, itable_index>.
// The receiver klass is in recv_klass.
// On success, the result will be in method_result, and execution falls through.
// On failure, execution transfers to the given label.
void MacroAssembler::lookup_interface_method(Register recv_klass,
                                             Register intf_klass,
                                             RegisterOrConstant itable_index,
                                             Register method_result,
                                             Register scan_temp,
                                             Label& L_no_such_interface) {
  assert_different_registers(recv_klass, intf_klass, method_result, scan_temp);
  assert(itable_index.is_constant() || itable_index.as_register() == method_result,
         "caller must use same register for non-constant itable index as for method");

  // Compute start of first itableOffsetEntry (which is at the end of the vtable)
  int vtable_base = instanceKlass::vtable_start_offset() * wordSize;
  int itentry_off = itableMethodEntry::method_offset_in_bytes();
  int scan_step   = itableOffsetEntry::size() * wordSize;
  int vte_size    = vtableEntry::size() * wordSize;
  Address::ScaleFactor times_vte_scale = Address::times_ptr;
  assert(vte_size == wordSize, "else adjust times_vte_scale");

  movl(scan_temp, Address(recv_klass, instanceKlass::vtable_length_offset() * wordSize));

  // %%% Could store the aligned, prescaled offset in the klassoop.
  lea(scan_temp, Address(recv_klass, scan_temp, times_vte_scale, vtable_base));
  if (HeapWordsPerLong > 1) {
    // Round up to align_object_offset boundary
    // see code for instanceKlass::start_of_itable!
    round_to(scan_temp, BytesPerLong);
  }

  // Adjust recv_klass by scaled itable_index, so we can free itable_index.
  assert(itableMethodEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
  lea(recv_klass, Address(recv_klass, itable_index, Address::times_ptr, itentry_off));

  // The scan loop below, with the first iteration peeled:
  // for (scan = klass->itable(); scan->interface() != NULL; scan += scan_step) {
  //   if (scan->interface() == intf) {
  //     result = (klass + scan->offset() + itable_index);
  //   }
  // }
  Label search, found_method;

  for (int peel = 1; peel >= 0; peel--) {
    movptr(method_result, Address(scan_temp, itableOffsetEntry::interface_offset_in_bytes()));
    cmpptr(intf_klass, method_result);

    if (peel) {
      jccb(Assembler::equal, found_method);
    } else {
      jccb(Assembler::notEqual, search);
      // (invert the test to fall through to found_method...)
    }

    if (!peel)  break;

    bind(search);

    // Check that the previous entry is non-null.  A null entry means that
    // the receiver class doesn't implement the interface, and wasn't the
    // same as when the caller was compiled.
    testptr(method_result, method_result);
    jcc(Assembler::zero, L_no_such_interface);
    addptr(scan_temp, scan_step);
  }

  bind(found_method);

  // Got a hit.
  movl(scan_temp, Address(scan_temp, itableOffsetEntry::offset_offset_in_bytes()));
  movptr(method_result, Address(recv_klass, scan_temp, Address::times_1));
}
// Full subtype check: jumps to L_success if sub_klass is a subtype of
// super_klass, otherwise falls through (the local failure label is bound
// at the end). Combines the fast-path cache probe with the slow-path
// secondary-supers scan.
void MacroAssembler::check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp_reg,
                           Label& L_success) {
  Label L_failure;
  check_klass_subtype_fast_path(sub_klass, super_klass, temp_reg,        &L_success, &L_failure, NULL);
  check_klass_subtype_slow_path(sub_klass, super_klass, temp_reg, noreg, &L_success, NULL);
  bind(L_failure);
}
// Fast-path subtype check: identity test, then the super_check_offset
// display probe. Decides success/failure where it can; otherwise jumps to
// L_slow_path. Any one (but at most one) of the three labels may be NULL,
// meaning "fall through" for that outcome.
void MacroAssembler::check_klass_subtype_fast_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   Label* L_slow_path,
                                        RegisterOrConstant super_check_offset) {
  assert_different_registers(sub_klass, super_klass, temp_reg);
  // -1 is the default constant, meaning "offset not supplied; load it here"
  bool must_load_sco = (super_check_offset.constant_or_zero() == -1);
  if (super_check_offset.is_register()) {
    assert_different_registers(sub_klass, super_klass,
                               super_check_offset.as_register());
  } else if (must_load_sco) {
    assert(temp_reg != noreg, "supply either a temp or a register offset");
  }

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == NULL)   { L_success   = &L_fallthrough; label_nulls++; }
  if (L_failure == NULL)   { L_failure   = &L_fallthrough; label_nulls++; }
  if (L_slow_path == NULL) { L_slow_path = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one NULL in the batch");

  int sc_offset = (klassOopDesc::header_size() * HeapWordSize +
                   Klass::secondary_super_cache_offset_in_bytes());
  int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
                    Klass::super_check_offset_offset_in_bytes());
  Address super_check_offset_addr(super_klass, sco_offset);

  // Hacked jcc, which "knows" that L_fallthrough, at least, is in
  // range of a jccb.  If this routine grows larger, reconsider at
  // least some of these.
#define local_jcc(assembler_cond, label)                                \
  if (&(label) == &L_fallthrough)  jccb(assembler_cond, label);         \
  else                             jcc( assembler_cond, label) /*omit semi*/

  // Hacked jmp, which may only be used just before L_fallthrough.
#define final_jmp(label)                                                \
  if (&(label) == &L_fallthrough) { /*do nothing*/ }                    \
  else                            jmp(label)                /*omit semi*/

  // If the pointers are equal, we are done (e.g., String[] elements).
  // This self-check enables sharing of secondary supertype arrays among
  // non-primary types such as array-of-interface.  Otherwise, each such
  // type would need its own customized SSA.
  // We move this check to the front of the fast path because many
  // type checks are in fact trivially successful in this manner,
  // so we get a nicely predicted branch right at the start of the check.
  cmpptr(sub_klass, super_klass);
  local_jcc(Assembler::equal, *L_success);

  // Check the supertype display:
  if (must_load_sco) {
    // Positive movl does right thing on LP64.
    movl(temp_reg, super_check_offset_addr);
    super_check_offset = RegisterOrConstant(temp_reg);
  }
  Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0);
  cmpptr(super_klass, super_check_addr); // load displayed supertype

  // This check has worked decisively for primary supers.
  // Secondary supers are sought in the super_cache ('super_cache_addr').
  // (Secondary supers are interfaces and very deeply nested subtypes.)
  // This works in the same check above because of a tricky aliasing
  // between the super_cache and the primary super display elements.
  // (The 'super_check_addr' can address either, as the case requires.)
  // Note that the cache is updated below if it does not help us find
  // what we need immediately.
  // So if it was a primary super, we can just fail immediately.
  // Otherwise, it's the slow path for us (no success at this point).

  if (super_check_offset.is_register()) {
    local_jcc(Assembler::equal, *L_success);
    cmpl(super_check_offset.as_register(), sc_offset);
    if (L_failure == &L_fallthrough) {
      local_jcc(Assembler::equal, *L_slow_path);
    } else {
      local_jcc(Assembler::notEqual, *L_failure);
      final_jmp(*L_slow_path);
    }
  } else if (super_check_offset.as_constant() == sc_offset) {
    // Need a slow path; fast failure is impossible.
    if (L_slow_path == &L_fallthrough) {
      local_jcc(Assembler::equal, *L_success);
    } else {
      local_jcc(Assembler::notEqual, *L_slow_path);
      final_jmp(*L_success);
    }
  } else {
    // No slow path; it's a fast decision.
    if (L_failure == &L_fallthrough) {
      local_jcc(Assembler::equal, *L_success);
    } else {
      local_jcc(Assembler::notEqual, *L_failure);
      final_jmp(*L_success);
    }
  }

  bind(L_fallthrough);

#undef local_jcc
#undef final_jmp
}
// Slow path of the klass-subtype check: linearly scan sub_klass's
// secondary-supers array for super_klass and, on success, install
// super_klass into the secondary-super cache.
//
// sub_klass:           klass whose supertypes are searched (must not be rax/rcx)
// super_klass:         klass we are looking for
// temp_reg/temp2_reg:  registers this code may clobber (temp2_reg may be noreg)
// L_success/L_failure: branch targets; a NULL label means "fall through"
//                      (at most one may be NULL)
// set_cond_codes:      if true, the AD files rely on Z/NZ reflecting
//                      success/failure and on rdi being left untouched
void MacroAssembler::check_klass_subtype_slow_path(Register sub_klass,
                                                   Register super_klass,
                                                   Register temp_reg,
                                                   Register temp2_reg,
                                                   Label* L_success,
                                                   Label* L_failure,
                                                   bool set_cond_codes) {
  assert_different_registers(sub_klass, super_klass, temp_reg);
  if (temp2_reg != noreg)
    assert_different_registers(sub_klass, super_klass, temp_reg, temp2_reg);
  // True iff reg is one of the caller-provided scratch registers; such
  // registers need not be saved around the repne_scan below.
#define IS_A_TEMP(reg) ((reg) == temp_reg || (reg) == temp2_reg)

  Label L_fallthrough;
  int label_nulls = 0;
  if (L_success == NULL) { L_success = &L_fallthrough; label_nulls++; }
  if (L_failure == NULL) { L_failure = &L_fallthrough; label_nulls++; }
  assert(label_nulls <= 1, "at most one NULL in the batch");

  // a couple of useful fields in sub_klass:
  int ss_offset = (klassOopDesc::header_size() * HeapWordSize +
                   Klass::secondary_supers_offset_in_bytes());
  int sc_offset = (klassOopDesc::header_size() * HeapWordSize +
                   Klass::secondary_super_cache_offset_in_bytes());
  Address secondary_supers_addr(sub_klass, ss_offset);
  Address super_cache_addr(     sub_klass, sc_offset);

  // Do a linear scan of the secondary super-klass chain.
  // This code is rarely used, so simplicity is a virtue here.
  // The repne_scan instruction uses fixed registers, which we must spill.
  // Don't worry too much about pre-existing connections with the input regs.

  assert(sub_klass != rax, "killed reg"); // killed by mov(rax, super)
  assert(sub_klass != rcx, "killed reg"); // killed by lea(rcx, &pst_counter)

  // Get super_klass value into rax (even if it was in rdi or rcx).
  // With compressed oops rax is reloaded even when super_klass == rax,
  // because rax is encoded (and thereby destroyed) below.
  bool pushed_rax = false, pushed_rcx = false, pushed_rdi = false;
  if (super_klass != rax || UseCompressedOops) {
    if (!IS_A_TEMP(rax)) { push(rax); pushed_rax = true; }
    mov(rax, super_klass);
  }
  if (!IS_A_TEMP(rcx)) { push(rcx); pushed_rcx = true; }
  if (!IS_A_TEMP(rdi)) { push(rdi); pushed_rdi = true; }

#ifndef PRODUCT
  // Count how often the slow path is taken (debug builds only).
  int* pst_counter = &SharedRuntime::_partial_subtype_ctr;
  ExternalAddress pst_counter_addr((address) pst_counter);
  NOT_LP64(  incrementl(pst_counter_addr) );
  LP64_ONLY( lea(rcx, pst_counter_addr) );
  LP64_ONLY( incrementl(Address(rcx, 0)) );
#endif //PRODUCT

  // We will consult the secondary-super array.
  movptr(rdi, secondary_supers_addr);
  // Load the array length.  (Positive movl does right thing on LP64.)
  movl(rcx, Address(rdi, arrayOopDesc::length_offset_in_bytes()));
  // Skip to start of data.
  addptr(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT));

  // Scan RCX words at [RDI] for an occurrence of RAX.
  // Set NZ/Z based on last compare.
#ifdef _LP64
  // This part is tricky, as values in supers array could be 32 or 64 bit wide
  // and we store values in objArrays always encoded, thus we need to encode
  // the value of rax before repne.  Note that rax is dead after the repne.
  if (UseCompressedOops) {
    encode_heap_oop_not_null(rax);
    // The superclass is never null; it would be a basic system error if a null
    // pointer were to sneak in here.  Note that we have already loaded the
    // Klass::super_check_offset from the super_klass in the fast path,
    // so if there is a null in that register, we are already in the afterlife.
    repne_scanl();
  } else
#endif // _LP64
    repne_scan();

  // Unspill the temp. registers:
  if (pushed_rdi)  pop(rdi);
  if (pushed_rcx)  pop(rcx);
  if (pushed_rax)  pop(rax);

  if (set_cond_codes) {
    // Special hack for the AD files:  rdi is guaranteed non-zero.
    assert(!pushed_rdi, "rdi must be left non-NULL");
    // Also, the condition codes are properly set Z/NZ on succeed/failure.
  }

  // repne_scan leaves ZF set iff super_klass was found.  Use a short
  // branch when the target is the nearby fallthrough label.
  if (L_failure == &L_fallthrough)
        jccb(Assembler::notEqual, *L_failure);
  else  jcc(Assembler::notEqual, *L_failure);

  // Success.  Cache the super we found and proceed in triumph.
  movptr(super_cache_addr, super_klass);

  if (L_success != &L_fallthrough) {
    jmp(*L_success);
  }

#undef IS_A_TEMP

  bind(L_fallthrough);
}
7542 void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src) {
7543 ucomisd(dst, as_Address(src));
7544 }
7546 void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src) {
7547 ucomiss(dst, as_Address(src));
7548 }
7550 void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src) {
7551 if (reachable(src)) {
7552 xorpd(dst, as_Address(src));
7553 } else {
7554 lea(rscratch1, src);
7555 xorpd(dst, Address(rscratch1, 0));
7556 }
7557 }
7559 void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src) {
7560 if (reachable(src)) {
7561 xorps(dst, as_Address(src));
7562 } else {
7563 lea(rscratch1, src);
7564 xorps(dst, Address(rscratch1, 0));
7565 }
7566 }
// Emit code verifying that reg holds a valid oop; no-op unless
// -XX:+VerifyOops.  The generated code calls the shared
// verify_oop_subroutine with the register value and a site-identifying
// message string.
void MacroAssembler::verify_oop(Register reg, const char* s) {
  if (!VerifyOops) return;

  // Pass register number to verify_oop_subroutine
  // NOTE: the message buffer is intentionally never freed -- its address
  // is embedded in the generated code and must outlive this assembler.
  char* b = new char[strlen(s) + 50];
  sprintf(b, "verify_oop: %s: %s", reg->name(), s);
  push(rax);                          // save rax,
  push(reg);                          // pass register argument
  ExternalAddress buffer((address) b);
  // avoid using pushptr, as it modifies scratch registers
  // and our contract is not to modify anything
  movptr(rax, buffer.addr());
  push(rax);
  // call indirectly to solve generation ordering problem
  movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
  call(rax);
  // NOTE(review): as with verify_oop_addr below, the callee appears to be
  // responsible for popping the arguments and restoring rax -- confirm.
}
// Return *delayed_value_addr plus offset as a RegisterOrConstant.  If the
// delayed value is already known (non-zero) it is returned as a constant;
// otherwise code is emitted to load it indirectly into tmp at runtime,
// which solves a code-generation ordering problem.
RegisterOrConstant MacroAssembler::delayed_value_impl(intptr_t* delayed_value_addr,
                                                      Register tmp,
                                                      int offset) {
  intptr_t value = *delayed_value_addr;
  if (value != 0)
    return RegisterOrConstant(value + offset);

  // load indirectly to solve generation ordering problem
  movptr(tmp, ExternalAddress((address) delayed_value_addr));

#ifdef ASSERT
  // The delayed value must have been filled in by the time the emitted
  // code executes; halt if it is still zero.
  Label L;
  testl(tmp, tmp);
  jccb(Assembler::notZero, L);
  hlt();
  bind(L);
#endif

  if (offset != 0)
    addptr(tmp, offset);

  return RegisterOrConstant(tmp);
}
// Emit code verifying that the oop stored at addr is valid; no-op unless
// -XX:+VerifyOops.  Like verify_oop() but takes the oop from memory
// rather than a register.
void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
  if (!VerifyOops) return;

  // Address adjust(addr.base(), addr.index(), addr.scale(), addr.disp() + BytesPerWord);
  // Pass register number to verify_oop_subroutine
  // NOTE: the message buffer is intentionally never freed -- its address
  // is embedded in the generated code and must outlive this assembler.
  char* b = new char[strlen(s) + 50];
  sprintf(b, "verify_oop_addr: %s", s);

  push(rax);                          // save rax,
  // addr may contain rsp so we will have to adjust it based on the push
  // we just did
  // NOTE: 64bit seemed to have had a bug in that it did movq(addr, rax); which
  // stores rax into addr which is backwards of what was intended.
  if (addr.uses(rsp)) {
    lea(rax, addr);
    pushptr(Address(rax, BytesPerWord));
  } else {
    pushptr(addr);
  }

  ExternalAddress buffer((address) b);
  // pass msg argument
  // avoid using pushptr, as it modifies scratch registers
  // and our contract is not to modify anything
  movptr(rax, buffer.addr());
  push(rax);

  // call indirectly to solve generation ordering problem
  movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
  call(rax);
  // Caller pops the arguments and restores rax, from the stack
}
// Emit debug-only checks that the current thread's TLAB is well-formed,
// i.e. start <= top <= end.  Code is generated only when both UseTLAB and
// VerifyOops are set; the whole body compiles away in product builds.
void MacroAssembler::verify_tlab() {
#ifdef ASSERT
  if (UseTLAB && VerifyOops) {
    Label next, ok;
    Register t1 = rsi;
    // On LP64 the thread lives permanently in r15; on 32-bit it must be
    // loaded into a scratch register.
    Register thread_reg = NOT_LP64(rbx) LP64_ONLY(r15_thread);

    push(t1);
    NOT_LP64(push(thread_reg));
    NOT_LP64(get_thread(thread_reg));

    // Check tlab_top >= tlab_start.
    movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
    cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())));
    jcc(Assembler::aboveEqual, next);
    stop("assert(top >= start)");
    should_not_reach_here();

    // Check tlab_end >= tlab_top.
    bind(next);
    movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));
    cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
    jcc(Assembler::aboveEqual, ok);
    stop("assert(top <= end)");
    should_not_reach_here();

    bind(ok);
    NOT_LP64(pop(thread_reg));
    pop(t1);
  }
#endif
}
// Debugger aid: decoded view of the x87 FPU control word.
class ControlWord {
 public:
  int32_t _value;

  // Bit-field accessors over the raw control word.
  int  rounding_control()  const { return (_value >> 10) & 3;       }
  int  precision_control() const { return (_value >>  8) & 3;       }
  bool precision()         const { return ((_value >> 5) & 1) != 0; }
  bool underflow()         const { return ((_value >> 4) & 1) != 0; }
  bool overflow()          const { return ((_value >> 3) & 1) != 0; }
  bool zero_divide()       const { return ((_value >> 2) & 1) != 0; }
  bool denormalized()      const { return ((_value >> 1) & 1) != 0; }
  bool invalid()           const { return ((_value >> 0) & 1) != 0; }

  // Print the raw value plus decoded mask bits, rounding and precision modes.
  void print() const {
    // rounding-control and precision-control mnemonics, indexed by field value
    static const char* rc_names[4] = { "round near", "round down", "round up ", "chop " };
    static const char* pc_names[4] = { "24 bits ", "reserved", "53 bits ", "64 bits " };
    const char* rc = rc_names[rounding_control()];
    const char* pc = pc_names[precision_control()];
    // exception mask flags: capital letter means the bit is set
    char masks[9];
    masks[0] = ' ';
    masks[1] = ' ';
    masks[2] = precision()    ? 'P' : 'p';
    masks[3] = underflow()    ? 'U' : 'u';
    masks[4] = overflow()     ? 'O' : 'o';
    masks[5] = zero_divide()  ? 'Z' : 'z';
    masks[6] = denormalized() ? 'D' : 'd';
    masks[7] = invalid()      ? 'I' : 'i';
    masks[8] = '\x0';
    // output
    printf("%04x masks = %s, %s, %s", _value & 0xFFFF, masks, rc, pc);
  }

};
// Debugger aid: decoded view of the x87 FPU status word.
class StatusWord {
 public:
  int32_t _value;

  // Busy bit, condition codes and top-of-stack index.
  bool busy()         const { return ((_value >> 15) & 1) != 0; }
  bool C3()           const { return ((_value >> 14) & 1) != 0; }
  bool C2()           const { return ((_value >> 10) & 1) != 0; }
  bool C1()           const { return ((_value >>  9) & 1) != 0; }
  bool C0()           const { return ((_value >>  8) & 1) != 0; }
  int  top()          const { return (_value >> 11) & 7;        }
  // Exception flags.
  bool error_status() const { return ((_value >> 7) & 1) != 0;  }
  bool stack_fault()  const { return ((_value >> 6) & 1) != 0;  }
  bool precision()    const { return ((_value >> 5) & 1) != 0;  }
  bool underflow()    const { return ((_value >> 4) & 1) != 0;  }
  bool overflow()     const { return ((_value >> 3) & 1) != 0;  }
  bool zero_divide()  const { return ((_value >> 2) & 1) != 0;  }
  bool denormalized() const { return ((_value >> 1) & 1) != 0;  }
  bool invalid()      const { return ((_value >> 0) & 1) != 0;  }

  // Print the raw value, decoded exception flags, condition codes and top.
  void print() const {
    // condition codes: digit when set, '-' when clear
    char cc[5];
    cc[0] = C3() ? '3' : '-';
    cc[1] = C2() ? '2' : '-';
    cc[2] = C1() ? '1' : '-';
    cc[3] = C0() ? '0' : '-';
    cc[4] = '\x0';
    // exception flags: capital letter when raised
    char fl[9];
    fl[0] = error_status() ? 'E' : '-';
    fl[1] = stack_fault()  ? 'S' : '-';
    fl[2] = precision()    ? 'P' : '-';
    fl[3] = underflow()    ? 'U' : '-';
    fl[4] = overflow()     ? 'O' : '-';
    fl[5] = zero_divide()  ? 'Z' : '-';
    fl[6] = denormalized() ? 'D' : '-';
    fl[7] = invalid()      ? 'I' : '-';
    fl[8] = '\x0';
    // output
    printf("%04x flags = %s, cc = %s, top = %d", _value & 0xFFFF, fl, cc, top());
  }

};
// Debugger aid: decoded view of the x87 tag word (two bits per register).
class TagWord {
 public:
  int32_t _value;

  // Tag for physical register i: 0=valid, 1=zero, 2=special, 3=empty
  // (string names in FPU_State::tag_as_string).
  int tag_at(int i) const { return (_value >> (2 * i)) & 0x3; }

  // Print the raw 16-bit tag word in hex.
  void print() const {
    printf("%04x", _value & 0xFFFF);
  }

};
// Debugger aid: one 80-bit x87 data register, split into a 64-bit
// mantissa (_m1:_m0) and a 16-bit sign/exponent field (_ex).
class FPU_Register {
 public:
  int32_t _m0;   // mantissa, low 32 bits
  int32_t _m1;   // mantissa, high 32 bits
  int16_t _ex;   // sign bit + biased exponent

  // True iff this register holds the x87 "indefinite" QNaN bit pattern.
  bool is_indefinite() const {
    if (_ex != -1)  return false;
    return _m1 == (int32_t)0xC0000000 && _m0 == 0;
  }

  // Print sign, exponent and mantissa, flagging NaN encodings.
  void print() const {
    char sign = (_ex < 0) ? '-' : '+';
    const char* kind = (_ex == 0x7FFF || _ex == (int16_t)-1) ? "NaN" : " ";
    printf("%c%04hx.%08x%08x %s", sign, _ex, _m1, _m0, kind);
  }

};
7797 class FPU_State {
7798 public:
7799 enum {
7800 register_size = 10,
7801 number_of_registers = 8,
7802 register_mask = 7
7803 };
7805 ControlWord _control_word;
7806 StatusWord _status_word;
7807 TagWord _tag_word;
7808 int32_t _error_offset;
7809 int32_t _error_selector;
7810 int32_t _data_offset;
7811 int32_t _data_selector;
7812 int8_t _register[register_size * number_of_registers];
7814 int tag_for_st(int i) const { return _tag_word.tag_at((_status_word.top() + i) & register_mask); }
7815 FPU_Register* st(int i) const { return (FPU_Register*)&_register[register_size * i]; }
7817 const char* tag_as_string(int tag) const {
7818 switch (tag) {
7819 case 0: return "valid";
7820 case 1: return "zero";
7821 case 2: return "special";
7822 case 3: return "empty";
7823 }
7824 ShouldNotReachHere()
7825 return NULL;
7826 }
7828 void print() const {
7829 // print computation registers
7830 { int t = _status_word.top();
7831 for (int i = 0; i < number_of_registers; i++) {
7832 int j = (i - t) & register_mask;
7833 printf("%c r%d = ST%d = ", (j == 0 ? '*' : ' '), i, j);
7834 st(j)->print();
7835 printf(" %s\n", tag_as_string(_tag_word.tag_at(i)));
7836 }
7837 }
7838 printf("\n");
7839 // print control registers
7840 printf("ctrl = "); _control_word.print(); printf("\n");
7841 printf("stat = "); _status_word .print(); printf("\n");
7842 printf("tags = "); _tag_word .print(); printf("\n");
7843 }
7845 };
// Debugger aid: decoded view of the EFLAGS register.
class Flag_Register {
 public:
  int32_t _value;

  // Individual flag accessors.
  bool overflow()        const { return ((_value >> 11) & 1) != 0; }
  bool direction()       const { return ((_value >> 10) & 1) != 0; }
  bool sign()            const { return ((_value >>  7) & 1) != 0; }
  bool zero()            const { return ((_value >>  6) & 1) != 0; }
  bool auxiliary_carry() const { return ((_value >>  4) & 1) != 0; }
  bool parity()          const { return ((_value >>  2) & 1) != 0; }
  bool carry()           const { return ((_value >>  0) & 1) != 0; }

  // Print the raw value plus a one-letter-per-flag summary.
  void print() const {
    // flags: capital letter when set, '-' when clear
    char fl[8];
    fl[0] = overflow()        ? 'O' : '-';
    fl[1] = direction()       ? 'D' : '-';
    fl[2] = sign()            ? 'S' : '-';
    fl[3] = zero()            ? 'Z' : '-';
    fl[4] = auxiliary_carry() ? 'A' : '-';
    fl[5] = parity()          ? 'P' : '-';
    fl[6] = carry()           ? 'C' : '-';
    fl[7] = '\x0';
    // output
    printf("%08x flags = %s", _value, fl);
  }

};
// Debugger aid: one 32-bit integer-unit register, printed in hex and
// signed decimal.
class IU_Register {
 public:
  int32_t _value;

  void print() const {
    printf("%08x %11d", _value, _value);
  }

};
// Debugger aid: the integer-unit registers and flags as captured on the
// stack by push_CPU_state.
// NOTE(review): the member order presumably mirrors the frame layout built
// by push_CPU_state -- confirm before reordering any fields.
class IU_State {
 public:
  Flag_Register _eflags;
  IU_Register   _rdi;
  IU_Register   _rsi;
  IU_Register   _rbp;
  IU_Register   _rsp;
  IU_Register   _rbx;
  IU_Register   _rdx;
  IU_Register   _rcx;
  IU_Register   _rax;

  // Print all integer registers followed by the flags.
  void print() const {
    // computation registers
    printf("rax, = "); _rax.print(); printf("\n");
    printf("rbx, = "); _rbx.print(); printf("\n");
    printf("rcx = "); _rcx.print(); printf("\n");
    printf("rdx = "); _rdx.print(); printf("\n");
    printf("rdi = "); _rdi.print(); printf("\n");
    printf("rsi = "); _rsi.print(); printf("\n");
    printf("rbp, = "); _rbp.print(); printf("\n");
    printf("rsp = "); _rsp.print(); printf("\n");
    printf("\n");
    // control registers
    printf("flgs = "); _eflags.print(); printf("\n");
  }
};
// Debugger aid: a complete CPU snapshot (FPU state + integer unit) as
// captured by push_CPU_state; printed by _print_CPU_state below.
class CPU_State {
 public:
  FPU_State _fpu_state;
  IU_State  _iu_state;

  // Print the integer unit then the FPU state between separator lines.
  void print() const {
    printf("--------------------------------------------------\n");
    _iu_state .print();
    printf("\n");
    _fpu_state.print();
    printf("--------------------------------------------------\n");
  }

};
// Runtime helper invoked from generated code (see print_CPU_state below):
// print the CPU state that push_CPU_state captured on the stack.
static void _print_CPU_state(CPU_State* state) {
  state->print();
};
// Emit code that dumps the full CPU state to stdout (debugging aid):
// pushes all state, calls _print_CPU_state with a pointer to it, then
// restores everything.
void MacroAssembler::print_CPU_state() {
  push_CPU_state();
  push(rsp);                // pass CPU state
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state)));
  addptr(rsp, wordSize);    // discard argument
  pop_CPU_state();
}
// Runtime helper invoked from generated code (see verify_FPU below):
// check that the x87 register stack in *state is contiguous and has the
// expected depth.  Returns false (after asserting) on any violation.
//   stack_depth >= 0 : exactly stack_depth elements must be in use;
//   stack_depth <  0 : leaf-call mode -- only require ST(7) to be empty.
static bool _verify_FPU(int stack_depth, char* s, CPU_State* state) {
  static int counter = 0;   // total number of checks performed (debug aid)
  FPU_State* fs = &state->_fpu_state;
  counter++;
  // For leaf calls, only verify that the top few elements remain empty.
  // We only need 1 empty at the top for C2 code.
  if( stack_depth < 0 ) {
    if( fs->tag_for_st(7) != 3 ) {
      printf("FPR7 not empty\n");
      state->print();
      assert(false, "error");
      return false;
    }
    return true;            // All other stack states do not matter
  }

  assert((fs->_control_word._value & 0xffff) == StubRoutines::_fpu_cntrl_wrd_std,
         "bad FPU control word");

  // compute stack depth: count used (tag < 3) then empty (tag == 3) slots
  int i = 0;
  while (i < FPU_State::number_of_registers && fs->tag_for_st(i) < 3) i++;
  int d = i;
  while (i < FPU_State::number_of_registers && fs->tag_for_st(i) == 3) i++;
  // verify findings: a used slot after an empty one means a hole
  if (i != FPU_State::number_of_registers) {
    // stack not contiguous
    printf("%s: stack not contiguous at ST%d\n", s, i);
    state->print();
    assert(false, "error");
    return false;
  }
  // check if computed stack depth corresponds to expected stack depth
  if (stack_depth < 0) {
    // NOTE(review): this branch is unreachable -- the stack_depth < 0 case
    // already returned above; kept as-is.
    // expected stack depth is -stack_depth or less
    if (d > -stack_depth) {
      // too many elements on the stack
      printf("%s: <= %d stack elements expected but found %d\n", s, -stack_depth, d);
      state->print();
      assert(false, "error");
      return false;
    }
  } else {
    // expected stack depth is stack_depth
    if (d != stack_depth) {
      // wrong stack depth
      printf("%s: %d stack elements expected but found %d\n", s, stack_depth, d);
      state->print();
      assert(false, "error");
      return false;
    }
  }
  // everything is cool
  return true;
}
// Emit code that calls _verify_FPU with the current CPU state; no-op
// unless -XX:+VerifyFPU.  Breaks into the debugger (int3) if the check
// reports failure.  See _verify_FPU for the meaning of stack_depth.
void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
  if (!VerifyFPU) return;
  push_CPU_state();
  push(rsp);                // pass CPU state
  ExternalAddress msg((address) s);
  // pass message string s
  pushptr(msg.addr());
  push(stack_depth);        // pass stack depth
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, _verify_FPU)));
  addptr(rsp, 3 * wordSize);   // discard arguments
  // check for error
  { Label L;
    testl(rax, rax);
    jcc(Assembler::notZero, L);
    int3();                  // break if error condition
    bind(L);
  }
  pop_CPU_state();
}
// Load the klass of oop src into dst.  With compressed oops on LP64 the
// narrow klass word is loaded as 32 bits and decoded; otherwise a full
// pointer load is emitted.
void MacroAssembler::load_klass(Register dst, Register src) {
#ifdef _LP64
  if (UseCompressedOops) {
    movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
    decode_heap_oop_not_null(dst);
  } else
#endif
    movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
}
// Load into dst the prototype header word of src's klass.  With
// compressed oops the narrow klass reference is decoded for free by
// folding the heap base and shift into the addressing mode of the
// second load.
void MacroAssembler::load_prototype_header(Register dst, Register src) {
#ifdef _LP64
  if (UseCompressedOops) {
    assert (Universe::heap() != NULL, "java heap should be initialized");
    movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
    if (Universe::narrow_oop_shift() != 0) {
      assert(Address::times_8 == LogMinObjAlignmentInBytes &&
             Address::times_8 == Universe::narrow_oop_shift(), "decode alg wrong");
      // Decode and load in one instruction: dst = [heapbase + dst*8 + offset]
      movq(dst, Address(r12_heapbase, dst, Address::times_8, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
    } else {
      // Zero-shift compressed oops: the narrow value is the address itself.
      movq(dst, Address(dst, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
    }
  } else
#endif
  {
    movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
    movptr(dst, Address(dst, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
  }
}
// Store klass src into the header of oop dst.  NOTE: with compressed oops
// on LP64, src is encoded in place and therefore clobbered.
void MacroAssembler::store_klass(Register dst, Register src) {
#ifdef _LP64
  if (UseCompressedOops) {
    encode_heap_oop_not_null(src);
    movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
  } else
#endif
    movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
}
8062 #ifdef _LP64
// Store src into the 32-bit klass-gap field of oop dst's header; the gap
// exists only when compressed oops are enabled, so this is a no-op
// otherwise.
void MacroAssembler::store_klass_gap(Register dst, Register src) {
  if (UseCompressedOops) {
    // Store to klass gap in destination
    movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
  }
}
// Load the (possibly narrow) oop at src into dst, decoding it when
// compressed oops are enabled.
void MacroAssembler::load_heap_oop(Register dst, Address src) {
  if (UseCompressedOops) {
    movl(dst, src);
    decode_heap_oop(dst);
  } else {
    movq(dst, src);
  }
}
// Store the oop in src to dst, encoding it first when compressed oops are
// enabled.  NOTE: in the compressed case src is encoded in place and
// therefore clobbered.
void MacroAssembler::store_heap_oop(Address dst, Register src) {
  if (UseCompressedOops) {
    assert(!dst.uses(src), "not enough registers");
    encode_heap_oop(src);
    movl(dst, src);
  } else {
    movq(dst, src);
  }
}
// Algorithm must match oop.inline.hpp encode_heap_oop.
// Compress the oop in r in place: r = (r - heap_base) >> shift, with a
// NULL oop mapping to a narrow NULL (handled by the cmov below).
void MacroAssembler::encode_heap_oop(Register r) {
  assert (UseCompressedOops, "should be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  if (Universe::narrow_oop_base() == NULL) {
    // Zero-based compressed oops: no base to subtract, just shift.
    verify_oop(r, "broken oop in encode_heap_oop");
    if (Universe::narrow_oop_shift() != 0) {
      assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
      shrq(r, LogMinObjAlignmentInBytes);
    }
    return;
  }
#ifdef ASSERT
  if (CheckCompressedOops) {
    // Verify r12 still holds the heap base before relying on it.
    Label ok;
    push(rscratch1); // cmpptr trashes rscratch1
    cmpptr(r12_heapbase, ExternalAddress((address)Universe::narrow_oop_base_addr()));
    jcc(Assembler::equal, ok);
    stop("MacroAssembler::encode_heap_oop: heap base corrupted?");
    bind(ok);
    pop(rscratch1);
  }
#endif
  verify_oop(r, "broken oop in encode_heap_oop");
  // Substitute the heap base for NULL so the subtraction below yields
  // zero; thus NULL encodes to narrow NULL.
  testq(r, r);
  cmovq(Assembler::equal, r, r12_heapbase);
  subq(r, r12_heapbase);
  shrq(r, LogMinObjAlignmentInBytes);
}
// Compress the known-non-NULL oop in r in place (no NULL check or cmov
// needed): r = (r - heap_base) >> shift.
void MacroAssembler::encode_heap_oop_not_null(Register r) {
  assert (UseCompressedOops, "should be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
#ifdef ASSERT
  if (CheckCompressedOops) {
    // Catch callers that violate the not-null precondition.
    Label ok;
    testq(r, r);
    jcc(Assembler::notEqual, ok);
    stop("null oop passed to encode_heap_oop_not_null");
    bind(ok);
  }
#endif
  verify_oop(r, "broken oop in encode_heap_oop_not_null");
  if (Universe::narrow_oop_base() != NULL) {
    subq(r, r12_heapbase);
  }
  if (Universe::narrow_oop_shift() != 0) {
    assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
    shrq(r, LogMinObjAlignmentInBytes);
  }
}
// Two-register form: compress the known-non-NULL oop in src into dst,
// leaving src unchanged when dst != src.
void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
  assert (UseCompressedOops, "should be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
#ifdef ASSERT
  if (CheckCompressedOops) {
    // Catch callers that violate the not-null precondition.
    Label ok;
    testq(src, src);
    jcc(Assembler::notEqual, ok);
    stop("null oop passed to encode_heap_oop_not_null2");
    bind(ok);
  }
#endif
  verify_oop(src, "broken oop in encode_heap_oop_not_null2");
  if (dst != src) {
    movq(dst, src);
  }
  if (Universe::narrow_oop_base() != NULL) {
    subq(dst, r12_heapbase);
  }
  if (Universe::narrow_oop_shift() != 0) {
    assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
    shrq(dst, LogMinObjAlignmentInBytes);
  }
}
// Decompress the narrow oop in r in place: r = (r << shift) + heap_base,
// with a narrow NULL decoding back to NULL.
void MacroAssembler::decode_heap_oop(Register r) {
  assert (UseCompressedOops, "should be compressed");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  if (Universe::narrow_oop_base() == NULL) {
    // Zero-based compressed oops: no base to add, just shift back.
    if (Universe::narrow_oop_shift() != 0) {
      assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
      shlq(r, LogMinObjAlignmentInBytes);
    }
    verify_oop(r, "broken oop in decode_heap_oop");
    return;
  }
#ifdef ASSERT
  if (CheckCompressedOops) {
    // Verify r12 still holds the heap base before relying on it.
    Label ok;
    push(rscratch1);
    cmpptr(r12_heapbase,
           ExternalAddress((address)Universe::narrow_oop_base_addr()));
    jcc(Assembler::equal, ok);
    stop("MacroAssembler::decode_heap_oop: heap base corrupted?");
    bind(ok);
    pop(rscratch1);
  }
#endif

  Label done;
  // The shift sets ZF when the narrow oop was NULL; in that case skip the
  // base addition so NULL stays NULL.
  shlq(r, LogMinObjAlignmentInBytes);
  jccb(Assembler::equal, done);
  addq(r, r12_heapbase);
#if 0
  // alternate decoding probably a wash.
  testq(r, r);
  jccb(Assembler::equal, done);
  leaq(r, Address(r12_heapbase, r, Address::times_8, 0));
#endif
  bind(done);
  verify_oop(r, "broken oop in decode_heap_oop");
}
// Decompress the known-non-NULL narrow oop in r in place.  Uses a single
// leaq when both base and shift apply.
void MacroAssembler::decode_heap_oop_not_null(Register r) {
  assert (UseCompressedOops, "should only be used for compressed headers");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  // Cannot assert, unverified entry point counts instructions (see .ad file)
  // vtableStubs also counts instructions in pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
  if (Universe::narrow_oop_base() == NULL) {
    // Zero-based compressed oops: just shift back.
    if (Universe::narrow_oop_shift() != 0) {
      assert (LogMinObjAlignmentInBytes == Universe::narrow_oop_shift(), "decode alg wrong");
      shlq(r, LogMinObjAlignmentInBytes);
    }
  } else {
    assert (Address::times_8 == LogMinObjAlignmentInBytes &&
            Address::times_8 == Universe::narrow_oop_shift(), "decode alg wrong");
    // r = heap_base + (r << 3) in one instruction.
    leaq(r, Address(r12_heapbase, r, Address::times_8, 0));
  }
}
// Two-register form: decompress the known-non-NULL narrow oop in src into
// dst, leaving src unchanged when dst != src.
void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
  assert (UseCompressedOops, "should only be used for compressed headers");
  assert (Universe::heap() != NULL, "java heap should be initialized");
  // Cannot assert, unverified entry point counts instructions (see .ad file)
  // vtableStubs also counts instructions in pd_code_size_limit.
  // Also do not verify_oop as this is called by verify_oop.
  if (Universe::narrow_oop_shift() != 0) {
    assert (Address::times_8 == LogMinObjAlignmentInBytes &&
            Address::times_8 == Universe::narrow_oop_shift(), "decode alg wrong");
    // dst = heap_base + (src << 3) in one instruction.
    leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
  } else if (dst != src) {
    movq(dst, src);
  }
}
8237 void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
8238 assert (UseCompressedOops, "should only be used for compressed headers");
8239 assert (Universe::heap() != NULL, "java heap should be initialized");
8240 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
8241 int oop_index = oop_recorder()->find_index(obj);
8242 RelocationHolder rspec = oop_Relocation::spec(oop_index);
8243 mov_narrow_oop(dst, oop_index, rspec);
8244 }
8246 void MacroAssembler::set_narrow_oop(Address dst, jobject obj) {
8247 assert (UseCompressedOops, "should only be used for compressed headers");
8248 assert (Universe::heap() != NULL, "java heap should be initialized");
8249 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
8250 int oop_index = oop_recorder()->find_index(obj);
8251 RelocationHolder rspec = oop_Relocation::spec(oop_index);
8252 mov_narrow_oop(dst, oop_index, rspec);
8253 }
8255 void MacroAssembler::cmp_narrow_oop(Register dst, jobject obj) {
8256 assert (UseCompressedOops, "should only be used for compressed headers");
8257 assert (Universe::heap() != NULL, "java heap should be initialized");
8258 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
8259 int oop_index = oop_recorder()->find_index(obj);
8260 RelocationHolder rspec = oop_Relocation::spec(oop_index);
8261 Assembler::cmp_narrow_oop(dst, oop_index, rspec);
8262 }
8264 void MacroAssembler::cmp_narrow_oop(Address dst, jobject obj) {
8265 assert (UseCompressedOops, "should only be used for compressed headers");
8266 assert (Universe::heap() != NULL, "java heap should be initialized");
8267 assert (oop_recorder() != NULL, "this assembler needs an OopRecorder");
8268 int oop_index = oop_recorder()->find_index(obj);
8269 RelocationHolder rspec = oop_Relocation::spec(oop_index);
8270 Assembler::cmp_narrow_oop(dst, oop_index, rspec);
8271 }
8273 void MacroAssembler::reinit_heapbase() {
8274 if (UseCompressedOops) {
8275 movptr(r12_heapbase, ExternalAddress((address)Universe::narrow_oop_base_addr()));
8276 }
8277 }
8278 #endif // _LP64
8280 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
8281 switch (cond) {
8282 // Note some conditions are synonyms for others
8283 case Assembler::zero: return Assembler::notZero;
8284 case Assembler::notZero: return Assembler::zero;
8285 case Assembler::less: return Assembler::greaterEqual;
8286 case Assembler::lessEqual: return Assembler::greater;
8287 case Assembler::greater: return Assembler::lessEqual;
8288 case Assembler::greaterEqual: return Assembler::less;
8289 case Assembler::below: return Assembler::aboveEqual;
8290 case Assembler::belowEqual: return Assembler::above;
8291 case Assembler::above: return Assembler::belowEqual;
8292 case Assembler::aboveEqual: return Assembler::below;
8293 case Assembler::overflow: return Assembler::noOverflow;
8294 case Assembler::noOverflow: return Assembler::overflow;
8295 case Assembler::negative: return Assembler::positive;
8296 case Assembler::positive: return Assembler::negative;
8297 case Assembler::parity: return Assembler::noParity;
8298 case Assembler::noParity: return Assembler::parity;
8299 }
8300 ShouldNotReachHere(); return Assembler::overflow;
8301 }
8303 SkipIfEqual::SkipIfEqual(
8304 MacroAssembler* masm, const bool* flag_addr, bool value) {
8305 _masm = masm;
8306 _masm->cmp8(ExternalAddress((address)flag_addr), value);
8307 _masm->jcc(Assembler::equal, _label);
8308 }
// Bind the skip target at the current code position, ending the region
// guarded by the constructor's conditional jump.
SkipIfEqual::~SkipIfEqual() {
  _masm->bind(_label);
}