Wed, 28 May 2008 21:06:24 -0700
6696264: assert("narrow oop can never be zero") for GCBasher & ParNewGC
Summary: decouple set_klass() from zeroing the gap when compressed.
Reviewed-by: kvn, ysr, jrose
1 /*
2 * Copyright 2003-2007 Sun Microsystems, Inc. All Rights Reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
20 * CA 95054 USA or visit www.sun.com if you need additional information or
21 * have any questions.
22 *
23 */
25 #include "incls/_precompiled.incl"
26 #include "incls/_assembler_x86_64.cpp.incl"
28 // Implementation of AddressLiteral
30 AddressLiteral::AddressLiteral(address target, relocInfo::relocType rtype) {
31 _is_lval = false;
32 _target = target;
33 switch (rtype) {
34 case relocInfo::oop_type:
35 // Oops are a special case. Normally they would be their own section
36 // but in cases like icBuffer they are literals in the code stream that
37 // we don't have a section for. We use none so that we get a literal address
38 // which is always patchable.
39 break;
40 case relocInfo::external_word_type:
41 _rspec = external_word_Relocation::spec(target);
42 break;
43 case relocInfo::internal_word_type:
44 _rspec = internal_word_Relocation::spec(target);
45 break;
46 case relocInfo::opt_virtual_call_type:
47 _rspec = opt_virtual_call_Relocation::spec();
48 break;
49 case relocInfo::static_call_type:
50 _rspec = static_call_Relocation::spec();
51 break;
52 case relocInfo::runtime_call_type:
53 _rspec = runtime_call_Relocation::spec();
54 break;
55 case relocInfo::none:
56 break;
57 default:
58 ShouldNotReachHere();
59 break;
60 }
61 }
63 // Implementation of Address
65 Address Address::make_array(ArrayAddress adr) {
66 #ifdef _LP64
67 // Not implementable on 64bit machines
68 // Should have been handled higher up the call chain.
69 ShouldNotReachHere();
70 return Address();
71 #else
72 AddressLiteral base = adr.base();
73 Address index = adr.index();
74 assert(index._disp == 0, "must not have disp"); // maybe it can?
75 Address array(index._base, index._index, index._scale, (intptr_t) base.target());
76 array._rspec = base._rspec;
77 return array;
78 #endif // _LP64
79 }
81 // exceedingly dangerous constructor
82 Address::Address(int disp, address loc, relocInfo::relocType rtype) {
83 _base = noreg;
84 _index = noreg;
85 _scale = no_scale;
86 _disp = disp;
87 switch (rtype) {
88 case relocInfo::external_word_type:
89 _rspec = external_word_Relocation::spec(loc);
90 break;
91 case relocInfo::internal_word_type:
92 _rspec = internal_word_Relocation::spec(loc);
93 break;
94 case relocInfo::runtime_call_type:
95 // HMM
96 _rspec = runtime_call_Relocation::spec();
97 break;
98 case relocInfo::none:
99 break;
100 default:
101 ShouldNotReachHere();
102 }
103 }
105 // Convert the raw encoding form into the form expected by the constructor for
106 // Address. An index of 4 (rsp) corresponds to having no index, so convert
107 // that to noreg for the Address constructor.
108 Address Address::make_raw(int base, int index, int scale, int disp) {
109 bool valid_index = index != rsp->encoding();
110 if (valid_index) {
111 Address madr(as_Register(base), as_Register(index), (Address::ScaleFactor)scale, in_ByteSize(disp));
112 return madr;
113 } else {
114 Address madr(as_Register(base), noreg, Address::no_scale, in_ByteSize(disp));
115 return madr;
116 }
117 }
120 // Implementation of Assembler
121 int AbstractAssembler::code_fill_byte() {
122 return (u_char)'\xF4'; // hlt
123 }
125 // This should only be used by 64bit instructions that can use rip-relative
126 // it cannot be used by instructions that want an immediate value.
// Returns true if the target of 'adr' is guaranteed to be encodable as a
// 32-bit displacement (rip-relative) from any point in the code cache;
// false forces the caller to emit a full 64-bit literal instead.
bool Assembler::reachable(AddressLiteral adr) {
  int64_t disp;
  // None will force a 64bit literal to the code stream. Likely a placeholder
  // for something that will be patched later and we need to be certain it will
  // always be reachable.
  if (adr.reloc() == relocInfo::none) {
    return false;
  }
  if (adr.reloc() == relocInfo::internal_word_type) {
    // This should be rip relative and easily reachable.
    return true;
  }
  if (adr.reloc() != relocInfo::external_word_type &&
      adr.reloc() != relocInfo::runtime_call_type ) {
    return false;
  }

  // Stress the correction code
  if (ForceUnreachable) {
    // Must be runtimecall reloc, see if it is in the codecache
    // Flipping stuff in the codecache to be unreachable causes issues
    // with things like inline caches where the additional instructions
    // are not handled.
    if (CodeCache::find_blob(adr._target) == NULL) {
      return false;
    }
  }
  // For external_word_type/runtime_call_type if it is reachable from where we
  // are now (possibly a temp buffer) and where we might end up
  // anywhere in the codeCache then we are always reachable.
  // This would have to change if we ever save/restore shared code
  // to be more pessimistic.

  // Check from both extremes of the code cache; sizeof(int) accounts for
  // the 4-byte disp field itself preceding the next-instruction address.
  disp = (int64_t)adr._target - ((int64_t)CodeCache::low_bound() + sizeof(int));
  if (!is_simm32(disp)) return false;
  disp = (int64_t)adr._target - ((int64_t)CodeCache::high_bound() + sizeof(int));
  if (!is_simm32(disp)) return false;

  disp = (int64_t)adr._target - ((int64_t)_code_pos + sizeof(int));

  // Because rip relative is a disp + address_of_next_instruction and we
  // don't know the value of address_of_next_instruction we apply a fudge factor
  // to make sure we will be ok no matter the size of the instruction we get placed into.
  // We don't have to fudge the checks above here because they are already worst case.

  // 12 == override/rex byte, opcode byte, rm byte, sib byte, a 4-byte disp , 4-byte literal
  // + 4 because better safe than sorry.
  const int fudge = 12 + 4;
  if (disp < 0) {
    disp -= fudge;
  } else {
    disp += fudge;
  }
  return is_simm32(disp);
}
186 // make this go away eventually
187 void Assembler::emit_data(jint data,
188 relocInfo::relocType rtype,
189 int format) {
190 if (rtype == relocInfo::none) {
191 emit_long(data);
192 } else {
193 emit_data(data, Relocation::spec_simple(rtype), format);
194 }
195 }
// Emit a 32-bit data word and, if 'rspec' carries a relocation, record it
// against the enclosing instruction so the relocation machinery can locate
// the operand field.  Must be called inside an InstructionMark.
void Assembler::emit_data(jint data,
                          RelocationHolder const& rspec,
                          int format) {
  assert(imm64_operand == 0, "default format must be imm64 in this file");
  assert(imm64_operand != format, "must not be imm64");
  assert(inst_mark() != NULL, "must be inside InstructionMark");
  if (rspec.type() != relocInfo::none) {
#ifdef ASSERT
    // Verify the relocation points at the word we are about to emit (pc()).
    check_relocation(rspec, format);
#endif
    // Do not use AbstractAssembler::relocate, which is not intended for
    // embedded words. Instead, relocate to the enclosing instruction.

    // hack. call32 is too wide for mask so use disp32
    if (format == call32_operand)
      code_section()->relocate(inst_mark(), rspec, disp32_operand);
    else
      code_section()->relocate(inst_mark(), rspec, format);
  }
  emit_long(data);
}
219 void Assembler::emit_data64(jlong data,
220 relocInfo::relocType rtype,
221 int format) {
222 if (rtype == relocInfo::none) {
223 emit_long64(data);
224 } else {
225 emit_data64(data, Relocation::spec_simple(rtype), format);
226 }
227 }
// Emit a 64-bit data word with its relocation recorded against the
// enclosing instruction.  Only the imm64 format is legal here.
// Must be called inside an InstructionMark.
void Assembler::emit_data64(jlong data,
                            RelocationHolder const& rspec,
                            int format) {
  assert(imm64_operand == 0, "default format must be imm64 in this file");
  assert(imm64_operand == format, "must be imm64");
  assert(inst_mark() != NULL, "must be inside InstructionMark");
  // Do not use AbstractAssembler::relocate, which is not intended for
  // embedded words. Instead, relocate to the enclosing instruction.
  code_section()->relocate(inst_mark(), rspec, format);
#ifdef ASSERT
  // Verify the relocation points at the word we are about to emit (pc()).
  check_relocation(rspec, format);
#endif
  emit_long64(data);
}
244 void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
245 assert(isByte(op1) && isByte(op2), "wrong opcode");
246 assert(isByte(imm8), "not a byte");
247 assert((op1 & 0x01) == 0, "should be 8bit operation");
248 int dstenc = dst->encoding();
249 if (dstenc >= 8) {
250 dstenc -= 8;
251 }
252 emit_byte(op1);
253 emit_byte(op2 | dstenc);
254 emit_byte(imm8);
255 }
257 void Assembler::emit_arith(int op1, int op2, Register dst, int imm32) {
258 assert(isByte(op1) && isByte(op2), "wrong opcode");
259 assert((op1 & 0x01) == 1, "should be 32bit operation");
260 assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
261 int dstenc = dst->encoding();
262 if (dstenc >= 8) {
263 dstenc -= 8;
264 }
265 if (is8bit(imm32)) {
266 emit_byte(op1 | 0x02); // set sign bit
267 emit_byte(op2 | dstenc);
268 emit_byte(imm32 & 0xFF);
269 } else {
270 emit_byte(op1);
271 emit_byte(op2 | dstenc);
272 emit_long(imm32);
273 }
274 }
276 // immediate-to-memory forms
277 void Assembler::emit_arith_operand(int op1,
278 Register rm, Address adr,
279 int imm32) {
280 assert((op1 & 0x01) == 1, "should be 32bit operation");
281 assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
282 if (is8bit(imm32)) {
283 emit_byte(op1 | 0x02); // set sign bit
284 emit_operand(rm, adr, 1);
285 emit_byte(imm32 & 0xFF);
286 } else {
287 emit_byte(op1);
288 emit_operand(rm, adr, 4);
289 emit_long(imm32);
290 }
291 }
294 void Assembler::emit_arith(int op1, int op2, Register dst, Register src) {
295 assert(isByte(op1) && isByte(op2), "wrong opcode");
296 int dstenc = dst->encoding();
297 int srcenc = src->encoding();
298 if (dstenc >= 8) {
299 dstenc -= 8;
300 }
301 if (srcenc >= 8) {
302 srcenc -= 8;
303 }
304 emit_byte(op1);
305 emit_byte(op2 | dstenc << 3 | srcenc);
306 }
// Emit the ModRM byte (plus SIB byte and displacement as needed) encoding
// the memory operand [base + index*scale + disp] with operand register
// 'reg'.  Register encodings here use only their low 3 bits; the high bit
// of each must already have been emitted by the caller in a REX prefix.
// Any relocated displacement is always emitted as a disp32 via emit_data
// so the relocation can find it.  rip_relative_correction accounts for
// bytes that follow the disp32 inside the instruction (e.g. a trailing
// immediate) when converting to a rip-relative displacement.
void Assembler::emit_operand(Register reg, Register base, Register index,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec,
                             int rip_relative_correction) {
  relocInfo::relocType rtype = (relocInfo::relocType) rspec.type();
  int regenc = reg->encoding();
  if (regenc >= 8) {
    regenc -= 8;
  }
  if (base->is_valid()) {
    if (index->is_valid()) {
      assert(scale != Address::no_scale, "inconsistent address");
      int indexenc = index->encoding();
      if (indexenc >= 8) {
        indexenc -= 8;
      }
      int baseenc = base->encoding();
      if (baseenc >= 8) {
        baseenc -= 8;
      }
      // [base + index*scale + disp]
      // Note: rbp/r13 (low bits 101) cannot use the no-disp form; mod=00
      // with base=101 means something else, so they fall to the disp forms.
      if (disp == 0 && rtype == relocInfo::none &&
          base != rbp && base != r13) {
        // [base + index*scale]
        // [00 reg 100][ss index base]
        assert(index != rsp, "illegal addressing mode");
        emit_byte(0x04 | regenc << 3);
        emit_byte(scale << 6 | indexenc << 3 | baseenc);
      } else if (is8bit(disp) && rtype == relocInfo::none) {
        // [base + index*scale + imm8]
        // [01 reg 100][ss index base] imm8
        assert(index != rsp, "illegal addressing mode");
        emit_byte(0x44 | regenc << 3);
        emit_byte(scale << 6 | indexenc << 3 | baseenc);
        emit_byte(disp & 0xFF);
      } else {
        // [base + index*scale + disp32]
        // [10 reg 100][ss index base] disp32
        assert(index != rsp, "illegal addressing mode");
        emit_byte(0x84 | regenc << 3);
        emit_byte(scale << 6 | indexenc << 3 | baseenc);
        emit_data(disp, rspec, disp32_operand);
      }
    } else if (base == rsp || base == r12) {
      // rsp/r12 (low bits 100) as base always require a SIB byte.
      // [rsp + disp]
      if (disp == 0 && rtype == relocInfo::none) {
        // [rsp]
        // [00 reg 100][00 100 100]
        emit_byte(0x04 | regenc << 3);
        emit_byte(0x24);
      } else if (is8bit(disp) && rtype == relocInfo::none) {
        // [rsp + imm8]
        // [01 reg 100][00 100 100] disp8
        emit_byte(0x44 | regenc << 3);
        emit_byte(0x24);
        emit_byte(disp & 0xFF);
      } else {
        // [rsp + imm32]
        // [10 reg 100][00 100 100] disp32
        emit_byte(0x84 | regenc << 3);
        emit_byte(0x24);
        emit_data(disp, rspec, disp32_operand);
      }
    } else {
      // [base + disp]
      assert(base != rsp && base != r12, "illegal addressing mode");
      int baseenc = base->encoding();
      if (baseenc >= 8) {
        baseenc -= 8;
      }
      if (disp == 0 && rtype == relocInfo::none &&
          base != rbp && base != r13) {
        // [base]
        // [00 reg base]
        emit_byte(0x00 | regenc << 3 | baseenc);
      } else if (is8bit(disp) && rtype == relocInfo::none) {
        // [base + disp8]
        // [01 reg base] disp8
        emit_byte(0x40 | regenc << 3 | baseenc);
        emit_byte(disp & 0xFF);
      } else {
        // [base + disp32]
        // [10 reg base] disp32
        emit_byte(0x80 | regenc << 3 | baseenc);
        emit_data(disp, rspec, disp32_operand);
      }
    }
  } else {
    if (index->is_valid()) {
      assert(scale != Address::no_scale, "inconsistent address");
      int indexenc = index->encoding();
      if (indexenc >= 8) {
        indexenc -= 8;
      }
      // [index*scale + disp]
      // [00 reg 100][ss index 101] disp32
      assert(index != rsp, "illegal addressing mode");
      emit_byte(0x04 | regenc << 3);
      emit_byte(scale << 6 | indexenc << 3 | 0x05);
      emit_data(disp, rspec, disp32_operand);
#ifdef _LP64
    } else if (rtype != relocInfo::none ) {
      // [disp] RIP-RELATIVE
      // [00 reg 101] disp32
      emit_byte(0x05 | regenc << 3);
      // Note that the RIP-rel. correction applies to the generated
      // disp field, but _not_ to the target address in the rspec.

      // disp was created by converting the target address minus the pc
      // at the start of the instruction. That needs more correction here.
      // intptr_t disp = target - next_ip;
      assert(inst_mark() != NULL, "must be inside InstructionMark");
      address next_ip = pc() + sizeof(int32_t) + rip_relative_correction;
      int64_t adjusted = (int64_t) disp - (next_ip - inst_mark());
      assert(is_simm32(adjusted),
             "must be 32bit offset (RIP relative address)");
      emit_data((int) adjusted, rspec, disp32_operand);

#endif // _LP64
    } else {
      // [disp] ABSOLUTE
      // [00 reg 100][00 100 101] disp32
      emit_byte(0x04 | regenc << 3);
      emit_byte(0x25);
      emit_data(disp, rspec, disp32_operand);
    }
  }
}
// XMM-register variant of emit_operand: identical ModRM/SIB/displacement
// encoding logic, with 'reg' being an XMM register.  See the Register
// overload above for the encoding details.
void Assembler::emit_operand(XMMRegister reg, Register base, Register index,
                             Address::ScaleFactor scale, int disp,
                             RelocationHolder const& rspec,
                             int rip_relative_correction) {
  relocInfo::relocType rtype = (relocInfo::relocType) rspec.type();
  int regenc = reg->encoding();
  if (regenc >= 8) {
    regenc -= 8;
  }
  if (base->is_valid()) {
    if (index->is_valid()) {
      assert(scale != Address::no_scale, "inconsistent address");
      int indexenc = index->encoding();
      if (indexenc >= 8) {
        indexenc -= 8;
      }
      int baseenc = base->encoding();
      if (baseenc >= 8) {
        baseenc -= 8;
      }
      // [base + index*scale + disp]
      if (disp == 0 && rtype == relocInfo::none &&
          base != rbp && base != r13) {
        // [base + index*scale]
        // [00 reg 100][ss index base]
        assert(index != rsp, "illegal addressing mode");
        emit_byte(0x04 | regenc << 3);
        emit_byte(scale << 6 | indexenc << 3 | baseenc);
      } else if (is8bit(disp) && rtype == relocInfo::none) {
        // [base + index*scale + disp8]
        // [01 reg 100][ss index base] disp8
        assert(index != rsp, "illegal addressing mode");
        emit_byte(0x44 | regenc << 3);
        emit_byte(scale << 6 | indexenc << 3 | baseenc);
        emit_byte(disp & 0xFF);
      } else {
        // [base + index*scale + disp32]
        // [10 reg 100][ss index base] disp32
        assert(index != rsp, "illegal addressing mode");
        emit_byte(0x84 | regenc << 3);
        emit_byte(scale << 6 | indexenc << 3 | baseenc);
        emit_data(disp, rspec, disp32_operand);
      }
    } else if (base == rsp || base == r12) {
      // rsp/r12 (low bits 100) as base always require a SIB byte.
      // [rsp + disp]
      if (disp == 0 && rtype == relocInfo::none) {
        // [rsp]
        // [00 reg 100][00 100 100]
        emit_byte(0x04 | regenc << 3);
        emit_byte(0x24);
      } else if (is8bit(disp) && rtype == relocInfo::none) {
        // [rsp + imm8]
        // [01 reg 100][00 100 100] disp8
        emit_byte(0x44 | regenc << 3);
        emit_byte(0x24);
        emit_byte(disp & 0xFF);
      } else {
        // [rsp + imm32]
        // [10 reg 100][00 100 100] disp32
        emit_byte(0x84 | regenc << 3);
        emit_byte(0x24);
        emit_data(disp, rspec, disp32_operand);
      }
    } else {
      // [base + disp]
      assert(base != rsp && base != r12, "illegal addressing mode");
      int baseenc = base->encoding();
      if (baseenc >= 8) {
        baseenc -= 8;
      }
      if (disp == 0 && rtype == relocInfo::none &&
          base != rbp && base != r13) {
        // [base]
        // [00 reg base]
        emit_byte(0x00 | regenc << 3 | baseenc);
      } else if (is8bit(disp) && rtype == relocInfo::none) {
        // [base + imm8]
        // [01 reg base] disp8
        emit_byte(0x40 | regenc << 3 | baseenc);
        emit_byte(disp & 0xFF);
      } else {
        // [base + imm32]
        // [10 reg base] disp32
        emit_byte(0x80 | regenc << 3 | baseenc);
        emit_data(disp, rspec, disp32_operand);
      }
    }
  } else {
    if (index->is_valid()) {
      assert(scale != Address::no_scale, "inconsistent address");
      int indexenc = index->encoding();
      if (indexenc >= 8) {
        indexenc -= 8;
      }
      // [index*scale + disp]
      // [00 reg 100][ss index 101] disp32
      assert(index != rsp, "illegal addressing mode");
      emit_byte(0x04 | regenc << 3);
      emit_byte(scale << 6 | indexenc << 3 | 0x05);
      emit_data(disp, rspec, disp32_operand);
#ifdef _LP64
    } else if ( rtype != relocInfo::none ) {
      // [disp] RIP-RELATIVE
      // [00 reg 101] disp32
      emit_byte(0x05 | regenc << 3);
      // Note that the RIP-rel. correction applies to the generated
      // disp field, but _not_ to the target address in the rspec.

      // disp was created by converting the target address minus the pc
      // at the start of the instruction. That needs more correction here.
      // intptr_t disp = target - next_ip;

      assert(inst_mark() != NULL, "must be inside InstructionMark");
      address next_ip = pc() + sizeof(int32_t) + rip_relative_correction;

      int64_t adjusted = (int64_t) disp - (next_ip - inst_mark());
      assert(is_simm32(adjusted),
             "must be 32bit offset (RIP relative address)");
      emit_data((int) adjusted, rspec, disp32_operand);
#endif // _LP64
    } else {
      // [disp] ABSOLUTE
      // [00 reg 100][00 100 101] disp32
      emit_byte(0x04 | regenc << 3);
      emit_byte(0x25);
      emit_data(disp, rspec, disp32_operand);
    }
  }
}
568 // Secret local extension to Assembler::WhichOperand:
569 #define end_pc_operand (_WhichOperand_limit)
// Partial x86-64 instruction decoder used to locate an embedded operand
// field (or the end of the instruction) within an already-emitted
// instruction starting at 'inst'.
address Assembler::locate_operand(address inst, WhichOperand which) {
  // Decode the given instruction, and return the address of
  // an embedded 32-bit operand word.

  // If "which" is disp32_operand, selects the displacement portion
  // of an effective address specifier.
  // If "which" is imm64_operand, selects the trailing immediate constant.
  // If "which" is call32_operand, selects the displacement of a call or jump.
  // Caller is responsible for ensuring that there is such an operand,
  // and that it is 32/64 bits wide.

  // If "which" is end_pc_operand, find the end of the instruction.

  address ip = inst;
  bool is_64bit = false;           // set when a REX.W prefix is seen

  debug_only(bool has_disp32 = false);
  int tail_size = 0; // other random bytes (#32, #16, etc.) at end of insn

 again_after_prefix:
  switch (0xFF & *ip++) {

  // These convenience macros generate groups of "case" labels for the switch.
#define REP4(x) (x)+0: case (x)+1: case (x)+2: case (x)+3
#define REP8(x) (x)+0: case (x)+1: case (x)+2: case (x)+3: \
             case (x)+4: case (x)+5: case (x)+6: case (x)+7
#define REP16(x) REP8((x)+0): \
              case REP8((x)+8)

  case CS_segment:
  case SS_segment:
  case DS_segment:
  case ES_segment:
  case FS_segment:
  case GS_segment:
    // Segment overrides are not expected to be emitted by this assembler.
    assert(0, "shouldn't have that prefix");
    assert(ip == inst + 1 || ip == inst + 2, "only two prefixes allowed");
    goto again_after_prefix;

  case 0x67:
  case REX:
  case REX_B:
  case REX_X:
  case REX_XB:
  case REX_R:
  case REX_RB:
  case REX_RX:
  case REX_RXB:
    // assert(ip == inst + 1, "only one prefix allowed");
    goto again_after_prefix;

  case REX_W:
  case REX_WB:
  case REX_WX:
  case REX_WXB:
  case REX_WR:
  case REX_WRB:
  case REX_WRX:
  case REX_WRXB:
    is_64bit = true;
    // assert(ip == inst + 1, "only one prefix allowed");
    goto again_after_prefix;

  case 0xFF: // pushq a; decl a; incl a; call a; jmp a
  case 0x88: // movb a, r
  case 0x89: // movl a, r
  case 0x8A: // movb r, a
  case 0x8B: // movl r, a
  case 0x8F: // popl a
    debug_only(has_disp32 = true;)
    break;

  case 0x68: // pushq #32
    if (which == end_pc_operand) {
      return ip + 4;
    }
    assert(0, "pushq has no disp32 or imm64");
    ShouldNotReachHere();

  case 0x66: // movw ... (size prefix)
   again_after_size_prefix2:
    switch (0xFF & *ip++) {
    case REX:
    case REX_B:
    case REX_X:
    case REX_XB:
    case REX_R:
    case REX_RB:
    case REX_RX:
    case REX_RXB:
    case REX_W:
    case REX_WB:
    case REX_WX:
    case REX_WXB:
    case REX_WR:
    case REX_WRB:
    case REX_WRX:
    case REX_WRXB:
      goto again_after_size_prefix2;
    case 0x8B: // movw r, a
    case 0x89: // movw a, r
      break;
    case 0xC7: // movw a, #16
      tail_size = 2;  // the imm16
      break;
    case 0x0F: // several SSE/SSE2 variants
      ip--;    // reparse the 0x0F
      goto again_after_prefix;
    default:
      ShouldNotReachHere();
    }
    break;

  case REP8(0xB8): // movl/q r, #32/#64(oop?)
    if (which == end_pc_operand)  return ip + (is_64bit ? 8 : 4);
    assert((which == call32_operand || which == imm64_operand) && is_64bit ||
           which == narrow_oop_operand && !is_64bit, "");
    return ip;

  case 0x69: // imul r, a, #32
  case 0xC7: // movl a, #32(oop?)
    tail_size = 4;
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x0F: // movx..., etc.
    switch (0xFF & *ip++) {
    case 0x12: // movlps
    case 0x28: // movaps
    case 0x2E: // ucomiss
    case 0x2F: // comiss
    case 0x54: // andps
    case 0x57: // xorps
    case 0x6E: // movd
    case 0x7E: // movd
    case 0xAE: // ldmxcsr a
      debug_only(has_disp32 = true); // has both kinds of operands!
      break;
    case 0xAD: // shrd r, a, %cl
    case 0xAF: // imul r, a
    case 0xBE: // movsbl r, a
    case 0xBF: // movswl r, a
    case 0xB6: // movzbl r, a
    case 0xB7: // movzwl r, a
    case REP16(0x40): // cmovl cc, r, a
    case 0xB0: // cmpxchgb
    case 0xB1: // cmpxchg
    case 0xC1: // xaddl
    case 0xC7: // cmpxchg8
    case REP16(0x90): // setcc a
      debug_only(has_disp32 = true);
      // fall out of the switch to decode the address
      break;
    case 0xAC: // shrd r, a, #8
      debug_only(has_disp32 = true);
      tail_size = 1;  // the imm8
      break;
    case REP16(0x80): // jcc rdisp32
      if (which == end_pc_operand)  return ip + 4;
      assert(which == call32_operand, "jcc has no disp32 or imm64");
      return ip;
    default:
      ShouldNotReachHere();
    }
    break;

  case 0x81: // addl a, #32; addl r, #32
    // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
    tail_size = 4;
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x83: // addl a, #8; addl r, #8
    // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
    debug_only(has_disp32 = true); // has both kinds of operands!
    tail_size = 1;
    break;

  case 0x9B:
    switch (0xFF & *ip++) {
    case 0xD9: // fnstcw a
      debug_only(has_disp32 = true);
      break;
    default:
      ShouldNotReachHere();
    }
    break;

  case REP4(0x00): // addb a, r; addl a, r; addb r, a; addl r, a
  case REP4(0x10): // adc...
  case REP4(0x20): // and...
  case REP4(0x30): // xor...
  case REP4(0x08): // or...
  case REP4(0x18): // sbb...
  case REP4(0x28): // sub...
  case 0xF7: // mull a
  case 0x87: // xchg r, a
    debug_only(has_disp32 = true);
    break;

  case REP4(0x38): // cmp...
  case 0x8D: // lea r, a
  case 0x85: // test r, a
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0xC1: // sal a, #8; sar a, #8; shl a, #8; shr a, #8
  case 0xC6: // movb a, #8
  case 0x80: // cmpb a, #8
  case 0x6B: // imul r, a, #8
    debug_only(has_disp32 = true); // has both kinds of operands!
    tail_size = 1; // the imm8
    break;

  case 0xE8: // call rdisp32
  case 0xE9: // jmp  rdisp32
    if (which == end_pc_operand)  return ip + 4;
    assert(which == call32_operand, "call has no disp32 or imm32");
    return ip;

  case 0xD1: // sal a, 1; sar a, 1; shl a, 1; shr a, 1
  case 0xD3: // sal a, %cl; sar a, %cl; shl a, %cl; shr a, %cl
  case 0xD9: // fld_s a; fst_s a; fstp_s a; fldcw a
  case 0xDD: // fld_d a; fst_d a; fstp_d a
  case 0xDB: // fild_s a; fistp_s a; fld_x a; fstp_x a
  case 0xDF: // fild_d a; fistp_d a
  case 0xD8: // fadd_s a; fsubr_s a; fmul_s a; fdivr_s a; fcomp_s a
  case 0xDC: // fadd_d a; fsubr_d a; fmul_d a; fdivr_d a; fcomp_d a
  case 0xDE: // faddp_d a; fsubrp_d a; fmulp_d a; fdivrp_d a; fcompp_d a
    debug_only(has_disp32 = true);
    break;

  case 0xF3: // For SSE
  case 0xF2: // For SSE2
    // The byte after the F3/F2 prefix is either a REX prefix or the 0x0F
    // escape.  In the REX cases the first ip++ skips the 0x0F escape and
    // execution falls into 'default', whose ip++ skips the SSE opcode
    // byte, leaving ip at the ModRM byte for the generic decode below.
    switch (0xFF & *ip++) {
    case REX:
    case REX_B:
    case REX_X:
    case REX_XB:
    case REX_R:
    case REX_RB:
    case REX_RX:
    case REX_RXB:
    case REX_W:
    case REX_WB:
    case REX_WX:
    case REX_WXB:
    case REX_WR:
    case REX_WRB:
    case REX_WRX:
    case REX_WRXB:
      ip++;
    default:
      ip++;
    }
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  default:
    ShouldNotReachHere();

#undef REP8
#undef REP16
  }

  assert(which != call32_operand, "instruction is not a call, jmp, or jcc");
  assert(which != imm64_operand, "instruction is not a movq reg, imm64");
  assert(which != disp32_operand || has_disp32, "instruction has no disp32 field");

  // parse the output of emit_operand
  int op2 = 0xFF & *ip++;
  int base = op2 & 0x07;
  int op3 = -1;
  const int b100 = 4;
  const int b101 = 5;
  if (base == b100 && (op2 >> 6) != 3) {
    op3 = 0xFF & *ip++;
    base = op3 & 0x07;   // refetch the base
  }
  // now ip points at the disp (if any)

  switch (op2 >> 6) {
  case 0:
    // [00 reg  100][ss index base]
    // [00 reg  100][00   100  esp]
    // [00 reg base]
    // [00 reg  100][ss index  101][disp32]
    // [00 reg  101]               [disp32]

    if (base == b101) {
      if (which == disp32_operand)
        return ip;              // caller wants the disp32
      ip += 4;                  // skip the disp32
    }
    break;

  case 1:
    // [01 reg  100][ss index base][disp8]
    // [01 reg  100][00   100  esp][disp8]
    // [01 reg base]               [disp8]
    ip += 1;                    // skip the disp8
    break;

  case 2:
    // [10 reg  100][ss index base][disp32]
    // [10 reg  100][00   100  esp][disp32]
    // [10 reg base]               [disp32]
    if (which == disp32_operand)
      return ip;                // caller wants the disp32
    ip += 4;                    // skip the disp32
    break;

  case 3:
    // [11 reg base]  (not a memory addressing mode)
    break;
  }

  if (which == end_pc_operand) {
    return ip + tail_size;
  }

  assert(0, "fix locate_operand");
  return ip;
}
895 address Assembler::locate_next_instruction(address inst) {
896 // Secretly share code with locate_operand:
897 return locate_operand(inst, end_pc_operand);
898 }
#ifdef ASSERT
// Debug-only sanity check: re-decode the instruction at inst_mark() and
// verify that the operand field selected by the relocation is exactly
// where the assembler is about to emit it (the current pc()).
void Assembler::check_relocation(RelocationHolder const& rspec, int format) {
  address inst = inst_mark();
  assert(inst != NULL && inst < pc(),
         "must point to beginning of instruction");
  address opnd;

  Relocation* r = rspec.reloc();
  if (r->type() == relocInfo::none) {
    // Nothing to verify for a non-relocation.
    return;
  } else if (r->is_call() || format == call32_operand) {
    opnd = locate_operand(inst, call32_operand);
  } else if (r->is_data()) {
    assert(format == imm64_operand || format == disp32_operand ||
           format == narrow_oop_operand, "format ok");
    opnd = locate_operand(inst, (WhichOperand) format);
  } else {
    assert(format == 0, "cannot specify a format");
    return;
  }
  assert(opnd == pc(), "must put operand where relocs can find it");
}
#endif
924 int Assembler::prefix_and_encode(int reg_enc, bool byteinst) {
925 if (reg_enc >= 8) {
926 prefix(REX_B);
927 reg_enc -= 8;
928 } else if (byteinst && reg_enc >= 4) {
929 prefix(REX);
930 }
931 return reg_enc;
932 }
934 int Assembler::prefixq_and_encode(int reg_enc) {
935 if (reg_enc < 8) {
936 prefix(REX_W);
937 } else {
938 prefix(REX_WB);
939 reg_enc -= 8;
940 }
941 return reg_enc;
942 }
944 int Assembler::prefix_and_encode(int dst_enc, int src_enc, bool byteinst) {
945 if (dst_enc < 8) {
946 if (src_enc >= 8) {
947 prefix(REX_B);
948 src_enc -= 8;
949 } else if (byteinst && src_enc >= 4) {
950 prefix(REX);
951 }
952 } else {
953 if (src_enc < 8) {
954 prefix(REX_R);
955 } else {
956 prefix(REX_RB);
957 src_enc -= 8;
958 }
959 dst_enc -= 8;
960 }
961 return dst_enc << 3 | src_enc;
962 }
964 int Assembler::prefixq_and_encode(int dst_enc, int src_enc) {
965 if (dst_enc < 8) {
966 if (src_enc < 8) {
967 prefix(REX_W);
968 } else {
969 prefix(REX_WB);
970 src_enc -= 8;
971 }
972 } else {
973 if (src_enc < 8) {
974 prefix(REX_WR);
975 } else {
976 prefix(REX_WRB);
977 src_enc -= 8;
978 }
979 dst_enc -= 8;
980 }
981 return dst_enc << 3 | src_enc;
982 }
984 void Assembler::prefix(Register reg) {
985 if (reg->encoding() >= 8) {
986 prefix(REX_B);
987 }
988 }
990 void Assembler::prefix(Address adr) {
991 if (adr.base_needs_rex()) {
992 if (adr.index_needs_rex()) {
993 prefix(REX_XB);
994 } else {
995 prefix(REX_B);
996 }
997 } else {
998 if (adr.index_needs_rex()) {
999 prefix(REX_X);
1000 }
1001 }
1002 }
1004 void Assembler::prefixq(Address adr) {
1005 if (adr.base_needs_rex()) {
1006 if (adr.index_needs_rex()) {
1007 prefix(REX_WXB);
1008 } else {
1009 prefix(REX_WB);
1010 }
1011 } else {
1012 if (adr.index_needs_rex()) {
1013 prefix(REX_WX);
1014 } else {
1015 prefix(REX_W);
1016 }
1017 }
1018 }
1021 void Assembler::prefix(Address adr, Register reg, bool byteinst) {
1022 if (reg->encoding() < 8) {
1023 if (adr.base_needs_rex()) {
1024 if (adr.index_needs_rex()) {
1025 prefix(REX_XB);
1026 } else {
1027 prefix(REX_B);
1028 }
1029 } else {
1030 if (adr.index_needs_rex()) {
1031 prefix(REX_X);
1032 } else if (reg->encoding() >= 4 ) {
1033 prefix(REX);
1034 }
1035 }
1036 } else {
1037 if (adr.base_needs_rex()) {
1038 if (adr.index_needs_rex()) {
1039 prefix(REX_RXB);
1040 } else {
1041 prefix(REX_RB);
1042 }
1043 } else {
1044 if (adr.index_needs_rex()) {
1045 prefix(REX_RX);
1046 } else {
1047 prefix(REX_R);
1048 }
1049 }
1050 }
1051 }
1053 void Assembler::prefixq(Address adr, Register src) {
1054 if (src->encoding() < 8) {
1055 if (adr.base_needs_rex()) {
1056 if (adr.index_needs_rex()) {
1057 prefix(REX_WXB);
1058 } else {
1059 prefix(REX_WB);
1060 }
1061 } else {
1062 if (adr.index_needs_rex()) {
1063 prefix(REX_WX);
1064 } else {
1065 prefix(REX_W);
1066 }
1067 }
1068 } else {
1069 if (adr.base_needs_rex()) {
1070 if (adr.index_needs_rex()) {
1071 prefix(REX_WRXB);
1072 } else {
1073 prefix(REX_WRB);
1074 }
1075 } else {
1076 if (adr.index_needs_rex()) {
1077 prefix(REX_WRX);
1078 } else {
1079 prefix(REX_WR);
1080 }
1081 }
1082 }
1083 }
// Emit a REX prefix for an instruction with XMM register operand 'reg'
// and memory operand 'adr'. REX.R covers xmm8-xmm15; REX.X/REX.B cover
// the address' index/base registers. No prefix is emitted when nothing
// needs extending (unlike GPRs, XMM registers never need a bare REX).
void Assembler::prefix(Address adr, XMMRegister reg) {
  if (reg->encoding() < 8) {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_XB);
      } else {
        prefix(REX_B);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_X);
      }
    }
  } else {
    if (adr.base_needs_rex()) {
      if (adr.index_needs_rex()) {
        prefix(REX_RXB);
      } else {
        prefix(REX_RB);
      }
    } else {
      if (adr.index_needs_rex()) {
        prefix(REX_RX);
      } else {
        prefix(REX_R);
      }
    }
  }
}
// Convenience overloads: unpack an Address into its components and
// forward to the general emit_operand (which emits ModRM/SIB/disp and
// records any relocation carried in adr._rspec).
void Assembler::emit_operand(Register reg, Address adr,
                             int rip_relative_correction) {
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec,
               rip_relative_correction);
}

void Assembler::emit_operand(XMMRegister reg, Address adr,
                             int rip_relative_correction) {
  emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
               adr._rspec,
               rip_relative_correction);
}
// Emit a two-byte x87 floating-point arithmetic instruction; 'i' selects
// the FPU stack register ST(i), folded into the second opcode byte.
void Assembler::emit_farith(int b1, int b2, int i) {
  assert(isByte(b1) && isByte(b2), "wrong opcode");
  assert(0 <= i && i < 8, "illegal stack offset");
  emit_byte(b1);
  emit_byte(b2 + i);
}
// pushad is invalid, use this instead.
// NOTE: Kills flags!!
// Saves all 16 general registers in a 16-slot frame below rsp; the
// original rsp value ends up in the otherwise-skipped slot 11
// (written at rsp-5*wordSize before the 16-word adjustment).
void Assembler::pushaq() {
  // we have to store original rsp. ABI says that 128 bytes
  // below rsp are local scratch.
  movq(Address(rsp, -5 * wordSize), rsp);

  subq(rsp, 16 * wordSize);

  movq(Address(rsp, 15 * wordSize), rax);
  movq(Address(rsp, 14 * wordSize), rcx);
  movq(Address(rsp, 13 * wordSize), rdx);
  movq(Address(rsp, 12 * wordSize), rbx);
  // skip rsp
  movq(Address(rsp, 10 * wordSize), rbp);
  movq(Address(rsp, 9 * wordSize), rsi);
  movq(Address(rsp, 8 * wordSize), rdi);
  movq(Address(rsp, 7 * wordSize), r8);
  movq(Address(rsp, 6 * wordSize), r9);
  movq(Address(rsp, 5 * wordSize), r10);
  movq(Address(rsp, 4 * wordSize), r11);
  movq(Address(rsp, 3 * wordSize), r12);
  movq(Address(rsp, 2 * wordSize), r13);
  movq(Address(rsp, wordSize), r14);
  movq(Address(rsp, 0), r15);
}
// popad is invalid, use this instead
// NOTE: Kills flags!!
// Inverse of pushaq(): restores the 15 saved registers (rsp itself is
// recomputed by the final addq rather than reloaded from its slot).
void Assembler::popaq() {
  movq(r15, Address(rsp, 0));
  movq(r14, Address(rsp, wordSize));
  movq(r13, Address(rsp, 2 * wordSize));
  movq(r12, Address(rsp, 3 * wordSize));
  movq(r11, Address(rsp, 4 * wordSize));
  movq(r10, Address(rsp, 5 * wordSize));
  movq(r9, Address(rsp, 6 * wordSize));
  movq(r8, Address(rsp, 7 * wordSize));
  movq(rdi, Address(rsp, 8 * wordSize));
  movq(rsi, Address(rsp, 9 * wordSize));
  movq(rbp, Address(rsp, 10 * wordSize));
  // skip rsp
  movq(rbx, Address(rsp, 12 * wordSize));
  movq(rdx, Address(rsp, 13 * wordSize));
  movq(rcx, Address(rsp, 14 * wordSize));
  movq(rax, Address(rsp, 15 * wordSize));

  addq(rsp, 16 * wordSize);
}
// Push/pop the flags register (0x9C / 0x9D).
void Assembler::pushfq() {
  emit_byte(0x9C);
}

void Assembler::popfq() {
  emit_byte(0x9D);
}

// Push a sign-extended 32-bit immediate (opcode 0x68 imm32).
void Assembler::pushq(int imm32) {
  emit_byte(0x68);
  emit_long(imm32);
}

// Push register: single-byte opcode 0x50+reg, REX.B for r8-r15.
void Assembler::pushq(Register src) {
  int encode = prefix_and_encode(src->encoding());

  emit_byte(0x50 | encode);
}

// Push from memory: FF /6 (rsi encodes the /6 extension).
void Assembler::pushq(Address src) {
  InstructionMark im(this);
  prefix(src);
  emit_byte(0xFF);
  emit_operand(rsi, src);
}

// Pop into register: single-byte opcode 0x58+reg, REX.B for r8-r15.
void Assembler::popq(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0x58 | encode);
}

// Pop into memory: 8F /0 (rax encodes the /0 extension).
void Assembler::popq(Address dst) {
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0x8F);
  emit_operand(rax, dst);
}

// Emit a raw prefix byte into the code stream.
void Assembler::prefix(Prefix p) {
  a_byte(p);
}
// Byte load: 8A /r. 'true' marks a byte instruction so a bare REX is
// emitted when needed to reach spl/bpl/sil/dil.
void Assembler::movb(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst, true);
  emit_byte(0x8A);
  emit_operand(dst, src);
}

// Byte store of immediate: C6 /0 imm8.
void Assembler::movb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0xC6);
  emit_operand(rax, dst, 1);
  emit_byte(imm8);
}

// Byte store from register: 88 /r (byte instruction, see movb load).
void Assembler::movb(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src, true);
  emit_byte(0x88);
  emit_operand(src, dst);
}

// Word store of immediate: 66-prefixed C7 /0 imm16.
void Assembler::movw(Address dst, int imm16) {
  InstructionMark im(this);
  emit_byte(0x66); // switch to 16-bit mode
  prefix(dst);
  emit_byte(0xC7);
  emit_operand(rax, dst, 2);
  emit_word(imm16);
}

// Word load: 66-prefixed 8B /r.
void Assembler::movw(Register dst, Address src) {
  InstructionMark im(this);
  emit_byte(0x66);
  prefix(src, dst);
  emit_byte(0x8B);
  emit_operand(dst, src);
}

// Word store: 66-prefixed 89 /r.
void Assembler::movw(Address dst, Register src) {
  InstructionMark im(this);
  emit_byte(0x66);
  prefix(dst, src);
  emit_byte(0x89);
  emit_operand(src, dst);
}
// Uses zero extension.
// 32-bit immediate move: B8+reg imm32 (upper 32 bits of dst are zeroed
// by the hardware, hence the note above).
void Assembler::movl(Register dst, int imm32) {
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xB8 | encode);
  emit_long(imm32);
}

// 32-bit register-register move: 8B /r.
void Assembler::movl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x8B);
  emit_byte(0xC0 | encode);
}

// 32-bit load: 8B /r.
void Assembler::movl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x8B);
  emit_operand(dst, src);
}

// 32-bit store of immediate: C7 /0 imm32.
void Assembler::movl(Address dst, int imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0xC7);
  emit_operand(rax, dst, 4);
  emit_long(imm32);
}

// 32-bit store: 89 /r.
void Assembler::movl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_byte(0x89);
  emit_operand(src, dst);
}
// Load a full 64-bit immediate: REX.W B8+reg imm64.
void Assembler::mov64(Register dst, intptr_t imm64) {
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xB8 | encode);
  emit_long64(imm64);
}

// As mov64 above, but the 64-bit payload carries relocation info so the
// immediate can be patched (e.g. embedded oops).
void Assembler::mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec) {
  InstructionMark im(this);
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xB8 | encode);
  emit_data64(imm64, rspec);
}

// 64-bit register-register move: REX.W 8B /r.
void Assembler::movq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x8B);
  emit_byte(0xC0 | encode);
}

// 64-bit load: REX.W 8B /r.
void Assembler::movq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x8B);
  emit_operand(dst, src);
}

// 64-bit store of a sign-extended 32-bit immediate: REX.W C7 /0 imm32.
// The assert guards that the value survives the sign extension.
void Assembler::mov64(Address dst, intptr_t imm32) {
  assert(is_simm32(imm32), "lost bits");
  InstructionMark im(this);
  prefixq(dst);
  emit_byte(0xC7);
  emit_operand(rax, dst, 4);
  emit_long(imm32);
}

// 64-bit store: REX.W 89 /r.
void Assembler::movq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_byte(0x89);
  emit_operand(src, dst);
}
// Sign-extend byte to 32 bits: 0F BE /r.
void Assembler::movsbl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0xBE);
  emit_operand(dst, src);
}

// Register form; 'true' = byte instruction, so spl/bpl/sil/dil sources
// get the bare REX they need.
void Assembler::movsbl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding(), true);
  emit_byte(0x0F);
  emit_byte(0xBE);
  emit_byte(0xC0 | encode);
}

// Sign-extend word to 32 bits: 0F BF /r.
void Assembler::movswl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0xBF);
  emit_operand(dst, src);
}

void Assembler::movswl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0xBF);
  emit_byte(0xC0 | encode);
}

// Sign-extend dword to 64 bits (movsxd): REX.W 63 /r.
void Assembler::movslq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x63);
  emit_operand(dst, src);
}

void Assembler::movslq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x63);
  emit_byte(0xC0 | encode);
}

// Zero-extend byte to 32 bits: 0F B6 /r.
void Assembler::movzbl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0xB6);
  emit_operand(dst, src);
}

// Register form; byte instruction, see movsbl above.
void Assembler::movzbl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding(), true);
  emit_byte(0x0F);
  emit_byte(0xB6);
  emit_byte(0xC0 | encode);
}

// Zero-extend word to 32 bits: 0F B7 /r.
void Assembler::movzwl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0xB7);
  emit_operand(dst, src);
}

void Assembler::movzwl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0xB7);
  emit_byte(0xC0 | encode);
}
// Scalar single-precision move: F3 0F 10 (load) / F3 0F 11 (store).
// The mandatory F3 prefix must precede any REX prefix, hence the
// emit_byte before prefix_and_encode/prefix.
void Assembler::movss(XMMRegister dst, XMMRegister src) {
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x10);
  emit_byte(0xC0 | encode);
}

void Assembler::movss(XMMRegister dst, Address src) {
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x10);
  emit_operand(dst, src);
}

void Assembler::movss(Address dst, XMMRegister src) {
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(dst, src);
  emit_byte(0x0F);
  emit_byte(0x11);
  emit_operand(src, dst);
}

// Scalar double-precision move: F2 0F 10 (load) / F2 0F 11 (store).
void Assembler::movsd(XMMRegister dst, XMMRegister src) {
  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x10);
  emit_byte(0xC0 | encode);
}

void Assembler::movsd(XMMRegister dst, Address src) {
  InstructionMark im(this);
  emit_byte(0xF2);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x10);
  emit_operand(dst, src);
}

void Assembler::movsd(Address dst, XMMRegister src) {
  InstructionMark im(this);
  emit_byte(0xF2);
  prefix(dst, src);
  emit_byte(0x0F);
  emit_byte(0x11);
  emit_operand(src, dst);
}
// New cpus require to use movsd and movss to avoid partial register stall
// when loading from memory. But for old Opteron use movlpd instead of movsd.
// The selection is done in MacroAssembler::movdbl() and movflt().
// Encoding: 66 0F 12 /r -- loads the low 64 bits, upper bits untouched.
void Assembler::movlpd(XMMRegister dst, Address src) {
  InstructionMark im(this);
  emit_byte(0x66);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x12);
  emit_operand(dst, src);
}
// Aligned packed-double move: 66 0F 28 /r. The REX prefix is computed
// by hand here; extended encodings are reduced into the 3-bit fields of
// the final ModRM byte after the appropriate REX bits are emitted.
void Assembler::movapd(XMMRegister dst, XMMRegister src) {
  int dstenc = dst->encoding();
  int srcenc = src->encoding();
  emit_byte(0x66);
  if (dstenc < 8) {
    if (srcenc >= 8) {
      prefix(REX_B);
      srcenc -= 8;
    }
  } else {
    if (srcenc < 8) {
      prefix(REX_R);
    } else {
      prefix(REX_RB);
      srcenc -= 8;
    }
    dstenc -= 8;
  }
  emit_byte(0x0F);
  emit_byte(0x28);
  emit_byte(0xC0 | dstenc << 3 | srcenc);
}

// Aligned packed-single move: 0F 28 /r (no mandatory prefix); REX
// handling identical to movapd above.
void Assembler::movaps(XMMRegister dst, XMMRegister src) {
  int dstenc = dst->encoding();
  int srcenc = src->encoding();
  if (dstenc < 8) {
    if (srcenc >= 8) {
      prefix(REX_B);
      srcenc -= 8;
    }
  } else {
    if (srcenc < 8) {
      prefix(REX_R);
    } else {
      prefix(REX_RB);
      srcenc -= 8;
    }
    dstenc -= 8;
  }
  emit_byte(0x0F);
  emit_byte(0x28);
  emit_byte(0xC0 | dstenc << 3 | srcenc);
}
// 32-bit GPR<->XMM moves: 66 0F 6E (GPR->XMM) / 66 0F 7E (XMM->GPR).
void Assembler::movdl(XMMRegister dst, Register src) {
  emit_byte(0x66);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x6E);
  emit_byte(0xC0 | encode);
}

void Assembler::movdl(Register dst, XMMRegister src) {
  emit_byte(0x66);
  // swap src/dst to get correct prefix
  int encode = prefix_and_encode(src->encoding(), dst->encoding());
  emit_byte(0x0F);
  emit_byte(0x7E);
  emit_byte(0xC0 | encode);
}

// 64-bit GPR<->XMM moves: same opcodes as movdl but with REX.W.
void Assembler::movdq(XMMRegister dst, Register src) {
  emit_byte(0x66);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x6E);
  emit_byte(0xC0 | encode);
}

void Assembler::movdq(Register dst, XMMRegister src) {
  emit_byte(0x66);
  // swap src/dst to get correct prefix
  int encode = prefixq_and_encode(src->encoding(), dst->encoding());
  emit_byte(0x0F);
  emit_byte(0x7E);
  emit_byte(0xC0 | encode);
}
// Packed XOR: 66 0F EF /r.
void Assembler::pxor(XMMRegister dst, Address src) {
  InstructionMark im(this);
  emit_byte(0x66);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0xEF);
  emit_operand(dst, src);
}

// Register form. NOTE(review): the InstructionMark is unnecessary here
// (no memory operand / relocation), but harmless.
void Assembler::pxor(XMMRegister dst, XMMRegister src) {
  InstructionMark im(this);
  emit_byte(0x66);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0xEF);
  emit_byte(0xC0 | encode);
}
// Aligned 128-bit move: 66 0F 6F (load) / 66 0F 7F (store).
void Assembler::movdqa(XMMRegister dst, Address src) {
  InstructionMark im(this);
  emit_byte(0x66);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x6F);
  emit_operand(dst, src);
}

// Register form. NOTE(review): this uses prefixq_and_encode, so REX.W is
// set; REX.W has no effect on movdqa so the encoding is correct, but it
// is inconsistent with the other SSE reg-reg forms (and one byte larger).
// Changing it would alter emitted code bytes -- left as is.
void Assembler::movdqa(XMMRegister dst, XMMRegister src) {
  emit_byte(0x66);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x6F);
  emit_byte(0xC0 | encode);
}

void Assembler::movdqa(Address dst, XMMRegister src) {
  InstructionMark im(this);
  emit_byte(0x66);
  prefix(dst, src);
  emit_byte(0x0F);
  emit_byte(0x7F);
  emit_operand(src, dst);
}
// 64-bit XMM load: F3 0F 7E /r (zeroes the upper half of dst).
void Assembler::movq(XMMRegister dst, Address src) {
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x7E);
  emit_operand(dst, src);
}

// 64-bit XMM store: 66 0F D6 /r.
void Assembler::movq(Address dst, XMMRegister src) {
  InstructionMark im(this);
  emit_byte(0x66);
  prefix(dst, src);
  emit_byte(0x0F);
  emit_byte(0xD6);
  emit_operand(src, dst);
}
// Shuffle packed dwords: 66 0F 70 /r ib; 'mode' is the shuffle control
// immediate.
void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) {
  assert(isByte(mode), "invalid value");
  emit_byte(0x66);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x70);
  emit_byte(0xC0 | encode);
  emit_byte(mode & 0xFF);
}
1641 void Assembler::pshufd(XMMRegister dst, Address src, int mode) {
1642 assert(isByte(mode), "invalid value");
1643 InstructionMark im(this);
1644 emit_byte(0x66);
1645 emit_byte(0x0F);
1646 emit_byte(0x70);
1647 emit_operand(dst, src);
1648 emit_byte(mode & 0xFF);
1649 }
// Shuffle packed low words: F2 0F 70 /r ib; 'mode' is the shuffle
// control immediate.
void Assembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
  assert(isByte(mode), "invalid value");
  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x70);
  emit_byte(0xC0 | encode);
  emit_byte(mode & 0xFF);
}
1661 void Assembler::pshuflw(XMMRegister dst, Address src, int mode) {
1662 assert(isByte(mode), "invalid value");
1663 InstructionMark im(this);
1664 emit_byte(0xF2);
1665 emit_byte(0x0F);
1666 emit_byte(0x70);
1667 emit_operand(dst, src);
1668 emit_byte(mode & 0xFF);
1669 }
// Conditional moves: 0F 40+cc /r; the condition code is folded into the
// second opcode byte.
void Assembler::cmovl(Condition cc, Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x40 | cc);
  emit_byte(0xC0 | encode);
}

void Assembler::cmovl(Condition cc, Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x40 | cc);
  emit_operand(dst, src);
}

// 64-bit variants: same opcodes with REX.W.
void Assembler::cmovq(Condition cc, Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x40 | cc);
  emit_byte(0xC0 | encode);
}

void Assembler::cmovq(Condition cc, Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x0F);
  emit_byte(0x40 | cc);
  emit_operand(dst, src);
}
// Shared REX + first opcode byte for the prefetch family.
void Assembler::prefetch_prefix(Address src) {
  prefix(src);
  emit_byte(0x0F);
}

// prefetcht0/t1/t2/nta are 0F 18 with the hint selected by the ModRM
// reg field (/1, /2, /3, /0 -- encoded via the dummy register operand).
void Assembler::prefetcht0(Address src) {
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_byte(0x18);
  emit_operand(rcx, src); // 1, src
}

void Assembler::prefetcht1(Address src) {
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_byte(0x18);
  emit_operand(rdx, src); // 2, src
}

void Assembler::prefetcht2(Address src) {
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_byte(0x18);
  emit_operand(rbx, src); // 3, src
}

void Assembler::prefetchnta(Address src) {
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_byte(0x18);
  emit_operand(rax, src); // 0, src
}

// prefetchw (write-intent prefetch): 0F 0D /1.
void Assembler::prefetchw(Address src) {
  InstructionMark im(this);
  prefetch_prefix(src);
  emit_byte(0x0D);
  emit_operand(rcx, src); // 1, src
}
// Add with carry, 32-bit forms. Immediate form is 81 /2 (0xD0 selects
// the /2 extension via emit_arith); reg/mem form is 13 /r.
void Assembler::adcl(Register dst, int imm32) {
  prefix(dst);
  emit_arith(0x81, 0xD0, dst, imm32);
}

void Assembler::adcl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x13);
  emit_operand(dst, src);
}

void Assembler::adcl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x13, 0xC0, dst, src);
}

// 64-bit variants (REX.W).
void Assembler::adcq(Register dst, int imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xD0, dst, imm32);
}

void Assembler::adcq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x13);
  emit_operand(dst, src);
}
1770 void Assembler::adcq(Register dst, Register src) {
1771 (int) prefixq_and_encode(dst->encoding(), src->encoding());
1772 emit_arith(0x13, 0xC0, dst, src);
1773 }
// Add, 32-bit forms. Immediate forms go through emit_arith[_operand]
// (81 /0, with the short 83 /0 imm8 form chosen there when possible);
// reg forms are 01 /r (store) and 03 /r (load).
void Assembler::addl(Address dst, int imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rax, dst,imm32);
}

void Assembler::addl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_byte(0x01);
  emit_operand(src, dst);
}

void Assembler::addl(Register dst, int imm32) {
  prefix(dst);
  emit_arith(0x81, 0xC0, dst, imm32);
}

void Assembler::addl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x03);
  emit_operand(dst, src);
}

void Assembler::addl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x03, 0xC0, dst, src);
}

// 64-bit variants (REX.W).
void Assembler::addq(Address dst, int imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rax, dst,imm32);
}

void Assembler::addq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_byte(0x01);
  emit_operand(src, dst);
}

void Assembler::addq(Register dst, int imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xC0, dst, imm32);
}

void Assembler::addq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x03);
  emit_operand(dst, src);
}

void Assembler::addq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x03, 0xC0, dst, src);
}
// Bitwise AND, 32-bit forms. Immediate form is 81 /4 (0xE0 selects /4);
// load form is 23 /r.
void Assembler::andl(Register dst, int imm32) {
  prefix(dst);
  emit_arith(0x81, 0xE0, dst, imm32);
}

void Assembler::andl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x23);
  emit_operand(dst, src);
}

void Assembler::andl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x23, 0xC0, dst, src);
}

// 64-bit variants (REX.W).
void Assembler::andq(Register dst, int imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xE0, dst, imm32);
}

void Assembler::andq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x23);
  emit_operand(dst, src);
}
1864 void Assembler::andq(Register dst, Register src) {
1865 (int) prefixq_and_encode(dst->encoding(), src->encoding());
1866 emit_arith(0x23, 0xC0, dst, src);
1867 }
// Compare byte in memory with immediate: 80 /7 imm8 (rdi encodes /7).
void Assembler::cmpb(Address dst, int imm8) {
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0x80);
  emit_operand(rdi, dst, 1);
  emit_byte(imm8);
}

// 32-bit compare, memory with immediate: 81 /7 imm32.
void Assembler::cmpl(Address dst, int imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0x81);
  emit_operand(rdi, dst, 4);
  emit_long(imm32);
}

void Assembler::cmpl(Register dst, int imm32) {
  prefix(dst);
  emit_arith(0x81, 0xF8, dst, imm32);
}

// Register forms: 3B /r (cmp dst, src).
void Assembler::cmpl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x3B, 0xC0, dst, src);
}

void Assembler::cmpl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x3B);
  emit_operand(dst, src);
}
// 64-bit compare with sign-extended immediate: REX.W 81 /7 imm32.
void Assembler::cmpq(Address dst, int imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_byte(0x81);
  emit_operand(rdi, dst, 4);
  emit_long(imm32);
}

void Assembler::cmpq(Register dst, int imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xF8, dst, imm32);
}
1915 void Assembler::cmpq(Address dst, Register src) {
1916 prefixq(dst, src);
1917 emit_byte(0x3B);
1918 emit_operand(src, dst);
1919 }
// 64-bit register compares: REX.W 3B /r (cmp dst, src).
void Assembler::cmpq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x3B, 0xC0, dst, src);
}

void Assembler::cmpq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x3B);
  emit_operand(dst, src);
}
// Unordered scalar single compare: 0F 2E /r.
void Assembler::ucomiss(XMMRegister dst, XMMRegister src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2E);
  emit_byte(0xC0 | encode);
}

// Double variant: 66-prefixed ucomiss (66 0F 2E /r); emitting 0x66
// before delegating keeps it ahead of the REX prefix, as required.
void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
  emit_byte(0x66);
  ucomiss(dst, src);
}
// Decrement, 32-bit register: FF /1 (0xC8 | reg).
void Assembler::decl(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementl() instead.
  // Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xFF);
  emit_byte(0xC8 | encode);
}

// Decrement, 32-bit memory: FF /1.
void Assembler::decl(Address dst) {
  // Don't use it directly. Use MacroAssembler::decrementl() instead.
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0xFF);
  emit_operand(rcx, dst);
}

// 64-bit variants (REX.W FF /1).
void Assembler::decq(Register dst) {
  // Don't use it directly. Use MacroAssembler::decrementq() instead.
  // Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xFF);
  emit_byte(0xC8 | encode);
}

void Assembler::decq(Address dst) {
  // Don't use it directly. Use MacroAssembler::decrementq() instead.
  InstructionMark im(this);
  prefixq(dst);
  emit_byte(0xFF);
  emit_operand(rcx, dst);
}
// Signed divide edx:eax / src: F7 /7 (0xF8 | reg).
void Assembler::idivl(Register src) {
  int encode = prefix_and_encode(src->encoding());
  emit_byte(0xF7);
  emit_byte(0xF8 | encode);
}

// 64-bit signed divide rdx:rax / src: REX.W F7 /7.
void Assembler::idivq(Register src) {
  int encode = prefixq_and_encode(src->encoding());
  emit_byte(0xF7);
  emit_byte(0xF8 | encode);
}

// Sign-extend eax into edx:eax (cdq): 99.
void Assembler::cdql() {
  emit_byte(0x99);
}

// Sign-extend rax into rdx:rax (cqo): REX.W 99.
void Assembler::cdqq() {
  prefix(REX_W);
  emit_byte(0x99);
}
// Two-operand signed multiply: 0F AF /r.
void Assembler::imull(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0xAF);
  emit_byte(0xC0 | encode);
}

// Three-operand form: 6B /r ib when the immediate fits a byte,
// otherwise 69 /r imm32.
void Assembler::imull(Register dst, Register src, int value) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  if (is8bit(value)) {
    emit_byte(0x6B);
    emit_byte(0xC0 | encode);
    emit_byte(value);
  } else {
    emit_byte(0x69);
    emit_byte(0xC0 | encode);
    emit_long(value);
  }
}

// 64-bit variants (REX.W).
void Assembler::imulq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0xAF);
  emit_byte(0xC0 | encode);
}

void Assembler::imulq(Register dst, Register src, int value) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  if (is8bit(value)) {
    emit_byte(0x6B);
    emit_byte(0xC0 | encode);
    emit_byte(value);
  } else {
    emit_byte(0x69);
    emit_byte(0xC0 | encode);
    emit_long(value);
  }
}
// Increment, 32-bit register: FF /0 (0xC0 | reg).
void Assembler::incl(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementl() instead.
  // Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xFF);
  emit_byte(0xC0 | encode);
}

// Increment, 32-bit memory: FF /0.
void Assembler::incl(Address dst) {
  // Don't use it directly. Use MacroAssembler::incrementl() instead.
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0xFF);
  emit_operand(rax, dst);
}

// 64-bit variants (REX.W FF /0).
void Assembler::incq(Register dst) {
  // Don't use it directly. Use MacroAssembler::incrementq() instead.
  // Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xFF);
  emit_byte(0xC0 | encode);
}

void Assembler::incq(Address dst) {
  // Don't use it directly. Use MacroAssembler::incrementq() instead.
  InstructionMark im(this);
  prefixq(dst);
  emit_byte(0xFF);
  emit_operand(rax, dst);
}
// 32-bit lea: the 0x67 address-size prefix forces 32-bit address
// arithmetic so the result matches 32-bit semantics.
void Assembler::leal(Register dst, Address src) {
  InstructionMark im(this);
  emit_byte(0x67); // addr32
  prefix(src, dst);
  emit_byte(0x8D);
  emit_operand(dst, src);
}

// 64-bit lea: REX.W 8D /r.
void Assembler::leaq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x8D);
  emit_operand(dst, src);
}
// Unsigned multiply eax by operand into edx:eax: F7 /4 (rsp encodes /4).
void Assembler::mull(Address src) {
  InstructionMark im(this);
  // was missing
  prefix(src);
  emit_byte(0xF7);
  emit_operand(rsp, src);
}

void Assembler::mull(Register src) {
  // was missing
  int encode = prefix_and_encode(src->encoding());
  emit_byte(0xF7);
  emit_byte(0xE0 | encode);
}
// Two's-complement negate: F7 /3 (0xD8 | reg).
void Assembler::negl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xF7);
  emit_byte(0xD8 | encode);
}

void Assembler::negq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xF7);
  emit_byte(0xD8 | encode);
}

// One's-complement not: F7 /2 (0xD0 | reg).
void Assembler::notl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xF7);
  emit_byte(0xD0 | encode);
}

void Assembler::notq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xF7);
  emit_byte(0xD0 | encode);
}
// Bitwise OR, 32-bit forms. Memory-immediate form is 81 /1 imm32
// (rcx encodes /1); register-immediate goes through emit_arith
// (0xC8 selects /1); load form is 0B /r.
void Assembler::orl(Address dst, int imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0x81);
  emit_operand(rcx, dst, 4);
  emit_long(imm32);
}

void Assembler::orl(Register dst, int imm32) {
  prefix(dst);
  emit_arith(0x81, 0xC8, dst, imm32);
}

void Assembler::orl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x0B);
  emit_operand(dst, src);
}

void Assembler::orl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x0B, 0xC0, dst, src);
}

// 64-bit variants (REX.W).
void Assembler::orq(Address dst, int imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_byte(0x81);
  emit_operand(rcx, dst, 4);
  emit_long(imm32);
}

void Assembler::orq(Register dst, int imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xC8, dst, imm32);
}

void Assembler::orq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x0B);
  emit_operand(dst, src);
}

void Assembler::orq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x0B, 0xC0, dst, src);
}
// Rotate-through-carry left: D1 /2 for a count of 1, else C1 /2 ib
// (0xD0 selects the /2 extension).
void Assembler::rcll(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_byte(0xD1);
    emit_byte(0xD0 | encode);
  } else {
    emit_byte(0xC1);
    emit_byte(0xD0 | encode);
    emit_byte(imm8);
  }
}

// 64-bit variant (REX.W); counts up to 63 are legal, hence the
// ">> 1" in the assert (isShiftCount checks the 0..31 range).
void Assembler::rclq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_byte(0xD1);
    emit_byte(0xD0 | encode);
  } else {
    emit_byte(0xC1);
    emit_byte(0xD0 | encode);
    emit_byte(imm8);
  }
}
// Arithmetic shift right by immediate: D1 /7 for a count of 1,
// else C1 /7 ib (0xF8 selects /7).
void Assembler::sarl(Register dst, int imm8) {
  int encode = prefix_and_encode(dst->encoding());
  assert(isShiftCount(imm8), "illegal shift count");
  if (imm8 == 1) {
    emit_byte(0xD1);
    emit_byte(0xF8 | encode);
  } else {
    emit_byte(0xC1);
    emit_byte(0xF8 | encode);
    emit_byte(imm8);
  }
}

// Shift by cl: D3 /7.
void Assembler::sarl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xD3);
  emit_byte(0xF8 | encode);
}

// 64-bit variants (REX.W); count may be up to 63, hence ">> 1".
void Assembler::sarq(Register dst, int imm8) {
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_byte(0xD1);
    emit_byte(0xF8 | encode);
  } else {
    emit_byte(0xC1);
    emit_byte(0xF8 | encode);
    emit_byte(imm8);
  }
}

void Assembler::sarq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xD3);
  emit_byte(0xF8 | encode);
}
// Subtract with borrow, 32-bit forms. Immediate forms are 81 /3
// (rbx / 0xD8 select the /3 extension); load form is 1B /r.
void Assembler::sbbl(Address dst, int imm32) {
  InstructionMark im(this);
  prefix(dst);
  emit_arith_operand(0x81, rbx, dst, imm32);
}

void Assembler::sbbl(Register dst, int imm32) {
  prefix(dst);
  emit_arith(0x81, 0xD8, dst, imm32);
}

void Assembler::sbbl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x1B);
  emit_operand(dst, src);
}

void Assembler::sbbl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x1B, 0xC0, dst, src);
}

// 64-bit variants (REX.W).
void Assembler::sbbq(Address dst, int imm32) {
  InstructionMark im(this);
  prefixq(dst);
  emit_arith_operand(0x81, rbx, dst, imm32);
}

void Assembler::sbbq(Register dst, int imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xD8, dst, imm32);
}

void Assembler::sbbq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x1B);
  emit_operand(dst, src);
}

void Assembler::sbbq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x1B, 0xC0, dst, src);
}
// SHL r/m32, imm8 (0xC1 /4 ib), with the shorter 0xD1 /4 form for a
// shift count of 1.
void Assembler::shll(Register dst, int imm8) {
  assert(isShiftCount(imm8), "illegal shift count");
  int encode = prefix_and_encode(dst->encoding());
  if (imm8 == 1 ) {
    emit_byte(0xD1);
    emit_byte(0xE0 | encode);
  } else {
    emit_byte(0xC1);
    emit_byte(0xE0 | encode);
    emit_byte(imm8);
  }
}

// SHL r/m32 by CL (0xD3 /4).
void Assembler::shll(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xD3);
  emit_byte(0xE0 | encode);
}

// SHL r/m64, imm8 (REX.W 0xC1 /4 ib), short 0xD1 form for count 1.
void Assembler::shlq(Register dst, int imm8) {
  // 64-bit counts go up to 63; isShiftCount() checks 0..31, hence the halving.
  assert(isShiftCount(imm8 >> 1), "illegal shift count");
  int encode = prefixq_and_encode(dst->encoding());
  if (imm8 == 1) {
    emit_byte(0xD1);
    emit_byte(0xE0 | encode);
  } else {
    emit_byte(0xC1);
    emit_byte(0xE0 | encode);
    emit_byte(imm8);
  }
}

// SHL r/m64 by CL (REX.W 0xD3 /4).
void Assembler::shlq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xD3);
  emit_byte(0xE0 | encode);
}
2322 void Assembler::shrl(Register dst, int imm8) {
2323 assert(isShiftCount(imm8), "illegal shift count");
2324 int encode = prefix_and_encode(dst->encoding());
2325 emit_byte(0xC1);
2326 emit_byte(0xE8 | encode);
2327 emit_byte(imm8);
2328 }
// SHR r/m32 by CL (0xD3 /5).
void Assembler::shrl(Register dst) {
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xD3);
  emit_byte(0xE8 | encode);
}
2336 void Assembler::shrq(Register dst, int imm8) {
2337 assert(isShiftCount(imm8 >> 1), "illegal shift count");
2338 int encode = prefixq_and_encode(dst->encoding());
2339 emit_byte(0xC1);
2340 emit_byte(0xE8 | encode);
2341 emit_byte(imm8);
2342 }
// SHR r/m64 by CL (REX.W 0xD3 /5).
void Assembler::shrq(Register dst) {
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xD3);
  emit_byte(0xE8 | encode);
}
// SUB m32, imm32: uses the sign-extended imm8 form (0x83) when the
// immediate fits in 8 bits; rbp supplies the /5 opcode extension.
void Assembler::subl(Address dst, int imm32) {
  InstructionMark im(this);
  prefix(dst);
  if (is8bit(imm32)) {
    emit_byte(0x83);
    emit_operand(rbp, dst, 1);
    emit_byte(imm32 & 0xFF);
  } else {
    emit_byte(0x81);
    emit_operand(rbp, dst, 4);
    emit_long(imm32);
  }
}

// SUB r32, imm32 (0x81 /5, register form 0xE8 | reg).
void Assembler::subl(Register dst, int imm32) {
  prefix(dst);
  emit_arith(0x81, 0xE8, dst, imm32);
}

// SUB m32, r32 (0x29).
void Assembler::subl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_byte(0x29);
  emit_operand(src, dst);
}

// SUB r32, m32 (0x2B).
void Assembler::subl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x2B);
  emit_operand(dst, src);
}

// SUB r32, r32 (0x2B).
void Assembler::subl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x2B, 0xC0, dst, src);
}

// SUB m64, imm32 (sign-extended); imm8 short form when it fits.
void Assembler::subq(Address dst, int imm32) {
  InstructionMark im(this);
  prefixq(dst);
  if (is8bit(imm32)) {
    emit_byte(0x83);
    emit_operand(rbp, dst, 1);
    emit_byte(imm32 & 0xFF);
  } else {
    emit_byte(0x81);
    emit_operand(rbp, dst, 4);
    emit_long(imm32);
  }
}

// SUB r64, imm32 (sign-extended).
void Assembler::subq(Register dst, int imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xE8, dst, imm32);
}

// SUB m64, r64.
void Assembler::subq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_byte(0x29);
  emit_operand(src, dst);
}

// SUB r64, m64.
void Assembler::subq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x2B);
  emit_operand(dst, src);
}

// SUB r64, r64.
void Assembler::subq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x2B, 0xC0, dst, src);
}
// TEST r/m8, imm8 (0xF6 /0). The 'true' argument flags a byte-sized
// register operand for the prefix computation.
void Assembler::testb(Register dst, int imm8) {
  (void) prefix_and_encode(dst->encoding(), true);
  emit_arith_b(0xF6, 0xC0, dst, imm8);
}

// TEST r/m32, imm32.
void Assembler::testl(Register dst, int imm32) {
  // not using emit_arith because test
  // doesn't support sign-extension of
  // 8bit operands
  int encode = dst->encoding();
  if (encode == 0) {
    // eax gets the short accumulator form: TEST EAX, imm32 (0xA9).
    emit_byte(0xA9);
  } else {
    encode = prefix_and_encode(encode);
    emit_byte(0xF7);
    emit_byte(0xC0 | encode);
  }
  emit_long(imm32);
}

// TEST r32, r32 (0x85).
void Assembler::testl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x85, 0xC0, dst, src);
}

// TEST r/m64, imm32 (sign-extended).
void Assembler::testq(Register dst, int imm32) {
  // not using emit_arith because test
  // doesn't support sign-extension of
  // 8bit operands
  int encode = dst->encoding();
  if (encode == 0) {
    // rax gets the short accumulator form: REX.W + TEST RAX, imm32.
    prefix(REX_W);
    emit_byte(0xA9);
  } else {
    encode = prefixq_and_encode(encode);
    emit_byte(0xF7);
    emit_byte(0xC0 | encode);
  }
  emit_long(imm32);
}

// TEST r64, r64 (REX.W 0x85).
void Assembler::testq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x85, 0xC0, dst, src);
}
// XADD m32, r32 (0x0F 0xC1): exchange-and-add; callers combine with lock()
// for atomicity.
void Assembler::xaddl(Address dst, Register src) {
  InstructionMark im(this);
  prefix(dst, src);
  emit_byte(0x0F);
  emit_byte(0xC1);
  emit_operand(src, dst);
}

// XADD m64, r64 (REX.W 0x0F 0xC1).
void Assembler::xaddq(Address dst, Register src) {
  InstructionMark im(this);
  prefixq(dst, src);
  emit_byte(0x0F);
  emit_byte(0xC1);
  emit_operand(src, dst);
}
// XOR r32, imm32 (0x81 /6, register form 0xF0 | reg).
void Assembler::xorl(Register dst, int imm32) {
  prefix(dst);
  emit_arith(0x81, 0xF0, dst, imm32);
}

// XOR r32, r32 (0x33).
void Assembler::xorl(Register dst, Register src) {
  (void) prefix_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x33, 0xC0, dst, src);
}

// XOR r32, m32 (0x33).
void Assembler::xorl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x33);
  emit_operand(dst, src);
}

// XOR r64, imm32 (sign-extended).
void Assembler::xorq(Register dst, int imm32) {
  (void) prefixq_and_encode(dst->encoding());
  emit_arith(0x81, 0xF0, dst, imm32);
}

// XOR r64, r64.
void Assembler::xorq(Register dst, Register src) {
  (void) prefixq_and_encode(dst->encoding(), src->encoding());
  emit_arith(0x33, 0xC0, dst, src);
}

// XOR r64, m64.
void Assembler::xorq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x33);
  emit_operand(dst, src);
}
// BSWAP r32 (0x0F 0xC8+reg): byte-swap a 32-bit register.
void Assembler::bswapl(Register reg) {
  int encode = prefix_and_encode(reg->encoding());
  emit_byte(0x0F);
  emit_byte(0xC8 | encode);
}

// BSWAP r64 (REX.W 0x0F 0xC8+reg).
void Assembler::bswapq(Register reg) {
  int encode = prefixq_and_encode(reg->encoding());
  emit_byte(0x0F);
  emit_byte(0xC8 | encode);
}

// LOCK prefix (0xF0) for the instruction emitted next.
void Assembler::lock() {
  emit_byte(0xF0);
}
// XCHG r32, m32 (0x87); XCHG with a memory operand is implicitly locked.
void Assembler::xchgl(Register dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x87);
  emit_operand(dst, src);
}

// XCHG r32, r32 (0x87).
void Assembler::xchgl(Register dst, Register src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x87);
  emit_byte(0xc0 | encode);
}

// XCHG r64, m64 (REX.W 0x87).
void Assembler::xchgq(Register dst, Address src) {
  InstructionMark im(this);
  prefixq(src, dst);
  emit_byte(0x87);
  emit_operand(dst, src);
}

// XCHG r64, r64 (REX.W 0x87).
void Assembler::xchgq(Register dst, Register src) {
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x87);
  emit_byte(0xc0 | encode);
}
// CMPXCHG m32, r32 (0x0F 0xB1); callers combine with lock() for atomicity.
void Assembler::cmpxchgl(Register reg, Address adr) {
  InstructionMark im(this);
  prefix(adr, reg);
  emit_byte(0x0F);
  emit_byte(0xB1);
  emit_operand(reg, adr);
}

// CMPXCHG m64, r64 (REX.W 0x0F 0xB1).
void Assembler::cmpxchgq(Register reg, Address adr) {
  InstructionMark im(this);
  prefixq(adr, reg);
  emit_byte(0x0F);
  emit_byte(0xB1);
  emit_operand(reg, adr);
}

// HLT (0xF4): halt; used as a fill/trap byte.
void Assembler::hlt() {
  emit_byte(0xF4);
}
// Multi-byte NOPs built from "0x0F 0x1F [address]" (NOP r/m32) with
// increasingly long addressing forms; used by nop(int) below for
// patching-safe padding.

void Assembler::addr_nop_4() {
  // 4 bytes: NOP DWORD PTR [EAX+0]
  emit_byte(0x0F);
  emit_byte(0x1F);
  emit_byte(0x40); // emit_rm(cbuf, 0x1, EAX_enc, EAX_enc);
  emit_byte(0);    // 8-bits offset (1 byte)
}

void Assembler::addr_nop_5() {
  // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bits offset
  emit_byte(0x0F);
  emit_byte(0x1F);
  emit_byte(0x44); // emit_rm(cbuf, 0x1, EAX_enc, 0x4);
  emit_byte(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  emit_byte(0);    // 8-bits offset (1 byte)
}

void Assembler::addr_nop_7() {
  // 7 bytes: NOP DWORD PTR [EAX+0] 32-bits offset
  emit_byte(0x0F);
  emit_byte(0x1F);
  emit_byte(0x80); // emit_rm(cbuf, 0x2, EAX_enc, EAX_enc);
  emit_long(0);    // 32-bits offset (4 bytes)
}

void Assembler::addr_nop_8() {
  // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bits offset
  emit_byte(0x0F);
  emit_byte(0x1F);
  emit_byte(0x84); // emit_rm(cbuf, 0x2, EAX_enc, 0x4);
  emit_byte(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
  emit_long(0);    // 32-bits offset (4 bytes)
}
// Emit i bytes of padding that execute as no-ops. When UseAddressNop is
// enabled, uses the multi-byte "0x0F 0x1F [address]" NOPs with vendor-tuned
// sequences (Intel avoids consecutive address nops, AMD uses them freely);
// otherwise falls back to 0x66-prefixed 0x90 NOPs.
void Assembler::nop(int i) {
  assert(i > 0, " ");
  if (UseAddressNop && VM_Version::is_intel()) {
    //
    // Using multi-bytes nops "0x0F 0x1F [address]" for Intel
    // 1: 0x90
    // 2: 0x66 0x90
    // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
    // 4: 0x0F 0x1F 0x40 0x00
    // 5: 0x0F 0x1F 0x44 0x00 0x00
    // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00
    // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00

    // The rest coding is Intel specific - don't use consecutive address nops

    // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
    // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90

    while(i >= 15) {
      // For Intel don't generate consecutive address nops (mix with regular nops)
      i -= 15;
      emit_byte(0x66); // size prefix
      emit_byte(0x66); // size prefix
      emit_byte(0x66); // size prefix
      addr_nop_8();
      emit_byte(0x66); // size prefix
      emit_byte(0x66); // size prefix
      emit_byte(0x66); // size prefix
      emit_byte(0x90); // nop
    }
    // Fall-through cases below accumulate 0x66 prefixes intentionally.
    switch (i) {
      case 14:
        emit_byte(0x66); // size prefix
      case 13:
        emit_byte(0x66); // size prefix
      case 12:
        addr_nop_8();
        emit_byte(0x66); // size prefix
        emit_byte(0x66); // size prefix
        emit_byte(0x66); // size prefix
        emit_byte(0x90); // nop
        break;
      case 11:
        emit_byte(0x66); // size prefix
      case 10:
        emit_byte(0x66); // size prefix
      case 9:
        emit_byte(0x66); // size prefix
      case 8:
        addr_nop_8();
        break;
      case 7:
        addr_nop_7();
        break;
      case 6:
        emit_byte(0x66); // size prefix
      case 5:
        addr_nop_5();
        break;
      case 4:
        addr_nop_4();
        break;
      case 3:
        // Don't use "0x0F 0x1F 0x00" - need patching safe padding
        emit_byte(0x66); // size prefix
      case 2:
        emit_byte(0x66); // size prefix
      case 1:
        emit_byte(0x90); // nop
        break;
      default:
        assert(i == 0, " ");
    }
    return;
  }
  if (UseAddressNop && VM_Version::is_amd()) {
    //
    // Using multi-bytes nops "0x0F 0x1F [address]" for AMD.
    // 1: 0x90
    // 2: 0x66 0x90
    // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
    // 4: 0x0F 0x1F 0x40 0x00
    // 5: 0x0F 0x1F 0x44 0x00 0x00
    // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00
    // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00

    // The rest coding is AMD specific - use consecutive address nops

    // 12: 0x66 0x0F 0x1F 0x44 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
    // 13: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
    // 14: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    // 15: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
    // 16: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
    // Size prefixes (0x66) are added for larger sizes

    while(i >= 22) {
      i -= 11;
      emit_byte(0x66); // size prefix
      emit_byte(0x66); // size prefix
      emit_byte(0x66); // size prefix
      addr_nop_8();
    }
    // Generate first nop for size between 21-12
    // (fall-through cases accumulate prefixes and decrement i accordingly).
    switch (i) {
      case 21:
        i -= 1;
        emit_byte(0x66); // size prefix
      case 20:
      case 19:
        i -= 1;
        emit_byte(0x66); // size prefix
      case 18:
      case 17:
        i -= 1;
        emit_byte(0x66); // size prefix
      case 16:
      case 15:
        i -= 8;
        addr_nop_8();
        break;
      case 14:
      case 13:
        i -= 7;
        addr_nop_7();
        break;
      case 12:
        i -= 6;
        emit_byte(0x66); // size prefix
        addr_nop_5();
        break;
      default:
        assert(i < 12, " ");
    }

    // Generate second nop for size between 11-1
    switch (i) {
      case 11:
        emit_byte(0x66); // size prefix
      case 10:
        emit_byte(0x66); // size prefix
      case 9:
        emit_byte(0x66); // size prefix
      case 8:
        addr_nop_8();
        break;
      case 7:
        addr_nop_7();
        break;
      case 6:
        emit_byte(0x66); // size prefix
      case 5:
        addr_nop_5();
        break;
      case 4:
        addr_nop_4();
        break;
      case 3:
        // Don't use "0x0F 0x1F 0x00" - need patching safe padding
        emit_byte(0x66); // size prefix
      case 2:
        emit_byte(0x66); // size prefix
      case 1:
        emit_byte(0x90); // nop
        break;
      default:
        assert(i == 0, " ");
    }
    return;
  }

  // Using nops with size prefixes "0x66 0x90".
  // From AMD Optimization Guide:
  // 1: 0x90
  // 2: 0x66 0x90
  // 3: 0x66 0x66 0x90
  // 4: 0x66 0x66 0x66 0x90
  // 5: 0x66 0x66 0x90 0x66 0x90
  // 6: 0x66 0x66 0x90 0x66 0x66 0x90
  // 7: 0x66 0x66 0x66 0x90 0x66 0x66 0x90
  // 8: 0x66 0x66 0x66 0x90 0x66 0x66 0x66 0x90
  // 9: 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
  // 10: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
  //
  while(i > 12) {
    i -= 4;
    emit_byte(0x66); // size prefix
    emit_byte(0x66);
    emit_byte(0x66);
    emit_byte(0x90); // nop
  }
  // 1 - 12 nops
  if(i > 8) {
    if(i > 9) {
      i -= 1;
      emit_byte(0x66);
    }
    i -= 3;
    emit_byte(0x66);
    emit_byte(0x66);
    emit_byte(0x90);
  }
  // 1 - 8 nops
  if(i > 4) {
    if(i > 6) {
      i -= 1;
      emit_byte(0x66);
    }
    i -= 3;
    emit_byte(0x66);
    emit_byte(0x66);
    emit_byte(0x90);
  }
  switch (i) {
    case 4:
      emit_byte(0x66);
    case 3:
      emit_byte(0x66);
    case 2:
      emit_byte(0x66);
    case 1:
      emit_byte(0x90);
      break;
    default:
      assert(i == 0, " ");
  }
}
// RET (0xC3), or RET imm16 (0xC2) which also pops imm16 bytes of arguments.
void Assembler::ret(int imm16) {
  if (imm16 == 0) {
    emit_byte(0xC3);
  } else {
    emit_byte(0xC2);
    emit_word(imm16);
  }
}

// copies a single word from [esi] to [edi]
void Assembler::smovl() {
  emit_byte(0xA5);
}
// copies data from [rsi] to [rdi] using rcx words (m32)
void Assembler::rep_movl() {
  // REP
  emit_byte(0xF3);
  // MOVSL
  emit_byte(0xA5);
}

// copies data from [rsi] to [rdi] using rcx double words (m64)
void Assembler::rep_movq() {
  // REP
  emit_byte(0xF3);
  // MOVSQ
  prefix(REX_W);
  emit_byte(0xA5);
}

// sets rcx double words (m64) with rax value at [rdi]
void Assembler::rep_set() {
  // REP
  emit_byte(0xF3);
  // STOSQ
  prefix(REX_W);
  emit_byte(0xAB);
}

// scans rcx double words (m64) at [rdi] for occurrence of rax
void Assembler::repne_scanq() {
  // REPNE/REPNZ
  emit_byte(0xF2);
  // SCASQ
  prefix(REX_W);
  emit_byte(0xAF);
}

// scans rcx words (m32) at [rdi] for occurrence of eax
void Assembler::repne_scanl() {
  // REPNE/REPNZ
  emit_byte(0xF2);
  // SCASL
  emit_byte(0xAF);
}
// SETcc r/m8 (0x0F 0x90+cc): set dst's low byte to 1 if condition cc holds,
// else 0. The 'true' argument flags a byte-sized register operand.
void Assembler::setb(Condition cc, Register dst) {
  assert(0 <= cc && cc < 16, "illegal cc");
  int encode = prefix_and_encode(dst->encoding(), true);
  emit_byte(0x0F);
  emit_byte(0x90 | cc);
  emit_byte(0xC0 | encode);
}

// CLFLUSH m8 (0x0F 0xAE /7): flush the cache line containing adr.
// NOTE(review): unlike ldmxcsr/stmxcsr below there is no InstructionMark
// here — confirm no caller relies on instruction-start bookkeeping for it.
void Assembler::clflush(Address adr) {
  prefix(adr);
  emit_byte(0x0F);
  emit_byte(0xAE);
  emit_operand(rdi, adr);
}
// CALL rel32 to a label. Bound labels get their (negative) displacement
// emitted immediately; unbound labels emit a zero displacement and register
// a patch site to be filled in when the label binds.
void Assembler::call(Label& L, relocInfo::relocType rtype) {
  if (L.is_bound()) {
    const int long_size = 5;
    int offs = (int)( target(L) - pc() );
    assert(offs <= 0, "assembler error");
    InstructionMark im(this);
    // 1110 1000 #32-bit disp
    emit_byte(0xE8);
    emit_data(offs - long_size, rtype, disp32_operand);
  } else {
    InstructionMark im(this);
    // 1110 1000 #32-bit disp
    L.add_patch_at(code(), locator());

    emit_byte(0xE8);
    emit_data(int(0), rtype, disp32_operand);
  }
}

// CALL rel32 to an absolute address; the target must be within a signed
// 32-bit displacement of the emitted instruction.
void Assembler::call_literal(address entry, RelocationHolder const& rspec) {
  assert(entry != NULL, "call most probably wrong");
  InstructionMark im(this);
  emit_byte(0xE8);
  // Displacement is relative to the end of the 5-byte instruction.
  intptr_t disp = entry - (_code_pos + sizeof(int32_t));
  assert(is_simm32(disp), "must be 32bit offset (call2)");
  // Technically, should use call32_operand, but this format is
  // implied by the fact that we're emitting a call instruction.
  emit_data((int) disp, rspec, disp32_operand);
}
// CALL r64 (0xFF /2, register form).
void Assembler::call(Register dst) {
  // This was originally using a 32bit register encoding
  // and surely we want 64bit!
  // this is a 32bit encoding but in 64bit mode the default
  // operand size is 64bit so there is no need for the
  // wide prefix. So prefix only happens if we use the
  // new registers. Much like push/pop.
  int encode = prefixq_and_encode(dst->encoding());
  emit_byte(0xFF);
  emit_byte(0xD0 | encode);
}

// CALL m64 (0xFF /2; rdx supplies the /2 opcode extension).
void Assembler::call(Address adr) {
  InstructionMark im(this);
  prefix(adr);
  emit_byte(0xFF);
  emit_operand(rdx, adr);
}
// JMP r64 (0xFF /4, register form; default 64-bit operand size in long mode).
void Assembler::jmp(Register reg) {
  int encode = prefix_and_encode(reg->encoding());
  emit_byte(0xFF);
  emit_byte(0xE0 | encode);
}

// JMP m64 (0xFF /4; rsp supplies the /4 opcode extension).
void Assembler::jmp(Address adr) {
  InstructionMark im(this);
  prefix(adr);
  emit_byte(0xFF);
  emit_operand(rsp, adr);
}

// JMP rel32 to an absolute address; the target must be within a signed
// 32-bit displacement of the emitted instruction.
void Assembler::jmp_literal(address dest, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_byte(0xE9);
  assert(dest != NULL, "must have a target");
  // Displacement is relative to the end of the 5-byte instruction.
  intptr_t disp = dest - (_code_pos + sizeof(int32_t));
  assert(is_simm32(disp), "must be 32bit offset (jmp)");
  emit_data(disp, rspec.reloc(), call32_operand);
}
// JMP to a label. Bound labels get the short rel8 form (0xEB) when the
// displacement fits and no relocation is needed, otherwise rel32 (0xE9).
// Unbound labels always get rel32 with a patch site.
void Assembler::jmp(Label& L, relocInfo::relocType rtype) {
  if (L.is_bound()) {
    address entry = target(L);
    assert(entry != NULL, "jmp most probably wrong");
    InstructionMark im(this);
    const int short_size = 2;
    const int long_size = 5;
    intptr_t offs = entry - _code_pos;
    if (rtype == relocInfo::none && is8bit(offs - short_size)) {
      emit_byte(0xEB);
      emit_byte((offs - short_size) & 0xFF);
    } else {
      emit_byte(0xE9);
      emit_long(offs - long_size);
    }
  } else {
    // By default, forward jumps are always 32-bit displacements, since
    // we can't yet know where the label will be bound. If you're sure that
    // the forward jump will not run beyond 256 bytes, use jmpb to
    // force an 8-bit displacement.
    InstructionMark im(this);
    relocate(rtype);
    L.add_patch_at(code(), locator());
    emit_byte(0xE9);
    emit_long(0);
  }
}
3028 void Assembler::jmpb(Label& L) {
3029 if (L.is_bound()) {
3030 const int short_size = 2;
3031 address entry = target(L);
3032 assert(is8bit((entry - _code_pos) + short_size),
3033 "Dispacement too large for a short jmp");
3034 assert(entry != NULL, "jmp most probably wrong");
3035 intptr_t offs = entry - _code_pos;
3036 emit_byte(0xEB);
3037 emit_byte((offs - short_size) & 0xFF);
3038 } else {
3039 InstructionMark im(this);
3040 L.add_patch_at(code(), locator());
3041 emit_byte(0xEB);
3042 emit_byte(0);
3043 }
3044 }
// Jcc to a label. Bound labels use the short rel8 form (0x70|cc) when the
// displacement fits and no relocation is needed, otherwise the rel32 form
// (0x0F 0x80|cc). Unbound labels always get rel32 with a patch site.
void Assembler::jcc(Condition cc, Label& L, relocInfo::relocType rtype) {
  InstructionMark im(this);
  relocate(rtype);
  assert((0 <= cc) && (cc < 16), "illegal cc");
  if (L.is_bound()) {
    address dst = target(L);
    assert(dst != NULL, "jcc most probably wrong");

    const int short_size = 2;
    const int long_size = 6;
    intptr_t offs = (intptr_t)dst - (intptr_t)_code_pos;
    if (rtype == relocInfo::none && is8bit(offs - short_size)) {
      // 0111 tttn #8-bit disp
      emit_byte(0x70 | cc);
      emit_byte((offs - short_size) & 0xFF);
    } else {
      // 0000 1111 1000 tttn #32-bit disp
      assert(is_simm32(offs - long_size),
             "must be 32bit offset (call4)");
      emit_byte(0x0F);
      emit_byte(0x80 | cc);
      emit_long(offs - long_size);
    }
  } else {
    // Note: could eliminate cond. jumps to this jump if condition
    // is the same however, seems to be rather unlikely case.
    // Note: use jccb() if label to be bound is very close to get
    // an 8-bit displacement
    L.add_patch_at(code(), locator());
    emit_byte(0x0F);
    emit_byte(0x80 | cc);
    emit_long(0);
  }
}

// Jcc rel8: forced short conditional jump; the bound-label displacement
// must fit in 8 bits.
void Assembler::jccb(Condition cc, Label& L) {
  if (L.is_bound()) {
    const int short_size = 2;
    const int long_size = 6;
    address entry = target(L);
    assert(is8bit((intptr_t)entry - ((intptr_t)_code_pos + short_size)),
           "Dispacement too large for a short jmp");
    intptr_t offs = (intptr_t)entry - (intptr_t)_code_pos;
    // 0111 tttn #8-bit disp
    emit_byte(0x70 | cc);
    emit_byte((offs - short_size) & 0xFF);
  } else {
    InstructionMark im(this);
    L.add_patch_at(code(), locator());
    emit_byte(0x70 | cc);
    emit_byte(0);
  }
}
// FP instructions

// FXSAVE m512byte (0x0F 0xAE /0): save x87/MMX/SSE state.
void Assembler::fxsave(Address dst) {
  prefixq(dst);
  emit_byte(0x0F);
  emit_byte(0xAE);
  emit_operand(as_Register(0), dst);
}

// FXRSTOR m512byte (0x0F 0xAE /1): restore x87/MMX/SSE state.
void Assembler::fxrstor(Address src) {
  prefixq(src);
  emit_byte(0x0F);
  emit_byte(0xAE);
  emit_operand(as_Register(1), src);
}

// LDMXCSR m32 (0x0F 0xAE /2): load the MXCSR control/status register.
void Assembler::ldmxcsr(Address src) {
  InstructionMark im(this);
  prefix(src);
  emit_byte(0x0F);
  emit_byte(0xAE);
  emit_operand(as_Register(2), src);
}

// STMXCSR m32 (0x0F 0xAE /3): store the MXCSR control/status register.
void Assembler::stmxcsr(Address dst) {
  InstructionMark im(this);
  prefix(dst);
  emit_byte(0x0F);
  emit_byte(0xAE);
  emit_operand(as_Register(3), dst);
}
// Scalar single-precision SSE arithmetic: 0xF3 prefix + 0x0F <op>.

// ADDSS xmm, xmm (F3 0F 58).
void Assembler::addss(XMMRegister dst, XMMRegister src) {
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x58);
  emit_byte(0xC0 | encode);
}

// ADDSS xmm, m32.
void Assembler::addss(XMMRegister dst, Address src) {
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x58);
  emit_operand(dst, src);
}

// SUBSS xmm, xmm (F3 0F 5C).
void Assembler::subss(XMMRegister dst, XMMRegister src) {
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x5C);
  emit_byte(0xC0 | encode);
}

// SUBSS xmm, m32.
void Assembler::subss(XMMRegister dst, Address src) {
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x5C);
  emit_operand(dst, src);
}

// MULSS xmm, xmm (F3 0F 59).
void Assembler::mulss(XMMRegister dst, XMMRegister src) {
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x59);
  emit_byte(0xC0 | encode);
}

// MULSS xmm, m32.
void Assembler::mulss(XMMRegister dst, Address src) {
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x59);
  emit_operand(dst, src);
}

// DIVSS xmm, xmm (F3 0F 5E).
void Assembler::divss(XMMRegister dst, XMMRegister src) {
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x5E);
  emit_byte(0xC0 | encode);
}

// DIVSS xmm, m32.
void Assembler::divss(XMMRegister dst, Address src) {
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x5E);
  emit_operand(dst, src);
}
// Scalar double-precision SSE2 arithmetic: 0xF2 prefix + 0x0F <op>.

// ADDSD xmm, xmm (F2 0F 58).
void Assembler::addsd(XMMRegister dst, XMMRegister src) {
  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x58);
  emit_byte(0xC0 | encode);
}

// ADDSD xmm, m64.
void Assembler::addsd(XMMRegister dst, Address src) {
  InstructionMark im(this);
  emit_byte(0xF2);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x58);
  emit_operand(dst, src);
}

// SUBSD xmm, xmm (F2 0F 5C).
void Assembler::subsd(XMMRegister dst, XMMRegister src) {
  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x5C);
  emit_byte(0xC0 | encode);
}

// SUBSD xmm, m64.
void Assembler::subsd(XMMRegister dst, Address src) {
  InstructionMark im(this);
  emit_byte(0xF2);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x5C);
  emit_operand(dst, src);
}

// MULSD xmm, xmm (F2 0F 59).
void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x59);
  emit_byte(0xC0 | encode);
}

// MULSD xmm, m64.
void Assembler::mulsd(XMMRegister dst, Address src) {
  InstructionMark im(this);
  emit_byte(0xF2);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x59);
  emit_operand(dst, src);
}

// DIVSD xmm, xmm (F2 0F 5E).
void Assembler::divsd(XMMRegister dst, XMMRegister src) {
  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x5E);
  emit_byte(0xC0 | encode);
}

// DIVSD xmm, m64.
void Assembler::divsd(XMMRegister dst, Address src) {
  InstructionMark im(this);
  emit_byte(0xF2);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x5E);
  emit_operand(dst, src);
}

// SQRTSD xmm, xmm (F2 0F 51).
void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x51);
  emit_byte(0xC0 | encode);
}

// SQRTSD xmm, m64.
void Assembler::sqrtsd(XMMRegister dst, Address src) {
  InstructionMark im(this);
  emit_byte(0xF2);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x51);
  emit_operand(dst, src);
}
// XORPS xmm, xmm (0F 57): bitwise XOR of packed singles.
void Assembler::xorps(XMMRegister dst, XMMRegister src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x57);
  emit_byte(0xC0 | encode);
}

// XORPS xmm, m128.
void Assembler::xorps(XMMRegister dst, Address src) {
  InstructionMark im(this);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x57);
  emit_operand(dst, src);
}

// XORPD xmm, xmm (66 0F 57): same as xorps with the 0x66 prefix.
void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
  emit_byte(0x66);
  xorps(dst, src);
}

// XORPD xmm, m128.
void Assembler::xorpd(XMMRegister dst, Address src) {
  InstructionMark im(this);
  emit_byte(0x66);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x57);
  emit_operand(dst, src);
}
// SSE/SSE2 conversions. Naming: trailing l/q gives the integer operand
// width (32/64-bit, the latter via REX.W); "tt" variants truncate.

// CVTSI2SS xmm, r32 (F3 0F 2A).
void Assembler::cvtsi2ssl(XMMRegister dst, Register src) {
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2A);
  emit_byte(0xC0 | encode);
}

// CVTSI2SS xmm, r64 (F3 REX.W 0F 2A).
void Assembler::cvtsi2ssq(XMMRegister dst, Register src) {
  emit_byte(0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2A);
  emit_byte(0xC0 | encode);
}

// CVTSI2SD xmm, r32 (F2 0F 2A).
void Assembler::cvtsi2sdl(XMMRegister dst, Register src) {
  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2A);
  emit_byte(0xC0 | encode);
}

// CVTSI2SD xmm, r64 (F2 REX.W 0F 2A).
void Assembler::cvtsi2sdq(XMMRegister dst, Register src) {
  emit_byte(0xF2);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2A);
  emit_byte(0xC0 | encode);
}

// CVTTSS2SI r32, xmm (F3 0F 2C): truncating convert.
void Assembler::cvttss2sil(Register dst, XMMRegister src) {
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2C);
  emit_byte(0xC0 | encode);
}

// CVTTSS2SI r64, xmm (F3 REX.W 0F 2C).
void Assembler::cvttss2siq(Register dst, XMMRegister src) {
  emit_byte(0xF3);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2C);
  emit_byte(0xC0 | encode);
}

// CVTTSD2SI r32, xmm (F2 0F 2C).
void Assembler::cvttsd2sil(Register dst, XMMRegister src) {
  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2C);
  emit_byte(0xC0 | encode);
}

// CVTTSD2SI r64, xmm (F2 REX.W 0F 2C).
void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
  emit_byte(0xF2);
  int encode = prefixq_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x2C);
  emit_byte(0xC0 | encode);
}

// CVTSS2SD xmm, xmm (F3 0F 5A): single -> double.
void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x5A);
  emit_byte(0xC0 | encode);
}

// CVTDQ2PD xmm, xmm (F3 0F E6): packed int32 -> packed double.
void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) {
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0xE6);
  emit_byte(0xC0 | encode);
}

// CVTDQ2PS xmm, xmm (0F 5B): packed int32 -> packed single.
void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) {
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x5B);
  emit_byte(0xC0 | encode);
}

// CVTSD2SS xmm, xmm (F2 0F 5A): double -> single.
void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
  emit_byte(0xF2);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x5A);
  emit_byte(0xC0 | encode);
}

// PUNPCKLBW xmm, xmm (66 0F 60): interleave low bytes.
void Assembler::punpcklbw(XMMRegister dst, XMMRegister src) {
  emit_byte(0x66);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x60);
  emit_byte(0xC0 | encode);
}
// Implementation of MacroAssembler

// On 32 bit it returns a vanilla displacement on 64 bit is a rip relative displacement
Address MacroAssembler::as_Address(AddressLiteral adr) {
  assert(!adr.is_lval(), "must be rval");
  assert(reachable(adr), "must be");
  return Address((int)(intptr_t)(adr.target() - pc()), adr.target(), adr.reloc());
}

// Materialize an ArrayAddress as an Address. On 64 bit the base must be
// loaded into rscratch1 first (so rscratch1 is clobbered here); 32 bit can
// encode the absolute array address directly.
Address MacroAssembler::as_Address(ArrayAddress adr) {
#ifdef _LP64
  AddressLiteral base = adr.base();
  lea(rscratch1, base);
  Address index = adr.index();
  assert(index._disp == 0, "must not have disp"); // maybe it can?
  Address array(rscratch1, index._index, index._scale, index._disp);
  return array;
#else
  return Address::make_array(adr);
#endif // _LP64

}
void MacroAssembler::fat_nop() {
  // A 5 byte nop that is safe for patching (see patch_verified_entry)
  // Recommened sequence from 'Software Optimization Guide for the AMD
  // Hammer Processor'
  emit_byte(0x66);
  emit_byte(0x66);
  emit_byte(0x90);
  emit_byte(0x66);
  emit_byte(0x90);
}
// Maps each condition code to its logical negation, indexed by the raw
// condition-code value (0x0..0xf).  Used by jump_cc to reverse the sense
// of a branch when the target is out of rip-relative range.
static Assembler::Condition reverse[] = {
    Assembler::noOverflow   /* overflow      = 0x0 */ ,
    Assembler::overflow     /* noOverflow    = 0x1 */ ,
    Assembler::aboveEqual   /* carrySet      = 0x2, below = 0x2 */ ,
    Assembler::below        /* aboveEqual    = 0x3, carryClear = 0x3 */ ,
    Assembler::notZero      /* zero          = 0x4, equal = 0x4 */ ,
    Assembler::zero         /* notZero       = 0x5, notEqual = 0x5 */ ,
    Assembler::above        /* belowEqual    = 0x6 */ ,
    Assembler::belowEqual   /* above         = 0x7 */ ,
    Assembler::positive     /* negative      = 0x8 */ ,
    Assembler::negative     /* positive      = 0x9 */ ,
    Assembler::noParity     /* parity        = 0xa */ ,
    Assembler::parity       /* noParity      = 0xb */ ,
    Assembler::greaterEqual /* less          = 0xc */ ,
    Assembler::less         /* greaterEqual  = 0xd */ ,
    Assembler::greater      /* lessEqual     = 0xe */ ,
    Assembler::lessEqual    /* greater       = 0xf, */
};
3471 // 32bit can do a case table jump in one instruction but we no longer allow the base
3472 // to be installed in the Address class
// Indirect jump through a table entry described by an ArrayAddress.
// 32bit can do a case table jump in one instruction but we no longer allow
// the base to be installed in the Address class, so on 64 bit the base is
// materialized into rscratch1 first (clobbers rscratch1).
void MacroAssembler::jump(ArrayAddress entry) {
#ifdef _LP64
  lea(rscratch1, entry.base());
  Address dispatch = entry.index();
  assert(dispatch._base == noreg, "must be");
  dispatch._base = rscratch1;   // splice the materialized base into the index address
  jmp(dispatch);
#else
  jmp(as_Address(entry));
#endif // _LP64
}
3485 void MacroAssembler::jump(AddressLiteral dst) {
3486 if (reachable(dst)) {
3487 jmp_literal(dst.target(), dst.rspec());
3488 } else {
3489 lea(rscratch1, dst);
3490 jmp(rscratch1);
3491 }
3492 }
// Conditional jump to an absolute address.  If the target is reachable,
// emits a short (2-byte) or near (6-byte) Jcc directly; otherwise reverses
// the condition with a short branch around an unconditional register jump
// through rscratch1 (clobbered in that case).
void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst) {
  if (reachable(dst)) {
    InstructionMark im(this);
    relocate(dst.reloc());
    const int short_size = 2;
    const int long_size = 6;
    int offs = (intptr_t)dst.target() - ((intptr_t)_code_pos);
    // Short form only when there is no relocation (a relocated branch must
    // keep the 32-bit displacement so patching can always reach).
    if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) {
      // 0111 tttn #8-bit disp
      emit_byte(0x70 | cc);
      emit_byte((offs - short_size) & 0xFF);
    } else {
      // 0000 1111 1000 tttn #32-bit disp
      emit_byte(0x0F);
      emit_byte(0x80 | cc);
      emit_long(offs - long_size);
    }
  } else {
#ifdef ASSERT
    warning("reversing conditional branch");
#endif /* ASSERT */
    Label skip;
    jccb(reverse[cc], skip);   // branch around on the opposite condition
    lea(rscratch1, dst);
    Assembler::jmp(rscratch1);
    bind(skip);
  }
}
3523 // Wouldn't need if AddressLiteral version had new name
// Forward to the base assembler.  Wouldn't need if AddressLiteral version
// had a new name (the overload set forces these trampolines to exist).
void MacroAssembler::call(Label& L, relocInfo::relocType rtype) {
  Assembler::call(L, rtype);
}
3528 // Wouldn't need if AddressLiteral version had new name
// Forward to the base assembler.  Wouldn't need if AddressLiteral version
// had a new name (the overload set forces these trampolines to exist).
void MacroAssembler::call(Register entry) {
  Assembler::call(entry);
}
3533 void MacroAssembler::call(AddressLiteral entry) {
3534 if (reachable(entry)) {
3535 Assembler::call_literal(entry.target(), entry.rspec());
3536 } else {
3537 lea(rscratch1, entry);
3538 Assembler::call(rscratch1);
3539 }
3540 }
3542 void MacroAssembler::cmp8(AddressLiteral src1, int8_t src2) {
3543 if (reachable(src1)) {
3544 cmpb(as_Address(src1), src2);
3545 } else {
3546 lea(rscratch1, src1);
3547 cmpb(Address(rscratch1, 0), src2);
3548 }
3549 }
3551 void MacroAssembler::cmp32(AddressLiteral src1, int32_t src2) {
3552 if (reachable(src1)) {
3553 cmpl(as_Address(src1), src2);
3554 } else {
3555 lea(rscratch1, src1);
3556 cmpl(Address(rscratch1, 0), src2);
3557 }
3558 }
3560 void MacroAssembler::cmp32(Register src1, AddressLiteral src2) {
3561 if (reachable(src2)) {
3562 cmpl(src1, as_Address(src2));
3563 } else {
3564 lea(rscratch1, src2);
3565 cmpl(src1, Address(rscratch1, 0));
3566 }
3567 }
// Pointer-sized compare of register src1 against an AddressLiteral.
// If src2 is an lval, the literal ADDRESS itself is the comparand;
// otherwise the VALUE stored at that address is compared.
// Clobbers rscratch1 on 64 bit except in the reachable rval case.
void MacroAssembler::cmpptr(Register src1, AddressLiteral src2) {
#ifdef _LP64
  if (src2.is_lval()) {
    movptr(rscratch1, src2);           // load the literal address
    Assembler::cmpq(src1, rscratch1);
  } else if (reachable(src2)) {
    cmpq(src1, as_Address(src2));      // rip-relative memory operand
  } else {
    lea(rscratch1, src2);
    Assembler::cmpq(src1, Address(rscratch1, 0));
  }
#else
  if (src2.is_lval()) {
    cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
  } else {
    cmpl(src1, as_Address(src2));
  }
#endif // _LP64
}
// Compare the pointer at src1 against a literal address (src2 must be an
// lval; a mem-mem value compare is not encodable).  Clobbers rscratch1
// on 64 bit.
void MacroAssembler::cmpptr(Address src1, AddressLiteral src2) {
  assert(src2.is_lval(), "not a mem-mem compare");
#ifdef _LP64
  // moves src2's literal address
  movptr(rscratch1, src2);
  Assembler::cmpq(src1, rscratch1);
#else
  cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
#endif // _LP64
}
// 64-bit compare of src1 against the value stored at src2 (rval only;
// lval compares go through cmpptr).  Clobbers rscratch1 when src2 is out
// of rip-relative range.
void MacroAssembler::cmp64(Register src1, AddressLiteral src2) {
  assert(!src2.is_lval(), "should use cmpptr");

  if (reachable(src2)) {
#ifdef _LP64
    cmpq(src1, as_Address(src2));
#else
    ShouldNotReachHere();   // no 64-bit memory compare on 32 bit
#endif // _LP64
  } else {
    lea(rscratch1, src2);
    Assembler::cmpq(src1, Address(rscratch1, 0));
  }
}
// Pointer-sized compare-and-exchange of reg against the word at adr.
// NOTE: the caller must emit any required lock prefix separately.
// Clobbers rscratch1 when adr is out of rip-relative range.
void MacroAssembler::cmpxchgptr(Register reg, AddressLiteral adr) {
  if (reachable(adr)) {
#ifdef _LP64
    cmpxchgq(reg, as_Address(adr));
#else
    cmpxchgl(reg, as_Address(adr));
#endif // _LP64
  } else {
    lea(rscratch1, adr);
    cmpxchgq(reg, Address(rscratch1, 0));
  }
}
3628 void MacroAssembler::incrementl(AddressLiteral dst) {
3629 if (reachable(dst)) {
3630 incrementl(as_Address(dst));
3631 } else {
3632 lea(rscratch1, dst);
3633 incrementl(Address(rscratch1, 0));
3634 }
3635 }
// Increment the 32-bit word at an ArrayAddress.  Clobbers rscratch1 on
// 64 bit (via as_Address(ArrayAddress)).
void MacroAssembler::incrementl(ArrayAddress dst) {
  incrementl(as_Address(dst));
}
// Pointer-width lea: leaq on 64 bit, leal on 32 bit.
void MacroAssembler::lea(Register dst, Address src) {
#ifdef _LP64
  leaq(dst, src);
#else
  leal(dst, src);
#endif // _LP64
}
// Materialize the literal's target address in dst as a pointer-width
// immediate move, carrying the literal's relocation info.
void MacroAssembler::lea(Register dst, AddressLiteral src) {
#ifdef _LP64
  mov_literal64(dst, (intptr_t)src.target(), src.rspec());
#else
  mov_literal32(dst, (intptr_t)src.target(), src.rspec());
#endif // _LP64
}
3657 void MacroAssembler::mov32(AddressLiteral dst, Register src) {
3658 if (reachable(dst)) {
3659 movl(as_Address(dst), src);
3660 } else {
3661 lea(rscratch1, dst);
3662 movl(Address(rscratch1, 0), src);
3663 }
3664 }
3666 void MacroAssembler::mov32(Register dst, AddressLiteral src) {
3667 if (reachable(src)) {
3668 movl(dst, as_Address(src));
3669 } else {
3670 lea(rscratch1, src);
3671 movl(dst, Address(rscratch1, 0));
3672 }
3673 }
3675 void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src) {
3676 if (reachable(src)) {
3677 if (UseXmmLoadAndClearUpper) {
3678 movsd (dst, as_Address(src));
3679 } else {
3680 movlpd(dst, as_Address(src));
3681 }
3682 } else {
3683 lea(rscratch1, src);
3684 if (UseXmmLoadAndClearUpper) {
3685 movsd (dst, Address(rscratch1, 0));
3686 } else {
3687 movlpd(dst, Address(rscratch1, 0));
3688 }
3689 }
3690 }
3692 void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src) {
3693 if (reachable(src)) {
3694 movss(dst, as_Address(src));
3695 } else {
3696 lea(rscratch1, src);
3697 movss(dst, Address(rscratch1, 0));
3698 }
3699 }
// Load an oop constant into dst as a 64-bit immediate, recorded with an
// immediate-oop relocation so the GC can find and update it.
void MacroAssembler::movoop(Register dst, jobject obj) {
  mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate());
}
// Store an oop constant to memory.  There is no 64-bit imm-to-mem move,
// so the oop is staged through rscratch1 (clobbered).
void MacroAssembler::movoop(Address dst, jobject obj) {
  mov_literal64(rscratch1, (intptr_t)obj, oop_Relocation::spec_for_immediate());
  movq(dst, rscratch1);
}
// Pointer-sized load from an AddressLiteral.  An lval loads the literal
// ADDRESS itself (as an immediate); an rval loads the VALUE stored there.
// Clobbers rscratch1 on 64 bit when an rval target is out of range.
void MacroAssembler::movptr(Register dst, AddressLiteral src) {
#ifdef _LP64
  if (src.is_lval()) {
    mov_literal64(dst, (intptr_t)src.target(), src.rspec());
  } else {
    if (reachable(src)) {
      movq(dst, as_Address(src));
    } else {
      lea(rscratch1, src);
      movq(dst, Address(rscratch1,0));
    }
  }
#else
  if (src.is_lval()) {
    mov_literal32(dst, (intptr_t)src.target(), src.rspec());
  } else {
    movl(dst, as_Address(src));
  }
#endif // _LP64
}
// Pointer-sized store of src to an ArrayAddress element.
// Clobbers rscratch1 on 64 bit (via as_Address(ArrayAddress)).
void MacroAssembler::movptr(ArrayAddress dst, Register src) {
#ifdef _LP64
  movq(as_Address(dst), src);
#else
  movl(as_Address(dst), src);
#endif // _LP64
}
// Push an oop constant onto the stack.  On 64 bit there is no push-imm64,
// so the oop is staged through rscratch1 (clobbered).
void MacroAssembler::pushoop(jobject obj) {
#ifdef _LP64
  movoop(rscratch1, obj);
  pushq(rscratch1);
#else
  push_literal32((int32_t)obj, oop_Relocation::spec_for_immediate());
#endif // _LP64
}
3748 void MacroAssembler::pushptr(AddressLiteral src) {
3749 #ifdef _LP64
3750 lea(rscratch1, src);
3751 if (src.is_lval()) {
3752 pushq(rscratch1);
3753 } else {
3754 pushq(Address(rscratch1, 0));
3755 }
3756 #else
3757 if (src.is_lval()) {
3758 push_literal((int32_t)src.target(), src.rspec());
3759 else {
3760 pushl(as_Address(src));
3761 }
3762 #endif // _LP64
3763 }
3765 void MacroAssembler::ldmxcsr(AddressLiteral src) {
3766 if (reachable(src)) {
3767 Assembler::ldmxcsr(as_Address(src));
3768 } else {
3769 lea(rscratch1, src);
3770 Assembler::ldmxcsr(Address(rscratch1, 0));
3771 }
3772 }
3774 void MacroAssembler::movlpd(XMMRegister dst, AddressLiteral src) {
3775 if (reachable(src)) {
3776 movlpd(dst, as_Address(src));
3777 } else {
3778 lea(rscratch1, src);
3779 movlpd(dst, Address(rscratch1, 0));
3780 }
3781 }
3783 void MacroAssembler::movss(XMMRegister dst, AddressLiteral src) {
3784 if (reachable(src)) {
3785 movss(dst, as_Address(src));
3786 } else {
3787 lea(rscratch1, src);
3788 movss(dst, Address(rscratch1, 0));
3789 }
3790 }
3791 void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src) {
3792 if (reachable(src)) {
3793 xorpd(dst, as_Address(src));
3794 } else {
3795 lea(rscratch1, src);
3796 xorpd(dst, Address(rscratch1, 0));
3797 }
3798 }
3800 void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src) {
3801 if (reachable(src)) {
3802 xorps(dst, as_Address(src));
3803 } else {
3804 lea(rscratch1, src);
3805 xorps(dst, Address(rscratch1, 0));
3806 }
3807 }
// Null-check the oop in reg.  When the later access at 'offset' would not
// itself fault on NULL (needs_explicit_null_check), touch M[reg] now so
// the OS raises the exception here; otherwise emit nothing and rely on
// the subsequent access to fault.
void MacroAssembler::null_check(Register reg, int offset) {
  if (needs_explicit_null_check(offset)) {
    // provoke OS NULL exception if reg = NULL by
    // accessing M[reg] w/o changing any (non-CC) registers
    cmpq(rax, Address(reg, 0));
    // Note: should probably use testl(rax, Address(reg, 0));
    //       may be shorter code (however, this version of
    //       testl needs to be implemented first)
  } else {
    // nothing to do, (later) access of M[reg + offset]
    // will provoke OS NULL exception if reg = NULL
  }
}
// Zero-extending byte load.  Returns the code offset of the load so the
// caller can register it for implicit null-check purposes.
int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
  int off = offset();
  movzbl(dst, src);
  return off;
}
// Zero-extending 16-bit load.  Returns the code offset of the load so the
// caller can register it for implicit null-check purposes.
int MacroAssembler::load_unsigned_word(Register dst, Address src) {
  int off = offset();
  movzwl(dst, src);
  return off;
}
// Sign-extending byte load.  Returns the code offset of the load so the
// caller can register it for implicit null-check purposes.
int MacroAssembler::load_signed_byte(Register dst, Address src) {
  int off = offset();
  movsbl(dst, src);
  return off;
}
// Sign-extending 16-bit load.  Returns the code offset of the load so the
// caller can register it for implicit null-check purposes.
int MacroAssembler::load_signed_word(Register dst, Address src) {
  int off = offset();
  movswl(dst, src);
  return off;
}
// Add 'value' to reg (32-bit), choosing the smallest encoding.
// min_jint is handled first because -min_jint overflows and cannot be
// routed through decrementl.
void MacroAssembler::incrementl(Register reg, int value) {
  if (value == min_jint) { addl(reg, value); return; }
  if (value <  0) { decrementl(reg, -value); return; }
  if (value == 0) { ; return; }
  if (value == 1 && UseIncDec) { incl(reg) ; return; }
  /* else */      { addl(reg, value)       ; return; }
}
// Subtract 'value' from reg (32-bit), choosing the smallest encoding.
// min_jint is handled first because -min_jint overflows and cannot be
// routed through incrementl.
void MacroAssembler::decrementl(Register reg, int value) {
  if (value == min_jint) { subl(reg, value); return; }
  if (value <  0) { incrementl(reg, -value); return; }
  if (value == 0) { ; return; }
  if (value == 1 && UseIncDec) { decl(reg) ; return; }
  /* else */      { subl(reg, value)       ; return; }
}
// Add 'value' to reg (64-bit), choosing the smallest encoding.
// min_jint is handled first because -min_jint overflows the int argument.
void MacroAssembler::incrementq(Register reg, int value) {
  if (value == min_jint) { addq(reg, value); return; }
  if (value <  0) { decrementq(reg, -value); return; }
  if (value == 0) { ; return; }
  if (value == 1 && UseIncDec) { incq(reg) ; return; }
  /* else */      { addq(reg, value)       ; return; }
}
// Subtract 'value' from reg (64-bit), choosing the smallest encoding.
// min_jint is handled first because -min_jint overflows the int argument.
void MacroAssembler::decrementq(Register reg, int value) {
  if (value == min_jint) { subq(reg, value); return; }
  if (value <  0) { incrementq(reg, -value); return; }
  if (value == 0) { ; return; }
  if (value == 1 && UseIncDec) { decq(reg) ; return; }
  /* else */      { subq(reg, value)       ; return; }
}
// Add 'value' to the 32-bit word at dst, choosing the smallest encoding.
// min_jint is handled first because -min_jint overflows the int argument.
void MacroAssembler::incrementl(Address dst, int value) {
  if (value == min_jint) { addl(dst, value); return; }
  if (value <  0) { decrementl(dst, -value); return; }
  if (value == 0) { ; return; }
  if (value == 1 && UseIncDec) { incl(dst) ; return; }
  /* else */      { addl(dst, value)       ; return; }
}
// Subtract 'value' from the 32-bit word at dst, smallest encoding wins.
// min_jint is handled first because -min_jint overflows the int argument.
void MacroAssembler::decrementl(Address dst, int value) {
  if (value == min_jint) { subl(dst, value); return; }
  if (value <  0) { incrementl(dst, -value); return; }
  if (value == 0) { ; return; }
  if (value == 1 && UseIncDec) { decl(dst) ; return; }
  /* else */      { subl(dst, value)       ; return; }
}
// Add 'value' to the 64-bit word at dst, choosing the smallest encoding.
// min_jint is handled first because -min_jint overflows the int argument.
void MacroAssembler::incrementq(Address dst, int value) {
  if (value == min_jint) { addq(dst, value); return; }
  if (value <  0) { decrementq(dst, -value); return; }
  if (value == 0) { ; return; }
  if (value == 1 && UseIncDec) { incq(dst) ; return; }
  /* else */      { addq(dst, value)       ; return; }
}
// Subtract 'value' from the 64-bit word at dst, smallest encoding wins.
// min_jint is handled first because -min_jint overflows the int argument.
void MacroAssembler::decrementq(Address dst, int value) {
  if (value == min_jint) { subq(dst, value); return; }
  if (value <  0) { incrementq(dst, -value); return; }
  if (value == 0) { ; return; }
  if (value == 1 && UseIncDec) { decq(dst) ; return; }
  /* else */      { subq(dst, value)       ; return; }
}
3911 void MacroAssembler::align(int modulus) {
3912 if (offset() % modulus != 0) {
3913 nop(modulus - (offset() % modulus));
3914 }
3915 }
// Standard frame prologue: save caller's rbp, establish new frame pointer.
void MacroAssembler::enter() {
  pushq(rbp);
  movq(rbp, rsp);
}
// Standard frame epilogue: restores rsp from rbp and pops rbp.
void MacroAssembler::leave() {
  emit_byte(0xC9); // LEAVE
}
3926 // C++ bool manipulation
3928 void MacroAssembler::movbool(Register dst, Address src) {
3929 if(sizeof(bool) == 1)
3930 movb(dst, src);
3931 else if(sizeof(bool) == 2)
3932 movw(dst, src);
3933 else if(sizeof(bool) == 4)
3934 movl(dst, src);
3935 else {
3936 // unsupported
3937 ShouldNotReachHere();
3938 }
3939 }
3941 void MacroAssembler::movbool(Address dst, bool boolconst) {
3942 if(sizeof(bool) == 1)
3943 movb(dst, (int) boolconst);
3944 else if(sizeof(bool) == 2)
3945 movw(dst, (int) boolconst);
3946 else if(sizeof(bool) == 4)
3947 movl(dst, (int) boolconst);
3948 else {
3949 // unsupported
3950 ShouldNotReachHere();
3951 }
3952 }
3954 void MacroAssembler::movbool(Address dst, Register src) {
3955 if(sizeof(bool) == 1)
3956 movb(dst, src);
3957 else if(sizeof(bool) == 2)
3958 movw(dst, src);
3959 else if(sizeof(bool) == 4)
3960 movl(dst, src);
3961 else {
3962 // unsupported
3963 ShouldNotReachHere();
3964 }
3965 }
// Set condition codes from a C++ bool held in a register (zero flag set
// iff the bool is false).  Only the bytes that can hold the bool are
// tested; the 2-byte case is unimplemented.
void MacroAssembler::testbool(Register dst) {
  if(sizeof(bool) == 1)
    testb(dst, (int) 0xff);
  else if(sizeof(bool) == 2) {
    // need testw impl
    ShouldNotReachHere();
  } else if(sizeof(bool) == 4)
    testl(dst, dst);
  else {
    // unsupported
    ShouldNotReachHere();
  }
}
// Record the last Java frame in the current JavaThread's anchor so the
// runtime can walk the stack across a native/VM call.  last_java_fp and
// last_java_pc are optional; an invalid last_java_sp defaults to rsp.
// sp is stored LAST: a valid sp is what marks the anchor as set.
// Clobbers rscratch1 when last_java_pc is supplied.
void MacroAssembler::set_last_Java_frame(Register last_java_sp,
                                         Register last_java_fp,
                                         address last_java_pc) {
  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = rsp;
  }

  // last_java_fp is optional
  if (last_java_fp->is_valid()) {
    movq(Address(r15_thread, JavaThread::last_Java_fp_offset()),
         last_java_fp);
  }

  // last_java_pc is optional
  if (last_java_pc != NULL) {
    Address java_pc(r15_thread,
                    JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
    lea(rscratch1, InternalAddress(last_java_pc));
    movq(java_pc, rscratch1);
  }

  movq(Address(r15_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
}
// Clear the last-Java-frame anchor in the current JavaThread.  Zeroing sp
// is what invalidates the anchor; fp and pc are cleared on request.
void MacroAssembler::reset_last_Java_frame(bool clear_fp,
                                           bool clear_pc) {
  // we must set sp to zero to clear frame
  movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
  // must clear fp, so that compiled frames are not confused; it is
  // possible that we need it only for debugging
  if (clear_fp) {
    movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
  }

  if (clear_pc) {
    movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
  }
}
4022 // Implementation of call_VM versions
// Call a leaf C function (no Java-frame anchor, no exception check).
// Emits one of two call sites depending on whether rsp is already 16-byte
// aligned at runtime; arguments are assumed to be in registers already.
// On Win64, also allocates/frees the mandatory register-argument shadow area.
void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) {
  Label L, E;

#ifdef _WIN64
  // Windows always allocates space for it's register args
  assert(num_args <= 4, "only register arguments supported");
  subq(rsp, frame::arg_reg_save_area_bytes);
#endif

  // Align stack if necessary
  testl(rsp, 15);
  jcc(Assembler::zero, L);

  subq(rsp, 8);   // runtime-misaligned path: pad to 16-byte alignment
  {
    call(RuntimeAddress(entry_point));
  }
  addq(rsp, 8);
  jmp(E);

  bind(L);        // already aligned path
  {
    call(RuntimeAddress(entry_point));
  }

  bind(E);

#ifdef _WIN64
  // restore stack pointer
  addq(rsp, frame::arg_reg_save_area_bytes);
#endif

}
// Core call-into-VM sequence: sets the last-Java-frame anchor, makes the
// (stack-aligned) call, resets the anchor, optionally forwards a pending
// exception, and fetches an oop result from the thread if requested.
// The java_thread parameter is unused on 64 bit: r15 always holds the
// current thread.  An invalid last_java_sp defaults to rsp.
void MacroAssembler::call_VM_base(Register oop_result,
                                  Register java_thread,
                                  Register last_java_sp,
                                  address entry_point,
                                  int num_args,
                                  bool check_exceptions) {
  // determine last_java_sp register
  if (!last_java_sp->is_valid()) {
    last_java_sp = rsp;
  }

  // debugging support
  assert(num_args >= 0, "cannot have negative number of arguments");
  assert(r15_thread != oop_result,
         "cannot use the same register for java_thread & oop_result");
  assert(r15_thread != last_java_sp,
         "cannot use the same register for java_thread & last_java_sp");

  // set last Java frame before call

  // This sets last_Java_fp which is only needed from interpreted frames
  // and should really be done only from the interp_masm version before
  // calling the underlying call_VM. That doesn't happen yet so we set
  // last_Java_fp here even though some callers don't need it and
  // also clear it below.
  set_last_Java_frame(last_java_sp, rbp, NULL);

  {
    Label L, E;

    // Align stack if necessary
#ifdef _WIN64
    assert(num_args <= 4, "only register arguments supported");
    // Windows always allocates space for it's register args
    subq(rsp,  frame::arg_reg_save_area_bytes);
#endif
    testl(rsp, 15);
    jcc(Assembler::zero, L);

    subq(rsp, 8);   // runtime-misaligned path: pad to 16-byte alignment
    {
      call(RuntimeAddress(entry_point));
    }
    addq(rsp, 8);
    jmp(E);


    bind(L);        // already aligned path
    {
      call(RuntimeAddress(entry_point));
    }

    bind(E);

#ifdef _WIN64
    // restore stack pointer
    addq(rsp, frame::arg_reg_save_area_bytes);
#endif
  }

#ifdef ASSERT
  // Verify r15 really does hold the current thread across the call.
  pushq(rax);
  {
    Label L;
    get_thread(rax);
    cmpq(r15_thread, rax);
    jcc(Assembler::equal, L);
    stop("MacroAssembler::call_VM_base: register not callee saved?");
    bind(L);
  }
  popq(rax);
#endif

  // reset last Java frame
  // This really shouldn't have to clear fp set note above at the
  // call to set_last_Java_frame
  reset_last_Java_frame(true, false);

  check_and_handle_popframe(noreg);
  check_and_handle_earlyret(noreg);

  if (check_exceptions) {
    cmpq(Address(r15_thread, Thread::pending_exception_offset()), (int) NULL);
    // This used to conditionally jump to forward_exception however it is
    // possible if we relocate that the branch will not reach. So we must jump
    // around so we can always reach
    Label ok;
    jcc(Assembler::equal, ok);
    jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
    bind(ok);
  }

  // get oop result if there is one and reset the value in the thread
  if (oop_result->is_valid()) {
    movq(oop_result, Address(r15_thread, JavaThread::vm_result_offset()));
    movptr(Address(r15_thread, JavaThread::vm_result_offset()), NULL_WORD);
    verify_oop(oop_result, "broken oop in call_VM_base");
  }
}
// No-op here; subclasses (e.g. the interpreter masm) override to handle popframe.
void MacroAssembler::check_and_handle_popframe(Register java_thread) {}
// No-op here; subclasses (e.g. the interpreter masm) override to handle early return.
void MacroAssembler::check_and_handle_earlyret(Register java_thread) {}
// Glue between the trampoline-style call_VM entry points and call_VM_base:
// installs the thread as C argument 0 and computes last_Java_sp as the sp
// value just above the return address the trampoline pushed.
void MacroAssembler::call_VM_helper(Register oop_result,
                                    address entry_point,
                                    int num_args,
                                    bool check_exceptions) {
  // Java thread becomes first argument of C function
  movq(c_rarg0, r15_thread);

  // We've pushed one address, correct last_Java_sp
  leaq(rax, Address(rsp, wordSize));

  call_VM_base(oop_result, noreg, rax, entry_point, num_args,
               check_exceptions);
}
// call_VM with no Java arguments.  Uses a local call/ret trampoline so a
// real return address is on the stack for the VM to use as last_Java_pc.
void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             bool check_exceptions) {
  Label C, E;
  Assembler::call(C, relocInfo::none);   // push return address, enter trampoline
  jmp(E);

  bind(C);
  call_VM_helper(oop_result, entry_point, 0, check_exceptions);
  ret(0);

  bind(E);
}
// call_VM with one Java argument (placed in c_rarg1; c_rarg0 is reserved
// for the thread).  Uses a local call/ret trampoline so a real return
// address is on the stack for the VM to use as last_Java_pc.
void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  // rax is used by the trampoline/helper; the shuffle below must not
  // overwrite an argument before it is consumed.
  assert(rax != arg_1, "smashed argument");
  assert(c_rarg0 != arg_1, "smashed argument");

  Label C, E;
  Assembler::call(C, relocInfo::none);
  jmp(E);

  bind(C);
  // c_rarg0 is reserved for thread
  if (c_rarg1 != arg_1) {
    movq(c_rarg1, arg_1);
  }
  call_VM_helper(oop_result, entry_point, 1, check_exceptions);
  ret(0);

  bind(E);
}
// call_VM with two Java arguments (c_rarg1, c_rarg2; c_rarg0 is reserved
// for the thread).  The asserts guarantee the argument shuffle below
// cannot clobber a value before it is moved into place.
void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  assert(rax != arg_1, "smashed argument");
  assert(rax != arg_2, "smashed argument");
  assert(c_rarg0 != arg_1, "smashed argument");
  assert(c_rarg0 != arg_2, "smashed argument");
  assert(c_rarg1 != arg_2, "smashed argument");
  assert(c_rarg2 != arg_1, "smashed argument");

  Label C, E;
  Assembler::call(C, relocInfo::none);
  jmp(E);

  bind(C);
  // c_rarg0 is reserved for thread
  if (c_rarg1 != arg_1) {
    movq(c_rarg1, arg_1);
  }
  if (c_rarg2 != arg_2) {
    movq(c_rarg2, arg_2);
  }
  call_VM_helper(oop_result, entry_point, 2, check_exceptions);
  ret(0);

  bind(E);
}
// call_VM with three Java arguments (c_rarg1..c_rarg3; c_rarg0 is reserved
// for the thread).  The asserts guarantee the argument shuffle below
// cannot clobber a value before it is moved into place.
void MacroAssembler::call_VM(Register oop_result,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             Register arg_3,
                             bool check_exceptions) {
  assert(rax != arg_1, "smashed argument");
  assert(rax != arg_2, "smashed argument");
  assert(rax != arg_3, "smashed argument");
  assert(c_rarg0 != arg_1, "smashed argument");
  assert(c_rarg0 != arg_2, "smashed argument");
  assert(c_rarg0 != arg_3, "smashed argument");
  assert(c_rarg1 != arg_2, "smashed argument");
  assert(c_rarg1 != arg_3, "smashed argument");
  assert(c_rarg2 != arg_1, "smashed argument");
  assert(c_rarg2 != arg_3, "smashed argument");
  assert(c_rarg3 != arg_1, "smashed argument");
  assert(c_rarg3 != arg_2, "smashed argument");

  Label C, E;
  Assembler::call(C, relocInfo::none);
  jmp(E);

  bind(C);
  // c_rarg0 is reserved for thread
  if (c_rarg1 != arg_1) {
    movq(c_rarg1, arg_1);
  }
  if (c_rarg2 != arg_2) {
    movq(c_rarg2, arg_2);
  }
  if (c_rarg3 != arg_3) {
    movq(c_rarg3, arg_3);
  }
  call_VM_helper(oop_result, entry_point, 3, check_exceptions);
  ret(0);

  bind(E);
}
// call_VM with an explicit last_java_sp and pre-placed arguments:
// forwards directly to call_VM_base.
void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             int num_args,
                             bool check_exceptions) {
  call_VM_base(oop_result, noreg, last_java_sp, entry_point, num_args,
               check_exceptions);
}
// call_VM with explicit last_java_sp and one Java argument (c_rarg1;
// c_rarg0 is reserved for the thread).  Asserts guard the shuffle.
void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             bool check_exceptions) {
  assert(c_rarg0 != arg_1, "smashed argument");
  assert(c_rarg1 != last_java_sp, "smashed argument");
  // c_rarg0 is reserved for thread
  if (c_rarg1 != arg_1) {
    movq(c_rarg1, arg_1);
  }
  call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}
// call_VM with explicit last_java_sp and two Java arguments (c_rarg1,
// c_rarg2; c_rarg0 is reserved for the thread).  Asserts guard the shuffle.
void MacroAssembler::call_VM(Register oop_result,
                             Register last_java_sp,
                             address entry_point,
                             Register arg_1,
                             Register arg_2,
                             bool check_exceptions) {
  assert(c_rarg0 != arg_1, "smashed argument");
  assert(c_rarg0 != arg_2, "smashed argument");
  assert(c_rarg1 != arg_2, "smashed argument");
  assert(c_rarg1 != last_java_sp, "smashed argument");
  assert(c_rarg2 != arg_1, "smashed argument");
  assert(c_rarg2 != last_java_sp, "smashed argument");
  // c_rarg0 is reserved for thread
  if (c_rarg1 != arg_1) {
    movq(c_rarg1, arg_1);
  }
  if (c_rarg2 != arg_2) {
    movq(c_rarg2, arg_2);
  }
  call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}
4331 void MacroAssembler::call_VM(Register oop_result,
4332 Register last_java_sp,
4333 address entry_point,
4334 Register arg_1,
4335 Register arg_2,
4336 Register arg_3,
4337 bool check_exceptions) {
4338 assert(c_rarg0 != arg_1, "smashed argument");
4339 assert(c_rarg0 != arg_2, "smashed argument");
4340 assert(c_rarg0 != arg_3, "smashed argument");
4341 assert(c_rarg1 != arg_2, "smashed argument");
4342 assert(c_rarg1 != arg_3, "smashed argument");
4343 assert(c_rarg1 != last_java_sp, "smashed argument");
4344 assert(c_rarg2 != arg_1, "smashed argument");
4345 assert(c_rarg2 != arg_3, "smashed argument");
4346 assert(c_rarg2 != last_java_sp, "smashed argument");
4347 assert(c_rarg3 != arg_1, "smashed argument");
4348 assert(c_rarg3 != arg_2, "smashed argument");
4349 assert(c_rarg3 != last_java_sp, "smashed argument");
4350 // c_rarg0 is reserved for thread
4351 if (c_rarg1 != arg_1) {
4352 movq(c_rarg1, arg_1);
4353 }
4354 if (c_rarg2 != arg_2) {
4355 movq(c_rarg2, arg_2);
4356 }
4357 if (c_rarg3 != arg_3) {
4358 movq(c_rarg2, arg_3);
4359 }
4360 call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
4361 }
// Leaf call with pre-placed arguments: forward to call_VM_leaf_base.
void MacroAssembler::call_VM_leaf(address entry_point, int num_args) {
  call_VM_leaf_base(entry_point, num_args);
}
// Leaf call with one argument (c_rarg0; no thread argument for leaf calls).
void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1) {
  if (c_rarg0 != arg_1) {
    movq(c_rarg0, arg_1);
  }
  call_VM_leaf(entry_point, 1);
}
// Leaf call with two arguments (c_rarg0, c_rarg1).  Asserts guarantee the
// shuffle cannot clobber a value before it is moved into place.
void MacroAssembler::call_VM_leaf(address entry_point,
                                  Register arg_1,
                                  Register arg_2) {
  assert(c_rarg0 != arg_2, "smashed argument");
  assert(c_rarg1 != arg_1, "smashed argument");
  if (c_rarg0 != arg_1) {
    movq(c_rarg0, arg_1);
  }
  if (c_rarg1 != arg_2) {
    movq(c_rarg1, arg_2);
  }
  call_VM_leaf(entry_point, 2);
}
// Leaf call with three arguments (c_rarg0..c_rarg2).  Asserts guarantee
// the shuffle cannot clobber a value before it is moved into place.
void MacroAssembler::call_VM_leaf(address entry_point,
                                  Register arg_1,
                                  Register arg_2,
                                  Register arg_3) {
  assert(c_rarg0 != arg_2, "smashed argument");
  assert(c_rarg0 != arg_3, "smashed argument");
  assert(c_rarg1 != arg_1, "smashed argument");
  assert(c_rarg1 != arg_3, "smashed argument");
  assert(c_rarg2 != arg_1, "smashed argument");
  assert(c_rarg2 != arg_2, "smashed argument");
  if (c_rarg0 != arg_1) {
    movq(c_rarg0, arg_1);
  }
  if (c_rarg1 != arg_2) {
    movq(c_rarg1, arg_2);
  }
  if (c_rarg2 != arg_3) {
    movq(c_rarg2, arg_3);
  }
  call_VM_leaf(entry_point, 3);
}
4411 // Calls to C land
4412 //
4413 // When entering C land, the rbp & rsp of the last Java frame have to
4414 // be recorded in the (thread-local) JavaThread object. When leaving C
4415 // land, the last Java fp has to be reset to 0. This is required to
4416 // allow proper stack traversal.
// Card-table store check for the oop in register obj.  The content of
// register obj is destroyed afterwards (part_1 shifts it into a card index).
void MacroAssembler::store_check(Register obj) {
  store_check_part_1(obj);
  store_check_part_2(obj);
}
// Store check variant taking the store destination; dst is unused with a
// card-table barrier, so this just delegates.  Destroys obj.
void MacroAssembler::store_check(Register obj, Address dst) {
  store_check(obj);
}
4428 // split the store check operation so that other instructions can be
4429 // scheduled inbetween
// First half of the split store check (split so other instructions can be
// scheduled in between): turn the oop address into a card index.
void MacroAssembler::store_check_part_1(Register obj) {
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
  shrq(obj, CardTableModRefBS::card_shift);
}
// Second half of the split store check: dirty (zero) the card byte whose
// index part_1 left in obj.  byte_map_base acts as a displacement; when it
// does not fit in 32 bits it must be materialized via an ExternalAddress.
void MacroAssembler::store_check_part_2(Register obj) {
  BarrierSet* bs = Universe::heap()->barrier_set();
  assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
  CardTableModRefBS* ct = (CardTableModRefBS*)bs;
  assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");

  // The calculation for byte_map_base is as follows:
  // byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  // So this essentially converts an address to a displacement and
  // it will never need to be relocated. On 64bit however the value may be too
  // large for a 32bit displacement

  intptr_t disp = (intptr_t) ct->byte_map_base;
  if (is_simm32(disp)) {
    Address cardtable(noreg, obj, Address::times_1, disp);
    movb(cardtable, 0);
  } else {
    // By doing it as an ExternalAddress disp could be converted to a rip-relative
    // displacement and done in a single instruction given favorable mapping and
    // a smarter version of as_Address. Worst case it is two instructions which
    // is no worse off then loading disp into a register and doing as a simple
    // Address() as above.
    // We can't do as ExternalAddress as the only style since if disp == 0 we'll
    // assert since NULL isn't acceptable in a reloci (see 6644928). In any case
    // in some cases we'll get a single instruction version.

    ExternalAddress cardtable((address)disp);
    Address index(noreg, obj, Address::times_1);
    movb(as_Address(ArrayAddress(cardtable, index)), 0);
  }

}
// Normalize a C-style boolean: x = (x == 0 ? 0 : 1).
void MacroAssembler::c2bool(Register x) {
  // implements x == 0 ? 0 : 1
  // note: must only look at least-significant byte of x
  //       since C-style booleans are stored in one byte
  //       only! (was bug)
  andl(x, 0xFF);
  setb(Assembler::notZero, x);
}
// Full implementation of Java idiv and irem; checks for special
// case as described in JVM spec., p.243 & p.271 (min_int / -1 must yield
// quotient min_int, remainder 0, instead of faulting).  The function
// returns the (pc) offset of the idivl instruction - may be needed
// for implicit exceptions.
//
//         normal case                           special case
//
// input : eax: dividend                         min_int
//         reg: divisor (may not be eax/edx)     -1
//
// output: eax: quotient  (= eax idiv reg)       min_int
//         edx: remainder (= eax irem reg)       0
int MacroAssembler::corrected_idivl(Register reg) {
  assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
  const int min_int = 0x80000000;
  Label normal_case, special_case;

  // check for special case
  cmpl(rax, min_int);
  jcc(Assembler::notEqual, normal_case);
  xorl(rdx, rdx); // prepare edx for possible special case (where
                  // remainder = 0)
  cmpl(reg, -1);
  jcc(Assembler::equal, special_case);   // min_int / -1: skip the idivl

  // handle normal case
  bind(normal_case);
  cdql();                                // sign-extend eax into edx:eax
  int idivl_offset = offset();
  idivl(reg);

  // normal and special case exit
  bind(special_case);

  return idivl_offset;
}
// Full implementation of Java ldiv and lrem; checks for special
// case as described in JVM spec., p.243 & p.271 (min_long / -1 must yield
// quotient min_long, remainder 0, instead of faulting).  The function
// returns the (pc) offset of the idivq instruction - may be needed
// for implicit exceptions.
//
//         normal case                           special case
//
// input : rax: dividend                         min_long
//         reg: divisor (may not be eax/edx)     -1
//
// output: rax: quotient  (= rax idiv reg)       min_long
//         rdx: remainder (= rax irem reg)       0
int MacroAssembler::corrected_idivq(Register reg) {
  assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
  static const int64_t min_long = 0x8000000000000000;
  Label normal_case, special_case;

  // check for special case
  cmp64(rax, ExternalAddress((address) &min_long));
  jcc(Assembler::notEqual, normal_case);
  xorl(rdx, rdx); // prepare rdx for possible special case (where
                  // remainder = 0)
  cmpq(reg, -1);
  jcc(Assembler::equal, special_case);   // min_long / -1: skip the idivq

  // handle normal case
  bind(normal_case);
  cdqq();                                // sign-extend rax into rdx:rax
  int idivq_offset = offset();
  idivq(reg);

  // normal and special case exit
  bind(special_case);

  return idivq_offset;
}
// Saves flags and all integer registers on the stack.
// Must be matched by pop_IU_state().
void MacroAssembler::push_IU_state() {
  pushfq();     // Push flags first because pushaq kills them
  subq(rsp, 8); // Make sure rsp stays 16-byte aligned
  pushaq();
}
// Restores integer registers and flags; exact mirror of push_IU_state().
void MacroAssembler::pop_IU_state() {
  popaq();
  addq(rsp, 8); // undo the alignment adjustment made in push_IU_state
  popfq();
}
// Saves the full x87/SSE state (fxsave image) on the stack.
// Must be matched by pop_FPU_state().
void MacroAssembler::push_FPU_state() {
  subq(rsp, FPUStateSizeInWords * wordSize);
  fxsave(Address(rsp, 0));
}
// Restores the x87/SSE state saved by push_FPU_state().
void MacroAssembler::pop_FPU_state() {
  fxrstor(Address(rsp, 0));
  addq(rsp, FPUStateSizeInWords * wordSize);
}
// Save Integer and Float state
// Warning: Stack must be 16 byte aligned (fxsave in push_FPU_state
// operates on the stack image)
void MacroAssembler::push_CPU_state() {
  push_IU_state();
  push_FPU_state();
}
// Restore Integer and Float state; exact mirror of push_CPU_state().
void MacroAssembler::pop_CPU_state() {
  pop_FPU_state();
  pop_IU_state();
}
// Sign-extends the low 16 bits of reg into the low 32 bits.
void MacroAssembler::sign_extend_short(Register reg) {
  movswl(reg, reg);
}
// Sign-extends the low 8 bits of reg into the low 32 bits.
void MacroAssembler::sign_extend_byte(Register reg) {
  movsbl(reg, reg);
}
// Signed division of reg by 2^shift_value using an arithmetic shift.
// A plain sar rounds toward negative infinity; adding 2^shift - 1 to
// negative dividends first makes the result round toward zero, matching
// Java division semantics.
void MacroAssembler::division_with_shift(Register reg, int shift_value) {
  assert (shift_value > 0, "illegal shift value");
  Label _is_positive;
  testl (reg, reg);
  jcc (Assembler::positive, _is_positive);
  int offset = (1 << shift_value) - 1 ;

  // bias a negative dividend by 2^shift - 1 before shifting
  if (offset == 1) {
    incrementl(reg);
  } else {
    addl(reg, offset);
  }

  bind (_is_positive);
  sarl(reg, shift_value);
}
// Rounds reg up to the next multiple of modulus (32-bit).
// modulus must be a power of two for the mask trick to work.
void MacroAssembler::round_to_l(Register reg, int modulus) {
  addl(reg, modulus - 1);
  andl(reg, -modulus);
}
// Rounds reg up to the next multiple of modulus (64-bit).
// modulus must be a power of two for the mask trick to work.
void MacroAssembler::round_to_q(Register reg, int modulus) {
  addq(reg, modulus - 1);
  andq(reg, -modulus);
}
// Emits a check that reg holds a valid oop (active only under
// -XX:+VerifyOops); s is included in the failure message.  The emitted
// code preserves all registers: rax is saved here and restored by the
// verify_oop_subroutine, which also pops both arguments.
void MacroAssembler::verify_oop(Register reg, const char* s) {
  if (!VerifyOops) {
    return;
  }

  // Pass register number to verify_oop_subroutine
  // Note: the buffer is deliberately never freed -- the generated code
  // embeds its address, so it must outlive this assembler.
  char* b = new char[strlen(s) + 50];
  sprintf(b, "verify_oop: %s: %s", reg->name(), s);

  pushq(rax); // save rax, restored by receiver

  // pass args on stack, only touch rax
  pushq(reg);
  // avoid using pushptr, as it modifies scratch registers
  // and our contract is not to modify anything
  ExternalAddress buffer((address)b);
  movptr(rax, buffer.addr());
  pushq(rax);

  // call indirectly to solve generation ordering problem
  movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
  call(rax); // no alignment requirement
  // everything popped by receiver
}
4646 void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
4647 if (!VerifyOops) return;
4648 // Pass register number to verify_oop_subroutine
4649 char* b = new char[strlen(s) + 50];
4650 sprintf(b, "verify_oop_addr: %s", s);
4651 pushq(rax); // save rax
4652 movq(addr, rax);
4653 pushq(rax); // pass register argument
4656 // avoid using pushptr, as it modifies scratch registers
4657 // and our contract is not to modify anything
4658 ExternalAddress buffer((address)b);
4659 movptr(rax, buffer.addr());
4660 pushq(rax);
4662 // call indirectly to solve generation ordering problem
4663 movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
4664 call(rax); // no alignment requirement
4665 // everything popped by receiver
4666 }
// Emits code that halts the VM with message msg: dumps all registers to
// the stack, calls MacroAssembler::debug with (msg, faulting pc, regs),
// then executes hlt.  Used for generated-code assertions.
void MacroAssembler::stop(const char* msg) {
  address rip = pc();
  pushaq(); // get regs on stack
  lea(c_rarg0, ExternalAddress((address) msg));
  lea(c_rarg1, InternalAddress(rip));
  movq(c_rarg2, rsp); // pass pointer to regs array
  andq(rsp, -16); // align stack as required by ABI
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug)));
  hlt();
}
// Emits code that prints a warning message via the VM's warning()
// routine and continues execution.  Preserves all CPU state; r12 is
// used to remember the original rsp across the aligned call.
void MacroAssembler::warn(const char* msg) {
  pushq(r12);
  movq(r12, rsp);
  andq(rsp, -16); // align stack as required by push_CPU_state and call

  push_CPU_state(); // keeps alignment at 16 bytes
  lea(c_rarg0, ExternalAddress((address) msg));
  call_VM_leaf(CAST_FROM_FN_PTR(address, warning), c_rarg0);
  pop_CPU_state();

  movq(rsp, r12);
  popq(r12);
}
#ifndef PRODUCT
// Debug-only helper (defined elsewhere in the VM): prints what is known
// about the code location x.
extern "C" void findpc(intptr_t x);
#endif
4698 void MacroAssembler::debug(char* msg, int64_t pc, int64_t regs[]) {
4699 // In order to get locks to work, we need to fake a in_VM state
4700 if (ShowMessageBoxOnError ) {
4701 JavaThread* thread = JavaThread::current();
4702 JavaThreadState saved_state = thread->thread_state();
4703 thread->set_thread_state(_thread_in_vm);
4704 #ifndef PRODUCT
4705 if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
4706 ttyLocker ttyl;
4707 BytecodeCounter::print();
4708 }
4709 #endif
4710 // To see where a verify_oop failed, get $ebx+40/X for this frame.
4711 // XXX correct this offset for amd64
4712 // This is the value of eip which points to where verify_oop will return.
4713 if (os::message_box(msg, "Execution stopped, print registers?")) {
4714 ttyLocker ttyl;
4715 tty->print_cr("rip = 0x%016lx", pc);
4716 #ifndef PRODUCT
4717 tty->cr();
4718 findpc(pc);
4719 tty->cr();
4720 #endif
4721 tty->print_cr("rax = 0x%016lx", regs[15]);
4722 tty->print_cr("rbx = 0x%016lx", regs[12]);
4723 tty->print_cr("rcx = 0x%016lx", regs[14]);
4724 tty->print_cr("rdx = 0x%016lx", regs[13]);
4725 tty->print_cr("rdi = 0x%016lx", regs[8]);
4726 tty->print_cr("rsi = 0x%016lx", regs[9]);
4727 tty->print_cr("rbp = 0x%016lx", regs[10]);
4728 tty->print_cr("rsp = 0x%016lx", regs[11]);
4729 tty->print_cr("r8 = 0x%016lx", regs[7]);
4730 tty->print_cr("r9 = 0x%016lx", regs[6]);
4731 tty->print_cr("r10 = 0x%016lx", regs[5]);
4732 tty->print_cr("r11 = 0x%016lx", regs[4]);
4733 tty->print_cr("r12 = 0x%016lx", regs[3]);
4734 tty->print_cr("r13 = 0x%016lx", regs[2]);
4735 tty->print_cr("r14 = 0x%016lx", regs[1]);
4736 tty->print_cr("r15 = 0x%016lx", regs[0]);
4737 BREAKPOINT;
4738 }
4739 ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
4740 } else {
4741 ttyLocker ttyl;
4742 ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n",
4743 msg);
4744 }
4745 }
// Emits a call to os::breakpoint() rather than a raw breakpoint
// instruction, for better debugability.
void MacroAssembler::os_breakpoint() {
  // This shouldn't need alignment, it's an empty function
  call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
}
// Write serialization page so VM thread can do a pseudo remote membar.
// We use the current thread pointer to calculate a thread specific
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
// tmp is clobbered; thread is read-only.
void MacroAssembler::serialize_memory(Register thread,
                                      Register tmp) {
  // derive a per-thread, int-aligned offset into the serialization page
  movl(tmp, thread);
  shrl(tmp, os::get_serialize_page_shift_count());
  andl(tmp, (os::vm_page_size() - sizeof(int)));

  Address index(noreg, tmp, Address::times_1);
  ExternalAddress page(os::get_memory_serialize_page());

  movptr(ArrayAddress(page, index), tmp);
}
// Debug-build sanity check of the current thread's TLAB: asserts
// start <= top <= end.  Emits nothing in product builds or when TLABs
// are disabled.  Preserves all registers (t1 is saved/restored).
void MacroAssembler::verify_tlab() {
#ifdef ASSERT
  if (UseTLAB) {
    Label next, ok;
    Register t1 = rsi;

    pushq(t1);

    // check top >= start
    movq(t1, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
    cmpq(t1, Address(r15_thread, in_bytes(JavaThread::tlab_start_offset())));
    jcc(Assembler::aboveEqual, next);
    stop("assert(top >= start)");
    should_not_reach_here();

    // check end >= top
    bind(next);
    movq(t1, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset())));
    cmpq(t1, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
    jcc(Assembler::aboveEqual, ok);
    stop("assert(top <= end)");
    should_not_reach_here();

    bind(ok);

    popq(t1);
  }
#endif
}
// Defines obj, preserves var_size_in_bytes
// Allocates an object of var_size_in_bytes (or con_size_in_bytes when
// var_size_in_bytes == noreg) directly in the shared eden via a CAS
// bump of the heap top; jumps to slow_case on overflow, heap
// exhaustion, or (implicitly, via retry) never -- a lost CAS race
// simply retries.  t1 is clobbered (holds the proposed new top).
void MacroAssembler::eden_allocate(Register obj,
                                   Register var_size_in_bytes,
                                   int con_size_in_bytes,
                                   Register t1,
                                   Label& slow_case) {
  assert(obj == rax, "obj must be in rax for cmpxchg");
  assert_different_registers(obj, var_size_in_bytes, t1);
  Register end = t1;
  Label retry;
  bind(retry);
  ExternalAddress heap_top((address) Universe::heap()->top_addr());
  movptr(obj, heap_top);
  if (var_size_in_bytes == noreg) {
    leaq(end, Address(obj, con_size_in_bytes));
  } else {
    leaq(end, Address(obj, var_size_in_bytes, Address::times_1));
  }
  // if end < obj then we wrapped around => object too long => slow case
  cmpq(end, obj);
  jcc(Assembler::below, slow_case);
  cmpptr(end, ExternalAddress((address) Universe::heap()->end_addr()));

  jcc(Assembler::above, slow_case);
  // Compare obj with the top addr, and if still equal, store the new
  // top addr in end at the address of the top addr pointer. Sets ZF
  // if was equal, and clears it otherwise. Use lock prefix for
  // atomicity on MPs.
  if (os::is_MP()) {
    lock();
  }
  cmpxchgptr(end, heap_top);
  // if someone beat us on the allocation, try again, otherwise continue
  jcc(Assembler::notEqual, retry);
}
// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
// Allocates an object of var_size_in_bytes (or con_size_in_bytes when
// var_size_in_bytes == noreg) by bumping the current thread's TLAB top;
// no atomics needed since the TLAB is thread-private.  Jumps to
// slow_case when the TLAB has insufficient room.  t2 is clobbered.
void MacroAssembler::tlab_allocate(Register obj,
                                   Register var_size_in_bytes,
                                   int con_size_in_bytes,
                                   Register t1,
                                   Register t2,
                                   Label& slow_case) {
  assert_different_registers(obj, t1, t2);
  assert_different_registers(obj, var_size_in_bytes, t1);
  Register end = t2;

  verify_tlab();

  movq(obj, Address(r15_thread, JavaThread::tlab_top_offset()));
  if (var_size_in_bytes == noreg) {
    leaq(end, Address(obj, con_size_in_bytes));
  } else {
    leaq(end, Address(obj, var_size_in_bytes, Address::times_1));
  }
  cmpq(end, Address(r15_thread, JavaThread::tlab_end_offset()));
  jcc(Assembler::above, slow_case);

  // update the tlab top pointer
  movq(Address(r15_thread, JavaThread::tlab_top_offset()), end);

  // recover var_size_in_bytes if necessary (aliased with end == t2)
  if (var_size_in_bytes == end) {
    subq(var_size_in_bytes, obj);
  }
  verify_tlab();
}
// Preserves rbx and rdx.
// Refills the current thread's TLAB from eden, or decides to keep the
// old TLAB and allocate directly in eden:
//   - if the free space left in the TLAB exceeds the refill waste
//     limit, the TLAB is retained and control goes to try_eden;
//   - otherwise the leftover space is filled with a dummy int array
//     (so the heap stays parsable), a fresh TLAB is carved out of eden,
//     and control goes back to retry;
//   - slow_case is taken when inline eden allocation is unavailable or
//     eden is exhausted.
// Clobbers rax, rcx, rsi, r10.
void MacroAssembler::tlab_refill(Label& retry,
                                 Label& try_eden,
                                 Label& slow_case) {
  Register top = rax;
  Register t1 = rcx;
  Register t2 = rsi;
  Register t3 = r10;
  Register thread_reg = r15_thread;
  assert_different_registers(top, thread_reg, t1, t2, t3,
                             /* preserve: */ rbx, rdx);
  Label do_refill, discard_tlab;

  if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
    // No allocation in the shared eden.
    jmp(slow_case);
  }

  movq(top, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
  movq(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));

  // calculate amount of free space (in heap words)
  subq(t1, top);
  shrq(t1, LogHeapWordSize);

  // Retain tlab and allocate object in shared space if
  // the amount free in the tlab is too large to discard.
  cmpq(t1, Address(thread_reg, // size_t
                   in_bytes(JavaThread::tlab_refill_waste_limit_offset())));
  jcc(Assembler::lessEqual, discard_tlab);

  // Retain: bump the waste limit so repeated retains eventually refill
  mov64(t2, ThreadLocalAllocBuffer::refill_waste_limit_increment());
  addq(Address(thread_reg, // size_t
               in_bytes(JavaThread::tlab_refill_waste_limit_offset())),
       t2);
  if (TLABStats) {
    // increment number of slow_allocations
    addl(Address(thread_reg, // unsigned int
                 in_bytes(JavaThread::tlab_slow_allocations_offset())),
         1);
  }
  jmp(try_eden);

  bind(discard_tlab);
  if (TLABStats) {
    // increment number of refills
    addl(Address(thread_reg, // unsigned int
                 in_bytes(JavaThread::tlab_number_of_refills_offset())),
         1);
    // accumulate wastage -- t1 is amount free in tlab
    addl(Address(thread_reg, // unsigned int
                 in_bytes(JavaThread::tlab_fast_refill_waste_offset())),
         t1);
  }

  // if tlab is currently allocated (top or end != null) then
  // fill [top, end + alignment_reserve) with array object
  testq(top, top);
  jcc(Assembler::zero, do_refill);

  // set up the mark word of the filler int array
  mov64(t3, (int64_t) markOopDesc::prototype()->copy_set_hash(0x2));
  movq(Address(top, oopDesc::mark_offset_in_bytes()), t3);
  // set the length to the remaining space (in jint elements)
  subq(t1, typeArrayOopDesc::header_size(T_INT));
  addq(t1, (int)ThreadLocalAllocBuffer::alignment_reserve());
  shlq(t1, log2_intptr(HeapWordSize / sizeof(jint)));
  movq(Address(top, arrayOopDesc::length_offset_in_bytes()), t1);
  // set klass to intArrayKlass
  movptr(t1, ExternalAddress((address) Universe::intArrayKlassObj_addr()));
  // store klass last.  concurrent gcs assumes klass length is valid if
  // klass field is not null.
  store_klass(top, t1);

  // refill the tlab with an eden allocation
  bind(do_refill);
  movq(t1, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
  shlq(t1, LogHeapWordSize);
  // add object_size ??
  eden_allocate(top, t1, 0, t2, slow_case);

  // Check that t1 was preserved in eden_allocate.
#ifdef ASSERT
  if (UseTLAB) {
    Label ok;
    Register tsize = rsi;
    assert_different_registers(tsize, thread_reg, t1);
    pushq(tsize);
    movq(tsize, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
    shlq(tsize, LogHeapWordSize);
    cmpq(t1, tsize);
    jcc(Assembler::equal, ok);
    stop("assert(t1 != tlab size)");
    should_not_reach_here();

    bind(ok);
    popq(tsize);
  }
#endif
  // install the new TLAB [top, top + t1 - alignment_reserve)
  movq(Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())), top);
  movq(Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())), top);
  addq(top, t1);
  subq(top, (int)ThreadLocalAllocBuffer::alignment_reserve_in_bytes());
  movq(Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())), top);
  verify_tlab();
  jmp(retry);
}
4977 int MacroAssembler::biased_locking_enter(Register lock_reg, Register obj_reg, Register swap_reg, Register tmp_reg,
4978 bool swap_reg_contains_mark,
4979 Label& done, Label* slow_case,
4980 BiasedLockingCounters* counters) {
4981 assert(UseBiasedLocking, "why call this otherwise?");
4982 assert(swap_reg == rax, "swap_reg must be rax for cmpxchgq");
4983 assert(tmp_reg != noreg, "tmp_reg must be supplied");
4984 assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
4985 assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
4986 Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes());
4987 Address saved_mark_addr(lock_reg, 0);
4989 if (PrintBiasedLockingStatistics && counters == NULL)
4990 counters = BiasedLocking::counters();
4992 // Biased locking
4993 // See whether the lock is currently biased toward our thread and
4994 // whether the epoch is still valid
4995 // Note that the runtime guarantees sufficient alignment of JavaThread
4996 // pointers to allow age to be placed into low bits
4997 // First check to see whether biasing is even enabled for this object
4998 Label cas_label;
4999 int null_check_offset = -1;
5000 if (!swap_reg_contains_mark) {
5001 null_check_offset = offset();
5002 movq(swap_reg, mark_addr);
5003 }
5004 movq(tmp_reg, swap_reg);
5005 andq(tmp_reg, markOopDesc::biased_lock_mask_in_place);
5006 cmpq(tmp_reg, markOopDesc::biased_lock_pattern);
5007 jcc(Assembler::notEqual, cas_label);
5008 // The bias pattern is present in the object's header. Need to check
5009 // whether the bias owner and the epoch are both still current.
5010 load_klass(tmp_reg, obj_reg);
5011 movq(tmp_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
5012 orq(tmp_reg, r15_thread);
5013 xorq(tmp_reg, swap_reg);
5014 andq(tmp_reg, ~((int) markOopDesc::age_mask_in_place));
5015 if (counters != NULL) {
5016 cond_inc32(Assembler::zero,
5017 ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr()));
5018 }
5019 jcc(Assembler::equal, done);
5021 Label try_revoke_bias;
5022 Label try_rebias;
5024 // At this point we know that the header has the bias pattern and
5025 // that we are not the bias owner in the current epoch. We need to
5026 // figure out more details about the state of the header in order to
5027 // know what operations can be legally performed on the object's
5028 // header.
5030 // If the low three bits in the xor result aren't clear, that means
5031 // the prototype header is no longer biased and we have to revoke
5032 // the bias on this object.
5033 testq(tmp_reg, markOopDesc::biased_lock_mask_in_place);
5034 jcc(Assembler::notZero, try_revoke_bias);
5036 // Biasing is still enabled for this data type. See whether the
5037 // epoch of the current bias is still valid, meaning that the epoch
5038 // bits of the mark word are equal to the epoch bits of the
5039 // prototype header. (Note that the prototype header's epoch bits
5040 // only change at a safepoint.) If not, attempt to rebias the object
5041 // toward the current thread. Note that we must be absolutely sure
5042 // that the current epoch is invalid in order to do this because
5043 // otherwise the manipulations it performs on the mark word are
5044 // illegal.
5045 testq(tmp_reg, markOopDesc::epoch_mask_in_place);
5046 jcc(Assembler::notZero, try_rebias);
5048 // The epoch of the current bias is still valid but we know nothing
5049 // about the owner; it might be set or it might be clear. Try to
5050 // acquire the bias of the object using an atomic operation. If this
5051 // fails we will go in to the runtime to revoke the object's bias.
5052 // Note that we first construct the presumed unbiased header so we
5053 // don't accidentally blow away another thread's valid bias.
5054 andq(swap_reg,
5055 markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
5056 movq(tmp_reg, swap_reg);
5057 orq(tmp_reg, r15_thread);
5058 if (os::is_MP()) {
5059 lock();
5060 }
5061 cmpxchgq(tmp_reg, Address(obj_reg, 0));
5062 // If the biasing toward our thread failed, this means that
5063 // another thread succeeded in biasing it toward itself and we
5064 // need to revoke that bias. The revocation will occur in the
5065 // interpreter runtime in the slow case.
5066 if (counters != NULL) {
5067 cond_inc32(Assembler::zero,
5068 ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr()));
5069 }
5070 if (slow_case != NULL) {
5071 jcc(Assembler::notZero, *slow_case);
5072 }
5073 jmp(done);
5075 bind(try_rebias);
5076 // At this point we know the epoch has expired, meaning that the
5077 // current "bias owner", if any, is actually invalid. Under these
5078 // circumstances _only_, we are allowed to use the current header's
5079 // value as the comparison value when doing the cas to acquire the
5080 // bias in the current epoch. In other words, we allow transfer of
5081 // the bias from one thread to another directly in this situation.
5082 //
5083 // FIXME: due to a lack of registers we currently blow away the age
5084 // bits in this situation. Should attempt to preserve them.
5085 load_klass(tmp_reg, obj_reg);
5086 movq(tmp_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
5087 orq(tmp_reg, r15_thread);
5088 if (os::is_MP()) {
5089 lock();
5090 }
5091 cmpxchgq(tmp_reg, Address(obj_reg, 0));
5092 // If the biasing toward our thread failed, then another thread
5093 // succeeded in biasing it toward itself and we need to revoke that
5094 // bias. The revocation will occur in the runtime in the slow case.
5095 if (counters != NULL) {
5096 cond_inc32(Assembler::zero,
5097 ExternalAddress((address) counters->rebiased_lock_entry_count_addr()));
5098 }
5099 if (slow_case != NULL) {
5100 jcc(Assembler::notZero, *slow_case);
5101 }
5102 jmp(done);
5104 bind(try_revoke_bias);
5105 // The prototype mark in the klass doesn't have the bias bit set any
5106 // more, indicating that objects of this data type are not supposed
5107 // to be biased any more. We are going to try to reset the mark of
5108 // this object to the prototype value and fall through to the
5109 // CAS-based locking scheme. Note that if our CAS fails, it means
5110 // that another thread raced us for the privilege of revoking the
5111 // bias of this particular object, so it's okay to continue in the
5112 // normal locking code.
5113 //
5114 // FIXME: due to a lack of registers we currently blow away the age
5115 // bits in this situation. Should attempt to preserve them.
5116 load_klass(tmp_reg, obj_reg);
5117 movq(tmp_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
5118 if (os::is_MP()) {
5119 lock();
5120 }
5121 cmpxchgq(tmp_reg, Address(obj_reg, 0));
5122 // Fall through to the normal CAS-based lock, because no matter what
5123 // the result of the above CAS, some thread must have succeeded in
5124 // removing the bias bit from the object's header.
5125 if (counters != NULL) {
5126 cond_inc32(Assembler::zero,
5127 ExternalAddress((address) counters->revoked_lock_entry_count_addr()));
5128 }
5130 bind(cas_label);
5132 return null_check_offset;
5133 }
// Emits the biased-locking fast path for monitorexit: if the mark word
// still carries the biased pattern, unlocking is a no-op and control
// jumps to "done".  temp_reg is clobbered.
void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) {
  assert(UseBiasedLocking, "why call this otherwise?");

  // Check for biased locking unlock case, which is a no-op
  // Note: we do not have to check the thread ID for two reasons.
  // First, the interpreter checks for IllegalMonitorStateException at
  // a higher level. Second, if the bias was revoked while we held the
  // lock, the object could not be rebiased toward another thread, so
  // the bias bit would be clear.
  movq(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
  andq(temp_reg, markOopDesc::biased_lock_mask_in_place);
  cmpq(temp_reg, markOopDesc::biased_lock_pattern);
  jcc(Assembler::equal, done);
}
// Loads the klass pointer of the object in src into dst.  With
// compressed oops the klass field is a 32-bit narrow oop, decoded with
// the not-null variant since an object's klass is never null.
void MacroAssembler::load_klass(Register dst, Register src) {
  if (UseCompressedOops) {
    movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
    decode_heap_oop_not_null(dst);
  } else {
    movq(dst, Address(src, oopDesc::klass_offset_in_bytes()));
  }
}
// Stores the klass pointer in src into the object at dst.  With
// compressed oops, src is encoded IN PLACE (src is clobbered) and only
// the 32-bit narrow klass is written; the adjacent gap is NOT zeroed
// here -- callers use store_klass_gap() for that.
void MacroAssembler::store_klass(Register dst, Register src) {
  if (UseCompressedOops) {
    encode_heap_oop_not_null(src); // clobbers src
    movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
  } else {
    movq(Address(dst, oopDesc::klass_offset_in_bytes()), src);
  }
}
// Writes src (typically a zero register -- presumably; callers decide)
// into the 4-byte gap next to the narrow klass field.  No-op unless
// compressed oops are in use (otherwise there is no gap).
void MacroAssembler::store_klass_gap(Register dst, Register src) {
  if (UseCompressedOops) {
    // Store to klass gap in destination
    movl(Address(dst, oopDesc::klass_gap_offset_in_bytes()), src);
  }
}
// Loads a (possibly null) heap oop from src into dst, decoding the
// 32-bit narrow form when compressed oops are enabled.
void MacroAssembler::load_heap_oop(Register dst, Address src) {
  if (UseCompressedOops) {
    movl(dst, src);
    decode_heap_oop(dst); // null-safe decode
  } else {
    movq(dst, src);
  }
}
// Stores the (possibly null) heap oop in src to dst.  With compressed
// oops, src is encoded IN PLACE (src is clobbered) before the 32-bit
// store; src must therefore not be part of the address computation.
void MacroAssembler::store_heap_oop(Address dst, Register src) {
  if (UseCompressedOops) {
    assert(!dst.uses(src), "not enough registers");
    encode_heap_oop(src); // null-safe encode; clobbers src
    movl(dst, src);
  } else {
    movq(dst, src);
  }
}
// Algorithm must match oop.inline.hpp encode_heap_oop.
// Compresses the (possibly null) oop in r in place: null encodes to 0
// (the cmov substitutes the heap base so the subtraction yields zero);
// everything else becomes (r - heap_base) >> LogMinObjAlignmentInBytes.
void MacroAssembler::encode_heap_oop(Register r) {
  assert (UseCompressedOops, "should be compressed");
#ifdef ASSERT
  // Cross-check that r12 still holds the heap base.
  if (CheckCompressedOops) {
    Label ok;
    pushq(rscratch1); // cmpptr trashes rscratch1
    cmpptr(r12_heapbase, ExternalAddress((address)Universe::heap_base_addr()));
    jcc(Assembler::equal, ok);
    stop("MacroAssembler::encode_heap_oop: heap base corrupted?");
    bind(ok);
    popq(rscratch1);
  }
#endif
  verify_oop(r, "broken oop in encode_heap_oop");
  testq(r, r);
  cmovq(Assembler::equal, r, r12_heapbase); // null -> heap_base, so sub gives 0
  subq(r, r12_heapbase);
  shrq(r, LogMinObjAlignmentInBytes);
}
// Compresses the oop in r in place; r must be a non-null oop (checked
// in debug builds).  Saves the null cmov of encode_heap_oop.
void MacroAssembler::encode_heap_oop_not_null(Register r) {
  assert (UseCompressedOops, "should be compressed");
#ifdef ASSERT
  if (CheckCompressedOops) {
    Label ok;
    testq(r, r);
    jcc(Assembler::notEqual, ok);
    stop("null oop passed to encode_heap_oop_not_null");
    bind(ok);
  }
#endif
  verify_oop(r, "broken oop in encode_heap_oop_not_null");
  subq(r, r12_heapbase);
  shrq(r, LogMinObjAlignmentInBytes);
}
// Two-register variant: compresses the non-null oop in src into dst,
// leaving src unchanged when dst != src (null checked in debug builds).
void MacroAssembler::encode_heap_oop_not_null(Register dst, Register src) {
  assert (UseCompressedOops, "should be compressed");
#ifdef ASSERT
  if (CheckCompressedOops) {
    Label ok;
    testq(src, src);
    jcc(Assembler::notEqual, ok);
    stop("null oop passed to encode_heap_oop_not_null2");
    bind(ok);
  }
#endif
  verify_oop(src, "broken oop in encode_heap_oop_not_null2");
  if (dst != src) {
    movq(dst, src);
  }
  subq(dst, r12_heapbase);
  shrq(dst, LogMinObjAlignmentInBytes);
}
// Decompresses the (possibly null) narrow oop in r in place:
// r = (r << LogMinObjAlignmentInBytes) + heap_base, except a narrow
// null stays null (the shl sets ZF when r is zero, so the add of the
// heap base is skipped).
void MacroAssembler::decode_heap_oop(Register r) {
  assert (UseCompressedOops, "should be compressed");
#ifdef ASSERT
  // Cross-check that r12 still holds the heap base.
  if (CheckCompressedOops) {
    Label ok;
    pushq(rscratch1);
    cmpptr(r12_heapbase,
           ExternalAddress((address)Universe::heap_base_addr()));
    jcc(Assembler::equal, ok);
    stop("MacroAssembler::decode_heap_oop: heap base corrupted?");
    bind(ok);
    popq(rscratch1);
  }
#endif

  Label done;
  shlq(r, LogMinObjAlignmentInBytes);
  jccb(Assembler::equal, done); // shl left ZF set iff r was null
  addq(r, r12_heapbase);
#if 0
   // alternate decoding probably a wash.
   testq(r, r);
   jccb(Assembler::equal, done);
   leaq(r, Address(r12_heapbase, r, Address::times_8, 0));
#endif
  bind(done);
  verify_oop(r, "broken oop in decode_heap_oop");
}
// Decompresses the non-null narrow oop in r in place with a single lea.
void MacroAssembler::decode_heap_oop_not_null(Register r) {
  assert (UseCompressedOops, "should only be used for compressed headers");
  // Cannot assert, unverified entry point counts instructions (see .ad file)
  // vtableStubs also counts instructions in pd_code_size_limit.
  // Must stay a single instruction of fixed size.
  assert(Address::times_8 == LogMinObjAlignmentInBytes, "decode alg wrong");
  leaq(r, Address(r12_heapbase, r, Address::times_8, 0));
}
// Two-register variant: decompresses the non-null narrow oop in src
// into dst with a single lea; src is unchanged.
void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
  assert (UseCompressedOops, "should only be used for compressed headers");
  // Cannot assert, unverified entry point counts instructions (see .ad file)
  // vtableStubs also counts instructions in pd_code_size_limit.
  // Must stay a single instruction of fixed size.
  assert(Address::times_8 == LogMinObjAlignmentInBytes, "decode alg wrong");
  leaq(dst, Address(r12_heapbase, src, Address::times_8, 0));
}
// Emits "movl dst, imm32" where the 32-bit immediate is the narrow-oop
// encoding of obj, recorded with a narrow_oop relocation so the GC can
// patch it when the oop moves.
void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
  assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
  int oop_index = oop_recorder()->find_index(obj);
  RelocationHolder rspec = oop_Relocation::spec(oop_index);

  // movl dst,obj  (0xB8+reg encodes mov r32, imm32)
  InstructionMark im(this);
  int encode = prefix_and_encode(dst->encoding());
  emit_byte(0xB8 | encode);
  emit_data(oop_index, rspec, narrow_oop_operand);
}
5310 Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
5311 switch (cond) {
5312 // Note some conditions are synonyms for others
5313 case Assembler::zero: return Assembler::notZero;
5314 case Assembler::notZero: return Assembler::zero;
5315 case Assembler::less: return Assembler::greaterEqual;
5316 case Assembler::lessEqual: return Assembler::greater;
5317 case Assembler::greater: return Assembler::lessEqual;
5318 case Assembler::greaterEqual: return Assembler::less;
5319 case Assembler::below: return Assembler::aboveEqual;
5320 case Assembler::belowEqual: return Assembler::above;
5321 case Assembler::above: return Assembler::belowEqual;
5322 case Assembler::aboveEqual: return Assembler::below;
5323 case Assembler::overflow: return Assembler::noOverflow;
5324 case Assembler::noOverflow: return Assembler::overflow;
5325 case Assembler::negative: return Assembler::positive;
5326 case Assembler::positive: return Assembler::negative;
5327 case Assembler::parity: return Assembler::noParity;
5328 case Assembler::noParity: return Assembler::parity;
5329 }
5330 ShouldNotReachHere(); return Assembler::overflow;
5331 }
// Emits a conditional 32-bit counter increment: bumps *counter_addr
// when cond holds, by jumping over the increment on the negated
// condition.  Flags are preserved by atomic_incl.
void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr) {
  Condition negated_cond = negate_condition(cond);
  Label L;
  jcc(negated_cond, L);
  atomic_incl(counter_addr);
  bind(L);
}
// Emits a locked 32-bit increment of *counter_addr; flags are saved and
// restored around it (lock incl clobbers ZF etc.).
void MacroAssembler::atomic_incl(AddressLiteral counter_addr) {
  pushfq();
  if (os::is_MP())
    lock();
  incrementl(counter_addr);
  popfq();
}
5350 SkipIfEqual::SkipIfEqual(
5351 MacroAssembler* masm, const bool* flag_addr, bool value) {
5352 _masm = masm;
5353 _masm->cmp8(ExternalAddress((address)flag_addr), value);
5354 _masm->jcc(Assembler::equal, _label);
5355 }
// Binds the skip target, ending the conditionally-skipped region.
SkipIfEqual::~SkipIfEqual() {
  _masm->bind(_label);
}
// Emits stack-banging code: touches one word in every page from rsp
// down through "size" bytes plus the shadow pages, so a stack overflow
// faults eagerly at a known point.  size and tmp are clobbered.
void MacroAssembler::bang_stack_size(Register size, Register tmp) {
  movq(tmp, rsp);
  // Bang stack for total size given plus shadow page size.
  // Bang one page at a time because large size can bang beyond yellow and
  // red zones.
  Label loop;
  bind(loop);
  movl(Address(tmp, (-os::vm_page_size())), size );
  subq(tmp, os::vm_page_size());
  subl(size, os::vm_page_size());
  jcc(Assembler::greater, loop);

  // Bang down shadow pages too.
  // The -1 because we already subtracted 1 page.
  for (int i = 0; i< StackShadowPages-1; i++) {
    movq(Address(tmp, (-i*os::vm_page_size())), size );
  }
}
// Reloads r12 with the compressed-oop heap base (r12 is reserved for
// this purpose when compressed oops are enabled); no-op otherwise.
void MacroAssembler::reinit_heapbase() {
  if (UseCompressedOops) {
    movptr(r12_heapbase, ExternalAddress((address)Universe::heap_base_addr()));
  }
}