// NOTE(review): removed stray merge residue (a "Tue, 08 Aug 2017 ..." timestamp
// line and a bare "merge" marker) that preceded the license header; it was not
// valid C++ and would have broken compilation of this header.
1 /*
2 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
25 /*
26 * This file has been modified by Loongson Technology in 2015. These
27 * modifications are Copyright (c) 2015 Loongson Technology, and are made
28 * available on the same license terms set forth above.
29 */
31 #ifndef SHARE_VM_ASM_ASSEMBLER_HPP
32 #define SHARE_VM_ASM_ASSEMBLER_HPP
34 #include "asm/codeBuffer.hpp"
35 #include "code/oopRecorder.hpp"
36 #include "code/relocInfo.hpp"
37 #include "memory/allocation.hpp"
38 #include "utilities/debug.hpp"
39 #include "utilities/growableArray.hpp"
40 #include "utilities/top.hpp"
42 #ifdef TARGET_ARCH_x86
43 # include "register_x86.hpp"
44 # include "vm_version_x86.hpp"
45 #endif
46 #ifdef TARGET_ARCH_sparc
47 # include "register_sparc.hpp"
48 # include "vm_version_sparc.hpp"
49 #endif
50 #ifdef TARGET_ARCH_zero
51 # include "register_zero.hpp"
52 # include "vm_version_zero.hpp"
53 #endif
54 #ifdef TARGET_ARCH_arm
55 # include "register_arm.hpp"
56 # include "vm_version_arm.hpp"
57 #endif
58 #ifdef TARGET_ARCH_ppc
59 # include "register_ppc.hpp"
60 # include "vm_version_ppc.hpp"
61 #endif
62 #ifdef TARGET_ARCH_mips
63 # include "register_mips.hpp"
64 # include "vm_version_mips.hpp"
65 #endif
67 // This file contains platform-independent assembler declarations.
69 class MacroAssembler;
70 class AbstractAssembler;
71 class Label;
73 /**
74 * Labels represent destinations for control transfer instructions. Such
75 * instructions can accept a Label as their target argument. A Label is
76 * bound to the current location in the code stream by calling the
77 * MacroAssembler's 'bind' method, which in turn calls the Label's 'bind'
78 * method. A Label may be referenced by an instruction before it's bound
79 * (i.e., 'forward referenced'). 'bind' stores the current code offset
80 * in the Label object.
81 *
82 * If an instruction references a bound Label, the offset field(s) within
83 * the instruction are immediately filled in based on the Label's code
84 * offset. If an instruction references an unbound label, that
85 * instruction is put on a list of instructions that must be patched
86 * (i.e., 'resolved') when the Label is bound.
87 *
88 * 'bind' will call the platform-specific 'patch_instruction' method to
89 * fill in the offset field(s) for each unresolved instruction (if there
90 * are any). 'patch_instruction' lives in one of the
91 * cpu/<arch>/vm/assembler_<arch>* files.
92 *
93 * Instead of using a linked list of unresolved instructions, a Label has
94 * an array of unresolved instruction code offsets. _patch_index
95 * contains the total number of forward references. If the Label's array
96 * overflows (i.e., _patch_index grows larger than the array size), a
97 * GrowableArray is allocated to hold the remaining offsets. (The cache
98 * size is 4 for now, which handles over 99.5% of the cases)
99 *
100 * Labels may only be used within a single CodeSection. If you need
101 * to create references between code sections, use explicit relocations.
102 */
class Label VALUE_OBJ_CLASS_SPEC {
 private:
  // Size of the inline patch cache; covers over 99.5% of labels (see the
  // file comment above) before falling back to a GrowableArray overflow.
  enum { PatchCacheSize = 4 };

  // _loc encodes both the binding state (via its sign)
  // and the binding locator (via its value) of a label.
  //
  // _loc >= 0   bound label, loc() encodes the target (jump) position
  // _loc == -1  unbound label
  int _loc;

  // References to instructions that jump to this unresolved label.
  // These instructions need to be patched when the label is bound
  // using the platform-specific patchInstruction() method.
  //
  // To avoid having to allocate from the C-heap each time, we provide
  // a local cache and use the overflow only if we exceed the local cache
  int _patches[PatchCacheSize];      // inline cache of branch locators
  int _patch_index;                  // total number of forward references seen
  GrowableArray<int>* _patch_overflow;  // spill area once the cache is full; NULL until needed

  // Labels are not copyable: a copy would duplicate the pending patch list
  // and cause branches to be resolved twice (or against the wrong copy).
  Label(const Label&) { ShouldNotReachHere(); }

 public:

  /**
   * Binds this label to the given locator (offset + section, as encoded by
   * CodeBuffer::locator). May only be called once, on an unbound label.
   * After binding, be sure 'patch_instructions' is called later to link
   * any forward references recorded via add_patch_at().
   */
  void bind_loc(int loc) {
    assert(loc >= 0, "illegal locator");
    assert(_loc == -1, "already bound");
    _loc = loc;
  }
  // Convenience overload: encodes (pos, sect) into a locator first.
  void bind_loc(int pos, int sect) { bind_loc(CodeBuffer::locator(pos, sect)); }

#ifndef PRODUCT
  // Iterates over all unresolved instructions for printing
  void print_instructions(MacroAssembler* masm) const;
#endif // PRODUCT

  /**
   * Returns the position of the Label in the code buffer.
   * The position is a 'locator', which encodes both offset and section.
   * Asserts if the label is still unbound.
   */
  int loc() const {
    assert(_loc >= 0, "unbound label");
    return _loc;
  }
  // Offset component of the locator (position within the section).
  int loc_pos() const { return CodeBuffer::locator_pos(loc()); }
  // Section component of the locator (which CodeSection the label lives in).
  int loc_sect() const { return CodeBuffer::locator_sect(loc()); }

  // State queries: a label is exactly one of bound / unbound / unused.
  bool is_bound() const   { return _loc >= 0; }
  bool is_unbound() const { return _loc == -1 && _patch_index > 0; }
  bool is_unused() const  { return _loc == -1 && _patch_index == 0; }

  /**
   * Adds a reference to an unresolved displacement instruction to
   * this unbound label
   *
   * @param cb the code buffer being patched
   * @param branch_loc the locator of the branch instruction in the code buffer
   */
  void add_patch_at(CodeBuffer* cb, int branch_loc);

  /**
   * Iterate over the list of patches, resolving the instructions
   * Call patch_instruction on each 'branch_loc' value
   */
  void patch_instructions(MacroAssembler* masm);

  // Reset to the pristine (unused) state; also used by the constructor.
  void init() {
    _loc = -1;
    _patch_index = 0;
    _patch_overflow = NULL;
  }

  Label() {
    init();
  }
};
184 // A union type for code which has to assemble both constant and
185 // non-constant operands, when the distinction cannot be made
186 // statically.
187 class RegisterOrConstant VALUE_OBJ_CLASS_SPEC {
188 private:
189 Register _r;
190 intptr_t _c;
192 public:
193 RegisterOrConstant(): _r(noreg), _c(0) {}
194 RegisterOrConstant(Register r): _r(r), _c(0) {}
195 RegisterOrConstant(intptr_t c): _r(noreg), _c(c) {}
197 Register as_register() const { assert(is_register(),""); return _r; }
198 intptr_t as_constant() const { assert(is_constant(),""); return _c; }
200 Register register_or_noreg() const { return _r; }
201 intptr_t constant_or_zero() const { return _c; }
203 bool is_register() const { return _r != noreg; }
204 bool is_constant() const { return _r == noreg; }
205 };
// The Abstract Assembler: Pure assembler doing NO optimizations on the
// instruction level; i.e., what you write is what you get.
// The Assembler is generating code into a CodeBuffer.
class AbstractAssembler : public ResourceObj {
  friend class Label;

 protected:
  CodeSection* _code_section;          // section within the code buffer
  OopRecorder* _oop_recorder;          // support for relocInfo::oop_type

 public:
  // Code emission & accessing

  // Address of the byte at offset 'pos' within the current code section.
  address addr_at(int pos) const { return code_section()->start() + pos; }

 protected:
  // This routine is called when a label is used for an address.
  // Labels and displacements truck in offsets, but target must return a PC.
  address target(Label& L) { return code_section()->target(L, pc()); }

  // Range checks for immediate operands.
  bool is8bit(int x) const { return -0x80 <= x && x < 0x80; }        // signed 8-bit
  bool isByte(int x) const { return 0 <= x && x < 0x100; }           // unsigned 8-bit
  bool isShiftCount(int x) const { return 0 <= x && x < 32; }        // valid 32-bit shift amount

  // Instruction boundaries (required when emitting relocatable values).
  // RAII guard: marks the start of the current instruction in the code
  // section on construction and clears the mark on destruction, so that
  // relocations emitted mid-instruction can refer back to its start.
  class InstructionMark: public StackObj {
   private:
    AbstractAssembler* _assm;

   public:
    InstructionMark(AbstractAssembler* assm) : _assm(assm) {
      assert(assm->inst_mark() == NULL, "overlapping instructions");
      _assm->set_inst_mark();
    }
    ~InstructionMark() {
      _assm->clear_inst_mark();
    }
  };
  friend class InstructionMark;
#ifdef ASSERT
  // Make it return true on platforms which need to verify
  // instruction boundaries for some operations.
  static bool pd_check_instruction_mark();

  // Add delta to short branch distance to verify that it still fits into imm8.
  int _short_branch_delta;

  int  short_branch_delta() const { return _short_branch_delta; }
  void set_short_branch_delta()   { _short_branch_delta = 32; }
  void clear_short_branch_delta() { _short_branch_delta = 0; }

  // RAII guard around the emission of a short-branch instruction: inflates
  // the measured branch distance by short_branch_delta() for the duration,
  // so that borderline distances are caught at assembly time.
  class ShortBranchVerifier: public StackObj {
   private:
    AbstractAssembler* _assm;

   public:
    ShortBranchVerifier(AbstractAssembler* assm) : _assm(assm) {
      assert(assm->short_branch_delta() == 0, "overlapping instructions");
      _assm->set_short_branch_delta();
    }
    ~ShortBranchVerifier() {
      _assm->clear_short_branch_delta();
    }
  };
#else
  // Dummy in product.
  class ShortBranchVerifier: public StackObj {
   public:
    ShortBranchVerifier(AbstractAssembler* assm) {}
  };
#endif

 public:

  // Creation
  AbstractAssembler(CodeBuffer* code);

  // ensure buf contains all code (call this before using/copying the code)
  void flush();

  // Raw emission of fixed-width values into the current code section.
  void emit_int8(   int8_t  x) { code_section()->emit_int8(  x); }
  void emit_int16(  int16_t x) { code_section()->emit_int16( x); }
  void emit_int32(  int32_t x) { code_section()->emit_int32( x); }
  void emit_int64(  int64_t x) { code_section()->emit_int64( x); }

  void emit_float(  jfloat  x) { code_section()->emit_float( x); }
  void emit_double( jdouble x) { code_section()->emit_double(x); }
  void emit_address(address x) { code_section()->emit_address(x); }

  // min and max values for signed immediate ranges
  static int min_simm(int nbits) { return -(intptr_t(1) << (nbits - 1))    ; }
  static int max_simm(int nbits) { return  (intptr_t(1) << (nbits - 1)) - 1; }

  // Define some:
  static int min_simm10() { return min_simm(10); }
  static int min_simm13() { return min_simm(13); }
  static int min_simm16() { return min_simm(16); }

  // Test if x is within signed immediate range for nbits
  static bool is_simm(intptr_t x, int nbits) { return min_simm(nbits) <= x && x <= max_simm(nbits); }

  // Define some:
  static bool is_simm5( intptr_t x) { return is_simm(x, 5 ); }
  static bool is_simm8( intptr_t x) { return is_simm(x, 8 ); }
  static bool is_simm10(intptr_t x) { return is_simm(x, 10); }
  static bool is_simm11(intptr_t x) { return is_simm(x, 11); }
  static bool is_simm12(intptr_t x) { return is_simm(x, 12); }
  static bool is_simm13(intptr_t x) { return is_simm(x, 13); }
  static bool is_simm16(intptr_t x) { return is_simm(x, 16); }
  static bool is_simm26(intptr_t x) { return is_simm(x, 26); }
  static bool is_simm32(intptr_t x) { return is_simm(x, 32); }

  // Accessors
  CodeSection* code_section() const { return _code_section; }
  CodeBuffer*  code()         const { return code_section()->outer(); }
  int          sect()         const { return code_section()->index(); }
  address      pc()           const { return code_section()->end(); }     // current emission point
  int          offset()       const { return code_section()->size(); }    // bytes emitted so far in this section
  int          locator()      const { return CodeBuffer::locator(offset(), sect()); }

  OopRecorder* oop_recorder() const   { return _oop_recorder; }
  void    set_oop_recorder(OopRecorder* r) { _oop_recorder = r; }

  // Instruction-start mark maintained by InstructionMark (see above).
  address inst_mark() const { return code_section()->mark(); }
  void set_inst_mark()      { code_section()->set_mark(); }
  void clear_inst_mark()    { code_section()->clear_mark(); }

  // Constants in code
  // Record a relocation at the current emission point. Must not be called
  // in the middle of an instruction on platforms that verify marks.
  void relocate(RelocationHolder const& rspec, int format = 0) {
    assert(!pd_check_instruction_mark()
        || inst_mark() == NULL || inst_mark() == code_section()->end(),
           "call relocate() between instructions");
    code_section()->relocate(code_section()->end(), rspec, format);
  }
  void relocate(   relocInfo::relocType rtype, int format = 0) {
    code_section()->relocate(code_section()->end(), rtype, format);
  }

  static int code_fill_byte();         // used to pad out odd-sized code buffers

  // Associate a comment with the current offset.  It will be printed
  // along with the disassembly when printing nmethods.  Currently
  // only supported in the instruction section of the code buffer.
  void block_comment(const char* comment);
  // Copy str to a buffer that has the same lifetime as the CodeBuffer
  const char* code_string(const char* str);

  // Label functions
  void bind(Label& L); // binds an unbound label L to the current code position

  // Move to a different section in the same code buffer.
  void set_code_section(CodeSection* cs);

  // Inform assembler when generating stub code and relocation info
  address    start_a_stub(int required_space);
  void       end_a_stub();
  // Ditto for constants.
  address    start_a_const(int required_space, int required_align = sizeof(double));
  void       end_a_const(CodeSection* cs);  // Pass the codesection to continue in (insts or stubs?).

  // constants support
  //
  // We must remember the code section (insts or stubs) in c1
  // so we can reset to the proper section in end_a_const().
  // Each helper returns the address of the emitted constant, or NULL if
  // the constant section could not provide the required space.
  address long_constant(jlong c) {
    CodeSection* c1 = _code_section;
    address ptr = start_a_const(sizeof(c), sizeof(c));
    if (ptr != NULL) {
      emit_int64(c);
      end_a_const(c1);
    }
    return ptr;
  }
  address double_constant(jdouble c) {
    CodeSection* c1 = _code_section;
    address ptr = start_a_const(sizeof(c), sizeof(c));
    if (ptr != NULL) {
      emit_double(c);
      end_a_const(c1);
    }
    return ptr;
  }
  address float_constant(jfloat c) {
    CodeSection* c1 = _code_section;
    address ptr = start_a_const(sizeof(c), sizeof(c));
    if (ptr != NULL) {
      emit_float(c);
      end_a_const(c1);
    }
    return ptr;
  }
  address address_constant(address c) {
    CodeSection* c1 = _code_section;
    address ptr = start_a_const(sizeof(c), sizeof(c));
    if (ptr != NULL) {
      emit_address(c);
      end_a_const(c1);
    }
    return ptr;
  }
  address address_constant(address c, RelocationHolder const& rspec) {
    CodeSection* c1 = _code_section;
    address ptr = start_a_const(sizeof(c), sizeof(c));
    if (ptr != NULL) {
      relocate(rspec);
      emit_address(c);
      end_a_const(c1);
    }
    return ptr;
  }

  // Bootstrapping aid to cope with delayed determination of constants.
  // Returns a static address which will eventually contain the constant.
  // The value zero (NULL) stands instead of a constant which is still uncomputed.
  // Thus, the eventual value of the constant must not be zero.
  // This is fine, since this is designed for embedding object field
  // offsets in code which must be generated before the object class is loaded.
  // Field offsets are never zero, since an object's header (mark word)
  // is located at offset zero.
  RegisterOrConstant delayed_value(int(*value_fn)(), Register tmp, int offset = 0);
  RegisterOrConstant delayed_value(address(*value_fn)(), Register tmp, int offset = 0);
  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr, Register tmp, int offset) = 0;
  // Last overloading is platform-dependent; look in assembler_<arch>.cpp.
  static intptr_t* delayed_value_addr(int(*constant_fn)());
  static intptr_t* delayed_value_addr(address(*constant_fn)());
  static void update_delayed_values();

  // Bang stack to trigger StackOverflowError at a safe location
  // implementation delegates to machine-specific bang_stack_with_offset
  void generate_stack_overflow_check( int frame_size_in_bytes );
  virtual void bang_stack_with_offset(int offset) = 0;

  /**
   * A platform-dependent method to patch a jump instruction that refers
   * to this label.
   *
   * @param branch the location of the instruction to patch
   * @param target the actual target address the instruction should jump to
   */
  void pd_patch_instruction(address branch, address target);

};
450 #ifdef TARGET_ARCH_x86
451 # include "assembler_x86.hpp"
452 #endif
453 #ifdef TARGET_ARCH_sparc
454 # include "assembler_sparc.hpp"
455 #endif
456 #ifdef TARGET_ARCH_zero
457 # include "assembler_zero.hpp"
458 #endif
459 #ifdef TARGET_ARCH_arm
460 # include "assembler_arm.hpp"
461 #endif
462 #ifdef TARGET_ARCH_ppc
463 # include "assembler_ppc.hpp"
464 #endif
465 #ifdef TARGET_ARCH_mips
466 # include "assembler_mips.hpp"
467 #endif
470 #endif // SHARE_VM_ASM_ASSEMBLER_HPP