src/share/vm/asm/codeBuffer.hpp

/*
 * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_ASM_CODEBUFFER_HPP
#define SHARE_VM_ASM_CODEBUFFER_HPP

#include "code/oopRecorder.hpp"
#include "code/relocInfo.hpp"

class CodeStrings;
class PhaseCFG;
class Compile;
class BufferBlob;
class CodeBuffer;
class Label;
class CodeOffsets: public StackObj {
public:
  enum Entries { Entry,
                 Verified_Entry,
                 Frame_Complete, // Offset in the code where the frame setup (for forte stackwalks) is complete
                 OSR_Entry,
                 Dtrace_trap = OSR_Entry, // dtrace probes can never have an OSR entry so reuse it
                 Exceptions,     // Offset where exception handler lives
                 Deopt,          // Offset where deopt handler lives
                 DeoptMH,        // Offset where MethodHandle deopt handler lives
                 UnwindHandler,  // Offset to default unwind handler
                 max_Entries };

  // special value to note codeBlobs where profile (forte) stack walking is
  // always dangerous and suspect.

  enum { frame_never_safe = -1 };

private:
  int _values[max_Entries];

public:
  CodeOffsets() {
    _values[Entry         ] = 0;
    _values[Verified_Entry] = 0;
    _values[Frame_Complete] = frame_never_safe;
    _values[OSR_Entry     ] = 0;
    _values[Exceptions    ] = -1;
    _values[Deopt         ] = -1;
    _values[DeoptMH       ] = -1;
    _values[UnwindHandler ] = -1;
  }

  int value(Entries e) { return _values[e]; }
  void set_value(Entries e, int val) { _values[e] = val; }
};
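
// Usage sketch (editor's illustration, not part of the original header): a
// code generator typically fills a stack-allocated CodeOffsets and passes it
// along when the final CodeBlob/nmethod is created.  The masm->offset()
// accessor below is an assumption about the surrounding Assembler API and
// stands for "current offset within the instructions section":
//
//   CodeOffsets offsets;
//   offsets.set_value(CodeOffsets::Entry,          0);
//   offsets.set_value(CodeOffsets::Verified_Entry, masm->offset());
//   // ... emit the method body ...
//   offsets.set_value(CodeOffsets::Frame_Complete, masm->offset());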

// This class represents a stream of code and associated relocations.
// There are a few in each CodeBuffer.
// They are filled concurrently, and concatenated at the end.
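//
// Illustrative sketch (editor's addition, not from the original header):
// emitting raw data into a section obtained from an enclosing CodeBuffer,
// using the emit_* and relocate() members declared below.  The CodeBuffer
// variable cb, the constant 0xdeadbeef, and the relocation type chosen here
// are arbitrary placeholders:
//
//   CodeSection* cs = cb.insts();      // main instructions section of cb
//   cs->set_mark();                    // remember where the next item starts
//   cs->emit_int32(0xdeadbeef);        // append 4 bytes at cs->end()
//   cs->relocate(cs->mark(), relocInfo::internal_word_type);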
class CodeSection VALUE_OBJ_CLASS_SPEC {
  friend class CodeBuffer;
 public:
  typedef int csize_t;  // code size type; would be size_t except for history

 private:
  address     _start;           // first byte of contents (instructions)
  address     _mark;            // user mark, usually an instruction beginning
  address     _end;             // current end address
  address     _limit;           // last possible (allocated) end address
  relocInfo*  _locs_start;      // first byte of relocation information
  relocInfo*  _locs_end;        // first byte after relocation information
  relocInfo*  _locs_limit;      // first byte after relocation information buf
  address     _locs_point;      // last relocated position (grows upward)
  bool        _locs_own;        // did I allocate the locs myself?
  bool        _frozen;          // no more expansion of this section
  char        _index;           // my section number (SECT_INSTS, etc.)
  CodeBuffer* _outer;           // enclosing CodeBuffer

  // (Note: _locs_point used to be called _last_reloc_offset.)

  CodeSection() {
    _start         = NULL;
    _mark          = NULL;
    _end           = NULL;
    _limit         = NULL;
    _locs_start    = NULL;
    _locs_end      = NULL;
    _locs_limit    = NULL;
    _locs_point    = NULL;
    _locs_own      = false;
    _frozen        = false;
    debug_only(_index = (char)-1);
    debug_only(_outer = (CodeBuffer*)badAddress);
  }

  void initialize_outer(CodeBuffer* outer, int index) {
    _outer = outer;
    _index = index;
  }

  void initialize(address start, csize_t size = 0) {
    assert(_start == NULL, "only one init step, please");
    _start      = start;
    _mark       = NULL;
    _end        = start;

    _limit      = start + size;
    _locs_point = start;
  }

  void initialize_locs(int locs_capacity);
  void expand_locs(int new_capacity);
  void initialize_locs_from(const CodeSection* source_cs);

  // helper for CodeBuffer::expand()
  void take_over_code_from(CodeSection* cs) {
    _start      = cs->_start;
    _mark       = cs->_mark;
    _end        = cs->_end;
    _limit      = cs->_limit;
    _locs_point = cs->_locs_point;
  }

 public:
  address     start() const         { return _start; }
  address     mark() const          { return _mark; }
  address     end() const           { return _end; }
  address     limit() const         { return _limit; }
  csize_t     size() const          { return (csize_t)(_end - _start); }
  csize_t     mark_off() const      { assert(_mark != NULL, "not an offset");
                                      return (csize_t)(_mark - _start); }
  csize_t     capacity() const      { return (csize_t)(_limit - _start); }
  csize_t     remaining() const     { return (csize_t)(_limit - _end); }

  relocInfo*  locs_start() const    { return _locs_start; }
  relocInfo*  locs_end() const      { return _locs_end; }
  int         locs_count() const    { return (int)(_locs_end - _locs_start); }
  relocInfo*  locs_limit() const    { return _locs_limit; }
  address     locs_point() const    { return _locs_point; }
  csize_t     locs_point_off() const { return (csize_t)(_locs_point - _start); }
  csize_t     locs_capacity() const { return (csize_t)(_locs_limit - _locs_start); }
  csize_t     locs_remaining() const { return (csize_t)(_locs_limit - _locs_end); }

  int         index() const         { return _index; }
  bool        is_allocated() const  { return _start != NULL; }
  bool        is_empty() const      { return _start == _end; }
  bool        is_frozen() const     { return _frozen; }
  bool        has_locs() const      { return _locs_end != NULL; }

  CodeBuffer* outer() const         { return _outer; }

  // is a given address in this section?  (2nd version is end-inclusive)
  bool contains(address pc) const   { return pc >= _start && pc <  _end; }
  bool contains2(address pc) const  { return pc >= _start && pc <= _end; }
  bool allocates(address pc) const  { return pc >= _start && pc <  _limit; }
  bool allocates2(address pc) const { return pc >= _start && pc <= _limit; }

  void set_end(address pc)          { assert(allocates2(pc), err_msg("not in CodeBuffer memory: " PTR_FORMAT " <= " PTR_FORMAT " <= " PTR_FORMAT, p2i(_start), p2i(pc), p2i(_limit))); _end = pc; }
  void set_mark(address pc)         { assert(contains2(pc), "not in codeBuffer");
                                      _mark = pc; }
  void set_mark_off(int offset)     { assert(contains2(offset + _start), "not in codeBuffer");
                                      _mark = offset + _start; }
  void set_mark()                   { _mark = _end; }
  void clear_mark()                 { _mark = NULL; }

  void set_locs_end(relocInfo* p) {
    assert(p <= locs_limit(), "locs data fits in allocated buffer");
    _locs_end = p;
  }
  void set_locs_point(address pc) {
    assert(pc >= locs_point(), "relocation addr may not decrease");
    assert(allocates2(pc),     "relocation addr must be in this section");
    _locs_point = pc;
  }

  // Code emission
  void emit_int8 ( int8_t  x) { *((int8_t*)  end()) = x; set_end(end() + sizeof(int8_t));  }
  void emit_int16( int16_t x) { *((int16_t*) end()) = x; set_end(end() + sizeof(int16_t)); }
  void emit_int32( int32_t x) { *((int32_t*) end()) = x; set_end(end() + sizeof(int32_t)); }
  void emit_int64( int64_t x) { *((int64_t*) end()) = x; set_end(end() + sizeof(int64_t)); }

  void emit_float( jfloat  x) { *((jfloat*)  end()) = x; set_end(end() + sizeof(jfloat));  }
  void emit_double(jdouble x) { *((jdouble*) end()) = x; set_end(end() + sizeof(jdouble)); }
  void emit_address(address x) { *((address*) end()) = x; set_end(end() + sizeof(address)); }

  // Share a scratch buffer for relocinfo.  (Hacky; saves a resource allocation.)
  void initialize_shared_locs(relocInfo* buf, int length);

  // Manage labels and their addresses.
  address target(Label& L, address branch_pc);

  // Emit a relocation.
  void relocate(address at, RelocationHolder const& rspec, int format = 0);
  void relocate(address at, relocInfo::relocType rtype, int format = 0) {
    if (rtype != relocInfo::none)
      relocate(at, Relocation::spec_simple(rtype), format);
  }

  // Alignment requirement for the starting offset of this section.
  // The instruction area and the stubs area must start on CodeEntryAlignment,
  // and the constant table (ctable) on sizeof(jdouble).
  int alignment() const { return MAX2((int)sizeof(jdouble), (int)CodeEntryAlignment); }

  // Slop between sections, used only when allocating temporary BufferBlob buffers.
  static csize_t end_slop() { return MAX2((int)sizeof(jdouble), (int)CodeEntryAlignment); }

  csize_t align_at_start(csize_t off) const { return (csize_t) align_size_up(off, alignment()); }
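
  // Worked example (editor's illustration, not from the original source):
  // if CodeEntryAlignment happens to be 32, alignment() returns
  // MAX2(8, 32) == 32, so align_at_start(10) rounds the offset 10 up to 32,
  // while an offset that is already a multiple of 32 is returned unchanged.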

  // Mark a section frozen.  Assign its remaining space to
  // the following section.  It will never expand after this point.
  inline void freeze();         //  { _outer->freeze_section(this); }

  // Ensure there's enough space left in the current section.
  // Return true if there was an expansion.
  bool maybe_expand_to_ensure_remaining(csize_t amount);

#ifndef PRODUCT
  void decode();
  void dump();
  void print(const char* name);
#endif //PRODUCT
};

class CodeString;
class CodeStrings VALUE_OBJ_CLASS_SPEC {
 private:
#ifndef PRODUCT
  CodeString* _strings;
#endif

  CodeString* find(intptr_t offset) const;
  CodeString* find_last(intptr_t offset) const;

 public:
  CodeStrings() {
#ifndef PRODUCT
    _strings = NULL;
#endif
  }

  const char* add_string(const char * string) PRODUCT_RETURN_(return NULL;);

  void add_comment(intptr_t offset, const char * comment) PRODUCT_RETURN;
  void print_block_comment(outputStream* stream, intptr_t offset) const PRODUCT_RETURN;
  void assign(CodeStrings& other) PRODUCT_RETURN;
  void free() PRODUCT_RETURN;
};

// A CodeBuffer describes a memory space into which assembly
// code is generated.  This memory space usually occupies the
// interior of a single BufferBlob, but in some cases it may be
// an arbitrary span of memory, even outside the code cache.
//
// A code buffer comes in two variants:
//
// (1) A CodeBuffer referring to an already allocated piece of memory:
//     This is used to direct 'static' code generation (e.g. for interpreter
//     or stubroutine generation, etc.).  This code comes with NO relocation
//     information.
//
// (2) A CodeBuffer referring to a piece of memory allocated when the
//     CodeBuffer is allocated.  This is used for nmethod generation.
//
// The memory can be divided up into several parts called sections.
// Each section independently accumulates code (or data) and relocations.
// Sections can grow (at the expense of a reallocation of the BufferBlob
// and recopying of all active sections).  When the buffered code is finally
// written to an nmethod (or other CodeBlob), the contents (code, data,
// and relocations) of the sections are padded to an alignment and concatenated.
// Instructions and data in one section can contain relocatable references to
// addresses in a sibling section.
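//
// Usage sketch (editor's illustration, not part of the original header),
// using the constructors declared below.  The buffer name, the sizes, and
// the stub_buf array are arbitrary placeholders:
//
//   // Variant (1): generate into a fixed, pre-allocated area.
//   static unsigned char stub_buf[1024];
//   CodeBuffer fixed((address) stub_buf, sizeof(stub_buf));
//
//   // Variant (2): let the CodeBuffer allocate a BufferBlob holding
//   // 4096 bytes of code plus 256 bytes of relocation data.
//   CodeBuffer dynamic("illustrative_stub", 4096, 256);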

class CodeBuffer: public StackObj {
  friend class CodeSection;

 private:
  // CodeBuffers must be allocated on the stack except for a single
  // special case during expansion which is handled internally.  This
  // is done to guarantee proper cleanup of resources.
  void* operator new(size_t size) throw() { return ResourceObj::operator new(size); }
  void  operator delete(void* p)          { ShouldNotCallThis(); }

 public:
  typedef int csize_t;  // code size type; would be size_t except for history
  enum {
    // Here is the list of all possible sections.  The order reflects
    // the final layout.
    SECT_FIRST = 0,
    SECT_CONSTS = SECT_FIRST, // Non-instruction data:  Floats, jump tables, etc.
    SECT_INSTS,               // Executable instructions.
    SECT_STUBS,               // Outbound trampolines for supporting call sites.
    SECT_LIMIT, SECT_NONE = -1
  };

 private:
  enum {
    sect_bits = 2,      // assert (SECT_LIMIT <= (1<<sect_bits))
    sect_mask = (1<<sect_bits)-1
  };

  const char*  _name;

  CodeSection  _consts;             // constants, jump tables
  CodeSection  _insts;              // instructions (the main section)
  CodeSection  _stubs;              // stubs (call site support), deopt, exception handling

  CodeBuffer*  _before_expand;      // dead buffer, from before the last expansion

  BufferBlob*  _blob;               // optional buffer in CodeCache for generated code
  address      _total_start;        // first address of combined memory buffer
  csize_t      _total_size;         // size in bytes of combined memory buffer

  OopRecorder* _oop_recorder;
  CodeStrings  _strings;
  OopRecorder  _default_oop_recorder;  // override with initialize_oop_recorder
  Arena*       _overflow_arena;

  address      _decode_begin;       // start address for decode
  address decode_begin();

  void initialize_misc(const char * name) {
    // all pointers other than code_start/end and those inside the sections
    assert(name != NULL, "must have a name");
    _name           = name;
    _before_expand  = NULL;
    _blob           = NULL;
    _oop_recorder   = NULL;
    _decode_begin   = NULL;
    _overflow_arena = NULL;
  }

  void initialize(address code_start, csize_t code_size) {
    _consts.initialize_outer(this, SECT_CONSTS);
    _insts.initialize_outer(this,  SECT_INSTS);
    _stubs.initialize_outer(this,  SECT_STUBS);
    _total_start = code_start;
    _total_size  = code_size;
    // Initialize the main section:
    _insts.initialize(code_start, code_size);
    assert(!_stubs.is_allocated(),  "no garbage here");
    assert(!_consts.is_allocated(), "no garbage here");
    _oop_recorder = &_default_oop_recorder;
  }

  void initialize_section_size(CodeSection* cs, csize_t size);

  void freeze_section(CodeSection* cs);

  // helper for CodeBuffer::expand()
  void take_over_code_from(CodeBuffer* cs);

  // ensure sections are disjoint, ordered, and contained in the blob
  void verify_section_allocation();

  // copies combined relocations to the blob, returns bytes copied
  // (if target is null, it is a dry run only, just for sizing)
  csize_t copy_relocations_to(CodeBlob* blob) const;

  // copies combined code to the blob (assumes relocs are already in there)
  void copy_code_to(CodeBlob* blob);

  // moves code sections to new buffer (assumes relocs are already in there)
  void relocate_code_to(CodeBuffer* cb) const;

  // set up a model of the final layout of my contents
  void compute_final_layout(CodeBuffer* dest) const;

  // Expand the given section so at least 'amount' is remaining.
  // Creates a new, larger BufferBlob, and rewrites the code & relocs.
  void expand(CodeSection* which_cs, csize_t amount);

  // Helper for expand.
  csize_t figure_expanded_capacities(CodeSection* which_cs, csize_t amount, csize_t* new_capacity);

 public:
  // (1) code buffer referring to pre-allocated instruction memory
  CodeBuffer(address code_start, csize_t code_size) {
    assert(code_start != NULL, "sanity");
    initialize_misc("static buffer");
    initialize(code_start, code_size);
    verify_section_allocation();
  }

  // (2) CodeBuffer referring to pre-allocated CodeBlob.
  CodeBuffer(CodeBlob* blob);

  // (3) code buffer allocating codeBlob memory for code & relocation
  //     info but with lazy initialization.  The name must be something
  //     informative.
  CodeBuffer(const char* name) {
    initialize_misc(name);
  }

  // (4) code buffer allocating codeBlob memory for code & relocation
  //     info.  The name must be something informative and code_size must
  //     include both code and stubs sizes.
  CodeBuffer(const char* name, csize_t code_size, csize_t locs_size) {
    initialize_misc(name);
    initialize(code_size, locs_size);
  }

  ~CodeBuffer();

  // Initialize a CodeBuffer constructed using constructor 3.  Using
  // constructor 4 is equivalent to calling constructor 3 and then
  // calling this method.  It's been factored out for convenience of
  // construction.
  void initialize(csize_t code_size, csize_t locs_size);

  CodeSection* consts() { return &_consts; }
  CodeSection* insts()  { return &_insts; }
  CodeSection* stubs()  { return &_stubs; }

  // present sections in order; return NULL at end; consts is #0, etc.
  CodeSection* code_section(int n) {
    // This makes the slightly questionable but portable assumption
    // that the various members (_consts, _insts, _stubs, etc.) are
    // adjacent in the layout of CodeBuffer.
    CodeSection* cs = &_consts + n;
    assert(cs->index() == n || !cs->is_allocated(), "sanity");
    return cs;
  }
  const CodeSection* code_section(int n) const {  // yucky const stuff
    return ((CodeBuffer*)this)->code_section(n);
  }
  static const char* code_section_name(int n);
  int section_index_of(address addr) const;
  bool contains(address addr) const {
    // handy for debugging
    return section_index_of(addr) > SECT_NONE;
  }

  // A stable mapping between 'locators' (small ints) and addresses.
  static int locator_pos(int locator)   { return locator >> sect_bits; }
  static int locator_sect(int locator)  { return locator &  sect_mask; }
  static int locator(int pos, int sect) { return (pos << sect_bits) | sect; }
  int        locator(address addr) const;
  address    locator_address(int locator) const;
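
  // Worked example (editor's illustration, not from the original source):
  // with sect_bits == 2, locator(0x40, SECT_INSTS) encodes the pair as
  // (0x40 << 2) | 1 == 0x101; locator_pos(0x101) recovers the offset 0x40
  // and locator_sect(0x101) recovers SECT_INSTS (== 1).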

  // Heuristic for pre-packing the taken/not-taken bit of a predicted branch.
  bool is_backward_branch(Label& L);

  // Properties
  const char* name() const          { return _name; }
  CodeBuffer* before_expand() const { return _before_expand; }
  BufferBlob* blob() const          { return _blob; }
  void set_blob(BufferBlob* blob);
  void free_blob();                 // Free the blob, if we own one.

  // Properties relative to the insts section:
  address insts_begin() const       { return _insts.start(); }
  address insts_end() const         { return _insts.end(); }
  void    set_insts_end(address end) {       _insts.set_end(end); }
  address insts_limit() const       { return _insts.limit(); }
  address insts_mark() const        { return _insts.mark(); }
  void    set_insts_mark()          {        _insts.set_mark(); }
  void    clear_insts_mark()        {        _insts.clear_mark(); }

  // is there anything in the buffer other than the insts section?
  bool    is_pure() const           { return insts_size() == total_content_size(); }

  // size in bytes of output so far in the insts section
  csize_t insts_size() const        { return _insts.size(); }

  // same as insts_size(), except that it asserts there is no non-code here
  csize_t pure_insts_size() const   { assert(is_pure(), "no non-code");
                                      return insts_size(); }
  // capacity in bytes of the insts section
  csize_t insts_capacity() const    { return _insts.capacity(); }

  // number of bytes remaining in the insts section
  csize_t insts_remaining() const   { return _insts.remaining(); }

  // is a given address in the insts section?  (2nd version is end-inclusive)
  bool insts_contains(address pc) const  { return _insts.contains(pc); }
  bool insts_contains2(address pc) const { return _insts.contains2(pc); }

  // Record any extra oops required to keep embedded metadata alive
  void finalize_oop_references(methodHandle method);

  // Allocated size in all sections, when aligned and concatenated
  // (this is the eventual state of the content in its final
  // CodeBlob).
  csize_t total_content_size() const;

  // Combined offset (relative to start of first section) of given
  // section, as eventually found in the final CodeBlob.
  csize_t total_offset_of(CodeSection* cs) const;

  // allocated size of all relocation data, including index, rounded up
  csize_t total_relocation_size() const;

  // allocated size of any and all recorded oops
  csize_t total_oop_size() const {
    OopRecorder* recorder = oop_recorder();
    return (recorder == NULL)? 0: recorder->oop_size();
  }

  // allocated size of any and all recorded metadata
  csize_t total_metadata_size() const {
    OopRecorder* recorder = oop_recorder();
    return (recorder == NULL)? 0: recorder->metadata_size();
  }

  // Configuration functions, called immediately after the CB is constructed.
  // The section sizes are subtracted from the original insts section.
  // Note: Call them in reverse section order, because each steals from insts.
  void initialize_consts_size(csize_t size) { initialize_section_size(&_consts, size); }
  void initialize_stubs_size(csize_t size)  { initialize_section_size(&_stubs,  size); }
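
  // Illustrative call sequence (editor's addition, not from the original
  // source): configuring a freshly constructed buffer.  The name and sizes
  // are arbitrary placeholders; note the reverse section order, since each
  // call steals its space from the insts section.
  //
  //   CodeBuffer cb("illustrative_blob", 8192, 512);
  //   cb.initialize_stubs_size(1024);   // carve the stubs section out of insts first
  //   cb.initialize_consts_size(256);   // then carve out the constants section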
  // Override default oop recorder.
  void initialize_oop_recorder(OopRecorder* r);

  OopRecorder* oop_recorder() const { return _oop_recorder; }
  CodeStrings& strings()            { return _strings; }

  // Code generation
  void relocate(address at, RelocationHolder const& rspec, int format = 0) {
    _insts.relocate(at, rspec, format);
  }
  void relocate(address at, relocInfo::relocType rtype, int format = 0) {
    _insts.relocate(at, rtype, format);
  }

  // Management of overflow storage for binding of Labels.
  GrowableArray<int>* create_patch_overflow();

  // NMethod generation
  void copy_code_and_locs_to(CodeBlob* blob) {
    assert(blob != NULL, "sane");
    copy_relocations_to(blob);
    copy_code_to(blob);
  }
  void copy_values_to(nmethod* nm) {
    if (!oop_recorder()->is_unused()) {
      oop_recorder()->copy_values_to(nm);
    }
  }

  // Transform an address from the code in this code buffer to the
  // corresponding address in a specified code buffer.
  address transform_address(const CodeBuffer &cb, address addr) const;

  void block_comment(intptr_t offset, const char * comment) PRODUCT_RETURN;
  const char* code_string(const char* str) PRODUCT_RETURN_(return NULL;);

  // Log a little info about section usage in the CodeBuffer
  void log_section_sizes(const char* name);

#ifndef PRODUCT
 public:
  // Printing / Decoding
  // decodes from decode_begin() to code_end() and sets decode_begin to end
  void decode();
  void decode_all();         // decodes all the code
  void skip_decode();        // sets decode_begin to code_end();
  void print();
#endif


  // The following header contains architecture-specific implementations
#ifdef TARGET_ARCH_x86
# include "codeBuffer_x86.hpp"
#endif
#ifdef TARGET_ARCH_sparc
# include "codeBuffer_sparc.hpp"
#endif
#ifdef TARGET_ARCH_zero
# include "codeBuffer_zero.hpp"
#endif
#ifdef TARGET_ARCH_arm
# include "codeBuffer_arm.hpp"
#endif
#ifdef TARGET_ARCH_ppc
# include "codeBuffer_ppc.hpp"
#endif

};


inline void CodeSection::freeze() {
  _outer->freeze_section(this);
}

inline bool CodeSection::maybe_expand_to_ensure_remaining(csize_t amount) {
  if (remaining() < amount) { _outer->expand(this, amount); return true; }
  return false;
}

#endif // SHARE_VM_ASM_CODEBUFFER_HPP
