/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

class CodeComments;
class AbstractAssembler;
class MacroAssembler;
class PhaseCFG;
class Compile;
class BufferBlob;
class CodeBuffer;

class CodeOffsets: public StackObj {
 public:
  enum Entries { Entry,
                 Verified_Entry,
                 Frame_Complete,          // Offset in the code where the frame setup (for forte stackwalks) is complete
                 OSR_Entry,
                 Dtrace_trap = OSR_Entry, // dtrace probes can never have an OSR entry so reuse it
                 Exceptions,              // Offset where exception handler lives
                 Deopt,                   // Offset where deopt handler lives
                 DeoptMH,                 // Offset where MethodHandle deopt handler lives
                 UnwindHandler,           // Offset to default unwind handler
                 max_Entries };

  // special value to note codeBlobs where profile (forte) stack walking is
  // always dangerous and suspect.

  enum { frame_never_safe = -1 };

 private:
  int _values[max_Entries];

 public:
  CodeOffsets() {
    _values[Entry         ] = 0;
    _values[Verified_Entry] = 0;
    _values[Frame_Complete] = frame_never_safe;
    _values[OSR_Entry     ] = 0;
    _values[Exceptions    ] = -1;
    _values[Deopt         ] = -1;
    _values[DeoptMH       ] = -1;
    _values[UnwindHandler ] = -1;
  }

  int value(Entries e) { return _values[e]; }
  void set_value(Entries e, int val) { _values[e] = val; }
};

// This class represents a stream of code and associated relocations.
// There are a few in each CodeBuffer.
// They are filled concurrently, and concatenated at the end.
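//
// A rough usage sketch (illustrative only; it assumes the name/size
// CodeBuffer constructor, the insts()/stubs() accessors and the
// initialize_stubs_size() call declared further below, with all sizes
// chosen by the caller): each section accumulates bytes independently,
//
//   CodeBuffer cb("my_stub", code_size, locs_size);
//   cb.initialize_stubs_size(stubs_size);     // carve a stubs section out of insts
//   CodeSection* insts = cb.insts();
//   CodeSection* stubs = cb.stubs();
//   insts->emit_int32(0x90909090);            // accumulates in SECT_INSTS
//   stubs->emit_int32(0x90909090);            // accumulates in SECT_STUBS
//
// and only when the buffer is written to its final CodeBlob are the
// sections aligned and laid out end to end.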
class CodeSection VALUE_OBJ_CLASS_SPEC {
  friend class CodeBuffer;
 public:
  typedef int csize_t;  // code size type; would be size_t except for history

 private:
  address     _start;       // first byte of contents (instructions)
  address     _mark;        // user mark, usually an instruction beginning
  address     _end;         // current end address
  address     _limit;       // last possible (allocated) end address
  relocInfo*  _locs_start;  // first byte of relocation information
  relocInfo*  _locs_end;    // first byte after relocation information
  relocInfo*  _locs_limit;  // first byte after relocation information buf
  address     _locs_point;  // last relocated position (grows upward)
  bool        _locs_own;    // did I allocate the locs myself?
  bool        _frozen;      // no more expansion of this section
  char        _index;       // my section number (SECT_INSTS, etc.)
  CodeBuffer* _outer;       // enclosing CodeBuffer

  // (Note: _locs_point used to be called _last_reloc_offset.)

  CodeSection() {
    _start      = NULL;
    _mark       = NULL;
    _end        = NULL;
    _limit      = NULL;
    _locs_start = NULL;
    _locs_end   = NULL;
    _locs_limit = NULL;
    _locs_point = NULL;
    _locs_own   = false;
    _frozen     = false;
    debug_only(_index = (char)-1);
    debug_only(_outer = (CodeBuffer*)badAddress);
  }

  void initialize_outer(CodeBuffer* outer, int index) {
    _outer = outer;
    _index = index;
  }

  void initialize(address start, csize_t size = 0) {
    assert(_start == NULL, "only one init step, please");
    _start      = start;
    _mark       = NULL;
    _end        = start;

    _limit      = start + size;
    _locs_point = start;
  }

  void initialize_locs(int locs_capacity);
  void expand_locs(int new_capacity);
  void initialize_locs_from(const CodeSection* source_cs);

  // helper for CodeBuffer::expand()
  void take_over_code_from(CodeSection* cs) {
    _start      = cs->_start;
    _mark       = cs->_mark;
    _end        = cs->_end;
    _limit      = cs->_limit;
    _locs_point = cs->_locs_point;
  }

 public:
  address     start() const          { return _start; }
  address     mark() const           { return _mark; }
  address     end() const            { return _end; }
  address     limit() const          { return _limit; }
  csize_t     size() const           { return (csize_t)(_end - _start); }
  csize_t     mark_off() const       { assert(_mark != NULL, "not an offset");
                                       return (csize_t)(_mark - _start); }
  csize_t     capacity() const       { return (csize_t)(_limit - _start); }
  csize_t     remaining() const      { return (csize_t)(_limit - _end); }

  relocInfo*  locs_start() const     { return _locs_start; }
  relocInfo*  locs_end() const       { return _locs_end; }
  int         locs_count() const     { return (int)(_locs_end - _locs_start); }
  relocInfo*  locs_limit() const     { return _locs_limit; }
  address     locs_point() const     { return _locs_point; }
  csize_t     locs_point_off() const { return (csize_t)(_locs_point - _start); }
  csize_t     locs_capacity() const  { return (csize_t)(_locs_limit - _locs_start); }
  csize_t     locs_remaining() const { return (csize_t)(_locs_limit - _locs_end); }
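
  //   A small worked example (illustrative): after a section has been
  //   initialized over a 100-byte range starting at 'buf' and a single
  //   emit_int32(x) -- see the emission helpers below -- the accessors report
  //   start() == buf, end() == buf + 4 and limit() == buf + 100, so
  //   size() == 4, remaining() == 96 and capacity() == 100.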

  int  index() const         { return _index; }
  bool is_allocated() const  { return _start != NULL; }
  bool is_empty() const      { return _start == _end; }
  bool is_frozen() const     { return _frozen; }
  bool has_locs() const      { return _locs_end != NULL; }

  CodeBuffer* outer() const  { return _outer; }

  // is a given address in this section?  (2nd version is end-inclusive)
  bool contains(address pc) const   { return pc >= _start && pc <  _end; }
  bool contains2(address pc) const  { return pc >= _start && pc <= _end; }
  bool allocates(address pc) const  { return pc >= _start && pc <  _limit; }
  bool allocates2(address pc) const { return pc >= _start && pc <= _limit; }

  void set_end(address pc)      { assert(allocates2(pc), ""); _end = pc; }
  void set_mark(address pc)     { assert(contains2(pc), "not in codeBuffer");
                                  _mark = pc; }
  void set_mark_off(int offset) { assert(contains2(offset + _start), "not in codeBuffer");
                                  _mark = offset + _start; }
  void set_mark()               { _mark = _end; }
  void clear_mark()             { _mark = NULL; }

  void set_locs_end(relocInfo* p) {
    assert(p <= locs_limit(), "locs data fits in allocated buffer");
    _locs_end = p;
  }
  void set_locs_point(address pc) {
    assert(pc >= locs_point(), "relocation addr may not decrease");
    assert(allocates2(pc),     "relocation addr must be in this section");
    _locs_point = pc;
  }

  // Code emission
  void emit_int8 (int8_t  x) { *((int8_t*)  end()) = x; set_end(end() + 1); }
  void emit_int16(int16_t x) { *((int16_t*) end()) = x; set_end(end() + 2); }
  void emit_int32(int32_t x) { *((int32_t*) end()) = x; set_end(end() + 4); }
  void emit_int64(int64_t x) { *((int64_t*) end()) = x; set_end(end() + 8); }

  // Share a scratch buffer for relocinfo.  (Hacky; saves a resource allocation.)
  void initialize_shared_locs(relocInfo* buf, int length);

  // Manage labels and their addresses.
  address target(Label& L, address branch_pc);

  // Emit a relocation.
  void relocate(address at, RelocationHolder const& rspec, int format = 0);
  void relocate(address at,    relocInfo::relocType rtype, int format = 0) {
    if (rtype != relocInfo::none)
      relocate(at, Relocation::spec_simple(rtype), format);
  }

  // alignment requirement for starting offset
  // Requirements are that the instruction area and the
  // stubs area must start on CodeEntryAlignment, and
  // the ctable on sizeof(jdouble)
  int alignment() const     { return MAX2((int)sizeof(jdouble), (int)CodeEntryAlignment); }

  // Slop between sections, used only when allocating temporary BufferBlob buffers.
  static csize_t end_slop() { return MAX2((int)sizeof(jdouble), (int)CodeEntryAlignment); }

  csize_t align_at_start(csize_t off) const { return (csize_t) align_size_up(off, alignment()); }

  // Mark a section frozen.  Assign its remaining space to
  // the following section.  It will never expand after this point.
  inline void freeze();     //  { _outer->freeze_section(this); }
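
  //   Emission sketch (illustrative; real call sites are normally generated
  //   through the Assembler rather than by hand): to emit a word that embeds
  //   an externally patched value and record a relocation for it, one would
  //   do roughly
  //
  //     address insn = cs->end();
  //     cs->emit_int32(raw_instruction_bits);
  //     cs->relocate(insn, relocInfo::runtime_call_type);
  //
  //   where 'cs' is a CodeSection* and 'raw_instruction_bits' stands for the
  //   encoded instruction; the relocation is recorded against the start of
  //   the instruction, not against the section's new end.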

  // Ensure there's enough space left in the current section.
  // Return true if there was an expansion.
  bool maybe_expand_to_ensure_remaining(csize_t amount);

#ifndef PRODUCT
  void decode();
  void dump();
  void print(const char* name);
#endif //PRODUCT
};

class CodeComment;
class CodeComments VALUE_OBJ_CLASS_SPEC {
 private:
#ifndef PRODUCT
  CodeComment* _comments;
#endif

 public:
  CodeComments() {
#ifndef PRODUCT
    _comments = NULL;
#endif
  }

  void add_comment(intptr_t offset, const char * comment) PRODUCT_RETURN;
  void print_block_comment(outputStream* stream, intptr_t offset) PRODUCT_RETURN;
  void assign(CodeComments& other) PRODUCT_RETURN;
  void free() PRODUCT_RETURN;
};


// A CodeBuffer describes a memory space into which assembly
// code is generated.  This memory space usually occupies the
// interior of a single BufferBlob, but in some cases it may be
// an arbitrary span of memory, even outside the code cache.
//
// A code buffer comes in two variants:
//
// (1) A CodeBuffer referring to an already allocated piece of memory:
//     This is used to direct 'static' code generation (e.g. for interpreter
//     or stubroutine generation, etc.).  This code comes with NO relocation
//     information.
//
// (2) A CodeBuffer referring to a piece of memory allocated when the
//     CodeBuffer is allocated.  This is used for nmethod generation.
//
// The memory can be divided up into several parts called sections.
// Each section independently accumulates code (or data) and relocations.
// Sections can grow (at the expense of a reallocation of the BufferBlob
// and recopying of all active sections).  When the buffered code is finally
// written to an nmethod (or other CodeBlob), the contents (code, data,
// and relocations) of the sections are padded to an alignment and concatenated.
// Instructions and data in one section can contain relocatable references to
// addresses in a sibling section.

class CodeBuffer: public StackObj {
  friend class CodeSection;

 private:
  // CodeBuffers must be allocated on the stack except for a single
  // special case during expansion which is handled internally.  This
  // is done to guarantee proper cleanup of resources.
  void* operator new(size_t size) { return ResourceObj::operator new(size); }
  void  operator delete(void* p)  { ShouldNotCallThis(); }

 public:
  typedef int csize_t;  // code size type; would be size_t except for history
  enum {
    // Here is the list of all possible sections, in order of ascending address.
    SECT_INSTS,               // Executable instructions.
    SECT_STUBS,               // Outbound trampolines for supporting call sites.
    SECT_CONSTS,              // Non-instruction data:  Floats, jump tables, etc.
    SECT_LIMIT, SECT_NONE = -1
  };
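  //
  // Construction sketches (illustrative; they assume the constructors and
  // the size-configuration calls declared below):
  //
  //   (1) wrap pre-allocated memory, e.g. for interpreter or stub code
  //       (no relocation info):
  //         CodeBuffer cb(code_start, code_size);
  //
  //   (2) let the buffer allocate its own BufferBlob, e.g. for an nmethod:
  //         CodeBuffer cb("my_compile", code_size, locs_size);
  //         cb.initialize_consts_size(consts_size);  // reverse section order:
  //         cb.initialize_stubs_size(stubs_size);    // each call steals from insts
  //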

 private:
  enum {
    sect_bits = 2,      // assert (SECT_LIMIT <= (1<<sect_bits))
    sect_mask = (1<<sect_bits)-1
  };

  const char*  _name;

  CodeSection  _insts;          // instructions (the main section)
  CodeSection  _stubs;          // stubs (call site support), deopt, exception handling
  CodeSection  _consts;         // constants, jump tables

  CodeBuffer*  _before_expand;  // dead buffer, from before the last expansion

  BufferBlob*  _blob;           // optional BufferBlob backing the generated code
  address      _total_start;    // first address of combined memory, if contiguous
  csize_t      _total_size;     // size in bytes of combined memory, if contiguous

  OopRecorder* _oop_recorder;
  CodeComments _comments;
  OopRecorder  _default_oop_recorder;  // override with initialize_oop_recorder
  Arena*       _overflow_arena;        // label patch overflow storage (see create_patch_overflow)

  address      _decode_begin;   // start address for decode
  address      decode_begin();

  // Configure the given section to take 'size' bytes from the insts section.
  void initialize_section_size(CodeSection* cs, csize_t size);

  // Give a frozen section's remaining space to the following section.
  void freeze_section(CodeSection* cs);

  // Copy combined relocations into the blob; return the number of bytes copied.
  csize_t copy_relocations_to(CodeBlob* blob) const;

  // Copy combined code into the blob (assumes relocations are already there).
  void copy_code_to(CodeBlob* blob);

  // Expand the given section so that at least 'amount' bytes remain.
  // Creates a new, larger BufferBlob and rewrites the code and relocations.
  void expand(CodeSection* which_cs, csize_t amount);

 public:
  // (1) code buffer referring to pre-allocated instruction memory
  CodeBuffer(address code_start, csize_t code_size);

  // (2) code buffer referring to a pre-allocated CodeBlob
  CodeBuffer(CodeBlob* blob);

  // (3) code buffer with lazy initialization; the name must be informative
  CodeBuffer(const char* name);

  // (4) code buffer allocating codeBlob memory for code and relocation
  //     info; code_size must include both code and stubs sizes
  CodeBuffer(const char* name, csize_t code_size, csize_t locs_size);

  ~CodeBuffer();

  // Complete the initialization of a CodeBuffer created with constructor (3).
  void initialize(csize_t code_size, csize_t locs_size);

  // The individual sections:
  CodeSection* insts()  { return &_insts;  }
  CodeSection* stubs()  { return &_stubs;  }
  CodeSection* consts() { return &_consts; }

  // Map a section index (SECT_INSTS, etc.) to its CodeSection.
  CodeSection* code_section(int n) {
    // This relies on the section members (_insts, _stubs, _consts) being
    // laid out adjacently, in the same order as the SECT_* indexes.
    CodeSection* cs = &_insts + n;
    assert(cs->index() == n || !cs->is_allocated(), "sanity");
    return cs;
  }
  const CodeSection* code_section(int n) const {  // yucky const stuff
    return ((CodeBuffer*)this)->code_section(n);
  }
  static const char* code_section_name(int n);
  int section_index_of(address addr) const;
  bool contains(address addr) const {
    // handy for debugging
    return section_index_of(addr) > SECT_NONE;
  }

  // A stable mapping between 'locators' (small ints) and addresses.
  static int locator_pos(int locator)   { return locator >> sect_bits; }
  static int locator_sect(int locator)  { return locator &  sect_mask; }
  static int locator(int pos, int sect) { return (pos << sect_bits) | sect; }
  int        locator(address addr) const;
  address    locator_address(int locator) const;

  // Properties
  const char* name() const          { return _name; }
  CodeBuffer* before_expand() const { return _before_expand; }
  BufferBlob* blob() const          { return _blob; }
  void    set_blob(BufferBlob* blob);
  void   free_blob();               // Free the blob, if we own one.

  // Properties relative to the insts section:
  address insts_begin() const        { return _insts.start(); }
  address insts_end() const          { return _insts.end();   }
  void    set_insts_end(address end) {        _insts.set_end(end); }
  address insts_limit() const        { return _insts.limit(); }
  address insts_mark() const         { return _insts.mark();  }
  void    set_insts_mark()           {        _insts.set_mark(); }
  void  clear_insts_mark()           {        _insts.clear_mark(); }

  // is there anything in the buffer other than the current section?
  bool    is_pure() const            { return insts_size() == total_content_size(); }

  // size in bytes of output so far in the insts sections
  csize_t insts_size() const         { return _insts.size(); }

  // same as insts_size(), except that it asserts there is no non-code here
  csize_t pure_insts_size() const    { assert(is_pure(), "no non-code");
                                       return insts_size(); }
  // capacity in bytes of the insts sections
  csize_t insts_capacity() const     { return _insts.capacity(); }

  // number of bytes remaining in the insts section
  csize_t insts_remaining() const    { return _insts.remaining(); }

  // is a given address in the insts section?  (2nd version is end-inclusive)
  bool insts_contains(address pc) const  { return _insts.contains(pc); }
  bool insts_contains2(address pc) const { return _insts.contains2(pc); }

  // Allocated size in all sections, when aligned and concatenated
  // (this is the eventual state of the content in its final
  // CodeBlob).
  csize_t total_content_size() const;
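
  //   For example (illustrative): a buffer holding 100 bytes of insts and 40
  //   bytes of stubs has insts_size() == 100, while total_content_size()
  //   covers both sections plus any alignment padding between them, so
  //   is_pure() returns false.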

  // combined offset (relative to start of insts) of given address,
  // as eventually found in the final CodeBlob
  csize_t total_offset_of(address addr) const;

  // allocated size of all relocation data, including index, rounded up
  csize_t total_relocation_size() const;

  // allocated size of any and all recorded oops
  csize_t total_oop_size() const {
    OopRecorder* recorder = oop_recorder();
    return (recorder == NULL)? 0: recorder->oop_size();
  }

  // Configuration functions, called immediately after the CB is constructed.
  // The section sizes are subtracted from the original insts section.
  // Note:  Call them in reverse section order, because each steals from insts.
  void initialize_consts_size(csize_t size) { initialize_section_size(&_consts, size); }
  void initialize_stubs_size(csize_t size)  { initialize_section_size(&_stubs,  size); }
  // Override default oop recorder.
  void initialize_oop_recorder(OopRecorder* r);

  OopRecorder* oop_recorder() const { return _oop_recorder; }
  CodeComments& comments()          { return _comments; }

  // Code generation
  void relocate(address at, RelocationHolder const& rspec, int format = 0) {
    _insts.relocate(at, rspec, format);
  }
  void relocate(address at,    relocInfo::relocType rtype, int format = 0) {
    _insts.relocate(at, rtype, format);
  }

  // Management of overflow storage for binding of Labels.
  GrowableArray<int>* create_patch_overflow();

  // NMethod generation
  void copy_code_and_locs_to(CodeBlob* blob) {
    assert(blob != NULL, "sane");
    copy_relocations_to(blob);
    copy_code_to(blob);
  }
  void copy_oops_to(nmethod* nm) {
    if (!oop_recorder()->is_unused()) {
      oop_recorder()->copy_to(nm);
    }
  }

  // Transform an address from the code in this code buffer to a specified code buffer
  address transform_address(const CodeBuffer &cb, address addr) const;

  void block_comment(intptr_t offset, const char * comment) PRODUCT_RETURN;

#ifndef PRODUCT
 public:
  // Printing / Decoding
  // decodes from decode_begin() to code_end() and sets decode_begin to end
  void    decode();
  void    decode_all();         // decodes all the code
  void    skip_decode();        // sets decode_begin to code_end();
  void    print();
#endif


  // The following header contains architecture-specific implementations
  #include "incls/_codeBuffer_pd.hpp.incl"
};


inline void CodeSection::freeze() {
  _outer->freeze_section(this);
}

inline bool CodeSection::maybe_expand_to_ensure_remaining(csize_t amount) {
  if (remaining() < amount) { _outer->expand(this, amount); return true; }
  return false;
}
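
// Expansion sketch (illustrative): an emitter about to write a worst-case
// sized instruction can guard the write as follows, where 'cs' is a
// CodeSection* and 'max_insn_size' is a caller-chosen bound:
//
//   if (cs->maybe_expand_to_ensure_remaining(max_insn_size)) {
//     // The buffer was re-allocated into a larger BufferBlob, so raw code
//     // addresses cached by the caller are stale; section-relative offsets
//     // remain valid.
//   }
//   cs->emit_int32(bits);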