src/share/vm/asm/codeBuffer.hpp

author:      kvn
date:        Tue, 03 Aug 2010 15:55:03 -0700
changeset:   2040:0e35fa8ebccd
parent:      1934:e9ff18c4ace7
child:       2044:f4f596978298
permissions: -rw-r--r--

6973963: SEGV in ciBlock::start_bci() with EA
Summary: Added more checks into ResourceObj and growableArray to verify correctness of allocation type.
Reviewed-by: never, coleenp, dholmes

/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

class  CodeComments;
class  AbstractAssembler;
class  MacroAssembler;
class  PhaseCFG;
class  Compile;
class  BufferBlob;
class  CodeBuffer;

class CodeOffsets: public StackObj {
public:
  enum Entries { Entry,
                 Verified_Entry,
                 Frame_Complete, // Offset in the code where the frame setup (for forte stackwalks) is complete
                 OSR_Entry,
                 Dtrace_trap = OSR_Entry,  // dtrace probes can never have an OSR entry so reuse it
                 Exceptions,     // Offset where exception handler lives
                 Deopt,          // Offset where deopt handler lives
                 DeoptMH,        // Offset where MethodHandle deopt handler lives
                 UnwindHandler,  // Offset to default unwind handler
                 max_Entries };

  // special value to note codeBlobs where profile (forte) stack walking is
  // always dangerous and suspect.

  enum { frame_never_safe = -1 };

private:
  int _values[max_Entries];

public:
  CodeOffsets() {
    _values[Entry         ] = 0;
    _values[Verified_Entry] = 0;
    _values[Frame_Complete] = frame_never_safe;
    _values[OSR_Entry     ] = 0;
    _values[Exceptions    ] = -1;
    _values[Deopt         ] = -1;
    _values[DeoptMH       ] = -1;
    _values[UnwindHandler ] = -1;
  }

  int value(Entries e) { return _values[e]; }
  void set_value(Entries e, int val) { _values[e] = val; }
};
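
// Usage sketch (illustrative only, not part of the original header): a code
// generator records its entry points as offsets from the start of the blob.
// The offset variable below is hypothetical.
//   CodeOffsets offsets;
//   offsets.set_value(CodeOffsets::Verified_Entry, verified_entry_offset);
//   int entry = offsets.value(CodeOffsets::Entry);   // defaults to 0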

// This class represents a stream of code and associated relocations.
// There are a few in each CodeBuffer.
// They are filled concurrently, and concatenated at the end.
class CodeSection VALUE_OBJ_CLASS_SPEC {
  friend class CodeBuffer;
 public:
  typedef int csize_t;  // code size type; would be size_t except for history

 private:
  address     _start;           // first byte of contents (instructions)
  address     _mark;            // user mark, usually an instruction beginning
  address     _end;             // current end address
  address     _limit;           // last possible (allocated) end address
  relocInfo*  _locs_start;      // first byte of relocation information
  relocInfo*  _locs_end;        // first byte after relocation information
  relocInfo*  _locs_limit;      // first byte after relocation information buf
  address     _locs_point;      // last relocated position (grows upward)
  bool        _locs_own;        // did I allocate the locs myself?
  bool        _frozen;          // no more expansion of this section
  char        _index;           // my section number (SECT_INST, etc.)
  CodeBuffer* _outer;           // enclosing CodeBuffer

  // (Note:  _locs_point used to be called _last_reloc_offset.)

  CodeSection() {
    _start         = NULL;
    _mark          = NULL;
    _end           = NULL;
    _limit         = NULL;
    _locs_start    = NULL;
    _locs_end      = NULL;
    _locs_limit    = NULL;
    _locs_point    = NULL;
    _locs_own      = false;
    _frozen        = false;
    debug_only(_index = -1);
    debug_only(_outer = (CodeBuffer*)badAddress);
  }

  void initialize_outer(CodeBuffer* outer, int index) {
    _outer = outer;
    _index = index;
  }

  void initialize(address start, csize_t size = 0) {
    assert(_start == NULL, "only one init step, please");
    _start         = start;
    _mark          = NULL;
    _end           = start;

    _limit         = start + size;
    _locs_point    = start;
  }

  void initialize_locs(int locs_capacity);
  void expand_locs(int new_capacity);
  void initialize_locs_from(const CodeSection* source_cs);

  // helper for CodeBuffer::expand()
  void take_over_code_from(CodeSection* cs) {
    _start      = cs->_start;
    _mark       = cs->_mark;
    _end        = cs->_end;
    _limit      = cs->_limit;
    _locs_point = cs->_locs_point;
  }

 public:
  address     start() const         { return _start; }
  address     mark() const          { return _mark; }
  address     end() const           { return _end; }
  address     limit() const         { return _limit; }
  csize_t     size() const          { return (csize_t)(_end - _start); }
  csize_t     mark_off() const      { assert(_mark != NULL, "not an offset");
                                      return (csize_t)(_mark - _start); }
  csize_t     capacity() const      { return (csize_t)(_limit - _start); }
  csize_t     remaining() const     { return (csize_t)(_limit - _end); }

  relocInfo*  locs_start() const    { return _locs_start; }
  relocInfo*  locs_end() const      { return _locs_end; }
  int         locs_count() const    { return (int)(_locs_end - _locs_start); }
  relocInfo*  locs_limit() const    { return _locs_limit; }
  address     locs_point() const    { return _locs_point; }
  csize_t     locs_point_off() const{ return (csize_t)(_locs_point - _start); }
  csize_t     locs_capacity() const { return (csize_t)(_locs_limit - _locs_start); }
  csize_t     locs_remaining()const { return (csize_t)(_locs_limit - _locs_end); }

  int         index() const         { return _index; }
  bool        is_allocated() const  { return _start != NULL; }
  bool        is_empty() const      { return _start == _end; }
  bool        is_frozen() const     { return _frozen; }
  bool        has_locs() const      { return _locs_end != NULL; }

  CodeBuffer* outer() const         { return _outer; }

  // is a given address in this section?  (2nd version is end-inclusive)
  bool contains(address pc) const   { return pc >= _start && pc <  _end; }
  bool contains2(address pc) const  { return pc >= _start && pc <= _end; }
  bool allocates(address pc) const  { return pc >= _start && pc <  _limit; }
  bool allocates2(address pc) const { return pc >= _start && pc <= _limit; }

  void    set_end(address pc)       { assert(allocates2(pc),""); _end = pc; }
  void    set_mark(address pc)      { assert(contains2(pc),"not in codeBuffer");
                                      _mark = pc; }
  void    set_mark_off(int offset)  { assert(contains2(offset+_start),"not in codeBuffer");
                                      _mark = offset + _start; }
  void    set_mark()                { _mark = _end; }
  void    clear_mark()              { _mark = NULL; }

  void    set_locs_end(relocInfo* p) {
    assert(p <= locs_limit(), "locs data fits in allocated buffer");
    _locs_end = p;
  }
  void    set_locs_point(address pc) {
    assert(pc >= locs_point(), "relocation addr may not decrease");
    assert(allocates2(pc),     "relocation addr must be in this section");
    _locs_point = pc;
  }

  // Share a scratch buffer for relocinfo.  (Hacky; saves a resource allocation.)
  void initialize_shared_locs(relocInfo* buf, int length);

  // Manage labels and their addresses.
  address target(Label& L, address branch_pc);

  // Emit a relocation.
  void relocate(address at, RelocationHolder const& rspec, int format = 0);
  void relocate(address at,    relocInfo::relocType rtype, int format = 0) {
    if (rtype != relocInfo::none)
      relocate(at, Relocation::spec_simple(rtype), format);
  }

  // alignment requirement for starting offset
  // Requirements are that the instruction area and the
  // stubs area must start on CodeEntryAlignment, and
  // the ctable on sizeof(jdouble)
  int alignment() const             { return MAX2((int)sizeof(jdouble), (int)CodeEntryAlignment); }

  // Slop between sections, used only when allocating temporary BufferBlob buffers.
  static csize_t end_slop()         { return MAX2((int)sizeof(jdouble), (int)CodeEntryAlignment); }

  csize_t align_at_start(csize_t off) const { return (csize_t) align_size_up(off, alignment()); }
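
  // Example (illustrative): with the MAX2(sizeof(jdouble), CodeEntryAlignment)
  // alignment above, align_at_start(5) rounds the offset 5 up to the next
  // multiple of that alignment before this section is concatenated.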

  // Mark a section frozen.  Assign its remaining space to
  // the following section.  It will never expand after this point.
  inline void freeze();         //  { _outer->freeze_section(this); }

  // Ensure there's enough space left in the current section.
  // Return true if there was an expansion.
  bool maybe_expand_to_ensure_remaining(csize_t amount);

#ifndef PRODUCT
  void decode();
  void dump();
  void print(const char* name);
#endif //PRODUCT
};
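
// Usage sketch (illustrative only): sections are normally reached through
// their enclosing CodeBuffer rather than constructed directly.  'cb' and
// 'needed_bytes' below are hypothetical.
//   CodeSection* cs = cb.insts();
//   if (cs->maybe_expand_to_ensure_remaining(needed_bytes)) {
//     // the underlying BufferBlob was reallocated; cached addresses are stale
//   }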

class CodeComment;
class CodeComments VALUE_OBJ_CLASS_SPEC {
private:
#ifndef PRODUCT
  CodeComment* _comments;
#endif

public:
  CodeComments() {
#ifndef PRODUCT
    _comments = NULL;
#endif
  }

  void add_comment(intptr_t offset, const char * comment) PRODUCT_RETURN;
  void print_block_comment(outputStream* stream, intptr_t offset)  PRODUCT_RETURN;
  void assign(CodeComments& other)  PRODUCT_RETURN;
  void free() PRODUCT_RETURN;
};
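
// Usage sketch (illustrative only): in non-product builds a generator can
// attach a note to an offset in the emitted code for later disassembly.
// 'cb' below is a hypothetical CodeBuffer.
//   cb.comments().add_comment(cb.code_size(), "inline cache check");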

// A CodeBuffer describes a memory space into which assembly
// code is generated.  This memory space usually occupies the
// interior of a single BufferBlob, but in some cases it may be
// an arbitrary span of memory, even outside the code cache.
//
// A code buffer comes in two variants:
//
// (1) A CodeBuffer referring to an already allocated piece of memory:
//     This is used to direct 'static' code generation (e.g. for interpreter
//     or stubroutine generation, etc.).  This code comes with NO relocation
//     information.
//
// (2) A CodeBuffer referring to a piece of memory allocated when the
//     CodeBuffer is allocated.  This is used for nmethod generation.
//
// The memory can be divided up into several parts called sections.
// Each section independently accumulates code (or data) and relocations.
// Sections can grow (at the expense of a reallocation of the BufferBlob
// and recopying of all active sections).  When the buffered code is finally
// written to an nmethod (or other CodeBlob), the contents (code, data,
// and relocations) of the sections are padded to an alignment and concatenated.
// Instructions and data in one section can contain relocatable references to
// addresses in a sibling section.

class CodeBuffer: public StackObj {
  friend class CodeSection;

 private:
  // CodeBuffers must be allocated on the stack except for a single
  // special case during expansion which is handled internally.  This
  // is done to guarantee proper cleanup of resources.
  void* operator new(size_t size) { return ResourceObj::operator new(size); }
  void  operator delete(void* p)  { ShouldNotCallThis(); }

 public:
  typedef int csize_t;  // code size type; would be size_t except for history
  enum {
    // Here is the list of all possible sections, in order of ascending address.
    SECT_INSTS,               // Executable instructions.
    SECT_STUBS,               // Outbound trampolines for supporting call sites.
    SECT_CONSTS,              // Non-instruction data:  Floats, jump tables, etc.
    SECT_LIMIT, SECT_NONE = -1
  };

 private:
  enum {
    sect_bits = 2,      // assert (SECT_LIMIT <= (1<<sect_bits))
    sect_mask = (1<<sect_bits)-1
  };

  const char*  _name;

  CodeSection  _insts;              // instructions (the main section)
  CodeSection  _stubs;              // stubs (call site support), deopt, exception handling
  CodeSection  _consts;             // constants, jump tables

  CodeBuffer*  _before_expand;  // dead buffer, from before the last expansion

  BufferBlob*  _blob;           // optional buffer in CodeCache for generated code
  address      _total_start;    // first address of combined memory buffer
  csize_t      _total_size;     // size in bytes of combined memory buffer

  OopRecorder* _oop_recorder;
  CodeComments _comments;
  OopRecorder  _default_oop_recorder;  // override with initialize_oop_recorder
  Arena*       _overflow_arena;

  address      _decode_begin;   // start address for decode
  address      decode_begin();

  void initialize_misc(const char * name) {
    // all pointers other than code_start/end and those inside the sections
    assert(name != NULL, "must have a name");
    _name            = name;
    _before_expand   = NULL;
    _blob            = NULL;
    _oop_recorder    = NULL;
    _decode_begin    = NULL;
    _overflow_arena  = NULL;
  }

  void initialize(address code_start, csize_t code_size) {
    _insts.initialize_outer(this,   SECT_INSTS);
    _stubs.initialize_outer(this,   SECT_STUBS);
    _consts.initialize_outer(this,  SECT_CONSTS);
    _total_start = code_start;
    _total_size  = code_size;
    // Initialize the main section:
    _insts.initialize(code_start, code_size);
    assert(!_stubs.is_allocated(),  "no garbage here");
    assert(!_consts.is_allocated(), "no garbage here");
    _oop_recorder = &_default_oop_recorder;
  }

  void initialize_section_size(CodeSection* cs, csize_t size);

  void freeze_section(CodeSection* cs);

  // helper for CodeBuffer::expand()
  void take_over_code_from(CodeBuffer* cs);

#ifdef ASSERT
  // ensure sections are disjoint, ordered, and contained in the blob
  bool verify_section_allocation();
#endif

  // copies combined relocations to the blob, returns bytes copied
  // (if target is null, it is a dry run only, just for sizing)
  csize_t copy_relocations_to(CodeBlob* blob) const;

  // copies combined code to the blob (assumes relocs are already in there)
  void copy_code_to(CodeBlob* blob);

  // moves code sections to new buffer (assumes relocs are already in there)
  void relocate_code_to(CodeBuffer* cb) const;

  // set up a model of the final layout of my contents
  void compute_final_layout(CodeBuffer* dest) const;

  // Expand the given section so at least 'amount' is remaining.
  // Creates a new, larger BufferBlob, and rewrites the code & relocs.
  void expand(CodeSection* which_cs, csize_t amount);

  // Helper for expand.
  csize_t figure_expanded_capacities(CodeSection* which_cs, csize_t amount, csize_t* new_capacity);

 public:
  // (1) code buffer referring to pre-allocated instruction memory
  CodeBuffer(address code_start, csize_t code_size);

  // (2) code buffer allocating codeBlob memory for code & relocation
  // info but with lazy initialization.  The name must be something
  // informative.
  CodeBuffer(const char* name) {
    initialize_misc(name);
  }

  // (3) code buffer allocating codeBlob memory for code & relocation
  // info.  The name must be something informative and code_size must
  // include both code and stubs sizes.
  CodeBuffer(const char* name, csize_t code_size, csize_t locs_size) {
    initialize_misc(name);
    initialize(code_size, locs_size);
  }

  ~CodeBuffer();

  // Initialize a CodeBuffer constructed using constructor 2.  Using
  // constructor 3 is equivalent to calling constructor 2 and then
  // calling this method.  It's been factored out for convenience of
  // construction.
  void initialize(csize_t code_size, csize_t locs_size);

  CodeSection* insts()             { return &_insts; }
  CodeSection* stubs()             { return &_stubs; }
  CodeSection* consts()            { return &_consts; }

  // present sections in order; return NULL at end; insts is #0, etc.
  CodeSection* code_section(int n) {
    // This makes the slightly questionable but portable assumption that
    // the various members (_insts, _stubs, etc.) are adjacent in the
    // layout of CodeBuffer.
    CodeSection* cs = &_insts + n;
    assert(cs->index() == n || !cs->is_allocated(), "sanity");
    return cs;
  }
  const CodeSection* code_section(int n) const {  // yucky const stuff
    return ((CodeBuffer*)this)->code_section(n);
  }
  static const char* code_section_name(int n);
  int section_index_of(address addr) const;
  bool contains(address addr) const {
    // handy for debugging
    return section_index_of(addr) > SECT_NONE;
  }

  // A stable mapping between 'locators' (small ints) and addresses.
  static int locator_pos(int locator)   { return locator >> sect_bits; }
  static int locator_sect(int locator)  { return locator &  sect_mask; }
  static int locator(int pos, int sect) { return (pos << sect_bits) | sect; }
  int        locator(address addr) const;
  address    locator_address(int locator) const;
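
  // Example (illustrative): with sect_bits == 2, locator(16, SECT_STUBS)
  // packs the pair as (16 << 2) | SECT_STUBS, and locator_pos()/locator_sect()
  // recover the section-relative position 16 and the section index again.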

  // Properties
  const char* name() const                  { return _name; }
  CodeBuffer* before_expand() const         { return _before_expand; }
  BufferBlob* blob() const                  { return _blob; }
  void    set_blob(BufferBlob* blob);
  void   free_blob();                       // Free the blob, if we own one.

  // Properties relative to the insts section:
  address code_begin() const            { return _insts.start(); }
  address code_end() const              { return _insts.end();   }
  void set_code_end(address end)        { _insts.set_end(end); }
  address code_limit() const            { return _insts.limit(); }
  address inst_mark() const             { return _insts.mark(); }
  void set_inst_mark()                  { _insts.set_mark(); }
  void clear_inst_mark()                { _insts.clear_mark(); }

  // is there anything in the buffer other than the current section?
  bool    is_pure() const               { return code_size() == total_code_size(); }

  // size in bytes of output so far in the insts section
  csize_t code_size() const             { return _insts.size(); }

  // same as code_size(), except that it asserts there is no non-code here
  csize_t pure_code_size() const        { assert(is_pure(), "no non-code");
                                          return code_size(); }
  // capacity in bytes of the insts section
  csize_t code_capacity() const         { return _insts.capacity(); }

  // number of bytes remaining in the insts section
  csize_t code_remaining() const        { return _insts.remaining(); }

  // is a given address in the insts section?  (2nd version is end-inclusive)
  bool code_contains(address pc) const  { return _insts.contains(pc); }
  bool code_contains2(address pc) const { return _insts.contains2(pc); }

  // allocated size of code in all sections, when aligned and concatenated
  // (this is the eventual state of the code in its final CodeBlob)
  csize_t total_code_size() const;

  // combined offset (relative to start of insts) of given address,
  // as eventually found in the final CodeBlob
  csize_t total_offset_of(address addr) const;

  // allocated size of all relocation data, including index, rounded up
  csize_t total_relocation_size() const;

  // allocated size of any and all recorded oops
  csize_t total_oop_size() const {
    OopRecorder* recorder = oop_recorder();
    return (recorder == NULL)? 0: recorder->oop_size();
  }

  // Configuration functions, called immediately after the CB is constructed.
  // The section sizes are subtracted from the original insts section.
  // Note:  Call them in reverse section order, because each steals from insts.
  void initialize_consts_size(csize_t size)            { initialize_section_size(&_consts,  size); }
  void initialize_stubs_size(csize_t size)             { initialize_section_size(&_stubs,   size); }
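
  // Example (illustrative): a generator that wants dedicated stubs and consts
  // space reserves it right after construction, starting with the
  // highest-addressed section.  The size variables below are hypothetical.
  //   CodeBuffer cb("my_stub", total_size, locs_size);
  //   cb.initialize_consts_size(consts_size);
  //   cb.initialize_stubs_size(stubs_size);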

  // Override default oop recorder.
  void initialize_oop_recorder(OopRecorder* r);

  OopRecorder* oop_recorder() const   { return _oop_recorder; }
  CodeComments& comments()            { return _comments; }

  // Code generation
  void relocate(address at, RelocationHolder const& rspec, int format = 0) {
    _insts.relocate(at, rspec, format);
  }
  void relocate(address at,    relocInfo::relocType rtype, int format = 0) {
    _insts.relocate(at, rtype, format);
  }

  // Management of overflow storage for binding of Labels.
  GrowableArray<int>* create_patch_overflow();

  // NMethod generation
  void copy_code_and_locs_to(CodeBlob* blob) {
    assert(blob != NULL, "sane");
    copy_relocations_to(blob);
    copy_code_to(blob);
  }
  void copy_oops_to(nmethod* nm) {
    if (!oop_recorder()->is_unused()) {
      oop_recorder()->copy_to(nm);
    }
  }

  // Transform an address from the code in this code buffer to a specified code buffer
  address transform_address(const CodeBuffer &cb, address addr) const;

  void block_comment(intptr_t offset, const char * comment) PRODUCT_RETURN;

#ifndef PRODUCT
 public:
  // Printing / Decoding
  // decodes from decode_begin() to code_end() and sets decode_begin to end
  void    decode();
  void    decode_all();         // decodes all the code
  void    skip_decode();        // sets decode_begin to code_end();
  void    print();
#endif

  // The following header contains architecture-specific implementations
  #include "incls/_codeBuffer_pd.hpp.incl"
};
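
// Usage sketch (illustrative only, not part of the original header): the
// typical lifecycle for variant (2)/(3) above, assuming hypothetical sizes and
// an assembler that emits into the buffer:
//   ResourceMark rm;
//   CodeBuffer cb("example_stub", code_size, locs_size);
//   MacroAssembler masm(&cb);
//   // ... emit instructions and relocations through the assembler ...
//   // finally copy the finished contents into a blob/nmethod, e.g.
//   //   cb.copy_code_and_locs_to(blob);  cb.copy_oops_to(nm);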

inline void CodeSection::freeze() {
  _outer->freeze_section(this);
}

inline bool CodeSection::maybe_expand_to_ensure_remaining(csize_t amount) {
  if (remaining() < amount) { _outer->expand(this, amount); return true; }
  return false;
}
