src/share/vm/asm/assembler.cpp

author:      never
date:        Mon, 09 Aug 2010 17:51:56 -0700
changeset:   2044:f4f596978298
parent:      1907:c18cbe5936b8
child:       2314:f95d63e2154a
permissions: -rw-r--r--
summary:     Merge

/*
 * Copyright (c) 1997, 2009, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "incls/_precompiled.incl"
#include "incls/_assembler.cpp.incl"

// Implementation of AbstractAssembler
//
// The AbstractAssembler generates code into a CodeBuffer. To make code generation faster,
// the assembler keeps a copy of the code buffer's boundaries & modifies them when
// emitting bytes rather than using the code buffer's accessor functions all the time.
// The code buffer is updated via set_code_end(...) after emitting a whole instruction.
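
// Illustrative sketch of what the cached boundaries buy (the real emit path
// lives in assembler.hpp; the helper name 'emit_byte_sketch' is hypothetical).
// A byte emit can write through the cached _code_pos pointer instead of going
// back through the CodeSection accessors for every byte.
#if 0
void AbstractAssembler::emit_byte_sketch(int x) {
  assert(_code_pos < _code_limit, "code buffer overflow");
  *_code_pos = (unsigned char) x;       // write through the cached end pointer
  _code_pos += sizeof(unsigned char);   // advance the cached end pointer
  sync();                               // publish the new end back to the CodeSection
}
#endif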

AbstractAssembler::AbstractAssembler(CodeBuffer* code) {
  if (code == NULL)  return;
  CodeSection* cs = code->insts();
  cs->clear_mark();   // new assembler kills old mark
  _code_section = cs;
  _code_begin  = cs->start();
  _code_limit  = cs->limit();
  _code_pos    = cs->end();
  _oop_recorder= code->oop_recorder();
  if (_code_begin == NULL)  {
    vm_exit_out_of_memory(0, err_msg("CodeCache: no room for %s",
                                     code->name()));
  }
}

void AbstractAssembler::set_code_section(CodeSection* cs) {
  assert(cs->outer() == code_section()->outer(), "sanity");
  assert(cs->is_allocated(), "need to pre-allocate this section");
  cs->clear_mark();  // new assembly into this section kills old mark
  _code_section = cs;
  _code_begin  = cs->start();
  _code_limit  = cs->limit();
  _code_pos    = cs->end();
}

// Inform CodeBuffer that incoming code and relocation will be for stubs
address AbstractAssembler::start_a_stub(int required_space) {
  CodeBuffer*  cb = code();
  CodeSection* cs = cb->stubs();
  assert(_code_section == cb->insts(), "not in insts?");
  sync();
  if (cs->maybe_expand_to_ensure_remaining(required_space)
      && cb->blob() == NULL) {
    return NULL;
  }
  set_code_section(cs);
  return pc();
}

// Inform CodeBuffer that incoming code and relocation will be code
// Should not be called if start_a_stub() returned NULL
void AbstractAssembler::end_a_stub() {
  assert(_code_section == code()->stubs(), "not in stubs?");
  sync();
  set_code_section(code()->insts());
}
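
// Typical bracketing of the two routines above (illustrative only: the stub
// size '64', the calling context, and the usual HotSpot '__' shorthand for the
// macro assembler are assumptions, not taken from this file). Callers must
// check for NULL, since start_a_stub() fails when the stub section cannot grow.
#if 0
  address stub = __ start_a_stub(64 /* assumed worst-case stub size */);
  if (stub == NULL) {
    // CodeBuffer is full; the caller bails out and retries with a larger buffer.
    return;
  }
  // ... emit the stub's instructions here ...
  __ end_a_stub();      // switch the assembler back to the insts section
#endif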

// Inform CodeBuffer that incoming code and relocation will be for the constant section
address AbstractAssembler::start_a_const(int required_space, int required_align) {
  CodeBuffer*  cb = code();
  CodeSection* cs = cb->consts();
  assert(_code_section == cb->insts(), "not in insts?");
  sync();
  address end = cs->end();
  // Bytes needed to round 'end' up to the requested alignment
  // (required_align must be a power of two).
  int pad = -(intptr_t)end & (required_align-1);
  if (cs->maybe_expand_to_ensure_remaining(pad + required_space)) {
    if (cb->blob() == NULL)  return NULL;
    end = cs->end();  // refresh pointer
  }
  if (pad > 0) {
    while (--pad >= 0) { *end++ = 0; }
    cs->set_end(end);
  }
  set_code_section(cs);
  return end;
}

// Inform CodeBuffer that incoming code and relocation will be code
// Should not be called if start_a_const() returned NULL
void AbstractAssembler::end_a_const() {
  assert(_code_section == code()->consts(), "not in consts?");
  sync();
  set_code_section(code()->insts());
}

void AbstractAssembler::flush() {
  sync();
  ICache::invalidate_range(addr_at(0), offset());
}

void AbstractAssembler::a_byte(int x) {
  emit_byte(x);
}

void AbstractAssembler::a_long(jint x) {
  emit_long(x);
}

// Labels refer to positions in the (to be) generated code.  There are bound
// and unbound labels.
//
// Bound labels refer to known positions in the already generated code.
// offset() is the position the label refers to.
//
// Unbound labels refer to unknown positions in the code to be generated; an
// unbound label may carry a list of unresolved displacements that refer to it.
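
// Illustrative use of bound vs. unbound labels (a sketch only: 'jmp(Label&)' is
// the x86 MacroAssembler's jump-to-label form and '__' is the usual shorthand
// for the macro assembler -- both are assumptions of this example, not defined
// in this file).
#if 0
  Label done;
  __ jmp(done);        // 'done' is still unbound: the branch is recorded as a patch
  // ... other instructions ...
  __ bind(done);       // binding resolves the recorded branches to this position
#endif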

#ifndef PRODUCT
void AbstractAssembler::print(Label& L) {
  if (L.is_bound()) {
    tty->print_cr("bound label to %d|%d", L.loc_pos(), L.loc_sect());
  } else if (L.is_unbound()) {
    L.print_instructions((MacroAssembler*)this);
  } else {
    tty->print_cr("label in inconsistent state (loc = %d)", L.loc());
  }
}
#endif // PRODUCT

void AbstractAssembler::bind(Label& L) {
  if (L.is_bound()) {
    // Assembler can bind a label more than once to the same place.
    guarantee(L.loc() == locator(), "attempt to redefine label");
    return;
  }
  L.bind_loc(locator());
  L.patch_instructions((MacroAssembler*)this);
}

void AbstractAssembler::generate_stack_overflow_check(int frame_size_in_bytes) {
  if (UseStackBanging) {
    // Each code entry causes one stack bang n pages down the stack, where n
    // is configurable by StackShadowPages.  The setting depends on the maximum
    // depth of the VM call stack or native code before going back into Java code,
    // since only Java code can raise a stack overflow exception using the
    // stack banging mechanism.  The VM and native code do not detect stack
    // overflow.
    // The code in JavaCalls::call() checks that at least n pages are
    // available, so all entry code needs to do is bang once for the end of
    // this shadow zone.
    // The entry code may need to bang additional pages if the framesize
    // is greater than a page.

    const int page_size = os::vm_page_size();
    int bang_end = StackShadowPages*page_size;

    // This is how far the previous frame's stack banging extended.
    const int bang_end_safe = bang_end;

    if (frame_size_in_bytes > page_size) {
      bang_end += frame_size_in_bytes;
    }

    int bang_offset = bang_end_safe;
    while (bang_offset <= bang_end) {
      // Need at least one stack bang at end of shadow zone.
      bang_stack_with_offset(bang_offset);
      bang_offset += page_size;
    }
  } // end (UseStackBanging)
}
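
// Worked example of the banging loop above (illustrative numbers only, assuming
// a 4K page size and StackShadowPages = 20; actual values are platform-dependent):
//   bang_end      = 20 * 4096 = 81920
//   bang_end_safe = 81920
// For a frame of 10000 bytes (> one page), bang_end becomes 81920 + 10000 = 91920,
// so the loop bangs at offsets 81920, 86016, and 90112 -- one probe per page,
// which touches every page the new frame may reach beyond the shadow zone.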

void Label::add_patch_at(CodeBuffer* cb, int branch_loc) {
  assert(_loc == -1, "Label is unbound");
  if (_patch_index < PatchCacheSize) {
    _patches[_patch_index] = branch_loc;
  } else {
    if (_patch_overflow == NULL) {
      _patch_overflow = cb->create_patch_overflow();
    }
    _patch_overflow->push(branch_loc);
  }
  ++_patch_index;
}

void Label::patch_instructions(MacroAssembler* masm) {
  assert(is_bound(), "Label is bound");
  CodeBuffer* cb = masm->code();
  int target_sect = CodeBuffer::locator_sect(loc());
  address target = cb->locator_address(loc());
  while (_patch_index > 0) {
    --_patch_index;
    int branch_loc;
    if (_patch_index >= PatchCacheSize) {
      branch_loc = _patch_overflow->pop();
    } else {
      branch_loc = _patches[_patch_index];
    }
    int branch_sect = CodeBuffer::locator_sect(branch_loc);
    address branch = cb->locator_address(branch_loc);
    if (branch_sect == CodeBuffer::SECT_CONSTS) {
      // The thing to patch is a constant word.
      *(address*)branch = target;
      continue;
    }

#ifdef ASSERT
    // Cross-section branches only work if the
    // intermediate section boundaries are frozen.
    if (target_sect != branch_sect) {
      for (int n = MIN2(target_sect, branch_sect),
               nlimit = (target_sect + branch_sect) - n;
           n < nlimit; n++) {
        CodeSection* cs = cb->code_section(n);
        assert(cs->is_frozen(), "cross-section branch needs stable offsets");
      }
    }
#endif //ASSERT

    // Push the target offset into the branch instruction.
    masm->pd_patch_instruction(branch, target);
  }
}

struct DelayedConstant {
  typedef void (*value_fn_t)();
  BasicType type;
  intptr_t value;
  value_fn_t value_fn;
  // This limit of 20 is generous for initial uses.
  // The limit needs to be large enough to store the field offsets
  // into classes which do not have statically fixed layouts.
  // (Initial use is for method handle object offsets.)
  // Look for uses of "delayed_value" in the source code
  // and make sure this number is generous enough to handle all of them.
  enum { DC_LIMIT = 20 };
  static DelayedConstant delayed_constants[DC_LIMIT];
  static DelayedConstant* add(BasicType type, value_fn_t value_fn);
  bool match(BasicType t, value_fn_t cfn) {
    return type == t && value_fn == cfn;
  }
  static void update_all();
};

DelayedConstant DelayedConstant::delayed_constants[DC_LIMIT];
// Default C structure initialization rules have the following effect here:
// = { { (BasicType)0, (intptr_t)NULL }, ... };

DelayedConstant* DelayedConstant::add(BasicType type,
                                      DelayedConstant::value_fn_t cfn) {
  for (int i = 0; i < DC_LIMIT; i++) {
    DelayedConstant* dcon = &delayed_constants[i];
    if (dcon->match(type, cfn))
      return dcon;
    if (dcon->value_fn == NULL) {
      // (cmpxchg not because this is multi-threaded but because I'm paranoid)
      if (Atomic::cmpxchg_ptr(CAST_FROM_FN_PTR(void*, cfn), &dcon->value_fn, NULL) == NULL) {
        dcon->type = type;
        return dcon;
      }
    }
  }
  // If this assert is hit (in pre-integration testing!) then re-evaluate
  // the comment on the definition of DC_LIMIT.
  guarantee(false, "too many delayed constants");
  return NULL;
}

void DelayedConstant::update_all() {
  for (int i = 0; i < DC_LIMIT; i++) {
    DelayedConstant* dcon = &delayed_constants[i];
    if (dcon->value_fn != NULL && dcon->value == 0) {
      typedef int     (*int_fn_t)();
      typedef address (*address_fn_t)();
      switch (dcon->type) {
      case T_INT:     dcon->value = (intptr_t) ((int_fn_t)    dcon->value_fn)(); break;
      case T_ADDRESS: dcon->value = (intptr_t) ((address_fn_t)dcon->value_fn)(); break;
      }
    }
  }
}

intptr_t* AbstractAssembler::delayed_value_addr(int(*value_fn)()) {
  DelayedConstant* dcon = DelayedConstant::add(T_INT, (DelayedConstant::value_fn_t) value_fn);
  return &dcon->value;
}
intptr_t* AbstractAssembler::delayed_value_addr(address(*value_fn)()) {
  DelayedConstant* dcon = DelayedConstant::add(T_ADDRESS, (DelayedConstant::value_fn_t) value_fn);
  return &dcon->value;
}
void AbstractAssembler::update_delayed_values() {
  DelayedConstant::update_all();
}
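
// Illustrative sketch of the delayed-value mechanism above (assumptions: the
// value function 'compute_field_offset' and the generator routine calling it
// are hypothetical, not part of this file). A code generator can ask for the
// address of a slot whose final value is not known yet; the slot is filled in
// later when update_delayed_values() invokes the registered function.
#if 0
static int compute_field_offset();   // becomes meaningful only after class loading

void generate_something(AbstractAssembler* a) {
  // Record the constant's future value; the returned address can be baked
  // into generated code that loads from it at run time.
  intptr_t* slot = a->delayed_value_addr(compute_field_offset);
  // ... emit code that reads *slot ...
  // Later, once the real value is computable, update_delayed_values() fills the slot.
}
#endif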

void AbstractAssembler::block_comment(const char* comment) {
  if (sect() == CodeBuffer::SECT_INSTS) {
    code_section()->outer()->block_comment(offset(), comment);
  }
}

bool MacroAssembler::needs_explicit_null_check(intptr_t offset) {
  // Exception handler checks the nmethod's implicit null checks table
  // only when this method returns false.
#ifdef _LP64
  if (UseCompressedOops && Universe::narrow_oop_base() != NULL) {
    assert (Universe::heap() != NULL, "java heap should be initialized");
    // For narrow oop implicit null checks the incoming 'offset' is actually
    // the full faulting address [heap_base + offset], and the first page
    // after heap_base is unmapped.
    uintptr_t base = (uintptr_t)Universe::narrow_oop_base();
    if ((uintptr_t)offset >= base) {
      // Normalize offset for the next check.
      offset = (intptr_t)(pointer_delta((void*)offset, (void*)base, 1));
    }
  }
#endif
  return offset < 0 || os::vm_page_size() <= offset;
}
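
// Worked example for the check above (illustrative numbers: assume a 4K page
// size and narrow_oop_base == 0x800000000):
//   - A compressed-oop access through a NULL oop with field offset 40 faults at
//     address 0x800000000 + 40.  That value is >= base, so it is normalized back
//     to 40, which falls inside the protected first page; the method returns
//     false and the implicit null-check table handles the fault.
//   - An access with offset 5000 (beyond the protected first page) returns true,
//     so the code generator must emit an explicit null check.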

#ifndef PRODUCT
void Label::print_instructions(MacroAssembler* masm) const {
  CodeBuffer* cb = masm->code();
  for (int i = 0; i < _patch_index; ++i) {
    int branch_loc;
    if (i >= PatchCacheSize) {
      branch_loc = _patch_overflow->at(i - PatchCacheSize);
    } else {
      branch_loc = _patches[i];
    }
    int branch_pos  = CodeBuffer::locator_pos(branch_loc);
    int branch_sect = CodeBuffer::locator_sect(branch_loc);
    address branch = cb->locator_address(branch_loc);
    tty->print_cr("unbound label");
    tty->print("@ %d|%d ", branch_pos, branch_sect);
    if (branch_sect == CodeBuffer::SECT_CONSTS) {
      tty->print_cr(PTR_FORMAT, *(address*)branch);
      continue;
    }
    masm->pd_print_patched_instruction(branch);
    tty->cr();
  }
}
#endif // ndef PRODUCT
