src/cpu/sparc/vm/frame_sparc.cpp

changeset 0
f90c822e73f8
child 6876
710a3c8b516e
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/cpu/sparc/vm/frame_sparc.cpp	Wed Apr 27 01:25:04 2016 +0800
     1.3 @@ -0,0 +1,858 @@
     1.4 +/*
     1.5 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    1.23 + * or visit www.oracle.com if you need additional information or have any
    1.24 + * questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +#include "precompiled.hpp"
    1.29 +#include "interpreter/interpreter.hpp"
    1.30 +#include "memory/resourceArea.hpp"
    1.31 +#include "oops/markOop.hpp"
    1.32 +#include "oops/method.hpp"
    1.33 +#include "oops/oop.inline.hpp"
    1.34 +#include "prims/methodHandles.hpp"
    1.35 +#include "runtime/frame.inline.hpp"
    1.36 +#include "runtime/handles.inline.hpp"
    1.37 +#include "runtime/javaCalls.hpp"
    1.38 +#include "runtime/monitorChunk.hpp"
    1.39 +#include "runtime/signature.hpp"
    1.40 +#include "runtime/stubCodeGenerator.hpp"
    1.41 +#include "runtime/stubRoutines.hpp"
    1.42 +#include "vmreg_sparc.inline.hpp"
    1.43 +#ifdef COMPILER1
    1.44 +#include "c1/c1_Runtime1.hpp"
    1.45 +#include "runtime/vframeArray.hpp"
    1.46 +#endif
    1.47 +
    1.48 +void RegisterMap::pd_clear() {
    1.49 +  if (_thread->has_last_Java_frame()) {
    1.50 +    frame fr = _thread->last_frame();
    1.51 +    _window = fr.sp();
    1.52 +  } else {
    1.53 +    _window = NULL;
    1.54 +  }
    1.55 +  _younger_window = NULL;
    1.56 +}
    1.57 +
    1.58 +
    1.59 +// Unified register numbering scheme: each 32-bits counts as a register
    1.60 +// number, so all the V9 registers take 2 slots.
    1.61 +const static int R_L_nums[] = {0+040,2+040,4+040,6+040,8+040,10+040,12+040,14+040};
    1.62 +const static int R_I_nums[] = {0+060,2+060,4+060,6+060,8+060,10+060,12+060,14+060};
    1.63 +const static int R_O_nums[] = {0+020,2+020,4+020,6+020,8+020,10+020,12+020,14+020};
    1.64 +const static int R_G_nums[] = {0+000,2+000,4+000,6+000,8+000,10+000,12+000,14+000};
    1.65 +static RegisterMap::LocationValidType bad_mask = 0;
    1.66 +static RegisterMap::LocationValidType R_LIO_mask = 0;
    1.67 +static bool register_map_inited = false;
    1.68 +
    1.69 +static void register_map_init() {
    1.70 +  if (!register_map_inited) {
    1.71 +    register_map_inited = true;
    1.72 +    int i;
    1.73 +    for (i = 0; i < 8; i++) {
    1.74 +      assert(R_L_nums[i] < RegisterMap::location_valid_type_size, "in first chunk");
    1.75 +      assert(R_I_nums[i] < RegisterMap::location_valid_type_size, "in first chunk");
    1.76 +      assert(R_O_nums[i] < RegisterMap::location_valid_type_size, "in first chunk");
    1.77 +      assert(R_G_nums[i] < RegisterMap::location_valid_type_size, "in first chunk");
    1.78 +    }
    1.79 +
    1.80 +    bad_mask |= (1LL << R_O_nums[6]); // SP
    1.81 +    bad_mask |= (1LL << R_O_nums[7]); // cPC
    1.82 +    bad_mask |= (1LL << R_I_nums[6]); // FP
    1.83 +    bad_mask |= (1LL << R_I_nums[7]); // rPC
    1.84 +    bad_mask |= (1LL << R_G_nums[2]); // TLS
    1.85 +    bad_mask |= (1LL << R_G_nums[7]); // reserved by libthread
    1.86 +
    1.87 +    for (i = 0; i < 8; i++) {
    1.88 +      R_LIO_mask |= (1LL << R_L_nums[i]);
    1.89 +      R_LIO_mask |= (1LL << R_I_nums[i]);
    1.90 +      R_LIO_mask |= (1LL << R_O_nums[i]);
    1.91 +    }
    1.92 +  }
    1.93 +}
    1.94 +
    1.95 +
    1.96 +address RegisterMap::pd_location(VMReg regname) const {
    1.97 +  register_map_init();
    1.98 +
    1.99 +  assert(regname->is_reg(), "sanity check");
   1.100 +  // Only the GPRs get handled this way
   1.101 +  if( !regname->is_Register())
   1.102 +    return NULL;
   1.103 +
   1.104 +  // don't talk about bad registers
   1.105 +  if ((bad_mask & ((LocationValidType)1 << regname->value())) != 0) {
   1.106 +    return NULL;
   1.107 +  }
   1.108 +
   1.109 +  // Convert to a GPR
   1.110 +  Register reg;
   1.111 +  int second_word = 0;
   1.112 +  // 32-bit registers for in, out and local
   1.113 +  if (!regname->is_concrete()) {
   1.114 +    // HMM ought to return NULL for any non-concrete (odd) vmreg
   1.115 +    // this all tied up in the fact we put out double oopMaps for
   1.116 +    // register locations. When that is fixed we will return NULL
   1.117 +    // (or assert here).
   1.118 +    reg = regname->prev()->as_Register();
   1.119 +#ifdef _LP64
   1.120 +    second_word = sizeof(jint);
   1.121 +#else
   1.122 +    return NULL;
   1.123 +#endif // _LP64
   1.124 +  } else {
   1.125 +    reg = regname->as_Register();
   1.126 +  }
   1.127 +  if (reg->is_out()) {
   1.128 +    assert(_younger_window != NULL, "Younger window should be available");
   1.129 +    return second_word + (address)&_younger_window[reg->after_save()->sp_offset_in_saved_window()];
   1.130 +  }
   1.131 +  if (reg->is_local() || reg->is_in()) {
   1.132 +    assert(_window != NULL, "Window should be available");
   1.133 +    return second_word + (address)&_window[reg->sp_offset_in_saved_window()];
   1.134 +  }
   1.135 +  // Only the window'd GPRs get handled this way; not the globals.
   1.136 +  return NULL;
   1.137 +}
   1.138 +
   1.139 +
   1.140 +#ifdef ASSERT
   1.141 +void RegisterMap::check_location_valid() {
   1.142 +  register_map_init();
   1.143 +  assert((_location_valid[0] & bad_mask) == 0, "cannot have special locations for SP,FP,TLS,etc.");
   1.144 +}
   1.145 +#endif
   1.146 +
   1.147 +// We are shifting windows.  That means we are moving all %i to %o,
   1.148 +// getting rid of all current %l, and keeping all %g.  This is only
   1.149 +// complicated if any of the location pointers for these are valid.
   1.150 +// The normal case is that everything is in its standard register window
   1.151 +// home, and _location_valid[0] is zero.  In that case, this routine
   1.152 +// does exactly nothing.
   1.153 +void RegisterMap::shift_individual_registers() {
   1.154 +  if (!update_map())  return;  // this only applies to maps with locations
   1.155 +  register_map_init();
   1.156 +  check_location_valid();
   1.157 +
   1.158 +  LocationValidType lv = _location_valid[0];
   1.159 +  LocationValidType lv0 = lv;
   1.160 +
   1.161 +  lv &= ~R_LIO_mask;  // clear %l, %o, %i regs
   1.162 +
   1.163 +  // if we cleared some non-%g locations, we may have to do some shifting
   1.164 +  if (lv != lv0) {
   1.165 +    // copy %i0-%i5 to %o0-%o5, if they have special locations
   1.166 +    // This can happen within stubs which spill argument registers
   1.167 +    // around a dynamic link operation, such as resolve_opt_virtual_call.
   1.168 +    for (int i = 0; i < 8; i++) {
   1.169 +      if (lv0 & (1LL << R_I_nums[i])) {
   1.170 +        _location[R_O_nums[i]] = _location[R_I_nums[i]];
   1.171 +        lv |=  (1LL << R_O_nums[i]);
   1.172 +      }
   1.173 +    }
   1.174 +  }
   1.175 +
   1.176 +  _location_valid[0] = lv;
   1.177 +  check_location_valid();
   1.178 +}
   1.179 +
   1.180 +bool frame::safe_for_sender(JavaThread *thread) {
   1.181 +
   1.182 +  address _SP = (address) sp();
   1.183 +  address _FP = (address) fp();
   1.184 +  address _UNEXTENDED_SP = (address) unextended_sp();
   1.185 +  // sp must be within the stack
   1.186 +  bool sp_safe = (_SP <= thread->stack_base()) &&
   1.187 +                 (_SP >= thread->stack_base() - thread->stack_size());
   1.188 +
   1.189 +  if (!sp_safe) {
   1.190 +    return false;
   1.191 +  }
   1.192 +
   1.193 +  // unextended sp must be within the stack and above or equal sp
   1.194 +  bool unextended_sp_safe = (_UNEXTENDED_SP <= thread->stack_base()) &&
   1.195 +                            (_UNEXTENDED_SP >= _SP);
   1.196 +
   1.197 +  if (!unextended_sp_safe) return false;
   1.198 +
   1.199 +  // an fp must be within the stack and above (but not equal) sp
   1.200 +  bool fp_safe = (_FP <= thread->stack_base()) &&
   1.201 +                 (_FP > _SP);
   1.202 +
   1.203 +    // We know sp/unextended_sp are safe; only fp is questionable here
   1.204 +
   1.205 +  // If the current frame is known to the code cache then we can attempt to
   1.206 +  // to construct the sender and do some validation of it. This goes a long way
   1.207 +  // toward eliminating issues when we get in frame construction code
   1.208 +
   1.209 +  if (_cb != NULL ) {
   1.210 +
   1.211 +    // First check if frame is complete and tester is reliable
   1.212 +    // Unfortunately we can only check frame complete for runtime stubs and nmethod
   1.213 +    // other generic buffer blobs are more problematic so we just assume they are
   1.214 +    // ok. adapter blobs never have a frame complete and are never ok.
   1.215 +
   1.216 +    if (!_cb->is_frame_complete_at(_pc)) {
   1.217 +      if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
   1.218 +        return false;
   1.219 +      }
   1.220 +    }
   1.221 +
   1.222 +    // Could just be some random pointer within the codeBlob
   1.223 +    if (!_cb->code_contains(_pc)) {
   1.224 +      return false;
   1.225 +    }
   1.226 +
   1.227 +    // Entry frame checks
   1.228 +    if (is_entry_frame()) {
   1.229 +      // an entry frame must have a valid fp.
   1.230 +
   1.231 +      if (!fp_safe) {
   1.232 +        return false;
   1.233 +      }
   1.234 +
   1.235 +      // Validate the JavaCallWrapper an entry frame must have
   1.236 +
   1.237 +      address jcw = (address)entry_frame_call_wrapper();
   1.238 +
   1.239 +      bool jcw_safe = (jcw <= thread->stack_base()) && ( jcw > _FP);
   1.240 +
   1.241 +      return jcw_safe;
   1.242 +
   1.243 +    }
   1.244 +
   1.245 +    intptr_t* younger_sp = sp();
   1.246 +    intptr_t* _SENDER_SP = sender_sp(); // sender is actually just _FP
   1.247 +    bool adjusted_stack = is_interpreted_frame();
   1.248 +
   1.249 +    address   sender_pc = (address)younger_sp[I7->sp_offset_in_saved_window()] + pc_return_offset;
   1.250 +
   1.251 +
   1.252 +    // We must always be able to find a recognizable pc
   1.253 +    CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
   1.254 +    if (sender_pc == NULL ||  sender_blob == NULL) {
   1.255 +      return false;
   1.256 +    }
   1.257 +
   1.258 +    // Could be a zombie method
   1.259 +    if (sender_blob->is_zombie() || sender_blob->is_unloaded()) {
   1.260 +      return false;
   1.261 +    }
   1.262 +
   1.263 +    // It should be safe to construct the sender though it might not be valid
   1.264 +
   1.265 +    frame sender(_SENDER_SP, younger_sp, adjusted_stack);
   1.266 +
   1.267 +    // Do we have a valid fp?
   1.268 +    address sender_fp = (address) sender.fp();
   1.269 +
   1.270 +    // an fp must be within the stack and above (but not equal) current frame's _FP
   1.271 +
   1.272 +    bool sender_fp_safe = (sender_fp <= thread->stack_base()) &&
   1.273 +                   (sender_fp > _FP);
   1.274 +
   1.275 +    if (!sender_fp_safe) {
   1.276 +      return false;
   1.277 +    }
   1.278 +
   1.279 +
   1.280 +    // If the potential sender is the interpreter then we can do some more checking
   1.281 +    if (Interpreter::contains(sender_pc)) {
   1.282 +      return sender.is_interpreted_frame_valid(thread);
   1.283 +    }
   1.284 +
   1.285 +    // Could just be some random pointer within the codeBlob
   1.286 +    if (!sender.cb()->code_contains(sender_pc)) {
   1.287 +      return false;
   1.288 +    }
   1.289 +
   1.290 +    // We should never be able to see an adapter if the current frame is something from code cache
   1.291 +    if (sender_blob->is_adapter_blob()) {
   1.292 +      return false;
   1.293 +    }
   1.294 +
   1.295 +    if( sender.is_entry_frame()) {
   1.296 +      // Validate the JavaCallWrapper an entry frame must have
   1.297 +
   1.298 +      address jcw = (address)sender.entry_frame_call_wrapper();
   1.299 +
   1.300 +      bool jcw_safe = (jcw <= thread->stack_base()) && ( jcw > sender_fp);
   1.301 +
   1.302 +      return jcw_safe;
   1.303 +    }
   1.304 +
   1.305 +    // If the frame size is 0 (or less) something is bad because every nmethod has a non-zero frame size
   1.306 +    // because you must allocate window space
   1.307 +
   1.308 +    if (sender_blob->frame_size() <= 0) {
   1.309 +      assert(!sender_blob->is_nmethod(), "should count return address at least");
   1.310 +      return false;
   1.311 +    }
   1.312 +
   1.313 +    // The sender should positively be an nmethod or call_stub. On sparc we might in fact see something else.
   1.314 +    // The cause of this is because at a save instruction the O7 we get is a leftover from an earlier
   1.315 +    // window use. So if a runtime stub creates two frames (common in fastdebug/debug) then we see the
   1.316 +    // stale pc. So if the sender blob is not something we'd expect we have little choice but to declare
   1.317 +    // the stack unwalkable. pd_get_top_frame_for_signal_handler tries to recover from this by unwinding
   1.318 +    // that initial frame and retrying.
   1.319 +
   1.320 +    if (!sender_blob->is_nmethod()) {
   1.321 +      return false;
   1.322 +    }
   1.323 +
   1.324 +    // Could put some more validation for the potential non-interpreted sender
   1.325 +    // frame we'd create by calling sender if I could think of any. Wait for next crash in forte...
   1.326 +
   1.327 +    // One idea is seeing if the sender_pc we have is one that we'd expect to call to current cb
   1.328 +
   1.329 +    // We've validated the potential sender that would be created
   1.330 +
   1.331 +    return true;
   1.332 +
   1.333 +  }
   1.334 +
   1.335 +  // Must be native-compiled frame. Since sender will try and use fp to find
   1.336 +  // linkages it must be safe
   1.337 +
   1.338 +  if (!fp_safe) return false;
   1.339 +
   1.340 +  // could try and do some more potential verification of native frame if we could think of some...
   1.341 +
   1.342 +  return true;
   1.343 +}
   1.344 +
   1.345 +// constructors
   1.346 +
   1.347 +// Construct an unpatchable, deficient frame
   1.348 +frame::frame(intptr_t* sp, unpatchable_t, address pc, CodeBlob* cb) {
   1.349 +#ifdef _LP64
   1.350 +  assert( (((intptr_t)sp & (wordSize-1)) == 0), "frame constructor passed an invalid sp");
   1.351 +#endif
   1.352 +  _sp = sp;
   1.353 +  _younger_sp = NULL;
   1.354 +  _pc = pc;
   1.355 +  _cb = cb;
   1.356 +  _sp_adjustment_by_callee = 0;
   1.357 +  assert(pc == NULL && cb == NULL || pc != NULL, "can't have a cb and no pc!");
   1.358 +  if (_cb == NULL && _pc != NULL ) {
   1.359 +    _cb = CodeCache::find_blob(_pc);
   1.360 +  }
   1.361 +  _deopt_state = unknown;
   1.362 +#ifdef ASSERT
   1.363 +  if ( _cb != NULL && _cb->is_nmethod()) {
   1.364 +    // Without a valid unextended_sp() we can't convert the pc to "original"
   1.365 +    assert(!((nmethod*)_cb)->is_deopt_pc(_pc), "invariant broken");
   1.366 +  }
   1.367 +#endif // ASSERT
   1.368 +}
   1.369 +
   1.370 +frame::frame(intptr_t* sp, intptr_t* younger_sp, bool younger_frame_is_interpreted) :
   1.371 +  _sp(sp),
   1.372 +  _younger_sp(younger_sp),
   1.373 +  _deopt_state(unknown),
   1.374 +  _sp_adjustment_by_callee(0) {
   1.375 +  if (younger_sp == NULL) {
   1.376 +    // make a deficient frame which doesn't know where its PC is
   1.377 +    _pc = NULL;
   1.378 +    _cb = NULL;
   1.379 +  } else {
   1.380 +    _pc = (address)younger_sp[I7->sp_offset_in_saved_window()] + pc_return_offset;
   1.381 +    assert( (intptr_t*)younger_sp[FP->sp_offset_in_saved_window()] == (intptr_t*)((intptr_t)sp - STACK_BIAS), "younger_sp must be valid");
   1.382 +    // Any frame we ever build should always be "safe"; therefore we should not have to call
   1.383 +    // find_blob_unsafe
   1.384 +    // In case of native stubs, the pc retrieved here might be
   1.385 +    // wrong.  (the _last_native_pc will have the right value)
   1.386 +    // So do not add any asserts on the _pc here.
   1.387 +  }
   1.388 +
   1.389 +  if (_pc != NULL)
   1.390 +    _cb = CodeCache::find_blob(_pc);
   1.391 +
   1.392 +  // Check for MethodHandle call sites.
   1.393 +  if (_cb != NULL) {
   1.394 +    nmethod* nm = _cb->as_nmethod_or_null();
   1.395 +    if (nm != NULL) {
   1.396 +      if (nm->is_deopt_mh_entry(_pc) || nm->is_method_handle_return(_pc)) {
   1.397 +        _sp_adjustment_by_callee = (intptr_t*) ((intptr_t) sp[L7_mh_SP_save->sp_offset_in_saved_window()] + STACK_BIAS) - sp;
   1.398 +        // The SP is already adjusted by this MH call site, don't
   1.399 +        // overwrite this value with the wrong interpreter value.
   1.400 +        younger_frame_is_interpreted = false;
   1.401 +      }
   1.402 +    }
   1.403 +  }
   1.404 +
   1.405 +  if (younger_frame_is_interpreted) {
   1.406 +    // compute adjustment to this frame's SP made by its interpreted callee
   1.407 +    _sp_adjustment_by_callee = (intptr_t*) ((intptr_t) younger_sp[I5_savedSP->sp_offset_in_saved_window()] + STACK_BIAS) - sp;
   1.408 +  }
   1.409 +
   1.410 +  // It is important that the frame is fully constructed when we do
   1.411 +  // this lookup as get_deopt_original_pc() needs a correct value for
   1.412 +  // unextended_sp() which uses _sp_adjustment_by_callee.
   1.413 +  if (_pc != NULL) {
   1.414 +    address original_pc = nmethod::get_deopt_original_pc(this);
   1.415 +    if (original_pc != NULL) {
   1.416 +      _pc = original_pc;
   1.417 +      _deopt_state = is_deoptimized;
   1.418 +    } else {
   1.419 +      _deopt_state = not_deoptimized;
   1.420 +    }
   1.421 +  }
   1.422 +}
   1.423 +
   1.424 +bool frame::is_interpreted_frame() const  {
   1.425 +  return Interpreter::contains(pc());
   1.426 +}
   1.427 +
   1.428 +// sender_sp
   1.429 +
   1.430 +intptr_t* frame::interpreter_frame_sender_sp() const {
   1.431 +  assert(is_interpreted_frame(), "interpreted frame expected");
   1.432 +  return fp();
   1.433 +}
   1.434 +
   1.435 +#ifndef CC_INTERP
   1.436 +void frame::set_interpreter_frame_sender_sp(intptr_t* sender_sp) {
   1.437 +  assert(is_interpreted_frame(), "interpreted frame expected");
   1.438 +  Unimplemented();
   1.439 +}
   1.440 +#endif // CC_INTERP
   1.441 +
   1.442 +
   1.443 +#ifdef ASSERT
   1.444 +// Debugging aid
   1.445 +static frame nth_sender(int n) {
   1.446 +  frame f = JavaThread::current()->last_frame();
   1.447 +
   1.448 +  for(int i = 0; i < n; ++i)
   1.449 +    f = f.sender((RegisterMap*)NULL);
   1.450 +
   1.451 +  printf("first frame %d\n",          f.is_first_frame()       ? 1 : 0);
   1.452 +  printf("interpreted frame %d\n",    f.is_interpreted_frame() ? 1 : 0);
   1.453 +  printf("java frame %d\n",           f.is_java_frame()        ? 1 : 0);
   1.454 +  printf("entry frame %d\n",          f.is_entry_frame()       ? 1 : 0);
   1.455 +  printf("native frame %d\n",         f.is_native_frame()      ? 1 : 0);
   1.456 +  if (f.is_compiled_frame()) {
   1.457 +    if (f.is_deoptimized_frame())
   1.458 +      printf("deoptimized frame 1\n");
   1.459 +    else
   1.460 +      printf("compiled frame 1\n");
   1.461 +  }
   1.462 +
   1.463 +  return f;
   1.464 +}
   1.465 +#endif
   1.466 +
   1.467 +
   1.468 +frame frame::sender_for_entry_frame(RegisterMap *map) const {
   1.469 +  assert(map != NULL, "map must be set");
   1.470 +  // Java frame called from C; skip all C frames and return top C
   1.471 +  // frame of that chunk as the sender
   1.472 +  JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor();
   1.473 +  assert(!entry_frame_is_first(), "next Java fp must be non zero");
   1.474 +  assert(jfa->last_Java_sp() > _sp, "must be above this frame on stack");
   1.475 +  intptr_t* last_Java_sp = jfa->last_Java_sp();
   1.476 +  // Since we are walking the stack now this nested anchor is obviously walkable
   1.477 +  // even if it wasn't when it was stacked.
   1.478 +  if (!jfa->walkable()) {
   1.479 +    // Capture _last_Java_pc (if needed) and mark anchor walkable.
   1.480 +    jfa->capture_last_Java_pc(_sp);
   1.481 +  }
   1.482 +  assert(jfa->last_Java_pc() != NULL, "No captured pc!");
   1.483 +  map->clear();
   1.484 +  map->make_integer_regs_unsaved();
   1.485 +  map->shift_window(last_Java_sp, NULL);
   1.486 +  assert(map->include_argument_oops(), "should be set by clear");
   1.487 +  return frame(last_Java_sp, frame::unpatchable, jfa->last_Java_pc());
   1.488 +}
   1.489 +
   1.490 +frame frame::sender_for_interpreter_frame(RegisterMap *map) const {
   1.491 +  ShouldNotCallThis();
   1.492 +  return sender(map);
   1.493 +}
   1.494 +
   1.495 +frame frame::sender_for_compiled_frame(RegisterMap *map) const {
   1.496 +  ShouldNotCallThis();
   1.497 +  return sender(map);
   1.498 +}
   1.499 +
   1.500 +frame frame::sender(RegisterMap* map) const {
   1.501 +  assert(map != NULL, "map must be set");
   1.502 +
   1.503 +  assert(CodeCache::find_blob_unsafe(_pc) == _cb, "inconsistent");
   1.504 +
   1.505 +  // Default is not to follow arguments; update it accordingly below
   1.506 +  map->set_include_argument_oops(false);
   1.507 +
   1.508 +  if (is_entry_frame()) return sender_for_entry_frame(map);
   1.509 +
   1.510 +  intptr_t* younger_sp = sp();
   1.511 +  intptr_t* sp         = sender_sp();
   1.512 +
   1.513 +  // Note:  The version of this operation on any platform with callee-save
   1.514 +  //        registers must update the register map (if not null).
   1.515 +  //        In order to do this correctly, the various subtypes of
   1.516 +  //        of frame (interpreted, compiled, glue, native),
   1.517 +  //        must be distinguished.  There is no need on SPARC for
   1.518 +  //        such distinctions, because all callee-save registers are
   1.519 +  //        preserved for all frames via SPARC-specific mechanisms.
   1.520 +  //
   1.521 +  //        *** HOWEVER, *** if and when we make any floating-point
   1.522 +  //        registers callee-saved, then we will have to copy over
   1.523 +  //        the RegisterMap update logic from the Intel code.
   1.524 +
   1.525 +  // The constructor of the sender must know whether this frame is interpreted so it can set the
   1.526 +  // sender's _sp_adjustment_by_callee field.  An osr adapter frame was originally
   1.527 +  // interpreted but its pc is in the code cache (for c1 -> osr_frame_return_id stub), so it must be
   1.528 +  // explicitly recognized.
   1.529 +
   1.530 +
   1.531 +  bool frame_is_interpreted = is_interpreted_frame();
   1.532 +  if (frame_is_interpreted) {
   1.533 +    map->make_integer_regs_unsaved();
   1.534 +    map->shift_window(sp, younger_sp);
   1.535 +  } else if (_cb != NULL) {
   1.536 +    // Update the locations of implicitly saved registers to be their
   1.537 +    // addresses in the register save area.
   1.538 +    // For %o registers, the addresses of %i registers in the next younger
   1.539 +    // frame are used.
   1.540 +    map->shift_window(sp, younger_sp);
   1.541 +    if (map->update_map()) {
   1.542 +      // Tell GC to use argument oopmaps for some runtime stubs that need it.
   1.543 +      // For C1, the runtime stub might not have oop maps, so set this flag
   1.544 +      // outside of update_register_map.
   1.545 +      map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread()));
   1.546 +      if (_cb->oop_maps() != NULL) {
   1.547 +        OopMapSet::update_register_map(this, map);
   1.548 +      }
   1.549 +    }
   1.550 +  }
   1.551 +  return frame(sp, younger_sp, frame_is_interpreted);
   1.552 +}
   1.553 +
   1.554 +
   1.555 +void frame::patch_pc(Thread* thread, address pc) {
   1.556 +  if(thread == Thread::current()) {
   1.557 +   StubRoutines::Sparc::flush_callers_register_windows_func()();
   1.558 +  }
   1.559 +  if (TracePcPatching) {
   1.560 +    // QQQ this assert is invalid (or too strong anyway) since _pc could
   1.561 +    // be original pc and frame could have the deopt pc.
   1.562 +    // assert(_pc == *O7_addr() + pc_return_offset, "frame has wrong pc");
   1.563 +    tty->print_cr("patch_pc at address  0x%x [0x%x -> 0x%x] ", O7_addr(), _pc, pc);
   1.564 +  }
   1.565 +  _cb = CodeCache::find_blob(pc);
   1.566 +  *O7_addr() = pc - pc_return_offset;
   1.567 +  _cb = CodeCache::find_blob(_pc);
   1.568 +  address original_pc = nmethod::get_deopt_original_pc(this);
   1.569 +  if (original_pc != NULL) {
   1.570 +    assert(original_pc == _pc, "expected original to be stored before patching");
   1.571 +    _deopt_state = is_deoptimized;
   1.572 +  } else {
   1.573 +    _deopt_state = not_deoptimized;
   1.574 +  }
   1.575 +}
   1.576 +
   1.577 +
   1.578 +static bool sp_is_valid(intptr_t* old_sp, intptr_t* young_sp, intptr_t* sp) {
   1.579 +  return (((intptr_t)sp & (2*wordSize-1)) == 0 &&
   1.580 +          sp <= old_sp &&
   1.581 +          sp >= young_sp);
   1.582 +}
   1.583 +
   1.584 +
   1.585 +/*
   1.586 +  Find the (biased) sp that is just younger than old_sp starting at sp.
   1.587 +  If not found return NULL. Register windows are assumed to be flushed.
   1.588 +*/
   1.589 +intptr_t* frame::next_younger_sp_or_null(intptr_t* old_sp, intptr_t* sp) {
   1.590 +
   1.591 +  intptr_t* previous_sp = NULL;
   1.592 +  intptr_t* orig_sp = sp;
   1.593 +
   1.594 +  int max_frames = (old_sp - sp) / 16; // Minimum frame size is 16
   1.595 +  int max_frame2 = max_frames;
   1.596 +  while(sp != old_sp && sp_is_valid(old_sp, orig_sp, sp)) {
   1.597 +    if (max_frames-- <= 0)
   1.598 +      // too many frames have gone by; invalid parameters given to this function
   1.599 +      break;
   1.600 +    previous_sp = sp;
   1.601 +    sp = (intptr_t*)sp[FP->sp_offset_in_saved_window()];
   1.602 +    sp = (intptr_t*)((intptr_t)sp + STACK_BIAS);
   1.603 +  }
   1.604 +
   1.605 +  return (sp == old_sp ? previous_sp : NULL);
   1.606 +}
   1.607 +
   1.608 +/*
   1.609 +  Determine if "sp" is a valid stack pointer. "sp" is assumed to be younger than
   1.610 +  "valid_sp". So if "sp" is valid itself then it should be possible to walk frames
   1.611 +  from "sp" to "valid_sp". The assumption is that the registers windows for the
   1.612 +  thread stack in question are flushed.
   1.613 +*/
   1.614 +bool frame::is_valid_stack_pointer(intptr_t* valid_sp, intptr_t* sp) {
   1.615 +  return next_younger_sp_or_null(valid_sp, sp) != NULL;
   1.616 +}
   1.617 +
   1.618 +
   1.619 +bool frame::interpreter_frame_equals_unpacked_fp(intptr_t* fp) {
   1.620 +  assert(is_interpreted_frame(), "must be interpreter frame");
   1.621 +  return this->fp() == fp;
   1.622 +}
   1.623 +
   1.624 +
   1.625 +void frame::pd_gc_epilog() {
   1.626 +  if (is_interpreted_frame()) {
   1.627 +    // set constant pool cache entry for interpreter
   1.628 +    Method* m = interpreter_frame_method();
   1.629 +
   1.630 +    *interpreter_frame_cpoolcache_addr() = m->constants()->cache();
   1.631 +  }
   1.632 +}
   1.633 +
   1.634 +
   1.635 +bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
   1.636 +#ifdef CC_INTERP
   1.637 +  // Is there anything to do?
   1.638 +#else
   1.639 +  assert(is_interpreted_frame(), "Not an interpreted frame");
   1.640 +  // These are reasonable sanity checks
   1.641 +  if (fp() == 0 || (intptr_t(fp()) & (2*wordSize-1)) != 0) {
   1.642 +    return false;
   1.643 +  }
   1.644 +  if (sp() == 0 || (intptr_t(sp()) & (2*wordSize-1)) != 0) {
   1.645 +    return false;
   1.646 +  }
   1.647 +
   1.648 +  const intptr_t interpreter_frame_initial_sp_offset = interpreter_frame_vm_local_words;
   1.649 +  if (fp() + interpreter_frame_initial_sp_offset < sp()) {
   1.650 +    return false;
   1.651 +  }
   1.652 +  // These are hacks to keep us out of trouble.
   1.653 +  // The problem with these is that they mask other problems
   1.654 +  if (fp() <= sp()) {        // this attempts to deal with unsigned comparison above
   1.655 +    return false;
   1.656 +  }
   1.657 +  // do some validation of frame elements
   1.658 +
   1.659 +  // first the method
   1.660 +
   1.661 +  Method* m = *interpreter_frame_method_addr();
   1.662 +
   1.663 +  // validate the method we'd find in this potential sender
   1.664 +  if (!m->is_valid_method()) return false;
   1.665 +
   1.666 +  // stack frames shouldn't be much larger than max_stack elements
   1.667 +
   1.668 +  if (fp() - sp() > 1024 + m->max_stack()*Interpreter::stackElementSize) {
   1.669 +    return false;
   1.670 +  }
   1.671 +
   1.672 +  // validate bci/bcx
   1.673 +
   1.674 +  intptr_t  bcx    = interpreter_frame_bcx();
   1.675 +  if (m->validate_bci_from_bcx(bcx) < 0) {
   1.676 +    return false;
   1.677 +  }
   1.678 +
   1.679 +  // validate ConstantPoolCache*
   1.680 +  ConstantPoolCache* cp = *interpreter_frame_cache_addr();
   1.681 +  if (cp == NULL || !cp->is_metaspace_object()) return false;
   1.682 +
   1.683 +  // validate locals
   1.684 +
   1.685 +  address locals =  (address) *interpreter_frame_locals_addr();
   1.686 +
   1.687 +  if (locals > thread->stack_base() || locals < (address) fp()) return false;
   1.688 +
   1.689 +  // We'd have to be pretty unlucky to be misled at this point
   1.690 +#endif /* CC_INTERP */
   1.691 +  return true;
   1.692 +}
   1.693 +
   1.694 +
   1.695 +// Windows have been flushed on entry (but not marked). Capture the pc that
   1.696 +// is the return address to the frame that contains "sp" as its stack pointer.
   1.697 +// This pc resides in the callee of the frame corresponding to "sp".
   1.698 +// As a side effect we mark this JavaFrameAnchor as having flushed the windows.
   1.699 +// This side effect lets us mark stacked JavaFrameAnchors (stacked in the
   1.700 +// call_helper) as flushed when we have flushed the windows for the most
   1.701 +// recent (i.e. current) JavaFrameAnchor. This saves useless flushing calls
   1.702 +// and lets us find the pc just once rather than multiple times as it did
   1.703 +// in the bad old _post_Java_state days.
   1.704 +//
   1.705 +void JavaFrameAnchor::capture_last_Java_pc(intptr_t* sp) {
   1.706 +  if (last_Java_sp() != NULL && last_Java_pc() == NULL) {
   1.707 +    // try and find the sp just younger than _last_Java_sp
   1.708 +    intptr_t* _post_Java_sp = frame::next_younger_sp_or_null(last_Java_sp(), sp);
   1.709 +    // Really this should never fail otherwise VM call must have non-standard
   1.710 +    // frame linkage (bad) or stack is not properly flushed (worse).
   1.711 +    guarantee(_post_Java_sp != NULL, "bad stack!");
   1.712 +    _last_Java_pc = (address) _post_Java_sp[ I7->sp_offset_in_saved_window()] + frame::pc_return_offset;
   1.713 +
   1.714 +  }
   1.715 +  set_window_flushed();
   1.716 +}
   1.717 +
   1.718 +void JavaFrameAnchor::make_walkable(JavaThread* thread) {
   1.719 +  if (walkable()) return;
   1.720 +  // Eventually make an assert
   1.721 +  guarantee(Thread::current() == (Thread*)thread, "only current thread can flush its registers");
   1.722 +  // We always flush in case the profiler wants it but we won't mark
   1.723 +  // the windows as flushed unless we have a last_Java_frame
   1.724 +  intptr_t* sp = StubRoutines::Sparc::flush_callers_register_windows_func()();
   1.725 +  if (last_Java_sp() != NULL ) {
   1.726 +    capture_last_Java_pc(sp);
   1.727 +  }
   1.728 +}
   1.729 +
   1.730 +intptr_t* frame::entry_frame_argument_at(int offset) const {
   1.731 +  // convert offset to index to deal with tsi
   1.732 +  int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize);
   1.733 +
   1.734 +  intptr_t* LSP = (intptr_t*) sp()[Lentry_args->sp_offset_in_saved_window()];
   1.735 +  return &LSP[index+1];
   1.736 +}
   1.737 +
   1.738 +
// Extract the result of an interpreted method into *oop_result (for
// reference results) or *value_result (for primitive results), and return
// the result's BasicType. For native methods the value is read from the
// frame's l_scratch/d_scratch save slots; otherwise it is read from the
// interpreter's expression-stack top.
BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  Method* method = interpreter_frame_method();
  BasicType type = method->result_type();

  if (method->is_native()) {
    // Prior to notifying the runtime of the method_exit the possible result
    // value is saved to l_scratch and d_scratch.

#ifdef CC_INTERP
    // C++ interpreter: scratch results live in the interpreter state.
    interpreterState istate = get_interpreterState();
    intptr_t* l_scratch = (intptr_t*) &istate->_native_lresult;
    intptr_t* d_scratch = (intptr_t*) &istate->_native_fresult;
#else /* CC_INTERP */
    // Template interpreter: scratch results live at fixed fp offsets.
    intptr_t* l_scratch = fp() + interpreter_frame_l_scratch_fp_offset;
    intptr_t* d_scratch = fp() + interpreter_frame_d_scratch_fp_offset;
#endif /* CC_INTERP */

    address l_addr = (address)l_scratch;
#ifdef _LP64
    // On 64-bit the result for 1/8/16/32-bit result types is in the other
    // word half
    l_addr += wordSize/2;
#endif

    switch (type) {
      case T_OBJECT:
      case T_ARRAY: {
#ifdef CC_INTERP
        *oop_result = istate->_oop_temp;
#else
        // Reference results are kept in the frame's oop temp slot so the GC
        // can find them.
        oop obj = cast_to_oop(at(interpreter_frame_oop_temp_offset));
        assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
        *oop_result = obj;
#endif // CC_INTERP
        break;
      }

      // Sub-word results are masked down from the stored jint.
      case T_BOOLEAN : { jint* p = (jint*)l_addr; value_result->z = (jboolean)((*p) & 0x1); break; }
      case T_BYTE    : { jint* p = (jint*)l_addr; value_result->b = (jbyte)((*p) & 0xff); break; }
      case T_CHAR    : { jint* p = (jint*)l_addr; value_result->c = (jchar)((*p) & 0xffff); break; }
      case T_SHORT   : { jint* p = (jint*)l_addr; value_result->s = (jshort)((*p) & 0xffff); break; }
      case T_INT     : value_result->i = *(jint*)l_addr; break;
      case T_LONG    : value_result->j = *(jlong*)l_scratch; break;
      case T_FLOAT   : value_result->f = *(jfloat*)d_scratch; break;
      case T_DOUBLE  : value_result->d = *(jdouble*)d_scratch; break;
      case T_VOID    : /* Nothing to do */ break;
      default        : ShouldNotReachHere();
    }
  } else {
    // Non-native method: the result sits on top of the expression stack.
    intptr_t* tos_addr = interpreter_frame_tos_address();

    switch(type) {
      case T_OBJECT:
      case T_ARRAY: {
        oop obj = cast_to_oop(*tos_addr);
        assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
        *oop_result = obj;
        break;
      }
      // Sub-word results are masked down from the stored jint.
      case T_BOOLEAN : { jint* p = (jint*)tos_addr; value_result->z = (jboolean)((*p) & 0x1); break; }
      case T_BYTE    : { jint* p = (jint*)tos_addr; value_result->b = (jbyte)((*p) & 0xff); break; }
      case T_CHAR    : { jint* p = (jint*)tos_addr; value_result->c = (jchar)((*p) & 0xffff); break; }
      case T_SHORT   : { jint* p = (jint*)tos_addr; value_result->s = (jshort)((*p) & 0xffff); break; }
      case T_INT     : value_result->i = *(jint*)tos_addr; break;
      case T_LONG    : value_result->j = *(jlong*)tos_addr; break;
      case T_FLOAT   : value_result->f = *(jfloat*)tos_addr; break;
      case T_DOUBLE  : value_result->d = *(jdouble*)tos_addr; break;
      case T_VOID    : /* Nothing to do */ break;
      default        : ShouldNotReachHere();
    }
  };

  return type;
}
   1.814 +
   1.815 +// Lesp pointer is one word lower than the top item on the stack.
   1.816 +intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
   1.817 +  int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize) - 1;
   1.818 +  return &interpreter_frame_tos_address()[index];
   1.819 +}
   1.820 +
   1.821 +
   1.822 +#ifndef PRODUCT
   1.823 +
   1.824 +#define DESCRIBE_FP_OFFSET(name) \
   1.825 +  values.describe(frame_no, fp() + frame::name##_offset, #name)
   1.826 +
   1.827 +void frame::describe_pd(FrameValues& values, int frame_no) {
   1.828 +  for (int w = 0; w < frame::register_save_words; w++) {
   1.829 +    values.describe(frame_no, sp() + w, err_msg("register save area word %d", w), 1);
   1.830 +  }
   1.831 +
   1.832 +  if (is_interpreted_frame()) {
   1.833 +    DESCRIBE_FP_OFFSET(interpreter_frame_d_scratch_fp);
   1.834 +    DESCRIBE_FP_OFFSET(interpreter_frame_l_scratch_fp);
   1.835 +    DESCRIBE_FP_OFFSET(interpreter_frame_padding);
   1.836 +    DESCRIBE_FP_OFFSET(interpreter_frame_oop_temp);
   1.837 +
   1.838 +    // esp, according to Lesp (e.g. not depending on bci), if seems valid
   1.839 +    intptr_t* esp = *interpreter_frame_esp_addr();
   1.840 +    if ((esp >= sp()) && (esp < fp())) {
   1.841 +      values.describe(-1, esp, "*Lesp");
   1.842 +    }
   1.843 +  }
   1.844 +
   1.845 +  if (!is_compiled_frame()) {
   1.846 +    if (frame::callee_aggregate_return_pointer_words != 0) {
   1.847 +      values.describe(frame_no, sp() + frame::callee_aggregate_return_pointer_sp_offset, "callee_aggregate_return_pointer_word");
   1.848 +    }
   1.849 +    for (int w = 0; w < frame::callee_register_argument_save_area_words; w++) {
   1.850 +      values.describe(frame_no, sp() + frame::callee_register_argument_save_area_sp_offset + w,
   1.851 +                      err_msg("callee_register_argument_save_area_words %d", w));
   1.852 +    }
   1.853 +  }
   1.854 +}
   1.855 +
   1.856 +#endif
   1.857 +
   1.858 +intptr_t *frame::initial_deoptimization_info() {
   1.859 +  // unused... but returns fp() to minimize changes introduced by 7087445
   1.860 +  return fp();
   1.861 +}

mercurial