src/cpu/sparc/vm/frame_sparc.cpp

author:      coleenp
date:        Wed, 28 Nov 2012 17:50:21 -0500
changeset:   4295:59c790074993
parent:      4037:da91efe96a93
child:       4645:ec2eddfed950
permissions: -rw-r--r--

8003635: NPG: AsynchGetCallTrace broken by Method* virtual call
Summary: Make metaspace::contains be lock free and used to see if something is in metaspace, also compare Method* with vtbl pointer.
Reviewed-by: dholmes, sspitsyn, dcubed, jmasa
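
The fix turns on being able to decide, from a profiling signal handler and without taking any locks, whether a raw pointer plausibly is a Method*. The sketch below only illustrates that idea; the parameters (in_metaspace, method_vtbl_word) are hypothetical stand-ins for the lock-free Metaspace containment check and the Method vtbl comparison the summary describes, not the actual HotSpot entry points.

    #include <stdint.h>
    #include <stddef.h>

    // Hypothetical sketch (not the actual HotSpot code): validate a Method*
    // candidate from a signal handler without taking locks.
    // 'in_metaspace' stands in for a lock-free Metaspace::contains(), and
    // 'method_vtbl_word' is the vtable word read from a known-good Method*.
    static bool is_plausible_method(const void* candidate,
                                    bool (*in_metaspace)(const void*),
                                    intptr_t method_vtbl_word) {
      if (candidate == NULL ||
          ((uintptr_t)candidate & (sizeof(void*) - 1)) != 0) {
        return false;                 // NULL or misaligned: cannot be a Method*
      }
      if (!in_metaspace(candidate)) { // must live in metaspace (lock-free check)
        return false;
      }
      // A C++ object with virtual functions keeps its vtable pointer in its
      // first word; a live Method* should carry the same vtable word as a
      // known-good Method instance.
      return *(const intptr_t*)candidate == method_vtbl_word;
    }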

/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "interpreter/interpreter.hpp"
#include "memory/resourceArea.hpp"
#include "oops/markOop.hpp"
#include "oops/method.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/monitorChunk.hpp"
#include "runtime/signature.hpp"
#include "runtime/stubCodeGenerator.hpp"
#include "runtime/stubRoutines.hpp"
#include "vmreg_sparc.inline.hpp"
#ifdef COMPILER1
#include "c1/c1_Runtime1.hpp"
#include "runtime/vframeArray.hpp"
#endif
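
// _window caches the sp of the last Java frame so that in/local register
// locations can be resolved from its saved window; _younger_window is filled
// in as the stack is walked (see pd_location() below).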
void RegisterMap::pd_clear() {
  if (_thread->has_last_Java_frame()) {
    frame fr = _thread->last_frame();
    _window = fr.sp();
  } else {
    _window = NULL;
  }
  _younger_window = NULL;
}


// Unified register numbering scheme: each 32-bits counts as a register
// number, so all the V9 registers take 2 slots.
const static int R_L_nums[] = {0+040,2+040,4+040,6+040,8+040,10+040,12+040,14+040};
const static int R_I_nums[] = {0+060,2+060,4+060,6+060,8+060,10+060,12+060,14+060};
const static int R_O_nums[] = {0+020,2+020,4+020,6+020,8+020,10+020,12+020,14+020};
const static int R_G_nums[] = {0+000,2+000,4+000,6+000,8+000,10+000,12+000,14+000};
static RegisterMap::LocationValidType bad_mask = 0;
static RegisterMap::LocationValidType R_LIO_mask = 0;
static bool register_map_inited = false;

static void register_map_init() {
  if (!register_map_inited) {
    register_map_inited = true;
    int i;
    for (i = 0; i < 8; i++) {
      assert(R_L_nums[i] < RegisterMap::location_valid_type_size, "in first chunk");
      assert(R_I_nums[i] < RegisterMap::location_valid_type_size, "in first chunk");
      assert(R_O_nums[i] < RegisterMap::location_valid_type_size, "in first chunk");
      assert(R_G_nums[i] < RegisterMap::location_valid_type_size, "in first chunk");
    }

    bad_mask |= (1LL << R_O_nums[6]); // SP
    bad_mask |= (1LL << R_O_nums[7]); // cPC
    bad_mask |= (1LL << R_I_nums[6]); // FP
    bad_mask |= (1LL << R_I_nums[7]); // rPC
    bad_mask |= (1LL << R_G_nums[2]); // TLS
    bad_mask |= (1LL << R_G_nums[7]); // reserved by libthread

    for (i = 0; i < 8; i++) {
      R_LIO_mask |= (1LL << R_L_nums[i]);
      R_LIO_mask |= (1LL << R_I_nums[i]);
      R_LIO_mask |= (1LL << R_O_nums[i]);
    }
  }
}

address RegisterMap::pd_location(VMReg regname) const {
  register_map_init();

  assert(regname->is_reg(), "sanity check");
  // Only the GPRs get handled this way
  if( !regname->is_Register())
    return NULL;

  // don't talk about bad registers
  if ((bad_mask & ((LocationValidType)1 << regname->value())) != 0) {
    return NULL;
  }

  // Convert to a GPR
  Register reg;
  int second_word = 0;
  // 32-bit registers for in, out and local
  if (!regname->is_concrete()) {
    // HMM: we ought to return NULL for any non-concrete (odd) vmreg.
    // This is all tied up in the fact that we put out double oopMaps for
    // register locations. When that is fixed we will return NULL
    // (or assert here).
    reg = regname->prev()->as_Register();
#ifdef _LP64
    second_word = sizeof(jint);
#else
    return NULL;
#endif // _LP64
  } else {
    reg = regname->as_Register();
  }
  if (reg->is_out()) {
    assert(_younger_window != NULL, "Younger window should be available");
    return second_word + (address)&_younger_window[reg->after_save()->sp_offset_in_saved_window()];
  }
  if (reg->is_local() || reg->is_in()) {
    assert(_window != NULL, "Window should be available");
    return second_word + (address)&_window[reg->sp_offset_in_saved_window()];
  }
  // Only the window'd GPRs get handled this way; not the globals.
  return NULL;
}

#ifdef ASSERT
void RegisterMap::check_location_valid() {
  register_map_init();
  assert((_location_valid[0] & bad_mask) == 0, "cannot have special locations for SP,FP,TLS,etc.");
}
#endif

// We are shifting windows.  That means we are moving all %i to %o,
// getting rid of all current %l, and keeping all %g.  This is only
// complicated if any of the location pointers for these are valid.
// The normal case is that everything is in its standard register window
// home, and _location_valid[0] is zero.  In that case, this routine
// does exactly nothing.
void RegisterMap::shift_individual_registers() {
  if (!update_map())  return;  // this only applies to maps with locations
  register_map_init();
  check_location_valid();

  LocationValidType lv = _location_valid[0];
  LocationValidType lv0 = lv;

  lv &= ~R_LIO_mask;  // clear %l, %o, %i regs

  // if we cleared some non-%g locations, we may have to do some shifting
  if (lv != lv0) {
    // copy %i0-%i5 to %o0-%o5, if they have special locations.
    // This can happen within stubs which spill argument registers
    // around a dynamic link operation, such as resolve_opt_virtual_call.
    for (int i = 0; i < 8; i++) {
      if (lv0 & (1LL << R_I_nums[i])) {
        _location[R_O_nums[i]] = _location[R_I_nums[i]];
        lv |=  (1LL << R_O_nums[i]);
      }
    }
  }

  _location_valid[0] = lv;
  check_location_valid();
}

bool frame::safe_for_sender(JavaThread *thread) {

  address _SP = (address) sp();
  address _FP = (address) fp();
  address _UNEXTENDED_SP = (address) unextended_sp();
  // sp must be within the stack
  bool sp_safe = (_SP <= thread->stack_base()) &&
                 (_SP >= thread->stack_base() - thread->stack_size());

  if (!sp_safe) {
    return false;
  }

  // unextended sp must be within the stack and above or equal sp
  bool unextended_sp_safe = (_UNEXTENDED_SP <= thread->stack_base()) &&
                            (_UNEXTENDED_SP >= _SP);

  if (!unextended_sp_safe) return false;

  // an fp must be within the stack and above (but not equal) sp
  bool fp_safe = (_FP <= thread->stack_base()) &&
                 (_FP > _SP);

  // We know sp/unextended_sp are safe; only fp is questionable here.

  // If the current frame is known to the code cache then we can attempt
  // to construct the sender and do some validation of it. This goes a long way
  // toward eliminating issues when we get in frame construction code.

  if (_cb != NULL ) {

    // First check if the frame is complete and the tester is reliable.
    // Unfortunately we can only check frame completeness for runtime stubs
    // and nmethods; other generic buffer blobs are more problematic so we
    // just assume they are ok. Adapter blobs never have a complete frame
    // and are never ok.

    if (!_cb->is_frame_complete_at(_pc)) {
      if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
        return false;
      }
    }

    // Entry frame checks
    if (is_entry_frame()) {
      // an entry frame must have a valid fp.

      if (!fp_safe) {
        return false;
      }

      // Validate the JavaCallWrapper an entry frame must have

      address jcw = (address)entry_frame_call_wrapper();

      bool jcw_safe = (jcw <= thread->stack_base()) && ( jcw > _FP);

      return jcw_safe;

    }

    intptr_t* younger_sp = sp();
    intptr_t* _SENDER_SP = sender_sp(); // sender is actually just _FP
    bool adjusted_stack = is_interpreted_frame();

    address   sender_pc = (address)younger_sp[I7->sp_offset_in_saved_window()] + pc_return_offset;


    // We must always be able to find a recognizable pc
    CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
    if (sender_pc == NULL ||  sender_blob == NULL) {
      return false;
    }

    // It should be safe to construct the sender though it might not be valid

    frame sender(_SENDER_SP, younger_sp, adjusted_stack);

    // Do we have a valid fp?
    address sender_fp = (address) sender.fp();

    // an fp must be within the stack and above (but not equal) the current frame's _FP

    bool sender_fp_safe = (sender_fp <= thread->stack_base()) &&
                   (sender_fp > _FP);

    if (!sender_fp_safe) {
      return false;
    }


    // If the potential sender is the interpreter then we can do some more checking
    if (Interpreter::contains(sender_pc)) {
      return sender.is_interpreted_frame_valid(thread);
    }

    // Could just be some random pointer within the codeBlob
    if (!sender.cb()->code_contains(sender_pc)) {
      return false;
    }

    // We should never be able to see an adapter if the current frame is something from the code cache
    if (sender_blob->is_adapter_blob()) {
      return false;
    }

    if( sender.is_entry_frame()) {
      // Validate the JavaCallWrapper an entry frame must have

      address jcw = (address)sender.entry_frame_call_wrapper();

      bool jcw_safe = (jcw <= thread->stack_base()) && ( jcw > sender_fp);

      return jcw_safe;
    }

    // If the frame size is 0 something is bad because every nmethod has a non-zero frame size
    // because you must allocate window space

    if (sender_blob->frame_size() == 0) {
      assert(!sender_blob->is_nmethod(), "should count return address at least");
      return false;
    }

    // The sender should positively be an nmethod or call_stub. On sparc we might in fact see something else.
    // The cause of this is that at a save instruction the O7 we get is a leftover from an earlier
    // window use. So if a runtime stub creates two frames (common in fastdebug/jvmg) then we see the
    // stale pc. So if the sender blob is not something we'd expect we have little choice but to declare
    // the stack unwalkable. pd_get_top_frame_for_signal_handler tries to recover from this by unwinding
    // that initial frame and retrying.

    if (!sender_blob->is_nmethod()) {
      return false;
    }

    // Could put some more validation for the potential non-interpreted sender
    // frame we'd create by calling sender if I could think of any. Wait for next crash in forte...

    // One idea is seeing if the sender_pc we have is one that we'd expect to call to the current cb

    // We've validated the potential sender that would be created

    return true;

  }

  // Must be native-compiled frame. Since the sender will try and use fp to find
  // linkages it must be safe

  if (!fp_safe) return false;

  // could try and do some more potential verification of native frame if we could think of some...

  return true;
}

// constructors

// Construct an unpatchable, deficient frame
frame::frame(intptr_t* sp, unpatchable_t, address pc, CodeBlob* cb) {
#ifdef _LP64
  assert( (((intptr_t)sp & (wordSize-1)) == 0), "frame constructor passed an invalid sp");
#endif
  _sp = sp;
  _younger_sp = NULL;
  _pc = pc;
  _cb = cb;
  _sp_adjustment_by_callee = 0;
  assert(pc == NULL && cb == NULL || pc != NULL, "can't have a cb and no pc!");
  if (_cb == NULL && _pc != NULL ) {
    _cb = CodeCache::find_blob(_pc);
  }
  _deopt_state = unknown;
#ifdef ASSERT
  if ( _cb != NULL && _cb->is_nmethod()) {
    // Without a valid unextended_sp() we can't convert the pc to "original"
    assert(!((nmethod*)_cb)->is_deopt_pc(_pc), "invariant broken");
  }
#endif // ASSERT
}
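
// A frame's current pc is not stored in its own frame: it is recovered from
// the I7 slot of the register window saved at the younger (callee) sp, i.e.
// the address of the call into the callee, plus pc_return_offset.  That is
// why this constructor needs younger_sp.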
frame::frame(intptr_t* sp, intptr_t* younger_sp, bool younger_frame_is_interpreted) :
  _sp(sp),
  _younger_sp(younger_sp),
  _deopt_state(unknown),
  _sp_adjustment_by_callee(0) {
  if (younger_sp == NULL) {
    // make a deficient frame which doesn't know where its PC is
    _pc = NULL;
    _cb = NULL;
  } else {
    _pc = (address)younger_sp[I7->sp_offset_in_saved_window()] + pc_return_offset;
    assert( (intptr_t*)younger_sp[FP->sp_offset_in_saved_window()] == (intptr_t*)((intptr_t)sp - STACK_BIAS), "younger_sp must be valid");
    // Any frame we ever build should always be "safe", therefore we should not have to call
    // find_blob_unsafe.
    // In case of native stubs, the pc retrieved here might be
    // wrong.  (the _last_native_pc will have the right value)
    // So do not add any asserts on the _pc here.
  }

  if (_pc != NULL)
    _cb = CodeCache::find_blob(_pc);

  // Check for MethodHandle call sites.
  if (_cb != NULL) {
    nmethod* nm = _cb->as_nmethod_or_null();
    if (nm != NULL) {
      if (nm->is_deopt_mh_entry(_pc) || nm->is_method_handle_return(_pc)) {
        _sp_adjustment_by_callee = (intptr_t*) ((intptr_t) sp[L7_mh_SP_save->sp_offset_in_saved_window()] + STACK_BIAS) - sp;
        // The SP is already adjusted by this MH call site, don't
        // overwrite this value with the wrong interpreter value.
        younger_frame_is_interpreted = false;
      }
    }
  }

  if (younger_frame_is_interpreted) {
    // compute adjustment to this frame's SP made by its interpreted callee
    _sp_adjustment_by_callee = (intptr_t*) ((intptr_t) younger_sp[I5_savedSP->sp_offset_in_saved_window()] + STACK_BIAS) - sp;
  }

  // It is important that the frame is fully constructed when we do
  // this lookup as get_deopt_original_pc() needs a correct value for
  // unextended_sp() which uses _sp_adjustment_by_callee.
  if (_pc != NULL) {
    address original_pc = nmethod::get_deopt_original_pc(this);
    if (original_pc != NULL) {
      _pc = original_pc;
      _deopt_state = is_deoptimized;
    } else {
      _deopt_state = not_deoptimized;
    }
  }
}

bool frame::is_interpreted_frame() const  {
  return Interpreter::contains(pc());
}

// sender_sp

intptr_t* frame::interpreter_frame_sender_sp() const {
  assert(is_interpreted_frame(), "interpreted frame expected");
  return fp();
}

#ifndef CC_INTERP
void frame::set_interpreter_frame_sender_sp(intptr_t* sender_sp) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  Unimplemented();
}
#endif // CC_INTERP


#ifdef ASSERT
// Debugging aid
static frame nth_sender(int n) {
  frame f = JavaThread::current()->last_frame();

  for(int i = 0; i < n; ++i)
    f = f.sender((RegisterMap*)NULL);

  printf("first frame %d\n",          f.is_first_frame()       ? 1 : 0);
  printf("interpreted frame %d\n",    f.is_interpreted_frame() ? 1 : 0);
  printf("java frame %d\n",           f.is_java_frame()        ? 1 : 0);
  printf("entry frame %d\n",          f.is_entry_frame()       ? 1 : 0);
  printf("native frame %d\n",         f.is_native_frame()      ? 1 : 0);
  if (f.is_compiled_frame()) {
    if (f.is_deoptimized_frame())
      printf("deoptimized frame 1\n");
    else
      printf("compiled frame 1\n");
  }

  return f;
}
#endif

frame frame::sender_for_entry_frame(RegisterMap *map) const {
  assert(map != NULL, "map must be set");
  // Java frame called from C; skip all C frames and return top C
  // frame of that chunk as the sender
  JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor();
  assert(!entry_frame_is_first(), "next Java fp must be non zero");
  assert(jfa->last_Java_sp() > _sp, "must be above this frame on stack");
  intptr_t* last_Java_sp = jfa->last_Java_sp();
  // Since we are walking the stack now this nested anchor is obviously walkable
  // even if it wasn't when it was stacked.
  if (!jfa->walkable()) {
    // Capture _last_Java_pc (if needed) and mark anchor walkable.
    jfa->capture_last_Java_pc(_sp);
  }
  assert(jfa->last_Java_pc() != NULL, "No captured pc!");
  map->clear();
  map->make_integer_regs_unsaved();
  map->shift_window(last_Java_sp, NULL);
  assert(map->include_argument_oops(), "should be set by clear");
  return frame(last_Java_sp, frame::unpatchable, jfa->last_Java_pc());
}

frame frame::sender_for_interpreter_frame(RegisterMap *map) const {
  ShouldNotCallThis();
  return sender(map);
}

frame frame::sender_for_compiled_frame(RegisterMap *map) const {
  ShouldNotCallThis();
  return sender(map);
}

frame frame::sender(RegisterMap* map) const {
  assert(map != NULL, "map must be set");

  assert(CodeCache::find_blob_unsafe(_pc) == _cb, "inconsistent");

  // Default is not to follow arguments; update it accordingly below
  map->set_include_argument_oops(false);

  if (is_entry_frame()) return sender_for_entry_frame(map);

  intptr_t* younger_sp = sp();
  intptr_t* sp         = sender_sp();

  // Note:  The version of this operation on any platform with callee-save
  //        registers must update the register map (if not null).
  //        In order to do this correctly, the various subtypes of
  //        frame (interpreted, compiled, glue, native),
  //        must be distinguished.  There is no need on SPARC for
  //        such distinctions, because all callee-save registers are
  //        preserved for all frames via SPARC-specific mechanisms.
  //
  //        *** HOWEVER, *** if and when we make any floating-point
  //        registers callee-saved, then we will have to copy over
  //        the RegisterMap update logic from the Intel code.

  // The constructor of the sender must know whether this frame is interpreted so it can set the
  // sender's _sp_adjustment_by_callee field.  An osr adapter frame was originally
  // interpreted but its pc is in the code cache (for c1 -> osr_frame_return_id stub), so it must be
  // explicitly recognized.


  bool frame_is_interpreted = is_interpreted_frame();
  if (frame_is_interpreted) {
    map->make_integer_regs_unsaved();
    map->shift_window(sp, younger_sp);
  } else if (_cb != NULL) {
    // Update the locations of implicitly saved registers to be their
    // addresses in the register save area.
    // For %o registers, the addresses of %i registers in the next younger
    // frame are used.
    map->shift_window(sp, younger_sp);
    if (map->update_map()) {
      // Tell GC to use argument oopmaps for some runtime stubs that need it.
      // For C1, the runtime stub might not have oop maps, so set this flag
      // outside of update_register_map.
      map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread()));
      if (_cb->oop_maps() != NULL) {
        OopMapSet::update_register_map(this, map);
      }
    }
  }
  return frame(sp, younger_sp, frame_is_interpreted);
}

void frame::patch_pc(Thread* thread, address pc) {
  if(thread == Thread::current()) {
   StubRoutines::Sparc::flush_callers_register_windows_func()();
  }
  if (TracePcPatching) {
    // QQQ this assert is invalid (or too strong anyway) since _pc could
    // be the original pc and the frame could have the deopt pc.
    // assert(_pc == *O7_addr() + pc_return_offset, "frame has wrong pc");
    tty->print_cr("patch_pc at address  0x%x [0x%x -> 0x%x] ", O7_addr(), _pc, pc);
  }
  _cb = CodeCache::find_blob(pc);
  *O7_addr() = pc - pc_return_offset;
  _cb = CodeCache::find_blob(_pc);
  address original_pc = nmethod::get_deopt_original_pc(this);
  if (original_pc != NULL) {
    assert(original_pc == _pc, "expected original to be stored before patching");
    _deopt_state = is_deoptimized;
  } else {
    _deopt_state = not_deoptimized;
  }
}
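
// An sp is plausible here if it is doubleword aligned and lies between the
// sp we started walking from (young_sp) and the older sp we are walking toward.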
static bool sp_is_valid(intptr_t* old_sp, intptr_t* young_sp, intptr_t* sp) {
  return (((intptr_t)sp & (2*wordSize-1)) == 0 &&
          sp <= old_sp &&
          sp >= young_sp);
}


/*
  Find the (biased) sp that is just younger than old_sp starting at sp.
  If not found return NULL. Register windows are assumed to be flushed.
*/
intptr_t* frame::next_younger_sp_or_null(intptr_t* old_sp, intptr_t* sp) {

  intptr_t* previous_sp = NULL;
  intptr_t* orig_sp = sp;

  int max_frames = (old_sp - sp) / 16; // Minimum frame size is 16
  int max_frame2 = max_frames;
  while(sp != old_sp && sp_is_valid(old_sp, orig_sp, sp)) {
    if (max_frames-- <= 0)
      // too many frames have gone by; invalid parameters given to this function
      break;
    previous_sp = sp;
    sp = (intptr_t*)sp[FP->sp_offset_in_saved_window()];
    sp = (intptr_t*)((intptr_t)sp + STACK_BIAS);
  }

  return (sp == old_sp ? previous_sp : NULL);
}

/*
  Determine if "sp" is a valid stack pointer. "sp" is assumed to be younger than
  "valid_sp". So if "sp" is valid itself then it should be possible to walk frames
  from "sp" to "valid_sp". The assumption is that the register windows for the
  thread stack in question are flushed.
*/
bool frame::is_valid_stack_pointer(intptr_t* valid_sp, intptr_t* sp) {
  return next_younger_sp_or_null(valid_sp, sp) != NULL;
}


bool frame::interpreter_frame_equals_unpacked_fp(intptr_t* fp) {
  assert(is_interpreted_frame(), "must be interpreter frame");
  return this->fp() == fp;
}


void frame::pd_gc_epilog() {
  if (is_interpreted_frame()) {
    // set constant pool cache entry for interpreter
    Method* m = interpreter_frame_method();

    *interpreter_frame_cpoolcache_addr() = m->constants()->cache();
  }
}

bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
#ifdef CC_INTERP
  // Is there anything to do?
#else
  assert(is_interpreted_frame(), "Not an interpreted frame");
  // These are reasonable sanity checks
  if (fp() == 0 || (intptr_t(fp()) & (2*wordSize-1)) != 0) {
    return false;
  }
  if (sp() == 0 || (intptr_t(sp()) & (2*wordSize-1)) != 0) {
    return false;
  }

  const intptr_t interpreter_frame_initial_sp_offset = interpreter_frame_vm_local_words;
  if (fp() + interpreter_frame_initial_sp_offset < sp()) {
    return false;
  }
  // These are hacks to keep us out of trouble.
  // The problem with these is that they mask other problems
  if (fp() <= sp()) {        // this attempts to deal with unsigned comparison above
    return false;
  }
  // do some validation of frame elements

  // first the method

  Method* m = *interpreter_frame_method_addr();

  // validate the method we'd find in this potential sender
  if (!m->is_valid_method()) return false;

  // stack frames shouldn't be much larger than max_stack elements

  if (fp() - sp() > 1024 + m->max_stack()*Interpreter::stackElementSize) {
    return false;
  }

  // validate bci/bcx

  intptr_t  bcx    = interpreter_frame_bcx();
  if (m->validate_bci_from_bcx(bcx) < 0) {
    return false;
  }

  // validate ConstantPoolCache*
  ConstantPoolCache* cp = *interpreter_frame_cache_addr();
  if (cp == NULL || !cp->is_metadata()) return false;

  // validate locals

  address locals =  (address) *interpreter_frame_locals_addr();

  if (locals > thread->stack_base() || locals < (address) fp()) return false;

  // We'd have to be pretty unlucky to be misled at this point
#endif /* CC_INTERP */
  return true;
}

// Windows have been flushed on entry (but not marked). Capture the pc that
// is the return address to the frame that contains "sp" as its stack pointer.
// This pc resides in the callee of the frame corresponding to "sp".
// As a side effect we mark this JavaFrameAnchor as having flushed the windows.
// This side effect lets us mark stacked JavaFrameAnchors (stacked in the
// call_helper) as flushed when we have flushed the windows for the most
// recent (i.e. current) JavaFrameAnchor. This saves useless flushing calls
// and lets us find the pc just once rather than multiple times as it did
// in the bad old _post_Java_state days.
//
void JavaFrameAnchor::capture_last_Java_pc(intptr_t* sp) {
  if (last_Java_sp() != NULL && last_Java_pc() == NULL) {
    // try and find the sp just younger than _last_Java_sp
    intptr_t* _post_Java_sp = frame::next_younger_sp_or_null(last_Java_sp(), sp);
    // Really this should never fail; otherwise the VM call must have non-standard
    // frame linkage (bad) or the stack is not properly flushed (worse).
    guarantee(_post_Java_sp != NULL, "bad stack!");
    _last_Java_pc = (address) _post_Java_sp[ I7->sp_offset_in_saved_window()] + frame::pc_return_offset;

  }
  set_window_flushed();
}

void JavaFrameAnchor::make_walkable(JavaThread* thread) {
  if (walkable()) return;
  // Eventually make an assert
  guarantee(Thread::current() == (Thread*)thread, "only current thread can flush its registers");
  // We always flush in case the profiler wants it, but we won't mark
  // the windows as flushed unless we have a last_Java_frame
  intptr_t* sp = StubRoutines::Sparc::flush_callers_register_windows_func()();
  if (last_Java_sp() != NULL ) {
    capture_last_Java_pc(sp);
  }
}

intptr_t* frame::entry_frame_argument_at(int offset) const {
  // convert offset to index to deal with tsi
  int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize);

  intptr_t* LSP = (intptr_t*) sp()[Lentry_args->sp_offset_in_saved_window()];
  return &LSP[index+1];
}


BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  Method* method = interpreter_frame_method();
  BasicType type = method->result_type();

  if (method->is_native()) {
    // Prior to notifying the runtime of the method_exit the possible result
    // value is saved to l_scratch and d_scratch.

#ifdef CC_INTERP
    interpreterState istate = get_interpreterState();
    intptr_t* l_scratch = (intptr_t*) &istate->_native_lresult;
    intptr_t* d_scratch = (intptr_t*) &istate->_native_fresult;
#else /* CC_INTERP */
    intptr_t* l_scratch = fp() + interpreter_frame_l_scratch_fp_offset;
    intptr_t* d_scratch = fp() + interpreter_frame_d_scratch_fp_offset;
#endif /* CC_INTERP */

    address l_addr = (address)l_scratch;
#ifdef _LP64
    // On 64-bit the result for 1/8/16/32-bit result types is in the other
    // word half
    l_addr += wordSize/2;
#endif

    switch (type) {
      case T_OBJECT:
      case T_ARRAY: {
#ifdef CC_INTERP
        *oop_result = istate->_oop_temp;
#else
        oop obj = (oop) at(interpreter_frame_oop_temp_offset);
        assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
        *oop_result = obj;
#endif // CC_INTERP
        break;
      }

      case T_BOOLEAN : { jint* p = (jint*)l_addr; value_result->z = (jboolean)((*p) & 0x1); break; }
      case T_BYTE    : { jint* p = (jint*)l_addr; value_result->b = (jbyte)((*p) & 0xff); break; }
      case T_CHAR    : { jint* p = (jint*)l_addr; value_result->c = (jchar)((*p) & 0xffff); break; }
      case T_SHORT   : { jint* p = (jint*)l_addr; value_result->s = (jshort)((*p) & 0xffff); break; }
      case T_INT     : value_result->i = *(jint*)l_addr; break;
      case T_LONG    : value_result->j = *(jlong*)l_scratch; break;
      case T_FLOAT   : value_result->f = *(jfloat*)d_scratch; break;
      case T_DOUBLE  : value_result->d = *(jdouble*)d_scratch; break;
      case T_VOID    : /* Nothing to do */ break;
      default        : ShouldNotReachHere();
    }
  } else {
    intptr_t* tos_addr = interpreter_frame_tos_address();

    switch(type) {
      case T_OBJECT:
      case T_ARRAY: {
        oop obj = (oop)*tos_addr;
        assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
        *oop_result = obj;
        break;
      }
      case T_BOOLEAN : { jint* p = (jint*)tos_addr; value_result->z = (jboolean)((*p) & 0x1); break; }
      case T_BYTE    : { jint* p = (jint*)tos_addr; value_result->b = (jbyte)((*p) & 0xff); break; }
      case T_CHAR    : { jint* p = (jint*)tos_addr; value_result->c = (jchar)((*p) & 0xffff); break; }
      case T_SHORT   : { jint* p = (jint*)tos_addr; value_result->s = (jshort)((*p) & 0xffff); break; }
      case T_INT     : value_result->i = *(jint*)tos_addr; break;
      case T_LONG    : value_result->j = *(jlong*)tos_addr; break;
      case T_FLOAT   : value_result->f = *(jfloat*)tos_addr; break;
      case T_DOUBLE  : value_result->d = *(jdouble*)tos_addr; break;
      case T_VOID    : /* Nothing to do */ break;
      default        : ShouldNotReachHere();
    }
  };

  return type;
}

// Lesp pointer is one word lower than the top item on the stack.
intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
  int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize) - 1;
  return &interpreter_frame_tos_address()[index];
}


#ifndef PRODUCT

#define DESCRIBE_FP_OFFSET(name) \
  values.describe(frame_no, fp() + frame::name##_offset, #name)

void frame::describe_pd(FrameValues& values, int frame_no) {
  for (int w = 0; w < frame::register_save_words; w++) {
    values.describe(frame_no, sp() + w, err_msg("register save area word %d", w), 1);
  }

  if (is_interpreted_frame()) {
    DESCRIBE_FP_OFFSET(interpreter_frame_d_scratch_fp);
    DESCRIBE_FP_OFFSET(interpreter_frame_l_scratch_fp);
    DESCRIBE_FP_OFFSET(interpreter_frame_padding);
    DESCRIBE_FP_OFFSET(interpreter_frame_oop_temp);

    // esp, according to Lesp (e.g. not depending on bci), if it seems valid
    intptr_t* esp = *interpreter_frame_esp_addr();
    if ((esp >= sp()) && (esp < fp())) {
      values.describe(-1, esp, "*Lesp");
    }
  }

  if (!is_compiled_frame()) {
    if (frame::callee_aggregate_return_pointer_words != 0) {
      values.describe(frame_no, sp() + frame::callee_aggregate_return_pointer_sp_offset, "callee_aggregate_return_pointer_word");
    }
    for (int w = 0; w < frame::callee_register_argument_save_area_words; w++) {
      values.describe(frame_no, sp() + frame::callee_register_argument_save_area_sp_offset + w,
                      err_msg("callee_register_argument_save_area_words %d", w));
    }
  }
}

#endif

intptr_t *frame::initial_deoptimization_info() {
  // unused... but returns fp() to minimize changes introduced by 7087445
  return fp();
}
