src/cpu/sparc/vm/frame_sparc.cpp

Thu, 07 Apr 2011 09:53:20 -0700

author
johnc
date
Thu, 07 Apr 2011 09:53:20 -0700
changeset 2781
e1162778c1c8
parent 2314
f95d63e2154a
child 2868
2e038ad0c1d0
permissions
-rw-r--r--

7009266: G1: assert(obj->is_oop_or_null(true )) failed: Error
Summary: A referent object that is only weakly reachable at the start of concurrent marking but is re-attached to the strongly reachable object graph during marking may not be marked as live. This can cause the reference object to be processed prematurely and leave dangling pointers to the referent object. Implement a read barrier for the java.lang.ref.Reference::referent field by intrinsifying the Reference.get() method, and intercepting accesses through JNI, reflection, and Unsafe, so that when a non-null referent object is read it is also logged in an SATB buffer.
Reviewed-by: kvn, iveresov, never, tonyp, dholmes

     1 /*
     2  * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
     3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     4  *
     5  * This code is free software; you can redistribute it and/or modify it
     6  * under the terms of the GNU General Public License version 2 only, as
     7  * published by the Free Software Foundation.
     8  *
     9  * This code is distributed in the hope that it will be useful, but WITHOUT
    10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    12  * version 2 for more details (a copy is included in the LICENSE file that
    13  * accompanied this code).
    14  *
    15  * You should have received a copy of the GNU General Public License version
    16  * 2 along with this work; if not, write to the Free Software Foundation,
    17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    18  *
    19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    20  * or visit www.oracle.com if you need additional information or have any
    21  * questions.
    22  *
    23  */
    25 #include "precompiled.hpp"
    26 #include "interpreter/interpreter.hpp"
    27 #include "memory/resourceArea.hpp"
    28 #include "oops/markOop.hpp"
    29 #include "oops/methodOop.hpp"
    30 #include "oops/oop.inline.hpp"
    31 #include "runtime/frame.inline.hpp"
    32 #include "runtime/handles.inline.hpp"
    33 #include "runtime/javaCalls.hpp"
    34 #include "runtime/monitorChunk.hpp"
    35 #include "runtime/signature.hpp"
    36 #include "runtime/stubCodeGenerator.hpp"
    37 #include "runtime/stubRoutines.hpp"
    38 #include "vmreg_sparc.inline.hpp"
    39 #ifdef COMPILER1
    40 #include "c1/c1_Runtime1.hpp"
    41 #include "runtime/vframeArray.hpp"
    42 #endif
    44 void RegisterMap::pd_clear() {
    45   if (_thread->has_last_Java_frame()) {
    46     frame fr = _thread->last_frame();
    47     _window = fr.sp();
    48   } else {
    49     _window = NULL;
    50   }
    51   _younger_window = NULL;
    52 }
// Unified register numbering scheme: each 32-bits counts as a register
// number, so all the V9 registers take 2 slots.
//
// Each table gives the slot number, in that scheme, of the eight
// registers in one group (octal constants: %l starts at 040, %i at
// 060, %o at 020, %g at 000; the stride of 2 accounts for the two
// 32-bit halves of each 64-bit V9 register).
const static int R_L_nums[] = {0+040,2+040,4+040,6+040,8+040,10+040,12+040,14+040};
const static int R_I_nums[] = {0+060,2+060,4+060,6+060,8+060,10+060,12+060,14+060};
const static int R_O_nums[] = {0+020,2+020,4+020,6+020,8+020,10+020,12+020,14+020};
const static int R_G_nums[] = {0+000,2+000,4+000,6+000,8+000,10+000,12+000,14+000};
// Lazily built by register_map_init():
//   bad_mask   - registers that must never carry an explicit location
//                (SP, FP, saved pcs, TLS, libthread-reserved %g7).
//   R_LIO_mask - every %l, %i and %o register.
static RegisterMap::LocationValidType bad_mask = 0;
static RegisterMap::LocationValidType R_LIO_mask = 0;
static bool register_map_inited = false;
// One-time initialization of bad_mask and R_LIO_mask.  Idempotent and
// cheap, so every entry point that needs the masks simply calls it.
// No locking: concurrent callers would compute identical values.
static void register_map_init() {
  if (!register_map_inited) {
    register_map_inited = true;
    int i;
    for (i = 0; i < 8; i++) {
      // All window/global slot numbers must fit in the first
      // LocationValidType word for the single-word masks below.
      assert(R_L_nums[i] < RegisterMap::location_valid_type_size, "in first chunk");
      assert(R_I_nums[i] < RegisterMap::location_valid_type_size, "in first chunk");
      assert(R_O_nums[i] < RegisterMap::location_valid_type_size, "in first chunk");
      assert(R_G_nums[i] < RegisterMap::location_valid_type_size, "in first chunk");
    }

    // Registers with fixed VM/ABI uses never get special locations.
    bad_mask |= (1LL << R_O_nums[6]); // SP
    bad_mask |= (1LL << R_O_nums[7]); // cPC
    bad_mask |= (1LL << R_I_nums[6]); // FP
    bad_mask |= (1LL << R_I_nums[7]); // rPC
    bad_mask |= (1LL << R_G_nums[2]); // TLS
    bad_mask |= (1LL << R_G_nums[7]); // reserved by libthread

    for (i = 0; i < 8; i++) {
      R_LIO_mask |= (1LL << R_L_nums[i]);
      R_LIO_mask |= (1LL << R_I_nums[i]);
      R_LIO_mask |= (1LL << R_O_nums[i]);
    }
  }
}
// Return the address at which the value of "regname" is saved for this
// map's frame, or NULL when the register is not handled here (non-GPRs,
// VM-reserved registers, and globals).
address RegisterMap::pd_location(VMReg regname) const {
  register_map_init();

  assert(regname->is_reg(), "sanity check");
  // Only the GPRs get handled this way
  if( !regname->is_Register())
    return NULL;

  // don't talk about bad registers
  if ((bad_mask & ((LocationValidType)1 << regname->value())) != 0) {
    return NULL;
  }

  // Convert to a GPR
  Register reg;
  int second_word = 0;
  // 32-bit registers for in, out and local
  if (!regname->is_concrete()) {
    // HMM ought to return NULL for any non-concrete (odd) vmreg
    // this all tied up in the fact we put out double oopMaps for
    // register locations. When that is fixed we'd will return NULL
    // (or assert here).
    reg = regname->prev()->as_Register();
#ifdef _LP64
    // Odd (high-half) vmreg: address the second 32-bit word of the slot.
    second_word = sizeof(jint);
#else
    return NULL;
#endif // _LP64
  } else {
    reg = regname->as_Register();
  }
  if (reg->is_out()) {
    // An %o register lives as the corresponding %i register in the
    // save area of the next younger window.
    assert(_younger_window != NULL, "Younger window should be available");
    return second_word + (address)&_younger_window[reg->after_save()->sp_offset_in_saved_window()];
  }
  if (reg->is_local() || reg->is_in()) {
    // %l and %i registers are saved in this frame's own window.
    assert(_window != NULL, "Window should be available");
    return second_word + (address)&_window[reg->sp_offset_in_saved_window()];
  }
  // Only the window'd GPRs get handled this way; not the globals.
  return NULL;
}
#ifdef ASSERT
// Debug-only invariant check: no VM-reserved register (SP, FP, saved
// pcs, TLS, %g7) may carry an explicit location in this map.
void RegisterMap::check_location_valid() {
  register_map_init();
  assert((_location_valid[0] & bad_mask) == 0, "cannot have special locations for SP,FP,TLS,etc.");
}
#endif
// We are shifting windows.  That means we are moving all %i to %o,
// getting rid of all current %l, and keeping all %g.  This is only
// complicated if any of the location pointers for these are valid.
// The normal case is that everything is in its standard register window
// home, and _location_valid[0] is zero.  In that case, this routine
// does exactly nothing.
void RegisterMap::shift_individual_registers() {
  if (!update_map())  return;  // this only applies to maps with locations
  register_map_init();
  check_location_valid();

  LocationValidType lv = _location_valid[0];
  LocationValidType lv0 = lv;

  lv &= ~R_LIO_mask;  // clear %l, %o, %i regs

  // if we cleared some non-%g locations, we may have to do some shifting
  if (lv != lv0) {
    // copy %i0-%i5 to %o0-%o5, if they have special locations
    // This can happen in within stubs which spill argument registers
    // around a dynamic link operation, such as resolve_opt_virtual_call.
    // Iterating over all 8 is safe: %i6/%i7 (FP/rPC) are in bad_mask, so
    // check_location_valid() above guarantees their bits are never set.
    for (int i = 0; i < 8; i++) {
      if (lv0 & (1LL << R_I_nums[i])) {
        _location[R_O_nums[i]] = _location[R_I_nums[i]];
        lv |=  (1LL << R_O_nums[i]);
      }
    }
  }

  _location_valid[0] = lv;
  check_location_valid();
}
   176 bool frame::safe_for_sender(JavaThread *thread) {
   178   address _SP = (address) sp();
   179   address _FP = (address) fp();
   180   address _UNEXTENDED_SP = (address) unextended_sp();
   181   // sp must be within the stack
   182   bool sp_safe = (_SP <= thread->stack_base()) &&
   183                  (_SP >= thread->stack_base() - thread->stack_size());
   185   if (!sp_safe) {
   186     return false;
   187   }
   189   // unextended sp must be within the stack and above or equal sp
   190   bool unextended_sp_safe = (_UNEXTENDED_SP <= thread->stack_base()) &&
   191                             (_UNEXTENDED_SP >= _SP);
   193   if (!unextended_sp_safe) return false;
   195   // an fp must be within the stack and above (but not equal) sp
   196   bool fp_safe = (_FP <= thread->stack_base()) &&
   197                  (_FP > _SP);
   199   // We know sp/unextended_sp are safe only fp is questionable here
   201   // If the current frame is known to the code cache then we can attempt to
   202   // to construct the sender and do some validation of it. This goes a long way
   203   // toward eliminating issues when we get in frame construction code
   205   if (_cb != NULL ) {
   207     // First check if frame is complete and tester is reliable
   208     // Unfortunately we can only check frame complete for runtime stubs and nmethod
   209     // other generic buffer blobs are more problematic so we just assume they are
   210     // ok. adapter blobs never have a frame complete and are never ok.
   212     if (!_cb->is_frame_complete_at(_pc)) {
   213       if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
   214         return false;
   215       }
   216     }
   218     // Entry frame checks
   219     if (is_entry_frame()) {
   220       // an entry frame must have a valid fp.
   222       if (!fp_safe) {
   223         return false;
   224       }
   226       // Validate the JavaCallWrapper an entry frame must have
   228       address jcw = (address)entry_frame_call_wrapper();
   230       bool jcw_safe = (jcw <= thread->stack_base()) && ( jcw > _FP);
   232       return jcw_safe;
   234     }
   236     intptr_t* younger_sp = sp();
   237     intptr_t* _SENDER_SP = sender_sp(); // sender is actually just _FP
   238     bool adjusted_stack = is_interpreted_frame();
   240     address   sender_pc = (address)younger_sp[I7->sp_offset_in_saved_window()] + pc_return_offset;
   243     // We must always be able to find a recognizable pc
   244     CodeBlob* sender_blob = CodeCache::find_blob_unsafe(sender_pc);
   245     if (sender_pc == NULL ||  sender_blob == NULL) {
   246       return false;
   247     }
   249     // It should be safe to construct the sender though it might not be valid
   251     frame sender(_SENDER_SP, younger_sp, adjusted_stack);
   253     // Do we have a valid fp?
   254     address sender_fp = (address) sender.fp();
   256     // an fp must be within the stack and above (but not equal) current frame's _FP
   258     bool sender_fp_safe = (sender_fp <= thread->stack_base()) &&
   259                    (sender_fp > _FP);
   261     if (!sender_fp_safe) {
   262       return false;
   263     }
   266     // If the potential sender is the interpreter then we can do some more checking
   267     if (Interpreter::contains(sender_pc)) {
   268       return sender.is_interpreted_frame_valid(thread);
   269     }
   271     // Could just be some random pointer within the codeBlob
   272     if (!sender.cb()->code_contains(sender_pc)) {
   273       return false;
   274     }
   276     // We should never be able to see an adapter if the current frame is something from code cache
   277     if (sender_blob->is_adapter_blob()) {
   278       return false;
   279     }
   281     if( sender.is_entry_frame()) {
   282       // Validate the JavaCallWrapper an entry frame must have
   284       address jcw = (address)sender.entry_frame_call_wrapper();
   286       bool jcw_safe = (jcw <= thread->stack_base()) && ( jcw > sender_fp);
   288       return jcw_safe;
   289     }
   291     // If the frame size is 0 something is bad because every nmethod has a non-zero frame size
   292     // because you must allocate window space
   294     if (sender_blob->frame_size() == 0) {
   295       assert(!sender_blob->is_nmethod(), "should count return address at least");
   296       return false;
   297     }
   299     // The sender should positively be an nmethod or call_stub. On sparc we might in fact see something else.
   300     // The cause of this is because at a save instruction the O7 we get is a leftover from an earlier
   301     // window use. So if a runtime stub creates two frames (common in fastdebug/jvmg) then we see the
   302     // stale pc. So if the sender blob is not something we'd expect we have little choice but to declare
   303     // the stack unwalkable. pd_get_top_frame_for_signal_handler tries to recover from this by unwinding
   304     // that initial frame and retrying.
   306     if (!sender_blob->is_nmethod()) {
   307       return false;
   308     }
   310     // Could put some more validation for the potential non-interpreted sender
   311     // frame we'd create by calling sender if I could think of any. Wait for next crash in forte...
   313     // One idea is seeing if the sender_pc we have is one that we'd expect to call to current cb
   315     // We've validated the potential sender that would be created
   317     return true;
   319   }
   321   // Must be native-compiled frame. Since sender will try and use fp to find
   322   // linkages it must be safe
   324   if (!fp_safe) return false;
   326   // could try and do some more potential verification of native frame if we could think of some...
   328   return true;
   329 }
// constructors

// Construct an unpatchable, deficient frame
// Such a frame knows its sp/pc but has no younger sp, so the saved O7
// slot cannot be located and the pc can never be patched in place.
frame::frame(intptr_t* sp, unpatchable_t, address pc, CodeBlob* cb) {
#ifdef _LP64
  assert( (((intptr_t)sp & (wordSize-1)) == 0), "frame constructor passed an invalid sp");
#endif
  _sp = sp;
  _younger_sp = NULL;
  _pc = pc;
  _cb = cb;
  _sp_adjustment_by_callee = 0;
  // A cb is only meaningful with a pc (pc may be NULL only when cb is too).
  assert(pc == NULL && cb == NULL || pc != NULL, "can't have a cb and no pc!");
  if (_cb == NULL && _pc != NULL ) {
    _cb = CodeCache::find_blob(_pc);
  }
  _deopt_state = unknown;
#ifdef ASSERT
  if ( _cb != NULL && _cb->is_nmethod()) {
    // Without a valid unextended_sp() we can't convert the pc to "original"
    assert(!((nmethod*)_cb)->is_deopt_pc(_pc), "invariant broken");
  }
#endif // ASSERT
}
// Construct a frame from its own sp and the sp of its younger (callee)
// frame; the pc is recovered from the return address (I7) saved in the
// younger window, and deopt/MethodHandle SP adjustments are computed.
frame::frame(intptr_t* sp, intptr_t* younger_sp, bool younger_frame_is_interpreted) :
  _sp(sp),
  _younger_sp(younger_sp),
  _deopt_state(unknown),
  _sp_adjustment_by_callee(0) {
  if (younger_sp == NULL) {
    // make a deficient frame which doesn't know where its PC is
    _pc = NULL;
    _cb = NULL;
  } else {
    _pc = (address)younger_sp[I7->sp_offset_in_saved_window()] + pc_return_offset;
    // The FP saved in the younger window must be our (biased) sp.
    assert( (intptr_t*)younger_sp[FP->sp_offset_in_saved_window()] == (intptr_t*)((intptr_t)sp - STACK_BIAS), "younger_sp must be valid");
    // Any frame we ever build should always "safe" therefore we should not have to call
    // find_blob_unsafe
    // In case of native stubs, the pc retrieved here might be
    // wrong.  (the _last_native_pc will have the right value)
    // So do not put add any asserts on the _pc here.
  }

  if (_pc != NULL)
    _cb = CodeCache::find_blob(_pc);

  // Check for MethodHandle call sites.
  if (_cb != NULL) {
    nmethod* nm = _cb->as_nmethod_or_null();
    if (nm != NULL) {
      if (nm->is_deopt_mh_entry(_pc) || nm->is_method_handle_return(_pc)) {
        // Recover the SP adjustment from the value the MH call site
        // saved in L7 (un-biasing it), expressed relative to our sp.
        _sp_adjustment_by_callee = (intptr_t*) ((intptr_t) sp[L7_mh_SP_save->sp_offset_in_saved_window()] + STACK_BIAS) - sp;
        // The SP is already adjusted by this MH call site, don't
        // overwrite this value with the wrong interpreter value.
        younger_frame_is_interpreted = false;
      }
    }
  }

  if (younger_frame_is_interpreted) {
    // compute adjustment to this frame's SP made by its interpreted callee
    _sp_adjustment_by_callee = (intptr_t*) ((intptr_t) younger_sp[I5_savedSP->sp_offset_in_saved_window()] + STACK_BIAS) - sp;
  }

  // It is important that the frame is fully constructed when we do
  // this lookup as get_deopt_original_pc() needs a correct value for
  // unextended_sp() which uses _sp_adjustment_by_callee.
  if (_pc != NULL) {
    address original_pc = nmethod::get_deopt_original_pc(this);
    if (original_pc != NULL) {
      // Report the pre-deoptimization pc, not the deopt stub's pc.
      _pc = original_pc;
      _deopt_state = is_deoptimized;
    } else {
      _deopt_state = not_deoptimized;
    }
  }
}
   410 bool frame::is_interpreted_frame() const  {
   411   return Interpreter::contains(pc());
   412 }
// sender_sp

// On SPARC an interpreted frame does not extend its caller's frame, so
// the sender's sp is simply this frame's fp.
intptr_t* frame::interpreter_frame_sender_sp() const {
  assert(is_interpreted_frame(), "interpreted frame expected");
  return fp();
}
#ifndef CC_INTERP
// Not supported on SPARC: the sender sp of an interpreted frame is
// implicitly the fp (see interpreter_frame_sender_sp), so there is
// nothing to set and reaching here is an error.
void frame::set_interpreter_frame_sender_sp(intptr_t* sender_sp) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  Unimplemented();
}
#endif // CC_INTERP
#ifdef ASSERT
// Debugging aid
// Walk n senders up from the current thread's last frame, print the
// kind of frame reached, and return it.  Intended to be invoked from
// a native debugger; not called by VM code.
static frame nth_sender(int n) {
  frame f = JavaThread::current()->last_frame();

  for(int i = 0; i < n; ++i)
    f = f.sender((RegisterMap*)NULL);

  printf("first frame %d\n",          f.is_first_frame()       ? 1 : 0);
  printf("interpreted frame %d\n",    f.is_interpreted_frame() ? 1 : 0);
  printf("java frame %d\n",           f.is_java_frame()        ? 1 : 0);
  printf("entry frame %d\n",          f.is_entry_frame()       ? 1 : 0);
  printf("native frame %d\n",         f.is_native_frame()      ? 1 : 0);
  if (f.is_compiled_frame()) {
    if (f.is_deoptimized_frame())
      printf("deoptimized frame 1\n");
    else
      printf("compiled frame 1\n");
  }

  return f;
}
#endif
// Compute the sender of an entry (call-stub) frame via the
// JavaFrameAnchor stored in its JavaCallWrapper, skipping any C frames
// between this entry frame and the previous Java chunk.
frame frame::sender_for_entry_frame(RegisterMap *map) const {
  assert(map != NULL, "map must be set");
  // Java frame called from C; skip all C frames and return top C
  // frame of that chunk as the sender
  JavaFrameAnchor* jfa = entry_frame_call_wrapper()->anchor();
  assert(!entry_frame_is_first(), "next Java fp must be non zero");
  assert(jfa->last_Java_sp() > _sp, "must be above this frame on stack");
  intptr_t* last_Java_sp = jfa->last_Java_sp();
  // Since we are walking the stack now this nested anchor is obviously walkable
  // even if it wasn't when it was stacked.
  if (!jfa->walkable()) {
    // Capture _last_Java_pc (if needed) and mark anchor walkable.
    jfa->capture_last_Java_pc(_sp);
  }
  assert(jfa->last_Java_pc() != NULL, "No captured pc!");
  // Reset the map for the older chunk; integer register locations do
  // not survive the C -> Java transition.
  map->clear();
  map->make_integer_regs_unsaved();
  map->shift_window(last_Java_sp, NULL);
  assert(map->include_argument_oops(), "should be set by clear");
  return frame(last_Java_sp, frame::unpatchable, jfa->last_Java_pc());
}
// Not used on SPARC: frame::sender() handles interpreted frames
// directly.  The unreachable return only satisfies the compiler.
frame frame::sender_for_interpreter_frame(RegisterMap *map) const {
  ShouldNotCallThis();
  return sender(map);
}
// Not used on SPARC: frame::sender() handles compiled frames directly.
// The unreachable return only satisfies the compiler.
frame frame::sender_for_compiled_frame(RegisterMap *map) const {
  ShouldNotCallThis();
  return sender(map);
}
// Return this frame's sender, updating "map" so register locations
// remain resolvable in the older frame.
frame frame::sender(RegisterMap* map) const {
  assert(map != NULL, "map must be set");

  assert(CodeCache::find_blob_unsafe(_pc) == _cb, "inconsistent");

  // Default is not to follow arguments; update it accordingly below
  map->set_include_argument_oops(false);

  if (is_entry_frame()) return sender_for_entry_frame(map);

  intptr_t* younger_sp = sp();
  intptr_t* sp         = sender_sp();

  // Note:  The version of this operation on any platform with callee-save
  //        registers must update the register map (if not null).
  //        In order to do this correctly, the various subtypes of
  //        of frame (interpreted, compiled, glue, native),
  //        must be distinguished.  There is no need on SPARC for
  //        such distinctions, because all callee-save registers are
  //        preserved for all frames via SPARC-specific mechanisms.
  //
  //        *** HOWEVER, *** if and when we make any floating-point
  //        registers callee-saved, then we will have to copy over
  //        the RegisterMap update logic from the Intel code.

  // The constructor of the sender must know whether this frame is interpreted so it can set the
  // sender's _sp_adjustment_by_callee field.  An osr adapter frame was originally
  // interpreted but its pc is in the code cache (for c1 -> osr_frame_return_id stub), so it must be
  // explicitly recognized.

  bool frame_is_interpreted = is_interpreted_frame();
  if (frame_is_interpreted) {
    map->make_integer_regs_unsaved();
    map->shift_window(sp, younger_sp);
  } else if (_cb != NULL) {
    // Update the locations of implicitly saved registers to be their
    // addresses in the register save area.
    // For %o registers, the addresses of %i registers in the next younger
    // frame are used.
    map->shift_window(sp, younger_sp);
    if (map->update_map()) {
      // Tell GC to use argument oopmaps for some runtime stubs that need it.
      // For C1, the runtime stub might not have oop maps, so set this flag
      // outside of update_register_map.
      map->set_include_argument_oops(_cb->caller_must_gc_arguments(map->thread()));
      if (_cb->oop_maps() != NULL) {
        OopMapSet::update_register_map(this, map);
      }
    }
  }
  return frame(sp, younger_sp, frame_is_interpreted);
}
   540 void frame::patch_pc(Thread* thread, address pc) {
   541   if(thread == Thread::current()) {
   542    StubRoutines::Sparc::flush_callers_register_windows_func()();
   543   }
   544   if (TracePcPatching) {
   545     // QQQ this assert is invalid (or too strong anyway) sice _pc could
   546     // be original pc and frame could have the deopt pc.
   547     // assert(_pc == *O7_addr() + pc_return_offset, "frame has wrong pc");
   548     tty->print_cr("patch_pc at address  0x%x [0x%x -> 0x%x] ", O7_addr(), _pc, pc);
   549   }
   550   _cb = CodeCache::find_blob(pc);
   551   *O7_addr() = pc - pc_return_offset;
   552   _cb = CodeCache::find_blob(_pc);
   553   address original_pc = nmethod::get_deopt_original_pc(this);
   554   if (original_pc != NULL) {
   555     assert(original_pc == _pc, "expected original to be stored before patching");
   556     _deopt_state = is_deoptimized;
   557   } else {
   558     _deopt_state = not_deoptimized;
   559   }
   560 }
   563 static bool sp_is_valid(intptr_t* old_sp, intptr_t* young_sp, intptr_t* sp) {
   564   return (((intptr_t)sp & (2*wordSize-1)) == 0 &&
   565           sp <= old_sp &&
   566           sp >= young_sp);
   567 }
   570 /*
   571   Find the (biased) sp that is just younger than old_sp starting at sp.
   572   If not found return NULL. Register windows are assumed to be flushed.
   573 */
   574 intptr_t* frame::next_younger_sp_or_null(intptr_t* old_sp, intptr_t* sp) {
   576   intptr_t* previous_sp = NULL;
   577   intptr_t* orig_sp = sp;
   579   int max_frames = (old_sp - sp) / 16; // Minimum frame size is 16
   580   int max_frame2 = max_frames;
   581   while(sp != old_sp && sp_is_valid(old_sp, orig_sp, sp)) {
   582     if (max_frames-- <= 0)
   583       // too many frames have gone by; invalid parameters given to this function
   584       break;
   585     previous_sp = sp;
   586     sp = (intptr_t*)sp[FP->sp_offset_in_saved_window()];
   587     sp = (intptr_t*)((intptr_t)sp + STACK_BIAS);
   588   }
   590   return (sp == old_sp ? previous_sp : NULL);
   591 }
   593 /*
   594   Determine if "sp" is a valid stack pointer. "sp" is assumed to be younger than
   595   "valid_sp". So if "sp" is valid itself then it should be possible to walk frames
   596   from "sp" to "valid_sp". The assumption is that the registers windows for the
   597   thread stack in question are flushed.
   598 */
   599 bool frame::is_valid_stack_pointer(intptr_t* valid_sp, intptr_t* sp) {
   600   return next_younger_sp_or_null(valid_sp, sp) != NULL;
   601 }
   604 bool frame::interpreter_frame_equals_unpacked_fp(intptr_t* fp) {
   605   assert(is_interpreted_frame(), "must be interpreter frame");
   606   return this->fp() == fp;
   607 }
// Platform-dependent GC epilogue: refresh interpreter frame state that
// may have been invalidated by the collection.
void frame::pd_gc_epilog() {
  if (is_interpreted_frame()) {
    // set constant pool cache entry for interpreter
    methodOop m = interpreter_frame_method();

    *interpreter_frame_cpoolcache_addr() = m->constants()->cache();
  }
}
// Conservative sanity check of a potential interpreted frame, used by
// safe_for_sender(): returns false when any frame element looks wrong.
// A true result means "plausible", not "certainly valid".
bool frame::is_interpreted_frame_valid(JavaThread* thread) const {
#ifdef CC_INTERP
  // Is there anything to do?
#else
  assert(is_interpreted_frame(), "Not an interpreted frame");
  // These are reasonable sanity checks
  if (fp() == 0 || (intptr_t(fp()) & (2*wordSize-1)) != 0) {
    return false;
  }
  if (sp() == 0 || (intptr_t(sp()) & (2*wordSize-1)) != 0) {
    return false;
  }

  const intptr_t interpreter_frame_initial_sp_offset = interpreter_frame_vm_local_words;
  if (fp() + interpreter_frame_initial_sp_offset < sp()) {
    return false;
  }
  // These are hacks to keep us out of trouble.
  // The problem with these is that they mask other problems
  if (fp() <= sp()) {        // this attempts to deal with unsigned comparison above
    return false;
  }
  // do some validation of frame elements

  // first the method

  methodOop m = *interpreter_frame_method_addr();

  // validate the method we'd find in this potential sender
  if (!Universe::heap()->is_valid_method(m)) return false;

  // stack frames shouldn't be much larger than max_stack elements
  // (1024 words of slop covers the fixed interpreter frame overhead)

  if (fp() - sp() > 1024 + m->max_stack()*Interpreter::stackElementSize) {
    return false;
  }

  // validate bci/bcx

  intptr_t  bcx    = interpreter_frame_bcx();
  if (m->validate_bci_from_bcx(bcx) < 0) {
    return false;
  }

  // validate constantPoolCacheOop

  constantPoolCacheOop cp = *interpreter_frame_cache_addr();

  if (cp == NULL ||
      !Space::is_aligned(cp) ||
      !Universe::heap()->is_permanent((void*)cp)) return false;

  // validate locals

  address locals =  (address) *interpreter_frame_locals_addr();

  // locals must lie between fp and the stack base
  if (locals > thread->stack_base() || locals < (address) fp()) return false;

  // We'd have to be pretty unlucky to be mislead at this point
#endif /* CC_INTERP */
  return true;
}
// Windows have been flushed on entry (but not marked). Capture the pc that
// is the return address to the frame that contains "sp" as its stack pointer.
// This pc resides in the callee of the frame corresponding to "sp".
// As a side effect we mark this JavaFrameAnchor as having flushed the windows.
// This side effect lets us mark stacked JavaFrameAnchors (stacked in the
// call_helper) as flushed when we have flushed the windows for the most
// recent (i.e. current) JavaFrameAnchor. This saves useless flushing calls
// and lets us find the pc just once rather than multiple times as it did
// in the bad old _post_Java_state days.
//
void JavaFrameAnchor::capture_last_Java_pc(intptr_t* sp) {
  if (last_Java_sp() != NULL && last_Java_pc() == NULL) {
    // try and find the sp just younger than _last_Java_sp
    intptr_t* _post_Java_sp = frame::next_younger_sp_or_null(last_Java_sp(), sp);
    // Really this should never fail otherwise VM call must have non-standard
    // frame linkage (bad) or stack is not properly flushed (worse).
    guarantee(_post_Java_sp != NULL, "bad stack!");
    // The return address into the last Java frame is saved in the I7
    // slot of that younger frame's window.
    _last_Java_pc = (address) _post_Java_sp[ I7->sp_offset_in_saved_window()] + frame::pc_return_offset;

  }
  set_window_flushed();
}
// Ensure the thread's stack can be walked: flush register windows and,
// when a last Java frame exists, capture its pc and mark the anchor
// walkable.  No-op if already walkable.
void JavaFrameAnchor::make_walkable(JavaThread* thread) {
  if (walkable()) return;
  // Eventually make an assert
  guarantee(Thread::current() == (Thread*)thread, "only current thread can flush its registers");
  // We always flush in case the profiler wants it but we won't mark
  // the windows as flushed unless we have a last_Java_frame
  intptr_t* sp = StubRoutines::Sparc::flush_callers_register_windows_func()();
  if (last_Java_sp() != NULL ) {
    capture_last_Java_pc(sp);
  }
}
   719 intptr_t* frame::entry_frame_argument_at(int offset) const {
   720   // convert offset to index to deal with tsi
   721   int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize);
   723   intptr_t* LSP = (intptr_t*) sp()[Lentry_args->sp_offset_in_saved_window()];
   724   return &LSP[index+1];
   725 }
// Fetch the pending method result from an interpreted frame into
// *oop_result (object results) or *value_result (primitives), and
// return the method's result BasicType.  For native methods the result
// was previously saved into the frame's scratch slots; for Java methods
// it sits on the expression stack.
BasicType frame::interpreter_frame_result(oop* oop_result, jvalue* value_result) {
  assert(is_interpreted_frame(), "interpreted frame expected");
  methodOop method = interpreter_frame_method();
  BasicType type = method->result_type();

  if (method->is_native()) {
    // Prior to notifying the runtime of the method_exit the possible result
    // value is saved to l_scratch and d_scratch.

#ifdef CC_INTERP
    interpreterState istate = get_interpreterState();
    intptr_t* l_scratch = (intptr_t*) &istate->_native_lresult;
    intptr_t* d_scratch = (intptr_t*) &istate->_native_fresult;
#else /* CC_INTERP */
    intptr_t* l_scratch = fp() + interpreter_frame_l_scratch_fp_offset;
    intptr_t* d_scratch = fp() + interpreter_frame_d_scratch_fp_offset;
#endif /* CC_INTERP */

    address l_addr = (address)l_scratch;
#ifdef _LP64
    // On 64-bit the result for 1/8/16/32-bit result types is in the other
    // word half
    l_addr += wordSize/2;
#endif

    switch (type) {
      case T_OBJECT:
      case T_ARRAY: {
#ifdef CC_INTERP
        *oop_result = istate->_oop_temp;
#else
        // Object results are kept in the frame's oop temp slot so GC sees them.
        oop obj = (oop) at(interpreter_frame_oop_temp_offset);
        assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
        *oop_result = obj;
#endif // CC_INTERP
        break;
      }

      case T_BOOLEAN : { jint* p = (jint*)l_addr; value_result->z = (jboolean)((*p) & 0x1); break; }
      case T_BYTE    : { jint* p = (jint*)l_addr; value_result->b = (jbyte)((*p) & 0xff); break; }
      case T_CHAR    : { jint* p = (jint*)l_addr; value_result->c = (jchar)((*p) & 0xffff); break; }
      case T_SHORT   : { jint* p = (jint*)l_addr; value_result->s = (jshort)((*p) & 0xffff); break; }
      case T_INT     : value_result->i = *(jint*)l_addr; break;
      case T_LONG    : value_result->j = *(jlong*)l_scratch; break;
      case T_FLOAT   : value_result->f = *(jfloat*)d_scratch; break;
      case T_DOUBLE  : value_result->d = *(jdouble*)d_scratch; break;
      case T_VOID    : /* Nothing to do */ break;
      default        : ShouldNotReachHere();
    }
  } else {
    // Java method: the result is the top-of-stack value.
    intptr_t* tos_addr = interpreter_frame_tos_address();

    switch(type) {
      case T_OBJECT:
      case T_ARRAY: {
        oop obj = (oop)*tos_addr;
        assert(obj == NULL || Universe::heap()->is_in(obj), "sanity check");
        *oop_result = obj;
        break;
      }
      case T_BOOLEAN : { jint* p = (jint*)tos_addr; value_result->z = (jboolean)((*p) & 0x1); break; }
      case T_BYTE    : { jint* p = (jint*)tos_addr; value_result->b = (jbyte)((*p) & 0xff); break; }
      case T_CHAR    : { jint* p = (jint*)tos_addr; value_result->c = (jchar)((*p) & 0xffff); break; }
      case T_SHORT   : { jint* p = (jint*)tos_addr; value_result->s = (jshort)((*p) & 0xffff); break; }
      case T_INT     : value_result->i = *(jint*)tos_addr; break;
      case T_LONG    : value_result->j = *(jlong*)tos_addr; break;
      case T_FLOAT   : value_result->f = *(jfloat*)tos_addr; break;
      case T_DOUBLE  : value_result->d = *(jdouble*)tos_addr; break;
      case T_VOID    : /* Nothing to do */ break;
      default        : ShouldNotReachHere();
    }
  };

  return type;
}
   804 // Lesp pointer is one word lower than the top item on the stack.
   805 intptr_t* frame::interpreter_frame_tos_at(jint offset) const {
   806   int index = (Interpreter::expr_offset_in_bytes(offset)/wordSize) - 1;
   807   return &interpreter_frame_tos_address()[index];
   808 }

mercurial