src/cpu/sparc/vm/frame_sparc.inline.hpp

author:      johnc
date:        Thu, 07 Apr 2011 09:53:20 -0700
changeset:   2781:e1162778c1c8
parent:      2314:f95d63e2154a
child:       3433:eaa9557116a2
permissions: -rw-r--r--

7009266: G1: assert(obj->is_oop_or_null(true )) failed: Error
Summary: A referent object that is only weakly reachable at the start of concurrent marking, but is re-attached to the strongly reachable object graph during marking, may not be marked as live. This can cause the reference object to be processed prematurely and leave dangling pointers to the referent object. Implement a read barrier for the java.lang.ref.Reference::referent field by intrinsifying the Reference.get() method and intercepting accesses through JNI, reflection, and Unsafe, so that when a non-null referent object is read it is also logged in an SATB buffer.
Reviewed-by: kvn, iveresov, never, tonyp, dholmes
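
The barrier described above is, in effect, a conditional SATB pre-barrier applied to loads of the referent field. The sketch below is only a conceptual model of that logging step, not HotSpot code; the names Oop, SATBQueue, satb_is_active and read_referent_with_barrier are invented for illustration.

#include <cstddef>
#include <vector>

// Conceptual model only: none of these identifiers exist in HotSpot.
typedef void* Oop;

struct SATBQueue {
  std::vector<Oop> buffer;                  // values waiting to be marked live
  void enqueue(Oop obj) { buffer.push_back(obj); }
};

static bool satb_is_active = false;         // true while concurrent marking is running

// Every path that can read Reference.referent (the intrinsified Reference.get(),
// JNI, reflection, Unsafe) performs this kind of check so a weakly reachable
// referent that becomes strongly reachable is still treated as live by the marker.
static Oop read_referent_with_barrier(Oop referent, SATBQueue& q) {
  if (satb_is_active && referent != NULL) {
    q.enqueue(referent);
  }
  return referent;
}

The logged values are picked up by concurrent marking, which is what keeps the reference object from being processed prematurely.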

/*
 * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef CPU_SPARC_VM_FRAME_SPARC_INLINE_HPP
#define CPU_SPARC_VM_FRAME_SPARC_INLINE_HPP

// Inline functions for SPARC frames:

// Constructors

inline frame::frame() {
  _pc = NULL;
  _sp = NULL;
  _younger_sp = NULL;
  _cb = NULL;
  _deopt_state = unknown;
  _sp_adjustment_by_callee = 0;
}

// Accessors:

inline bool frame::equal(frame other) const {
  bool ret =  sp() == other.sp()
           && fp() == other.fp()
           && pc() == other.pc();
  assert(!ret || ret && cb() == other.cb() && _deopt_state == other._deopt_state, "inconsistent construction");
  return ret;
}

// Return unique id for this frame. The id must have a value where we can distinguish
// identity and younger/older relationship. NULL represents an invalid (incomparable)
// frame.
inline intptr_t* frame::id(void) const { return unextended_sp(); }

// Relationals on frames based on frame ids
// Return true if the frame is younger (more recent activation) than the frame represented by id
inline bool frame::is_younger(intptr_t* id) const { assert(this->id() != NULL && id != NULL, "NULL frame id");
                                                    return this->id() < id ; }

// Return true if the frame is older (less recent activation) than the frame represented by id
inline bool frame::is_older(intptr_t* id) const   { assert(this->id() != NULL && id != NULL, "NULL frame id");
                                                    return this->id() > id ; }

inline int frame::frame_size(RegisterMap* map) const { return sender_sp() - sp(); }

inline intptr_t* frame::link() const { return (intptr_t *)(fp()[FP->sp_offset_in_saved_window()] + STACK_BIAS); }

inline void frame::set_link(intptr_t* addr) { assert(link()==addr, "frame nesting is controlled by hardware"); }

inline intptr_t* frame::unextended_sp() const { return sp() + _sp_adjustment_by_callee; }

// return address:

inline address  frame::sender_pc()        const    { return *I7_addr() + pc_return_offset; }

inline address* frame::I7_addr() const  { return (address*) &sp()[ I7->sp_offset_in_saved_window()]; }
inline address* frame::I0_addr() const  { return (address*) &sp()[ I0->sp_offset_in_saved_window()]; }

inline address* frame::O7_addr() const  { return (address*) &younger_sp()[ I7->sp_offset_in_saved_window()]; }
inline address* frame::O0_addr() const  { return (address*) &younger_sp()[ I0->sp_offset_in_saved_window()]; }

inline intptr_t*    frame::sender_sp() const  { return fp(); }

// Used only in frame::oopmapreg_to_location
// This returns a value in VMRegImpl::slot_size units
inline int frame::pd_oop_map_offset_adjustment() const {
  return _sp_adjustment_by_callee * VMRegImpl::slots_per_word;
}

#ifdef CC_INTERP
inline intptr_t** frame::interpreter_frame_locals_addr() const {
  interpreterState istate = get_interpreterState();
  return (intptr_t**) &istate->_locals;
}

inline intptr_t* frame::interpreter_frame_bcx_addr() const {
  interpreterState istate = get_interpreterState();
  return (intptr_t*) &istate->_bcp;
}

inline intptr_t* frame::interpreter_frame_mdx_addr() const {
  interpreterState istate = get_interpreterState();
  return (intptr_t*) &istate->_mdx;
}

inline jint frame::interpreter_frame_expression_stack_direction() { return -1; }

// bottom(base) of the expression stack (highest address)
inline intptr_t* frame::interpreter_frame_expression_stack() const {
  return (intptr_t*)interpreter_frame_monitor_end() - 1;
}

// top of expression stack (lowest address)
inline intptr_t* frame::interpreter_frame_tos_address() const {
  interpreterState istate = get_interpreterState();
  return istate->_stack + 1; // Is this off by one? QQQ
}

// monitor elements

// In keeping with the Intel side: end is lower in memory than begin;
// the beginning element is the oldest element.
// Also, begin is one past the last monitor.

inline BasicObjectLock* frame::interpreter_frame_monitor_begin()       const  {
  return get_interpreterState()->monitor_base();
}

inline BasicObjectLock* frame::interpreter_frame_monitor_end()         const  {
  return (BasicObjectLock*) get_interpreterState()->stack_base();
}


inline int frame::interpreter_frame_monitor_size() {
  return round_to(BasicObjectLock::size(), WordsPerLong);
}

inline methodOop* frame::interpreter_frame_method_addr() const {
  interpreterState istate = get_interpreterState();
  return &istate->_method;
}


// Constant pool cache

// where LcpoolCache is saved:
inline constantPoolCacheOop* frame::interpreter_frame_cpoolcache_addr() const {
  interpreterState istate = get_interpreterState();
  return &istate->_constants; // should really use accessor
}

inline constantPoolCacheOop* frame::interpreter_frame_cache_addr() const {
  interpreterState istate = get_interpreterState();
  return &istate->_constants;
}

#else // !CC_INTERP

inline intptr_t** frame::interpreter_frame_locals_addr() const {
  return (intptr_t**) sp_addr_at( Llocals->sp_offset_in_saved_window());
}

inline intptr_t* frame::interpreter_frame_bcx_addr() const {
  // %%%%% reinterpreting Lbcp as a bcx
  return (intptr_t*) sp_addr_at( Lbcp->sp_offset_in_saved_window());
}

inline intptr_t* frame::interpreter_frame_mdx_addr() const {
  // %%%%% reinterpreting ImethodDataPtr as a mdx
  return (intptr_t*) sp_addr_at( ImethodDataPtr->sp_offset_in_saved_window());
}

inline jint frame::interpreter_frame_expression_stack_direction() { return -1; }

// bottom(base) of the expression stack (highest address)
inline intptr_t* frame::interpreter_frame_expression_stack() const {
  return (intptr_t*)interpreter_frame_monitors() - 1;
}

// top of expression stack (lowest address)
inline intptr_t* frame::interpreter_frame_tos_address() const {
  return *interpreter_frame_esp_addr() + 1;
}

inline void frame::interpreter_frame_set_tos_address( intptr_t* x ) {
  *interpreter_frame_esp_addr() = x - 1;
}

// monitor elements

// In keeping with the Intel side: end is lower in memory than begin;
// the beginning element is the oldest element.
// Also, begin is one past the last monitor.

inline BasicObjectLock* frame::interpreter_frame_monitor_begin()       const  {
  int rounded_vm_local_words = round_to(frame::interpreter_frame_vm_local_words, WordsPerLong);
  return (BasicObjectLock *)fp_addr_at(-rounded_vm_local_words);
}

inline BasicObjectLock* frame::interpreter_frame_monitor_end()         const  {
  return interpreter_frame_monitors();
}


inline void frame::interpreter_frame_set_monitor_end(BasicObjectLock* value) {
  interpreter_frame_set_monitors(value);
}

inline int frame::interpreter_frame_monitor_size() {
  return round_to(BasicObjectLock::size(), WordsPerLong);
}

inline methodOop* frame::interpreter_frame_method_addr() const {
  return (methodOop*)sp_addr_at( Lmethod->sp_offset_in_saved_window());
}


// Constant pool cache

// where LcpoolCache is saved:
inline constantPoolCacheOop* frame::interpreter_frame_cpoolcache_addr() const {
  return (constantPoolCacheOop*)sp_addr_at(LcpoolCache->sp_offset_in_saved_window());
}

inline constantPoolCacheOop* frame::interpreter_frame_cache_addr() const {
  return (constantPoolCacheOop*)sp_addr_at( LcpoolCache->sp_offset_in_saved_window());
}
#endif // CC_INTERP


inline JavaCallWrapper* frame::entry_frame_call_wrapper() const {
  // note: adjust this code if the link argument in StubGenerator::call_stub() changes!
  const Argument link = Argument(0, false);
  return (JavaCallWrapper*)sp()[link.as_in().as_register()->sp_offset_in_saved_window()];
}


inline int frame::local_offset_for_compiler(int local_index, int nof_args, int max_nof_locals, int max_nof_monitors) {
  // always allocate non-argument locals 0..5 as if they were arguments:
  int allocated_above_frame = nof_args;
  if (allocated_above_frame < callee_register_argument_save_area_words)
    allocated_above_frame = callee_register_argument_save_area_words;
  if (allocated_above_frame > max_nof_locals)
    allocated_above_frame = max_nof_locals;

  // Note: monitors (BasicLock blocks) are never allocated in argument slots
  //assert(local_index >= 0 && local_index < max_nof_locals, "bad local index");
  if (local_index < allocated_above_frame)
    return local_index + callee_register_argument_save_area_sp_offset;
  else
    return local_index - (max_nof_locals + max_nof_monitors*2) + compiler_frame_vm_locals_fp_offset;
}
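
// A hypothetical example of the mapping above (the constants are made-up
// values, not the real SPARC definitions): with nof_args = 2,
// callee_register_argument_save_area_words = 6, max_nof_locals = 8 and
// max_nof_monitors = 1, allocated_above_frame becomes 6, so local 3 yields
// 3 + callee_register_argument_save_area_sp_offset (above the frame), while
// local 7 yields 7 - (8 + 1*2) + compiler_frame_vm_locals_fp_offset, i.e.
// compiler_frame_vm_locals_fp_offset - 3 (an fp-relative slot in the frame).
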
inline int frame::monitor_offset_for_compiler(int local_index, int nof_args, int max_nof_locals, int max_nof_monitors) {
  assert(local_index >= max_nof_locals && ((local_index - max_nof_locals) & 1) && (local_index - max_nof_locals) < max_nof_monitors*2, "bad monitor index");

  // The compiler uses the __higher__ of two indexes allocated to the monitor.
  // Increasing local indexes are mapped to increasing memory locations,
  // so the start of the BasicLock is associated with the __lower__ index.

  int offset = (local_index-1) - (max_nof_locals + max_nof_monitors*2) + compiler_frame_vm_locals_fp_offset;

  // We allocate monitors aligned zero mod 8:
  assert((offset & 1) == 0, "monitor must be at an even address.");
  // This works because all monitors are allocated after
  // all locals, and because the highest address corresponding to any
  // monitor index is always even.
  assert((compiler_frame_vm_locals_fp_offset & 1) == 0, "end of monitors must be even address");

  return offset;
}
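
// A hypothetical example (again with made-up values): with max_nof_locals = 4
// and max_nof_monitors = 2, the first monitor owns local indexes 4 and 5 and
// is addressed through the higher one, 5, giving
// offset = (5-1) - (4 + 2*2) + compiler_frame_vm_locals_fp_offset
//        = compiler_frame_vm_locals_fp_offset - 4;
// the second monitor (higher index 7) lands at
// compiler_frame_vm_locals_fp_offset - 2, so successive monitors map to
// increasing, even offsets, consistent with the asserts above.
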
inline int frame::min_local_offset_for_compiler(int nof_args, int max_nof_locals, int max_nof_monitors) {
  // always allocate non-argument locals 0..5 as if they were arguments:
  int allocated_above_frame = nof_args;
  if (allocated_above_frame < callee_register_argument_save_area_words)
    allocated_above_frame = callee_register_argument_save_area_words;
  if (allocated_above_frame > max_nof_locals)
    allocated_above_frame = max_nof_locals;

  int allocated_in_frame = (max_nof_locals + max_nof_monitors*2) - allocated_above_frame;

  return compiler_frame_vm_locals_fp_offset - allocated_in_frame;
}

// On SPARC, the %lN and %iN registers are non-volatile.
inline bool frame::volatile_across_calls(Register reg) {
  // This predicate is (presently) applied only to temporary registers,
  // and so it need not recognize non-volatile globals.
  return reg->is_out() || reg->is_global();
}

inline oop  frame::saved_oop_result(RegisterMap* map) const      {
  return *((oop*) map->location(O0->as_VMReg()));
}

inline void frame::set_saved_oop_result(RegisterMap* map, oop obj) {
  *((oop*) map->location(O0->as_VMReg())) = obj;
}

#endif // CPU_SPARC_VM_FRAME_SPARC_INLINE_HPP
