src/cpu/sparc/vm/macroAssembler_sparc.hpp

changeset 0
f90c822e73f8
child 6876
710a3c8b516e
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/src/cpu/sparc/vm/macroAssembler_sparc.hpp	Wed Apr 27 01:25:04 2016 +0800
     1.3 @@ -0,0 +1,1463 @@
     1.4 +/*
     1.5 + * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
     1.6 + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
     1.7 + *
     1.8 + * This code is free software; you can redistribute it and/or modify it
     1.9 + * under the terms of the GNU General Public License version 2 only, as
    1.10 + * published by the Free Software Foundation.
    1.11 + *
    1.12 + * This code is distributed in the hope that it will be useful, but WITHOUT
    1.13 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    1.14 + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    1.15 + * version 2 for more details (a copy is included in the LICENSE file that
    1.16 + * accompanied this code).
    1.17 + *
    1.18 + * You should have received a copy of the GNU General Public License version
    1.19 + * 2 along with this work; if not, write to the Free Software Foundation,
    1.20 + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
    1.21 + *
    1.22 + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
    1.23 + * or visit www.oracle.com if you need additional information or have any
    1.24 + * questions.
    1.25 + *
    1.26 + */
    1.27 +
    1.28 +#ifndef CPU_SPARC_VM_MACROASSEMBLER_SPARC_HPP
    1.29 +#define CPU_SPARC_VM_MACROASSEMBLER_SPARC_HPP
    1.30 +
    1.31 +#include "asm/assembler.hpp"
    1.32 +#include "utilities/macros.hpp"
    1.33 +
    1.34 +// <sys/trap.h> promises that the system will not use traps 16-31
    1.35 +#define ST_RESERVED_FOR_USER_0 0x10
    1.36 +
    1.37 +class BiasedLockingCounters;
    1.38 +
    1.39 +
    1.40 +// Register aliases for parts of the system:
    1.41 +
    1.42 +// 64 bit values can be kept in g1-g5, o1-o5 and o7 and all 64 bits are safe
    1.43 +// across context switches in V8+ ABI.  Of course, there are no 64 bit regs
    1.44 +// in V8 ABI. All 64 bits are preserved in V9 ABI for all registers.
    1.45 +
    1.46 +// g2-g4 are scratch registers called "application globals".  Their
    1.47 +// meaning is reserved to the "compilation system"--which means us!
     1.49 +// They are not supposed to be touched by ordinary C code, although
    1.49 +// highly-optimized C code might steal them for temps.  They are safe
    1.50 +// across thread switches, and the ABI requires that they be safe
    1.51 +// across function calls.
    1.52 +//
    1.53 +// g1 and g3 are touched by more modules.  V8 allows g1 to be clobbered
    1.54 +// across func calls, and V8+ also allows g5 to be clobbered across
    1.55 +// func calls.  Also, g1 and g5 can get touched while doing shared
    1.56 +// library loading.
    1.57 +//
    1.58 +// We must not touch g7 (it is the thread-self register) and g6 is
    1.59 +// reserved for certain tools.  g0, of course, is always zero.
    1.60 +//
    1.61 +// (Sources:  SunSoft Compilers Group, thread library engineers.)
    1.62 +
    1.63 +// %%%% The interpreter should be revisited to reduce global scratch regs.
    1.64 +
    1.65 +// This global always holds the current JavaThread pointer:
    1.66 +
    1.67 +REGISTER_DECLARATION(Register, G2_thread , G2);
    1.68 +REGISTER_DECLARATION(Register, G6_heapbase , G6);
    1.69 +
    1.70 +// The following globals are part of the Java calling convention:
    1.71 +
    1.72 +REGISTER_DECLARATION(Register, G5_method             , G5);
    1.73 +REGISTER_DECLARATION(Register, G5_megamorphic_method , G5_method);
    1.74 +REGISTER_DECLARATION(Register, G5_inline_cache_reg   , G5_method);
    1.75 +
    1.76 +// The following globals are used for the new C1 & interpreter calling convention:
    1.77 +REGISTER_DECLARATION(Register, Gargs        , G4); // pointing to the last argument
    1.78 +
    1.79 +// This local is used to preserve G2_thread in the interpreter and in stubs:
    1.80 +REGISTER_DECLARATION(Register, L7_thread_cache , L7);
    1.81 +
    1.82 +// These globals are used as scratch registers in the interpreter:
    1.83 +
    1.84 +REGISTER_DECLARATION(Register, Gframe_size   , G1); // SAME REG as G1_scratch
    1.85 +REGISTER_DECLARATION(Register, G1_scratch    , G1); // also SAME
    1.86 +REGISTER_DECLARATION(Register, G3_scratch    , G3);
    1.87 +REGISTER_DECLARATION(Register, G4_scratch    , G4);
    1.88 +
    1.89 +// These globals are used as short-lived scratch registers in the compiler:
    1.90 +
    1.91 +REGISTER_DECLARATION(Register, Gtemp  , G5);
    1.92 +
    1.93 +// JSR 292 fixed register usages:
    1.94 +REGISTER_DECLARATION(Register, G5_method_type        , G5);
    1.95 +REGISTER_DECLARATION(Register, G3_method_handle      , G3);
    1.96 +REGISTER_DECLARATION(Register, L7_mh_SP_save         , L7);
    1.97 +
     1.98 +// The compiler requires that G5_megamorphic_method is G5_inline_cache_reg,
    1.99 +// because a single patchable "set" instruction (NativeMovConstReg,
   1.100 +// or NativeMovConstPatching for compiler1) instruction
   1.101 +// serves to set up either quantity, depending on whether the compiled
   1.102 +// call site is an inline cache or is megamorphic.  See the function
   1.103 +// CompiledIC::set_to_megamorphic.
   1.104 +//
    1.105 +// If an inline cache targets an interpreted method, then the
   1.106 +// G5 register will be used twice during the call.  First,
   1.107 +// the call site will be patched to load a compiledICHolder
   1.108 +// into G5. (This is an ordered pair of ic_klass, method.)
   1.109 +// The c2i adapter will first check the ic_klass, then load
   1.110 +// G5_method with the method part of the pair just before
   1.111 +// jumping into the interpreter.
   1.112 +//
   1.113 +// Note that G5_method is only the method-self for the interpreter,
   1.114 +// and is logically unrelated to G5_megamorphic_method.
   1.115 +//
   1.116 +// Invariants on G2_thread (the JavaThread pointer):
   1.117 +//  - it should not be used for any other purpose anywhere
   1.118 +//  - it must be re-initialized by StubRoutines::call_stub()
   1.119 +//  - it must be preserved around every use of call_VM
   1.120 +
   1.121 +// We can consider using g2/g3/g4 to cache more values than the
   1.122 +// JavaThread, such as the card-marking base or perhaps pointers into
   1.123 +// Eden.  It's something of a waste to use them as scratch temporaries,
   1.124 +// since they are not supposed to be volatile.  (Of course, if we find
   1.125 +// that Java doesn't benefit from application globals, then we can just
   1.126 +// use them as ordinary temporaries.)
   1.127 +//
   1.128 +// Since g1 and g5 (and/or g6) are the volatile (caller-save) registers,
   1.129 +// it makes sense to use them routinely for procedure linkage,
   1.130 +// whenever the On registers are not applicable.  Examples:  G5_method,
    1.131 +// G5_inline_cache_reg, and a double handful of miscellaneous compiler
   1.132 +// stubs.  This means that compiler stubs, etc., should be kept to a
   1.133 +// maximum of two or three G-register arguments.
   1.134 +
   1.135 +
   1.136 +// stub frames
   1.137 +
   1.138 +REGISTER_DECLARATION(Register, Lentry_args      , L0); // pointer to args passed to callee (interpreter) not stub itself
   1.139 +
   1.140 +// Interpreter frames
   1.141 +
   1.142 +#ifdef CC_INTERP
   1.143 +REGISTER_DECLARATION(Register, Lstate           , L0); // interpreter state object pointer
   1.144 +REGISTER_DECLARATION(Register, L1_scratch       , L1); // scratch
   1.145 +REGISTER_DECLARATION(Register, Lmirror          , L1); // mirror (for native methods only)
   1.146 +REGISTER_DECLARATION(Register, L2_scratch       , L2);
   1.147 +REGISTER_DECLARATION(Register, L3_scratch       , L3);
   1.148 +REGISTER_DECLARATION(Register, L4_scratch       , L4);
   1.149 +REGISTER_DECLARATION(Register, Lscratch         , L5); // C1 uses
   1.150 +REGISTER_DECLARATION(Register, Lscratch2        , L6); // C1 uses
   1.151 +REGISTER_DECLARATION(Register, L7_scratch       , L7); // constant pool cache
   1.152 +REGISTER_DECLARATION(Register, O5_savedSP       , O5);
   1.153 +REGISTER_DECLARATION(Register, I5_savedSP       , I5); // Saved SP before bumping for locals.  This is simply
    1.154 +                                                       // a copy of SP, so in 64-bit it's a biased value.  The bias
   1.155 +                                                       // is added and removed as needed in the frame code.
   1.156 +// Interface to signature handler
   1.157 +REGISTER_DECLARATION(Register, Llocals          , L7); // pointer to locals for signature handler
   1.158 +REGISTER_DECLARATION(Register, Lmethod          , L6); // Method* when calling signature handler
   1.159 +
   1.160 +#else
   1.161 +REGISTER_DECLARATION(Register, Lesp             , L0); // expression stack pointer
   1.162 +REGISTER_DECLARATION(Register, Lbcp             , L1); // pointer to next bytecode
   1.163 +REGISTER_DECLARATION(Register, Lmethod          , L2);
   1.164 +REGISTER_DECLARATION(Register, Llocals          , L3);
   1.165 +REGISTER_DECLARATION(Register, Largs            , L3); // pointer to locals for signature handler
   1.166 +                                                       // must match Llocals in asm interpreter
   1.167 +REGISTER_DECLARATION(Register, Lmonitors        , L4);
   1.168 +REGISTER_DECLARATION(Register, Lbyte_code       , L5);
   1.169 +// When calling out from the interpreter we record SP so that we can remove any extra stack
   1.170 +// space allocated during adapter transitions. This register is only live from the point
   1.171 +// of the call until we return.
   1.172 +REGISTER_DECLARATION(Register, Llast_SP         , L5);
   1.173 +REGISTER_DECLARATION(Register, Lscratch         , L5);
   1.174 +REGISTER_DECLARATION(Register, Lscratch2        , L6);
   1.175 +REGISTER_DECLARATION(Register, LcpoolCache      , L6); // constant pool cache
   1.176 +
   1.177 +REGISTER_DECLARATION(Register, O5_savedSP       , O5);
   1.178 +REGISTER_DECLARATION(Register, I5_savedSP       , I5); // Saved SP before bumping for locals.  This is simply
    1.179 +                                                       // a copy of SP, so in 64-bit it's a biased value.  The bias
   1.180 +                                                       // is added and removed as needed in the frame code.
   1.181 +REGISTER_DECLARATION(Register, IdispatchTables  , I4); // Base address of the bytecode dispatch tables
   1.182 +REGISTER_DECLARATION(Register, IdispatchAddress , I3); // Register which saves the dispatch address for each bytecode
   1.183 +REGISTER_DECLARATION(Register, ImethodDataPtr   , I2); // Pointer to the current method data
   1.184 +#endif /* CC_INTERP */
   1.185 +
   1.186 +// NOTE: Lscratch2 and LcpoolCache point to the same registers in
   1.187 +//       the interpreter code. If Lscratch2 needs to be used for some
    1.188 +//       purpose then LcpoolCache should be restored after that for
   1.189 +//       the interpreter to work right
   1.190 +// (These assignments must be compatible with L7_thread_cache; see above.)
   1.191 +
   1.192 +// Since Lbcp points into the middle of the method object,
   1.193 +// it is temporarily converted into a "bcx" during GC.
   1.194 +
   1.195 +// Exception processing
   1.196 +// These registers are passed into exception handlers.
   1.197 +// All exception handlers require the exception object being thrown.
   1.198 +// In addition, an nmethod's exception handler must be passed
   1.199 +// the address of the call site within the nmethod, to allow
   1.200 +// proper selection of the applicable catch block.
   1.201 +// (Interpreter frames use their own bcp() for this purpose.)
   1.202 +//
   1.203 +// The Oissuing_pc value is not always needed.  When jumping to a
   1.204 +// handler that is known to be interpreted, the Oissuing_pc value can be
   1.205 +// omitted.  An actual catch block in compiled code receives (from its
   1.206 +// nmethod's exception handler) the thrown exception in the Oexception,
   1.207 +// but it doesn't need the Oissuing_pc.
   1.208 +//
   1.209 +// If an exception handler (either interpreted or compiled)
   1.210 +// discovers there is no applicable catch block, it updates
   1.211 +// the Oissuing_pc to the continuation PC of its own caller,
   1.212 +// pops back to that caller's stack frame, and executes that
   1.213 +// caller's exception handler.  Obviously, this process will
   1.214 +// iterate until the control stack is popped back to a method
   1.215 +// containing an applicable catch block.  A key invariant is
   1.216 +// that the Oissuing_pc value is always a value local to
   1.217 +// the method whose exception handler is currently executing.
   1.218 +//
   1.219 +// Note:  The issuing PC value is __not__ a raw return address (I7 value).
   1.220 +// It is a "return pc", the address __following__ the call.
   1.221 +// Raw return addresses are converted to issuing PCs by frame::pc(),
   1.222 +// or by stubs.  Issuing PCs can be used directly with PC range tables.
   1.223 +//
   1.224 +REGISTER_DECLARATION(Register, Oexception  , O0); // exception being thrown
   1.225 +REGISTER_DECLARATION(Register, Oissuing_pc , O1); // where the exception is coming from
   1.226 +
   1.227 +
   1.228 +// These must occur after the declarations above
   1.229 +#ifndef DONT_USE_REGISTER_DEFINES
   1.230 +
   1.231 +#define Gthread             AS_REGISTER(Register, Gthread)
   1.232 +#define Gmethod             AS_REGISTER(Register, Gmethod)
   1.233 +#define Gmegamorphic_method AS_REGISTER(Register, Gmegamorphic_method)
   1.234 +#define Ginline_cache_reg   AS_REGISTER(Register, Ginline_cache_reg)
   1.235 +#define Gargs               AS_REGISTER(Register, Gargs)
   1.236 +#define Lthread_cache       AS_REGISTER(Register, Lthread_cache)
   1.237 +#define Gframe_size         AS_REGISTER(Register, Gframe_size)
   1.238 +#define Gtemp               AS_REGISTER(Register, Gtemp)
   1.239 +
   1.240 +#ifdef CC_INTERP
   1.241 +#define Lstate              AS_REGISTER(Register, Lstate)
   1.242 +#define Lesp                AS_REGISTER(Register, Lesp)
   1.243 +#define L1_scratch          AS_REGISTER(Register, L1_scratch)
   1.244 +#define Lmirror             AS_REGISTER(Register, Lmirror)
   1.245 +#define L2_scratch          AS_REGISTER(Register, L2_scratch)
   1.246 +#define L3_scratch          AS_REGISTER(Register, L3_scratch)
   1.247 +#define L4_scratch          AS_REGISTER(Register, L4_scratch)
   1.248 +#define Lscratch            AS_REGISTER(Register, Lscratch)
   1.249 +#define Lscratch2           AS_REGISTER(Register, Lscratch2)
   1.250 +#define L7_scratch          AS_REGISTER(Register, L7_scratch)
   1.251 +#define Ostate              AS_REGISTER(Register, Ostate)
   1.252 +#else
   1.253 +#define Lesp                AS_REGISTER(Register, Lesp)
   1.254 +#define Lbcp                AS_REGISTER(Register, Lbcp)
   1.255 +#define Lmethod             AS_REGISTER(Register, Lmethod)
   1.256 +#define Llocals             AS_REGISTER(Register, Llocals)
   1.257 +#define Lmonitors           AS_REGISTER(Register, Lmonitors)
   1.258 +#define Lbyte_code          AS_REGISTER(Register, Lbyte_code)
   1.259 +#define Lscratch            AS_REGISTER(Register, Lscratch)
   1.260 +#define Lscratch2           AS_REGISTER(Register, Lscratch2)
   1.261 +#define LcpoolCache         AS_REGISTER(Register, LcpoolCache)
   1.262 +#endif /* ! CC_INTERP */
   1.263 +
   1.264 +#define Lentry_args         AS_REGISTER(Register, Lentry_args)
   1.265 +#define I5_savedSP          AS_REGISTER(Register, I5_savedSP)
   1.266 +#define O5_savedSP          AS_REGISTER(Register, O5_savedSP)
   1.267 +#define IdispatchAddress    AS_REGISTER(Register, IdispatchAddress)
   1.268 +#define ImethodDataPtr      AS_REGISTER(Register, ImethodDataPtr)
   1.269 +#define IdispatchTables     AS_REGISTER(Register, IdispatchTables)
   1.270 +
   1.271 +#define Oexception          AS_REGISTER(Register, Oexception)
   1.272 +#define Oissuing_pc         AS_REGISTER(Register, Oissuing_pc)
   1.273 +
   1.274 +#endif
   1.275 +
   1.276 +
   1.277 +// Address is an abstraction used to represent a memory location.
   1.278 +//
   1.279 +// Note: A register location is represented via a Register, not
   1.280 +//       via an address for efficiency & simplicity reasons.
   1.281 +
   1.282 +class Address VALUE_OBJ_CLASS_SPEC {
   1.283 + private:
   1.284 +  Register           _base;           // Base register.
   1.285 +  RegisterOrConstant _index_or_disp;  // Index register or constant displacement.
   1.286 +  RelocationHolder   _rspec;
   1.287 +
   1.288 + public:
   1.289 +  Address() : _base(noreg), _index_or_disp(noreg) {}
   1.290 +
   1.291 +  Address(Register base, RegisterOrConstant index_or_disp)
   1.292 +    : _base(base),
   1.293 +      _index_or_disp(index_or_disp) {
   1.294 +  }
   1.295 +
   1.296 +  Address(Register base, Register index)
   1.297 +    : _base(base),
   1.298 +      _index_or_disp(index) {
   1.299 +  }
   1.300 +
   1.301 +  Address(Register base, int disp)
   1.302 +    : _base(base),
   1.303 +      _index_or_disp(disp) {
   1.304 +  }
   1.305 +
   1.306 +#ifdef ASSERT
   1.307 +  // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
   1.308 +  Address(Register base, ByteSize disp)
   1.309 +    : _base(base),
   1.310 +      _index_or_disp(in_bytes(disp)) {
   1.311 +  }
   1.312 +#endif
   1.313 +
   1.314 +  // accessors
   1.315 +  Register base()             const { return _base; }
   1.316 +  Register index()            const { return _index_or_disp.as_register(); }
   1.317 +  int      disp()             const { return _index_or_disp.as_constant(); }
   1.318 +
   1.319 +  bool     has_index()        const { return _index_or_disp.is_register(); }
   1.320 +  bool     has_disp()         const { return _index_or_disp.is_constant(); }
   1.321 +
   1.322 +  bool     uses(Register reg) const { return base() == reg || (has_index() && index() == reg); }
   1.323 +
   1.324 +  const relocInfo::relocType rtype() { return _rspec.type(); }
   1.325 +  const RelocationHolder&    rspec() { return _rspec; }
   1.326 +
   1.327 +  RelocationHolder rspec(int offset) const {
   1.328 +    return offset == 0 ? _rspec : _rspec.plus(offset);
   1.329 +  }
   1.330 +
   1.331 +  inline bool is_simm13(int offset = 0);  // check disp+offset for overflow
   1.332 +
   1.333 +  Address plus_disp(int plusdisp) const {     // bump disp by a small amount
   1.334 +    assert(_index_or_disp.is_constant(), "must have a displacement");
   1.335 +    Address a(base(), disp() + plusdisp);
   1.336 +    return a;
   1.337 +  }
   1.338 +  bool is_same_address(Address a) const {
   1.339 +    // disregard _rspec
   1.340 +    return base() == a.base() && (has_index() ? index() == a.index() : disp() == a.disp());
   1.341 +  }
   1.342 +
   1.343 +  Address after_save() const {
   1.344 +    Address a = (*this);
   1.345 +    a._base = a._base->after_save();
   1.346 +    return a;
   1.347 +  }
   1.348 +
   1.349 +  Address after_restore() const {
   1.350 +    Address a = (*this);
   1.351 +    a._base = a._base->after_restore();
   1.352 +    return a;
   1.353 +  }
   1.354 +
   1.355 +  // Convert the raw encoding form into the form expected by the
   1.356 +  // constructor for Address.
   1.357 +  static Address make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc);
   1.358 +
   1.359 +  friend class Assembler;
   1.360 +};
   1.361 +
   1.362 +
   1.363 +class AddressLiteral VALUE_OBJ_CLASS_SPEC {
   1.364 + private:
   1.365 +  address          _address;
   1.366 +  RelocationHolder _rspec;
   1.367 +
   1.368 +  RelocationHolder rspec_from_rtype(relocInfo::relocType rtype, address addr) {
   1.369 +    switch (rtype) {
   1.370 +    case relocInfo::external_word_type:
   1.371 +      return external_word_Relocation::spec(addr);
   1.372 +    case relocInfo::internal_word_type:
   1.373 +      return internal_word_Relocation::spec(addr);
   1.374 +#ifdef _LP64
   1.375 +    case relocInfo::opt_virtual_call_type:
   1.376 +      return opt_virtual_call_Relocation::spec();
   1.377 +    case relocInfo::static_call_type:
   1.378 +      return static_call_Relocation::spec();
   1.379 +    case relocInfo::runtime_call_type:
   1.380 +      return runtime_call_Relocation::spec();
   1.381 +#endif
   1.382 +    case relocInfo::none:
   1.383 +      return RelocationHolder();
   1.384 +    default:
   1.385 +      ShouldNotReachHere();
   1.386 +      return RelocationHolder();
   1.387 +    }
   1.388 +  }
   1.389 +
   1.390 + protected:
   1.391 +  // creation
   1.392 +  AddressLiteral() : _address(NULL), _rspec(NULL) {}
   1.393 +
   1.394 + public:
   1.395 +  AddressLiteral(address addr, RelocationHolder const& rspec)
   1.396 +    : _address(addr),
   1.397 +      _rspec(rspec) {}
   1.398 +
   1.399 +  // Some constructors to avoid casting at the call site.
   1.400 +  AddressLiteral(jobject obj, RelocationHolder const& rspec)
   1.401 +    : _address((address) obj),
   1.402 +      _rspec(rspec) {}
   1.403 +
   1.404 +  AddressLiteral(intptr_t value, RelocationHolder const& rspec)
   1.405 +    : _address((address) value),
   1.406 +      _rspec(rspec) {}
   1.407 +
   1.408 +  AddressLiteral(address addr, relocInfo::relocType rtype = relocInfo::none)
   1.409 +    : _address((address) addr),
   1.410 +    _rspec(rspec_from_rtype(rtype, (address) addr)) {}
   1.411 +
   1.412 +  // Some constructors to avoid casting at the call site.
   1.413 +  AddressLiteral(address* addr, relocInfo::relocType rtype = relocInfo::none)
   1.414 +    : _address((address) addr),
   1.415 +    _rspec(rspec_from_rtype(rtype, (address) addr)) {}
   1.416 +
   1.417 +  AddressLiteral(bool* addr, relocInfo::relocType rtype = relocInfo::none)
   1.418 +    : _address((address) addr),
   1.419 +      _rspec(rspec_from_rtype(rtype, (address) addr)) {}
   1.420 +
   1.421 +  AddressLiteral(const bool* addr, relocInfo::relocType rtype = relocInfo::none)
   1.422 +    : _address((address) addr),
   1.423 +      _rspec(rspec_from_rtype(rtype, (address) addr)) {}
   1.424 +
   1.425 +  AddressLiteral(signed char* addr, relocInfo::relocType rtype = relocInfo::none)
   1.426 +    : _address((address) addr),
   1.427 +      _rspec(rspec_from_rtype(rtype, (address) addr)) {}
   1.428 +
   1.429 +  AddressLiteral(int* addr, relocInfo::relocType rtype = relocInfo::none)
   1.430 +    : _address((address) addr),
   1.431 +      _rspec(rspec_from_rtype(rtype, (address) addr)) {}
   1.432 +
   1.433 +  AddressLiteral(intptr_t addr, relocInfo::relocType rtype = relocInfo::none)
   1.434 +    : _address((address) addr),
   1.435 +      _rspec(rspec_from_rtype(rtype, (address) addr)) {}
   1.436 +
   1.437 +#ifdef _LP64
   1.438 +  // 32-bit complains about a multiple declaration for int*.
   1.439 +  AddressLiteral(intptr_t* addr, relocInfo::relocType rtype = relocInfo::none)
   1.440 +    : _address((address) addr),
   1.441 +      _rspec(rspec_from_rtype(rtype, (address) addr)) {}
   1.442 +#endif
   1.443 +
   1.444 +  AddressLiteral(Metadata* addr, relocInfo::relocType rtype = relocInfo::none)
   1.445 +    : _address((address) addr),
   1.446 +      _rspec(rspec_from_rtype(rtype, (address) addr)) {}
   1.447 +
   1.448 +  AddressLiteral(Metadata** addr, relocInfo::relocType rtype = relocInfo::none)
   1.449 +    : _address((address) addr),
   1.450 +      _rspec(rspec_from_rtype(rtype, (address) addr)) {}
   1.451 +
   1.452 +  AddressLiteral(float* addr, relocInfo::relocType rtype = relocInfo::none)
   1.453 +    : _address((address) addr),
   1.454 +      _rspec(rspec_from_rtype(rtype, (address) addr)) {}
   1.455 +
   1.456 +  AddressLiteral(double* addr, relocInfo::relocType rtype = relocInfo::none)
   1.457 +    : _address((address) addr),
   1.458 +      _rspec(rspec_from_rtype(rtype, (address) addr)) {}
   1.459 +
   1.460 +  intptr_t value() const { return (intptr_t) _address; }
   1.461 +  int      low10() const;
   1.462 +
   1.463 +  const relocInfo::relocType rtype() const { return _rspec.type(); }
   1.464 +  const RelocationHolder&    rspec() const { return _rspec; }
   1.465 +
   1.466 +  RelocationHolder rspec(int offset) const {
   1.467 +    return offset == 0 ? _rspec : _rspec.plus(offset);
   1.468 +  }
   1.469 +};
   1.470 +
   1.471 +// Convenience classes
   1.472 +class ExternalAddress: public AddressLiteral {
   1.473 + private:
   1.474 +  static relocInfo::relocType reloc_for_target(address target) {
   1.475 +    // Sometimes ExternalAddress is used for values which aren't
   1.476 +    // exactly addresses, like the card table base.
   1.477 +    // external_word_type can't be used for values in the first page
   1.478 +    // so just skip the reloc in that case.
   1.479 +    return external_word_Relocation::can_be_relocated(target) ? relocInfo::external_word_type : relocInfo::none;
   1.480 +  }
   1.481 +
   1.482 + public:
   1.483 +  ExternalAddress(address target) : AddressLiteral(target, reloc_for_target(          target)) {}
   1.484 +  ExternalAddress(Metadata** target) : AddressLiteral(target, reloc_for_target((address) target)) {}
   1.485 +};
   1.486 +
   1.487 +inline Address RegisterImpl::address_in_saved_window() const {
   1.488 +   return (Address(SP, (sp_offset_in_saved_window() * wordSize) + STACK_BIAS));
   1.489 +}
   1.490 +
   1.491 +
   1.492 +
   1.493 +// Argument is an abstraction used to represent an outgoing
   1.494 +// actual argument or an incoming formal parameter, whether
   1.495 +// it resides in memory or in a register, in a manner consistent
   1.496 +// with the SPARC Application Binary Interface, or ABI.  This is
   1.497 +// often referred to as the native or C calling convention.
   1.498 +
   1.499 +class Argument VALUE_OBJ_CLASS_SPEC {
   1.500 + private:
   1.501 +  int _number;
   1.502 +  bool _is_in;
   1.503 +
   1.504 + public:
   1.505 +#ifdef _LP64
   1.506 +  enum {
   1.507 +    n_register_parameters = 6,          // only 6 registers may contain integer parameters
   1.508 +    n_float_register_parameters = 16    // Can have up to 16 floating registers
   1.509 +  };
   1.510 +#else
   1.511 +  enum {
   1.512 +    n_register_parameters = 6           // only 6 registers may contain integer parameters
   1.513 +  };
   1.514 +#endif
   1.515 +
   1.516 +  // creation
   1.517 +  Argument(int number, bool is_in) : _number(number), _is_in(is_in) {}
   1.518 +
   1.519 +  int  number() const  { return _number;  }
   1.520 +  bool is_in()  const  { return _is_in;   }
   1.521 +  bool is_out() const  { return !is_in(); }
   1.522 +
   1.523 +  Argument successor() const  { return Argument(number() + 1, is_in()); }
   1.524 +  Argument as_in()     const  { return Argument(number(), true ); }
   1.525 +  Argument as_out()    const  { return Argument(number(), false); }
   1.526 +
   1.527 +  // locating register-based arguments:
   1.528 +  bool is_register() const { return _number < n_register_parameters; }
   1.529 +
   1.530 +#ifdef _LP64
   1.531 +  // locating Floating Point register-based arguments:
   1.532 +  bool is_float_register() const { return _number < n_float_register_parameters; }
   1.533 +
   1.534 +  FloatRegister as_float_register() const {
   1.535 +    assert(is_float_register(), "must be a register argument");
   1.536 +    return as_FloatRegister(( number() *2 ) + 1);
   1.537 +  }
   1.538 +  FloatRegister as_double_register() const {
   1.539 +    assert(is_float_register(), "must be a register argument");
   1.540 +    return as_FloatRegister(( number() *2 ));
   1.541 +  }
   1.542 +#endif
   1.543 +
   1.544 +  Register as_register() const {
   1.545 +    assert(is_register(), "must be a register argument");
   1.546 +    return is_in() ? as_iRegister(number()) : as_oRegister(number());
   1.547 +  }
   1.548 +
   1.549 +  // locating memory-based arguments
   1.550 +  Address as_address() const {
   1.551 +    assert(!is_register(), "must be a memory argument");
   1.552 +    return address_in_frame();
   1.553 +  }
   1.554 +
   1.555 +  // When applied to a register-based argument, give the corresponding address
   1.556 +  // into the 6-word area "into which callee may store register arguments"
   1.557 +  // (This is a different place than the corresponding register-save area location.)
   1.558 +  Address address_in_frame() const;
   1.559 +
   1.560 +  // debugging
   1.561 +  const char* name() const;
   1.562 +
   1.563 +  friend class Assembler;
   1.564 +};
   1.565 +
   1.566 +
   1.567 +class RegistersForDebugging : public StackObj {
   1.568 + public:
   1.569 +  intptr_t i[8], l[8], o[8], g[8];
   1.570 +  float    f[32];
   1.571 +  double   d[32];
   1.572 +
   1.573 +  void print(outputStream* s);
   1.574 +
   1.575 +  static int i_offset(int j) { return offset_of(RegistersForDebugging, i[j]); }
   1.576 +  static int l_offset(int j) { return offset_of(RegistersForDebugging, l[j]); }
   1.577 +  static int o_offset(int j) { return offset_of(RegistersForDebugging, o[j]); }
   1.578 +  static int g_offset(int j) { return offset_of(RegistersForDebugging, g[j]); }
   1.579 +  static int f_offset(int j) { return offset_of(RegistersForDebugging, f[j]); }
   1.580 +  static int d_offset(int j) { return offset_of(RegistersForDebugging, d[j / 2]); }
   1.581 +
   1.582 +  // gen asm code to save regs
   1.583 +  static void save_registers(MacroAssembler* a);
   1.584 +
   1.585 +  // restore global registers in case C code disturbed them
   1.586 +  static void restore_registers(MacroAssembler* a, Register r);
   1.587 +};
   1.588 +
   1.589 +
   1.590 +// MacroAssembler extends Assembler by a few frequently used macros.
   1.591 +//
   1.592 +// Most of the standard SPARC synthetic ops are defined here.
   1.593 +// Instructions for which a 'better' code sequence exists depending
   1.594 +// on arguments should also go in here.
   1.595 +
   1.596 +#define JMP2(r1, r2) jmp(r1, r2, __FILE__, __LINE__)
   1.597 +#define JMP(r1, off) jmp(r1, off, __FILE__, __LINE__)
   1.598 +#define JUMP(a, temp, off)     jump(a, temp, off, __FILE__, __LINE__)
   1.599 +#define JUMPL(a, temp, d, off) jumpl(a, temp, d, off, __FILE__, __LINE__)
   1.600 +
   1.601 +
   1.602 +class MacroAssembler : public Assembler {
   1.603 +  // code patchers need various routines like inv_wdisp()
   1.604 +  friend class NativeInstruction;
   1.605 +  friend class NativeGeneralJump;
   1.606 +  friend class Relocation;
   1.607 +  friend class Label;
   1.608 +
   1.609 + protected:
   1.610 +  static int  patched_branch(int dest_pos, int inst, int inst_pos);
   1.611 +  static int  branch_destination(int inst, int pos);
   1.612 +
   1.613 +  // Support for VM calls
   1.614 +  // This is the base routine called by the different versions of call_VM_leaf. The interpreter
   1.615 +  // may customize this version by overriding it for its purposes (e.g., to save/restore
   1.616 +  // additional registers when doing a VM call).
   1.617 +#ifdef CC_INTERP
   1.618 +  #define VIRTUAL
   1.619 +#else
   1.620 +  #define VIRTUAL virtual
   1.621 +#endif
   1.622 +
   1.623 +  VIRTUAL void call_VM_leaf_base(Register thread_cache, address entry_point, int number_of_arguments);
   1.624 +
   1.625 +  //
   1.626 +  // It is imperative that all calls into the VM are handled via the call_VM macros.
   1.627 +  // They make sure that the stack linkage is setup correctly. call_VM's correspond
   1.628 +  // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
   1.629 +  //
   1.630 +  // This is the base routine called by the different versions of call_VM. The interpreter
   1.631 +  // may customize this version by overriding it for its purposes (e.g., to save/restore
   1.632 +  // additional registers when doing a VM call).
   1.633 +  //
   1.634 +  // A non-volatile java_thread_cache register should be specified so
   1.635 +  // that the G2_thread value can be preserved across the call.
   1.636 +  // (If java_thread_cache is noreg, then a slow get_thread call
   1.637 +  // will re-initialize the G2_thread.) call_VM_base returns the register that contains the
   1.638 +  // thread.
   1.639 +  //
   1.640 +  // If no last_java_sp is specified (noreg) than SP will be used instead.
   1.641 +
   1.642 +  virtual void call_VM_base(
   1.643 +    Register        oop_result,             // where an oop-result ends up if any; use noreg otherwise
   1.644 +    Register        java_thread_cache,      // the thread if computed before     ; use noreg otherwise
   1.645 +    Register        last_java_sp,           // to set up last_Java_frame in stubs; use noreg otherwise
   1.646 +    address         entry_point,            // the entry point
   1.647 +    int             number_of_arguments,    // the number of arguments (w/o thread) to pop after call
   1.648 +    bool            check_exception=true    // flag which indicates if exception should be checked
   1.649 +  );
   1.650 +
   1.651 +  // This routine should emit JVMTI PopFrame and ForceEarlyReturn handling code.
   1.652 +  // The implementation is only non-empty for the InterpreterMacroAssembler,
  // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
   1.654 +  virtual void check_and_handle_popframe(Register scratch_reg);
   1.655 +  virtual void check_and_handle_earlyret(Register scratch_reg);
   1.656 +
   1.657 + public:
   1.658 +  MacroAssembler(CodeBuffer* code) : Assembler(code) {}
   1.659 +
   1.660 +  // Support for NULL-checks
   1.661 +  //
   1.662 +  // Generates code that causes a NULL OS exception if the content of reg is NULL.
   1.663 +  // If the accessed location is M[reg + offset] and the offset is known, provide the
   1.664 +  // offset.  No explicit code generation is needed if the offset is within a certain
   1.665 +  // range (0 <= offset <= page_size).
   1.666 +  //
   1.667 +  // %%%%%% Currently not done for SPARC
   1.668 +
   1.669 +  void null_check(Register reg, int offset = -1);
   1.670 +  static bool needs_explicit_null_check(intptr_t offset);
   1.671 +
   1.672 +  // support for delayed instructions
   1.673 +  MacroAssembler* delayed() { Assembler::delayed();  return this; }
   1.674 +
   1.675 +  // branches that use right instruction for v8 vs. v9
   1.676 +  inline void br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
   1.677 +  inline void br( Condition c, bool a, Predict p, Label& L );
   1.678 +
   1.679 +  inline void fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
   1.680 +  inline void fb( Condition c, bool a, Predict p, Label& L );
   1.681 +
   1.682 +  // compares register with zero (32 bit) and branches (V9 and V8 instructions)
   1.683 +  void cmp_zero_and_br( Condition c, Register s1, Label& L, bool a = false, Predict p = pn );
   1.684 +  // Compares a pointer register with zero and branches on (not)null.
   1.685 +  // Does a test & branch on 32-bit systems and a register-branch on 64-bit.
   1.686 +  void br_null   ( Register s1, bool a, Predict p, Label& L );
   1.687 +  void br_notnull( Register s1, bool a, Predict p, Label& L );
   1.688 +
   1.689 +  //
   1.690 +  // Compare registers and branch with nop in delay slot or cbcond without delay slot.
   1.691 +  //
   1.692 +  // ATTENTION: use these instructions with caution because cbcond instruction
   1.693 +  //            has very short distance: 512 instructions (2Kbyte).
   1.694 +
   1.695 +  // Compare integer (32 bit) values (icc only).
   1.696 +  void cmp_and_br_short(Register s1, Register s2, Condition c, Predict p, Label& L);
   1.697 +  void cmp_and_br_short(Register s1, int simm13a, Condition c, Predict p, Label& L);
   1.698 +  // Platform depending version for pointer compare (icc on !LP64 and xcc on LP64).
   1.699 +  void cmp_and_brx_short(Register s1, Register s2, Condition c, Predict p, Label& L);
   1.700 +  void cmp_and_brx_short(Register s1, int simm13a, Condition c, Predict p, Label& L);
   1.701 +
  // Short branch versions for comparing a pointer with zero.
   1.703 +  void br_null_short   ( Register s1, Predict p, Label& L );
   1.704 +  void br_notnull_short( Register s1, Predict p, Label& L );
   1.705 +
   1.706 +  // unconditional short branch
   1.707 +  void ba_short(Label& L);
   1.708 +
   1.709 +  inline void bp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
   1.710 +  inline void bp( Condition c, bool a, CC cc, Predict p, Label& L );
   1.711 +
   1.712 +  // Branch that tests xcc in LP64 and icc in !LP64
   1.713 +  inline void brx( Condition c, bool a, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
   1.714 +  inline void brx( Condition c, bool a, Predict p, Label& L );
   1.715 +
   1.716 +  // unconditional branch
   1.717 +  inline void ba( Label& L );
   1.718 +
   1.719 +  // Branch that tests fp condition codes
   1.720 +  inline void fbp( Condition c, bool a, CC cc, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
   1.721 +  inline void fbp( Condition c, bool a, CC cc, Predict p, Label& L );
   1.722 +
   1.723 +  // get PC the best way
   1.724 +  inline int get_pc( Register d );
   1.725 +
   1.726 +  // Sparc shorthands(pp 85, V8 manual, pp 289 V9 manual)
  // Synthetic compare: subtract into %g0 so only the integer condition
  // codes are updated, the operands are left untouched.
  inline void cmp(  Register s1, Register s2 ) { subcc( s1, s2, G0 ); }
  inline void cmp(  Register s1, int simm13a ) { subcc( s1, simm13a, G0 ); }
   1.729 +
   1.730 +  inline void jmp( Register s1, Register s2 );
   1.731 +  inline void jmp( Register s1, int simm13a, RelocationHolder const& rspec = RelocationHolder() );
   1.732 +
   1.733 +  // Check if the call target is out of wdisp30 range (relative to the code cache)
   1.734 +  static inline bool is_far_target(address d);
   1.735 +  inline void call( address d,  relocInfo::relocType rt = relocInfo::runtime_call_type );
   1.736 +  inline void call( Label& L,   relocInfo::relocType rt = relocInfo::runtime_call_type );
   1.737 +  inline void callr( Register s1, Register s2 );
   1.738 +  inline void callr( Register s1, int simm13a, RelocationHolder const& rspec = RelocationHolder() );
   1.739 +
   1.740 +  // Emits nothing on V8
   1.741 +  inline void iprefetch( address d, relocInfo::relocType rt = relocInfo::none );
   1.742 +  inline void iprefetch( Label& L);
   1.743 +
   1.744 +  inline void tst( Register s ) { orcc( G0, s, G0 ); }
   1.745 +
  // Return pseudo instructions: ret jumps through I7, retl through O7;
  // both skip the call instruction and its delay slot (2 * BytesPerInstWord).
  // When trace is set, the jump is routed through the traceable JMP macro
  // so it is recorded in the jump ring buffer.
#ifdef PRODUCT
  inline void ret(  bool trace = TraceJumps )   { if (trace) {
                                                    mov(I7, O7); // traceable register
                                                    JMP(O7, 2 * BytesPerInstWord);
                                                  } else {
                                                    jmpl( I7, 2 * BytesPerInstWord, G0 );
                                                  }
                                                }

  inline void retl( bool trace = TraceJumps )  { if (trace) JMP(O7, 2 * BytesPerInstWord);
                                                 else jmpl( O7, 2 * BytesPerInstWord, G0 ); }
#else
  // Out-of-line in non-product builds (defined in the .cpp file).
  void ret(  bool trace = TraceJumps );
  void retl( bool trace = TraceJumps );
#endif /* PRODUCT */
   1.761 +
   1.762 +  // Required platform-specific helpers for Label::patch_instructions.
   1.763 +  // They _shadow_ the declarations in AbstractAssembler, which are undefined.
   1.764 +  void pd_patch_instruction(address branch, address target);
   1.765 +
   1.766 +  // sethi Macro handles optimizations and relocations
   1.767 +private:
   1.768 +  void internal_sethi(const AddressLiteral& addrlit, Register d, bool ForceRelocatable);
   1.769 +public:
   1.770 +  void sethi(const AddressLiteral& addrlit, Register d);
   1.771 +  void patchable_sethi(const AddressLiteral& addrlit, Register d);
   1.772 +
   1.773 +  // compute the number of instructions for a sethi/set
   1.774 +  static int  insts_for_sethi( address a, bool worst_case = false );
   1.775 +  static int  worst_case_insts_for_set();
   1.776 +
   1.777 +  // set may be either setsw or setuw (high 32 bits may be zero or sign)
   1.778 +private:
   1.779 +  void internal_set(const AddressLiteral& al, Register d, bool ForceRelocatable);
   1.780 +  static int insts_for_internal_set(intptr_t value);
   1.781 +public:
   1.782 +  void set(const AddressLiteral& addrlit, Register d);
   1.783 +  void set(intptr_t value, Register d);
   1.784 +  void set(address addr, Register d, RelocationHolder const& rspec);
   1.785 +  static int insts_for_set(intptr_t value) { return insts_for_internal_set(value); }
   1.786 +
   1.787 +  void patchable_set(const AddressLiteral& addrlit, Register d);
   1.788 +  void patchable_set(intptr_t value, Register d);
   1.789 +  void set64(jlong value, Register d, Register tmp);
   1.790 +  static int insts_for_set64(jlong value);
   1.791 +
   1.792 +  // sign-extend 32 to 64
   1.793 +  inline void signx( Register s, Register d ) { sra( s, G0, d); }
   1.794 +  inline void signx( Register d )             { sra( d, G0, d); }
   1.795 +
   1.796 +  inline void not1( Register s, Register d ) { xnor( s, G0, d ); }
   1.797 +  inline void not1( Register d )             { xnor( d, G0, d ); }
   1.798 +
   1.799 +  inline void neg( Register s, Register d ) { sub( G0, s, d ); }
   1.800 +  inline void neg( Register d )             { sub( G0, d, d ); }
   1.801 +
  // Compare-and-swap a 32-bit word (cas) or a 64-bit doubleword (casx)
  // in the primary address space.
  inline void cas(  Register s1, Register s2, Register d) { casa( s1, s2, d, ASI_PRIMARY); }
  inline void casx( Register s1, Register s2, Register d) { casxa(s1, s2, d, ASI_PRIMARY); }
  // Functions for isolating 64 bit atomic swaps for LP64
  // cas_ptr will perform cas for 32 bit VM's and casx for 64 bit VM's
  inline void cas_ptr(  Register s1, Register s2, Register d) {
#ifdef _LP64
    casx( s1, s2, d );
#else
    cas( s1, s2, d );
#endif
  }
   1.813 +
   1.814 +  // Functions for isolating 64 bit shifts for LP64
   1.815 +  inline void sll_ptr( Register s1, Register s2, Register d );
   1.816 +  inline void sll_ptr( Register s1, int imm6a,   Register d );
   1.817 +  inline void sll_ptr( Register s1, RegisterOrConstant s2, Register d );
   1.818 +  inline void srl_ptr( Register s1, Register s2, Register d );
   1.819 +  inline void srl_ptr( Register s1, int imm6a,   Register d );
   1.820 +
   1.821 +  // little-endian
   1.822 +  inline void casl(  Register s1, Register s2, Register d) { casa( s1, s2, d, ASI_PRIMARY_LITTLE); }
   1.823 +  inline void casxl( Register s1, Register s2, Register d) { casxa(s1, s2, d, ASI_PRIMARY_LITTLE); }
   1.824 +
   1.825 +  inline void inc(   Register d,  int const13 = 1 ) { add(   d, const13, d); }
   1.826 +  inline void inccc( Register d,  int const13 = 1 ) { addcc( d, const13, d); }
   1.827 +
   1.828 +  inline void dec(   Register d,  int const13 = 1 ) { sub(   d, const13, d); }
   1.829 +  inline void deccc( Register d,  int const13 = 1 ) { subcc( d, const13, d); }
   1.830 +
   1.831 +  using Assembler::add;
   1.832 +  inline void add(Register s1, int simm13a, Register d, relocInfo::relocType rtype);
   1.833 +  inline void add(Register s1, int simm13a, Register d, RelocationHolder const& rspec);
   1.834 +  inline void add(Register s1, RegisterOrConstant s2, Register d, int offset = 0);
   1.835 +  inline void add(const Address& a, Register d, int offset = 0);
   1.836 +
   1.837 +  using Assembler::andn;
   1.838 +  inline void andn(  Register s1, RegisterOrConstant s2, Register d);
   1.839 +
  // Bit-test: and the operands into %g0, updating only the condition codes.
  inline void btst( Register s1,  Register s2 ) { andcc( s1, s2, G0 ); }
  inline void btst( int simm13a,  Register s )  { andcc( s,  simm13a, G0 ); }

  // Bit-set: s2 = s1 | s2; immediate form: s |= simm13a.
  inline void bset( Register s1,  Register s2 ) { or3( s1, s2, s2 ); }
  inline void bset( int simm13a,  Register s )  { or3( s,  simm13a, s ); }

  // Bit-clear (note operand order): s2 = s1 & ~s2; immediate form: s &= ~simm13a.
  inline void bclr( Register s1,  Register s2 ) { andn( s1, s2, s2 ); }
  inline void bclr( int simm13a,  Register s )  { andn( s,  simm13a, s ); }

  // Bit-toggle: s2 = s1 ^ s2; immediate form: s ^= simm13a.
  inline void btog( Register s1,  Register s2 ) { xor3( s1, s2, s2 ); }
  inline void btog( int simm13a,  Register s )  { xor3( s,  simm13a, s ); }

  // Zero a register.
  inline void clr( Register d ) { or3( G0, G0, d ); }
   1.853 +
   1.854 +  inline void clrb( Register s1, Register s2);
   1.855 +  inline void clrh( Register s1, Register s2);
   1.856 +  inline void clr(  Register s1, Register s2);
   1.857 +  inline void clrx( Register s1, Register s2);
   1.858 +
   1.859 +  inline void clrb( Register s1, int simm13a);
   1.860 +  inline void clrh( Register s1, int simm13a);
   1.861 +  inline void clr(  Register s1, int simm13a);
   1.862 +  inline void clrx( Register s1, int simm13a);
   1.863 +
   1.864 +  // copy & clear upper word
   1.865 +  inline void clruw( Register s, Register d ) { srl( s, G0, d); }
   1.866 +  // clear upper word
   1.867 +  inline void clruwu( Register d ) { srl( d, G0, d); }
   1.868 +
   1.869 +  using Assembler::ldsb;
   1.870 +  using Assembler::ldsh;
   1.871 +  using Assembler::ldsw;
   1.872 +  using Assembler::ldub;
   1.873 +  using Assembler::lduh;
   1.874 +  using Assembler::lduw;
   1.875 +  using Assembler::ldx;
   1.876 +  using Assembler::ldd;
   1.877 +
   1.878 +#ifdef ASSERT
   1.879 +  // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
   1.880 +  inline void ld(Register s1, ByteSize simm13a, Register d);
   1.881 +#endif
   1.882 +
   1.883 +  inline void ld(Register s1, Register s2, Register d);
   1.884 +  inline void ld(Register s1, int simm13a, Register d);
   1.885 +
   1.886 +  inline void ldsb(const Address& a, Register d, int offset = 0);
   1.887 +  inline void ldsh(const Address& a, Register d, int offset = 0);
   1.888 +  inline void ldsw(const Address& a, Register d, int offset = 0);
   1.889 +  inline void ldub(const Address& a, Register d, int offset = 0);
   1.890 +  inline void lduh(const Address& a, Register d, int offset = 0);
   1.891 +  inline void lduw(const Address& a, Register d, int offset = 0);
   1.892 +  inline void ldx( const Address& a, Register d, int offset = 0);
   1.893 +  inline void ld(  const Address& a, Register d, int offset = 0);
   1.894 +  inline void ldd( const Address& a, Register d, int offset = 0);
   1.895 +
   1.896 +  inline void ldub(Register s1, RegisterOrConstant s2, Register d );
   1.897 +  inline void ldsb(Register s1, RegisterOrConstant s2, Register d );
   1.898 +  inline void lduh(Register s1, RegisterOrConstant s2, Register d );
   1.899 +  inline void ldsh(Register s1, RegisterOrConstant s2, Register d );
   1.900 +  inline void lduw(Register s1, RegisterOrConstant s2, Register d );
   1.901 +  inline void ldsw(Register s1, RegisterOrConstant s2, Register d );
   1.902 +  inline void ldx( Register s1, RegisterOrConstant s2, Register d );
   1.903 +  inline void ld(  Register s1, RegisterOrConstant s2, Register d );
   1.904 +  inline void ldd( Register s1, RegisterOrConstant s2, Register d );
   1.905 +
   1.906 +  using Assembler::ldf;
   1.907 +  inline void ldf(FloatRegisterImpl::Width w, Register s1, RegisterOrConstant s2, FloatRegister d);
   1.908 +  inline void ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset = 0);
   1.909 +
  // membar pseudo instruction; takes the target memory model into account.
   1.911 +  inline void membar( Assembler::Membar_mask_bits const7a );
   1.912 +
   1.913 +  // returns if membar generates anything.
   1.914 +  inline bool membar_has_effect( Assembler::Membar_mask_bits const7a );
   1.915 +
   1.916 +  // mov pseudo instructions
   1.917 +  inline void mov( Register s,  Register d) {
   1.918 +    if ( s != d )    or3( G0, s, d);
   1.919 +    else             assert_not_delayed();  // Put something useful in the delay slot!
   1.920 +  }
   1.921 +
   1.922 +  inline void mov_or_nop( Register s,  Register d) {
   1.923 +    if ( s != d )    or3( G0, s, d);
   1.924 +    else             nop();
   1.925 +  }
   1.926 +
   1.927 +  inline void mov( int simm13a, Register d) { or3( G0, simm13a, d); }
   1.928 +
   1.929 +  using Assembler::prefetch;
   1.930 +  inline void prefetch(const Address& a, PrefetchFcn F, int offset = 0);
   1.931 +
   1.932 +  using Assembler::stb;
   1.933 +  using Assembler::sth;
   1.934 +  using Assembler::stw;
   1.935 +  using Assembler::stx;
   1.936 +  using Assembler::std;
   1.937 +
   1.938 +#ifdef ASSERT
   1.939 +  // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
   1.940 +  inline void st(Register d, Register s1, ByteSize simm13a);
   1.941 +#endif
   1.942 +
   1.943 +  inline void st(Register d, Register s1, Register s2);
   1.944 +  inline void st(Register d, Register s1, int simm13a);
   1.945 +
   1.946 +  inline void stb(Register d, const Address& a, int offset = 0 );
   1.947 +  inline void sth(Register d, const Address& a, int offset = 0 );
   1.948 +  inline void stw(Register d, const Address& a, int offset = 0 );
   1.949 +  inline void stx(Register d, const Address& a, int offset = 0 );
   1.950 +  inline void st( Register d, const Address& a, int offset = 0 );
   1.951 +  inline void std(Register d, const Address& a, int offset = 0 );
   1.952 +
   1.953 +  inline void stb(Register d, Register s1, RegisterOrConstant s2 );
   1.954 +  inline void sth(Register d, Register s1, RegisterOrConstant s2 );
   1.955 +  inline void stw(Register d, Register s1, RegisterOrConstant s2 );
   1.956 +  inline void stx(Register d, Register s1, RegisterOrConstant s2 );
   1.957 +  inline void std(Register d, Register s1, RegisterOrConstant s2 );
   1.958 +  inline void st( Register d, Register s1, RegisterOrConstant s2 );
   1.959 +
   1.960 +  using Assembler::stf;
   1.961 +  inline void stf(FloatRegisterImpl::Width w, FloatRegister d, Register s1, RegisterOrConstant s2);
   1.962 +  inline void stf(FloatRegisterImpl::Width w, FloatRegister d, const Address& a, int offset = 0);
   1.963 +
   1.964 +  // Note: offset is added to s2.
   1.965 +  using Assembler::sub;
   1.966 +  inline void sub(Register s1, RegisterOrConstant s2, Register d, int offset = 0);
   1.967 +
   1.968 +  using Assembler::swap;
   1.969 +  inline void swap(const Address& a, Register d, int offset = 0);
   1.970 +
   1.971 +  // address pseudos: make these names unlike instruction names to avoid confusion
   1.972 +  inline intptr_t load_pc_address( Register reg, int bytes_to_skip );
   1.973 +  inline void load_contents(const AddressLiteral& addrlit, Register d, int offset = 0);
   1.974 +  inline void load_bool_contents(const AddressLiteral& addrlit, Register d, int offset = 0);
   1.975 +  inline void load_ptr_contents(const AddressLiteral& addrlit, Register d, int offset = 0);
   1.976 +  inline void store_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset = 0);
   1.977 +  inline void store_ptr_contents(Register s, const AddressLiteral& addrlit, Register temp, int offset = 0);
   1.978 +  inline void jumpl_to(const AddressLiteral& addrlit, Register temp, Register d, int offset = 0);
   1.979 +  inline void jump_to(const AddressLiteral& addrlit, Register temp, int offset = 0);
   1.980 +  inline void jump_indirect_to(Address& a, Register temp, int ld_offset = 0, int jmp_offset = 0);
   1.981 +
   1.982 +  // ring buffer traceable jumps
   1.983 +
   1.984 +  void jmp2( Register r1, Register r2, const char* file, int line );
   1.985 +  void jmp ( Register r1, int offset,  const char* file, int line );
   1.986 +
   1.987 +  void jumpl(const AddressLiteral& addrlit, Register temp, Register d, int offset, const char* file, int line);
   1.988 +  void jump (const AddressLiteral& addrlit, Register temp,             int offset, const char* file, int line);
   1.989 +
   1.990 +
   1.991 +  // argument pseudos:
   1.992 +
   1.993 +  inline void load_argument( Argument& a, Register  d );
   1.994 +  inline void store_argument( Register s, Argument& a );
   1.995 +  inline void store_ptr_argument( Register s, Argument& a );
   1.996 +  inline void store_float_argument( FloatRegister s, Argument& a );
   1.997 +  inline void store_double_argument( FloatRegister s, Argument& a );
   1.998 +  inline void store_long_argument( Register s, Argument& a );
   1.999 +
  1.1000 +  // handy macros:
  1.1001 +
  // Round register r up to a multiple of modulus.
  // NOTE(review): the and-mask trick only yields a correct rounding when
  // modulus is a power of two -- confirm at call sites.
  inline void round_to( Register r, int modulus ) {
    assert_not_delayed();
    inc( r, modulus - 1 );   // r += modulus - 1
    and3( r, -modulus, r );  // r &= -modulus (clear the low-order bits)
  }
  1.1007 +
  1.1008 +  // --------------------------------------------------
  1.1009 +
  1.1010 +  // Functions for isolating 64 bit loads for LP64
  1.1011 +  // ld_ptr will perform ld for 32 bit VM's and ldx for 64 bit VM's
  1.1012 +  // st_ptr will perform st for 32 bit VM's and stx for 64 bit VM's
  1.1013 +  inline void ld_ptr(Register s1, Register s2, Register d);
  1.1014 +  inline void ld_ptr(Register s1, int simm13a, Register d);
  1.1015 +  inline void ld_ptr(Register s1, RegisterOrConstant s2, Register d);
  1.1016 +  inline void ld_ptr(const Address& a, Register d, int offset = 0);
  1.1017 +  inline void st_ptr(Register d, Register s1, Register s2);
  1.1018 +  inline void st_ptr(Register d, Register s1, int simm13a);
  1.1019 +  inline void st_ptr(Register d, Register s1, RegisterOrConstant s2);
  1.1020 +  inline void st_ptr(Register d, const Address& a, int offset = 0);
  1.1021 +
  1.1022 +#ifdef ASSERT
  1.1023 +  // ByteSize is only a class when ASSERT is defined, otherwise it's an int.
  1.1024 +  inline void ld_ptr(Register s1, ByteSize simm13a, Register d);
  1.1025 +  inline void st_ptr(Register d, Register s1, ByteSize simm13a);
  1.1026 +#endif
  1.1027 +
  1.1028 +  // ld_long will perform ldd for 32 bit VM's and ldx for 64 bit VM's
  1.1029 +  // st_long will perform std for 32 bit VM's and stx for 64 bit VM's
  1.1030 +  inline void ld_long(Register s1, Register s2, Register d);
  1.1031 +  inline void ld_long(Register s1, int simm13a, Register d);
  1.1032 +  inline void ld_long(Register s1, RegisterOrConstant s2, Register d);
  1.1033 +  inline void ld_long(const Address& a, Register d, int offset = 0);
  1.1034 +  inline void st_long(Register d, Register s1, Register s2);
  1.1035 +  inline void st_long(Register d, Register s1, int simm13a);
  1.1036 +  inline void st_long(Register d, Register s1, RegisterOrConstant s2);
  1.1037 +  inline void st_long(Register d, const Address& a, int offset = 0);
  1.1038 +
  1.1039 +  // Helpers for address formation.
  1.1040 +  // - They emit only a move if s2 is a constant zero.
  1.1041 +  // - If dest is a constant and either s1 or s2 is a register, the temp argument is required and becomes the result.
  1.1042 +  // - If dest is a register and either s1 or s2 is a non-simm13 constant, the temp argument is required and used to materialize the constant.
  1.1043 +  RegisterOrConstant regcon_andn_ptr(RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg);
  1.1044 +  RegisterOrConstant regcon_inc_ptr( RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg);
  1.1045 +  RegisterOrConstant regcon_sll_ptr( RegisterOrConstant s1, RegisterOrConstant s2, RegisterOrConstant d, Register temp = noreg);
  1.1046 +
  1.1047 +  RegisterOrConstant ensure_simm13_or_reg(RegisterOrConstant src, Register temp) {
  1.1048 +    if (is_simm13(src.constant_or_zero()))
  1.1049 +      return src;               // register or short constant
  1.1050 +    guarantee(temp != noreg, "constant offset overflow");
  1.1051 +    set(src.as_constant(), temp);
  1.1052 +    return temp;
  1.1053 +  }
  1.1054 +
  1.1055 +  // --------------------------------------------------
  1.1056 +
  1.1057 + public:
  1.1058 +  // traps as per trap.h (SPARC ABI?)
  1.1059 +
  1.1060 +  void breakpoint_trap();
  1.1061 +  void breakpoint_trap(Condition c, CC cc);
  1.1062 +
  1.1063 +  // Support for serializing memory accesses between threads
  1.1064 +  void serialize_memory(Register thread, Register tmp1, Register tmp2);
  1.1065 +
  1.1066 +  // Stack frame creation/removal
  1.1067 +  void enter();
  1.1068 +  void leave();
  1.1069 +
  1.1070 +  // Manipulation of C++ bools
  1.1071 +  // These are idioms to flag the need for care with accessing bools but on
  1.1072 +  // this platform we assume byte size
  1.1073 +
  inline void stbool(Register d, const Address& a) { stb(d, a); }               // store a bool as one byte
  inline void ldbool(const Address& a, Register d) { ldub(a, d); }              // load a bool as an unsigned byte
  inline void movbool( bool boolconst, Register d) { mov( (int) boolconst, d); } // materialize 0 or 1 in d
  1.1077 +
  1.1078 +  // klass oop manipulations if compressed
  1.1079 +  void load_klass(Register src_oop, Register klass);
  1.1080 +  void store_klass(Register klass, Register dst_oop);
  1.1081 +  void store_klass_gap(Register s, Register dst_oop);
  1.1082 +
  1.1083 +   // oop manipulations
  1.1084 +  void load_heap_oop(const Address& s, Register d);
  1.1085 +  void load_heap_oop(Register s1, Register s2, Register d);
  1.1086 +  void load_heap_oop(Register s1, int simm13a, Register d);
  1.1087 +  void load_heap_oop(Register s1, RegisterOrConstant s2, Register d);
  1.1088 +  void store_heap_oop(Register d, Register s1, Register s2);
  1.1089 +  void store_heap_oop(Register d, Register s1, int simm13a);
  1.1090 +  void store_heap_oop(Register d, const Address& a, int offset = 0);
  1.1091 +
  void encode_heap_oop(Register src, Register dst);
  // In-place variant: encode r into itself.
  void encode_heap_oop(Register r) {
    encode_heap_oop(r, r);
  }
  void decode_heap_oop(Register src, Register dst);
  // In-place variant: decode r into itself.
  void decode_heap_oop(Register r) {
    decode_heap_oop(r, r);
  }
  1.1100 +  void encode_heap_oop_not_null(Register r);
  1.1101 +  void decode_heap_oop_not_null(Register r);
  1.1102 +  void encode_heap_oop_not_null(Register src, Register dst);
  1.1103 +  void decode_heap_oop_not_null(Register src, Register dst);
  1.1104 +
  1.1105 +  void encode_klass_not_null(Register r);
  1.1106 +  void decode_klass_not_null(Register r);
  1.1107 +  void encode_klass_not_null(Register src, Register dst);
  1.1108 +  void decode_klass_not_null(Register src, Register dst);
  1.1109 +
  1.1110 +  // Support for managing the JavaThread pointer (i.e.; the reference to
  1.1111 +  // thread-local information).
  1.1112 +  void get_thread();                                // load G2_thread
  1.1113 +  void verify_thread();                             // verify G2_thread contents
  1.1114 +  void save_thread   (const Register threache); // save to cache
  1.1115 +  void restore_thread(const Register thread_cache); // restore from cache
  1.1116 +
  1.1117 +  // Support for last Java frame (but use call_VM instead where possible)
  1.1118 +  void set_last_Java_frame(Register last_java_sp, Register last_Java_pc);
  1.1119 +  void reset_last_Java_frame(void);
  1.1120 +
  1.1121 +  // Call into the VM.
  1.1122 +  // Passes the thread pointer (in O0) as a prepended argument.
  1.1123 +  // Makes sure oop return values are visible to the GC.
  1.1124 +  void call_VM(Register oop_result, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
  1.1125 +  void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
  1.1126 +  void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  1.1127 +  void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
  1.1128 +
  1.1129 +  // these overloadings are not presently used on SPARC:
  1.1130 +  void call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
  1.1131 +  void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
  1.1132 +  void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
  1.1133 +  void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
  1.1134 +
  1.1135 +  void call_VM_leaf(Register thread_cache, address entry_point, int number_of_arguments = 0);
  1.1136 +  void call_VM_leaf(Register thread_cache, address entry_point, Register arg_1);
  1.1137 +  void call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2);
  1.1138 +  void call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2, Register arg_3);
  1.1139 +
  1.1140 +  void get_vm_result  (Register oop_result);
  1.1141 +  void get_vm_result_2(Register metadata_result);
  1.1142 +
  // the vm result is currently being hijacked for oop preservation
  1.1144 +  void set_vm_result(Register oop_result);
  1.1145 +
  1.1146 +  // Emit the CompiledIC call idiom
  1.1147 +  void ic_call(address entry, bool emit_delay = true);
  1.1148 +
  1.1149 +  // if call_VM_base was called with check_exceptions=false, then call
  1.1150 +  // check_and_forward_exception to handle exceptions when it is safe
  1.1151 +  void check_and_forward_exception(Register scratch_reg);
  1.1152 +
  // Write to the card table (the scratch register is destroyed afterwards).
  1.1154 +  void card_table_write(jbyte* byte_map_base, Register tmp, Register obj);
  1.1155 +
  1.1156 +  void card_write_barrier_post(Register store_addr, Register new_val, Register tmp);
  1.1157 +
  1.1158 +#if INCLUDE_ALL_GCS
  1.1159 +  // General G1 pre-barrier generator.
  1.1160 +  void g1_write_barrier_pre(Register obj, Register index, int offset, Register pre_val, Register tmp, bool preserve_o_regs);
  1.1161 +
  1.1162 +  // General G1 post-barrier generator
  1.1163 +  void g1_write_barrier_post(Register store_addr, Register new_val, Register tmp);
  1.1164 +#endif // INCLUDE_ALL_GCS
  1.1165 +
  1.1166 +  // pushes double TOS element of FPU stack on CPU stack; pops from FPU stack
  1.1167 +  void push_fTOS();
  1.1168 +
  1.1169 +  // pops double TOS element from CPU stack and pushes on FPU stack
  1.1170 +  void pop_fTOS();
  1.1171 +
  1.1172 +  void empty_FPU_stack();
  1.1173 +
  1.1174 +  void push_IU_state();
  1.1175 +  void pop_IU_state();
  1.1176 +
  1.1177 +  void push_FPU_state();
  1.1178 +  void pop_FPU_state();
  1.1179 +
  1.1180 +  void push_CPU_state();
  1.1181 +  void pop_CPU_state();
  1.1182 +
  1.1183 +  // Returns the byte size of the instructions generated by decode_klass_not_null().
  1.1184 +  static int instr_size_for_decode_klass_not_null();
  1.1185 +
  1.1186 +  // if heap base register is used - reinit it with the correct value
  1.1187 +  void reinit_heapbase();
  1.1188 +
  // Debugging

  // Out-of-line verification helpers.  Use the verify_* macros below so that
  // the failure message records the register name and the call site.
  void _verify_oop(Register reg, const char * msg, const char * file, int line);
  void _verify_oop_addr(Address addr, const char * msg, const char * file, int line);

  // TODO: verify_method and klass metadata (compare against vptr?)
  // Intentionally empty for now; kept so the verify_* macros below compile.
  void _verify_method_ptr(Register reg, const char * msg, const char * file, int line) {}
  void _verify_klass_ptr(Register reg, const char * msg, const char * file, int line){}

  // Convenience wrappers that capture __FILE__/__LINE__ at the check site.
  // The oop checks are only active with +VerifyOops.
#define verify_oop(reg) _verify_oop(reg, "broken oop " #reg, __FILE__, __LINE__)
#define verify_oop_addr(addr) _verify_oop_addr(addr, "broken oop addr ", __FILE__, __LINE__)
#define verify_method_ptr(reg) _verify_method_ptr(reg, "broken method " #reg, __FILE__, __LINE__)
#define verify_klass_ptr(reg) _verify_klass_ptr(reg, "broken klass " #reg, __FILE__, __LINE__)

  // only active with +VerifyFPU
  void verify_FPU(int stack_depth, const char* s = "illegal FPU state");

  void stop(const char* msg);                          // prints msg, dumps registers and stops execution
  void warn(const char* msg);                          // prints msg, but don't stop
  void untested(const char* what = "");
  // NOTE: the 1K message buffer is never freed; stop() halts execution anyway.
  void unimplemented(const char* what = "")      { char* b = new char[1024];  jio_snprintf(b, 1024, "unimplemented: %s", what);  stop(b); }
  void should_not_reach_here()                   { stop("should not reach here"); }
  void print_CPU_state();
  1.1211 +
  // oops in code
  AddressLiteral allocate_oop_address(jobject obj);                          // allocate_index
  AddressLiteral constant_oop_address(jobject obj);                          // find_index
  inline void    set_oop             (jobject obj, Register d);              // uses allocate_oop_address
  inline void    set_oop_constant    (jobject obj, Register d);              // uses constant_oop_address
  inline void    set_oop             (const AddressLiteral& obj_addr, Register d); // same as load_address

  // metadata in code that we have to keep track of
  AddressLiteral allocate_metadata_address(Metadata* obj); // allocate_index
  AddressLiteral constant_metadata_address(Metadata* obj); // find_index
  inline void    set_metadata             (Metadata* obj, Register d);              // uses allocate_metadata_address
  inline void    set_metadata_constant    (Metadata* obj, Register d);              // uses constant_metadata_address
  inline void    set_metadata             (const AddressLiteral& obj_addr, Register d); // same as load_address

  // Materialize compressed (narrow) oop / klass constants into register d.
  void set_narrow_oop( jobject obj, Register d );
  void set_narrow_klass( Klass* k, Register d );

  // nop padding until the code position is aligned to the given modulus (bytes)
  void align(int modulus);

  // declare a safepoint
  void safepoint();
  1.1234 +
  // factor out part of stop into subroutine to save space
  void stop_subroutine();
  // factor out part of verify_oop into subroutine to save space
  void verify_oop_subroutine();

  // side-door communication with signalHandler in os_solaris.cpp
  static address _verify_oop_implicit_branch[3];

  // Total frame size (in bytes) for a frame holding extraWords extra words.
  int total_frame_size_in_bytes(int extraWords);

  // used when extraWords known statically
  void save_frame(int extraWords = 0);
  void save_frame_c1(int size_in_bytes);
  // make a frame, and simultaneously pass up one or two register values
  // into the new register window
  void save_frame_and_mov(int extraWords, Register s1, Register d1, Register s2 = Register(), Register d2 = Register());

  // give no. (outgoing) params, calc # of words will need on frame
  void calc_mem_param_words(Register Rparam_words, Register Rresult);

  // used to calculate frame size dynamically
  // result is in bytes and must be negated for save inst
  void calc_frame_size(Register extraWords, Register resultReg);

  // calc and also save
  void calc_frame_size_and_save(Register extraWords, Register resultReg);

  // Runtime helper used by the debug stop machinery to dump register state.
  static void debug(char* msg, RegistersForDebugging* outWindow);
  1.1263 +
  // implementations of bytecodes used by both interpreter and compiler

  // 64-bit long compare built from a hi/low register pair per operand
  // (32-bit word size); result of the comparison goes into Rresult.
  void lcmp( Register Ra_hi, Register Ra_low,
             Register Rb_hi, Register Rb_low,
             Register Rresult);

  // Negate the 64-bit long held in the Rhi/Rlow register pair, in place.
  void lneg( Register Rhi, Register Rlow );

  // 64-bit shifts on a hi/low register pair; shift amount in Rcount,
  // result in Rout_high/Rout_low, Rtemp is a scratch register.
  void lshl(  Register Rin_high,  Register Rin_low,  Register Rcount,
              Register Rout_high, Register Rout_low, Register Rtemp );

  // Arithmetic (sign-propagating) right shift variant of the above.
  void lshr(  Register Rin_high,  Register Rin_low,  Register Rcount,
              Register Rout_high, Register Rout_low, Register Rtemp );

  // Logical (zero-filling) right shift variant of the above.
  void lushr( Register Rin_high,  Register Rin_low,  Register Rcount,
              Register Rout_high, Register Rout_low, Register Rtemp );

#ifdef _LP64
  // On 64-bit, a long fits in one register, so compare single registers.
  void lcmp( Register Ra, Register Rb, Register Rresult);
#endif

  // Load and store values by size and signed-ness
  void load_sized_value( Address src, Register dst, size_t size_in_bytes, bool is_signed);
  void store_sized_value(Register src, Address dst, size_t size_in_bytes);

  // Float/double compare; unordered_result selects the value produced when
  // either operand is NaN (the comparison is unordered).
  void float_cmp( bool is_float, int unordered_result,
                  FloatRegister Fa, FloatRegister Fb,
                  Register Rresult);

  // Copy all global registers into locals and back (for code that must
  // preserve globals across a call).
  void save_all_globals_into_locals();
  void restore_globals_from_locals();
  1.1295 +
  // These set the icc condition code to equal if the lock succeeded
  // and notEqual if it failed and requires a slow case
  void compiler_lock_object(Register Roop, Register Rmark, Register Rbox,
                            Register Rscratch,
                            BiasedLockingCounters* counters = NULL,
                            bool try_bias = UseBiasedLocking);
  void compiler_unlock_object(Register Roop, Register Rmark, Register Rbox,
                              Register Rscratch,
                              bool try_bias = UseBiasedLocking);

  // Biased locking support
  // Upon entry, lock_reg must point to the lock record on the stack,
  // obj_reg must contain the target object, and mark_reg must contain
  // the target object's header.
  // Destroys mark_reg if an attempt is made to bias an anonymously
  // biased lock. In this case a failure will go either to the slow
  // case or fall through with the notEqual condition code set with
  // the expectation that the slow case in the runtime will be called.
  // In the fall-through case where the CAS-based lock is done,
  // mark_reg is not destroyed.
  void biased_locking_enter(Register obj_reg, Register mark_reg, Register temp_reg,
                            Label& done, Label* slow_case = NULL,
                            BiasedLockingCounters* counters = NULL);

  // biased_locking_exit:
  // Upon entry, the base register of mark_addr must contain the oop.
  // Destroys temp_reg.
  // If allow_delay_slot_filling is set to true, the next instruction
  // emitted after this one will go in an annulled delay slot if the
  // biased locking exit case failed.
  void biased_locking_exit(Address mark_addr, Register temp_reg, Label& done, bool allow_delay_slot_filling = false);
  1.1326 +
  // allocation

  // Fast-path allocation directly in eden; branches to slow_case on failure.
  void eden_allocate(
    Register obj,                      // result: pointer to object after successful allocation
    Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,        // object size in bytes if   known at compile time
    Register t1,                       // temp register
    Register t2,                       // temp register
    Label&   slow_case                 // continuation point if fast allocation fails
  );
  // Fast-path allocation in the current thread-local allocation buffer.
  void tlab_allocate(
    Register obj,                      // result: pointer to object after successful allocation
    Register var_size_in_bytes,        // object size in bytes if unknown at compile time; invalid otherwise
    int      con_size_in_bytes,        // object size in bytes if   known at compile time
    Register t1,                       // temp register
    Label&   slow_case                 // continuation point if fast allocation fails
  );
  // Refill the TLAB, branching to retry_tlab / try_eden / slow_case as appropriate.
  void tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case);
  // Bump the per-thread allocated-bytes counter by size_in_bytes.
  void incr_allocated_bytes(RegisterOrConstant size_in_bytes,
                            Register t1, Register t2);
  1.1346 +
  // interface method calling
  // Looks up the itable entry for intf_klass in recv_klass and loads the
  // resulting Method* into method_result; branches to no_such_interface
  // if the receiver does not implement the interface.
  void lookup_interface_method(Register recv_klass,
                               Register intf_klass,
                               RegisterOrConstant itable_index,
                               Register method_result,
                               Register temp_reg, Register temp2_reg,
                               Label& no_such_interface);

  // virtual method calling
  void lookup_virtual_method(Register recv_klass,
                             RegisterOrConstant vtable_index,
                             Register method_result);

  // Test sub_klass against super_klass, with fast and slow paths.

  // The fast path produces a tri-state answer: yes / no / maybe-slow.
  // One of the three labels can be NULL, meaning take the fall-through.
  // If super_check_offset is -1, the value is loaded up from super_klass.
  // No registers are killed, except temp_reg and temp2_reg.
  // If super_check_offset is not -1, temp2_reg is not used and can be noreg.
  void check_klass_subtype_fast_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Label* L_success,
                                     Label* L_failure,
                                     Label* L_slow_path,
                RegisterOrConstant super_check_offset = RegisterOrConstant(-1));

  // The rest of the type check; must be wired to a corresponding fast path.
  // It does not repeat the fast path logic, so don't use it standalone.
  // The temp_reg can be noreg, if no temps are available.
  // It can also be sub_klass or super_klass, meaning it's OK to kill that one.
  // Updates the sub's secondary super cache as necessary.
  void check_klass_subtype_slow_path(Register sub_klass,
                                     Register super_klass,
                                     Register temp_reg,
                                     Register temp2_reg,
                                     Register temp3_reg,
                                     Register temp4_reg,
                                     Label* L_success,
                                     Label* L_failure);

  // Simplified, combined version, good for typical uses.
  // Falls through on failure.
  void check_klass_subtype(Register sub_klass,
                           Register super_klass,
                           Register temp_reg,
                           Register temp2_reg,
                           Label& L_success);
  1.1397 +
  // method handles (JSR 292)
  // offset relative to Gargs of argument at tos[arg_slot].
  // (arg_slot == 0 means the last argument, not the first).
  RegisterOrConstant argument_offset(RegisterOrConstant arg_slot,
                                     Register temp_reg,
                                     int extra_slot_offset = 0);
  // Address of Gargs and argument_offset.
  Address            argument_address(RegisterOrConstant arg_slot,
                                      Register temp_reg = noreg,
                                      int extra_slot_offset = 0);
  1.1408 +
  // Stack overflow checking

  // Probe the stack page 'offset' bytes below SP by storing G0 there; if that
  // page is an unmapped guard page the store traps, triggering stack-overflow
  // handling.  Note: this clobbers G3_scratch.
  void bang_stack_with_offset(int offset) {
    // stack grows down, caller passes positive offset
    // (the offset is negated here, hence the assert message below)
    assert(offset > 0, "must bang with negative offset");
    set((-offset)+STACK_BIAS, G3_scratch);
    st(G0, SP, G3_scratch);
  }

  // Writes to stack successive pages until offset reached to check for
  // stack overflow + shadow pages.  Clobbers tsp and scratch registers.
  void bang_stack_size(Register Rsize, Register Rtsp, Register Rscratch);

  virtual RegisterOrConstant delayed_value_impl(intptr_t* delayed_value_addr, Register tmp, int offset);

  // Sanity-check the current thread's TLAB (debug builds).
  void verify_tlab();

  // Returns the logical negation of the given condition code.
  Condition negate_condition(Condition cond);

  // Helper functions for statistics gathering.
  // Conditionally (non-atomically) increments passed counter address, preserving condition codes.
  void cond_inc(Condition cond, address counter_addr, Register Rtemp1, Register Rtemp2);
  // Unconditional increment.
  void inc_counter(address counter_addr, Register Rtmp1, Register Rtmp2);
  void inc_counter(int*    counter_addr, Register Rtmp1, Register Rtmp2);

  // Compare char[] arrays aligned to 4 bytes.
  void char_arrays_equals(Register ary1, Register ary2,
                          Register limit, Register result,
                          Register chr1, Register chr2, Label& Ldone);
  // Use BIS for zeroing
  void bis_zeroing(Register to, Register count, Register temp, Label& Ldone);
  1.1442 +
  1.1443 +#undef VIRTUAL
  1.1444 +};
  1.1445 +
/**
 * class SkipIfEqual:
 *
 * Instantiating this class will result in assembly code being output that will
 * jump around any code emitted between the creation of the instance and its
 * automatic destruction at the end of a scope block, depending on the value of
 * the flag passed to the constructor, which will be checked at run-time.
 */
class SkipIfEqual : public StackObj {
 private:
  MacroAssembler* _masm;  // assembler the conditional skip is emitted into
  Label _label;           // target of the skip; presumably bound by the destructor - confirm in .cpp

 public:
   // 'temp' is a temp register that this object can use (and trash)
   SkipIfEqual(MacroAssembler*, Register temp,
               const bool* flag_addr, Assembler::Condition condition);
   ~SkipIfEqual();
};
  1.1465 +
  1.1466 +#endif // CPU_SPARC_VM_MACROASSEMBLER_SPARC_HPP

mercurial