src/share/vm/opto/graphKit.hpp

changeset:   5658:edb5ab0f3fe5
author:      vlivanov
date:        Tue, 10 Sep 2013 14:51:48 -0700
parent:      5637:29aa8936f03c
child:       5908:d9043b88eeb3
permissions: -rw-r--r--

8001107: @Stable annotation for constant folding of lazily evaluated variables
Reviewed-by: rbackman, twisti, kvn
Contributed-by: john.r.rose@oracle.com, vladimir.x.ivanov@oracle.com

/*
 * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_GRAPHKIT_HPP
#define SHARE_VM_OPTO_GRAPHKIT_HPP

#include "ci/ciEnv.hpp"
#include "ci/ciMethodData.hpp"
#include "opto/addnode.hpp"
#include "opto/callnode.hpp"
#include "opto/cfgnode.hpp"
#include "opto/compile.hpp"
#include "opto/divnode.hpp"
#include "opto/mulnode.hpp"
#include "opto/phaseX.hpp"
#include "opto/subnode.hpp"
#include "opto/type.hpp"
#include "runtime/deoptimization.hpp"

class FastLockNode;
class FastUnlockNode;
class IdealKit;
class LibraryCallKit;
class Parse;
class RootNode;

//-----------------------------------------------------------------------------
//----------------------------GraphKit-----------------------------------------
// Toolkit for building the common sorts of subgraphs.
// Does not know about bytecode parsing or type-flow results.
// It is able to create graphs implementing the semantics of most
// or all bytecodes, so that it can expand intrinsics and calls.
// It may depend on JVMState structure, but it must not depend
// on specific bytecode streams.
class GraphKit : public Phase {
  friend class PreserveJVMState;

 protected:
  ciEnv*            _env;       // Compilation environment
  PhaseGVN         &_gvn;       // Some optimizations while parsing
  SafePointNode*    _map;       // Parser map from JVM to Nodes
  SafePointNode*    _exceptions;// Parser map(s) for exception state(s)
  int               _bci;       // JVM Bytecode Pointer
  ciMethod*         _method;    // JVM Current Method

 private:
  int               _sp;        // JVM Expression Stack Pointer; don't modify directly!

 private:
  SafePointNode*    map_not_null() const {
    assert(_map != NULL, "must call stopped() to test for reset compiler map");
    return _map;
  }

 public:
  GraphKit();                   // empty constructor
  GraphKit(JVMState* jvms);     // the JVM state on which to operate

#ifdef ASSERT
  ~GraphKit() {
    assert(!has_exceptions(), "user must call transfer_exceptions_into_jvms");
  }
#endif

  virtual Parse*          is_Parse()          const { return NULL; }
  virtual LibraryCallKit* is_LibraryCallKit() const { return NULL; }

  ciEnv*        env()   const { return _env; }
  PhaseGVN&     gvn()   const { return _gvn; }

  void record_for_igvn(Node* n) const { C->record_for_igvn(n); }  // delegate to Compile

  // Handy well-known nodes:
  Node*         null()  const { return zerocon(T_OBJECT); }
  Node*         top()   const { return C->top(); }
  RootNode*     root()  const { return C->root(); }

  // Create or find a constant node
  Node* intcon(jint con)       const { return _gvn.intcon(con); }
  Node* longcon(jlong con)     const { return _gvn.longcon(con); }
  Node* makecon(const Type *t) const { return _gvn.makecon(t); }
  Node* zerocon(BasicType bt)  const { return _gvn.zerocon(bt); }
  // (See also macro MakeConX in type.hpp, which uses intcon or longcon.)
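  //
  // A usage sketch (the values are arbitrary examples, not from this file):
  //   Node* zero = intcon(0);                  // TypeInt constant
  //   Node* big  = longcon(CONST64(1) << 32);  // TypeLong constant
  //   Node* nil  = zerocon(T_OBJECT);          // the same node null() returns
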
  // Helper for byte_map_base
  Node* byte_map_base_node() {
    // Get base of card map
    CardTableModRefBS* ct = (CardTableModRefBS*)(Universe::heap()->barrier_set());
    assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust users of this code");
    if (ct->byte_map_base != NULL) {
      return makecon(TypeRawPtr::make((address)ct->byte_map_base));
    } else {
      return null();
    }
  }

  jint  find_int_con(Node* n, jint value_if_unknown) {
    return _gvn.find_int_con(n, value_if_unknown);
  }
  jlong find_long_con(Node* n, jlong value_if_unknown) {
    return _gvn.find_long_con(n, value_if_unknown);
  }
  // (See also macro find_intptr_t_con in type.hpp, which uses one of these.)

  // JVM State accessors:
  // Parser mapping from JVM indices into Nodes.
  // Low slots are accessed via the StartNode enum values.
  // Then come the locals at StartNode::Parms to StartNode::Parms+max_locals();
  // Then come JVM stack slots.
  // Finally come the monitors, if any.
  // See layout accessors in class JVMState.

  SafePointNode*     map()      const { return _map; }
  bool               has_exceptions() const { return _exceptions != NULL; }
  JVMState*          jvms()     const { return map_not_null()->_jvms; }
  int                sp()       const { return _sp; }
  int                bci()      const { return _bci; }
  Bytecodes::Code    java_bc()  const;
  ciMethod*          method()   const { return _method; }

  void set_jvms(JVMState* jvms) { set_map(jvms->map());
                                  assert(jvms == this->jvms(), "sanity");
                                  _sp = jvms->sp();
                                  _bci = jvms->bci();
                                  _method = jvms->has_method() ? jvms->method() : NULL; }
  void set_map(SafePointNode* m) { _map = m; debug_only(verify_map()); }
  void set_sp(int sp)            { assert(sp >= 0, err_msg_res("sp must be non-negative: %d", sp)); _sp = sp; }
  void clean_stack(int from_sp); // clear garbage beyond from_sp to top

  void inc_sp(int i)             { set_sp(sp() + i); }
  void dec_sp(int i)             { set_sp(sp() - i); }
  void set_bci(int bci)          { _bci = bci; }

  // Make sure jvms has current bci & sp.
  JVMState* sync_jvms() const;
  JVMState* sync_jvms_for_reexecute();

#ifdef ASSERT
  // Make sure JVMS has an updated copy of bci and sp.
  // Also sanity-check method, depth, and monitor depth.
  bool jvms_in_sync() const;

  // Make sure the map looks OK.
  void verify_map() const;

  // Make sure a proposed exception state looks OK.
  static void verify_exception_state(SafePointNode* ex_map);
#endif

  // Clone the existing map state.  (Implements PreserveJVMState.)
  SafePointNode* clone_map();

  // Set the map to a clone of the given one.
  void set_map_clone(SafePointNode* m);

  // Tell if the compilation is failing.
  bool failing() const { return C->failing(); }

  // Set _map to NULL, signalling a stop to further bytecode execution.
  // Preserve the map intact for future use, and return it back to the caller.
  SafePointNode* stop() { SafePointNode* m = map(); set_map(NULL); return m; }

  // Stop, but first smash the map's inputs to NULL, to mark it dead.
  void stop_and_kill_map();

  // Tell if _map is NULL, or control is top.
  bool stopped();

  // Tell if this method or any caller method has exception handlers.
  bool has_ex_handler();

  // Save an exception without blowing stack contents or other JVM state.
  // (The extra pointer is stuck with add_req on the map, beyond the JVMS.)
  static void set_saved_ex_oop(SafePointNode* ex_map, Node* ex_oop);

  // Recover a saved exception from its map.
  static Node* saved_ex_oop(SafePointNode* ex_map);

  // Recover a saved exception from its map, and remove it from the map.
  static Node* clear_saved_ex_oop(SafePointNode* ex_map);

#ifdef ASSERT
  // Recover a saved exception from its map, and remove it from the map.
  static bool has_saved_ex_oop(SafePointNode* ex_map);
#endif

  // Push an exception in the canonical position for handlers (stack(0)).
  void push_ex_oop(Node* ex_oop) {
    ensure_stack(1);  // ensure room to push the exception
    set_stack(0, ex_oop);
    set_sp(1);
    clean_stack(1);
  }

  // Detach and return an exception state.
  SafePointNode* pop_exception_state() {
    SafePointNode* ex_map = _exceptions;
    if (ex_map != NULL) {
      _exceptions = ex_map->next_exception();
      ex_map->set_next_exception(NULL);
      debug_only(verify_exception_state(ex_map));
    }
    return ex_map;
  }

  // Add an exception, using the given JVM state, without commoning.
  void push_exception_state(SafePointNode* ex_map) {
    debug_only(verify_exception_state(ex_map));
    ex_map->set_next_exception(_exceptions);
    _exceptions = ex_map;
  }

  // Turn the current JVM state into an exception state, appending the ex_oop.
  SafePointNode* make_exception_state(Node* ex_oop);

  // Add an exception, using the given JVM state.
  // Combine all exceptions with a common exception type into a single state.
  // (This is done via combine_exception_states.)
  void add_exception_state(SafePointNode* ex_map);

  // Combine all exceptions of any sort whatever into a single master state.
  SafePointNode* combine_and_pop_all_exception_states() {
    if (_exceptions == NULL)  return NULL;
    SafePointNode* phi_map = pop_exception_state();
    SafePointNode* ex_map;
    while ((ex_map = pop_exception_state()) != NULL) {
      combine_exception_states(ex_map, phi_map);
    }
    return phi_map;
  }

  // Combine the two exception states, building phis as necessary.
  // The second argument is updated to include contributions from the first.
  void combine_exception_states(SafePointNode* ex_map, SafePointNode* phi_map);

  // Reset the map to the given state.  If there are any half-finished phis
  // in it (created by combine_exception_states), transform them now.
  // Returns the exception oop.  (Caller must call push_ex_oop if required.)
  Node* use_exception_state(SafePointNode* ex_map);

  // Collect exceptions from a given JVM state into my exception list.
  void add_exception_states_from(JVMState* jvms);

  // Collect all raised exceptions into the current JVM state.
  // Clear the current exception list and map, returns the combined states.
  JVMState* transfer_exceptions_into_jvms();
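  //
  // A sketch of the usual protocol, using the helpers above: each throw
  // site records its state, and the caller later drains the whole list:
  //   add_exception_state(make_exception_state(ex_oop));   // at a throw site
  //   ...
  //   JVMState* ex_jvms = transfer_exceptions_into_jvms(); // at the boundary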

  // Helper to throw a built-in exception.
  // Range checks take the offending index.
  // Cast and array store checks take the offending class.
  // Others do not take the optional argument.
  // The JVMS must allow the bytecode to be re-executed
  // via an uncommon trap.
  void builtin_throw(Deoptimization::DeoptReason reason, Node* arg = NULL);

  // Helper to check the JavaThread::_should_post_on_exceptions flag
  // and branch to an uncommon_trap if it is true (with the specified reason and must_throw)
  void uncommon_trap_if_should_post_on_exceptions(Deoptimization::DeoptReason reason,
                                                  bool must_throw);

  // Helper Functions for adding debug information
  void kill_dead_locals();
#ifdef ASSERT
  bool dead_locals_are_killed();
#endif
  // The call may deoptimize.  Supply required JVM state as debug info.
  // If must_throw is true, the call is guaranteed not to return normally.
  void add_safepoint_edges(SafePointNode* call,
                           bool must_throw = false);

  // How many stack inputs does the current BC consume?
  // And, how does the stack change after the bytecode?
  // Returns false if unknown.
  bool compute_stack_effects(int& inputs, int& depth);

  // Add a fixed offset to a pointer
  Node* basic_plus_adr(Node* base, Node* ptr, intptr_t offset) {
    return basic_plus_adr(base, ptr, MakeConX(offset));
  }
  Node* basic_plus_adr(Node* base, intptr_t offset) {
    return basic_plus_adr(base, base, MakeConX(offset));
  }
  // Add a variable offset to a pointer
  Node* basic_plus_adr(Node* base, Node* offset) {
    return basic_plus_adr(base, base, offset);
  }
  Node* basic_plus_adr(Node* base, Node* ptr, Node* offset);
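  // E.g., a sketch of addressing the klass field in an object header:
  //   Node* adr = basic_plus_adr(obj, oopDesc::klass_offset_in_bytes());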
duke@435 309
never@1515 310
never@1515 311 // Some convenient shortcuts for common nodes
  Node* IfTrue(IfNode* iff)                   { return _gvn.transform(new (C) IfTrueNode(iff));      }
  Node* IfFalse(IfNode* iff)                  { return _gvn.transform(new (C) IfFalseNode(iff));     }

  Node* AddI(Node* l, Node* r)                { return _gvn.transform(new (C) AddINode(l, r));       }
  Node* SubI(Node* l, Node* r)                { return _gvn.transform(new (C) SubINode(l, r));       }
  Node* MulI(Node* l, Node* r)                { return _gvn.transform(new (C) MulINode(l, r));       }
  Node* DivI(Node* ctl, Node* l, Node* r)     { return _gvn.transform(new (C) DivINode(ctl, l, r));  }

  Node* AndI(Node* l, Node* r)                { return _gvn.transform(new (C) AndINode(l, r));       }
  Node* OrI(Node* l, Node* r)                 { return _gvn.transform(new (C) OrINode(l, r));        }
  Node* XorI(Node* l, Node* r)                { return _gvn.transform(new (C) XorINode(l, r));       }

  Node* MaxI(Node* l, Node* r)                { return _gvn.transform(new (C) MaxINode(l, r));       }
  Node* MinI(Node* l, Node* r)                { return _gvn.transform(new (C) MinINode(l, r));       }

  Node* LShiftI(Node* l, Node* r)             { return _gvn.transform(new (C) LShiftINode(l, r));    }
  Node* RShiftI(Node* l, Node* r)             { return _gvn.transform(new (C) RShiftINode(l, r));    }
  Node* URShiftI(Node* l, Node* r)            { return _gvn.transform(new (C) URShiftINode(l, r));   }

  Node* CmpI(Node* l, Node* r)                { return _gvn.transform(new (C) CmpINode(l, r));       }
  Node* CmpL(Node* l, Node* r)                { return _gvn.transform(new (C) CmpLNode(l, r));       }
  Node* CmpP(Node* l, Node* r)                { return _gvn.transform(new (C) CmpPNode(l, r));       }
  Node* Bool(Node* cmp, BoolTest::mask relop) { return _gvn.transform(new (C) BoolNode(cmp, relop)); }

  Node* AddP(Node* b, Node* a, Node* o)       { return _gvn.transform(new (C) AddPNode(b, a, o));    }
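  // These compose directly; e.g., a sketch of computing 2*i + 1:
  //   Node* r = AddI(LShiftI(i, intcon(1)), intcon(1));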

  // Convert between int and long, and size_t.
  // (See macros ConvI2X, etc., in type.hpp for ConvI2X, etc.)
  Node* ConvI2L(Node* offset);
  Node* ConvL2I(Node* offset);
  // Find out the klass of an object.
  Node* load_object_klass(Node* object);
  // Find out the length of an array.
  Node* load_array_length(Node* array);


  // Helper function to do a NULL pointer check or ZERO check based on type.
  // Throw an exception if a given value is null.
  // Return the value cast to not-null.
  // Be clever about equivalent dominating null checks.
  Node* null_check_common(Node* value, BasicType type,
                          bool assert_null = false, Node* *null_control = NULL);
  Node* null_check(Node* value, BasicType type = T_OBJECT) {
    return null_check_common(value, type);
  }
  Node* null_check_receiver() {
    assert(argument(0)->bottom_type()->isa_ptr(), "must be");
    return null_check(argument(0));
  }
  Node* zero_check_int(Node* value) {
    assert(value->bottom_type()->basic_type() == T_INT,
           err_msg_res("wrong type: %s", type2name(value->bottom_type()->basic_type())));
    return null_check_common(value, T_INT);
  }
  Node* zero_check_long(Node* value) {
    assert(value->bottom_type()->basic_type() == T_LONG,
           err_msg_res("wrong type: %s", type2name(value->bottom_type()->basic_type())));
    return null_check_common(value, T_LONG);
  }
  // Throw an uncommon trap if a given value is __not__ null.
  // Return the value cast to null, and be clever about dominating checks.
  Node* null_assert(Node* value, BasicType type = T_OBJECT) {
    return null_check_common(value, type, true);
  }

  // Null check oop.  Return null-path control into (*null_control).
  // Return a cast-not-null node which depends on the not-null control.
  // If never_see_null, use an uncommon trap (*null_control sees a top).
  // The cast is not valid along the null path; keep a copy of the original.
  Node* null_check_oop(Node* value, Node* *null_control,
                       bool never_see_null = false);
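  // Typical use, sketched: branch on nullness without throwing:
  //   Node* null_ctl = top();
  //   Node* cast = null_check_oop(obj, &null_ctl);
  //   // here control() is the not-null path, and null_ctl the null path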

  // Check the null_seen bit.
  bool seems_never_null(Node* obj, ciProfileData* data);

  // Use the type profile to narrow an object type.
  Node* maybe_cast_profiled_receiver(Node* not_null_obj,
                                     ciProfileData* data,
                                     ciKlass* require_klass);

  // Cast obj to not-null on this path
  Node* cast_not_null(Node* obj, bool do_replace_in_map = true);
  // Replace all occurrences of one node by another.
  void replace_in_map(Node* old, Node* neww);

  void  push(Node* n)     { map_not_null();        _map->set_stack(_map->_jvms, _sp++, n); }
  Node* pop()             { map_not_null(); return _map->stack(_map->_jvms, --_sp); }
  Node* peek(int off = 0) { map_not_null(); return _map->stack(_map->_jvms, _sp - off - 1); }

  void push_pair(Node* ldval) {
    push(ldval);
    push(top());  // the halfword is merely a placeholder
  }
  void push_pair_local(int i) {
    // longs are stored in locals in "push" order
    push(  local(i+0) );  // the real value
    assert(local(i+1) == top(), "");
    push(top());  // halfword placeholder
  }
  Node* pop_pair() {
    // the second half is pushed last & popped first; it contains exactly nothing
    Node* halfword = pop();
    assert(halfword == top(), "");
    // the long bits are pushed first & popped last:
    return pop();
  }
  void set_pair_local(int i, Node* lval) {
    // longs are stored in locals as a value/half pair (like doubles)
    set_local(i+0, lval);
    set_local(i+1, top());
  }
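  // Two-word values take two stack slots; a sketch of a round trip:
  //   push_pair(lval);       // pushes lval plus a top() placeholder
  //   Node* l = pop_pair();  // pops the placeholder, then returns lval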

  // Push the node, which may be zero, one, or two words.
  void push_node(BasicType n_type, Node* n) {
    int n_size = type2size[n_type];
    if      (n_size == 1)  push(      n );  // T_INT, ...
    else if (n_size == 2)  push_pair( n );  // T_DOUBLE, T_LONG
    else                   { assert(n_size == 0, "must be T_VOID"); }
  }

  Node* pop_node(BasicType n_type) {
    int n_size = type2size[n_type];
    if      (n_size == 1)  return pop();
    else if (n_size == 2)  return pop_pair();
    else                   return NULL;
  }

  Node* control()               const { return map_not_null()->control(); }
  Node* i_o()                   const { return map_not_null()->i_o(); }
  Node* returnadr()             const { return map_not_null()->returnadr(); }
  Node* frameptr()              const { return map_not_null()->frameptr(); }
  Node* local(uint idx)         const { map_not_null(); return _map->local(_map->_jvms, idx); }
  Node* stack(uint idx)         const { map_not_null(); return _map->stack(_map->_jvms, idx); }
  Node* argument(uint idx)      const { map_not_null(); return _map->argument(_map->_jvms, idx); }
  Node* monitor_box(uint idx)   const { map_not_null(); return _map->monitor_box(_map->_jvms, idx); }
  Node* monitor_obj(uint idx)   const { map_not_null(); return _map->monitor_obj(_map->_jvms, idx); }

  void set_control  (Node* c)         { map_not_null()->set_control(c); }
  void set_i_o      (Node* c)         { map_not_null()->set_i_o(c); }
  void set_local(uint idx, Node* c)   { map_not_null(); _map->set_local(_map->_jvms, idx, c); }
  void set_stack(uint idx, Node* c)   { map_not_null(); _map->set_stack(_map->_jvms, idx, c); }
  void set_argument(uint idx, Node* c){ map_not_null(); _map->set_argument(_map->_jvms, idx, c); }
  void ensure_stack(uint stk_size)    { map_not_null(); _map->ensure_stack(_map->_jvms, stk_size); }

  // Access unaliased memory
  Node* memory(uint alias_idx);
  Node* memory(const TypePtr *tp) { return memory(C->get_alias_index(tp)); }
  Node* memory(Node* adr) { return memory(_gvn.type(adr)->is_ptr()); }

  // Access immutable memory
  Node* immutable_memory() { return C->immutable_memory(); }

  // Set unaliased memory
  void set_memory(Node* c, uint alias_idx) { merged_memory()->set_memory_at(alias_idx, c); }
  void set_memory(Node* c, const TypePtr *tp) { set_memory(c, C->get_alias_index(tp)); }
  void set_memory(Node* c, Node* adr) { set_memory(c, _gvn.type(adr)->is_ptr()); }

  // Get the entire memory state (probably a MergeMemNode), and reset it
  // (The resetting prevents somebody from using the dangling Node pointer.)
  Node* reset_memory();

  // Get the entire memory state, asserted to be a MergeMemNode.
  MergeMemNode* merged_memory() {
    Node* mem = map_not_null()->memory();
    assert(mem->is_MergeMem(), "parse memory is always pre-split");
    return mem->as_MergeMem();
  }

  // Set the entire memory state; produce a new MergeMemNode.
  void set_all_memory(Node* newmem);

  // Create a memory projection from the call, then set_all_memory.
  void set_all_memory_call(Node* call, bool separate_io_proj = false);

  // Create a LoadNode, reading from the parser's memory state.
  // (Note: require_atomic_access is useful only with T_LONG.)
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt,
                  bool require_atomic_access = false) {
    // This version computes alias_index from bottom_type
    return make_load(ctl, adr, t, bt, adr->bottom_type()->is_ptr(),
                     require_atomic_access);
  }
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, const TypePtr* adr_type, bool require_atomic_access = false) {
    // This version computes alias_index from an address type
    assert(adr_type != NULL, "use other make_load factory");
    return make_load(ctl, adr, t, bt, C->get_alias_index(adr_type),
                     require_atomic_access);
  }
  // This is the base version which is given an alias index.
  Node* make_load(Node* ctl, Node* adr, const Type* t, BasicType bt, int adr_idx, bool require_atomic_access = false);

  // Create & transform a StoreNode and store the effect into the
  // parser's memory state.
  Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                        const TypePtr* adr_type,
                        bool require_atomic_access = false) {
    // This version computes alias_index from an address type
    assert(adr_type != NULL, "use other store_to_memory factory");
    return store_to_memory(ctl, adr, val, bt,
                           C->get_alias_index(adr_type),
                           require_atomic_access);
  }
  // This is the base version which is given alias index
  // Return the new StoreXNode
  Node* store_to_memory(Node* ctl, Node* adr, Node* val, BasicType bt,
                        int adr_idx,
                        bool require_atomic_access = false);
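  // A load/store sketch against a field at a hypothetical constant offset:
  //   Node* adr = basic_plus_adr(obj, offset);
  //   const TypePtr* adr_type = adr->bottom_type()->is_ptr();
  //   Node* val = make_load(control(), adr, TypeInt::INT, T_INT, adr_type);
  //   store_to_memory(control(), adr, intcon(0), T_INT, adr_type);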


  // All-in-one pre-barrier, store, post-barrier.
  // Insert a write-barrier'd store.  This is to let generational GC
  // work; we have to flag all oop-stores before the next GC point.
  //
  // It comes in 3 flavors of store to an object, array, or unknown.
  // We use precise card marks for arrays to avoid scanning the entire
  // array.  We use imprecise marks for objects.  We use precise marks
  // for unknown since we don't know if we have an array or an object,
  // or even where the object starts.
  //
  // If val==NULL, it is taken to be a completely unknown value.  QQQ

  Node* store_oop(Node* ctl,
                  Node* obj,   // containing obj
                  Node* adr,   // actual address to store val at
                  const TypePtr* adr_type,
                  Node* val,
                  const TypeOopPtr* val_type,
                  BasicType bt,
                  bool use_precise);

  Node* store_oop_to_object(Node* ctl,
                            Node* obj,   // containing obj
                            Node* adr,   // actual address to store val at
                            const TypePtr* adr_type,
                            Node* val,
                            const TypeOopPtr* val_type,
                            BasicType bt) {
    return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, false);
  }

  Node* store_oop_to_array(Node* ctl,
                           Node* obj,   // containing obj
                           Node* adr,   // actual address to store val at
                           const TypePtr* adr_type,
                           Node* val,
                           const TypeOopPtr* val_type,
                           BasicType bt) {
    return store_oop(ctl, obj, adr, adr_type, val, val_type, bt, true);
  }

  // Could be an array or object we don't know at compile time (unsafe ref.)
  Node* store_oop_to_unknown(Node* ctl,
                             Node* obj,   // containing obj
                             Node* adr,   // actual address to store val at
                             const TypePtr* adr_type,
                             Node* val,
                             BasicType bt);

  // For the few cases where the barriers need special help
  void pre_barrier(bool do_load, Node* ctl,
                   Node* obj, Node* adr, uint adr_idx, Node* val, const TypeOopPtr* val_type,
                   Node* pre_val,
                   BasicType bt);

  void post_barrier(Node* ctl, Node* store, Node* obj, Node* adr, uint adr_idx,
                    Node* val, BasicType bt, bool use_precise);

  // Return addressing for an array element.
  Node* array_element_address(Node* ary, Node* idx, BasicType elembt,
                              // Optional constraint on the array size:
                              const TypeInt* sizetype = NULL);

  // Return a load of array element at idx.
  Node* load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype);
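  // E.g., a sketch of loading an int[] element (bounds check not shown):
  //   Node* adr = array_element_address(ary, idx, T_INT);
  //   Node* val = make_load(control(), adr, TypeInt::INT, T_INT, TypeAryPtr::INTS);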

  //---------------- Dtrace support --------------------
  void make_dtrace_method_entry_exit(ciMethod* method, bool is_entry);
  void make_dtrace_method_entry(ciMethod* method) {
    make_dtrace_method_entry_exit(method, true);
  }
  void make_dtrace_method_exit(ciMethod* method) {
    make_dtrace_method_entry_exit(method, false);
  }

  //--------------- stub generation -------------------
 public:
  void gen_stub(address C_function,
                const char *name,
                int is_fancy_jump,
                bool pass_tls,
                bool return_pc);

  //---------- help for generating calls --------------

  // Do a null check on the receiver as it would happen before the call to
  // callee (with all arguments still on the stack).
  Node* null_check_receiver_before_call(ciMethod* callee) {
    assert(!callee->is_static(), "must be a virtual method");
    const int nargs = callee->arg_size();
    inc_sp(nargs);
    Node* n = null_check_receiver();
    dec_sp(nargs);
    return n;
  }

  // Fill in argument edges for the call from argument(0), argument(1), ...
  // (The next step is to call set_edges_for_java_call.)
  void set_arguments_for_java_call(CallJavaNode* call);

  // Fill in non-argument edges for the call.
  // Transform the call, and update the basics: control, i_o, memory.
  // (The next step is usually to call set_results_for_java_call.)
  void set_edges_for_java_call(CallJavaNode* call,
                               bool must_throw = false, bool separate_io_proj = false);

  // Finish up a java call that was started by set_edges_for_java_call.
  // Call add_exception on any throw arising from the call.
  // Return the call result (transformed).
  Node* set_results_for_java_call(CallJavaNode* call, bool separate_io_proj = false);

  // Similar to set_edges_for_java_call, but simplified for runtime calls.
  void set_predefined_output_for_runtime_call(Node* call) {
    set_predefined_output_for_runtime_call(call, NULL, NULL);
  }
  void set_predefined_output_for_runtime_call(Node* call,
                                              Node* keep_mem,
                                              const TypePtr* hook_mem);
  Node* set_predefined_input_for_runtime_call(SafePointNode* call);

  // Replace the call with the current state of the kit.  Requires
  // that the call was generated with separate io_projs so that
  // exceptional control flow can be handled properly.
  void replace_call(CallNode* call, Node* result);

  // helper functions for statistics
  void increment_counter(address counter_addr);   // increment a debug counter
  void increment_counter(Node*   counter_addr);   // increment a debug counter

  // Bail out to the interpreter right now
  // The optional klass is the one causing the trap.
  // The optional reason is debug information written to the compile log.
  // Optional must_throw is the same as with add_safepoint_edges.
  void uncommon_trap(int trap_request,
                     ciKlass* klass = NULL, const char* reason_string = NULL,
                     bool must_throw = false, bool keep_exact_action = false);

  // Shorthand, to avoid saying "Deoptimization::" so many times.
  void uncommon_trap(Deoptimization::DeoptReason reason,
                     Deoptimization::DeoptAction action,
                     ciKlass* klass = NULL, const char* reason_string = NULL,
                     bool must_throw = false, bool keep_exact_action = false) {
    uncommon_trap(Deoptimization::make_trap_request(reason, action),
                  klass, reason_string, must_throw, keep_exact_action);
  }
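  // E.g., a sketch of bailing out on a class that is not yet loaded:
  //   uncommon_trap(Deoptimization::Reason_unloaded,
  //                 Deoptimization::Action_reinterpret, klass);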

  // SP when bytecode needs to be reexecuted.
  virtual int reexecute_sp() { return sp(); }

  // Report if there were too many traps at the current method and bci.
  // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
  // If there is no MDO at all, report no trap unless told to assume it.
  bool too_many_traps(Deoptimization::DeoptReason reason) {
    return C->too_many_traps(method(), bci(), reason);
  }

  // Report if there were too many recompiles at the current method and bci.
  bool too_many_recompiles(Deoptimization::DeoptReason reason) {
    return C->too_many_recompiles(method(), bci(), reason);
  }

  // Returns the object (if any) which was created the moment before.
  Node* just_allocated_object(Node* current_control);

  static bool use_ReduceInitialCardMarks() {
    return (ReduceInitialCardMarks
            && Universe::heap()->can_elide_tlab_store_barriers());
  }

  // Sync Ideal and Graph kits.
  void sync_kit(IdealKit& ideal);
  void final_sync(IdealKit& ideal);

  // vanilla/CMS post barrier
  void write_barrier_post(Node *store, Node* obj,
                          Node* adr, uint adr_idx, Node* val, bool use_precise);

  // Allow reordering of the pre-barrier with the oop store and/or post-barrier.
  // Used for load_store operations, which load the old value.
  bool can_move_pre_barrier() const;

  // G1 pre/post barriers
  void g1_write_barrier_pre(bool do_load,
                            Node* obj,
                            Node* adr,
                            uint alias_idx,
                            Node* val,
                            const TypeOopPtr* val_type,
                            Node* pre_val,
                            BasicType bt);

  void g1_write_barrier_post(Node* store,
                             Node* obj,
                             Node* adr,
                             uint alias_idx,
                             Node* val,
                             BasicType bt,
                             bool use_precise);
  // Helper function for g1
 private:
  void g1_mark_card(IdealKit& ideal, Node* card_adr, Node* store, uint oop_alias_idx,
                    Node* index, Node* index_adr,
                    Node* buffer, const TypeFunc* tf);

 public:
  // Helper function to round double arguments before a call
  void round_double_arguments(ciMethod* dest_method);
  void round_double_result(ciMethod* dest_method);

  // rounding for strict float precision conformance
  Node* precision_rounding(Node* n);

  // rounding for strict double precision conformance
  Node* dprecision_rounding(Node* n);

  // rounding for non-strict double stores
  Node* dstore_rounding(Node* n);

  // Helper functions for fast/slow path codes
  Node* opt_iff(Node* region, Node* iff);
  Node* make_runtime_call(int flags,
                          const TypeFunc* call_type, address call_addr,
                          const char* call_name,
                          const TypePtr* adr_type, // NULL if no memory effects
                          Node* parm0 = NULL, Node* parm1 = NULL,
                          Node* parm2 = NULL, Node* parm3 = NULL,
                          Node* parm4 = NULL, Node* parm5 = NULL,
                          Node* parm6 = NULL, Node* parm7 = NULL);
  enum {  // flag values for make_runtime_call
    RC_NO_FP = 1,               // CallLeafNoFPNode
    RC_NO_IO = 2,               // do not hook IO edges
    RC_NO_LEAF = 4,             // CallStaticJavaNode
    RC_MUST_THROW = 8,          // flag passed to add_safepoint_edges
    RC_NARROW_MEM = 16,         // input memory is same as output
    RC_UNCOMMON = 32,           // freq. expected to be like uncommon trap
    RC_LEAF = 0                 // null value:  no flags set
  };
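  // A sketch of a simple use (the entry point, TypeFunc, and name here are
  // hypothetical, not real runtime entries):
  //   make_runtime_call(RC_LEAF | RC_NO_FP, call_type, entry_addr,
  //                     "my_leaf_call", NULL /* no memory effects */, arg0);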

  // merge in all memory slices from new_mem, along the given path
  void merge_memory(Node* new_mem, Node* region, int new_path);
  void make_slow_call_ex(Node* call, ciInstanceKlass* ex_klass, bool separate_io_proj);

  // Helper functions to build synchronizations
  int next_monitor();
  Node* insert_mem_bar(int opcode, Node* precedent = NULL);
  Node* insert_mem_bar_volatile(int opcode, int alias_idx, Node* precedent = NULL);
  // Optional 'precedent' is appended as an extra edge, to force ordering.
  FastLockNode* shared_lock(Node* obj);
  void shared_unlock(Node* box, Node* obj);

  // helper functions for the fast path/slow path idioms
  Node* fast_and_slow(Node* in, const Type *result_type, Node* null_result, IfNode* fast_test, Node* fast_result, address slow_call, const TypeFunc *slow_call_type, Node* slow_arg, Klass* ex_klass, Node* slow_result);

  // Generate an instance-of idiom.  Used by both the instance-of bytecode
  // and the reflective instance-of call.
  Node* gen_instanceof( Node *subobj, Node* superkls );

  // Generate a check-cast idiom.  Used by both the check-cast bytecode
  // and the array-store bytecode
  Node* gen_checkcast( Node *subobj, Node* superkls,
                       Node* *failure_control = NULL );

  // Generate a subtyping check.  Takes as input the subtype and supertype.
  // Returns 2 values: sets the default control() to the true path and
  // returns the false path.  Only reads from constant memory taken from the
  // default memory; does not write anything.  It also doesn't take in an
  // Object; if you wish to check an Object you need to load the Object's
  // class prior to coming here.
  Node* gen_subtype_check(Node* subklass, Node* superklass);
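  // Sketched use:
  //   Node* not_subtype_ctrl = gen_subtype_check(subklass, superklass);
  //   // control() is now the path on which the subtype test succeeds;
  //   // not_subtype_ctrl is the path on which it fails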

  // Static parse-time type checking logic for gen_subtype_check:
  enum { SSC_always_false, SSC_always_true, SSC_easy_test, SSC_full_test };
  int static_subtype_check(ciKlass* superk, ciKlass* subk);

  // Exact type check used for predicted calls and casts.
  // Rewrites (*casted_receiver) to be casted to the stronger type.
  // (Caller is responsible for doing replace_in_map.)
  Node* type_check_receiver(Node* receiver, ciKlass* klass, float prob,
                            Node* *casted_receiver);

  // implementation of object creation
  Node* set_output_for_allocation(AllocateNode* alloc,
                                  const TypeOopPtr* oop_type);
  Node* get_layout_helper(Node* klass_node, jint& constant_value);
  Node* new_instance(Node* klass_node,
                     Node* slow_test = NULL,
                     Node* *return_size_val = NULL);
  Node* new_array(Node* klass_node, Node* count_val, int nargs,
                  Node* *return_size_val = NULL);

  // java.lang.String helpers
  Node* load_String_offset(Node* ctrl, Node* str);
  Node* load_String_length(Node* ctrl, Node* str);
  Node* load_String_value(Node* ctrl, Node* str);
  void store_String_offset(Node* ctrl, Node* str, Node* value);
  void store_String_length(Node* ctrl, Node* str, Node* value);
  void store_String_value(Node* ctrl, Node* str, Node* value);

  // Handy for making control flow
  IfNode* create_and_map_if(Node* ctrl, Node* tst, float prob, float cnt) {
    IfNode* iff = new (C) IfNode(ctrl, tst, prob, cnt); // New IfNode's
    _gvn.set_type(iff, iff->Value(&_gvn));              // Value may be known at parse-time
    // Place 'if' on worklist if it will be in graph
    if (!tst->is_Con())  record_for_igvn(iff);          // Range-check and Null-check removal is later
    return iff;
  }

  IfNode* create_and_xform_if(Node* ctrl, Node* tst, float prob, float cnt) {
    IfNode* iff = new (C) IfNode(ctrl, tst, prob, cnt); // New IfNode's
    _gvn.transform(iff);                                // Value may be known at parse-time
    // Place 'if' on worklist if it will be in graph
    if (!tst->is_Con())  record_for_igvn(iff);          // Range-check and Null-check removal is later
    return iff;
  }
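  // A sketch of the usual diamond these helpers build:
  //   Node* tst = Bool(CmpI(a, b), BoolTest::lt);
  //   IfNode* iff = create_and_xform_if(control(), tst, PROB_FAIR, COUNT_UNKNOWN);
  //   Node* then_ctl = IfTrue(iff);
  //   Node* else_ctl = IfFalse(iff);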

  // Insert a loop predicate into the graph
  void add_predicate(int nargs = 0);
  void add_predicate_impl(Deoptimization::DeoptReason reason, int nargs);

  // Produce new array node of stable type
  Node* cast_array_to_stable(Node* ary, const TypeAryPtr* ary_type);
};

// Helper class to support building of control flow branches. Upon
// creation the map and sp at bci are cloned and restored upon
// destruction. Typical use:
//
// { PreserveJVMState pjvms(this);
//   // code of new branch
// }
// // here the JVM state at bci is established

class PreserveJVMState: public StackObj {
 protected:
  GraphKit*      _kit;
#ifdef ASSERT
  int            _block;  // PO of current block, if a Parse
  int            _bci;
#endif
  SafePointNode* _map;
  uint           _sp;

 public:
  PreserveJVMState(GraphKit* kit, bool clone_map = true);
  ~PreserveJVMState();
};

// Helper class to build cutouts of the form if (p) ; else {x...}.
// The code {x...} must not fall through.
// The kit's main flow of control is set to the "then" continuation of if(p).
class BuildCutout: public PreserveJVMState {
 public:
  BuildCutout(GraphKit* kit, Node* p, float prob, float cnt = COUNT_UNKNOWN);
  ~BuildCutout();
};
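// A sketch of its use, in the style of the PreserveJVMState example above
// (the test and trap reason are arbitrary examples):
//
// { BuildCutout unless(this, bol, PROB_MAX);
//   // emitted only on the path where bol is false; must not fall through
//   uncommon_trap(Deoptimization::Reason_range_check,
//                 Deoptimization::Action_make_not_entrant);
// }
// // here bol is known to be true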

// Helper class to preserve the original _reexecute bit and _sp, restoring
// them on destruction.
class PreserveReexecuteState: public StackObj {
 protected:
  GraphKit*                 _kit;
  uint                      _sp;
  JVMState::ReexecuteState  _reexecute;

 public:
  PreserveReexecuteState(GraphKit* kit);
  ~PreserveReexecuteState();
};
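// A sketch of typical use inside a kit method (nargs is hypothetical):
//
// { PreserveReexecuteState preexecs(this);
//   jvms()->set_should_reexecute(true);
//   inc_sp(nargs);  // restore the caller's arguments for reexecution
//   // ... emit code that may deoptimize ...
// }
// // the original _reexecute bit and _sp are restored here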

#endif // SHARE_VM_OPTO_GRAPHKIT_HPP
