src/share/vm/opto/parse.hpp

author:      aoqi
date:        Thu, 24 May 2018 19:26:50 +0800
changeset:   8862:fd13a567f179
parent:      7994:04ff2f6cd0eb
child:       9931:fd44df5e3bc3
permissions: -rw-r--r--

#7046 C2 supports long branch
Contributed-by: fujie

/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_PARSE_HPP
#define SHARE_VM_OPTO_PARSE_HPP

#include "ci/ciMethodData.hpp"
#include "ci/ciTypeFlow.hpp"
#include "compiler/methodLiveness.hpp"
#include "libadt/vectset.hpp"
#include "oops/generateOopMap.hpp"
#include "opto/graphKit.hpp"
#include "opto/subnode.hpp"

class BytecodeParseHistogram;
class InlineTree;
class Parse;
class SwitchRange;


//------------------------------InlineTree-------------------------------------
class InlineTree : public ResourceObj {
  friend class VMStructs;

  Compile*    C;                  // cache
  JVMState*   _caller_jvms;       // state of caller
  ciMethod*   _method;            // method being called by the caller_jvms
  InlineTree* _caller_tree;
  uint        _count_inline_bcs;  // Accumulated count of inlined bytecodes
  // Call-site count / interpreter invocation count, scaled recursively.
  // Always between 0.0 and 1.0.  Represents the percentage of the method's
  // total execution time used at this call site.
  const float _site_invoke_ratio;
  const int   _max_inline_level;  // the maximum inline level for this sub-tree (may be adjusted)
  float compute_callee_frequency( int caller_bci ) const;

  GrowableArray<InlineTree*> _subtrees;

  void print_impl(outputStream* st, int indent) const PRODUCT_RETURN;
  const char* _msg;
protected:
  InlineTree(Compile* C,
             const InlineTree* caller_tree,
             ciMethod* callee_method,
             JVMState* caller_jvms,
             int caller_bci,
             float site_invoke_ratio,
             int max_inline_level);
  InlineTree* build_inline_tree_for_callee(ciMethod* callee_method,
                                           JVMState* caller_jvms,
                                           int caller_bci);
  bool        try_to_inline(ciMethod* callee_method,
                            ciMethod* caller_method,
                            int caller_bci,
                            JVMState* jvms,
                            ciCallProfile& profile,
                            WarmCallInfo* wci_result,
                            bool& should_delay);
  bool        should_inline(ciMethod* callee_method,
                            ciMethod* caller_method,
                            int caller_bci,
                            ciCallProfile& profile,
                            WarmCallInfo* wci_result);
  bool        should_not_inline(ciMethod* callee_method,
                                ciMethod* caller_method,
                                JVMState* jvms,
                                WarmCallInfo* wci_result);
  void        print_inlining(ciMethod* callee_method, int caller_bci,
                             bool success) const;

  InlineTree* caller_tree()       const { return _caller_tree; }
  InlineTree* callee_at(int bci, ciMethod* m) const;
  int         inline_level()      const { return stack_depth(); }
  int         stack_depth()       const { return _caller_jvms ? _caller_jvms->depth() : 0; }
  const char* msg()               const { return _msg; }
  void        set_msg(const char* msg)  { _msg = msg; }
public:
  static const char* check_can_parse(ciMethod* callee);

  static InlineTree* build_inline_tree_root();
  static InlineTree* find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee);

  // For temporary (stack-allocated, stateless) ilts:
  InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms, float site_invoke_ratio, int max_inline_level);

  // How a call site came to be inlined:
  enum InlineStyle {
    Inline_do_not_inline            = 0, // not inlined
    Inline_cha_is_monomorphic       = 1, // CHA found a single target method
    Inline_type_profile_monomorphic = 2  // type profile shows a single receiver type
  };

  // See if it is OK to inline.
  // The receiver is the inline tree for the caller.
  //
  // The result is a temperature indication.  If it is hot or cold,
  // inlining is immediate or undesirable.  Otherwise, the info block
  // returned is newly allocated and may be enqueued.
  //
  // If the method is inlinable, a new inline subtree is created on the fly,
  // and may be accessed by find_subtree_from_root.
  // For a special or static invocation, call_method is the dest_method;
  // otherwise, call_method is a candidate for optimized virtual dispatch.
  WarmCallInfo* ok_to_inline(ciMethod *call_method, JVMState* caller_jvms, ciCallProfile& profile, WarmCallInfo* wci, bool& should_delay);
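
  // A hedged usage sketch (illustrative only, not a verbatim excerpt; the
  // real call sites live in the call-generation code).  The temperature of
  // the returned WarmCallInfo drives what happens next:
  //
  //   WarmCallInfo scratch;               // pre-allocated scratch block
  //   bool should_delay = false;
  //   WarmCallInfo* ci =
  //       ilt->ok_to_inline(callee, jvms, profile, &scratch, should_delay);
  //   if (ci->is_hot()) {
  //     // inline immediately
  //   } else if (ci->is_cold()) {
  //     // keep the call out-of-line
  //   } else {
  //     // warm: the newly allocated info block may be enqueued
  //   }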

  // Information about inlined method
  JVMState* caller_jvms()         const { return _caller_jvms; }
  ciMethod *method()              const { return _method; }
  int       caller_bci()          const { return _caller_jvms ? _caller_jvms->bci() : InvocationEntryBci; }
  uint      count_inline_bcs()    const { return _count_inline_bcs; }
  float     site_invoke_ratio()   const { return _site_invoke_ratio; }

#ifndef PRODUCT
private:
  uint      _count_inlines;       // Count of inlined methods
public:
  // Debug information collected during parse
  uint      count_inlines()       const { return _count_inlines; }
#endif
  GrowableArray<InlineTree*> subtrees() { return _subtrees; }

  void print_value_on(outputStream* st) const PRODUCT_RETURN;

  bool      _forced_inline;       // Inlining was forced by CompilerOracle, ciReplay or annotation
  bool      forced_inline()       const { return _forced_inline; }
  // Count number of nodes in this subtree
  int       count() const;
  // Dump inlining replay data to the stream.
  void dump_replay_data(outputStream* out);
};
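
// A hedged lifecycle sketch (assumed from the static API above, not a
// verbatim excerpt): one root tree is built per compilation, subtrees grow
// as call sites are inlined, and later phases can look a subtree up again:
//
//   InlineTree* root = InlineTree::build_inline_tree_root();
//   ... parse, inlining call sites as they are reached ...
//   InlineTree* ilt  = InlineTree::find_subtree_from_root(root, jvms, callee);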


//-----------------------------------------------------------------------------
//------------------------------Parse------------------------------------------
// Parse bytecodes, build a Graph
class Parse : public GraphKit {
 public:
  // Per-block information needed by the parser:
  class Block {
   private:
    ciTypeFlow::Block*   _flow;
    int                  _pred_count;    // how many predecessors in CFG?
    int                  _preds_parsed;  // how many of these have been parsed?
    uint                 _count;         // how many times executed?  Currently only set by _goto's
    bool                 _is_parsed;     // has this block been parsed yet?
    bool                 _is_handler;    // is this block an exception handler?
    bool                 _has_merged_backedge; // does this block have merged backedge?
    SafePointNode*       _start_map;     // all values flowing into this block
    MethodLivenessResult _live_locals;   // lazily initialized liveness bitmap

    int                  _num_successors; // Includes only normal control flow.
    int                  _all_successors; // Include exception paths also.
    Block**              _successors;

    // Use init_node/init_graph to initialize Blocks.
    // Block() : _live_locals((uintptr_t*)NULL,0) { ShouldNotReachHere(); }
    Block() : _live_locals(NULL,0) { ShouldNotReachHere(); }

   public:

    // Set up the block data structure itself.
    void init_node(Parse* outer, int po);
    // Set up the block's relations to other blocks.
    void init_graph(Parse* outer);

    ciTypeFlow::Block* flow() const     { return _flow; }
    int pred_count() const              { return _pred_count; }
    int preds_parsed() const            { return _preds_parsed; }
    bool is_parsed() const              { return _is_parsed; }
    bool is_handler() const             { return _is_handler; }
    void set_count( uint x )            { _count = x; }
    uint count() const                  { return _count; }

    SafePointNode* start_map() const    { assert(is_merged(), ""); return _start_map; }
    void set_start_map(SafePointNode* m) { assert(!is_merged(), ""); _start_map = m; }

    // True after any predecessor flows control into this block
    bool is_merged() const              { return _start_map != NULL; }

#ifdef ASSERT
    // True after backedge predecessor flows control into this block
    bool has_merged_backedge() const    { return _has_merged_backedge; }
    void mark_merged_backedge(Block* pred) {
      assert(is_SEL_head(), "should be loop head");
      if (pred != NULL && is_SEL_backedge(pred)) {
        assert(is_parsed(), "block should be parsed before merging backedges");
        _has_merged_backedge = true;
      }
    }
#endif

    // True when all non-exception predecessors have been parsed.
    bool is_ready() const               { return preds_parsed() == pred_count(); }

    int num_successors() const          { return _num_successors; }
    int all_successors() const          { return _all_successors; }
    Block* successor_at(int i) const {
      assert((uint)i < (uint)all_successors(), "");
      return _successors[i];
    }
    Block* successor_for_bci(int bci);

    int start() const                   { return flow()->start(); }
    int limit() const                   { return flow()->limit(); }
    int rpo() const                     { return flow()->rpo(); }
    int start_sp() const                { return flow()->stack_size(); }

    bool is_loop_head() const           { return flow()->is_loop_head(); }
    bool is_SEL_head() const            { return flow()->is_single_entry_loop_head(); }
    bool is_SEL_backedge(Block* pred) const { return is_SEL_head() && pred->rpo() >= rpo(); }
    bool is_invariant_local(uint i) const {
      const JVMState* jvms = start_map()->jvms();
      if (!jvms->is_loc(i) || flow()->outer()->has_irreducible_entry()) return false;
      return flow()->is_invariant_local(i - jvms->locoff());
    }
    bool can_elide_SEL_phi(uint i) const { assert(is_SEL_head(), ""); return is_invariant_local(i); }

    const Type* peek(int off = 0) const { return stack_type_at(start_sp() - (off + 1)); }

    const Type* stack_type_at(int i) const;
    const Type* local_type_at(int i) const;
    static const Type* get_type(ciType* t) { return Type::get_typeflow_type(t); }

    bool has_trap_at(int bci) const     { return flow()->has_trap() && flow()->trap_bci() == bci; }

    // Call this just before parsing a block.
    void mark_parsed() {
      assert(!_is_parsed, "must parse each block exactly once");
      _is_parsed = true;
    }

    // Return the phi/region input index for the "current" pred,
    // and bump the pred number.  For historical reasons these index
    // numbers are handed out in descending order.  The last index is
    // always PhiNode::Input (i.e., 1).  The value returned is known
    // as a "path number" because it distinguishes by which path we are
    // entering the block.
    int next_path_num() {
      assert(preds_parsed() < pred_count(), "too many preds?");
      return pred_count() - _preds_parsed++;
    }
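
    // Worked example (for illustration): with pred_count() == 3, successive
    // calls return 3, then 2, then 1, so the final path number handed out is
    // always PhiNode::Input (1), as noted above.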

    // Add a previously unaccounted predecessor to this block.
    // This operates by increasing the size of the block's region
    // and all its phi nodes (if any).  The value returned is a
    // path number ("pnum").
    int add_new_path();

    // Initialize me by recording the parser's map.  My own map must be NULL.
    void record_state(Parse* outer);
  };
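
  // A hedged sketch of how the parser drives these Blocks (simplified; the
  // real loop is Parse::do_all_blocks() in parse1.cpp, using the members
  // declared further down in this class):
  //
  //   for each Block* b, in reverse post-order:
  //     if (b->is_ready() && !b->is_parsed()) {
  //       load_state_from(b);   // pick up the merged entry state
  //       do_one_block();       // parse bytecodes in [b->start(), b->limit())
  //     }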

#ifndef PRODUCT
  // BytecodeParseHistogram collects number of bytecodes parsed, nodes constructed, and transformations.
  class BytecodeParseHistogram : public ResourceObj {
   private:
    enum BPHType {
      BPH_transforms,
      BPH_values
    };
    static bool _initialized;
    static uint _bytecodes_parsed [Bytecodes::number_of_codes];
    static uint _nodes_constructed[Bytecodes::number_of_codes];
    static uint _nodes_transformed[Bytecodes::number_of_codes];
    static uint _new_values       [Bytecodes::number_of_codes];

    Bytecodes::Code _initial_bytecode;
    int             _initial_node_count;
    int             _initial_transforms;
    int             _initial_values;

    Parse*   _parser;
    Compile* _compiler;

    // Initialization
    static void reset();

    // Return info being collected, select with global flag 'BytecodeParseInfo'
    int current_count(BPHType info_selector);

   public:
    BytecodeParseHistogram(Parse* p, Compile* c);
    static bool initialized();

    // Record info when starting to parse one bytecode
    void set_initial_state( Bytecodes::Code bc );
    // Record results of parsing one bytecode
    void record_change();

    // Profile printing
    static void print(float cutoff = 0.01F); // cutoff in percent
  };

 public:
  // Record work done during parsing
  BytecodeParseHistogram* _parse_histogram;
  void set_parse_histogram(BytecodeParseHistogram* bph) { _parse_histogram = bph; }
  BytecodeParseHistogram* parse_histogram()             { return _parse_histogram; }
#endif

 private:
  friend class Block;

  // Variables which characterize this compilation as a whole:

  JVMState*     _caller;        // JVMS which carries incoming args & state.
  float         _expected_uses; // expected number of calls to this code
  float         _prof_factor;   // discount applied to my profile counts
  int           _depth;         // Inline tree depth, for debug printouts
  const TypeFunc* _tf;          // My kind of function type
  int           _entry_bci;     // the osr bci or InvocationEntryBci

  ciTypeFlow*   _flow;          // Results of previous flow pass.
  Block*        _blocks;        // Array of basic-block structs.
  int           _block_count;   // Number of elements in _blocks.

  GraphKit      _exits;         // Record all normal returns and throws here.
  bool          _wrote_final;   // Did we write a final field?
  bool          _wrote_volatile;     // Did we write a volatile field?
  bool          _count_invocations;  // update and test invocation counter
  bool          _method_data_update; // update method data oop
  Node*         _alloc_with_final;   // An allocation node with final field

  // Variables which track Java semantics during bytecode parsing:

  Block*            _block;     // block currently getting parsed
  ciBytecodeStream  _iter;      // stream of this method's bytecodes

  int           _blocks_merged; // Progress meter: state merges from BB preds
  int           _blocks_parsed; // Progress meter: BBs actually parsed

  const FastLockNode* _synch_lock; // FastLockNode for synchronized method

#ifndef PRODUCT
  int _max_switch_depth;        // Debugging SwitchRanges.
  int _est_switch_depth;        // Debugging SwitchRanges.
#endif

  bool          _first_return;  // true if return is the first to be parsed
  bool          _replaced_nodes_for_exceptions; // needs processing of replaced nodes in exception paths?
  uint          _new_idx;       // any node with _idx above this value was new during this parse. Used to trim the replaced-nodes list.

 public:
  // Constructor
  Parse(JVMState* caller, ciMethod* parse_method, float expected_uses);

  virtual Parse* is_Parse() const { return (Parse*)this; }

  // Accessors.
  JVMState*     caller()        const { return _caller; }
  float         expected_uses() const { return _expected_uses; }
  float         prof_factor()   const { return _prof_factor; }
  int           depth()         const { return _depth; }
  const TypeFunc* tf()          const { return _tf; }
  // entry_bci()  -- see osr_bci, etc.

  ciTypeFlow*   flow()          const { return _flow; }
  // blocks()  -- see rpo_at, start_block, etc.
  int           block_count()   const { return _block_count; }

  GraphKit&     exits()               { return _exits; }
  bool          wrote_final() const   { return _wrote_final; }
  void      set_wrote_final(bool z)   { _wrote_final = z; }
  bool          wrote_volatile() const { return _wrote_volatile; }
  void      set_wrote_volatile(bool z) { _wrote_volatile = z; }
  bool          count_invocations() const { return _count_invocations; }
  bool          method_data_update() const { return _method_data_update; }
  Node*         alloc_with_final() const { return _alloc_with_final; }
  void      set_alloc_with_final(Node* n) {
    assert((_alloc_with_final == NULL) || (_alloc_with_final == n), "different init objects?");
    _alloc_with_final = n;
  }

  Block*        block()         const { return _block; }
  ciBytecodeStream& iter()            { return _iter; }
  Bytecodes::Code bc()          const { return _iter.cur_bc(); }

  void set_block(Block* b)            { _block = b; }

  // Derived accessors:
  bool is_normal_parse() const        { return _entry_bci == InvocationEntryBci; }
  bool is_osr_parse() const           { return _entry_bci != InvocationEntryBci; }
  int osr_bci() const                 { assert(is_osr_parse(), ""); return _entry_bci; }

  void set_parse_bci(int bci);

  // Must this parse be aborted?
  bool failing()                      { return C->failing(); }

  Block* rpo_at(int rpo) {
    assert(0 <= rpo && rpo < _block_count, "oob");
    return &_blocks[rpo];
  }
  Block* start_block() {
    return rpo_at(flow()->start_block()->rpo());
  }
  // Can return NULL if the flow pass did not complete a block.
  Block* successor_for_bci(int bci) {
    return block()->successor_for_bci(bci);
  }

 private:
  // Create a JVMS & map for the initial state of this method.
  SafePointNode* create_entry_map();

  // OSR helpers
  Node* fetch_interpreter_state(int index, BasicType bt, Node* local_addrs, Node* local_addrs_base);
  Node* check_interpreter_type(Node* l, const Type* type, SafePointNode*& bad_type_exit);
  void  load_interpreter_state(Node* osr_buf);

  // Functions for managing basic blocks:
  void init_blocks();
  void load_state_from(Block* b);
  void store_state_to(Block* b) { b->record_state(this); }

  // Parse all the basic blocks.
  void do_all_blocks();

  // Parse the current basic block
  void do_one_block();

  // Raise an error if we get a bad ciTypeFlow CFG.
  void handle_missing_successor(int bci);

  // first actions (before BCI 0)
  void do_method_entry();

  // implementation of monitorenter/monitorexit
  void do_monitor_enter();
  void do_monitor_exit();

  // Eagerly create phis throughout the state, to cope with back edges.
  void ensure_phis_everywhere();

  // Merge the current mapping into the basic block starting at bci
  void merge(int target_bci);
  // Same as plain merge, except that it allocates a new path number.
  void merge_new_path(int target_bci);
  // Merge the current mapping into an exception handler.
  void merge_exception(int target_bci);
  // Helper: Merge the current mapping into the given basic block
  void merge_common(Block* target, int pnum);
  // Helper functions for merging individual cells.
  PhiNode* ensure_phi(int idx, bool nocreate = false);
  PhiNode* ensure_memory_phi(int idx, bool nocreate = false);
  // Helper to merge the current memory state into the given basic block
  void merge_memory_edges(MergeMemNode* n, int pnum, bool nophi);

  // Parse this bytecode, and alter the Parser's JVM->Node mapping
  void do_one_bytecode();

  // helper function to generate array store check
  void array_store_check();
  // Helper function to generate array load
  void array_load(BasicType etype);
  // Helper function to generate array store
  void array_store(BasicType etype);
  // Helper function to compute array addressing
  Node* array_addressing(BasicType type, int vals, const Type** result2 = NULL);

  void rtm_deopt();

  // Pass current map to exits
  void return_current(Node* value);

  // Register finalizers on return from Object.<init>
  void call_register_finalizer();

  // Insert a compiler safepoint into the graph
  void add_safepoint();

  // Insert a compiler safepoint into the graph, if there is a back-branch.
  void maybe_add_safepoint(int target_bci) {
    if (UseLoopSafepoints && target_bci <= bci()) {
      add_safepoint();
    }
  }
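
  // Example: a branch (e.g. a "goto") whose target_bci is at or before the
  // current bci() is a backward branch, i.e. a potential loop back-edge, so
  // a SafePoint is inserted ahead of it when UseLoopSafepoints is enabled.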

  // Note:  Intrinsic generation routines may be found in library_call.cpp.

  // Helper function to setup Ideal Call nodes
  void do_call();

  // Helper function to uncommon-trap or bailout for non-compilable call-sites
  bool can_not_compile_call_site(ciMethod* dest_method, ciInstanceKlass* klass);

  // Helper function to setup for type-profile based inlining
  bool prepare_type_profile_inline(ciInstanceKlass* prof_klass, ciMethod* prof_method);

  // Helper functions for type checking bytecodes:
  void do_checkcast();
  void do_instanceof();

  // Helper functions for shifting & arithmetic
  void modf();
  void modd();
  void l2f();

  void do_irem();

  // implementation of _get* and _put* bytecodes
  void do_getstatic() { do_field_access(true,  false); }
  void do_getfield () { do_field_access(true,  true); }
  void do_putstatic() { do_field_access(false, false); }
  void do_putfield () { do_field_access(false, true); }

  // common code for making initial checks and forming addresses
  void do_field_access(bool is_get, bool is_field);
  bool static_field_ok_in_clinit(ciField* field, ciMethod* method);

  // common code for actually performing the load or store
  void do_get_xxx(Node* obj, ciField* field, bool is_field);
  void do_put_xxx(Node* obj, ciField* field, bool is_field);

  // loading from a constant field or the constant pool
  // returns false if push failed (non-perm field constants only, not ldcs)
  bool push_constant(ciConstant con, bool require_constant = false, bool is_autobox_cache = false, const Type* basic_type = NULL);

  // implementation of object creation bytecodes
  void emit_guard_for_new(ciInstanceKlass* klass);
  void do_new();
  void do_newarray(BasicType elemtype);
  void do_anewarray();
  void do_multianewarray();
  Node* expand_multianewarray(ciArrayKlass* array_klass, Node** lengths, int ndimensions, int nargs);

  // implementation of jsr/ret
  void do_jsr();
  void do_ret();

  float dynamic_branch_prediction(float& cnt, BoolTest::mask btest, Node* test);
  float branch_prediction(float& cnt, BoolTest::mask btest, int target_bci, Node* test);
  bool  seems_never_taken(float prob) const;
  bool  path_is_suitable_for_uncommon_trap(float prob) const;
  bool  seems_stable_comparison() const;

  void do_ifnull(BoolTest::mask btest, Node* c);
  void do_if(BoolTest::mask btest, Node* c);
  int  repush_if_args();
  void adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,
                           Block* path, Block* other_path);
  void sharpen_type_after_if(BoolTest::mask btest,
                             Node* con, const Type* tcon,
                             Node* val, const Type* tval);
  IfNode* jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask);
  Node*   jump_if_join(Node* iffalse, Node* iftrue);
  void    jump_if_true_fork(IfNode* ifNode, int dest_bci_if_true, int prof_table_index);
  void    jump_if_false_fork(IfNode* ifNode, int dest_bci_if_false, int prof_table_index);
  void    jump_if_always_fork(int dest_bci_if_true, int prof_table_index);

  friend class SwitchRange;
  void    do_tableswitch();
  void    do_lookupswitch();
  void    jump_switch_ranges(Node* a, SwitchRange* lo, SwitchRange* hi, int depth = 0);
  bool    create_jump_tables(Node* a, SwitchRange* lo, SwitchRange* hi);

  // helper functions for methodData style profiling
  void test_counter_against_threshold(Node* cnt, int limit);
  void increment_and_test_invocation_counter(int limit);
  void test_for_osr_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize offset, int limit);
  Node* method_data_addressing(ciMethodData* md, ciProfileData* data, ByteSize offset, Node* idx = NULL, uint stride = 0);
  void increment_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize offset, Node* idx = NULL, uint stride = 0);
  void set_md_flag_at(ciMethodData* md, ciProfileData* data, int flag_constant);

  void profile_method_entry();
  void profile_taken_branch(int target_bci, bool force_update = false);
  void profile_not_taken_branch(bool force_update = false);
  void profile_call(Node* receiver);
  void profile_generic_call();
  void profile_receiver_type(Node* receiver);
  void profile_ret(int target_bci);
  void profile_null_checkcast();
  void profile_switch_case(int table_index);

  // helper function for call statistics
  void count_compiled_calls(bool at_method_entry, bool is_inline) PRODUCT_RETURN;

  Node_Notes* make_node_notes(Node_Notes* caller_nn);

  // Helper functions for handling normal and abnormal exits.
  void build_exits();

  // Fix up all exceptional control flow exiting a single bytecode.
  void do_exceptions();

  // Fix up all exiting control flow at the end of the parse.
  void do_exits();

  // Add Catch/CatchProjs
  // The call is either a Java call or the VM's rethrow stub
  void catch_call_exceptions(ciExceptionHandlerStream&);

  // Handle all exceptions thrown by the inlined method.
  // Also handles exceptions for individual bytecodes.
  void catch_inline_exceptions(SafePointNode* ex_map);

  // Merge the given map into correct exceptional exit state.
  // Assumes that there is no applicable local handler.
  void throw_to_exit(SafePointNode* ex_map);

  // Use speculative type to optimize CmpP node
  Node* optimize_cmp_with_klass(Node* c);

 public:
#ifndef PRODUCT
  // Handle PrintOpto, etc.
  void show_parse_info();
  void dump_map_adr_mem() const;
  static void print_statistics(); // Print some performance counters
  void dump();
  void dump_bci(int bci);
#endif
};

#endif // SHARE_VM_OPTO_PARSE_HPP
