Mon, 26 Sep 2011 10:24:05 -0700
7081933: Use zeroing elimination optimization for large array
Summary: Don't zero a new typeArray during the runtime call if the allocation is followed by an arraycopy into it.
Reviewed-by: twisti
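
For illustration, a minimal Java sketch of the kind of pattern this change targets (the method and names are hypothetical, not taken from the changeset): a freshly allocated array is immediately filled by an arraycopy, so the zeroing normally performed by the slow-path runtime allocation is wasted work.

    static byte[] duplicate(byte[] src) {
        byte[] dst = new byte[src.length];             // new typeArray; may be large enough for the runtime slow path
        System.arraycopy(src, 0, dst, 0, src.length);  // immediately overwrites every element of dst
        return dst;
    }
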
/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OPTO_PARSE_HPP
#define SHARE_VM_OPTO_PARSE_HPP

#include "ci/ciMethodData.hpp"
#include "ci/ciTypeFlow.hpp"
#include "compiler/methodLiveness.hpp"
#include "libadt/vectset.hpp"
#include "oops/generateOopMap.hpp"
#include "opto/graphKit.hpp"
#include "opto/subnode.hpp"

class BytecodeParseHistogram;
class InlineTree;
class Parse;
class SwitchRange;


//------------------------------InlineTree-------------------------------------
class InlineTree : public ResourceObj {
  friend class VMStructs;

  Compile*    C;                  // cache
  JVMState*   _caller_jvms;       // state of caller
  ciMethod*   _method;            // method being called by the caller_jvms
  InlineTree* _caller_tree;
  uint        _count_inline_bcs;  // Accumulated count of inlined bytecodes
  // Call-site count / interpreter invocation count, scaled recursively.
  // Always between 0.0 and 1.0.  Represents the percentage of the method's
  // total execution time used at this call site.
  const float _site_invoke_ratio;
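  // Worked example (hypothetical counts, for illustration only; assumes the
  // recursive scaling is a simple product, as the description above suggests):
  // if this call site executed 40 times while its caller was interpreted 100
  // times, the local ratio is 40/100 = 0.4; if the caller is itself an inlinee
  // with a ratio of 0.5, the value recorded for this subtree is 0.4 * 0.5 = 0.2.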
  const int   _max_inline_level;  // the maximum inline level for this sub-tree (may be adjusted)
  float compute_callee_frequency( int caller_bci ) const;

  GrowableArray<InlineTree*> _subtrees;

  void print_impl(outputStream* st, int indent) const PRODUCT_RETURN;

protected:
  InlineTree(Compile* C,
             const InlineTree* caller_tree,
             ciMethod* callee_method,
             JVMState* caller_jvms,
             int caller_bci,
             float site_invoke_ratio,
             int max_inline_level);
  InlineTree *build_inline_tree_for_callee(ciMethod* callee_method,
                                           JVMState* caller_jvms,
                                           int caller_bci);
  const char* try_to_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result);
  const char* should_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const;
  const char* should_not_inline(ciMethod* callee_method, ciMethod* caller_method, WarmCallInfo* wci_result) const;
  void print_inlining(ciMethod *callee_method, int caller_bci, const char *failure_msg) const;

  InlineTree *caller_tree() const { return _caller_tree; }
  InlineTree* callee_at(int bci, ciMethod* m) const;
  int inline_level() const { return stack_depth(); }
  int stack_depth() const { return _caller_jvms ? _caller_jvms->depth() : 0; }

public:
  static const char* check_can_parse(ciMethod* callee);

  static InlineTree* build_inline_tree_root();
  static InlineTree* find_subtree_from_root(InlineTree* root, JVMState* jvms, ciMethod* callee, bool create_if_not_found = false);

  // For temporary (stack-allocated, stateless) ilts:
  InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvms, float site_invoke_ratio, int max_inline_level);

  // InlineTree enum
  enum InlineStyle {
    Inline_do_not_inline = 0,            //
    Inline_cha_is_monomorphic = 1,       //
    Inline_type_profile_monomorphic = 2  //
  };

  // See if it is OK to inline.
  // The receiver is the inline tree for the caller.
  //
  // The result is a temperature indication.  If it is hot or cold,
  // inlining is immediate or undesirable.  Otherwise, the info block
  // returned is newly allocated and may be enqueued.
  //
  // If the method is inlinable, a new inline subtree is created on the fly,
  // and may be accessed by find_subtree_from_root.
  // The call_method is the dest_method for a special or static invocation.
  // The call_method is an optimized virtual method candidate otherwise.
  WarmCallInfo* ok_to_inline(ciMethod *call_method, JVMState* caller_jvms, ciCallProfile& profile, WarmCallInfo* wci);

  // Information about inlined method
  JVMState* caller_jvms() const { return _caller_jvms; }
  ciMethod *method() const { return _method; }
  int caller_bci() const { return _caller_jvms ? _caller_jvms->bci() : InvocationEntryBci; }
  uint count_inline_bcs() const { return _count_inline_bcs; }
  float site_invoke_ratio() const { return _site_invoke_ratio; };

#ifndef PRODUCT
private:
  uint _count_inlines; // Count of inlined methods
public:
  // Debug information collected during parse
  uint count_inlines() const { return _count_inlines; };
#endif
  GrowableArray<InlineTree*> subtrees() { return _subtrees; }

  void print_value_on(outputStream* st) const PRODUCT_RETURN;
};


//-----------------------------------------------------------------------------
//------------------------------Parse------------------------------------------
// Parse bytecodes, build a Graph
class Parse : public GraphKit {
 public:
  // Per-block information needed by the parser:
  class Block {
   private:
    ciTypeFlow::Block* _flow;
    int _pred_count;            // how many predecessors in CFG?
    int _preds_parsed;          // how many of these have been parsed?
    uint _count;                // how many times executed?  Currently only set by _goto's
    bool _is_parsed;            // has this block been parsed yet?
    bool _is_handler;           // is this block an exception handler?
    bool _has_merged_backedge;  // does this block have merged backedge?
    SafePointNode* _start_map;  // all values flowing into this block
    MethodLivenessResult _live_locals; // lazily initialized liveness bitmap

    int _num_successors;        // Includes only normal control flow.
    int _all_successors;        // Include exception paths also.
    Block** _successors;

    // Use init_node/init_graph to initialize Blocks.
    // Block() : _live_locals((uintptr_t*)NULL,0) { ShouldNotReachHere(); }
    Block() : _live_locals(NULL,0) { ShouldNotReachHere(); }

   public:

    // Set up the block data structure itself.
    void init_node(Parse* outer, int po);
    // Set up the block's relations to other blocks.
    void init_graph(Parse* outer);

    ciTypeFlow::Block* flow() const { return _flow; }
    int pred_count() const { return _pred_count; }
    int preds_parsed() const { return _preds_parsed; }
    bool is_parsed() const { return _is_parsed; }
    bool is_handler() const { return _is_handler; }
    void set_count( uint x ) { _count = x; }
    uint count() const { return _count; }

    SafePointNode* start_map() const { assert(is_merged(),""); return _start_map; }
    void set_start_map(SafePointNode* m) { assert(!is_merged(), ""); _start_map = m; }

    // True after any predecessor flows control into this block
    bool is_merged() const { return _start_map != NULL; }

#ifdef ASSERT
    // True after backedge predecessor flows control into this block
    bool has_merged_backedge() const { return _has_merged_backedge; }
    void mark_merged_backedge(Block* pred) {
      assert(is_SEL_head(), "should be loop head");
      if (pred != NULL && is_SEL_backedge(pred)) {
        assert(is_parsed(), "block should be parsed before merging backedges");
        _has_merged_backedge = true;
      }
    }
#endif

    // True when all non-exception predecessors have been parsed.
    bool is_ready() const { return preds_parsed() == pred_count(); }

    int num_successors() const { return _num_successors; }
    int all_successors() const { return _all_successors; }
    Block* successor_at(int i) const {
      assert((uint)i < (uint)all_successors(), "");
      return _successors[i];
    }
    Block* successor_for_bci(int bci);

    int start() const { return flow()->start(); }
    int limit() const { return flow()->limit(); }
    int rpo() const { return flow()->rpo(); }
    int start_sp() const { return flow()->stack_size(); }

    bool is_loop_head() const { return flow()->is_loop_head(); }
    bool is_SEL_head() const { return flow()->is_single_entry_loop_head(); }
    bool is_SEL_backedge(Block* pred) const { return is_SEL_head() && pred->rpo() >= rpo(); }
    bool is_invariant_local(uint i) const {
      const JVMState* jvms = start_map()->jvms();
      if (!jvms->is_loc(i) || flow()->outer()->has_irreducible_entry()) return false;
      return flow()->is_invariant_local(i - jvms->locoff());
    }
    bool can_elide_SEL_phi(uint i) const { assert(is_SEL_head(),""); return is_invariant_local(i); }

    const Type* peek(int off=0) const { return stack_type_at(start_sp() - (off+1)); }
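    // Example (follows directly from the expression above): with start_sp() == 4,
    // peek() returns stack_type_at(3), the type of the value on top of the Java
    // stack, and peek(1) returns stack_type_at(2), the slot just below it.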

    const Type* stack_type_at(int i) const;
    const Type* local_type_at(int i) const;
    static const Type* get_type(ciType* t) { return Type::get_typeflow_type(t); }

    bool has_trap_at(int bci) const { return flow()->has_trap() && flow()->trap_bci() == bci; }

    // Call this just before parsing a block.
    void mark_parsed() {
      assert(!_is_parsed, "must parse each block exactly once");
      _is_parsed = true;
    }

    // Return the phi/region input index for the "current" pred,
    // and bump the pred number.  For historical reasons these index
    // numbers are handed out in descending order.  The last index is
    // always PhiNode::Input (i.e., 1).  The value returned is known
    // as a "path number" because it distinguishes by which path we are
    // entering the block.
    int next_path_num() {
      assert(preds_parsed() < pred_count(), "too many preds?");
      return pred_count() - _preds_parsed++;
    }
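    // Worked example (hypothetical block): with pred_count() == 3, successive
    // calls return the path numbers 3, 2 and 1 as each predecessor is parsed;
    // the last one handed out equals PhiNode::Input, as noted above.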

    // Add a previously unaccounted predecessor to this block.
    // This operates by increasing the size of the block's region
    // and all its phi nodes (if any).  The value returned is a
    // path number ("pnum").
    int add_new_path();

    // Initialize me by recording the parser's map.  My own map must be NULL.
    void record_state(Parse* outer);
  };

#ifndef PRODUCT
  // BytecodeParseHistogram collects number of bytecodes parsed, nodes constructed, and transformations.
  class BytecodeParseHistogram : public ResourceObj {
   private:
    enum BPHType {
      BPH_transforms,
      BPH_values
    };
    static bool _initialized;
    static uint _bytecodes_parsed [Bytecodes::number_of_codes];
    static uint _nodes_constructed[Bytecodes::number_of_codes];
    static uint _nodes_transformed[Bytecodes::number_of_codes];
    static uint _new_values       [Bytecodes::number_of_codes];

    Bytecodes::Code _initial_bytecode;
    int _initial_node_count;
    int _initial_transforms;
    int _initial_values;

    Parse *_parser;
    Compile *_compiler;

    // Initialization
    static void reset();

    // Return info being collected, select with global flag 'BytecodeParseInfo'
    int current_count(BPHType info_selector);

   public:
    BytecodeParseHistogram(Parse *p, Compile *c);
    static bool initialized();

    // Record info when starting to parse one bytecode
    void set_initial_state( Bytecodes::Code bc );
    // Record results of parsing one bytecode
    void record_change();

    // Profile printing
    static void print(float cutoff = 0.01F); // cutoff in percent
  };

 public:
  // Record work done during parsing
  BytecodeParseHistogram* _parse_histogram;
  void set_parse_histogram(BytecodeParseHistogram *bph) { _parse_histogram = bph; }
  BytecodeParseHistogram* parse_histogram() { return _parse_histogram; }
#endif

 private:
  friend class Block;

  // Variables which characterize this compilation as a whole:

  JVMState* _caller;          // JVMS which carries incoming args & state.
  float _expected_uses;       // expected number of calls to this code
  float _prof_factor;         // discount applied to my profile counts
  int _depth;                 // Inline tree depth, for debug printouts
  const TypeFunc* _tf;        // My kind of function type
  int _entry_bci;             // the osr bci or InvocationEntryBci

  ciTypeFlow* _flow;          // Results of previous flow pass.
  Block* _blocks;             // Array of basic-block structs.
  int _block_count;           // Number of elements in _blocks.

  GraphKit _exits;            // Record all normal returns and throws here.
  bool _wrote_final;          // Did we write a final field?
  bool _count_invocations;    // update and test invocation counter
  bool _method_data_update;   // update method data oop

  // Variables which track Java semantics during bytecode parsing:

  Block* _block;              // block currently getting parsed
  ciBytecodeStream _iter;     // stream of this method's bytecodes

  int _blocks_merged;         // Progress meter: state merges from BB preds
  int _blocks_parsed;         // Progress meter: BBs actually parsed

  const FastLockNode* _synch_lock; // FastLockNode for synchronized method

#ifndef PRODUCT
  int _max_switch_depth;      // Debugging SwitchRanges.
  int _est_switch_depth;      // Debugging SwitchRanges.
#endif

 public:
  // Constructor
  Parse(JVMState* caller, ciMethod* parse_method, float expected_uses);

  virtual Parse* is_Parse() const { return (Parse*)this; }

 public:
  // Accessors.
  JVMState* caller() const { return _caller; }
  float expected_uses() const { return _expected_uses; }
  float prof_factor() const { return _prof_factor; }
  int depth() const { return _depth; }
  const TypeFunc* tf() const { return _tf; }
  // entry_bci() -- see osr_bci, etc.

  ciTypeFlow* flow() const { return _flow; }
  // blocks() -- see rpo_at, start_block, etc.
  int block_count() const { return _block_count; }

  GraphKit& exits() { return _exits; }
  bool wrote_final() const { return _wrote_final; }
  void set_wrote_final(bool z) { _wrote_final = z; }
  bool count_invocations() const { return _count_invocations; }
  bool method_data_update() const { return _method_data_update; }

  Block* block() const { return _block; }
  ciBytecodeStream& iter() { return _iter; }
  Bytecodes::Code bc() const { return _iter.cur_bc(); }

  void set_block(Block* b) { _block = b; }

  // Derived accessors:
  bool is_normal_parse() const { return _entry_bci == InvocationEntryBci; }
  bool is_osr_parse() const { return _entry_bci != InvocationEntryBci; }
  int osr_bci() const { assert(is_osr_parse(),""); return _entry_bci; }

  void set_parse_bci(int bci);

  // Must this parse be aborted?
  bool failing() { return C->failing(); }

  Block* rpo_at(int rpo) {
    assert(0 <= rpo && rpo < _block_count, "oob");
    return &_blocks[rpo];
  }
  Block* start_block() {
    return rpo_at(flow()->start_block()->rpo());
  }
  // Can return NULL if the flow pass did not complete a block.
  Block* successor_for_bci(int bci) {
    return block()->successor_for_bci(bci);
  }

 private:
  // Create a JVMS & map for the initial state of this method.
  SafePointNode* create_entry_map();

  // OSR helpers
  Node *fetch_interpreter_state(int index, BasicType bt, Node *local_addrs, Node *local_addrs_base);
  Node* check_interpreter_type(Node* l, const Type* type, SafePointNode* &bad_type_exit);
  void load_interpreter_state(Node* osr_buf);

  // Functions for managing basic blocks:
  void init_blocks();
  void load_state_from(Block* b);
  void store_state_to(Block* b) { b->record_state(this); }

  // Parse all the basic blocks.
  void do_all_blocks();

  // Parse the current basic block
  void do_one_block();

  // Raise an error if we get a bad ciTypeFlow CFG.
  void handle_missing_successor(int bci);

  // first actions (before BCI 0)
  void do_method_entry();

  // implementation of monitorenter/monitorexit
  void do_monitor_enter();
  void do_monitor_exit();

  // Eagerly create phis throughout the state, to cope with back edges.
  void ensure_phis_everywhere();

  // Merge the current mapping into the basic block starting at bci
  void merge( int target_bci);
  // Same as plain merge, except that it allocates a new path number.
  void merge_new_path( int target_bci);
  // Merge the current mapping into an exception handler.
  void merge_exception(int target_bci);
  // Helper: Merge the current mapping into the given basic block
  void merge_common(Block* target, int pnum);
  // Helper functions for merging individual cells.
  PhiNode *ensure_phi( int idx, bool nocreate = false);
  PhiNode *ensure_memory_phi(int idx, bool nocreate = false);
  // Helper to merge the current memory state into the given basic block
  void merge_memory_edges(MergeMemNode* n, int pnum, bool nophi);

  // Parse this bytecode, and alter the Parser's JVM->Node mapping
  void do_one_bytecode();

  // helper function to generate array store check
  void array_store_check();
  // Helper function to generate array load
  void array_load(BasicType etype);
  // Helper function to generate array store
  void array_store(BasicType etype);
  // Helper function to compute array addressing
  Node* array_addressing(BasicType type, int vals, const Type* *result2=NULL);

  // Pass current map to exits
  void return_current(Node* value);

  // Register finalizers on return from Object.<init>
  void call_register_finalizer();

  // Insert a compiler safepoint into the graph
  void add_safepoint();

  // Insert a compiler safepoint into the graph, if there is a back-branch.
  void maybe_add_safepoint(int target_bci) {
    if (UseLoopSafepoints && target_bci <= bci()) {
      add_safepoint();
    }
  }
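  // Example (hypothetical bcis): while parsing a goto at bci 42 whose target is
  // bci 10, target_bci <= bci() holds, so the back-branch gets a safepoint; a
  // forward goto from bci 42 to bci 50 does not.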

  // Note:  Intrinsic generation routines may be found in library_call.cpp.

  // Helper function to setup Ideal Call nodes
  void do_call();

  // Helper function to uncommon-trap or bailout for non-compilable call-sites
  bool can_not_compile_call_site(ciMethod *dest_method, ciInstanceKlass *klass);

  // Helper function to identify inlining potential at call-site
  ciMethod* optimize_inlining(ciMethod* caller, int bci, ciInstanceKlass* klass,
                              ciMethod *dest_method, const TypeOopPtr* receiver_type);

  // Helper function to setup for type-profile based inlining
  bool prepare_type_profile_inline(ciInstanceKlass* prof_klass, ciMethod* prof_method);

  // Helper functions for type checking bytecodes:
  void do_checkcast();
  void do_instanceof();

  // Helper functions for shifting & arithmetic
  void modf();
  void modd();
  void l2f();

  void do_irem();

  // implementation of _get* and _put* bytecodes
  void do_getstatic() { do_field_access(true, false); }
  void do_getfield () { do_field_access(true, true); }
  void do_putstatic() { do_field_access(false, false); }
  void do_putfield () { do_field_access(false, true); }

  // common code for making initial checks and forming addresses
  void do_field_access(bool is_get, bool is_field);
  bool static_field_ok_in_clinit(ciField *field, ciMethod *method);

  // common code for actually performing the load or store
  void do_get_xxx(Node* obj, ciField* field, bool is_field);
  void do_put_xxx(Node* obj, ciField* field, bool is_field);

  // loading from a constant field or the constant pool
  // returns false if push failed (non-perm field constants only, not ldcs)
  bool push_constant(ciConstant con, bool require_constant = false);

  // implementation of object creation bytecodes
  void emit_guard_for_new(ciInstanceKlass* klass);
  void do_new();
  void do_newarray(BasicType elemtype);
  void do_anewarray();
  void do_multianewarray();
  Node* expand_multianewarray(ciArrayKlass* array_klass, Node* *lengths, int ndimensions, int nargs);

  // implementation of jsr/ret
  void do_jsr();
  void do_ret();

  float dynamic_branch_prediction(float &cnt);
  float branch_prediction(float &cnt, BoolTest::mask btest, int target_bci);
  bool seems_never_taken(float prob);
  bool seems_stable_comparison(BoolTest::mask btest, Node* c);

  void do_ifnull(BoolTest::mask btest, Node* c);
  void do_if(BoolTest::mask btest, Node* c);
  int repush_if_args();
  void adjust_map_after_if(BoolTest::mask btest, Node* c, float prob,
                           Block* path, Block* other_path);
  IfNode* jump_if_fork_int(Node* a, Node* b, BoolTest::mask mask);
  Node* jump_if_join(Node* iffalse, Node* iftrue);
  void jump_if_true_fork(IfNode *ifNode, int dest_bci_if_true, int prof_table_index);
  void jump_if_false_fork(IfNode *ifNode, int dest_bci_if_false, int prof_table_index);
  void jump_if_always_fork(int dest_bci_if_true, int prof_table_index);

  friend class SwitchRange;
  void do_tableswitch();
  void do_lookupswitch();
  void jump_switch_ranges(Node* a, SwitchRange* lo, SwitchRange* hi, int depth = 0);
  bool create_jump_tables(Node* a, SwitchRange* lo, SwitchRange* hi);

  // helper functions for methodData style profiling
  void test_counter_against_threshold(Node* cnt, int limit);
  void increment_and_test_invocation_counter(int limit);
  void test_for_osr_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize offset, int limit);
  Node* method_data_addressing(ciMethodData* md, ciProfileData* data, ByteSize offset, Node* idx = NULL, uint stride = 0);
  void increment_md_counter_at(ciMethodData* md, ciProfileData* data, ByteSize offset, Node* idx = NULL, uint stride = 0);
  void set_md_flag_at(ciMethodData* md, ciProfileData* data, int flag_constant);

  void profile_method_entry();
  void profile_taken_branch(int target_bci, bool force_update = false);
  void profile_not_taken_branch(bool force_update = false);
  void profile_call(Node* receiver);
  void profile_generic_call();
  void profile_receiver_type(Node* receiver);
  void profile_ret(int target_bci);
  void profile_null_checkcast();
  void profile_switch_case(int table_index);

  // helper function for call statistics
  void count_compiled_calls(bool at_method_entry, bool is_inline) PRODUCT_RETURN;

  Node_Notes* make_node_notes(Node_Notes* caller_nn);

  // Helper functions for handling normal and abnormal exits.
  void build_exits();

  // Fix up all exceptional control flow exiting a single bytecode.
  void do_exceptions();

  // Fix up all exiting control flow at the end of the parse.
  void do_exits();

  // Add Catch/CatchProjs
  // The call is either a Java call or the VM's rethrow stub
  void catch_call_exceptions(ciExceptionHandlerStream&);

  // Handle all exceptions thrown by the inlined method.
  // Also handles exceptions for individual bytecodes.
  void catch_inline_exceptions(SafePointNode* ex_map);

  // Merge the given map into correct exceptional exit state.
  // Assumes that there is no applicable local handler.
  void throw_to_exit(SafePointNode* ex_map);

 public:
#ifndef PRODUCT
  // Handle PrintOpto, etc.
  void show_parse_info();
  void dump_map_adr_mem() const;
  static void print_statistics(); // Print some performance counters
  void dump();
  void dump_bci(int bci);
#endif
};

#endif // SHARE_VM_OPTO_PARSE_HPP