src/share/vm/c1/c1_GraphBuilder.hpp

author       aoqi
date         Thu, 24 May 2018 18:41:44 +0800
changeset    8856:ac27a9c85bea
parent       6876:710a3c8b516e
permissions  -rw-r--r--

Merge

/*
 * Copyright (c) 1999, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_C1_C1_GRAPHBUILDER_HPP
#define SHARE_VM_C1_C1_GRAPHBUILDER_HPP

#include "c1/c1_IR.hpp"
#include "c1/c1_Instruction.hpp"
#include "c1/c1_ValueMap.hpp"
#include "c1/c1_ValueStack.hpp"
#include "ci/ciMethodData.hpp"
#include "ci/ciStreams.hpp"
#include "compiler/compileLog.hpp"

class MemoryBuffer;

class GraphBuilder VALUE_OBJ_CLASS_SPEC {
 private:
  // Per-scope data. These are pushed and popped as we descend into
  // inlined methods. Currently in order to generate good code in the
  // inliner we have to attempt to inline methods directly into the
  // basic block we are parsing; this adds complexity.
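  //
  // For illustration only (an assumed call sequence; see push_scope(),
  // pop_scope() and try_inline_full() declared further down): when the
  // inliner decides to inline a callee it roughly does
  //   push_scope(callee, continuation);  // new ScopeData becomes current
  //   ... parse the callee's bytecodes ...
  //   pop_scope();                       // caller's ScopeData is restored
  // so the per-scope state below nests with the inlining depth.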
  class ScopeData: public CompilationResourceObj {
   private:
    ScopeData*        _parent;
    // bci-to-block mapping
    BlockList*        _bci2block;
    // Scope
    IRScope*          _scope;
    // Whether this scope or any parent scope has exception handlers
    bool              _has_handler;
    // The bytecodes
    ciBytecodeStream* _stream;

    // Work list
    BlockList*        _work_list;

    // Maximum inline size for this scope
    intx              _max_inline_size;
    // Expression stack depth at point where inline occurred
    int               _caller_stack_size;

    // The continuation point for the inline. Currently only used in
    // multi-block inlines, but eventually we would like to use this for
    // all inlines for uniformity and simplicity; in that case we would
    // get the continuation point from the BlockList instead of
    // fabricating it anew, because Invokes would be considered to be
    // BlockEnds.
    BlockBegin*       _continuation;

    // Was this ScopeData created only for the parsing and inlining of
    // a jsr?
    bool              _parsing_jsr;
    // We track the destination bci of the jsr only to determine
    // bailout conditions, since we only handle a subset of all of the
    // possible jsr-ret control structures. Recursive invocations of a
    // jsr are disallowed by the verifier.
    int               _jsr_entry_bci;
    // We need to track the local variable in which the return address
    // was stored to ensure we can handle inlining the jsr, because we
    // don't handle arbitrary jsr/ret constructs.
    int               _jsr_ret_addr_local;
    // If we are parsing a jsr, the continuation point for rets
    BlockBegin*       _jsr_continuation;
    // Cloned XHandlers for jsr-related ScopeDatas
    XHandlers*        _jsr_xhandlers;

    // Number of returns seen in this scope
    int               _num_returns;

    // In order to generate profitable code for inlining, we currently
    // have to perform an optimization for single-block inlined
    // methods where we continue parsing into the same block. This
    // allows us to perform CSE across inlined scopes and to avoid
    // storing parameters to the stack. Having a global register
    // allocator and being able to perform global CSE would allow this
    // code to be removed and thereby simplify the inliner.
    BlockBegin*       _cleanup_block;       // The block to which the return was added
    Instruction*      _cleanup_return_prev; // Instruction before return instruction
    ValueStack*       _cleanup_state;       // State of that block (not yet pinned)
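    //
    // For illustration only (an assumed usage pattern; see
    // set_inline_cleanup_info() and restore_inline_cleanup_info() on
    // GraphBuilder below): at the first return of a single-block callee the
    // inliner records the current block, last instruction and state here,
    // and can later restore them so that the caller's bytecodes keep being
    // appended into that same block rather than into a fresh continuation
    // block.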

   public:
    ScopeData(ScopeData* parent);

    ScopeData* parent() const                      { return _parent; }

    BlockList* bci2block() const                   { return _bci2block; }
    void       set_bci2block(BlockList* bci2block) { _bci2block = bci2block; }

    // NOTE: this has a different effect when parsing jsrs
    BlockBegin* block_at(int bci);

    IRScope* scope() const                         { return _scope; }
    // Has side-effect of setting has_handler flag
    void set_scope(IRScope* scope);

    // Whether this or any parent scope has exception handlers
    bool has_handler() const                       { return _has_handler; }
    void set_has_handler()                         { _has_handler = true; }

    // Exception handlers list to be used for this scope
    XHandlers* xhandlers() const;

    // How to get a block to be parsed
    void add_to_work_list(BlockBegin* block);
    // How to remove the next block to be parsed; returns NULL if none left
    BlockBegin* remove_from_work_list();
    // Indicates parse is over
    bool is_work_list_empty() const;

    ciBytecodeStream* stream()                     { return _stream; }
    void set_stream(ciBytecodeStream* stream)      { _stream = stream; }

    intx max_inline_size() const                   { return _max_inline_size; }

    BlockBegin* continuation() const               { return _continuation; }
    void set_continuation(BlockBegin* cont)        { _continuation = cont; }

    // Indicates whether this ScopeData was pushed only for the
    // parsing and inlining of a jsr
    bool parsing_jsr() const                       { return _parsing_jsr; }
    void set_parsing_jsr()                         { _parsing_jsr = true; }
    int  jsr_entry_bci() const                     { return _jsr_entry_bci; }
    void set_jsr_entry_bci(int bci)                { _jsr_entry_bci = bci; }
    void set_jsr_return_address_local(int local_no) { _jsr_ret_addr_local = local_no; }
    int  jsr_return_address_local() const          { return _jsr_ret_addr_local; }
    // Must be called after scope is set up for jsr ScopeData
    void setup_jsr_xhandlers();

    // The jsr continuation is only used when parsing_jsr is true, and
    // is different from the "normal" continuation since we can end up
    // doing a return (rather than a ret) from within a subroutine
    BlockBegin* jsr_continuation() const           { return _jsr_continuation; }
    void set_jsr_continuation(BlockBegin* cont)    { _jsr_continuation = cont; }

    int num_returns();
    void incr_num_returns();

    void set_inline_cleanup_info(BlockBegin* block,
                                 Instruction* return_prev,
                                 ValueStack* return_state);
    BlockBegin*  inline_cleanup_block() const       { return _cleanup_block; }
    Instruction* inline_cleanup_return_prev() const { return _cleanup_return_prev; }
    ValueStack*  inline_cleanup_state() const       { return _cleanup_state; }
  };

  // for all GraphBuilders
  static bool       _can_trap[Bytecodes::number_of_java_codes];

  // for each instance of GraphBuilder
  ScopeData*        _scope_data;           // Per-scope data; used for inlining
  Compilation*      _compilation;          // the current compilation
  ValueMap*         _vmap;                 // the map of values encountered (for CSE)
  MemoryBuffer*     _memory;
  const char*       _inline_bailout_msg;   // non-null if most recent inline attempt failed
  int               _instruction_count;    // for bailing out in pathological jsr/ret cases
  BlockBegin*       _start;                // the start block
  BlockBegin*       _osr_entry;            // the osr entry block
  ValueStack*       _initial_state;        // The state for the start block

  // for each call to connect_to_end; can also be set by inliner
  BlockBegin*       _block;                // the current block
  ValueStack*       _state;                // the current execution state
  Instruction*      _last;                 // the last instruction added
  bool              _skip_block;           // skip processing of the rest of this block

  // accessors
  ScopeData*        scope_data() const     { return _scope_data; }
  Compilation*      compilation() const    { return _compilation; }
  BlockList*        bci2block() const      { return scope_data()->bci2block(); }
  ValueMap*         vmap() const           { assert(UseLocalValueNumbering, "should not access otherwise"); return _vmap; }
  bool              has_handler() const    { return scope_data()->has_handler(); }

  BlockBegin*       block() const          { return _block; }
  ValueStack*       state() const          { return _state; }
  void              set_state(ValueStack* state) { _state = state; }
  IRScope*          scope() const          { return scope_data()->scope(); }
  ciMethod*         method() const         { return scope()->method(); }
  ciBytecodeStream* stream() const         { return scope_data()->stream(); }
  Instruction*      last() const           { return _last; }
  Bytecodes::Code   code() const           { return stream()->cur_bc(); }
  int               bci() const            { return stream()->cur_bci(); }
  int               next_bci() const       { return stream()->next_bci(); }

  // unified bailout support
  void bailout(const char* msg) const      { compilation()->bailout(msg); }
  bool bailed_out() const                  { return compilation()->bailed_out(); }

  // stack manipulation helpers
  void ipush(Value t) const                { state()->ipush(t); }
  void lpush(Value t) const                { state()->lpush(t); }
  void fpush(Value t) const                { state()->fpush(t); }
  void dpush(Value t) const                { state()->dpush(t); }
  void apush(Value t) const                { state()->apush(t); }
  void push(ValueType* type, Value t) const { state()->push(type, t); }

  Value ipop()                             { return state()->ipop(); }
  Value lpop()                             { return state()->lpop(); }
  Value fpop()                             { return state()->fpop(); }
  Value dpop()                             { return state()->dpop(); }
  Value apop()                             { return state()->apop(); }
  Value pop(ValueType* type)               { return state()->pop(type); }

  // instruction helpers
  void load_constant();
  void load_local(ValueType* type, int index);
  void store_local(ValueType* type, int index);
  void store_local(ValueStack* state, Value value, int index);
  void load_indexed (BasicType type);
  void store_indexed(BasicType type);
  void stack_op(Bytecodes::Code code);
  void arithmetic_op(ValueType* type, Bytecodes::Code code, ValueStack* state_before = NULL);
  void negate_op(ValueType* type);
  void shift_op(ValueType* type, Bytecodes::Code code);
  void logic_op(ValueType* type, Bytecodes::Code code);
  void compare_op(ValueType* type, Bytecodes::Code code);
  void convert(Bytecodes::Code op, BasicType from, BasicType to);
  void increment();
  void _goto(int from_bci, int to_bci);
  void if_node(Value x, If::Condition cond, Value y, ValueStack* stack_before);
  void if_zero(ValueType* type, If::Condition cond);
  void if_null(ValueType* type, If::Condition cond);
  void if_same(ValueType* type, If::Condition cond);
  void jsr(int dest);
  void ret(int local_index);
  void table_switch();
  void lookup_switch();
  void method_return(Value x);
  void call_register_finalizer();
  void access_field(Bytecodes::Code code);
  void invoke(Bytecodes::Code code);
  void new_instance(int klass_index);
  void new_type_array();
  void new_object_array();
  void check_cast(int klass_index);
  void instance_of(int klass_index);
  void monitorenter(Value x, int bci);
  void monitorexit(Value x, int bci);
  void new_multi_array(int dimensions);
  void throw_op(int bci);
  Value round_fp(Value fp_value);

  // stack/code manipulation helpers
  Instruction* append_with_bci(Instruction* instr, int bci);
  Instruction* append(Instruction* instr);
  Instruction* append_split(StateSplit* instr);

  // other helpers
  BlockBegin*  block_at(int bci)           { return scope_data()->block_at(bci); }
  XHandlers*   handle_exception(Instruction* instruction);
  void connect_to_end(BlockBegin* beg);
  void null_check(Value value);
  void eliminate_redundant_phis(BlockBegin* start);
  BlockEnd* iterate_bytecodes_for_block(int bci);
  void iterate_all_blocks(bool start_in_current_block_for_inlining = false);
  Dependencies* dependency_recorder() const; // = compilation()->dependencies()
  bool direct_compare(ciKlass* k);

  void kill_all();

  // Use of the state copy routines (try to minimize unnecessary state
  // object allocations):

  // - if the instruction unconditionally needs a full copy of the
  //   state (for patching, for example), then use copy_state_before*

  // - if the instruction needs a full copy of the state only for
  //   exception handler generation (Instruction::needs_exception_state()
  //   returns false), then use copy_state_exhandling*

  // - if the instruction needs both a full copy of the state for
  //   exception handler generation and at least a minimal copy of the
  //   state (as returned by Instruction::exception_state()) for debug
  //   info generation (that is, when Instruction::needs_exception_state()
  //   returns true), then use copy_state_for_exception*

  ValueStack* copy_state_before_with_bci(int bci);
  ValueStack* copy_state_before();
  ValueStack* copy_state_exhandling_with_bci(int bci);
  ValueStack* copy_state_exhandling();
  ValueStack* copy_state_for_exception_with_bci(int bci);
  ValueStack* copy_state_for_exception();
  ValueStack* copy_state_if_bb(bool is_bb) { return (is_bb || compilation()->is_optimistic()) ? copy_state_before() : NULL; }
  ValueStack* copy_state_indexed_access() { return compilation()->is_optimistic() ? copy_state_before() : copy_state_for_exception(); }
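  //
  // For illustration only (an assumed example; the exact call sites live in
  // c1_GraphBuilder.cpp and may differ): a bytecode helper that may require
  // patching would capture the full state up front,
  //   ValueStack* state_before = copy_state_before();
  // one that only needs state when exception handlers are present would use
  //   ValueStack* state_before = copy_state_exhandling();
  // and one that must also describe the state for debug info would use
  //   ValueStack* state_before = copy_state_for_exception();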

  //
  // Inlining support
  //

  // accessors
  bool parsing_jsr() const                               { return scope_data()->parsing_jsr(); }
  BlockBegin* continuation() const                       { return scope_data()->continuation(); }
  BlockBegin* jsr_continuation() const                   { return scope_data()->jsr_continuation(); }
  void set_continuation(BlockBegin* continuation)        { scope_data()->set_continuation(continuation); }
  void set_inline_cleanup_info(BlockBegin* block,
                               Instruction* return_prev,
                               ValueStack* return_state) { scope_data()->set_inline_cleanup_info(block,
                                                                                                  return_prev,
                                                                                                  return_state); }
  void set_inline_cleanup_info() {
    set_inline_cleanup_info(_block, _last, _state);
  }
  BlockBegin*  inline_cleanup_block() const              { return scope_data()->inline_cleanup_block(); }
  Instruction* inline_cleanup_return_prev() const        { return scope_data()->inline_cleanup_return_prev(); }
  ValueStack*  inline_cleanup_state() const              { return scope_data()->inline_cleanup_state(); }
  void restore_inline_cleanup_info() {
    _block = inline_cleanup_block();
    _last  = inline_cleanup_return_prev();
    _state = inline_cleanup_state();
  }
  void incr_num_returns()                                { scope_data()->incr_num_returns(); }
  int  num_returns() const                               { return scope_data()->num_returns(); }
  intx max_inline_size() const                           { return scope_data()->max_inline_size(); }
  int  inline_level() const                              { return scope()->level(); }
  int  recursive_inline_level(ciMethod* callee) const;

  // inlining of synchronized methods
  void inline_sync_entry(Value lock, BlockBegin* sync_handler);
  void fill_sync_handler(Value lock, BlockBegin* sync_handler, bool default_handler = false);

  // inliners
  bool try_inline(           ciMethod* callee, bool holder_known, Bytecodes::Code bc = Bytecodes::_illegal, Value receiver = NULL);
  bool try_inline_intrinsics(ciMethod* callee);
  bool try_inline_full(      ciMethod* callee, bool holder_known, Bytecodes::Code bc = Bytecodes::_illegal, Value receiver = NULL);
  bool try_inline_jsr(int jsr_dest_bci);

  const char* check_can_parse(ciMethod* callee) const;
  const char* should_not_inline(ciMethod* callee) const;

  // JSR 292 support
  bool try_method_handle_inline(ciMethod* callee);

  // helpers
  void inline_bailout(const char* msg);
  BlockBegin* header_block(BlockBegin* entry, BlockBegin::Flag f, ValueStack* state);
  BlockBegin* setup_start_block(int osr_bci, BlockBegin* std_entry, BlockBegin* osr_entry, ValueStack* init_state);
  void setup_osr_entry_block();
  void clear_inline_bailout();
  ValueStack* state_at_entry();
  void push_root_scope(IRScope* scope, BlockList* bci2block, BlockBegin* start);
  void push_scope(ciMethod* callee, BlockBegin* continuation);
  void push_scope_for_jsr(BlockBegin* jsr_continuation, int jsr_dest_bci);
  void pop_scope();
  void pop_scope_for_jsr();

  bool append_unsafe_get_obj(ciMethod* callee, BasicType t, bool is_volatile);
  bool append_unsafe_put_obj(ciMethod* callee, BasicType t, bool is_volatile);
  bool append_unsafe_get_raw(ciMethod* callee, BasicType t);
  bool append_unsafe_put_raw(ciMethod* callee, BasicType t);
  bool append_unsafe_prefetch(ciMethod* callee, bool is_store, bool is_static);
  void append_unsafe_CAS(ciMethod* callee);
  bool append_unsafe_get_and_set_obj(ciMethod* callee, bool is_add);

  void print_inlining(ciMethod* callee, const char* msg = NULL, bool success = true);

  void profile_call(ciMethod* callee, Value recv, ciKlass* predicted_holder, Values* obj_args, bool inlined);
  void profile_return_type(Value ret, ciMethod* callee, ciMethod* m = NULL, int bci = -1);
  void profile_invocation(ciMethod* inlinee, ValueStack* state);

  // Shortcuts to profiling control.
  bool is_profiling()          { return _compilation->is_profiling(); }
  bool count_invocations()     { return _compilation->count_invocations(); }
  bool count_backedges()       { return _compilation->count_backedges(); }
  bool profile_branches()      { return _compilation->profile_branches(); }
  bool profile_calls()         { return _compilation->profile_calls(); }
  bool profile_inlined_calls() { return _compilation->profile_inlined_calls(); }
  bool profile_checkcasts()    { return _compilation->profile_checkcasts(); }
  bool profile_parameters()    { return _compilation->profile_parameters(); }
  bool profile_arguments()     { return _compilation->profile_arguments(); }
  bool profile_return()        { return _compilation->profile_return(); }

  Values* args_list_for_profiling(ciMethod* target, int& start, bool may_have_receiver);
  Values* collect_args_for_profiling(Values* args, ciMethod* target, bool may_have_receiver);
  void    check_args_for_profiling(Values* obj_args, int expected);

 public:
  NOT_PRODUCT(void print_stats();)

  // initialization
  static void initialize();

  // public
  static bool can_trap(ciMethod* method, Bytecodes::Code code) {
    assert(0 <= code && code < Bytecodes::number_of_java_codes, "illegal bytecode");
    if (_can_trap[code]) return true;
    // special handling for finalizer registration
    return code == Bytecodes::_return && method->intrinsic_id() == vmIntrinsics::_Object_init;
  }

  // creation
  GraphBuilder(Compilation* compilation, IRScope* scope);
  static void sort_top_into_worklist(BlockList* worklist, BlockBegin* top);

  BlockBegin* start() const { return _start; }
};

#endif // SHARE_VM_C1_C1_GRAPHBUILDER_HPP
