Sat, 02 Apr 2011 10:54:15 -0700
7004535: Clone loop predicate during loop unswitch
Summary: Clone loop predicate for cloned loops
Reviewed-by: never
/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#ifndef SHARE_VM_OPTO_COMPILE_HPP
#define SHARE_VM_OPTO_COMPILE_HPP

#include "asm/codeBuffer.hpp"
#include "ci/compilerInterface.hpp"
#include "code/debugInfoRec.hpp"
#include "code/exceptionHandlerTable.hpp"
#include "compiler/compilerOracle.hpp"
#include "libadt/dict.hpp"
#include "libadt/port.hpp"
#include "libadt/vectset.hpp"
#include "memory/resourceArea.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/phase.hpp"
#include "opto/regmask.hpp"
#include "runtime/deoptimization.hpp"
#include "runtime/vmThread.hpp"
class Block;
class Bundle;
class C2Compiler;
class CallGenerator;
class ConnectionGraph;
class InlineTree;
class Int_Array;
class Matcher;
class MachConstantNode;
class MachConstantBaseNode;
class MachNode;
class MachOper;
class MachSafePointNode;
class Node;
class Node_Array;
class Node_Notes;
class OptoReg;
class PhaseCFG;
class PhaseGVN;
class PhaseIterGVN;
class PhaseRegAlloc;
class PhaseCCP;
class PhaseCCP_DCE;
class RootNode;
class relocInfo;
class Scope;
class StartNode;
class SafePointNode;
class JVMState;
class TypeData;
class TypePtr;
class TypeFunc;
class Unique_Node_List;
class nmethod;
class WarmCallInfo;
//------------------------------Compile----------------------------------------
// This class defines a top-level Compiler invocation.

class Compile : public Phase {
 public:
  // Fixed alias indexes.  (See also MergeMemNode.)
  enum {
    AliasIdxTop = 1,  // pseudo-index, aliases to nothing (used as sentinel value)
    AliasIdxBot = 2,  // pseudo-index, aliases to everything
    AliasIdxRaw = 3   // hard-wired index for TypeRawPtr::BOTTOM
  };
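
  // Illustration (hypothetical sketch, not part of the original header): raw
  // memory always flattens to the fixed AliasIdxRaw slice, so the index that
  // get_alias_index() returns for TypeRawPtr::BOTTOM is a compile-time
  // constant.
  //
  //   Compile* C = Compile::current();
  //   int raw_idx = C->get_alias_index(TypeRawPtr::BOTTOM);
  //   assert(raw_idx == Compile::AliasIdxRaw, "raw memory has a hard-wired slice");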
  // Variant of TraceTime(NULL, &_t_accumulator, TimeCompiler);
  // Integrated with logging.  If logging is turned on, and dolog is true,
  // then brackets are put into the log, with time stamps and node counts.
  // (The time collection itself is always conditionalized on TimeCompiler.)
  class TracePhase : public TraceTime {
   private:
    Compile*    C;
    CompileLog* _log;
   public:
    TracePhase(const char* name, elapsedTimer* accumulator, bool dolog);
    ~TracePhase();
  };
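
  // Usage sketch (hypothetical accumulator name, for illustration): a scoped
  // timer that also emits log brackets with time stamps and node counts.
  //
  //   {
  //     TracePhase t("optimizer", &_t_optimizer, TimeCompiler);
  //     ...  // phase body; timing and log bracket close when t is destroyed
  //   }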
  // Information per category of alias (memory slice)
  class AliasType {
   private:
    friend class Compile;

    int             _index;          // unique index, used with MergeMemNode
    const TypePtr*  _adr_type;       // normalized address type
    ciField*        _field;          // relevant instance field, or null if none
    bool            _is_rewritable;  // false if the memory is write-once only
    int             _general_index;  // if this type is an instance, the general
                                     // type that this is an instance of

    void Init(int i, const TypePtr* at);

   public:
    int             index()         const { return _index; }
    const TypePtr*  adr_type()      const { return _adr_type; }
    ciField*        field()         const { return _field; }
    bool            is_rewritable() const { return _is_rewritable; }
    bool            is_volatile()   const { return (_field ? _field->is_volatile() : false); }
    int             general_index() const { return (_general_index != 0) ? _general_index : _index; }

    void set_rewritable(bool z) { _is_rewritable = z; }
    void set_field(ciField* f) {
      assert(!_field, "");
      _field = f;
      if (f->is_final()) _is_rewritable = false;
    }

    void print_on(outputStream* st) PRODUCT_RETURN;
  };
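
  // Illustration (hypothetical sketch): each distinct memory slice gets one
  // AliasType; a slice backed by a final field is marked non-rewritable, so a
  // load from it can never observe a later store.
  //
  //   AliasType* at = C->alias_type(adr_type, field);  // intern the slice
  //   if (!at->is_rewritable()) {
  //     ...  // write-once memory: safe to hoist loads out of loops
  //   }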
  enum {
    logAliasCacheSize = 6,
    AliasCacheSize = (1<<logAliasCacheSize)
  };
  struct AliasCacheEntry { const TypePtr* _adr_type; int _index; };  // simple duple type
  enum {
    trapHistLength = methodDataOopDesc::_trap_hist_limit
  };

  // Constant entry of the constant table.
  class Constant {
   private:
    BasicType _type;
    jvalue    _value;
    int       _offset;         // offset of this constant (in bytes) relative to the constant table base.
    bool      _can_be_reused;  // true (default) if the value can be shared with other users.

   public:
    Constant() : _type(T_ILLEGAL), _offset(-1), _can_be_reused(true) { _value.l = 0; }
    Constant(BasicType type, jvalue value, bool can_be_reused = true) :
      _type(type),
      _value(value),
      _offset(-1),
      _can_be_reused(can_be_reused)
    {}

    bool operator==(const Constant& other);

    BasicType type()      const { return _type; }

    jlong   get_jlong()   const { return _value.j; }
    jfloat  get_jfloat()  const { return _value.f; }
    jdouble get_jdouble() const { return _value.d; }
    jobject get_jobject() const { return _value.l; }

    int     offset()      const    { return _offset; }
    void    set_offset(int offset) { _offset = offset; }

    bool    can_be_reused() const  { return _can_be_reused; }
  };

  // Constant table.
  class ConstantTable {
   private:
    GrowableArray<Constant> _constants;          // Constants of this table.
    int                     _size;               // Size in bytes the emitted constant table takes (including padding).
    int                     _table_base_offset;  // Offset of the table base that gets added to the constant offsets.

   public:
    ConstantTable() :
      _size(-1),
      _table_base_offset(-1)  // We can use -1 here since the constant table is always bigger than 2 bytes (-(size / 2), see MachConstantBaseNode::emit).
    {}

    int size() const { assert(_size != -1, "size not yet calculated"); return _size; }

    void set_table_base_offset(int x)  { assert(_table_base_offset == -1, "set only once"); _table_base_offset = x; }
    int      table_base_offset() const { assert(_table_base_offset != -1, "table base offset not yet set"); return _table_base_offset; }

    void emit(CodeBuffer& cb);

    // Returns the offset of the last entry (the top) of the constant table.
    int  top_offset() const { assert(_constants.top().offset() != -1, "constant not yet bound"); return _constants.top().offset(); }

    void calculate_offsets_and_size();
    int  find_offset(Constant& con) const;

    void     add(Constant& con);
    Constant add(BasicType type, jvalue value);
    Constant add(MachOper* oper);
    Constant add(jfloat f) {
      jvalue value; value.f = f;
      return add(T_FLOAT, value);
    }
    Constant add(jdouble d) {
      jvalue value; value.d = d;
      return add(T_DOUBLE, value);
    }

    // Jump table
    Constant allocate_jump_table(MachConstantNode* n);
    void     fill_jump_table(CodeBuffer& cb, MachConstantNode* n, GrowableArray<Label*> labels) const;
  };
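
  // Usage sketch (hypothetical): floating-point literals are materialized
  // through the per-compile constant table rather than as immediates; equal
  // reusable values end up sharing one entry.
  //
  //   jfloat f = 1.5f;
  //   Constant con = C->constant_table().add(f);  // deduplicated if reusable
  //   ...  // con's offset is bound later by calculate_offsets_and_size()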
 private:
  // Fixed parameters to this compilation.
  const int             _compile_id;
  const bool            _save_argument_registers;  // save/restore arg regs for trampolines
  const bool            _subsume_loads;            // Load can be matched as part of a larger op.
  const bool            _do_escape_analysis;       // Do escape analysis.
  ciMethod*             _method;                   // The method being compiled.
  int                   _entry_bci;                // entry bci for osr methods.
  const TypeFunc*       _tf;                       // My kind of signature
  InlineTree*           _ilt;                      // Ditto (temporary).
  address               _stub_function;            // VM entry for stub being compiled, or NULL
  const char*           _stub_name;                // Name of stub or adapter being compiled, or NULL
  address               _stub_entry_point;         // Compile code entry for generated stub, or NULL

  // Control of this compilation.
  int                   _num_loop_opts;            // Number of iterations for doing loop optimizations
  int                   _max_inline_size;          // Max inline size for this compilation
  int                   _freq_inline_size;         // Max hot method inline size for this compilation
  int                   _fixed_slots;              // count of frame slots not allocated by the register
                                                   // allocator i.e. locks, original deopt pc, etc.
  // For deopt
  int                   _orig_pc_slot;
  int                   _orig_pc_slot_offset_in_bytes;

  int                   _major_progress;           // Count of something big happening
  bool                  _has_loops;                // True if the method _may_ have some loops
  bool                  _has_split_ifs;            // True if the method _may_ have some split-if
  bool                  _has_unsafe_access;        // True if the method _may_ produce faults in unsafe loads or stores.
  bool                  _has_stringbuilder;        // True if StringBuffers or StringBuilders are allocated
  uint                  _trap_hist[trapHistLength];  // Cumulative traps
  bool                  _trap_can_recompile;       // Have we emitted a recompiling trap?
  uint                  _decompile_count;          // Cumulative decompilation counts.
  bool                  _do_inlining;              // True if we intend to do inlining
  bool                  _do_scheduling;            // True if we intend to do scheduling
  bool                  _do_freq_based_layout;     // True if we intend to do frequency based block layout
  bool                  _do_count_invocations;     // True if we generate code to count invocations
  bool                  _do_method_data_update;    // True if we generate code to update methodDataOops
  int                   _AliasLevel;               // Locally-adjusted version of AliasLevel flag.
  bool                  _print_assembly;           // True if we should dump assembly code for this compilation
#ifndef PRODUCT
  bool                  _trace_opto_output;
  bool                  _parsed_irreducible_loop;  // True if ciTypeFlow detected irreducible loops during parsing
#endif

  // JSR 292
  bool                  _has_method_handle_invokes;  // True if this method has MethodHandle invokes.

  // Compilation environment.
  Arena                 _comp_arena;               // Arena with lifetime equivalent to Compile
  ciEnv*                _env;                      // CI interface
  CompileLog*           _log;                      // from CompilerThread
  const char*           _failure_reason;           // for record_failure/failing pattern
  GrowableArray<CallGenerator*>* _intrinsics;      // List of intrinsics.
  GrowableArray<Node*>* _macro_nodes;              // List of nodes which need to be expanded before matching.
  GrowableArray<Node*>* _predicate_opaqs;          // List of Opaque1 nodes for the loop predicates.
  ConnectionGraph*      _congraph;
#ifndef PRODUCT
  IdealGraphPrinter*    _printer;
#endif

  // Node management
  uint                  _unique;                   // Counter for unique Node indices
  debug_only(static int _debug_idx;)               // Monotonic counter (not reset), use -XX:BreakAtNode=<idx>
  Arena                 _node_arena;               // Arena for new-space Nodes
  Arena                 _old_arena;                // Arena for old-space Nodes, lifetime during xform
  RootNode*             _root;                     // Unique root of compilation, or NULL after bail-out.
  Node*                 _top;                      // Unique top node.  (Reset by various phases.)

  Node*                 _immutable_memory;         // Initial memory state

  Node*                 _recent_alloc_obj;
  Node*                 _recent_alloc_ctl;

  // Constant table
  ConstantTable         _constant_table;           // The constant table for this compile.
  MachConstantBaseNode* _mach_constant_base_node;  // Constant table base node singleton.

  // Blocked array of debugging and profiling information,
  // tracked per node.
  enum { _log2_node_notes_block_size = 8,
         _node_notes_block_size = (1<<_log2_node_notes_block_size)
  };
  GrowableArray<Node_Notes*>* _node_note_array;
  Node_Notes*           _default_node_notes;       // default notes for new nodes

  // After parsing and every bulk phase we hang onto the Root instruction.
  // The RootNode instruction is where the whole program begins.  It produces
  // the initial Control and BOTTOM for everybody else.

  // Type management
  Arena                 _Compile_types;            // Arena for all types
  Arena*                _type_arena;               // Alias for _Compile_types except in Initialize_shared()
  Dict*                 _type_dict;                // Intern table
  void*                 _type_hwm;                 // Last allocation (see Type::operator new/delete)
  size_t                _type_last_size;           // Last allocation size (see Type::operator new/delete)
  ciMethod*             _last_tf_m;                // Cache for
  const TypeFunc*       _last_tf;                  //  TypeFunc::make
  AliasType**           _alias_types;              // List of alias types seen so far.
  int                   _num_alias_types;          // Logical length of _alias_types
  int                   _max_alias_types;          // Physical length of _alias_types
  AliasCacheEntry       _alias_cache[AliasCacheSize];  // Gets aliases w/o data structure walking

  // Parsing, optimization
  PhaseGVN*             _initial_gvn;              // Results of parse-time PhaseGVN
  Unique_Node_List*     _for_igvn;                 // Initial work-list for next round of Iterative GVN
  WarmCallInfo*         _warm_calls;               // Sorted work-list for heat-based inlining.

  GrowableArray<CallGenerator*> _late_inlines;     // List of CallGenerators to be revisited after
                                                   // main parsing has finished.

  // Matching, CFG layout, allocation, code generation
  PhaseCFG*             _cfg;                      // Results of CFG finding
  bool                  _select_24_bit_instr;      // We selected an instruction with a 24-bit result
  bool                  _in_24_bit_fp_mode;        // We are emitting instructions with 24-bit results
  int                   _java_calls;               // Number of java calls in the method
  int                   _inner_loops;              // Number of inner loops in the method
  Matcher*              _matcher;                  // Engine to map ideal to machine instructions
  PhaseRegAlloc*        _regalloc;                 // Results of register allocation.
  int                   _frame_slots;              // Size of total frame in stack slots
  CodeOffsets           _code_offsets;             // Offsets into the code for various interesting entries
  RegMask               _FIRST_STACK_mask;         // All stack slots usable for spills (depends on frame layout)
  Arena*                _indexSet_arena;           // control IndexSet allocation within PhaseChaitin
  void*                 _indexSet_free_block_list; // free list of IndexSet bit blocks

  uint                  _node_bundling_limit;
  Bundle*               _node_bundling_base;       // Information for instruction bundling

  // Instruction bits passed off to the VM
  int                   _method_size;              // Size of nmethod code segment in bytes
  CodeBuffer            _code_buffer;              // Where the code is assembled
  int                   _first_block_size;         // Size of unvalidated entry point code / OSR poison code
  ExceptionHandlerTable _handler_table;            // Table of native-code exception handlers
  ImplicitExceptionTable _inc_table;               // Table of implicit null checks in native code
  OopMapSet*            _oop_map_set;              // Table of oop maps (one for each safepoint location)
  static int            _CompiledZap_count;        // counter compared against CompileZap[First/Last]
  BufferBlob*           _scratch_buffer_blob;      // For temporary code buffers.
  relocInfo*            _scratch_locs_memory;      // For temporary code buffers.
  int                   _scratch_const_size;       // For temporary code buffers.
  bool                  _in_scratch_emit_size;     // true when in scratch_emit_size.

 public:
  // Accessors

  // The Compile instance currently active in this (compiler) thread.
  static Compile* current() {
    return (Compile*) ciEnv::current()->compiler_data();
  }
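
  // Usage sketch (for illustration): any code running on a compiler thread
  // can reach the active compilation through this accessor.
  //
  //   Compile* C = Compile::current();
  //   if (C->failing())  return;   // bail out of the current phase early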
  // ID for this compilation.  Useful for setting breakpoints in the debugger.
  int               compile_id() const          { return _compile_id; }

  // Does this compilation allow instructions to subsume loads?  User
  // instructions that subsume a load may result in an unschedulable
  // instruction sequence.
  bool              subsume_loads() const       { return _subsume_loads; }
  // Do escape analysis.
  bool              do_escape_analysis() const  { return _do_escape_analysis; }
  bool              save_argument_registers() const { return _save_argument_registers; }

  // Other fixed compilation parameters.
  ciMethod*         method() const              { return _method; }
  int               entry_bci() const           { return _entry_bci; }
  bool              is_osr_compilation() const  { return _entry_bci != InvocationEntryBci; }
  bool              is_method_compilation() const { return (_method != NULL && !_method->flags().is_native()); }
  const TypeFunc*   tf() const                  { assert(_tf != NULL, ""); return _tf; }
  void          init_tf(const TypeFunc* tf)     { assert(_tf == NULL, ""); _tf = tf; }
  InlineTree*       ilt() const                 { return _ilt; }
  address           stub_function() const       { return _stub_function; }
  const char*       stub_name() const           { return _stub_name; }
  address           stub_entry_point() const    { return _stub_entry_point; }

  // Control of this compilation.
  int               fixed_slots() const         { assert(_fixed_slots >= 0, ""); return _fixed_slots; }
  void          set_fixed_slots(int n)          { _fixed_slots = n; }
  int               major_progress() const      { return _major_progress; }
  void          set_major_progress()            { _major_progress++; }
  void        clear_major_progress()            { _major_progress = 0; }
  int               num_loop_opts() const       { return _num_loop_opts; }
  void          set_num_loop_opts(int n)        { _num_loop_opts = n; }
  int               max_inline_size() const     { return _max_inline_size; }
  void          set_freq_inline_size(int n)     { _freq_inline_size = n; }
  int               freq_inline_size() const    { return _freq_inline_size; }
  void          set_max_inline_size(int n)      { _max_inline_size = n; }
  bool              has_loops() const           { return _has_loops; }
  void          set_has_loops(bool z)           { _has_loops = z; }
  bool              has_split_ifs() const       { return _has_split_ifs; }
  void          set_has_split_ifs(bool z)       { _has_split_ifs = z; }
  bool              has_unsafe_access() const   { return _has_unsafe_access; }
  void          set_has_unsafe_access(bool z)   { _has_unsafe_access = z; }
  bool              has_stringbuilder() const   { return _has_stringbuilder; }
  void          set_has_stringbuilder(bool z)   { _has_stringbuilder = z; }
  void          set_trap_count(uint r, uint c)  { assert(r < trapHistLength, "oob"); _trap_hist[r] = c; }
  uint              trap_count(uint r) const    { assert(r < trapHistLength, "oob"); return _trap_hist[r]; }
  bool              trap_can_recompile() const  { return _trap_can_recompile; }
  void          set_trap_can_recompile(bool z)  { _trap_can_recompile = z; }
  uint              decompile_count() const     { return _decompile_count; }
  void          set_decompile_count(uint c)     { _decompile_count = c; }
  bool              allow_range_check_smearing() const;
  bool              do_inlining() const         { return _do_inlining; }
  void          set_do_inlining(bool z)         { _do_inlining = z; }
  bool              do_scheduling() const       { return _do_scheduling; }
  void          set_do_scheduling(bool z)       { _do_scheduling = z; }
  bool              do_freq_based_layout() const { return _do_freq_based_layout; }
  void          set_do_freq_based_layout(bool z) { _do_freq_based_layout = z; }
  bool              do_count_invocations() const { return _do_count_invocations; }
  void          set_do_count_invocations(bool z) { _do_count_invocations = z; }
  bool              do_method_data_update() const { return _do_method_data_update; }
  void          set_do_method_data_update(bool z) { _do_method_data_update = z; }
  int               AliasLevel() const          { return _AliasLevel; }
  bool              print_assembly() const      { return _print_assembly; }
  void          set_print_assembly(bool z)      { _print_assembly = z; }
  // check the CompilerOracle for special behaviours for this compile
  bool          method_has_option(const char* option) {
    return method() != NULL && method()->has_option(option);
  }
#ifndef PRODUCT
  bool              trace_opto_output() const   { return _trace_opto_output; }
  bool              parsed_irreducible_loop() const { return _parsed_irreducible_loop; }
  void          set_parsed_irreducible_loop(bool z) { _parsed_irreducible_loop = z; }
#endif

  // JSR 292
  bool              has_method_handle_invokes() const { return _has_method_handle_invokes; }
  void          set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }

  void begin_method() {
#ifndef PRODUCT
    if (_printer) _printer->begin_method(this);
#endif
  }
  void print_method(const char* name, int level = 1) {
#ifndef PRODUCT
    if (_printer) _printer->print_method(this, name, level);
#endif
  }
  void end_method() {
#ifndef PRODUCT
    if (_printer) _printer->end_method();
#endif
  }

  int           macro_count()                   { return _macro_nodes->length(); }
  int           predicate_count()               { return _predicate_opaqs->length(); }
  Node*         macro_node(int idx)             { return _macro_nodes->at(idx); }
  Node*         predicate_opaque1_node(int idx) { return _predicate_opaqs->at(idx); }
  ConnectionGraph* congraph()                   { return _congraph; }
  void set_congraph(ConnectionGraph* congraph)  { _congraph = congraph; }
  void add_macro_node(Node* n) {
    //assert(n->is_macro(), "must be a macro node");
    assert(!_macro_nodes->contains(n), "duplicate entry in expand list");
    _macro_nodes->append(n);
  }
  void remove_macro_node(Node* n) {
    // This function may be called twice for a node, so check
    // that the node is in the array before attempting to remove it.
    if (_macro_nodes->contains(n))
      _macro_nodes->remove(n);
    // Remove it from the _predicate_opaqs list also, if it is there.
    if (predicate_count() > 0 && _predicate_opaqs->contains(n)) {
      _predicate_opaqs->remove(n);
    }
  }
  void add_predicate_opaq(Node* n) {
    assert(!_predicate_opaqs->contains(n), "duplicate entry in predicate opaque1");
    assert(_macro_nodes->contains(n), "should have already been in macro list");
    _predicate_opaqs->append(n);
  }
  // Remove the opaque nodes that protect the predicates so that the unused checks
  // and uncommon traps will be eliminated from the graph.
  void cleanup_loop_predicates(PhaseIterGVN &igvn);
  bool is_predicate_opaq(Node* n) {
    return _predicate_opaqs->contains(n);
  }
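
  // Illustration (hypothetical sketch, in the spirit of bug 7004535): when a
  // loop and its predicate are cloned (e.g. during loop unswitching), the
  // clone of the guarding Opaque1 node must be registered just like the
  // original, or cleanup_loop_predicates() will never find and remove it.
  //
  //   Node* opq_clone = opq->clone();    // clone of the predicate's Opaque1
  //   igvn.register_new_node_with_optimizer(opq_clone);  // hypothetical registration step
  //   C->add_macro_node(opq_clone);      // predicate opaqs live in both lists
  //   C->add_predicate_opaq(opq_clone);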
  // Compilation environment.
  Arena*      comp_arena()                      { return &_comp_arena; }
  ciEnv*      env() const                       { return _env; }
  CompileLog* log() const                       { return _log; }
  bool        failing() const                   { return _env->failing() || _failure_reason != NULL; }
  const char* failure_reason()                  { return _failure_reason; }
  bool        failure_reason_is(const char* r)  { return (r == _failure_reason) || (r != NULL && _failure_reason != NULL && strcmp(r, _failure_reason) == 0); }

  void record_failure(const char* reason);
  void record_method_not_compilable(const char* reason, bool all_tiers = false) {
    // All bailouts cover "all_tiers" when TieredCompilation is off.
    if (!TieredCompilation) all_tiers = true;
    env()->record_method_not_compilable(reason, all_tiers);
    // Record failure reason.
    record_failure(reason);
  }
  void record_method_not_compilable_all_tiers(const char* reason) {
    record_method_not_compilable(reason, true);
  }
  bool check_node_count(uint margin, const char* reason) {
    if (unique() + margin > (uint)MaxNodeLimit) {
      record_method_not_compilable(reason);
      return true;
    } else {
      return false;
    }
  }
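
  // Usage sketch (hypothetical message string): phases that are about to grow
  // the graph check the node budget first and bail out gracefully.
  //
  //   if (C->check_node_count(est_new_nodes, "out of nodes during unswitching")) {
  //     return;   // compilation is now marked as failing; unwind politely
  //   }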
  // Node management
  uint         unique() const                   { return _unique; }
  uint         next_unique()                    { return _unique++; }
  void         set_unique(uint i)               { _unique = i; }
  static int   debug_idx()                      { return debug_only(_debug_idx)+0; }
  static void  set_debug_idx(int i)             { debug_only(_debug_idx = i); }
  Arena*       node_arena()                     { return &_node_arena; }
  Arena*       old_arena()                      { return &_old_arena; }
  RootNode*    root() const                     { return _root; }
  void         set_root(RootNode* r)            { _root = r; }
  StartNode*   start() const;                   // (Derived from root.)
  void         init_start(StartNode* s);
  Node*        immutable_memory();

  Node*        recent_alloc_ctl() const         { return _recent_alloc_ctl; }
  Node*        recent_alloc_obj() const         { return _recent_alloc_obj; }
  void         set_recent_alloc(Node* ctl, Node* obj) {
    _recent_alloc_ctl = ctl;
    _recent_alloc_obj = obj;
  }

  // Constant table
  ConstantTable& constant_table()               { return _constant_table; }

  MachConstantBaseNode* mach_constant_base_node();
  bool               has_mach_constant_base_node() const { return _mach_constant_base_node != NULL; }

  // Handy undefined Node
  Node*        top() const                      { return _top; }

  // these are used by guys who need to know about creation and transformation of top:
  Node*        cached_top_node()                { return _top; }
  void         set_cached_top_node(Node* tn);

  GrowableArray<Node_Notes*>* node_note_array() const { return _node_note_array; }
  void set_node_note_array(GrowableArray<Node_Notes*>* arr) { _node_note_array = arr; }
  Node_Notes*  default_node_notes() const       { return _default_node_notes; }
  void         set_default_node_notes(Node_Notes* n) { _default_node_notes = n; }

  Node_Notes*  node_notes_at(int idx) {
    return locate_node_notes(_node_note_array, idx, false);
  }
  inline bool  set_node_notes_at(int idx, Node_Notes* value);

  // Copy notes from source to dest, if they exist.
  // Overwrite dest only if source provides something.
  // Return true if information was moved.
  bool copy_node_notes_to(Node* dest, Node* source);

  // Workhorse function to sort out the blocked Node_Notes array:
  inline Node_Notes* locate_node_notes(GrowableArray<Node_Notes*>* arr,
                                       int idx, bool can_grow = false);

  void grow_node_notes(GrowableArray<Node_Notes*>* arr, int grow_by);
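
  // Illustration (sketch of the blocked indexing implied by the enum above):
  // a node index splits into a block number and an offset within the block.
  //
  //   int grp = idx >> _log2_node_notes_block_size;   // which block
  //   int sub = idx & (_node_notes_block_size - 1);   // slot within the block
  //   Node_Notes* block = arr->at(grp);               // may be NULL if never grown
  //   Node_Notes* notes = (block == NULL) ? NULL : block + sub;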
  // Type management
  Arena*      type_arena()                      { return _type_arena; }
  Dict*       type_dict()                       { return _type_dict; }
  void*       type_hwm()                        { return _type_hwm; }
  size_t      type_last_size()                  { return _type_last_size; }
  int         num_alias_types()                 { return _num_alias_types; }

  void        init_type_arena()                 { _type_arena = &_Compile_types; }
  void        set_type_arena(Arena* a)          { _type_arena = a; }
  void        set_type_dict(Dict* d)            { _type_dict = d; }
  void        set_type_hwm(void* p)             { _type_hwm = p; }
  void        set_type_last_size(size_t sz)     { _type_last_size = sz; }

  const TypeFunc* last_tf(ciMethod* m) {
    return (m == _last_tf_m) ? _last_tf : NULL;
  }
  void set_last_tf(ciMethod* m, const TypeFunc* tf) {
    assert(m != NULL || tf == NULL, "");
    _last_tf_m = m;
    _last_tf = tf;
  }
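
  // Usage sketch (for illustration): a one-entry memo cache for signature
  // types, exploiting the fact that consecutive queries often concern the
  // same method.
  //
  //   const TypeFunc* tf = C->last_tf(method);
  //   if (tf == NULL) {
  //     tf = TypeFunc::make(method);  // compute and intern the signature type
  //     C->set_last_tf(method, tf);   // remember it for the next query
  //   }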
  AliasType*        alias_type(int idx)         { assert(idx < num_alias_types(), "oob"); return _alias_types[idx]; }
  AliasType*        alias_type(const TypePtr* adr_type, ciField* field = NULL) { return find_alias_type(adr_type, false, field); }
  bool         have_alias_type(const TypePtr* adr_type);
  AliasType*        alias_type(ciField* field);

  int               get_alias_index(const TypePtr* at)  { return alias_type(at)->index(); }
  const TypePtr*    get_adr_type(uint aidx)             { return alias_type(aidx)->adr_type(); }
  int               get_general_index(uint aidx)        { return alias_type(aidx)->general_index(); }

  // Building nodes
  void              rethrow_exceptions(JVMState* jvms);
  void              return_values(JVMState* jvms);
  JVMState*         build_start_state(StartNode* start, const TypeFunc* tf);

  // Decide how to build a call.
  // The profile factor is a discount to apply to this site's interp. profile.
  CallGenerator*    call_generator(ciMethod* call_method, int vtable_index, bool call_is_virtual, JVMState* jvms, bool allow_inline, float profile_factor);
  bool should_delay_inlining(ciMethod* call_method, JVMState* jvms);

  // Report if there were too many traps at the current method and bci.
  // Report if a trap was recorded, and/or PerMethodTrapLimit was exceeded.
  // If there is no MDO at all, report no trap unless told to assume it.
  bool too_many_traps(ciMethod* method, int bci, Deoptimization::DeoptReason reason);
  // This version, unspecific to a particular bci, asks if
  // PerMethodTrapLimit was exceeded for all inlined methods seen so far.
  bool too_many_traps(Deoptimization::DeoptReason reason,
                      // Privately used parameter for logging:
                      ciMethodData* logmd = NULL);
  // Report if there were too many recompiles at a method and bci.
  bool too_many_recompiles(ciMethod* method, int bci, Deoptimization::DeoptReason reason);
  // Parsing, optimization
  PhaseGVN*         initial_gvn()               { return _initial_gvn; }
  Unique_Node_List* for_igvn()                  { return _for_igvn; }
  inline void       record_for_igvn(Node* n);   // Body is after class Unique_Node_List.
  void          set_initial_gvn(PhaseGVN* gvn)  { _initial_gvn = gvn; }
  void          set_for_igvn(Unique_Node_List* for_igvn) { _for_igvn = for_igvn; }

  // Replace n by nn using initial_gvn, calling hash_delete and
  // record_for_igvn as needed.
  void gvn_replace_by(Node* n, Node* nn);

  void              identify_useful_nodes(Unique_Node_List &useful);
  void              remove_useless_nodes (Unique_Node_List &useful);

  WarmCallInfo*     warm_calls() const          { return _warm_calls; }
  void          set_warm_calls(WarmCallInfo* l) { _warm_calls = l; }
  WarmCallInfo* pop_warm_call();

  // Record this CallGenerator for inlining at the end of parsing.
  void              add_late_inline(CallGenerator* cg) { _late_inlines.push(cg); }

  // Matching, CFG layout, allocation, code generation
  PhaseCFG*         cfg()                       { return _cfg; }
  bool              select_24_bit_instr() const { return _select_24_bit_instr; }
  bool              in_24_bit_fp_mode() const   { return _in_24_bit_fp_mode; }
  bool              has_java_calls() const      { return _java_calls > 0; }
  int               java_calls() const          { return _java_calls; }
  int               inner_loops() const         { return _inner_loops; }
  Matcher*          matcher()                   { return _matcher; }
  PhaseRegAlloc*    regalloc()                  { return _regalloc; }
  int               frame_slots() const         { return _frame_slots; }
  int               frame_size_in_words() const;  // frame_slots in units of the polymorphic 'words'
  RegMask&          FIRST_STACK_mask()          { return _FIRST_STACK_mask; }
  Arena*            indexSet_arena()            { return _indexSet_arena; }
  void*             indexSet_free_block_list()  { return _indexSet_free_block_list; }
  uint              node_bundling_limit()       { return _node_bundling_limit; }
  Bundle*           node_bundling_base()        { return _node_bundling_base; }
  void          set_node_bundling_limit(uint n) { _node_bundling_limit = n; }
  void          set_node_bundling_base(Bundle* b) { _node_bundling_base = b; }
  bool          starts_bundle(const Node *n) const;
  bool          need_stack_bang(int frame_size_in_bytes) const;
  bool          need_register_stack_bang() const;

  void          set_matcher(Matcher* m)         { _matcher = m; }
  //void          set_regalloc(PhaseRegAlloc* ra) { _regalloc = ra; }
  void          set_indexSet_arena(Arena* a)    { _indexSet_arena = a; }
  void          set_indexSet_free_block_list(void* p) { _indexSet_free_block_list = p; }

  // Remember if this compilation changes hardware mode to 24-bit precision
  void set_24_bit_selection_and_mode(bool selection, bool mode) {
    _select_24_bit_instr = selection;
    _in_24_bit_fp_mode = mode;
  }
  void  set_java_calls(int z)  { _java_calls  = z; }
  void  set_inner_loops(int z) { _inner_loops = z; }

  // Instruction bits passed off to the VM
  int               code_size()                 { return _method_size; }
  CodeBuffer*       code_buffer()               { return &_code_buffer; }
  int               first_block_size()          { return _first_block_size; }
  void          set_frame_complete(int off)     { _code_offsets.set_value(CodeOffsets::Frame_Complete, off); }
  ExceptionHandlerTable*  handler_table()       { return &_handler_table; }
  ImplicitExceptionTable* inc_table()           { return &_inc_table; }
  OopMapSet*        oop_map_set()               { return _oop_map_set; }
  DebugInformationRecorder* debug_info()        { return env()->debug_info(); }
  Dependencies*     dependencies()              { return env()->dependencies(); }
  static int        CompiledZap_count()         { return _CompiledZap_count; }
  BufferBlob*       scratch_buffer_blob()       { return _scratch_buffer_blob; }
  void         init_scratch_buffer_blob(int const_size);
  void        clear_scratch_buffer_blob();
  void          set_scratch_buffer_blob(BufferBlob* b) { _scratch_buffer_blob = b; }
  relocInfo*        scratch_locs_memory()       { return _scratch_locs_memory; }
  void          set_scratch_locs_memory(relocInfo* b)  { _scratch_locs_memory = b; }

  // emit to scratch blob, report resulting size
  uint              scratch_emit_size(const Node* n);
  void       set_in_scratch_emit_size(bool x)   { _in_scratch_emit_size = x; }
  bool           in_scratch_emit_size() const   { return _in_scratch_emit_size; }

  enum ScratchBufferBlob {
    MAX_inst_size   = 1024,
    MAX_locs_size   = 128,  // number of relocInfo elements
    MAX_const_size  = 128,
    MAX_stubs_size  = 128
  };

  // Major entry point.  Given a Scope, compile the associated method.
  // For normal compilations, entry_bci is InvocationEntryBci.  For on stack
  // replacement, entry_bci indicates the bytecode for which to compile a
  // continuation.
  Compile(ciEnv* ci_env, C2Compiler* compiler, ciMethod* target,
          int entry_bci, bool subsume_loads, bool do_escape_analysis);

  // Second major entry point.  From the TypeFunc signature, generate code
  // to pass arguments from the Java calling convention to the C calling
  // convention.
  Compile(ciEnv* ci_env, const TypeFunc *(*gen)(),
          address stub_function, const char *stub_name,
          int is_fancy_jump, bool pass_tls,
          bool save_arg_registers, bool return_pc);

  // From the TypeFunc signature, generate code to pass arguments
  // from Compiled calling convention to Interpreter's calling convention
  void Generate_Compiled_To_Interpreter_Graph(const TypeFunc *tf, address interpreter_entry);

  // From the TypeFunc signature, generate code to pass arguments
  // from Interpreter's calling convention to Compiler's calling convention
  void Generate_Interpreter_To_Compiled_Graph(const TypeFunc *tf);

  // Are we compiling a method?
  bool has_method() { return method() != NULL; }

  // Maybe print some information about this compile.
  void print_compile_messages();

  // Final graph reshaping, a post-pass after the regular optimizer is done.
  bool final_graph_reshaping();

  // returns true if adr is completely contained in the given alias category
  bool must_alias(const TypePtr* adr, int alias_idx);

  // returns true if adr overlaps with the given alias category
  bool can_alias(const TypePtr* adr, int alias_idx);

  // Driver for converting compiler's IR into machine code bits
  void Output();

  // Accessors for node bundling info.
  Bundle* node_bundling(const Node *n);
  bool valid_bundle_info(const Node *n);

  // Schedule and Bundle the instructions
  void ScheduleAndBundle();

  // Build OopMaps for each GC point
  void BuildOopMaps();

  // Append debug info for the node "local" at safepoint node "sfpt" to the
  // "array".  May also consult and add to "objs", which describes the
  // scalar-replaced objects.
  void FillLocArray( int idx, MachSafePointNode* sfpt,
                     Node *local, GrowableArray<ScopeValue*> *array,
                     GrowableArray<ScopeValue*> *objs );

  // If "objs" contains an ObjectValue whose id is "id", returns it, else NULL.
  static ObjectValue* sv_for_node_id(GrowableArray<ScopeValue*> *objs, int id);
  // Requires that "objs" does not contain an ObjectValue whose id matches
  // that of "sv".  Appends "sv".
  static void set_sv_for_object_node(GrowableArray<ScopeValue*> *objs,
                                     ObjectValue* sv );

  // Process an OopMap Element while emitting nodes
  void Process_OopMap_Node(MachNode *mach, int code_offset);

  // Write out basic block data to code buffer
  void Fill_buffer();

  // Determine which variable sized branches can be shortened
  void Shorten_branches(Label *labels, int& code_size, int& reloc_size, int& stub_size);

  // Compute the size of first NumberOfLoopInstrToAlign instructions
  // at the head of a loop.
  void compute_loop_first_inst_sizes();

  // Compute the information for the exception tables
  void FillExceptionTables(uint cnt, uint *call_returns, uint *inct_starts, Label *blk_labels);

  // Stack slots that may be unused by the calling convention but must
  // otherwise be preserved.  On Intel this includes the return address.
  // On PowerPC it includes the 4 words holding the old TOC & LR glue.
  uint in_preserve_stack_slots();

  // "Top of Stack" slots that may be unused by the calling convention but must
  // otherwise be preserved.
  // On Intel these are not necessary and the value can be zero.
  // On Sparc this describes the words reserved for storing a register window
  // when an interrupt occurs.
  static uint out_preserve_stack_slots();

  // Number of outgoing stack slots killed above the out_preserve_stack_slots
  // for calls to C.  Supports the var-args backing area for register parms.
  uint varargs_C_out_slots_killed() const;

  // Number of Stack Slots consumed by a synchronization entry
  int sync_stack_slots() const;

  // Compute the name of old_SP.  See <arch>.ad for frame layout.
  OptoReg::Name compute_old_SP();

#ifdef ENABLE_ZAP_DEAD_LOCALS
  static bool is_node_getting_a_safepoint(Node*);
  void Insert_zap_nodes();
  Node* call_zap_node(MachSafePointNode* n, int block_no);
#endif
 private:
  // Phase control:
  void  Init(int aliaslevel);                   // Prepare for a single compilation
  int   Inline_Warm();                          // Find more inlining work.
  void  Finish_Warm();                          // Give up on further inlines.
  void  Optimize();                             // Given a graph, optimize it
  void  Code_Gen();                             // Generate code from a graph

  // Management of the AliasType table.
  void grow_alias_types();
  AliasCacheEntry* probe_alias_cache(const TypePtr* adr_type);
  const TypePtr *flatten_alias_type(const TypePtr* adr_type) const;
  AliasType* find_alias_type(const TypePtr* adr_type, bool no_create, ciField* field);

  void verify_top(Node*) const PRODUCT_RETURN;

  // Intrinsic setup.
  void           register_library_intrinsics();                            // initializer
  CallGenerator* make_vm_intrinsic(ciMethod* m, bool is_virtual);          // constructor
  int            intrinsic_insertion_index(ciMethod* m, bool is_virtual);  // helper
  CallGenerator* find_intrinsic(ciMethod* m, bool is_virtual);             // query fn
  void           register_intrinsic(CallGenerator* cg);                    // update fn

#ifndef PRODUCT
  static juint  _intrinsic_hist_count[vmIntrinsics::ID_LIMIT];
  static jubyte _intrinsic_hist_flags[vmIntrinsics::ID_LIMIT];
#endif

 public:

  // Note:  Histogram array size is about 1 Kb.
  enum {                        // flag bits:
    _intrinsic_worked   = 1,    // succeeded at least once
    _intrinsic_failed   = 2,    // tried it but it failed
    _intrinsic_disabled = 4,    // was requested but disabled (e.g., -XX:-InlineUnsafeOps)
    _intrinsic_virtual  = 8,    // was seen in the virtual form (rare)
    _intrinsic_both     = 16    // was seen in the non-virtual form (usual)
  };
  // Update histogram.  Return true if this is a first-time occurrence.
  static bool gather_intrinsic_statistics(vmIntrinsics::ID id,
                                          bool is_virtual, int flags) PRODUCT_RETURN0;
  static void print_intrinsic_statistics() PRODUCT_RETURN;

  // Graph verification code
  // Walk the node list, verifying that there is a one-to-one
  // correspondence between Use-Def edges and Def-Use edges.
  // The option no_dead_code enables stronger checks that the
  // graph is strongly connected from root in both directions.
  void verify_graph_edges(bool no_dead_code = false) PRODUCT_RETURN;

  // Print bytecodes, including the scope inlining tree
  void print_codes();

  // End-of-run dumps.
  static void print_statistics() PRODUCT_RETURN;

  // Dump formatted assembly
  void dump_asm(int *pcs = NULL, uint pc_limit = 0) PRODUCT_RETURN;
  void dump_pc(int *pcs, int pc_limit, Node *n);

  // Verify ADLC assumptions during startup
  static void adlc_verification() PRODUCT_RETURN;

  // Definitions of pd methods
  static void pd_compiler2_init();
};

#endif // SHARE_VM_OPTO_COMPILE_HPP